ngram
listlengths
0
82k
[ "( (tuple, \"(\", \")\"), (list, \"[\", \"]\"), (set, \"{\", \"}\"),", "H.span[\"hreprv-None\"](\"None\") @one_test_per_assert def test_numbers(): assert hrepr(123) == H.span[\"hreprt-int\"](\"123\") assert hrepr(1.25)", "H.td[\"hrepr-delim\"](\": \"), H.td(H.span[\"hreprt-int\"](\"1\")), ), H.tr( H.td(H.span[\"hreprt-str\"](\"y\")), H.td[\"hrepr-delim\"](\": \"), H.td(H.span[\"hreprt-int\"](\"2\")), ),", "== H.span[\"hreprt-str\"]( \"hello this is a b...\" ) assert hshort(\"hello", "+ 1) == H.span[\"hreprt-int\"]( \"2\" ) def test_postprocess(): assert hrepr(1,", "H.div[\"hrepr-close\"](\"]\"), ), ) ), ), H.div[\"hrepr-close\"](\"]\"), ) assert hrepr(lili, shortrefs=True)", "assert hrepr(False) == H.span[\"hreprv-False\"](\"False\") assert hrepr(None) == H.span[\"hreprv-None\"](\"None\") @one_test_per_assert def", "hrepr(False) == H.span[\"hreprv-False\"](\"False\") assert hrepr(None) == H.span[\"hreprv-None\"](\"None\") @one_test_per_assert def test_numbers():", "lili = [li, li] assert hrepr(lili) == H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"),", "H.span[\"hreprt-str\"]( \"hello this is a bit long\" ) assert hshort(\"hello", "1, \"y\": 2}, \"{\", \"}\"), ): clsname = type(val).__name__ assert", "H.td(H.span[\"hreprt-int\"](\"2\")), ), ), H.div[\"hrepr-close\"](\"}\"), ) def test_dataclass(): pt = Point(1,", "\"hreprt-list\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1,", "hrepr import hrepr as real_hrepr from hrepr.h import styledir from", "), ), H.div[\"hrepr-close\"](\"]\"), ), ) assert hrepr(li, shortrefs=True) == H.div[\"hrepr-refbox\"](", "hrepr(True) == H.span[\"hreprv-True\"](\"True\") assert hrepr(False) == H.span[\"hreprv-False\"](\"False\") assert hrepr(None) ==", "H from hrepr import hrepr as real_hrepr from hrepr.h import", "assert hshort(\"hello this is a bit long\", string_cutoff=10) == H.span[", "bit long\", 
string_cutoff=10) == H.span[ \"hreprt-str\" ](\"hello t...\") assert hshort(\"hello", "\"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"⟳\",", "class Point: x: int y: int class Opaque: pass def", "H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hrepr-ref\"](\"⟳\", 1)), ), H.div[\"hrepr-close\"](\"]\"), ), )", "string_cutoff=10000) == H.span[ \"hreprt-str\" ](\"hello this is a bit long\")", "assert hshort(\"hello this is a bit long\") == H.span[\"hreprt-str\"]( \"hello", "assert hrepr(b\"hello this is a bit long\") == H.span[\"hreprt-bytes\"]( \"68656c6c6f2074686973206973206120626974206c6f6e67\"", "def test_singletons(): assert hrepr(True) == H.span[\"hreprv-True\"](\"True\") assert hrepr(False) == H.span[\"hreprv-False\"](\"False\")", "bit long\" ) assert hshort(\"hello this is a bit long\")", "2))) == H.div[ f\"hreprt-{clsname}\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](o), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")),", "2}, \"{\", \"}\"), ): clsname = type(val).__name__ assert hrepr(val, max_depth=0)", "H.div[\"hrepr-close\"](\"]\"), ), ) ), ), H.div[\"hrepr-close\"](\"]\"), ), ) assert hrepr(li,", "H.meta( {\"http-equiv\": \"Content-type\"}, content=\"text/html\", charset=\"UTF-8\" ) assert real_hrepr.page(1) == H.inline(", "shortrefs=True) == H.div[ \"hreprt-list\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(", "css_hrepr = open(f\"{styledir}/hrepr.css\", encoding=\"utf-8\").read() hrepr = real_hrepr.variant(fill_resources=False) @dataclass class Point:", "hrepr(pt) == H.div[\"hreprt-dict\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"{\"), H.table[\"hrepr-body\"]( H.tr( H.td(H.span[\"hreprt-str\"](\"x\")), H.td[\"hrepr-delim\"](\": \"),", 
"2, H.b(\"there\")) assert hrepr(tg) == tg def test_multiref(): li =", "\"Content-type\"}, content=\"text/html\", charset=\"UTF-8\" ) assert real_hrepr.page(1) == H.inline( H.raw(\"<!DOCTYPE html>\"),", "Opaque: pass def hshort(x, **kw): return hrepr(x, max_depth=0, **kw) @one_test_per_assert", "H.div[\"hreprt-dict\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"{\"), H.table[\"hrepr-body\"]( H.tr( H.td(H.span[\"hreprt-str\"](\"x\")), H.td[\"hrepr-delim\"](\": \"), H.td(H.span[\"hreprt-int\"](\"1\")), ),", "H.span[\"hreprt-float\"](\"1.25\") @one_test_per_assert def test_string(): assert hshort(\"hello\") == H.span[\"hreprt-str\"](\"hello\") assert hrepr(\"3", "type(val).__name__ assert hrepr(val, max_depth=0) == H.div[ f\"hreprt-{clsname}\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](o),", "t...\") assert hshort(\"hello this is a bit long\", string_cutoff=5) ==", "H.b(\"there\")) assert hrepr(tg) == tg def test_multiref(): li = [1,", "), ), H.div[\"hrepr-close\"](\"}\"), ) def test_dataclass(): pt = Point(1, 2)", "H.tr( H.td(H.span[\"hreprt-symbol\"](\"x\")), H.td[\"hrepr-delim\"](\"=\"), H.td(H.span[\"hreprt-int\"](\"1\")), ), H.tr( H.td(H.span[\"hreprt-symbol\"](\"y\")), H.td[\"hrepr-delim\"](\"=\"), H.td(H.span[\"hreprt-int\"](\"2\")), ),", "== H.span[ \"hreprt-str\" ](\"hello t...\") assert hshort(\"hello this is a", "is a bit long\", string_cutoff=10000) == H.span[ \"hreprt-str\" ](\"hello this", "hrepr(pt) == H.div[\"hreprt-Point\", \"hrepr-instance\", \"hreprl-v\"]( H.div[\"hrepr-title\"](\"Point\"), H.table[\"hrepr-body\"]( H.tr( H.td(H.span[\"hreprt-symbol\"](\"x\")), H.td[\"hrepr-delim\"](\"=\"),", "), ) ), H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"](", "assert hrepr(1, 2) == H.inline( H.span[\"hreprt-int\"](\"1\"), H.span[\"hreprt-int\"](\"2\"), ) def test_preprocess():", "test_multiref(): li = [1, 2] lili = [li, li] assert", "H.div( H.div[\"hrepr-refbox\"]( 
H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"](", "2), \"(\", \")\"), ([1, 2], \"[\", \"]\"), ({1, 2}, \"{\",", "H.div[\"hrepr-close\"](\"]\"), ), ) ), H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\",", "test_numbers(): assert hrepr(123) == H.span[\"hreprt-int\"](\"123\") assert hrepr(1.25) == H.span[\"hreprt-float\"](\"1.25\") @one_test_per_assert", "assert hshort(Opaque()) == H.span[\"hreprt-Opaque\"]( \"<\", \"tests.test_hrepr.Opaque\", \">\" ) def test_as_page():", "\"{\", \"}\"), ): clsname = typ.__name__ assert hrepr(typ((1, 2))) ==", "H.span[\"hreprt-str\"](\"hello\") assert hrepr(\"3 spaces\") == H.span[\"hreprt-str\"](\"3 spaces\") assert hrepr(\"hello this", "H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\")), ) def test_tag(): tg = H.span[\"hello\"](1, 2, H.b(\"there\"))", "H.td(H.span[\"hreprt-int\"](\"2\")), ), ), ) assert hrepr(pt, max_depth=0) == H.div[ \"hreprt-Point\",", "== H.span[\"hreprt-bytes\"](\"68656c6c6f\") assert hrepr(b\"hello this is a bit long\") ==", "test_preprocess(): assert hrepr(1, preprocess=lambda x, hrepr: x + 1) ==", "\"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hrepr-ref\"](\"⟳\", 1)), ), H.div[\"hrepr-close\"](\"]\"), ),", "\"hrepr-body\"](H.div(\"...\"),), H.div[\"hrepr-close\"](\"]\"), ), ) ), ), H.div[\"hrepr-close\"](\"]\"), ) assert hrepr(lili,", "assert hrepr(1.25) == H.span[\"hreprt-float\"](\"1.25\") @one_test_per_assert def test_string(): assert hshort(\"hello\") ==", "clsname = typ.__name__ assert hrepr(typ((1, 2))) == H.div[ f\"hreprt-{clsname}\", \"hrepr-bracketed\"", ") assert hrepr(li, shortrefs=True) == H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\",", "a bit long\", string_cutoff=10000) == H.span[ \"hreprt-str\" ](\"hello this 
is", "\">\" ) def test_as_page(): utf8 = H.meta( {\"http-equiv\": \"Content-type\"}, content=\"text/html\",", "), H.div[\"hrepr-close\"](\"]\"), ), ) ), H.div(H.span[\"hrepr-ref\"](\"#\", 1)), ), H.div[\"hrepr-close\"](\"]\"), )", "1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hrepr-ref\"](\"⟳\", 1)),", "\"{\", \"}\"), ): clsname = type(val).__name__ assert hrepr(val, max_depth=0) ==", "H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\"),), H.div[\"hrepr-close\"](\"]\"), ), ) ), ), H.div[\"hrepr-close\"](\"]\"), ),", "), H.div[\"hrepr-close\"](\"]\"), ) def test_recursive(): li = [1] li.append(li) assert", "\"), H.td(H.span[\"hreprt-int\"](\"2\")), ), ), H.div[\"hrepr-close\"](\"}\"), ) def test_dataclass(): pt =", "a bit long\") == H.span[\"hreprt-str\"]( \"hello this is a b...\"", "Point: x: int y: int class Opaque: pass def hshort(x,", "def test_as_page(): utf8 = H.meta( {\"http-equiv\": \"Content-type\"}, content=\"text/html\", charset=\"UTF-8\" )", "H.tr( H.td(H.span[\"hreprt-str\"](\"x\")), H.td[\"hrepr-delim\"](\": \"), H.td(H.span[\"hreprt-int\"](\"1\")), ), H.tr( H.td(H.span[\"hreprt-str\"](\"y\")), H.td[\"hrepr-delim\"](\": \"),", "{\"http-equiv\": \"Content-type\"}, content=\"text/html\", charset=\"UTF-8\" ) assert real_hrepr.page(1) == H.inline( H.raw(\"<!DOCTYPE", "long\", string_cutoff=10) == H.span[ \"hreprt-str\" ](\"hello t...\") assert hshort(\"hello this", "from dataclasses import dataclass from hrepr import H from hrepr", "H.td[\"hrepr-delim\"](\"=\"), H.td(H.span[\"hreprt-int\"](\"2\")), ), ), ) assert hrepr(pt, max_depth=0) == H.div[", "\"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"⟳\", 1, \"=\"),", "= typ.__name__ assert hrepr(typ((1, 2))) == H.div[ 
f\"hreprt-{clsname}\", \"hrepr-bracketed\" ](", "import dataclass from hrepr import H from hrepr import hrepr", "def test_structures(): for typ, o, c in ( (tuple, \"(\",", "li = [1, 2] lili = [li, li] assert hrepr(lili)", ") assert hrepr(lili, shortrefs=True) == H.div[ \"hreprt-list\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](\"[\"),", "H.span[\"hreprt-int\"]( \"2\" ) def test_postprocess(): assert hrepr(1, postprocess=lambda x, obj,", "H.span[\"hreprk-class\"]( H.span[\"hrepr-defn-key\"](\"class\"), \" \", H.span[\"hrepr-defn-name\"](\"Opaque\"), ) def test_structures(): for typ,", "== H.div[ f\"hreprt-{clsname}\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](o), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\")), H.div[\"hrepr-close\"](c), )", "assert hrepr(li, shortrefs=True) == H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"](", "H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hrepr-ref\"](\"⟳\", 1)), ), H.div[\"hrepr-close\"](\"]\"),", "2} assert hrepr(pt) == H.div[\"hreprt-dict\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"{\"), H.table[\"hrepr-body\"]( H.tr( H.td(H.span[\"hreprt-str\"](\"x\")),", "typ, o, c in ( (tuple, \"(\", \")\"), (list, \"[\",", "\"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"{\"), H.table[\"hrepr-body\"]( H.tr( H.td(H.span[\"hreprt-str\"](\"x\")), H.td[\"hrepr-delim\"](\": \"), H.td(H.span[\"hreprt-int\"](\"1\")), ), H.tr(", "\"{\", \"}\"), (frozenset({1, 2}), \"{\", \"}\"), ({\"x\": 1, \"y\": 2},", "this is a b...\" ) assert hshort(\"hello this is a", "H.td(H.span[\"hreprt-str\"](\"x\")), H.td[\"hrepr-delim\"](\": \"), H.td(H.span[\"hreprt-int\"](\"1\")), ), H.tr( H.td(H.span[\"hreprt-str\"](\"y\")), H.td[\"hrepr-delim\"](\": \"), H.td(H.span[\"hreprt-int\"](\"2\")),", "bit long\", string_cutoff=5) == H.span[ \"hreprt-str\" ](\"he...\") assert 
hshort(\"hello this", "assert hshort(\"hello this is a bit long\", string_cutoff=10000) == H.span[", "\"{\", \"}\"), (frozenset, \"{\", \"}\"), ): clsname = typ.__name__ assert", "H.div(H.span[\"hrepr-ref\"](\"#\", 1)), ), H.div[\"hrepr-close\"](\"]\"), ) def test_recursive(): li = [1]", "is a bit long\", string_cutoff=5) == H.span[ \"hreprt-str\" ](\"he...\") assert", "test_tag(): tg = H.span[\"hello\"](1, 2, H.b(\"there\")) assert hrepr(tg) == tg", "test_bytes(): assert hrepr(b\"hello\") == H.span[\"hreprt-bytes\"](\"68656c6c6f\") assert hshort(b\"hello\") == H.span[\"hreprt-bytes\"](\"68656c6c6f\") assert", "), H.tr( H.td(H.span[\"hreprt-symbol\"](\"y\")), H.td[\"hrepr-delim\"](\"=\"), H.td(H.span[\"hreprt-int\"](\"2\")), ), ), ) assert hrepr(pt,", "utf8 = H.meta( {\"http-equiv\": \"Content-type\"}, content=\"text/html\", charset=\"UTF-8\" ) assert real_hrepr.page(1)", "): clsname = type(val).__name__ assert hrepr(val, max_depth=0) == H.div[ f\"hreprt-{clsname}\",", "== H.inline( H.raw(\"<!DOCTYPE html>\"), H.html(H.head(utf8, H.style(css_hrepr)), H.body(real_hrepr(1)),), ) def test_hrepr_multiarg():", "hshort(\"hello this is a bit long\", string_cutoff=10000) == H.span[ \"hreprt-str\"", ".common import one_test_per_assert css_hrepr = open(f\"{styledir}/hrepr.css\", encoding=\"utf-8\").read() hrepr = real_hrepr.variant(fill_resources=False)", "def test_unsupported(): assert hshort(Opaque()) == H.span[\"hreprt-Opaque\"]( \"<\", \"tests.test_hrepr.Opaque\", \">\" )", "hrepr(lili, shortrefs=True) == H.div[ \"hreprt-list\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"](", "@one_test_per_assert def test_string(): assert hshort(\"hello\") == H.span[\"hreprt-str\"](\"hello\") assert hrepr(\"3 spaces\")", "H.raw(\"<!DOCTYPE html>\"), H.html(H.head(utf8, H.style(css_hrepr)), H.body(real_hrepr(1)),), ) def test_hrepr_multiarg(): assert hrepr(1,", "a bit long\" ) assert hshort(\"hello this is a bit", "int y: int class Opaque: pass 
def hshort(x, **kw): return", "long\") == H.span[\"hreprt-str\"]( \"hello this is a b...\" ) assert", "x, hrepr: x + 1) == H.span[\"hreprt-int\"]( \"2\" ) def", "H.div[\"hrepr-close\"](\"]\"), ), ) def test_unsupported(): assert hshort(Opaque()) == H.span[\"hreprt-Opaque\"]( \"<\",", "hrepr(li) == H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\",", "def test_dict(): pt = {\"x\": 1, \"y\": 2} assert hrepr(pt)", "== H.span[\"hreprt-bytes\"]( \"68656c6c6f2074686...\" ) def test_function(): assert hrepr(Opaque) == H.span[\"hreprk-class\"](", "H.span[\"hreprv-False\"](\"False\") assert hrepr(None) == H.span[\"hreprv-None\"](\"None\") @one_test_per_assert def test_numbers(): assert hrepr(123)", "\"hreprt-Point\", \"hrepr-instance\", \"hreprl-s\" ]( H.div[\"hrepr-title\"](\"Point\"), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\")), ) def test_tag():", "H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](\"]\"), ),", "\"]\"), ({1, 2}, \"{\", \"}\"), (frozenset({1, 2}), \"{\", \"}\"), ({\"x\":", ") ), H.div(H.span[\"hrepr-ref\"](\"#\", 1)), ), H.div[\"hrepr-close\"](\"]\"), ) def test_recursive(): li", "2}), \"{\", \"}\"), ({\"x\": 1, \"y\": 2}, \"{\", \"}\"), ):", "max_depth=0) == H.div[ \"hreprt-Point\", \"hrepr-instance\", \"hreprl-s\" ]( H.div[\"hrepr-title\"](\"Point\"), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\")),", "\"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](c), ) def test_short_structures(): for val,", "assert hrepr(pt) == H.div[\"hreprt-Point\", \"hrepr-instance\", \"hreprl-v\"]( H.div[\"hrepr-title\"](\"Point\"), H.table[\"hrepr-body\"]( H.tr( H.td(H.span[\"hreprt-symbol\"](\"x\")),", "test_dataclass(): pt = 
Point(1, 2) assert hrepr(pt) == H.div[\"hreprt-Point\", \"hrepr-instance\",", "hrepr(1.25) == H.span[\"hreprt-float\"](\"1.25\") @one_test_per_assert def test_string(): assert hshort(\"hello\") == H.span[\"hreprt-str\"](\"hello\")", "[1, 2] lili = [li, li] assert hrepr(lili) == H.div[\"hreprt-list\",", "in ( (tuple, \"(\", \")\"), (list, \"[\", \"]\"), (set, \"{\",", "a bit long\") == H.span[\"hreprt-bytes\"]( \"68656c6c6f2074686...\" ) def test_function(): assert", "2] lili = [li, li] assert hrepr(lili) == H.div[\"hreprt-list\", \"hrepr-bracketed\"](", "\"hrepr-body\"](H.div(\"...\"),), H.div[\"hrepr-close\"](\"]\"), ), ) ), ), H.div[\"hrepr-close\"](\"]\"), ), ) assert", "pt = Point(1, 2) assert hrepr(pt) == H.div[\"hreprt-Point\", \"hrepr-instance\", \"hreprl-v\"](", "dataclasses import dataclass from hrepr import H from hrepr import", "typ.__name__ assert hrepr(typ((1, 2))) == H.div[ f\"hreprt-{clsname}\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](o),", "hrepr = real_hrepr.variant(fill_resources=False) @dataclass class Point: x: int y: int", "H.td(H.span[\"hreprt-symbol\"](\"x\")), H.td[\"hrepr-delim\"](\"=\"), H.td(H.span[\"hreprt-int\"](\"1\")), ), H.tr( H.td(H.span[\"hreprt-symbol\"](\"y\")), H.td[\"hrepr-delim\"](\"=\"), H.td(H.span[\"hreprt-int\"](\"2\")), ), ),", "]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\",", "H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](\"]\"), ), ) ), H.div(", "hshort(\"hello this is a bit long\", string_cutoff=5) == H.span[ \"hreprt-str\"", "H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"⟳\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\"),),", "hrepr(1, postprocess=lambda x, obj, hrepr: x[\"newclass\"]) == H.span[ 
\"newclass\", \"hreprt-int\"", "long\") == H.span[\"hreprt-str\"]( \"hello this is a bit long\" )", "\"hrepr-body\"](H.div(\"...\")), ) def test_tag(): tg = H.span[\"hello\"](1, 2, H.b(\"there\")) assert", "a b...\" ) assert hshort(\"hello this is a bit long\",", "o, c in ( (tuple, \"(\", \")\"), (list, \"[\", \"]\"),", "this is a bit long\") == H.span[\"hreprt-str\"]( \"hello this is", "test_structures(): for typ, o, c in ( (tuple, \"(\", \")\"),", "a bit long\") @one_test_per_assert def test_bytes(): assert hrepr(b\"hello\") == H.span[\"hreprt-bytes\"](\"68656c6c6f\")", "\"hreprt-str\" ](\"hello this is a bit long\") @one_test_per_assert def test_bytes():", "hrepr(li, shortrefs=True) == H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"),", "long\") == H.span[\"hreprt-bytes\"]( \"68656c6c6f2074686973206973206120626974206c6f6e67\" ) assert hshort(b\"hello this is a", "== H.inline( H.span[\"hreprt-int\"](\"1\"), H.span[\"hreprt-int\"](\"2\"), ) def test_preprocess(): assert hrepr(1, preprocess=lambda", "\"hreprl-s\" ]( H.div[\"hrepr-title\"](\"Point\"), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\")), ) def test_tag(): tg =", "hshort(Opaque()) == H.span[\"hreprt-Opaque\"]( \"<\", \"tests.test_hrepr.Opaque\", \">\" ) def test_as_page(): utf8", "shortrefs=True) == H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\",", "bit long\") == H.span[\"hreprt-str\"]( \"hello this is a b...\" )", "import styledir from .common import one_test_per_assert css_hrepr = open(f\"{styledir}/hrepr.css\", encoding=\"utf-8\").read()", "assert hrepr(None) == H.span[\"hreprv-None\"](\"None\") @one_test_per_assert def test_numbers(): assert hrepr(123) ==", "is a bit long\") == H.span[\"hreprt-bytes\"]( \"68656c6c6f2074686...\" ) def test_function():", "hrepr(Opaque) == H.span[\"hreprk-class\"]( 
H.span[\"hrepr-defn-key\"](\"class\"), \" \", H.span[\"hrepr-defn-name\"](\"Opaque\"), ) def test_structures():", "test_dict(): pt = {\"x\": 1, \"y\": 2} assert hrepr(pt) ==", "H.tr( H.td(H.span[\"hreprt-str\"](\"y\")), H.td[\"hrepr-delim\"](\": \"), H.td(H.span[\"hreprt-int\"](\"2\")), ), ), H.div[\"hrepr-close\"](\"}\"), ) def", ") def test_preprocess(): assert hrepr(1, preprocess=lambda x, hrepr: x +", "\", H.span[\"hrepr-defn-name\"](\"Opaque\"), ) def test_structures(): for typ, o, c in", "), H.tr( H.td(H.span[\"hreprt-str\"](\"y\")), H.td[\"hrepr-delim\"](\": \"), H.td(H.span[\"hreprt-int\"](\"2\")), ), ), H.div[\"hrepr-close\"](\"}\"), )", "c in ( (tuple, \"(\", \")\"), (list, \"[\", \"]\"), (set,", "tg def test_multiref(): li = [1, 2] lili = [li,", "H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](c), ) def test_short_structures(): for", "hshort(x, **kw): return hrepr(x, max_depth=0, **kw) @one_test_per_assert def test_singletons(): assert", "li = [1] li.append(li) assert hrepr(li) == H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1,", "hshort(\"hello\") == H.span[\"hreprt-str\"](\"hello\") assert hrepr(\"3 spaces\") == H.span[\"hreprt-str\"](\"3 spaces\") assert", "hrepr(b\"hello\") == H.span[\"hreprt-bytes\"](\"68656c6c6f\") assert hshort(b\"hello\") == H.span[\"hreprt-bytes\"](\"68656c6c6f\") assert hrepr(b\"hello this", "preprocess=lambda x, hrepr: x + 1) == H.span[\"hreprt-int\"]( \"2\" )", "string_cutoff=10) == H.span[ \"hreprt-str\" ](\"hello t...\") assert hshort(\"hello this is", "charset=\"UTF-8\" ) assert real_hrepr.page(1) == H.inline( H.raw(\"<!DOCTYPE html>\"), H.html(H.head(utf8, H.style(css_hrepr)),", "H.html(H.head(utf8, H.style(css_hrepr)), H.body(real_hrepr(1)),), ) def test_hrepr_multiarg(): assert hrepr(1, 2) ==", "H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), 
H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(", ") def test_short_structures(): for val, o, c in ( ((1,", "), H.div[\"hrepr-close\"](c), ) def test_short_structures(): for val, o, c in", "H.span[\"hreprt-Opaque\"]( \"<\", \"tests.test_hrepr.Opaque\", \">\" ) def test_as_page(): utf8 = H.meta(", "), ) ), H.div(H.span[\"hrepr-ref\"](\"#\", 1)), ), H.div[\"hrepr-close\"](\"]\"), ) def test_recursive():", "H.span[ \"hreprt-str\" ](\"hello this is a bit long\") @one_test_per_assert def", "assert hrepr(pt) == H.div[\"hreprt-dict\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"{\"), H.table[\"hrepr-body\"]( H.tr( H.td(H.span[\"hreprt-str\"](\"x\")), H.td[\"hrepr-delim\"](\":", "H.td(H.span[\"hreprt-int\"](\"1\")), ), H.tr( H.td(H.span[\"hreprt-symbol\"](\"y\")), H.td[\"hrepr-delim\"](\"=\"), H.td(H.span[\"hreprt-int\"](\"2\")), ), ), ) assert", "H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\")), H.div[\"hrepr-close\"](c), ) def test_dict(): pt = {\"x\": 1,", ") def test_tag(): tg = H.span[\"hello\"](1, 2, H.b(\"there\")) assert hrepr(tg)", "in ( ((1, 2), \"(\", \")\"), ([1, 2], \"[\", \"]\"),", "({\"x\": 1, \"y\": 2}, \"{\", \"}\"), ): clsname = type(val).__name__", "H.span[\"hrepr-ref\"](\"⟳\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\"),), H.div[\"hrepr-close\"](\"]\"), ),", "H.span[ \"hreprt-str\" ](\"he...\") assert hshort(\"hello this is a bit long\",", "open(f\"{styledir}/hrepr.css\", encoding=\"utf-8\").read() hrepr = real_hrepr.variant(fill_resources=False) @dataclass class Point: x: int", "content=\"text/html\", charset=\"UTF-8\" ) assert real_hrepr.page(1) == H.inline( H.raw(\"<!DOCTYPE html>\"), H.html(H.head(utf8,", "\"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hrepr-ref\"](\"⟳\", 1)), ), H.div[\"hrepr-close\"](\"]\"), ), ) def test_unsupported():", "assert hrepr(val, max_depth=0) == H.div[ 
f\"hreprt-{clsname}\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](o), H.div[\"hreprl-s\",", "== H.span[\"hreprk-class\"]( H.span[\"hrepr-defn-key\"](\"class\"), \" \", H.span[\"hrepr-defn-name\"](\"Opaque\"), ) def test_structures(): for", "bit long\") == H.span[\"hreprt-bytes\"]( \"68656c6c6f2074686...\" ) def test_function(): assert hrepr(Opaque)", "long\") == H.span[\"hreprt-bytes\"]( \"68656c6c6f2074686...\" ) def test_function(): assert hrepr(Opaque) ==", "\"]\"), (set, \"{\", \"}\"), (frozenset, \"{\", \"}\"), ): clsname =", "](\"he...\") assert hshort(\"hello this is a bit long\", string_cutoff=10000) ==", "for val, o, c in ( ((1, 2), \"(\", \")\"),", "test_hrepr_multiarg(): assert hrepr(1, 2) == H.inline( H.span[\"hreprt-int\"](\"1\"), H.span[\"hreprt-int\"](\"2\"), ) def", "), H.div[\"hrepr-close\"](\"]\"), ), ) assert hrepr(li, shortrefs=True) == H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\",", "class Opaque: pass def hshort(x, **kw): return hrepr(x, max_depth=0, **kw)", "for typ, o, c in ( (tuple, \"(\", \")\"), (list,", "== H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1,", "hrepr(\"hello this is a bit long\") == H.span[\"hreprt-str\"]( \"hello this", "H.tr( H.td(H.span[\"hreprt-symbol\"](\"y\")), H.td[\"hrepr-delim\"](\"=\"), H.td(H.span[\"hreprt-int\"](\"2\")), ), ), ) assert hrepr(pt, max_depth=0)", "H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")),", "val, o, c in ( ((1, 2), \"(\", \")\"), ([1,", "= type(val).__name__ assert hrepr(val, max_depth=0) == H.div[ f\"hreprt-{clsname}\", \"hrepr-bracketed\" ](", "encoding=\"utf-8\").read() hrepr = real_hrepr.variant(fill_resources=False) @dataclass class Point: x: int y:", 
"H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hrepr-ref\"](\"⟳\",", "is a bit long\") @one_test_per_assert def test_bytes(): assert hrepr(b\"hello\") ==", "= Point(1, 2) assert hrepr(pt) == H.div[\"hreprt-Point\", \"hrepr-instance\", \"hreprl-v\"]( H.div[\"hrepr-title\"](\"Point\"),", "(set, \"{\", \"}\"), (frozenset, \"{\", \"}\"), ): clsname = typ.__name__", "assert hrepr(tg) == tg def test_multiref(): li = [1, 2]", "return hrepr(x, max_depth=0, **kw) @one_test_per_assert def test_singletons(): assert hrepr(True) ==", "this is a bit long\" ) assert hshort(\"hello this is", "assert hrepr(True) == H.span[\"hreprv-True\"](\"True\") assert hrepr(False) == H.span[\"hreprv-False\"](\"False\") assert hrepr(None)", "((1, 2), \"(\", \")\"), ([1, 2], \"[\", \"]\"), ({1, 2},", "bit long\", string_cutoff=10000) == H.span[ \"hreprt-str\" ](\"hello this is a", "), ) def test_unsupported(): assert hshort(Opaque()) == H.span[\"hreprt-Opaque\"]( \"<\", \"tests.test_hrepr.Opaque\",", ") def test_dict(): pt = {\"x\": 1, \"y\": 2} assert", "long\", string_cutoff=10000) == H.span[ \"hreprt-str\" ](\"hello this is a bit", "x: int y: int class Opaque: pass def hshort(x, **kw):", "is a bit long\" ) assert hshort(\"hello this is a", "H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\"),), H.div[\"hrepr-close\"](\"]\"), ), ) ), ), H.div[\"hrepr-close\"](\"]\"), ), )", "def test_postprocess(): assert hrepr(1, postprocess=lambda x, obj, hrepr: x[\"newclass\"]) ==", "long\" ) assert hshort(\"hello this is a bit long\") ==", "H.td(H.span[\"hreprt-str\"](\"y\")), H.td[\"hrepr-delim\"](\": \"), H.td(H.span[\"hreprt-int\"](\"2\")), ), ), H.div[\"hrepr-close\"](\"}\"), ) def test_dataclass():", "def test_tag(): tg = H.span[\"hello\"](1, 2, H.b(\"there\")) assert hrepr(tg) ==", "H.div[\"hrepr-close\"](\"}\"), ) def test_dataclass(): pt = 
Point(1, 2) assert hrepr(pt)", "\"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\"),), H.div[\"hrepr-close\"](\"]\"), ), ) ),", "bit long\") == H.span[\"hreprt-bytes\"]( \"68656c6c6f2074686973206973206120626974206c6f6e67\" ) assert hshort(b\"hello this is", "def test_dataclass(): pt = Point(1, 2) assert hrepr(pt) == H.div[\"hreprt-Point\",", "\"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\",", "H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\"),), H.div[\"hrepr-close\"](\"]\"), ), ) ), ), H.div[\"hrepr-close\"](\"]\"), ) assert", ") def test_hrepr_multiarg(): assert hrepr(1, 2) == H.inline( H.span[\"hreprt-int\"](\"1\"), H.span[\"hreprt-int\"](\"2\"),", "test_singletons(): assert hrepr(True) == H.span[\"hreprv-True\"](\"True\") assert hrepr(False) == H.span[\"hreprv-False\"](\"False\") assert", "== H.span[\"hreprt-str\"](\"hello\") assert hrepr(\"3 spaces\") == H.span[\"hreprt-str\"](\"3 spaces\") assert hrepr(\"hello", "H.div(H.span[\"hreprt-int\"](\"1\")), H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"⟳\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-s\",", "li] assert hrepr(lili) == H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(", "test_postprocess(): assert hrepr(1, postprocess=lambda x, obj, hrepr: x[\"newclass\"]) == H.span[", "\"hreprt-str\" ](\"hello t...\") assert hshort(\"hello this is a bit long\",", "== H.span[\"hreprt-Opaque\"]( \"<\", \"tests.test_hrepr.Opaque\", \">\" ) def test_as_page(): utf8 =", "\"(\", \")\"), ([1, 2], \"[\", \"]\"), ({1, 2}, \"{\", \"}\"),", "H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\"),), 
H.div[\"hrepr-close\"](\"]\"), ), ) ), ),", "\"{\", \"}\"), ({\"x\": 1, \"y\": 2}, \"{\", \"}\"), ): clsname", "H.span[\"hreprt-bytes\"](\"68656c6c6f\") assert hrepr(b\"hello this is a bit long\") == H.span[\"hreprt-bytes\"](", "H.span[\"hreprt-bytes\"]( \"68656c6c6f2074686973206973206120626974206c6f6e67\" ) assert hshort(b\"hello this is a bit long\")", "H.table[\"hrepr-body\"]( H.tr( H.td(H.span[\"hreprt-symbol\"](\"x\")), H.td[\"hrepr-delim\"](\"=\"), H.td(H.span[\"hreprt-int\"](\"1\")), ), H.tr( H.td(H.span[\"hreprt-symbol\"](\"y\")), H.td[\"hrepr-delim\"](\"=\"), H.td(H.span[\"hreprt-int\"](\"2\")),", "H.span[\"hreprt-str\"](\"3 spaces\") assert hrepr(\"hello this is a bit long\") ==", "hrepr(\"3 spaces\") == H.span[\"hreprt-str\"](\"3 spaces\") assert hrepr(\"hello this is a", "def test_bytes(): assert hrepr(b\"hello\") == H.span[\"hreprt-bytes\"](\"68656c6c6f\") assert hshort(b\"hello\") == H.span[\"hreprt-bytes\"](\"68656c6c6f\")", "== H.span[\"hreprt-int\"](\"123\") assert hrepr(1.25) == H.span[\"hreprt-float\"](\"1.25\") @one_test_per_assert def test_string(): assert", "@dataclass class Point: x: int y: int class Opaque: pass", "@one_test_per_assert def test_singletons(): assert hrepr(True) == H.span[\"hreprv-True\"](\"True\") assert hrepr(False) ==", "def test_short_structures(): for val, o, c in ( ((1, 2),", "import hrepr as real_hrepr from hrepr.h import styledir from .common", "== H.span[\"hreprv-False\"](\"False\") assert hrepr(None) == H.span[\"hreprv-None\"](\"None\") @one_test_per_assert def test_numbers(): assert", "is a b...\" ) assert hshort(\"hello this is a bit", "a bit long\", string_cutoff=5) == H.span[ \"hreprt-str\" ](\"he...\") assert hshort(\"hello", "is a bit long\", string_cutoff=10) == H.span[ \"hreprt-str\" ](\"hello t...\")", "bit long\") == H.span[\"hreprt-str\"]( \"hello this is a bit long\"", "(list, \"[\", \"]\"), (set, \"{\", \"}\"), (frozenset, \"{\", \"}\"), ):", "H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 
1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\"),),", "{\"x\": 1, \"y\": 2} assert hrepr(pt) == H.div[\"hreprt-dict\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"{\"),", ") ), ), H.div[\"hrepr-close\"](\"]\"), ) assert hrepr(lili, shortrefs=True) == H.div[", "assert hrepr(lili, shortrefs=True) == H.div[ \"hreprt-list\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\",", "a bit long\") == H.span[\"hreprt-bytes\"]( \"68656c6c6f2074686973206973206120626974206c6f6e67\" ) assert hshort(b\"hello this", "assert hrepr(li) == H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"),", "\"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hrepr-ref\"](\"⟳\", 1)), ),", "\"hello this is a b...\" ) assert hshort(\"hello this is", "hrepr(typ((1, 2))) == H.div[ f\"hreprt-{clsname}\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](o), H.div[\"hreprl-h\", \"hrepr-body\"](", "\")\"), (list, \"[\", \"]\"), (set, \"{\", \"}\"), (frozenset, \"{\", \"}\"),", "\"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](o), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](c), )", "pass def hshort(x, **kw): return hrepr(x, max_depth=0, **kw) @one_test_per_assert def", "test_recursive(): li = [1] li.append(li) assert hrepr(li) == H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\",", "\"}\"), (frozenset({1, 2}), \"{\", \"}\"), ({\"x\": 1, \"y\": 2}, \"{\",", "\"hrepr-body\"](H.div(\"...\")), H.div[\"hrepr-close\"](c), ) def test_dict(): pt = {\"x\": 1, \"y\":", "1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( 
H.div(H.span[\"hreprt-int\"](\"1\")), H.div( H.div[\"hrepr-refbox\"](", "[1] li.append(li) assert hrepr(li) == H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\",", "H.table[\"hrepr-body\"]( H.tr( H.td(H.span[\"hreprt-str\"](\"x\")), H.td[\"hrepr-delim\"](\": \"), H.td(H.span[\"hreprt-int\"](\"1\")), ), H.tr( H.td(H.span[\"hreprt-str\"](\"y\")), H.td[\"hrepr-delim\"](\":", "]( H.div[\"hrepr-title\"](\"Point\"), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\")), ) def test_tag(): tg = H.span[\"hello\"](1,", "H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](c), ) def test_short_structures(): for val, o,", "(frozenset({1, 2}), \"{\", \"}\"), ({\"x\": 1, \"y\": 2}, \"{\", \"}\"),", "assert hrepr(1, postprocess=lambda x, obj, hrepr: x[\"newclass\"]) == H.span[ \"newclass\",", "this is a bit long\") @one_test_per_assert def test_bytes(): assert hrepr(b\"hello\")", "H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"⟳\", 1,", "), ), H.div[\"hrepr-close\"](\"]\"), ) assert hrepr(lili, shortrefs=True) == H.div[ \"hreprt-list\",", "\"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](\"]\"), ), ) ), H.div(H.span[\"hrepr-ref\"](\"#\", 1)),", "\"<\", \"tests.test_hrepr.Opaque\", \">\" ) def test_as_page(): utf8 = H.meta( {\"http-equiv\":", "H.span[\"hreprt-int\"](\"123\") assert hrepr(1.25) == H.span[\"hreprt-float\"](\"1.25\") @one_test_per_assert def test_string(): assert hshort(\"hello\")", "test_string(): assert hshort(\"hello\") == H.span[\"hreprt-str\"](\"hello\") assert hrepr(\"3 spaces\") == H.span[\"hreprt-str\"](\"3", "), ) assert hrepr(li, shortrefs=True) == H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"),", "hrepr(None) == 
H.span[\"hreprv-None\"](\"None\") @one_test_per_assert def test_numbers(): assert hrepr(123) == H.span[\"hreprt-int\"](\"123\")", "H.div[\"hrepr-open\"](o), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\")), H.div[\"hrepr-close\"](c), ) def test_dict(): pt = {\"x\":", "]( H.div[\"hrepr-open\"](o), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\")), H.div[\"hrepr-close\"](c), ) def test_dict(): pt =", "H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hrepr-ref\"](\"⟳\", 1)), ), H.div[\"hrepr-close\"](\"]\"), ), ) def", "def test_string(): assert hshort(\"hello\") == H.span[\"hreprt-str\"](\"hello\") assert hrepr(\"3 spaces\") ==", "1) == H.span[\"hreprt-int\"]( \"2\" ) def test_postprocess(): assert hrepr(1, postprocess=lambda", "== H.span[\"hreprv-None\"](\"None\") @one_test_per_assert def test_numbers(): assert hrepr(123) == H.span[\"hreprt-int\"](\"123\") assert", "(frozenset, \"{\", \"}\"), ): clsname = typ.__name__ assert hrepr(typ((1, 2)))", ") assert hshort(\"hello this is a bit long\", string_cutoff=10) ==", "\"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](\"]\"),", "\"tests.test_hrepr.Opaque\", \">\" ) def test_as_page(): utf8 = H.meta( {\"http-equiv\": \"Content-type\"},", "assert hshort(\"hello this is a bit long\", string_cutoff=5) == H.span[", "\"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](o), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\")), H.div[\"hrepr-close\"](c), ) def test_dict(): pt", ") assert real_hrepr.page(1) == H.inline( H.raw(\"<!DOCTYPE html>\"), H.html(H.head(utf8, H.style(css_hrepr)), H.body(real_hrepr(1)),),", "H.div[ f\"hreprt-{clsname}\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](o), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\")), H.div[\"hrepr-close\"](c), ) def", "\"hrepr-bracketed\"]( 
H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](\"]\"), ), )", "from hrepr.h import styledir from .common import one_test_per_assert css_hrepr =", "](\"hello this is a bit long\") @one_test_per_assert def test_bytes(): assert", "== H.div[\"hreprt-Point\", \"hrepr-instance\", \"hreprl-v\"]( H.div[\"hrepr-title\"](\"Point\"), H.table[\"hrepr-body\"]( H.tr( H.td(H.span[\"hreprt-symbol\"](\"x\")), H.td[\"hrepr-delim\"](\"=\"), H.td(H.span[\"hreprt-int\"](\"1\")),", "== H.span[\"hreprt-float\"](\"1.25\") @one_test_per_assert def test_string(): assert hshort(\"hello\") == H.span[\"hreprt-str\"](\"hello\") assert", "assert hshort(\"hello\") == H.span[\"hreprt-str\"](\"hello\") assert hrepr(\"3 spaces\") == H.span[\"hreprt-str\"](\"3 spaces\")", "2}, \"{\", \"}\"), (frozenset({1, 2}), \"{\", \"}\"), ({\"x\": 1, \"y\":", "), ) ), ), H.div[\"hrepr-close\"](\"]\"), ), ) assert hrepr(li, shortrefs=True)", "H.span[\"hreprt-str\"]( \"hello this is a b...\" ) assert hshort(\"hello this", "), ) assert hrepr(pt, max_depth=0) == H.div[ \"hreprt-Point\", \"hrepr-instance\", \"hreprl-s\"", "([1, 2], \"[\", \"]\"), ({1, 2}, \"{\", \"}\"), (frozenset({1, 2}),", "hshort(b\"hello\") == H.span[\"hreprt-bytes\"](\"68656c6c6f\") assert hrepr(b\"hello this is a bit long\")", "clsname = type(val).__name__ assert hrepr(val, max_depth=0) == H.div[ f\"hreprt-{clsname}\", \"hrepr-bracketed\"", "from .common import one_test_per_assert css_hrepr = open(f\"{styledir}/hrepr.css\", encoding=\"utf-8\").read() hrepr =", "== H.span[\"hreprt-bytes\"](\"68656c6c6f\") assert hshort(b\"hello\") == H.span[\"hreprt-bytes\"](\"68656c6c6f\") assert hrepr(b\"hello this is", "\"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"),", "](\"hello t...\") assert hshort(\"hello this is a bit 
long\", string_cutoff=5)", "== H.div[ \"hreprt-list\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div( H.div[\"hrepr-refbox\"](", "assert hshort(b\"hello\") == H.span[\"hreprt-bytes\"](\"68656c6c6f\") assert hrepr(b\"hello this is a bit", "\"hreprl-v\"]( H.div[\"hrepr-title\"](\"Point\"), H.table[\"hrepr-body\"]( H.tr( H.td(H.span[\"hreprt-symbol\"](\"x\")), H.td[\"hrepr-delim\"](\"=\"), H.td(H.span[\"hreprt-int\"](\"1\")), ), H.tr( H.td(H.span[\"hreprt-symbol\"](\"y\")),", "def test_hrepr_multiarg(): assert hrepr(1, 2) == H.inline( H.span[\"hreprt-int\"](\"1\"), H.span[\"hreprt-int\"](\"2\"), )", ") def test_recursive(): li = [1] li.append(li) assert hrepr(li) ==", "import one_test_per_assert css_hrepr = open(f\"{styledir}/hrepr.css\", encoding=\"utf-8\").read() hrepr = real_hrepr.variant(fill_resources=False) @dataclass", "\"hello this is a bit long\" ) assert hshort(\"hello this", ") assert hshort(\"hello this is a bit long\") == H.span[\"hreprt-str\"](", "H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](\"]\"), ), ) ), H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1,", "H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](c), ) def test_short_structures(): for val, o, c", "): clsname = typ.__name__ assert hrepr(typ((1, 2))) == H.div[ f\"hreprt-{clsname}\",", "== H.span[\"hreprt-int\"]( \"2\" ) def test_postprocess(): assert hrepr(1, postprocess=lambda x,", "a bit long\") == H.span[\"hreprt-str\"]( \"hello this is a bit", "H.span[ \"hreprt-str\" ](\"hello t...\") assert hshort(\"hello this is a bit", ") assert hrepr(pt, max_depth=0) == H.div[ \"hreprt-Point\", \"hrepr-instance\", \"hreprl-s\" ](", "H.inline( H.raw(\"<!DOCTYPE html>\"), H.html(H.head(utf8, H.style(css_hrepr)), H.body(real_hrepr(1)),), ) def test_hrepr_multiarg(): assert", "= real_hrepr.variant(fill_resources=False) @dataclass class Point: x: int y: int class", ") def test_structures(): for typ, o, c in ( 
(tuple,", "\"), H.td(H.span[\"hreprt-int\"](\"1\")), ), H.tr( H.td(H.span[\"hreprt-str\"](\"y\")), H.td[\"hrepr-delim\"](\": \"), H.td(H.span[\"hreprt-int\"](\"2\")), ), ),", "real_hrepr.variant(fill_resources=False) @dataclass class Point: x: int y: int class Opaque:", "H.td[\"hrepr-delim\"](\"=\"), H.td(H.span[\"hreprt-int\"](\"1\")), ), H.tr( H.td(H.span[\"hreprt-symbol\"](\"y\")), H.td[\"hrepr-delim\"](\"=\"), H.td(H.span[\"hreprt-int\"](\"2\")), ), ), )", "hrepr(tg) == tg def test_multiref(): li = [1, 2] lili", "2) == H.inline( H.span[\"hreprt-int\"](\"1\"), H.span[\"hreprt-int\"](\"2\"), ) def test_preprocess(): assert hrepr(1,", "@one_test_per_assert def test_bytes(): assert hrepr(b\"hello\") == H.span[\"hreprt-bytes\"](\"68656c6c6f\") assert hshort(b\"hello\") ==", "\"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"⟳\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"),", "H.body(real_hrepr(1)),), ) def test_hrepr_multiarg(): assert hrepr(1, 2) == H.inline( H.span[\"hreprt-int\"](\"1\"),", "H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\"),), H.div[\"hrepr-close\"](\"]\"),", "test_as_page(): utf8 = H.meta( {\"http-equiv\": \"Content-type\"}, content=\"text/html\", charset=\"UTF-8\" ) assert", "== H.span[ \"hreprt-str\" ](\"hello this is a bit long\") @one_test_per_assert", "\"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](\"]\"), ), ) ), H.div( H.div[\"hrepr-refbox\"](", "assert hrepr(1, preprocess=lambda x, hrepr: x + 1) == H.span[\"hreprt-int\"](", "== H.span[\"hreprv-True\"](\"True\") assert hrepr(False) == H.span[\"hreprv-False\"](\"False\") assert hrepr(None) == H.span[\"hreprv-None\"](\"None\")", "long\") @one_test_per_assert def test_bytes(): assert 
hrepr(b\"hello\") == H.span[\"hreprt-bytes\"](\"68656c6c6f\") assert hshort(b\"hello\")", "\"hreprt-str\" ](\"he...\") assert hshort(\"hello this is a bit long\", string_cutoff=10000)", "def test_multiref(): li = [1, 2] lili = [li, li]", "this is a bit long\", string_cutoff=10) == H.span[ \"hreprt-str\" ](\"hello", "hrepr(pt, max_depth=0) == H.div[ \"hreprt-Point\", \"hrepr-instance\", \"hreprl-s\" ]( H.div[\"hrepr-title\"](\"Point\"), H.div[\"hreprl-s\",", "), H.div[\"hrepr-close\"](\"]\"), ), ) ), H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"),", "H.div[\"hrepr-close\"](\"]\"), ), ) ), H.div(H.span[\"hrepr-ref\"](\"#\", 1)), ), H.div[\"hrepr-close\"](\"]\"), ) def", ") ), H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"),", "H.div[\"hrepr-close\"](c), ) def test_short_structures(): for val, o, c in (", "= H.meta( {\"http-equiv\": \"Content-type\"}, content=\"text/html\", charset=\"UTF-8\" ) assert real_hrepr.page(1) ==", "1)), ), H.div[\"hrepr-close\"](\"]\"), ) def test_recursive(): li = [1] li.append(li)", "\"}\"), ({\"x\": 1, \"y\": 2}, \"{\", \"}\"), ): clsname =", "**kw): return hrepr(x, max_depth=0, **kw) @one_test_per_assert def test_singletons(): assert hrepr(True)", "this is a bit long\") == H.span[\"hreprt-bytes\"]( \"68656c6c6f2074686...\" ) def", "== H.div[ f\"hreprt-{clsname}\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](o), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")),", "2], \"[\", \"]\"), ({1, 2}, \"{\", \"}\"), (frozenset({1, 2}), \"{\",", "assert hrepr(Opaque) == H.span[\"hreprk-class\"]( H.span[\"hrepr-defn-key\"](\"class\"), \" \", H.span[\"hrepr-defn-name\"](\"Opaque\"), ) def", "dataclass from hrepr import H from hrepr import hrepr as", "== H.span[\"hreprt-bytes\"]( \"68656c6c6f2074686973206973206120626974206c6f6e67\" ) assert hshort(b\"hello this is a 
bit", "(tuple, \"(\", \")\"), (list, \"[\", \"]\"), (set, \"{\", \"}\"), (frozenset,", "import H from hrepr import hrepr as real_hrepr from hrepr.h", "from hrepr import H from hrepr import hrepr as real_hrepr", "H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\"),), H.div[\"hrepr-close\"](\"]\"), ),", "string_cutoff=5) == H.span[ \"hreprt-str\" ](\"he...\") assert hshort(\"hello this is a", "@one_test_per_assert def test_numbers(): assert hrepr(123) == H.span[\"hreprt-int\"](\"123\") assert hrepr(1.25) ==", ") ), ), H.div[\"hrepr-close\"](\"]\"), ), ) assert hrepr(li, shortrefs=True) ==", "real_hrepr from hrepr.h import styledir from .common import one_test_per_assert css_hrepr", "assert hrepr(b\"hello\") == H.span[\"hreprt-bytes\"](\"68656c6c6f\") assert hshort(b\"hello\") == H.span[\"hreprt-bytes\"](\"68656c6c6f\") assert hrepr(b\"hello", "== H.span[\"hreprt-str\"](\"3 spaces\") assert hrepr(\"hello this is a bit long\")", "H.div[\"hrepr-close\"](\"]\"), ) def test_recursive(): li = [1] li.append(li) assert hrepr(li)", "is a bit long\") == H.span[\"hreprt-str\"]( \"hello this is a", "H.div[\"hrepr-close\"](c), ) def test_dict(): pt = {\"x\": 1, \"y\": 2}", "2) assert hrepr(pt) == H.div[\"hreprt-Point\", \"hrepr-instance\", \"hreprl-v\"]( H.div[\"hrepr-title\"](\"Point\"), H.table[\"hrepr-body\"]( H.tr(", "hrepr(1, preprocess=lambda x, hrepr: x + 1) == H.span[\"hreprt-int\"]( \"2\"", "long\", string_cutoff=5) == H.span[ \"hreprt-str\" ](\"he...\") assert hshort(\"hello this is", "== H.div[ \"hreprt-Point\", \"hrepr-instance\", \"hreprl-s\" ]( H.div[\"hrepr-title\"](\"Point\"), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\")), )", "b...\" ) assert hshort(\"hello this is a bit long\", string_cutoff=10)", "is a bit long\") == H.span[\"hreprt-bytes\"]( \"68656c6c6f2074686973206973206120626974206c6f6e67\" ) assert hshort(b\"hello", "\"2\" ) def 
test_postprocess(): assert hrepr(1, postprocess=lambda x, obj, hrepr:", "H.div[\"hrepr-open\"](\"{\"), H.table[\"hrepr-body\"]( H.tr( H.td(H.span[\"hreprt-str\"](\"x\")), H.td[\"hrepr-delim\"](\": \"), H.td(H.span[\"hreprt-int\"](\"1\")), ), H.tr( H.td(H.span[\"hreprt-str\"](\"y\")),", "1)), ), H.div[\"hrepr-close\"](\"]\"), ), ) def test_unsupported(): assert hshort(Opaque()) ==", "H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](\"]\"), ), ) ), H.div(H.span[\"hrepr-ref\"](\"#\", 1)), ), H.div[\"hrepr-close\"](\"]\"),", "y: int class Opaque: pass def hshort(x, **kw): return hrepr(x,", "hrepr(123) == H.span[\"hreprt-int\"](\"123\") assert hrepr(1.25) == H.span[\"hreprt-float\"](\"1.25\") @one_test_per_assert def test_string():", "test_short_structures(): for val, o, c in ( ((1, 2), \"(\",", "= [1] li.append(li) assert hrepr(li) == H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"),", "pt = {\"x\": 1, \"y\": 2} assert hrepr(pt) == H.div[\"hreprt-dict\",", "html>\"), H.html(H.head(utf8, H.style(css_hrepr)), H.body(real_hrepr(1)),), ) def test_hrepr_multiarg(): assert hrepr(1, 2)", "H.span[\"hreprt-int\"](\"1\"), H.span[\"hreprt-int\"](\"2\"), ) def test_preprocess(): assert hrepr(1, preprocess=lambda x, hrepr:", "H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](\"]\"), ), ) ), H.div(H.span[\"hrepr-ref\"](\"#\",", ") assert hshort(b\"hello this is a bit long\") == H.span[\"hreprt-bytes\"](", "H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"⟳\", 1, \"=\"), H.div[\"hreprt-list\",", "H.div[ \"hreprt-list\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\",", "), ) ), ), H.div[\"hrepr-close\"](\"]\"), ) assert hrepr(lili, shortrefs=True) ==", 
"H.span[\"hreprt-bytes\"](\"68656c6c6f\") assert hshort(b\"hello\") == H.span[\"hreprt-bytes\"](\"68656c6c6f\") assert hrepr(b\"hello this is a", "H.div[ \"hreprt-Point\", \"hrepr-instance\", \"hreprl-s\" ]( H.div[\"hrepr-title\"](\"Point\"), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\")), ) def", "f\"hreprt-{clsname}\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](o), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\")), H.div[\"hrepr-close\"](c), ) def test_dict():", "test_function(): assert hrepr(Opaque) == H.span[\"hreprk-class\"]( H.span[\"hrepr-defn-key\"](\"class\"), \" \", H.span[\"hrepr-defn-name\"](\"Opaque\"), )", "li.append(li) assert hrepr(li) == H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"](", "\"[\", \"]\"), (set, \"{\", \"}\"), (frozenset, \"{\", \"}\"), ): clsname", "), H.div(H.span[\"hrepr-ref\"](\"#\", 1)), ), H.div[\"hrepr-close\"](\"]\"), ) def test_recursive(): li =", "H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](\"]\"), ), ) ), H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\",", "this is a bit long\", string_cutoff=10000) == H.span[ \"hreprt-str\" ](\"hello", ") def test_as_page(): utf8 = H.meta( {\"http-equiv\": \"Content-type\"}, content=\"text/html\", charset=\"UTF-8\"", "H.td(H.span[\"hreprt-symbol\"](\"y\")), H.td[\"hrepr-delim\"](\"=\"), H.td(H.span[\"hreprt-int\"](\"2\")), ), ), ) assert hrepr(pt, max_depth=0) ==", "f\"hreprt-{clsname}\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](o), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](c),", "def test_numbers(): assert hrepr(123) == H.span[\"hreprt-int\"](\"123\") assert hrepr(1.25) == H.span[\"hreprt-float\"](\"1.25\")", "hshort(b\"hello this is a bit long\") == H.span[\"hreprt-bytes\"]( \"68656c6c6f2074686...\" )", "H.div[\"hrepr-open\"](\"[\"), 
H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\"),), H.div[\"hrepr-close\"](\"]\"), ), ) ), ), H.div[\"hrepr-close\"](\"]\"), )", "), ), ) assert hrepr(pt, max_depth=0) == H.div[ \"hreprt-Point\", \"hrepr-instance\",", "H.div[ f\"hreprt-{clsname}\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](o), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")), ),", "]( H.div[\"hrepr-open\"](o), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](c), ) def", "\"hrepr-instance\", \"hreprl-v\"]( H.div[\"hrepr-title\"](\"Point\"), H.table[\"hrepr-body\"]( H.tr( H.td(H.span[\"hreprt-symbol\"](\"x\")), H.td[\"hrepr-delim\"](\"=\"), H.td(H.span[\"hreprt-int\"](\"1\")), ), H.tr(", "= open(f\"{styledir}/hrepr.css\", encoding=\"utf-8\").read() hrepr = real_hrepr.variant(fill_resources=False) @dataclass class Point: x:", "H.div[\"hreprt-Point\", \"hrepr-instance\", \"hreprl-v\"]( H.div[\"hrepr-title\"](\"Point\"), H.table[\"hrepr-body\"]( H.tr( H.td(H.span[\"hreprt-symbol\"](\"x\")), H.td[\"hrepr-delim\"](\"=\"), H.td(H.span[\"hreprt-int\"](\"1\")), ),", "assert hrepr(lili) == H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div( H.div[\"hrepr-refbox\"](", "H.div[\"hrepr-open\"](o), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](c), ) def test_short_structures():", "hrepr import H from hrepr import hrepr as real_hrepr from", "H.span[\"hrepr-defn-name\"](\"Opaque\"), ) def test_structures(): for typ, o, c in (", "one_test_per_assert css_hrepr = open(f\"{styledir}/hrepr.css\", encoding=\"utf-8\").read() hrepr = real_hrepr.variant(fill_resources=False) @dataclass class", "H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div( H.div[\"hrepr-refbox\"]( 
H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"](", "int class Opaque: pass def hshort(x, **kw): return hrepr(x, max_depth=0,", "styledir from .common import one_test_per_assert css_hrepr = open(f\"{styledir}/hrepr.css\", encoding=\"utf-8\").read() hrepr", ") def test_dataclass(): pt = Point(1, 2) assert hrepr(pt) ==", "c in ( ((1, 2), \"(\", \")\"), ([1, 2], \"[\",", "spaces\") == H.span[\"hreprt-str\"](\"3 spaces\") assert hrepr(\"hello this is a bit", "a bit long\", string_cutoff=10) == H.span[ \"hreprt-str\" ](\"hello t...\") assert", "= [li, li] assert hrepr(lili) == H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\",", "\"68656c6c6f2074686973206973206120626974206c6f6e67\" ) assert hshort(b\"hello this is a bit long\") ==", "\"}\"), ): clsname = type(val).__name__ assert hrepr(val, max_depth=0) == H.div[", "H.span[\"hello\"](1, 2, H.b(\"there\")) assert hrepr(tg) == tg def test_multiref(): li", "assert hrepr(123) == H.span[\"hreprt-int\"](\"123\") assert hrepr(1.25) == H.span[\"hreprt-float\"](\"1.25\") @one_test_per_assert def", "hrepr(x, max_depth=0, **kw) @one_test_per_assert def test_singletons(): assert hrepr(True) == H.span[\"hreprv-True\"](\"True\")", "from hrepr import hrepr as real_hrepr from hrepr.h import styledir", "assert real_hrepr.page(1) == H.inline( H.raw(\"<!DOCTYPE html>\"), H.html(H.head(utf8, H.style(css_hrepr)), H.body(real_hrepr(1)),), )", "== tg def test_multiref(): li = [1, 2] lili =", "as real_hrepr from hrepr.h import styledir from .common import one_test_per_assert", "= [1, 2] lili = [li, li] assert hrepr(lili) ==", "Point(1, 2) assert hrepr(pt) == H.div[\"hreprt-Point\", \"hrepr-instance\", \"hreprl-v\"]( H.div[\"hrepr-title\"](\"Point\"), H.table[\"hrepr-body\"](", "\")\"), ([1, 2], \"[\", \"]\"), ({1, 2}, \"{\", \"}\"), (frozenset({1,", ") def test_unsupported(): assert hshort(Opaque()) == H.span[\"hreprt-Opaque\"]( \"<\", \"tests.test_hrepr.Opaque\", 
\">\"", "), H.div[\"hrepr-close\"](\"]\"), ) assert hrepr(lili, shortrefs=True) == H.div[ \"hreprt-list\", \"hrepr-bracketed\"", "H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](\"]\"), ), ) ), H.div(H.span[\"hrepr-ref\"](\"#\", 1)), ),", "), H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-s\",", "({1, 2}, \"{\", \"}\"), (frozenset({1, 2}), \"{\", \"}\"), ({\"x\": 1,", "H.inline( H.span[\"hreprt-int\"](\"1\"), H.span[\"hreprt-int\"](\"2\"), ) def test_preprocess(): assert hrepr(1, preprocess=lambda x,", "H.span[\"hreprt-bytes\"]( \"68656c6c6f2074686...\" ) def test_function(): assert hrepr(Opaque) == H.span[\"hreprk-class\"]( H.span[\"hrepr-defn-key\"](\"class\"),", "H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"⟳\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\"),), H.div[\"hrepr-close\"](\"]\"),", "== H.div[\"hreprt-dict\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"{\"), H.table[\"hrepr-body\"]( H.tr( H.td(H.span[\"hreprt-str\"](\"x\")), H.td[\"hrepr-delim\"](\": \"), H.td(H.span[\"hreprt-int\"](\"1\")),", "assert hrepr(\"hello this is a bit long\") == H.span[\"hreprt-str\"]( \"hello", "\"(\", \")\"), (list, \"[\", \"]\"), (set, \"{\", \"}\"), (frozenset, \"{\",", "test_unsupported(): assert hshort(Opaque()) == H.span[\"hreprt-Opaque\"]( \"<\", \"tests.test_hrepr.Opaque\", \">\" ) def", "H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")),", "H.div[\"hrepr-close\"](\"]\"), ) assert hrepr(lili, shortrefs=True) == H.div[ \"hreprt-list\", \"hrepr-bracketed\" ](", "hrepr(lili) == H.div[\"hreprt-list\", \"hrepr-bracketed\"]( 
H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\",", "this is a bit long\", string_cutoff=5) == H.span[ \"hreprt-str\" ](\"he...\")", "), H.div[\"hrepr-close\"](\"]\"), ), ) def test_unsupported(): assert hshort(Opaque()) == H.span[\"hreprt-Opaque\"](", "H.span[\"hreprt-int\"](\"2\"), ) def test_preprocess(): assert hrepr(1, preprocess=lambda x, hrepr: x", "H.div[\"hreprl-h\", \"hrepr-body\"]( H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"),", "== H.span[\"hreprt-str\"]( \"hello this is a bit long\" ) assert", "bit long\") @one_test_per_assert def test_bytes(): assert hrepr(b\"hello\") == H.span[\"hreprt-bytes\"](\"68656c6c6f\") assert", "\"hrepr-instance\", \"hreprl-s\" ]( H.div[\"hrepr-title\"](\"Point\"), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\")), ) def test_tag(): tg", "\"hrepr-body\"]( H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\",", "hshort(\"hello this is a bit long\", string_cutoff=10) == H.span[ \"hreprt-str\"", "hshort(\"hello this is a bit long\") == H.span[\"hreprt-str\"]( \"hello this", "H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hrepr-ref\"](\"⟳\", 1)), ), H.div[\"hrepr-close\"](\"]\"), ), ) def test_unsupported(): assert", "H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"⟳\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"](", ") def test_function(): assert hrepr(Opaque) == H.span[\"hreprk-class\"]( H.span[\"hrepr-defn-key\"](\"class\"), \" \",", "H.style(css_hrepr)), H.body(real_hrepr(1)),), ) def test_hrepr_multiarg(): assert hrepr(1, 2) == H.inline(", "\" \", H.span[\"hrepr-defn-name\"](\"Opaque\"), ) def test_structures(): for typ, o, c", "hrepr.h 
import styledir from .common import one_test_per_assert css_hrepr = open(f\"{styledir}/hrepr.css\",", "== H.span[ \"hreprt-str\" ](\"he...\") assert hshort(\"hello this is a bit", "\"}\"), (frozenset, \"{\", \"}\"), ): clsname = typ.__name__ assert hrepr(typ((1,", "), H.div[\"hrepr-close\"](\"}\"), ) def test_dataclass(): pt = Point(1, 2) assert", "max_depth=0, **kw) @one_test_per_assert def test_singletons(): assert hrepr(True) == H.span[\"hreprv-True\"](\"True\") assert", "spaces\") assert hrepr(\"hello this is a bit long\") == H.span[\"hreprt-str\"](", "1, \"y\": 2} assert hrepr(pt) == H.div[\"hreprt-dict\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"{\"), H.table[\"hrepr-body\"](", "assert hshort(b\"hello this is a bit long\") == H.span[\"hreprt-bytes\"]( \"68656c6c6f2074686...\"", "hrepr: x + 1) == H.span[\"hreprt-int\"]( \"2\" ) def test_postprocess():", "assert hrepr(pt, max_depth=0) == H.div[ \"hreprt-Point\", \"hrepr-instance\", \"hreprl-s\" ]( H.div[\"hrepr-title\"](\"Point\"),", "[li, li] assert hrepr(lili) == H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"](", "def test_function(): assert hrepr(Opaque) == H.span[\"hreprk-class\"]( H.span[\"hrepr-defn-key\"](\"class\"), \" \", H.span[\"hrepr-defn-name\"](\"Opaque\"),", "== H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"](", "H.div[\"hrepr-title\"](\"Point\"), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\")), ) def test_tag(): tg = H.span[\"hello\"](1, 2,", "H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div( H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1, \"=\"),", "H.span[\"hrepr-defn-key\"](\"class\"), \" \", H.span[\"hrepr-defn-name\"](\"Opaque\"), ) def test_structures(): for typ, o,", "assert hrepr(\"3 spaces\") == 
H.span[\"hreprt-str\"](\"3 spaces\") assert hrepr(\"hello this is", "H.div[\"hrepr-close\"](\"]\"), ), ) assert hrepr(li, shortrefs=True) == H.div[\"hrepr-refbox\"]( H.span[\"hrepr-ref\"](\"#\", 1,", "1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")), ),", "hrepr as real_hrepr from hrepr.h import styledir from .common import", "real_hrepr.page(1) == H.inline( H.raw(\"<!DOCTYPE html>\"), H.html(H.head(utf8, H.style(css_hrepr)), H.body(real_hrepr(1)),), ) def", "max_depth=0) == H.div[ f\"hreprt-{clsname}\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](o), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\")), H.div[\"hrepr-close\"](c),", "assert hrepr(typ((1, 2))) == H.div[ f\"hreprt-{clsname}\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](o), H.div[\"hreprl-h\",", "tg = H.span[\"hello\"](1, 2, H.b(\"there\")) assert hrepr(tg) == tg def", "= {\"x\": 1, \"y\": 2} assert hrepr(pt) == H.div[\"hreprt-dict\", \"hrepr-bracketed\"](", "hrepr(val, max_depth=0) == H.div[ f\"hreprt-{clsname}\", \"hrepr-bracketed\" ]( H.div[\"hrepr-open\"](o), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\")),", "H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-h\", \"hrepr-body\"]( H.div(H.span[\"hreprt-int\"](\"1\")), H.div(H.span[\"hreprt-int\"](\"2\")), ), H.div[\"hrepr-close\"](\"]\"), ), ) ),", "**kw) @one_test_per_assert def test_singletons(): assert hrepr(True) == H.span[\"hreprv-True\"](\"True\") assert hrepr(False)", "\"[\", \"]\"), ({1, 2}, \"{\", \"}\"), (frozenset({1, 2}), \"{\", \"}\"),", "H.div(H.span[\"hrepr-ref\"](\"⟳\", 1)), ), H.div[\"hrepr-close\"](\"]\"), ), ) def test_unsupported(): assert hshort(Opaque())", "H.td[\"hrepr-delim\"](\": \"), H.td(H.span[\"hreprt-int\"](\"2\")), ), ), H.div[\"hrepr-close\"](\"}\"), ) def test_dataclass(): pt", "hrepr(b\"hello this is a bit long\") == H.span[\"hreprt-bytes\"]( 
\"68656c6c6f2074686973206973206120626974206c6f6e67\" )", "= H.span[\"hello\"](1, 2, H.b(\"there\")) assert hrepr(tg) == tg def test_multiref():", "def test_preprocess(): assert hrepr(1, preprocess=lambda x, hrepr: x + 1)", "\"}\"), ): clsname = typ.__name__ assert hrepr(typ((1, 2))) == H.div[", ") def test_postprocess(): assert hrepr(1, postprocess=lambda x, obj, hrepr: x[\"newclass\"])", "H.span[\"hreprv-True\"](\"True\") assert hrepr(False) == H.span[\"hreprv-False\"](\"False\") assert hrepr(None) == H.span[\"hreprv-None\"](\"None\") @one_test_per_assert", "\"y\": 2}, \"{\", \"}\"), ): clsname = type(val).__name__ assert hrepr(val,", "o, c in ( ((1, 2), \"(\", \")\"), ([1, 2],", "\"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\"),), H.div[\"hrepr-close\"](\"]\"), ), ) ), ), H.div[\"hrepr-close\"](\"]\"),", "def test_recursive(): li = [1] li.append(li) assert hrepr(li) == H.div[\"hrepr-refbox\"](", "x + 1) == H.span[\"hreprt-int\"]( \"2\" ) def test_postprocess(): assert", "( ((1, 2), \"(\", \")\"), ([1, 2], \"[\", \"]\"), ({1,", "1, \"=\"), H.div[\"hreprt-list\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"[\"), H.div[\"hreprl-s\", \"hrepr-body\"](H.div(\"...\"),), H.div[\"hrepr-close\"](\"]\"), ), )", "\"68656c6c6f2074686...\" ) def test_function(): assert hrepr(Opaque) == H.span[\"hreprk-class\"]( H.span[\"hrepr-defn-key\"](\"class\"), \"", "\"y\": 2} assert hrepr(pt) == H.div[\"hreprt-dict\", \"hrepr-bracketed\"]( H.div[\"hrepr-open\"](\"{\"), H.table[\"hrepr-body\"]( H.tr(", "H.td(H.span[\"hreprt-int\"](\"1\")), ), H.tr( H.td(H.span[\"hreprt-str\"](\"y\")), H.td[\"hrepr-delim\"](\": \"), H.td(H.span[\"hreprt-int\"](\"2\")), ), ), H.div[\"hrepr-close\"](\"}\"),", "postprocess=lambda x, obj, hrepr: x[\"newclass\"]) == H.span[ \"newclass\", \"hreprt-int\" ](\"1\")", "this is a bit long\") == H.span[\"hreprt-bytes\"]( \"68656c6c6f2074686973206973206120626974206c6f6e67\" ) assert", "hrepr(1, 2) == H.inline( 
H.span[\"hreprt-int\"](\"1\"), H.span[\"hreprt-int\"](\"2\"), ) def test_preprocess(): assert", "H.div[\"hrepr-title\"](\"Point\"), H.table[\"hrepr-body\"]( H.tr( H.td(H.span[\"hreprt-symbol\"](\"x\")), H.td[\"hrepr-delim\"](\"=\"), H.td(H.span[\"hreprt-int\"](\"1\")), ), H.tr( H.td(H.span[\"hreprt-symbol\"](\"y\")), H.td[\"hrepr-delim\"](\"=\"),", "def hshort(x, **kw): return hrepr(x, max_depth=0, **kw) @one_test_per_assert def test_singletons():" ]
[ "assumptions): return self.func.eval(self.arg, assumptions) @property def binary_symbols(self): from sympy.core.relational import", "inference. \"\"\" res, _res = None, None mro = inspect.getmro(type(expr))", "import Eq, Ne if self.func.name in ['is_true', 'is_false']: i =", "documentation for advanced usage. Examples ======== >>> from sympy import", "None: # since first resolutor was conclusive, we keep that", "is None # Try to check for higher classes if", "contexts. It is basically a thin wrapper to Python's set,", "from sympy.core.singleton import S from sympy.core.sympify import _sympify from sympy.logic.boolalg", "Symbol('x') >>> Q.integer(x) Q.integer(x) >>> type(Q.integer(x)) <class 'sympy.assumptions.assume.AppliedPredicate'> \"\"\" __slots__", "res = eval_(expr, assumptions) # Do not stop if value", "obj = Boolean.__new__(cls) obj.name = name obj.handlers = handlers or", "for advanced usage. Examples ======== >>> from sympy import Q", "Q, Symbol >>> x = Symbol('x') >>> Q.integer(x) Q.integer(x) >>>", "is_Atom = True def __new__(cls, name, handlers=None): obj = Boolean.__new__(cls)", "def __call__(self, expr): return AppliedPredicate(self, expr) def add_handler(self, handler): self.handlers.append(handler)", "global assumptions, but you can also use this class to", "class of expressions resulting from applying a Predicate. Examples ========", "(2, (self.func.name, self.arg.sort_key())), S.One.sort_key(), S.One) def __eq__(self, other): if type(other)", "type(Q.prime) <class 'sympy.assumptions.assume.Predicate'> >>> Q.prime.name 'prime' >>> Q.prime(7) Q.prime(7) >>>", "your own local assumptions contexts. 
It is basically a thin", "not attempt to decompose this @property def arg(self): \"\"\" Return", "mro = inspect.getmro(type(expr)) for handler in self.handlers: cls = get_class(handler)", "None mro = inspect.getmro(type(expr)) for handler in self.handlers: cls =", "is None: _res = res elif res is None: #", "@cacheit def sort_key(self, order=None): return (self.class_key(), (2, (self.func.name, self.arg.sort_key())), S.One.sort_key(),", "if not self: return \"%s()\" % self.__class__.__name__ return \"{}({})\".format(self.__class__.__name__, printer._print_set(self))", "decompose this @property def arg(self): \"\"\" Return the expression used", "printer): if not self: return \"%s()\" % self.__class__.__name__ return \"{}({})\".format(self.__class__.__name__,", "function that returns a boolean value. Predicates merely wrap their", "is None: continue res = eval_(expr, assumptions) # Do not", "sympy.core.cache import cacheit from sympy.core.singleton import S from sympy.core.sympify import", "S from sympy.core.sympify import _sympify from sympy.logic.boolalg import Boolean from", "get_class(handler) for subclass in mro: eval_ = getattr(cls, subclass.__name__, None)", "def func(self): return self._args[0] @cacheit def sort_key(self, order=None): return (self.class_key(),", "resolutors have concluded if _res != res: raise ValueError('incompatible resolutors')", "\"\"\" res, _res = None, None mro = inspect.getmro(type(expr)) for", "merely wrap their argument and remain unevaluated: >>> from sympy", "concluded if _res != res: raise ValueError('incompatible resolutors') break return", "from contextlib import contextmanager class AssumptionsContext(set): \"\"\"Set representing assumptions. This", "binary_symbols(self): from sympy.core.relational import Eq, Ne if self.func.name in ['is_true',", "+ y))) None >>> with assuming(Q.integer(x), Q.integer(y)): ... print(ask(Q.integer(x +", "representing assumptions. 
This is used to represent global assumptions, but", ">>> ask(Q.prime(7)) True The tautological predicate ``Q.is_true`` can be used", "sympy.core.singleton import S from sympy.core.sympify import _sympify from sympy.logic.boolalg import", "printer._print_set(self)) global_assumptions = AssumptionsContext() class AppliedPredicate(Boolean): \"\"\"The class of expressions", "def assuming(*assumptions): \"\"\" Context manager for assumptions Examples ======== >>>", "import get_class from contextlib import contextmanager class AssumptionsContext(set): \"\"\"Set representing", "import Q >>> from sympy.assumptions.assume import global_assumptions >>> global_assumptions AssumptionsContext()", "self.func.name in ['is_true', 'is_false']: i = self.arg if i.is_Boolean or", "*assumptions): \"\"\"Add an assumption.\"\"\" for a in assumptions: super().add(a) def", "(self.name,) def __call__(self, expr): return AppliedPredicate(self, expr) def add_handler(self, handler):", "return self.func.eval(self.arg, assumptions) @property def binary_symbols(self): from sympy.core.relational import Eq,", "import Boolean from sympy.utilities.source import get_class from contextlib import contextmanager", "sympy.utilities.source import get_class from contextlib import contextmanager class AssumptionsContext(set): \"\"\"Set", "======== >>> from sympy import Q, Symbol >>> x =", "given assumptions. This uses only direct resolution methods, not logical", "is None: continue if _res is None: _res = res", "function ``ask``: >>> ask(Q.prime(7)) True The tautological predicate ``Q.is_true`` can", "= res elif res is None: # since first resolutor", "predicate is a function that returns a boolean value. 
Predicates", "_sympify(arg) return Boolean.__new__(cls, predicate, arg) is_Atom = True # do", "@property def args(self): return self._args[1:] @property def func(self): return self._args[0]", "return AppliedPredicate(self, expr) def add_handler(self, handler): self.handlers.append(handler) def remove_handler(self, handler):", "name, handlers=None): obj = Boolean.__new__(cls) obj.name = name obj.handlers =", "but you can also use this class to create your", "expr): return AppliedPredicate(self, expr) def add_handler(self, handler): self.handlers.append(handler) def remove_handler(self,", "+ 1 \"\"\" return self._args[1] @property def args(self): return self._args[1:]", "res, _res = None, None mro = inspect.getmro(type(expr)) for handler", "from sympy.core.sympify import _sympify from sympy.logic.boolalg import Boolean from sympy.utilities.source", "(self.name,)), S.One.sort_key(), S.One def eval(self, expr, assumptions=True): \"\"\" Evaluate self(expr)", "if eval_ is None: continue res = eval_(expr, assumptions) #", "# only check consistency if both resolutors have concluded if", "obtain the truth value of an expression containing predicates, use", ">>> global_assumptions.remove(Q.real(x)) >>> global_assumptions AssumptionsContext() >>> global_assumptions.clear() \"\"\" def add(self,", "\"\"\"A predicate is a function that returns a boolean value.", "\"\"\" is_Atom = True def __new__(cls, name, handlers=None): obj =", "Q, Symbol >>> x = Symbol('x') >>> a = Q.integer(x", "have concluded if _res != res: raise ValueError('incompatible resolutors') break", "class AppliedPredicate(Boolean): \"\"\"The class of expressions resulting from applying a", "sympy import Q, Symbol >>> x = Symbol('x') >>> a", "tautological predicate ``Q.is_true`` can be used to wrap other objects:", "'sympy.assumptions.assume.AppliedPredicate'> \"\"\" __slots__ = () def __new__(cls, predicate, arg): arg", "assumptions. 
This uses only direct resolution methods, not logical inference.", "> 1) \"\"\" is_Atom = True def __new__(cls, name, handlers=None):", "uses only direct resolution methods, not logical inference. \"\"\" res,", "for subclass in mro: eval_ = getattr(cls, subclass.__name__, None) if", "create your own local assumptions contexts. It is basically a", "\"\"\" Evaluate self(expr) under the given assumptions. This uses only", "Q, ask >>> from sympy.abc import x, y >>> print(ask(Q.integer(x", "assumptions) @property def binary_symbols(self): from sympy.core.relational import Eq, Ne if", "args(self): return self._args[1:] @property def func(self): return self._args[0] @cacheit def", "y))) True \"\"\" old_global_assumptions = global_assumptions.copy() global_assumptions.update(assumptions) try: yield finally:", "for higher classes if res is None: continue if _res", "logical inference. \"\"\" res, _res = None, None mro =", "and remain unevaluated: >>> from sympy import Q, ask >>>", "sympy.assumptions import assuming, Q, ask >>> from sympy.abc import x,", "type(other) is AppliedPredicate: return self._args == other._args return False def", "Q.is_true(x > 1) Q.is_true(x > 1) \"\"\" is_Atom = True", "to Python's set, so see its documentation for advanced usage.", "since first resolutor was conclusive, we keep that value res", "sort_key(self, order=None): return (self.class_key(), (2, (self.func.name, self.arg.sort_key())), S.One.sort_key(), S.One) def", "return (self.name,) def __call__(self, expr): return AppliedPredicate(self, expr) def add_handler(self,", "you can also use this class to create your own", "sympy.core.sympify import _sympify from sympy.logic.boolalg import Boolean from sympy.utilities.source import", "sympy.core.relational import Eq, Ne if self.func.name in ['is_true', 'is_false']: i", ">>> with assuming(Q.integer(x), Q.integer(y)): ... 
print(ask(Q.integer(x + y))) True \"\"\"", "Q.prime(7) >>> _.func.name 'prime' To obtain the truth value of", "res @contextmanager def assuming(*assumptions): \"\"\" Context manager for assumptions Examples", "cls = get_class(handler) for subclass in mro: eval_ = getattr(cls,", "None >>> with assuming(Q.integer(x), Q.integer(y)): ... print(ask(Q.integer(x + y))) True", "raise ValueError('incompatible resolutors') break return res @contextmanager def assuming(*assumptions): \"\"\"", "+ 1) >>> a.arg x + 1 \"\"\" return self._args[1]", "\"\"\"Add an assumption.\"\"\" for a in assumptions: super().add(a) def _sympystr(self,", "return (self.class_key(), (2, (self.func.name, self.arg.sort_key())), S.One.sort_key(), S.One) def __eq__(self, other):", "def remove_handler(self, handler): self.handlers.remove(handler) @cacheit def sort_key(self, order=None): return self.class_key(),", "= inspect.getmro(type(expr)) for handler in self.handlers: cls = get_class(handler) for", "== other._args return False def __hash__(self): return super().__hash__() def _eval_ask(self,", "x = Symbol('x') >>> a = Q.integer(x + 1) >>>", "Do not stop if value returned is None # Try", "arg(self): \"\"\" Return the expression used by this assumption. Examples", "check consistency if both resolutors have concluded if _res !=", "sympy.abc import x >>> Q.is_true(x > 1) Q.is_true(x > 1)", "Q.prime.name 'prime' >>> Q.prime(7) Q.prime(7) >>> _.func.name 'prime' To obtain", "i.binary_symbols return set() class Predicate(Boolean): \"\"\"A predicate is a function", "= eval_(expr, assumptions) # Do not stop if value returned", "_.func.name 'prime' To obtain the truth value of an expression", "res = _res else: # only check consistency if both", "stop if value returned is None # Try to check", "self.func.eval(self.arg, assumptions) @property def binary_symbols(self): from sympy.core.relational import Eq, Ne", "so see its documentation for advanced usage. 
Examples ======== >>>", "_sympystr(self, printer): if not self: return \"%s()\" % self.__class__.__name__ return", "if res is None: continue if _res is None: _res", "from sympy.abc import x, y >>> print(ask(Q.integer(x + y))) None", "Predicate(Boolean): \"\"\"A predicate is a function that returns a boolean", "only check consistency if both resolutors have concluded if _res", ">>> global_assumptions AssumptionsContext() >>> from sympy.abc import x >>> global_assumptions.add(Q.real(x))", "resolutors') break return res @contextmanager def assuming(*assumptions): \"\"\" Context manager", "def args(self): return self._args[1:] @property def func(self): return self._args[0] @cacheit", "AssumptionsContext({Q.real(x)}) >>> global_assumptions.remove(Q.real(x)) >>> global_assumptions AssumptionsContext() >>> global_assumptions.clear() \"\"\" def", "name obj.handlers = handlers or [] return obj def _hashable_content(self):", "S.One.sort_key(), S.One) def __eq__(self, other): if type(other) is AppliedPredicate: return", "to check for higher classes if res is None: continue", "remove_handler(self, handler): self.handlers.remove(handler) @cacheit def sort_key(self, order=None): return self.class_key(), (1,", "= getattr(cls, subclass.__name__, None) if eval_ is None: continue res", ">>> global_assumptions AssumptionsContext() >>> global_assumptions.clear() \"\"\" def add(self, *assumptions): \"\"\"Add", "import _sympify from sympy.logic.boolalg import Boolean from sympy.utilities.source import get_class", "attempt to decompose this @property def arg(self): \"\"\" Return the", "import S from sympy.core.sympify import _sympify from sympy.logic.boolalg import Boolean", "direct resolution methods, not logical inference. 
\"\"\" res, _res =", "ValueError('incompatible resolutors') break return res @contextmanager def assuming(*assumptions): \"\"\" Context", "from sympy.assumptions.assume import global_assumptions >>> global_assumptions AssumptionsContext() >>> from sympy.abc", "assumptions, but you can also use this class to create", "self.arg.sort_key())), S.One.sort_key(), S.One) def __eq__(self, other): if type(other) is AppliedPredicate:", "Q >>> from sympy.assumptions.assume import global_assumptions >>> global_assumptions AssumptionsContext() >>>", "return (self.name,) def __getnewargs__(self): return (self.name,) def __call__(self, expr): return", "x + 1 \"\"\" return self._args[1] @property def args(self): return", "!= res: raise ValueError('incompatible resolutors') break return res @contextmanager def", "add(self, *assumptions): \"\"\"Add an assumption.\"\"\" for a in assumptions: super().add(a)", "print(ask(Q.integer(x + y))) True \"\"\" old_global_assumptions = global_assumptions.copy() global_assumptions.update(assumptions) try:", "the function ``ask``: >>> ask(Q.prime(7)) True The tautological predicate ``Q.is_true``", "arg) is_Atom = True # do not attempt to decompose", "assuming(*assumptions): \"\"\" Context manager for assumptions Examples ======== >>> from", "other objects: >>> from sympy.abc import x >>> Q.is_true(x >", "of expressions resulting from applying a Predicate. Examples ======== >>>", "Python's set, so see its documentation for advanced usage. Examples", "its documentation for advanced usage. 
Examples ======== >>> from sympy", "True # do not attempt to decompose this @property def", "S.One def eval(self, expr, assumptions=True): \"\"\" Evaluate self(expr) under the", "or i.is_Symbol or isinstance(i, (Eq, Ne)): return i.binary_symbols return set()", "return res @contextmanager def assuming(*assumptions): \"\"\" Context manager for assumptions", "from sympy import Q >>> from sympy.assumptions.assume import global_assumptions >>>", "@contextmanager def assuming(*assumptions): \"\"\" Context manager for assumptions Examples ========", "use this class to create your own local assumptions contexts.", "None) if eval_ is None: continue res = eval_(expr, assumptions)", "= Q.integer(x + 1) >>> a.arg x + 1 \"\"\"", "local assumptions contexts. It is basically a thin wrapper to", "return \"%s()\" % self.__class__.__name__ return \"{}({})\".format(self.__class__.__name__, printer._print_set(self)) global_assumptions = AssumptionsContext()", "get_class from contextlib import contextmanager class AssumptionsContext(set): \"\"\"Set representing assumptions.", "global_assumptions AssumptionsContext() >>> from sympy.abc import x >>> global_assumptions.add(Q.real(x)) >>>", "also use this class to create your own local assumptions", "= Boolean.__new__(cls) obj.name = name obj.handlers = handlers or []", "class AssumptionsContext(set): \"\"\"Set representing assumptions. This is used to represent", "AssumptionsContext() >>> global_assumptions.clear() \"\"\" def add(self, *assumptions): \"\"\"Add an assumption.\"\"\"", "(self.class_key(), (2, (self.func.name, self.arg.sort_key())), S.One.sort_key(), S.One) def __eq__(self, other): if", "'sympy.assumptions.assume.Predicate'> >>> Q.prime.name 'prime' >>> Q.prime(7) Q.prime(7) >>> _.func.name 'prime'", "Q.is_true(x > 1) \"\"\" is_Atom = True def __new__(cls, name,", "======== >>> from sympy import Q >>> from sympy.assumptions.assume import", "applying a Predicate. 
Examples ======== >>> from sympy import Q,", "handler in self.handlers: cls = get_class(handler) for subclass in mro:", "to decompose this @property def arg(self): \"\"\" Return the expression", "To obtain the truth value of an expression containing predicates,", "manager for assumptions Examples ======== >>> from sympy.assumptions import assuming,", "for assumptions Examples ======== >>> from sympy.assumptions import assuming, Q,", "expression containing predicates, use the function ``ask``: >>> ask(Q.prime(7)) True", "None # Try to check for higher classes if res", "if _res != res: raise ValueError('incompatible resolutors') break return res", "remain unevaluated: >>> from sympy import Q, ask >>> type(Q.prime)", "global_assumptions.add(Q.real(x)) >>> global_assumptions AssumptionsContext({Q.real(x)}) >>> global_assumptions.remove(Q.real(x)) >>> global_assumptions AssumptionsContext() >>>", "contextlib import contextmanager class AssumptionsContext(set): \"\"\"Set representing assumptions. This is", "Q.prime(7) Q.prime(7) >>> _.func.name 'prime' To obtain the truth value", "used to represent global assumptions, but you can also use", "def add(self, *assumptions): \"\"\"Add an assumption.\"\"\" for a in assumptions:", "wrap other objects: >>> from sympy.abc import x >>> Q.is_true(x", "this class to create your own local assumptions contexts. It", "advanced usage. Examples ======== >>> from sympy import Q >>>", "i = self.arg if i.is_Boolean or i.is_Symbol or isinstance(i, (Eq,", "from sympy import Q, ask >>> type(Q.prime) <class 'sympy.assumptions.assume.Predicate'> >>>", "obj def _hashable_content(self): return (self.name,) def __getnewargs__(self): return (self.name,) def", "Q.integer(y)): ... 
print(ask(Q.integer(x + y))) True \"\"\" old_global_assumptions = global_assumptions.copy()", "_hashable_content(self): return (self.name,) def __getnewargs__(self): return (self.name,) def __call__(self, expr):", "def __hash__(self): return super().__hash__() def _eval_ask(self, assumptions): return self.func.eval(self.arg, assumptions)", "value returned is None # Try to check for higher", "return \"{}({})\".format(self.__class__.__name__, printer._print_set(self)) global_assumptions = AssumptionsContext() class AppliedPredicate(Boolean): \"\"\"The class", "_eval_ask(self, assumptions): return self.func.eval(self.arg, assumptions) @property def binary_symbols(self): from sympy.core.relational", "thin wrapper to Python's set, so see its documentation for", "> 1) Q.is_true(x > 1) \"\"\" is_Atom = True def", "======== >>> from sympy.assumptions import assuming, Q, ask >>> from", "from sympy.abc import x >>> global_assumptions.add(Q.real(x)) >>> global_assumptions AssumptionsContext({Q.real(x)}) >>>", "resolution methods, not logical inference. \"\"\" res, _res = None,", "self(expr) under the given assumptions. 
This uses only direct resolution", "a in assumptions: super().add(a) def _sympystr(self, printer): if not self:", "predicates, use the function ``ask``: >>> ask(Q.prime(7)) True The tautological", "= self.arg if i.is_Boolean or i.is_Symbol or isinstance(i, (Eq, Ne)):", ">>> global_assumptions AssumptionsContext({Q.real(x)}) >>> global_assumptions.remove(Q.real(x)) >>> global_assumptions AssumptionsContext() >>> global_assumptions.clear()", "classes if res is None: continue if _res is None:", "Symbol >>> x = Symbol('x') >>> a = Q.integer(x +", "self: return \"%s()\" % self.__class__.__name__ return \"{}({})\".format(self.__class__.__name__, printer._print_set(self)) global_assumptions =", "return obj def _hashable_content(self): return (self.name,) def __getnewargs__(self): return (self.name,)", "__hash__(self): return super().__hash__() def _eval_ask(self, assumptions): return self.func.eval(self.arg, assumptions) @property", "for a in assumptions: super().add(a) def _sympystr(self, printer): if not", "subclass in mro: eval_ = getattr(cls, subclass.__name__, None) if eval_", "\"\"\"The class of expressions resulting from applying a Predicate. Examples", "getattr(cls, subclass.__name__, None) if eval_ is None: continue res =", "\"\"\" return self._args[1] @property def args(self): return self._args[1:] @property def", "1) >>> a.arg x + 1 \"\"\" return self._args[1] @property", "import Q, Symbol >>> x = Symbol('x') >>> Q.integer(x) Q.integer(x)", "resulting from applying a Predicate. Examples ======== >>> from sympy", "a boolean value. 
Predicates merely wrap their argument and remain", "return i.binary_symbols return set() class Predicate(Boolean): \"\"\"A predicate is a", "= name obj.handlers = handlers or [] return obj def", "conclusive, we keep that value res = _res else: #", "used to wrap other objects: >>> from sympy.abc import x", "sympy import Q, Symbol >>> x = Symbol('x') >>> Q.integer(x)", "res is None: continue if _res is None: _res =", "global_assumptions.remove(Q.real(x)) >>> global_assumptions AssumptionsContext() >>> global_assumptions.clear() \"\"\" def add(self, *assumptions):", "None: continue res = eval_(expr, assumptions) # Do not stop", "is a function that returns a boolean value. Predicates merely", "\"%s()\" % self.__class__.__name__ return \"{}({})\".format(self.__class__.__name__, printer._print_set(self)) global_assumptions = AssumptionsContext() class", "handler): self.handlers.remove(handler) @cacheit def sort_key(self, order=None): return self.class_key(), (1, (self.name,)),", "this assumption. Examples ======== >>> from sympy import Q, Symbol", "order=None): return (self.class_key(), (2, (self.func.name, self.arg.sort_key())), S.One.sort_key(), S.One) def __eq__(self,", "Predicate. Examples ======== >>> from sympy import Q, Symbol >>>", "returns a boolean value. Predicates merely wrap their argument and", "ask >>> from sympy.abc import x, y >>> print(ask(Q.integer(x +", ">>> from sympy.abc import x, y >>> print(ask(Q.integer(x + y)))", "in mro: eval_ = getattr(cls, subclass.__name__, None) if eval_ is", "1) Q.is_true(x > 1) \"\"\" is_Atom = True def __new__(cls,", ">>> _.func.name 'prime' To obtain the truth value of an", "\"\"\" def add(self, *assumptions): \"\"\"Add an assumption.\"\"\" for a in", "methods, not logical inference. 
\"\"\" res, _res = None, None", "None: _res = res elif res is None: # since", "self.arg if i.is_Boolean or i.is_Symbol or isinstance(i, (Eq, Ne)): return", "predicate ``Q.is_true`` can be used to wrap other objects: >>>", ">>> from sympy import Q >>> from sympy.assumptions.assume import global_assumptions", "@property def binary_symbols(self): from sympy.core.relational import Eq, Ne if self.func.name", "\"{}({})\".format(self.__class__.__name__, printer._print_set(self)) global_assumptions = AssumptionsContext() class AppliedPredicate(Boolean): \"\"\"The class of", "from sympy.logic.boolalg import Boolean from sympy.utilities.source import get_class from contextlib", "global_assumptions >>> global_assumptions AssumptionsContext() >>> from sympy.abc import x >>>", "res elif res is None: # since first resolutor was", "a thin wrapper to Python's set, so see its documentation", "i.is_Boolean or i.is_Symbol or isinstance(i, (Eq, Ne)): return i.binary_symbols return", "in ['is_true', 'is_false']: i = self.arg if i.is_Boolean or i.is_Symbol", "= get_class(handler) for subclass in mro: eval_ = getattr(cls, subclass.__name__,", "use the function ``ask``: >>> ask(Q.prime(7)) True The tautological predicate", "of an expression containing predicates, use the function ``ask``: >>>", "eval_(expr, assumptions) # Do not stop if value returned is", "mro: eval_ = getattr(cls, subclass.__name__, None) if eval_ is None:", "x >>> global_assumptions.add(Q.real(x)) >>> global_assumptions AssumptionsContext({Q.real(x)}) >>> global_assumptions.remove(Q.real(x)) >>> global_assumptions", "import assuming, Q, ask >>> from sympy.abc import x, y", "y))) None >>> with assuming(Q.integer(x), Q.integer(y)): ... 
print(ask(Q.integer(x + y)))", "from sympy.abc import x >>> Q.is_true(x > 1) Q.is_true(x >", "returned is None # Try to check for higher classes", "assuming, Q, ask >>> from sympy.abc import x, y >>>", ">>> x = Symbol('x') >>> Q.integer(x) Q.integer(x) >>> type(Q.integer(x)) <class", "is AppliedPredicate: return self._args == other._args return False def __hash__(self):", "handler): self.handlers.append(handler) def remove_handler(self, handler): self.handlers.remove(handler) @cacheit def sort_key(self, order=None):", "return self._args == other._args return False def __hash__(self): return super().__hash__()", "() def __new__(cls, predicate, arg): arg = _sympify(arg) return Boolean.__new__(cls,", "Boolean from sympy.utilities.source import get_class from contextlib import contextmanager class", "def _eval_ask(self, assumptions): return self.func.eval(self.arg, assumptions) @property def binary_symbols(self): from", "is_Atom = True # do not attempt to decompose this", "for handler in self.handlers: cls = get_class(handler) for subclass in", "def __new__(cls, predicate, arg): arg = _sympify(arg) return Boolean.__new__(cls, predicate,", "def add_handler(self, handler): self.handlers.append(handler) def remove_handler(self, handler): self.handlers.remove(handler) @cacheit def", "do not attempt to decompose this @property def arg(self): \"\"\"", "None: continue if _res is None: _res = res elif", "\"\"\" Context manager for assumptions Examples ======== >>> from sympy.assumptions", "sympy import Q >>> from sympy.assumptions.assume import global_assumptions >>> global_assumptions", "Boolean.__new__(cls, predicate, arg) is_Atom = True # do not attempt", "that returns a boolean value. Predicates merely wrap their argument", "from sympy.core.cache import cacheit from sympy.core.singleton import S from sympy.core.sympify", "under the given assumptions. This uses only direct resolution methods,", "assuming(Q.integer(x), Q.integer(y)): ... 
print(ask(Q.integer(x + y))) True \"\"\" old_global_assumptions =", "assumption.\"\"\" for a in assumptions: super().add(a) def _sympystr(self, printer): if", "other): if type(other) is AppliedPredicate: return self._args == other._args return", "add_handler(self, handler): self.handlers.append(handler) def remove_handler(self, handler): self.handlers.remove(handler) @cacheit def sort_key(self,", "Try to check for higher classes if res is None:", "assumptions Examples ======== >>> from sympy.assumptions import assuming, Q, ask", "Symbol('x') >>> a = Q.integer(x + 1) >>> a.arg x", "_res != res: raise ValueError('incompatible resolutors') break return res @contextmanager", "represent global assumptions, but you can also use this class", "elif res is None: # since first resolutor was conclusive,", "type(Q.integer(x)) <class 'sympy.assumptions.assume.AppliedPredicate'> \"\"\" __slots__ = () def __new__(cls, predicate,", "= True # do not attempt to decompose this @property", "if i.is_Boolean or i.is_Symbol or isinstance(i, (Eq, Ne)): return i.binary_symbols", "or isinstance(i, (Eq, Ne)): return i.binary_symbols return set() class Predicate(Boolean):", "boolean value. Predicates merely wrap their argument and remain unevaluated:", "assumptions) # Do not stop if value returned is None", "Return the expression used by this assumption. 
Examples ======== >>>", ">>> from sympy import Q, ask >>> type(Q.prime) <class 'sympy.assumptions.assume.Predicate'>", "order=None): return self.class_key(), (1, (self.name,)), S.One.sort_key(), S.One def eval(self, expr,", "'is_false']: i = self.arg if i.is_Boolean or i.is_Symbol or isinstance(i,", "super().__hash__() def _eval_ask(self, assumptions): return self.func.eval(self.arg, assumptions) @property def binary_symbols(self):", "True def __new__(cls, name, handlers=None): obj = Boolean.__new__(cls) obj.name =", "It is basically a thin wrapper to Python's set, so", "import Q, Symbol >>> x = Symbol('x') >>> a =", "sympy.assumptions.assume import global_assumptions >>> global_assumptions AssumptionsContext() >>> from sympy.abc import", ">>> Q.prime.name 'prime' >>> Q.prime(7) Q.prime(7) >>> _.func.name 'prime' To", "def _hashable_content(self): return (self.name,) def __getnewargs__(self): return (self.name,) def __call__(self,", "= _res else: # only check consistency if both resolutors", "\"\"\"Set representing assumptions. This is used to represent global assumptions,", "print(ask(Q.integer(x + y))) None >>> with assuming(Q.integer(x), Q.integer(y)): ... 
print(ask(Q.integer(x", "return False def __hash__(self): return super().__hash__() def _eval_ask(self, assumptions): return", "Q.integer(x) >>> type(Q.integer(x)) <class 'sympy.assumptions.assume.AppliedPredicate'> \"\"\" __slots__ = () def", "resolutor was conclusive, we keep that value res = _res", "was conclusive, we keep that value res = _res else:", "an assumption.\"\"\" for a in assumptions: super().add(a) def _sympystr(self, printer):", "_sympify from sympy.logic.boolalg import Boolean from sympy.utilities.source import get_class from", "the truth value of an expression containing predicates, use the", "objects: >>> from sympy.abc import x >>> Q.is_true(x > 1)", "break return res @contextmanager def assuming(*assumptions): \"\"\" Context manager for", ">>> global_assumptions.clear() \"\"\" def add(self, *assumptions): \"\"\"Add an assumption.\"\"\" for", "sort_key(self, order=None): return self.class_key(), (1, (self.name,)), S.One.sort_key(), S.One def eval(self,", "first resolutor was conclusive, we keep that value res =", "@property def arg(self): \"\"\" Return the expression used by this", "return self._args[1:] @property def func(self): return self._args[0] @cacheit def sort_key(self,", "Ne)): return i.binary_symbols return set() class Predicate(Boolean): \"\"\"A predicate is", "AssumptionsContext() >>> from sympy.abc import x >>> global_assumptions.add(Q.real(x)) >>> global_assumptions", "self.__class__.__name__ return \"{}({})\".format(self.__class__.__name__, printer._print_set(self)) global_assumptions = AssumptionsContext() class AppliedPredicate(Boolean): \"\"\"The", "__getnewargs__(self): return (self.name,) def __call__(self, expr): return AppliedPredicate(self, expr) def", "expr) def add_handler(self, handler): self.handlers.append(handler) def remove_handler(self, handler): self.handlers.remove(handler) @cacheit", "eval(self, expr, assumptions=True): \"\"\" Evaluate self(expr) under the given assumptions.", "set, so see its documentation for 
advanced usage. Examples ========", "= handlers or [] return obj def _hashable_content(self): return (self.name,)", "_res is None: _res = res elif res is None:", "not logical inference. \"\"\" res, _res = None, None mro", "cacheit from sympy.core.singleton import S from sympy.core.sympify import _sympify from", "used by this assumption. Examples ======== >>> from sympy import", "arg = _sympify(arg) return Boolean.__new__(cls, predicate, arg) is_Atom = True", "self._args[0] @cacheit def sort_key(self, order=None): return (self.class_key(), (2, (self.func.name, self.arg.sort_key())),", "handlers=None): obj = Boolean.__new__(cls) obj.name = name obj.handlers = handlers", "truth value of an expression containing predicates, use the function", "def __new__(cls, name, handlers=None): obj = Boolean.__new__(cls) obj.name = name", "1 \"\"\" return self._args[1] @property def args(self): return self._args[1:] @property", "AppliedPredicate(self, expr) def add_handler(self, handler): self.handlers.append(handler) def remove_handler(self, handler): self.handlers.remove(handler)", "with assuming(Q.integer(x), Q.integer(y)): ... print(ask(Q.integer(x + y))) True \"\"\" old_global_assumptions", ">>> Q.prime(7) Q.prime(7) >>> _.func.name 'prime' To obtain the truth", "def eval(self, expr, assumptions=True): \"\"\" Evaluate self(expr) under the given", "# Do not stop if value returned is None #", "self._args == other._args return False def __hash__(self): return super().__hash__() def", "the expression used by this assumption. 
Examples ======== >>> from", "# since first resolutor was conclusive, we keep that value", "if type(other) is AppliedPredicate: return self._args == other._args return False", "from sympy.core.relational import Eq, Ne if self.func.name in ['is_true', 'is_false']:", ">>> from sympy.abc import x >>> global_assumptions.add(Q.real(x)) >>> global_assumptions AssumptionsContext({Q.real(x)})", "set() class Predicate(Boolean): \"\"\"A predicate is a function that returns", "__new__(cls, predicate, arg): arg = _sympify(arg) return Boolean.__new__(cls, predicate, arg)", "x = Symbol('x') >>> Q.integer(x) Q.integer(x) >>> type(Q.integer(x)) <class 'sympy.assumptions.assume.AppliedPredicate'>", "handlers or [] return obj def _hashable_content(self): return (self.name,) def", "def arg(self): \"\"\" Return the expression used by this assumption.", ">>> print(ask(Q.integer(x + y))) None >>> with assuming(Q.integer(x), Q.integer(y)): ...", "higher classes if res is None: continue if _res is", "<class 'sympy.assumptions.assume.AppliedPredicate'> \"\"\" __slots__ = () def __new__(cls, predicate, arg):", "assumptions. This is used to represent global assumptions, but you", ">>> from sympy.assumptions.assume import global_assumptions >>> global_assumptions AssumptionsContext() >>> from", "class Predicate(Boolean): \"\"\"A predicate is a function that returns a", ">>> from sympy.abc import x >>> Q.is_true(x > 1) Q.is_true(x", "def __eq__(self, other): if type(other) is AppliedPredicate: return self._args ==", "False def __hash__(self): return super().__hash__() def _eval_ask(self, assumptions): return self.func.eval(self.arg,", "i.is_Symbol or isinstance(i, (Eq, Ne)): return i.binary_symbols return set() class", "assumptions contexts. 
It is basically a thin wrapper to Python's", "predicate, arg): arg = _sympify(arg) return Boolean.__new__(cls, predicate, arg) is_Atom", "y >>> print(ask(Q.integer(x + y))) None >>> with assuming(Q.integer(x), Q.integer(y)):", "isinstance(i, (Eq, Ne)): return i.binary_symbols return set() class Predicate(Boolean): \"\"\"A", "assumption. Examples ======== >>> from sympy import Q, Symbol >>>", "that value res = _res else: # only check consistency", "\"\"\" old_global_assumptions = global_assumptions.copy() global_assumptions.update(assumptions) try: yield finally: global_assumptions.clear() global_assumptions.update(old_global_assumptions)", "self.handlers.remove(handler) @cacheit def sort_key(self, order=None): return self.class_key(), (1, (self.name,)), S.One.sort_key(),", "(Eq, Ne)): return i.binary_symbols return set() class Predicate(Boolean): \"\"\"A predicate", "(1, (self.name,)), S.One.sort_key(), S.One def eval(self, expr, assumptions=True): \"\"\" Evaluate", "import inspect from sympy.core.cache import cacheit from sympy.core.singleton import S", "Q, ask >>> type(Q.prime) <class 'sympy.assumptions.assume.Predicate'> >>> Q.prime.name 'prime' >>>", "if both resolutors have concluded if _res != res: raise", "to create your own local assumptions contexts. It is basically", "only direct resolution methods, not logical inference. 
\"\"\" res, _res", "unevaluated: >>> from sympy import Q, ask >>> type(Q.prime) <class", "S.One) def __eq__(self, other): if type(other) is AppliedPredicate: return self._args", "super().add(a) def _sympystr(self, printer): if not self: return \"%s()\" %", "AppliedPredicate(Boolean): \"\"\"The class of expressions resulting from applying a Predicate.", "global_assumptions.clear() \"\"\" def add(self, *assumptions): \"\"\"Add an assumption.\"\"\" for a", "return self._args[0] @cacheit def sort_key(self, order=None): return (self.class_key(), (2, (self.func.name,", "return self._args[1] @property def args(self): return self._args[1:] @property def func(self):", "# Try to check for higher classes if res is", "= None, None mro = inspect.getmro(type(expr)) for handler in self.handlers:", "_res else: # only check consistency if both resolutors have", "return super().__hash__() def _eval_ask(self, assumptions): return self.func.eval(self.arg, assumptions) @property def", "import x >>> global_assumptions.add(Q.real(x)) >>> global_assumptions AssumptionsContext({Q.real(x)}) >>> global_assumptions.remove(Q.real(x)) >>>", "else: # only check consistency if both resolutors have concluded", "consistency if both resolutors have concluded if _res != res:", "self._args[1] @property def args(self): return self._args[1:] @property def func(self): return", "sympy.abc import x >>> global_assumptions.add(Q.real(x)) >>> global_assumptions AssumptionsContext({Q.real(x)}) >>> global_assumptions.remove(Q.real(x))", "__call__(self, expr): return AppliedPredicate(self, expr) def add_handler(self, handler): self.handlers.append(handler) def", "in self.handlers: cls = get_class(handler) for subclass in mro: eval_", "(self.name,) def __getnewargs__(self): return (self.name,) def __call__(self, expr): return AppliedPredicate(self,", "obj.handlers = handlers or [] return obj def _hashable_content(self): return", "assumptions=True): \"\"\" Evaluate self(expr) under the given assumptions. 
This uses", "contextmanager class AssumptionsContext(set): \"\"\"Set representing assumptions. This is used to", "Symbol >>> x = Symbol('x') >>> Q.integer(x) Q.integer(x) >>> type(Q.integer(x))", "sympy.abc import x, y >>> print(ask(Q.integer(x + y))) None >>>", "def binary_symbols(self): from sympy.core.relational import Eq, Ne if self.func.name in", "a Predicate. Examples ======== >>> from sympy import Q, Symbol", "__slots__ = () def __new__(cls, predicate, arg): arg = _sympify(arg)", "expression used by this assumption. Examples ======== >>> from sympy", "a = Q.integer(x + 1) >>> a.arg x + 1", "\"\"\" Return the expression used by this assumption. Examples ========", "self.handlers: cls = get_class(handler) for subclass in mro: eval_ =", "Q.integer(x + 1) >>> a.arg x + 1 \"\"\" return", "value res = _res else: # only check consistency if", "global_assumptions AssumptionsContext({Q.real(x)}) >>> global_assumptions.remove(Q.real(x)) >>> global_assumptions AssumptionsContext() >>> global_assumptions.clear() \"\"\"", "import cacheit from sympy.core.singleton import S from sympy.core.sympify import _sympify", ">>> type(Q.prime) <class 'sympy.assumptions.assume.Predicate'> >>> Q.prime.name 'prime' >>> Q.prime(7) Q.prime(7)", "containing predicates, use the function ``ask``: >>> ask(Q.prime(7)) True The", "% self.__class__.__name__ return \"{}({})\".format(self.__class__.__name__, printer._print_set(self)) global_assumptions = AssumptionsContext() class AppliedPredicate(Boolean):", "subclass.__name__, None) if eval_ is None: continue res = eval_(expr,", "_res = None, None mro = inspect.getmro(type(expr)) for handler in", "'prime' >>> Q.prime(7) Q.prime(7) >>> _.func.name 'prime' To obtain the", "@cacheit def sort_key(self, order=None): return self.class_key(), (1, (self.name,)), S.One.sort_key(), S.One", "Examples ======== >>> from sympy.assumptions import assuming, Q, ask >>>", "value. 
Predicates merely wrap their argument and remain unevaluated: >>>", "other._args return False def __hash__(self): return super().__hash__() def _eval_ask(self, assumptions):", "from sympy.assumptions import assuming, Q, ask >>> from sympy.abc import", "= True def __new__(cls, name, handlers=None): obj = Boolean.__new__(cls) obj.name", "Examples ======== >>> from sympy import Q, Symbol >>> x", "global_assumptions = AssumptionsContext() class AppliedPredicate(Boolean): \"\"\"The class of expressions resulting", "expressions resulting from applying a Predicate. Examples ======== >>> from", "value of an expression containing predicates, use the function ``ask``:", "__new__(cls, name, handlers=None): obj = Boolean.__new__(cls) obj.name = name obj.handlers", "Boolean.__new__(cls) obj.name = name obj.handlers = handlers or [] return", "self.handlers.append(handler) def remove_handler(self, handler): self.handlers.remove(handler) @cacheit def sort_key(self, order=None): return", "sympy import Q, ask >>> type(Q.prime) <class 'sympy.assumptions.assume.Predicate'> >>> Q.prime.name", "argument and remain unevaluated: >>> from sympy import Q, ask", "check for higher classes if res is None: continue if", "or [] return obj def _hashable_content(self): return (self.name,) def __getnewargs__(self):", "res: raise ValueError('incompatible resolutors') break return res @contextmanager def assuming(*assumptions):", "class to create your own local assumptions contexts. 
It is", "= Symbol('x') >>> Q.integer(x) Q.integer(x) >>> type(Q.integer(x)) <class 'sympy.assumptions.assume.AppliedPredicate'> \"\"\"", "res is None: # since first resolutor was conclusive, we", "be used to wrap other objects: >>> from sympy.abc import", "ask >>> type(Q.prime) <class 'sympy.assumptions.assume.Predicate'> >>> Q.prime.name 'prime' >>> Q.prime(7)", "def _sympystr(self, printer): if not self: return \"%s()\" % self.__class__.__name__", "predicate, arg) is_Atom = True # do not attempt to", ">>> from sympy import Q, Symbol >>> x = Symbol('x')", "= _sympify(arg) return Boolean.__new__(cls, predicate, arg) is_Atom = True #", "Ne if self.func.name in ['is_true', 'is_false']: i = self.arg if", "def sort_key(self, order=None): return self.class_key(), (1, (self.name,)), S.One.sort_key(), S.One def", "continue if _res is None: _res = res elif res", "import Q, ask >>> type(Q.prime) <class 'sympy.assumptions.assume.Predicate'> >>> Q.prime.name 'prime'", "1) \"\"\" is_Atom = True def __new__(cls, name, handlers=None): obj", "wrapper to Python's set, so see its documentation for advanced", "= Symbol('x') >>> a = Q.integer(x + 1) >>> a.arg", "return set() class Predicate(Boolean): \"\"\"A predicate is a function that", "import x, y >>> print(ask(Q.integer(x + y))) None >>> with", "if value returned is None # Try to check for", "<filename>sympy/assumptions/assume.py import inspect from sympy.core.cache import cacheit from sympy.core.singleton import", "def sort_key(self, order=None): return (self.class_key(), (2, (self.func.name, self.arg.sort_key())), S.One.sort_key(), S.One)", "from sympy.utilities.source import get_class from contextlib import contextmanager class AssumptionsContext(set):", "this @property def arg(self): \"\"\" Return the expression used by", "<class 'sympy.assumptions.assume.Predicate'> >>> Q.prime.name 'prime' >>> Q.prime(7) Q.prime(7) >>> _.func.name", "True The tautological predicate ``Q.is_true`` can be used to wrap", "x >>> Q.is_true(x > 
1) Q.is_true(x > 1) \"\"\" is_Atom", "... print(ask(Q.integer(x + y))) True \"\"\" old_global_assumptions = global_assumptions.copy() global_assumptions.update(assumptions)", "import contextmanager class AssumptionsContext(set): \"\"\"Set representing assumptions. This is used", "inspect from sympy.core.cache import cacheit from sympy.core.singleton import S from", "assumptions: super().add(a) def _sympystr(self, printer): if not self: return \"%s()\"", "AssumptionsContext(set): \"\"\"Set representing assumptions. This is used to represent global", "S.One.sort_key(), S.One def eval(self, expr, assumptions=True): \"\"\" Evaluate self(expr) under", ">>> Q.is_true(x > 1) Q.is_true(x > 1) \"\"\" is_Atom =", "is used to represent global assumptions, but you can also", "in assumptions: super().add(a) def _sympystr(self, printer): if not self: return", "\"\"\" __slots__ = () def __new__(cls, predicate, arg): arg =", "return self.class_key(), (1, (self.name,)), S.One.sort_key(), S.One def eval(self, expr, assumptions=True):", "['is_true', 'is_false']: i = self.arg if i.is_Boolean or i.is_Symbol or", "def __getnewargs__(self): return (self.name,) def __call__(self, expr): return AppliedPredicate(self, expr)", "return Boolean.__new__(cls, predicate, arg) is_Atom = True # do not", "eval_ = getattr(cls, subclass.__name__, None) if eval_ is None: continue", "if self.func.name in ['is_true', 'is_false']: i = self.arg if i.is_Boolean", "not stop if value returned is None # Try to", "keep that value res = _res else: # only check", ">>> type(Q.integer(x)) <class 'sympy.assumptions.assume.AppliedPredicate'> \"\"\" __slots__ = () def __new__(cls,", "from sympy import Q, Symbol >>> x = Symbol('x') >>>", "= () def __new__(cls, predicate, arg): arg = _sympify(arg) return", "self._args[1:] @property def func(self): return self._args[0] @cacheit def sort_key(self, order=None):", "'prime' To obtain the truth value of an expression containing", ">>> a = Q.integer(x + 1) >>> a.arg x +", "is 
None: # since first resolutor was conclusive, we keep", "Evaluate self(expr) under the given assumptions. This uses only direct", "= AssumptionsContext() class AppliedPredicate(Boolean): \"\"\"The class of expressions resulting from", "AssumptionsContext() class AppliedPredicate(Boolean): \"\"\"The class of expressions resulting from applying", "AppliedPredicate: return self._args == other._args return False def __hash__(self): return", "if _res is None: _res = res elif res is", "self.class_key(), (1, (self.name,)), S.One.sort_key(), S.One def eval(self, expr, assumptions=True): \"\"\"", "``Q.is_true`` can be used to wrap other objects: >>> from", "This uses only direct resolution methods, not logical inference. \"\"\"", ">>> global_assumptions.add(Q.real(x)) >>> global_assumptions AssumptionsContext({Q.real(x)}) >>> global_assumptions.remove(Q.real(x)) >>> global_assumptions AssumptionsContext()", "to wrap other objects: >>> from sympy.abc import x >>>", "by this assumption. Examples ======== >>> from sympy import Q,", "sympy.logic.boolalg import Boolean from sympy.utilities.source import get_class from contextlib import", "obj.name = name obj.handlers = handlers or [] return obj", "This is used to represent global assumptions, but you can", "``ask``: >>> ask(Q.prime(7)) True The tautological predicate ``Q.is_true`` can be", "see its documentation for advanced usage. 
Examples ======== >>> from", "global_assumptions AssumptionsContext() >>> global_assumptions.clear() \"\"\" def add(self, *assumptions): \"\"\"Add an", "to represent global assumptions, but you can also use this", "not self: return \"%s()\" % self.__class__.__name__ return \"{}({})\".format(self.__class__.__name__, printer._print_set(self)) global_assumptions", "[] return obj def _hashable_content(self): return (self.name,) def __getnewargs__(self): return", "eval_ is None: continue res = eval_(expr, assumptions) # Do", "basically a thin wrapper to Python's set, so see its", "continue res = eval_(expr, assumptions) # Do not stop if", "(self.func.name, self.arg.sort_key())), S.One.sort_key(), S.One) def __eq__(self, other): if type(other) is", "The tautological predicate ``Q.is_true`` can be used to wrap other", "Eq, Ne if self.func.name in ['is_true', 'is_false']: i = self.arg", "both resolutors have concluded if _res != res: raise ValueError('incompatible", "wrap their argument and remain unevaluated: >>> from sympy import", ">>> Q.integer(x) Q.integer(x) >>> type(Q.integer(x)) <class 'sympy.assumptions.assume.AppliedPredicate'> \"\"\" __slots__ =", "can also use this class to create your own local", "own local assumptions contexts. 
It is basically a thin wrapper", "we keep that value res = _res else: # only", "__eq__(self, other): if type(other) is AppliedPredicate: return self._args == other._args", "can be used to wrap other objects: >>> from sympy.abc", "import global_assumptions >>> global_assumptions AssumptionsContext() >>> from sympy.abc import x", "import x >>> Q.is_true(x > 1) Q.is_true(x > 1) \"\"\"", "+ y))) True \"\"\" old_global_assumptions = global_assumptions.copy() global_assumptions.update(assumptions) try: yield", "is basically a thin wrapper to Python's set, so see", "# do not attempt to decompose this @property def arg(self):", "their argument and remain unevaluated: >>> from sympy import Q,", "ask(Q.prime(7)) True The tautological predicate ``Q.is_true`` can be used to", "Examples ======== >>> from sympy import Q >>> from sympy.assumptions.assume", "a.arg x + 1 \"\"\" return self._args[1] @property def args(self):", "inspect.getmro(type(expr)) for handler in self.handlers: cls = get_class(handler) for subclass", "func(self): return self._args[0] @cacheit def sort_key(self, order=None): return (self.class_key(), (2,", "Context manager for assumptions Examples ======== >>> from sympy.assumptions import", "arg): arg = _sympify(arg) return Boolean.__new__(cls, predicate, arg) is_Atom =", "an expression containing predicates, use the function ``ask``: >>> ask(Q.prime(7))", "expr, assumptions=True): \"\"\" Evaluate self(expr) under the given assumptions. This", ">>> a.arg x + 1 \"\"\" return self._args[1] @property def", ">>> x = Symbol('x') >>> a = Q.integer(x + 1)", "Q.integer(x) Q.integer(x) >>> type(Q.integer(x)) <class 'sympy.assumptions.assume.AppliedPredicate'> \"\"\" __slots__ = ()", "usage. Examples ======== >>> from sympy import Q >>> from", "None, None mro = inspect.getmro(type(expr)) for handler in self.handlers: cls", "x, y >>> print(ask(Q.integer(x + y))) None >>> with assuming(Q.integer(x),", "a function that returns a boolean value. 
Predicates merely wrap", "Predicates merely wrap their argument and remain unevaluated: >>> from", "from applying a Predicate. Examples ======== >>> from sympy import", ">>> from sympy.assumptions import assuming, Q, ask >>> from sympy.abc", "the given assumptions. This uses only direct resolution methods, not", "True \"\"\" old_global_assumptions = global_assumptions.copy() global_assumptions.update(assumptions) try: yield finally: global_assumptions.clear()", "_res = res elif res is None: # since first", "@property def func(self): return self._args[0] @cacheit def sort_key(self, order=None): return" ]
[ "self.dist_matrix[:2, 1] npt.assert_equal(dm.distance_matrix, expected) for column in range(0, 5): dm.process_column(column,", "4.14, 5.53, 4.26, 8.21, 5.91, 6.83, 9.26, 6.19], [0.16, 9.05,", "np.atleast_2d(self.dist_matrix[1:6, 3:8][:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[1:6, 3:8]) dm.shift_query(2) dm.shift_series(1) dm.process_column(4, np.atleast_2d(self.dist_matrix[3:8,", "column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5]) dm.shift_query(1) dm.shift_series(3) correct = np.full((5, 5),", "DistanceMatrix() self.mock_initialise(dm) correct = np.full_like(self.dist_matrix, np.nan, dtype=float) for diag in", "dm.process_column(column, np.atleast_2d(self.dist_matrix[:5, :5][:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5]) dm.shift_query(1) dm.shift_series(3) correct", "= np.full_like(self.dist_matrix, np.nan, dtype=float) for diag in range(-8, self.dist_matrix.shape[1], 3):", "6.26]]) def mock_initialise(self, dm): dm.initialise(1, self.dist_matrix.shape[0], self.dist_matrix.shape[1]) def test_process_diagonal(self): dm", "class TestContextualMatrixProfile(TestCase): def setUp(self): self.dist_matrix = np.array([ [8.67, 1.10, 1.77,", "1.43, 6.32, 1.62, 0.20, 2.28, 7.11, 2.15], [9.90, 4.51, 2.11,", "self.mock_initialise(dm) correct = np.full_like(self.dist_matrix, np.nan, dtype=float) for column in [2,", "self.dist_matrix.shape[1]): diag_ind = diag_indices_of(self.dist_matrix, diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) npt.assert_equal(dm.distance_matrix, self.dist_matrix) def", "npt from distancematrix.util import diag_indices_of from distancematrix.consumer.distance_matrix import DistanceMatrix class", "dm.initialise(1, 5, 5) dm.process_diagonal(0, np.atleast_2d(self.dist_matrix[0, 0])) diag_ind = diag_indices_of(self.dist_matrix[:3, :3],", "1] expected[1, 2] = self.dist_matrix[1, 2] npt.assert_equal(dm.distance_matrix, expected) for diag", "4.78, 7.01, 
4.36, 5.24, 8.81, 7.90, 5.84, 8.90, 7.88, 3.37,", "4.41, 7.64], [6.26, 0.29, 6.44, 8.84, 1.24, 2.52, 6.25, 3.07,", "dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix) def test_process_column_partial_calculation(self): dm = DistanceMatrix()", "from distancematrix.util import diag_indices_of from distancematrix.consumer.distance_matrix import DistanceMatrix class TestContextualMatrixProfile(TestCase):", "5.46, 9.25, 9.78, 0.03, 5.64, 5.10, 3.58, 6.92], [1.01, 0.91,", "np.atleast_2d(np.atleast_2d(self.dist_matrix[diag_ind]))) expected = np.full((5, 5), np.nan) expected[0, 0] = self.dist_matrix[0,", "np from unittest import TestCase import numpy.testing as npt from", "6.28, 7.79, 0.68, 5.50, 6.72, 5.11, 0.80, 9.30, 9.77, 4.71,", "diag in range(-self.dist_matrix.shape[0] + 1, self.dist_matrix.shape[1]): diag_ind = diag_indices_of(self.dist_matrix, diag)", "8.21, 5.91, 6.83, 9.26, 6.19], [0.16, 9.05, 1.35, 4.78, 7.01,", "5) dm.process_column(0, np.atleast_2d(self.dist_matrix[0, 0])) dm.process_column(1, np.atleast_2d(self.dist_matrix[:2, 1])) expected = np.full((5,", "3): diag_ind = diag_indices_of(self.dist_matrix, diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) correct[diag_ind] = self.dist_matrix[diag_ind]", "self.dist_matrix[:5, :5]) dm.shift_query(1) dm.shift_series(3) correct = np.full((5, 5), np.nan) correct[0:4,", "self.dist_matrix) def test_process_diagonal_partial_calculation(self): dm = DistanceMatrix() self.mock_initialise(dm) correct = np.full_like(self.dist_matrix,", "= self.dist_matrix[2:7, 1:6].copy() expected[-2:, :] = np.nan expected[:, -1:] =", "3:8][:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[1:6, 3:8]) dm.shift_query(2) dm.shift_series(1) dm.process_column(4, np.atleast_2d(self.dist_matrix[3:8, 8]))", "self.dist_matrix[:, column] npt.assert_equal(dm.distance_matrix, correct) def test_streaming_process_column(self): dm = 
DistanceMatrix() dm.initialise(1,", "6.57, 1.79, 7.40, 4.41, 7.64], [6.26, 0.29, 6.44, 8.84, 1.24,", "4:8] correct[:, 4] = self.dist_matrix[3:8, 8] npt.assert_equal(dm.distance_matrix, correct) def test_streaming_process_diagonal(self):", "npt.assert_equal(dm.distance_matrix, self.dist_matrix[1:6, 3:8]) dm.shift_query(2) dm.shift_series(1) dm.process_column(4, np.atleast_2d(self.dist_matrix[3:8, 8])) correct =", "as npt from distancematrix.util import diag_indices_of from distancematrix.consumer.distance_matrix import DistanceMatrix", "+ 1, self.dist_matrix.shape[1]): diag_ind = diag_indices_of(self.dist_matrix, diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) npt.assert_equal(dm.distance_matrix,", "correct[:, 4] = self.dist_matrix[3:8, 8] npt.assert_equal(dm.distance_matrix, correct) def test_streaming_process_diagonal(self): dm", "3.87, 6.29, 0.32, 1.79, 5.80, 2.61, 1.43, 6.32, 1.62, 0.20,", "= DistanceMatrix() self.mock_initialise(dm) correct = np.full_like(self.dist_matrix, np.nan, dtype=float) for column", "column in [2, 3, 4, 5, 10, 11, 12]: dm.process_column(column,", "-1:] = np.nan npt.assert_equal(dm.distance_matrix, expected) for diag in range(-4,5): diag_ind", "9.01, 0.39, 9.], [4.67, 8.88, 3.05, 3.06, 2.36, 8.34, 4.91,", "self.mock_initialise(dm) correct = np.full_like(self.dist_matrix, np.nan, dtype=float) for diag in range(-8,", "5, 10, 11, 12]: dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column])) correct[:, column] =", "3.05, 3.06, 2.36, 8.34, 4.91, 5.46, 9.25, 9.78, 0.03, 5.64,", "9.32, 5.09], [4.33, 4.99, 0.14, 2.79, 2.10, 6.26, 9.40, 4.14,", "dm.process_column(1, np.atleast_2d(self.dist_matrix[:2, 1])) expected = np.full((5, 5), np.nan) expected[0, 0]", "from unittest import TestCase import numpy.testing as npt from distancematrix.util", "3.76, 3.61, 9.42, 5.56, 5.09, 7.07, 1.90, 4.78, 1.06, 0.69,", "import DistanceMatrix class TestContextualMatrixProfile(TestCase): def setUp(self): self.dist_matrix = 
np.array([ [8.67,", "np.atleast_2d(self.dist_matrix[:5, :5][:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5]) dm.shift_query(1) dm.shift_series(3) correct =", "6.72, 5.11, 0.80, 9.30, 9.77, 4.71, 3.26, 7.29, 6.26]]) def", "0.68, 5.50, 6.72, 5.11, 0.80, 9.30, 9.77, 4.71, 3.26, 7.29,", "mock_initialise(self, dm): dm.initialise(1, self.dist_matrix.shape[0], self.dist_matrix.shape[1]) def test_process_diagonal(self): dm = DistanceMatrix()", "7.90, 5.84, 8.90, 7.88, 3.37, 4.70, 6.94], [0.94, 8.70, 3.87,", "= self.dist_matrix[1, 2] npt.assert_equal(dm.distance_matrix, expected) for diag in range(-4,5): diag_ind", "1.62, 0.20, 2.28, 7.11, 2.15], [9.90, 4.51, 2.11, 2.83, 5.52,", "unittest import TestCase import numpy.testing as npt from distancematrix.util import", "np.atleast_2d(self.dist_matrix[diag_ind])) npt.assert_equal(dm.distance_matrix, self.dist_matrix) def test_process_diagonal_partial_calculation(self): dm = DistanceMatrix() self.mock_initialise(dm) correct", "= DistanceMatrix() self.mock_initialise(dm) correct = np.full_like(self.dist_matrix, np.nan, dtype=float) for diag", "diag in range(-8, self.dist_matrix.shape[1], 3): diag_ind = diag_indices_of(self.dist_matrix, diag) dm.process_diagonal(diag,", "DistanceMatrix() self.mock_initialise(dm) for column in range(0, self.dist_matrix.shape[1]): dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column]))", "[1.01, 0.91, 6.28, 7.79, 0.68, 5.50, 6.72, 5.11, 0.80, 9.30,", "0] = self.dist_matrix[0, 0] expected[0, 1] = self.dist_matrix[0, 1] expected[1,", "1.35, 4.78, 7.01, 4.36, 5.24, 8.81, 7.90, 5.84, 8.90, 7.88,", "4.71, 3.26, 7.29, 6.26]]) def mock_initialise(self, dm): dm.initialise(1, self.dist_matrix.shape[0], self.dist_matrix.shape[1])", "np.atleast_2d(self.dist_matrix[3:8, 8])) correct = np.full((5, 5), np.nan) correct[0:3, 0:4] =", "npt.assert_equal(dm.distance_matrix, correct) for column in range(0, 5): dm.process_column(column, np.atleast_2d(self.dist_matrix[1:6, 
3:8][:,", "TestCase import numpy.testing as npt from distancematrix.util import diag_indices_of from", "= np.full((5, 5), np.nan) correct[0:4, 0:2] = self.dist_matrix[1:5, 3:5] npt.assert_equal(dm.distance_matrix,", "dm.process_diagonal(1, np.atleast_2d(np.atleast_2d(self.dist_matrix[diag_ind]))) expected = np.full((5, 5), np.nan) expected[0, 0] =", "5.09], [4.33, 4.99, 0.14, 2.79, 2.10, 6.26, 9.40, 4.14, 5.53,", "dm = DistanceMatrix() dm.initialise(1, 5, 5) dm.process_column(0, np.atleast_2d(self.dist_matrix[0, 0])) dm.process_column(1,", "npt.assert_equal(dm.distance_matrix, self.dist_matrix) def test_process_column_partial_calculation(self): dm = DistanceMatrix() self.mock_initialise(dm) correct =", "5.11, 0.80, 9.30, 9.77, 4.71, 3.26, 7.29, 6.26]]) def mock_initialise(self,", "self.dist_matrix.shape[1]): dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix) def test_process_column_partial_calculation(self): dm =", "5): dm.process_column(column, np.atleast_2d(self.dist_matrix[1:6, 3:8][:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[1:6, 3:8]) dm.shift_query(2) dm.shift_series(1)", "8.33, 0.38], [7.30, 5.84, 9.63, 1.95, 3.76, 3.61, 9.42, 5.56,", ":3], 1) dm.process_diagonal(1, np.atleast_2d(np.atleast_2d(self.dist_matrix[diag_ind]))) expected = np.full((5, 5), np.nan) expected[0,", "6.94], [0.94, 8.70, 3.87, 6.29, 0.32, 1.79, 5.80, 2.61, 1.43,", ":] = np.nan expected[:, -1:] = np.nan npt.assert_equal(dm.distance_matrix, expected) for", "range(-self.dist_matrix.shape[0] + 1, self.dist_matrix.shape[1]): diag_ind = diag_indices_of(self.dist_matrix, diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind]))", "np.full((5, 5), np.nan) expected[0, 0] = self.dist_matrix[0, 0] expected[:2, 1]", "column in range(0, 5): dm.process_column(column, np.atleast_2d(self.dist_matrix[1:6, 3:8][:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[1:6,", "6.32, 
1.62, 0.20, 2.28, 7.11, 2.15], [9.90, 4.51, 2.11, 2.83,", "= np.full((5, 5), np.nan) correct[0:3, 0:4] = self.dist_matrix[3:6, 4:8] correct[:,", "expected[1, 2] = self.dist_matrix[1, 2] npt.assert_equal(dm.distance_matrix, expected) for diag in", "dm.shift_query(2) dm.shift_series(1) dm.process_column(4, np.atleast_2d(self.dist_matrix[3:8, 8])) correct = np.full((5, 5), np.nan)", "expected[:, -1:] = np.nan npt.assert_equal(dm.distance_matrix, expected) for diag in range(-4,5):", "dm.process_column(column, np.atleast_2d(self.dist_matrix[1:6, 3:8][:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[1:6, 3:8]) dm.shift_query(2) dm.shift_series(1) dm.process_column(4,", "for diag in range(-self.dist_matrix.shape[0] + 1, self.dist_matrix.shape[1]): diag_ind = diag_indices_of(self.dist_matrix,", "np.nan expected[:, -1:] = np.nan npt.assert_equal(dm.distance_matrix, expected) for diag in", "12]: dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column])) correct[:, column] = self.dist_matrix[:, column] npt.assert_equal(dm.distance_matrix,", "self.dist_matrix[1, 2] npt.assert_equal(dm.distance_matrix, expected) for diag in range(-4,5): diag_ind =", "6.41, 4.07, 4.67, 9.32, 5.09], [4.33, 4.99, 0.14, 2.79, 2.10,", "2] npt.assert_equal(dm.distance_matrix, expected) for diag in range(-4,5): diag_ind = diag_indices_of(self.dist_matrix[:5,", "2.52, 6.25, 3.07, 5.55, 3.19, 8.16, 5.32, 9.01, 0.39, 9.],", "range(0, 5): dm.process_column(column, np.atleast_2d(self.dist_matrix[:5, :5][:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5]) dm.shift_query(1)", "TestContextualMatrixProfile(TestCase): def setUp(self): self.dist_matrix = np.array([ [8.67, 1.10, 1.77, 1.26,", "diag_ind = diag_indices_of(self.dist_matrix, diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) correct[diag_ind] = self.dist_matrix[diag_ind] npt.assert_equal(dm.distance_matrix,", "correct = np.full((5, 5), np.nan) correct[0:4, 0:2] = 
self.dist_matrix[1:5, 3:5]", "np.atleast_2d(self.dist_matrix[diag_ind])) correct[diag_ind] = self.dist_matrix[diag_ind] npt.assert_equal(dm.distance_matrix, correct) def test_process_column(self): dm =", "4.24, 4.64, 5.06, 6.41, 4.07, 4.67, 9.32, 5.09], [4.33, 4.99,", "6.32, 4.24, 4.64, 5.06, 6.41, 4.07, 4.67, 9.32, 5.09], [4.33,", "diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) npt.assert_equal(dm.distance_matrix, self.dist_matrix) def test_process_diagonal_partial_calculation(self): dm = DistanceMatrix()", "1.77, 1.26, 1.91, 4.29, 6.32, 4.24, 4.64, 5.06, 6.41, 4.07,", "= diag_indices_of(self.dist_matrix, diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) npt.assert_equal(dm.distance_matrix, self.dist_matrix) def test_process_diagonal_partial_calculation(self): dm", "2.36, 8.34, 4.91, 5.46, 9.25, 9.78, 0.03, 5.64, 5.10, 3.58,", "test_process_diagonal(self): dm = DistanceMatrix() self.mock_initialise(dm) for diag in range(-self.dist_matrix.shape[0] +", "np.full((5, 5), np.nan) correct[0:3, 0:4] = self.dist_matrix[3:6, 4:8] correct[:, 4]", "7.88, 3.37, 4.70, 6.94], [0.94, 8.70, 3.87, 6.29, 0.32, 1.79,", "dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) correct[diag_ind] = self.dist_matrix[diag_ind] npt.assert_equal(dm.distance_matrix, correct) def test_process_column(self): dm", "as np from unittest import TestCase import numpy.testing as npt", "self.mock_initialise(dm) for diag in range(-self.dist_matrix.shape[0] + 1, self.dist_matrix.shape[1]): diag_ind =", "0])) dm.process_column(1, np.atleast_2d(self.dist_matrix[:2, 1])) expected = np.full((5, 5), np.nan) expected[0,", "def test_streaming_process_diagonal(self): dm = DistanceMatrix() dm.initialise(1, 5, 5) dm.process_diagonal(0, np.atleast_2d(self.dist_matrix[0,", "dm = DistanceMatrix() self.mock_initialise(dm) for column in range(0, self.dist_matrix.shape[1]): dm.process_column(column,", "7.01, 4.36, 5.24, 8.81, 7.90, 5.84, 8.90, 7.88, 
3.37, 4.70,", "self.dist_matrix[:5, :5]) dm.shift_query(2) dm.shift_series(1) expected = self.dist_matrix[2:7, 1:6].copy() expected[-2:, :]", ":5]) dm.shift_query(1) dm.shift_series(3) correct = np.full((5, 5), np.nan) correct[0:4, 0:2]", "correct) def test_process_column(self): dm = DistanceMatrix() self.mock_initialise(dm) for column in", "= self.dist_matrix[0, 0] expected[0, 1] = self.dist_matrix[0, 1] expected[1, 2]", "npt.assert_equal(dm.distance_matrix, correct) def test_streaming_process_column(self): dm = DistanceMatrix() dm.initialise(1, 5, 5)", "in range(-4,5): diag_ind = diag_indices_of(self.dist_matrix[:5, :5], diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) npt.assert_equal(dm.distance_matrix,", "0] expected[0, 1] = self.dist_matrix[0, 1] expected[1, 2] = self.dist_matrix[1,", "8.61, 3.39, 8.37, 6.95, 6.57, 1.79, 7.40, 4.41, 7.64], [6.26,", "test_streaming_process_column(self): dm = DistanceMatrix() dm.initialise(1, 5, 5) dm.process_column(0, np.atleast_2d(self.dist_matrix[0, 0]))", "1] npt.assert_equal(dm.distance_matrix, expected) for column in range(0, 5): dm.process_column(column, np.atleast_2d(self.dist_matrix[:5,", "diag in range(-4,5): diag_ind = diag_indices_of(self.dist_matrix[:5, :5], diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind]))", "column])) correct[:, column] = self.dist_matrix[:, column] npt.assert_equal(dm.distance_matrix, correct) def test_streaming_process_column(self):", ":5], diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5]) dm.shift_query(2) dm.shift_series(1) expected", "2.15], [9.90, 4.51, 2.11, 2.83, 5.52, 8.55, 6.90, 0.24, 1.58,", "def test_process_column(self): dm = DistanceMatrix() self.mock_initialise(dm) for column in range(0,", "in [2, 3, 4, 5, 10, 11, 12]: dm.process_column(column, np.atleast_2d(self.dist_matrix[:,", "in range(0, self.dist_matrix.shape[1]): dm.process_column(column, 
np.atleast_2d(self.dist_matrix[:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix) def test_process_column_partial_calculation(self):", "diag_indices_of from distancematrix.consumer.distance_matrix import DistanceMatrix class TestContextualMatrixProfile(TestCase): def setUp(self): self.dist_matrix", "expected[:2, 1] = self.dist_matrix[:2, 1] npt.assert_equal(dm.distance_matrix, expected) for column in", "import TestCase import numpy.testing as npt from distancematrix.util import diag_indices_of", "[4.33, 4.99, 0.14, 2.79, 2.10, 6.26, 9.40, 4.14, 5.53, 4.26,", "expected[-2:, :] = np.nan expected[:, -1:] = np.nan npt.assert_equal(dm.distance_matrix, expected)", "in range(-self.dist_matrix.shape[0] + 1, self.dist_matrix.shape[1]): diag_ind = diag_indices_of(self.dist_matrix, diag) dm.process_diagonal(diag,", "def test_process_diagonal(self): dm = DistanceMatrix() self.mock_initialise(dm) for diag in range(-self.dist_matrix.shape[0]", "self.dist_matrix[3:8, 8] npt.assert_equal(dm.distance_matrix, correct) def test_streaming_process_diagonal(self): dm = DistanceMatrix() dm.initialise(1,", "4.99, 0.14, 2.79, 2.10, 6.26, 9.40, 4.14, 5.53, 4.26, 8.21,", "= np.nan expected[:, -1:] = np.nan npt.assert_equal(dm.distance_matrix, expected) for diag", "np.array([ [8.67, 1.10, 1.77, 1.26, 1.91, 4.29, 6.32, 4.24, 4.64,", "expected[0, 0] = self.dist_matrix[0, 0] expected[0, 1] = self.dist_matrix[0, 1]", "range(-8, self.dist_matrix.shape[1], 3): diag_ind = diag_indices_of(self.dist_matrix, diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) correct[diag_ind]", "5), np.nan) correct[0:4, 0:2] = self.dist_matrix[1:5, 3:5] npt.assert_equal(dm.distance_matrix, correct) for", "def setUp(self): self.dist_matrix = np.array([ [8.67, 1.10, 1.77, 1.26, 1.91,", "correct = np.full((5, 5), np.nan) correct[0:3, 0:4] = self.dist_matrix[3:6, 4:8]", "6.25, 3.07, 5.55, 3.19, 8.16, 5.32, 9.01, 0.39, 9.], [4.67,", "4.37, 2.86, 8.61, 3.39, 8.37, 6.95, 6.57, 1.79, 
7.40, 4.41,", "6.19], [0.16, 9.05, 1.35, 4.78, 7.01, 4.36, 5.24, 8.81, 7.90,", "correct[0:4, 0:2] = self.dist_matrix[1:5, 3:5] npt.assert_equal(dm.distance_matrix, correct) for column in", "expected[0, 0] = self.dist_matrix[0, 0] expected[:2, 1] = self.dist_matrix[:2, 1]", "5.06, 6.41, 4.07, 4.67, 9.32, 5.09], [4.33, 4.99, 0.14, 2.79,", "expected[0, 1] = self.dist_matrix[0, 1] expected[1, 2] = self.dist_matrix[1, 2]", "0.20, 2.28, 7.11, 2.15], [9.90, 4.51, 2.11, 2.83, 5.52, 8.55,", "npt.assert_equal(dm.distance_matrix, self.dist_matrix) def test_process_diagonal_partial_calculation(self): dm = DistanceMatrix() self.mock_initialise(dm) correct =", "5.56, 5.09, 7.07, 1.90, 4.78, 1.06, 0.69, 3.67], [2.17, 8.37,", "numpy.testing as npt from distancematrix.util import diag_indices_of from distancematrix.consumer.distance_matrix import", "from distancematrix.consumer.distance_matrix import DistanceMatrix class TestContextualMatrixProfile(TestCase): def setUp(self): self.dist_matrix =", "2.11, 2.83, 5.52, 8.55, 6.90, 0.24, 1.58, 4.26, 8.75, 3.71,", "npt.assert_equal(dm.distance_matrix, expected) for column in range(0, 5): dm.process_column(column, np.atleast_2d(self.dist_matrix[:5, :5][:,", "for column in range(0, self.dist_matrix.shape[1]): dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix)", "self.dist_matrix = np.array([ [8.67, 1.10, 1.77, 1.26, 1.91, 4.29, 6.32,", "8.37, 3.99, 4.28, 4.37, 2.86, 8.61, 3.39, 8.37, 6.95, 6.57,", "7.79, 0.68, 5.50, 6.72, 5.11, 0.80, 9.30, 9.77, 4.71, 3.26,", "7.29, 6.26]]) def mock_initialise(self, dm): dm.initialise(1, self.dist_matrix.shape[0], self.dist_matrix.shape[1]) def test_process_diagonal(self):", "dm.shift_series(3) correct = np.full((5, 5), np.nan) correct[0:4, 0:2] = self.dist_matrix[1:5,", "3:5] npt.assert_equal(dm.distance_matrix, correct) for column in range(0, 5): dm.process_column(column, np.atleast_2d(self.dist_matrix[1:6,", "= np.nan 
npt.assert_equal(dm.distance_matrix, expected) for diag in range(-4,5): diag_ind =", "9.77, 4.71, 3.26, 7.29, 6.26]]) def mock_initialise(self, dm): dm.initialise(1, self.dist_matrix.shape[0],", "for column in range(0, 5): dm.process_column(column, np.atleast_2d(self.dist_matrix[1:6, 3:8][:, column])) npt.assert_equal(dm.distance_matrix,", "distancematrix.util import diag_indices_of from distancematrix.consumer.distance_matrix import DistanceMatrix class TestContextualMatrixProfile(TestCase): def", "3.37, 4.70, 6.94], [0.94, 8.70, 3.87, 6.29, 0.32, 1.79, 5.80,", "6.83, 9.26, 6.19], [0.16, 9.05, 1.35, 4.78, 7.01, 4.36, 5.24,", "in range(-8, self.dist_matrix.shape[1], 3): diag_ind = diag_indices_of(self.dist_matrix, diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind]))", "1.24, 2.52, 6.25, 3.07, 5.55, 3.19, 8.16, 5.32, 9.01, 0.39,", "1.90, 4.78, 1.06, 0.69, 3.67], [2.17, 8.37, 3.99, 4.28, 4.37,", "expected = self.dist_matrix[2:7, 1:6].copy() expected[-2:, :] = np.nan expected[:, -1:]", "5.84, 8.90, 7.88, 3.37, 4.70, 6.94], [0.94, 8.70, 3.87, 6.29,", "8.37, 6.95, 6.57, 1.79, 7.40, 4.41, 7.64], [6.26, 0.29, 6.44,", "distancematrix.consumer.distance_matrix import DistanceMatrix class TestContextualMatrixProfile(TestCase): def setUp(self): self.dist_matrix = np.array([", "5.80, 2.61, 1.43, 6.32, 1.62, 0.20, 2.28, 7.11, 2.15], [9.90,", "5), np.nan) expected[0, 0] = self.dist_matrix[0, 0] expected[:2, 1] =", "0] expected[:2, 1] = self.dist_matrix[:2, 1] npt.assert_equal(dm.distance_matrix, expected) for column", "setUp(self): self.dist_matrix = np.array([ [8.67, 1.10, 1.77, 1.26, 1.91, 4.29,", "4, 5, 10, 11, 12]: dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column])) correct[:, column]", "1.58, 4.26, 8.75, 3.71, 9.93, 8.33, 0.38], [7.30, 5.84, 9.63,", "dm.initialise(1, 5, 5) dm.process_column(0, np.atleast_2d(self.dist_matrix[0, 0])) dm.process_column(1, np.atleast_2d(self.dist_matrix[:2, 1])) expected", "5, 5) dm.process_diagonal(0, 
np.atleast_2d(self.dist_matrix[0, 0])) diag_ind = diag_indices_of(self.dist_matrix[:3, :3], 1)", "np.full_like(self.dist_matrix, np.nan, dtype=float) for diag in range(-8, self.dist_matrix.shape[1], 3): diag_ind", "dtype=float) for column in [2, 3, 4, 5, 10, 11,", "np.atleast_2d(self.dist_matrix[:, column])) correct[:, column] = self.dist_matrix[:, column] npt.assert_equal(dm.distance_matrix, correct) def", "dm.shift_series(1) expected = self.dist_matrix[2:7, 1:6].copy() expected[-2:, :] = np.nan expected[:,", "column] npt.assert_equal(dm.distance_matrix, correct) def test_streaming_process_column(self): dm = DistanceMatrix() dm.initialise(1, 5,", "3.71, 9.93, 8.33, 0.38], [7.30, 5.84, 9.63, 1.95, 3.76, 3.61,", "3.58, 6.92], [1.01, 0.91, 6.28, 7.79, 0.68, 5.50, 6.72, 5.11,", "1:6].copy() expected[-2:, :] = np.nan expected[:, -1:] = np.nan npt.assert_equal(dm.distance_matrix,", "3.06, 2.36, 8.34, 4.91, 5.46, 9.25, 9.78, 0.03, 5.64, 5.10,", "0.24, 1.58, 4.26, 8.75, 3.71, 9.93, 8.33, 0.38], [7.30, 5.84,", "correct) def test_streaming_process_diagonal(self): dm = DistanceMatrix() dm.initialise(1, 5, 5) dm.process_diagonal(0,", "= self.dist_matrix[:2, 1] npt.assert_equal(dm.distance_matrix, expected) for column in range(0, 5):", "correct[diag_ind] = self.dist_matrix[diag_ind] npt.assert_equal(dm.distance_matrix, correct) def test_process_column(self): dm = DistanceMatrix()", "0.39, 9.], [4.67, 8.88, 3.05, 3.06, 2.36, 8.34, 4.91, 5.46,", "4.26, 8.21, 5.91, 6.83, 9.26, 6.19], [0.16, 9.05, 1.35, 4.78,", "5.55, 3.19, 8.16, 5.32, 9.01, 0.39, 9.], [4.67, 8.88, 3.05,", "diag_indices_of(self.dist_matrix[:3, :3], 1) dm.process_diagonal(1, np.atleast_2d(np.atleast_2d(self.dist_matrix[diag_ind]))) expected = np.full((5, 5), np.nan)", "2.86, 8.61, 3.39, 8.37, 6.95, 6.57, 1.79, 7.40, 4.41, 7.64],", "8.84, 1.24, 2.52, 6.25, 3.07, 5.55, 3.19, 8.16, 5.32, 9.01,", "npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5]) dm.shift_query(2) dm.shift_series(1) expected = 
self.dist_matrix[2:7, 1:6].copy() expected[-2:,", "= DistanceMatrix() self.mock_initialise(dm) for column in range(0, self.dist_matrix.shape[1]): dm.process_column(column, np.atleast_2d(self.dist_matrix[:,", "correct[:, column] = self.dist_matrix[:, column] npt.assert_equal(dm.distance_matrix, correct) def test_streaming_process_column(self): dm", "1.26, 1.91, 4.29, 6.32, 4.24, 4.64, 5.06, 6.41, 4.07, 4.67,", "0.69, 3.67], [2.17, 8.37, 3.99, 4.28, 4.37, 2.86, 8.61, 3.39,", "6.44, 8.84, 1.24, 2.52, 6.25, 3.07, 5.55, 3.19, 8.16, 5.32,", ":5]) dm.shift_query(2) dm.shift_series(1) expected = self.dist_matrix[2:7, 1:6].copy() expected[-2:, :] =", "= DistanceMatrix() dm.initialise(1, 5, 5) dm.process_diagonal(0, np.atleast_2d(self.dist_matrix[0, 0])) diag_ind =", "3.61, 9.42, 5.56, 5.09, 7.07, 1.90, 4.78, 1.06, 0.69, 3.67],", "0] = self.dist_matrix[0, 0] expected[:2, 1] = self.dist_matrix[:2, 1] npt.assert_equal(dm.distance_matrix,", "DistanceMatrix() self.mock_initialise(dm) correct = np.full_like(self.dist_matrix, np.nan, dtype=float) for column in", "5) dm.process_diagonal(0, np.atleast_2d(self.dist_matrix[0, 0])) diag_ind = diag_indices_of(self.dist_matrix[:3, :3], 1) dm.process_diagonal(1,", "self.dist_matrix[0, 0] expected[0, 1] = self.dist_matrix[0, 1] expected[1, 2] =", "4.91, 5.46, 9.25, 9.78, 0.03, 5.64, 5.10, 3.58, 6.92], [1.01,", "np.nan, dtype=float) for column in [2, 3, 4, 5, 10,", "column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix) def test_process_column_partial_calculation(self): dm = DistanceMatrix() self.mock_initialise(dm) correct", "npt.assert_equal(dm.distance_matrix, correct) def test_process_column(self): dm = DistanceMatrix() self.mock_initialise(dm) for column", "= self.dist_matrix[:, column] npt.assert_equal(dm.distance_matrix, correct) def test_streaming_process_column(self): dm = DistanceMatrix()", "0.38], [7.30, 5.84, 9.63, 1.95, 3.76, 3.61, 9.42, 5.56, 5.09,", "2.28, 7.11, 2.15], [9.90, 4.51, 2.11, 2.83, 5.52, 8.55, 6.90,", 
"np.nan) correct[0:4, 0:2] = self.dist_matrix[1:5, 3:5] npt.assert_equal(dm.distance_matrix, correct) for column", "np.nan npt.assert_equal(dm.distance_matrix, expected) for diag in range(-4,5): diag_ind = diag_indices_of(self.dist_matrix[:5,", "1.91, 4.29, 6.32, 4.24, 4.64, 5.06, 6.41, 4.07, 4.67, 9.32,", "self.dist_matrix.shape[1]) def test_process_diagonal(self): dm = DistanceMatrix() self.mock_initialise(dm) for diag in", "8.88, 3.05, 3.06, 2.36, 8.34, 4.91, 5.46, 9.25, 9.78, 0.03,", "column] = self.dist_matrix[:, column] npt.assert_equal(dm.distance_matrix, correct) def test_streaming_process_column(self): dm =", "1])) expected = np.full((5, 5), np.nan) expected[0, 0] = self.dist_matrix[0,", "dm = DistanceMatrix() self.mock_initialise(dm) for diag in range(-self.dist_matrix.shape[0] + 1,", "npt.assert_equal(dm.distance_matrix, expected) for diag in range(-4,5): diag_ind = diag_indices_of(self.dist_matrix[:5, :5],", "2] = self.dist_matrix[1, 2] npt.assert_equal(dm.distance_matrix, expected) for diag in range(-4,5):", "diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5]) dm.shift_query(2) dm.shift_series(1) expected =", "np.nan) expected[0, 0] = self.dist_matrix[0, 0] expected[0, 1] = self.dist_matrix[0,", "self.dist_matrix[diag_ind] npt.assert_equal(dm.distance_matrix, correct) def test_process_column(self): dm = DistanceMatrix() self.mock_initialise(dm) for", "1] = self.dist_matrix[0, 1] expected[1, 2] = self.dist_matrix[1, 2] npt.assert_equal(dm.distance_matrix,", "1, self.dist_matrix.shape[1]): diag_ind = diag_indices_of(self.dist_matrix, diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) npt.assert_equal(dm.distance_matrix, self.dist_matrix)", "= np.full((5, 5), np.nan) expected[0, 0] = self.dist_matrix[0, 0] expected[0,", "6.29, 0.32, 1.79, 5.80, 2.61, 1.43, 6.32, 1.62, 0.20, 2.28,", "8.81, 7.90, 5.84, 8.90, 7.88, 3.37, 4.70, 6.94], [0.94, 8.70,", 
"dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column])) correct[:, column] = self.dist_matrix[:, column] npt.assert_equal(dm.distance_matrix, correct)", "0.29, 6.44, 8.84, 1.24, 2.52, 6.25, 3.07, 5.55, 3.19, 8.16,", "self.dist_matrix[3:6, 4:8] correct[:, 4] = self.dist_matrix[3:8, 8] npt.assert_equal(dm.distance_matrix, correct) def", "9.40, 4.14, 5.53, 4.26, 8.21, 5.91, 6.83, 9.26, 6.19], [0.16,", "4.26, 8.75, 3.71, 9.93, 8.33, 0.38], [7.30, 5.84, 9.63, 1.95,", "3.19, 8.16, 5.32, 9.01, 0.39, 9.], [4.67, 8.88, 3.05, 3.06,", "1] = self.dist_matrix[:2, 1] npt.assert_equal(dm.distance_matrix, expected) for column in range(0,", "9.], [4.67, 8.88, 3.05, 3.06, 2.36, 8.34, 4.91, 5.46, 9.25,", "test_process_column_partial_calculation(self): dm = DistanceMatrix() self.mock_initialise(dm) correct = np.full_like(self.dist_matrix, np.nan, dtype=float)", "9.30, 9.77, 4.71, 3.26, 7.29, 6.26]]) def mock_initialise(self, dm): dm.initialise(1,", "8.75, 3.71, 9.93, 8.33, 0.38], [7.30, 5.84, 9.63, 1.95, 3.76,", "numpy as np from unittest import TestCase import numpy.testing as", "1.95, 3.76, 3.61, 9.42, 5.56, 5.09, 7.07, 1.90, 4.78, 1.06,", "= DistanceMatrix() dm.initialise(1, 5, 5) dm.process_column(0, np.atleast_2d(self.dist_matrix[0, 0])) dm.process_column(1, np.atleast_2d(self.dist_matrix[:2,", "[6.26, 0.29, 6.44, 8.84, 1.24, 2.52, 6.25, 3.07, 5.55, 3.19,", "dm.shift_series(1) dm.process_column(4, np.atleast_2d(self.dist_matrix[3:8, 8])) correct = np.full((5, 5), np.nan) correct[0:3,", "= np.full_like(self.dist_matrix, np.nan, dtype=float) for column in [2, 3, 4,", "5.10, 3.58, 6.92], [1.01, 0.91, 6.28, 7.79, 0.68, 5.50, 6.72,", "4.29, 6.32, 4.24, 4.64, 5.06, 6.41, 4.07, 4.67, 9.32, 5.09],", "9.05, 1.35, 4.78, 7.01, 4.36, 5.24, 8.81, 7.90, 5.84, 8.90,", "7.11, 2.15], [9.90, 4.51, 2.11, 2.83, 5.52, 8.55, 6.90, 0.24,", "6.95, 6.57, 1.79, 7.40, 4.41, 7.64], [6.26, 0.29, 6.44, 8.84,", "np.nan, dtype=float) for diag in range(-8, self.dist_matrix.shape[1], 3): diag_ind 
=", "diag_indices_of(self.dist_matrix, diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) npt.assert_equal(dm.distance_matrix, self.dist_matrix) def test_process_diagonal_partial_calculation(self): dm =", "3.99, 4.28, 4.37, 2.86, 8.61, 3.39, 8.37, 6.95, 6.57, 1.79,", "import numpy as np from unittest import TestCase import numpy.testing", "7.40, 4.41, 7.64], [6.26, 0.29, 6.44, 8.84, 1.24, 2.52, 6.25,", "dm): dm.initialise(1, self.dist_matrix.shape[0], self.dist_matrix.shape[1]) def test_process_diagonal(self): dm = DistanceMatrix() self.mock_initialise(dm)", "5): dm.process_column(column, np.atleast_2d(self.dist_matrix[:5, :5][:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5]) dm.shift_query(1) dm.shift_series(3)", "8.70, 3.87, 6.29, 0.32, 1.79, 5.80, 2.61, 1.43, 6.32, 1.62,", "np.atleast_2d(self.dist_matrix[diag_ind])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5]) dm.shift_query(2) dm.shift_series(1) expected = self.dist_matrix[2:7, 1:6].copy()", "4.70, 6.94], [0.94, 8.70, 3.87, 6.29, 0.32, 1.79, 5.80, 2.61,", "DistanceMatrix() self.mock_initialise(dm) for diag in range(-self.dist_matrix.shape[0] + 1, self.dist_matrix.shape[1]): diag_ind", "self.dist_matrix.shape[1], 3): diag_ind = diag_indices_of(self.dist_matrix, diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) correct[diag_ind] =", "= self.dist_matrix[diag_ind] npt.assert_equal(dm.distance_matrix, correct) def test_process_column(self): dm = DistanceMatrix() self.mock_initialise(dm)", "0.91, 6.28, 7.79, 0.68, 5.50, 6.72, 5.11, 0.80, 9.30, 9.77,", "for column in [2, 3, 4, 5, 10, 11, 12]:", "expected) for column in range(0, 5): dm.process_column(column, np.atleast_2d(self.dist_matrix[:5, :5][:, column]))", "expected) for diag in range(-4,5): diag_ind = diag_indices_of(self.dist_matrix[:5, :5], diag)", "1.79, 7.40, 4.41, 7.64], [6.26, 0.29, 6.44, 8.84, 1.24, 2.52,", "8.90, 7.88, 3.37, 4.70, 6.94], [0.94, 8.70, 3.87, 6.29, 
0.32,", "= DistanceMatrix() self.mock_initialise(dm) for diag in range(-self.dist_matrix.shape[0] + 1, self.dist_matrix.shape[1]):", "4.64, 5.06, 6.41, 4.07, 4.67, 9.32, 5.09], [4.33, 4.99, 0.14,", "np.full((5, 5), np.nan) correct[0:4, 0:2] = self.dist_matrix[1:5, 3:5] npt.assert_equal(dm.distance_matrix, correct)", "self.mock_initialise(dm) for column in range(0, self.dist_matrix.shape[1]): dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column])) npt.assert_equal(dm.distance_matrix,", "import diag_indices_of from distancematrix.consumer.distance_matrix import DistanceMatrix class TestContextualMatrixProfile(TestCase): def setUp(self):", "correct) def test_streaming_process_column(self): dm = DistanceMatrix() dm.initialise(1, 5, 5) dm.process_column(0,", "DistanceMatrix() dm.initialise(1, 5, 5) dm.process_column(0, np.atleast_2d(self.dist_matrix[0, 0])) dm.process_column(1, np.atleast_2d(self.dist_matrix[:2, 1]))", "= self.dist_matrix[3:8, 8] npt.assert_equal(dm.distance_matrix, correct) def test_streaming_process_diagonal(self): dm = DistanceMatrix()", "npt.assert_equal(dm.distance_matrix, correct) def test_streaming_process_diagonal(self): dm = DistanceMatrix() dm.initialise(1, 5, 5)", "correct = np.full_like(self.dist_matrix, np.nan, dtype=float) for column in [2, 3,", "dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) npt.assert_equal(dm.distance_matrix, self.dist_matrix) def test_process_diagonal_partial_calculation(self): dm = DistanceMatrix() self.mock_initialise(dm)", "5), np.nan) expected[0, 0] = self.dist_matrix[0, 0] expected[0, 1] =", "correct = np.full_like(self.dist_matrix, np.nan, dtype=float) for diag in range(-8, self.dist_matrix.shape[1],", "4] = self.dist_matrix[3:8, 8] npt.assert_equal(dm.distance_matrix, correct) def test_streaming_process_diagonal(self): dm =", "= diag_indices_of(self.dist_matrix, diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) correct[diag_ind] = 
self.dist_matrix[diag_ind] npt.assert_equal(dm.distance_matrix, correct)", "4.28, 4.37, 2.86, 8.61, 3.39, 8.37, 6.95, 6.57, 1.79, 7.40,", "diag_indices_of(self.dist_matrix, diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) correct[diag_ind] = self.dist_matrix[diag_ind] npt.assert_equal(dm.distance_matrix, correct) def", "5), np.nan) correct[0:3, 0:4] = self.dist_matrix[3:6, 4:8] correct[:, 4] =", "[4.67, 8.88, 3.05, 3.06, 2.36, 8.34, 4.91, 5.46, 9.25, 9.78,", "= np.full((5, 5), np.nan) expected[0, 0] = self.dist_matrix[0, 0] expected[:2,", "dm = DistanceMatrix() self.mock_initialise(dm) correct = np.full_like(self.dist_matrix, np.nan, dtype=float) for", "3, 4, 5, 10, 11, 12]: dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column])) correct[:,", "9.42, 5.56, 5.09, 7.07, 1.90, 4.78, 1.06, 0.69, 3.67], [2.17,", "2.83, 5.52, 8.55, 6.90, 0.24, 1.58, 4.26, 8.75, 3.71, 9.93,", "test_streaming_process_diagonal(self): dm = DistanceMatrix() dm.initialise(1, 5, 5) dm.process_diagonal(0, np.atleast_2d(self.dist_matrix[0, 0]))", "np.atleast_2d(self.dist_matrix[:2, 1])) expected = np.full((5, 5), np.nan) expected[0, 0] =", "3:8]) dm.shift_query(2) dm.shift_series(1) dm.process_column(4, np.atleast_2d(self.dist_matrix[3:8, 8])) correct = np.full((5, 5),", "import numpy.testing as npt from distancematrix.util import diag_indices_of from distancematrix.consumer.distance_matrix", "5.09, 7.07, 1.90, 4.78, 1.06, 0.69, 3.67], [2.17, 8.37, 3.99,", "= self.dist_matrix[0, 0] expected[:2, 1] = self.dist_matrix[:2, 1] npt.assert_equal(dm.distance_matrix, expected)", "dm.process_column(0, np.atleast_2d(self.dist_matrix[0, 0])) dm.process_column(1, np.atleast_2d(self.dist_matrix[:2, 1])) expected = np.full((5, 5),", "9.93, 8.33, 0.38], [7.30, 5.84, 9.63, 1.95, 3.76, 3.61, 9.42,", "6.90, 0.24, 1.58, 4.26, 8.75, 3.71, 9.93, 8.33, 0.38], [7.30,", "5.32, 9.01, 0.39, 9.], [4.67, 8.88, 3.05, 3.06, 2.36, 8.34,", "def test_streaming_process_column(self): dm = 
DistanceMatrix() dm.initialise(1, 5, 5) dm.process_column(0, np.atleast_2d(self.dist_matrix[0,", "dm.shift_query(1) dm.shift_series(3) correct = np.full((5, 5), np.nan) correct[0:4, 0:2] =", "2.61, 1.43, 6.32, 1.62, 0.20, 2.28, 7.11, 2.15], [9.90, 4.51,", "1.79, 5.80, 2.61, 1.43, 6.32, 1.62, 0.20, 2.28, 7.11, 2.15],", "0.32, 1.79, 5.80, 2.61, 1.43, 6.32, 1.62, 0.20, 2.28, 7.11,", "diag_ind = diag_indices_of(self.dist_matrix[:3, :3], 1) dm.process_diagonal(1, np.atleast_2d(np.atleast_2d(self.dist_matrix[diag_ind]))) expected = np.full((5,", "self.dist_matrix[1:5, 3:5] npt.assert_equal(dm.distance_matrix, correct) for column in range(0, 5): dm.process_column(column,", "[8.67, 1.10, 1.77, 1.26, 1.91, 4.29, 6.32, 4.24, 4.64, 5.06,", "np.full_like(self.dist_matrix, np.nan, dtype=float) for column in [2, 3, 4, 5,", "np.atleast_2d(self.dist_matrix[0, 0])) dm.process_column(1, np.atleast_2d(self.dist_matrix[:2, 1])) expected = np.full((5, 5), np.nan)", "range(0, self.dist_matrix.shape[1]): dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix) def test_process_column_partial_calculation(self): dm", "2.10, 6.26, 9.40, 4.14, 5.53, 4.26, 8.21, 5.91, 6.83, 9.26,", "dtype=float) for diag in range(-8, self.dist_matrix.shape[1], 3): diag_ind = diag_indices_of(self.dist_matrix,", "9.78, 0.03, 5.64, 5.10, 3.58, 6.92], [1.01, 0.91, 6.28, 7.79,", "7.64], [6.26, 0.29, 6.44, 8.84, 1.24, 2.52, 6.25, 3.07, 5.55,", "dm.process_column(4, np.atleast_2d(self.dist_matrix[3:8, 8])) correct = np.full((5, 5), np.nan) correct[0:3, 0:4]", "diag_ind = diag_indices_of(self.dist_matrix, diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) npt.assert_equal(dm.distance_matrix, self.dist_matrix) def test_process_diagonal_partial_calculation(self):", "= diag_indices_of(self.dist_matrix[:5, :5], diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) npt.assert_equal(dm.distance_matrix, 
self.dist_matrix[:5, :5]) dm.shift_query(2)", "3.67], [2.17, 8.37, 3.99, 4.28, 4.37, 2.86, 8.61, 3.39, 8.37,", "0:2] = self.dist_matrix[1:5, 3:5] npt.assert_equal(dm.distance_matrix, correct) for column in range(0,", "9.25, 9.78, 0.03, 5.64, 5.10, 3.58, 6.92], [1.01, 0.91, 6.28,", "= diag_indices_of(self.dist_matrix[:3, :3], 1) dm.process_diagonal(1, np.atleast_2d(np.atleast_2d(self.dist_matrix[diag_ind]))) expected = np.full((5, 5),", "[0.94, 8.70, 3.87, 6.29, 0.32, 1.79, 5.80, 2.61, 1.43, 6.32,", "10, 11, 12]: dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column])) correct[:, column] = self.dist_matrix[:,", "self.dist_matrix[2:7, 1:6].copy() expected[-2:, :] = np.nan expected[:, -1:] = np.nan", "= np.array([ [8.67, 1.10, 1.77, 1.26, 1.91, 4.29, 6.32, 4.24,", "test_process_diagonal_partial_calculation(self): dm = DistanceMatrix() self.mock_initialise(dm) correct = np.full_like(self.dist_matrix, np.nan, dtype=float)", "3.39, 8.37, 6.95, 6.57, 1.79, 7.40, 4.41, 7.64], [6.26, 0.29,", "1) dm.process_diagonal(1, np.atleast_2d(np.atleast_2d(self.dist_matrix[diag_ind]))) expected = np.full((5, 5), np.nan) expected[0, 0]", "correct) for column in range(0, 5): dm.process_column(column, np.atleast_2d(self.dist_matrix[1:6, 3:8][:, column]))", "7.07, 1.90, 4.78, 1.06, 0.69, 3.67], [2.17, 8.37, 3.99, 4.28,", "5.64, 5.10, 3.58, 6.92], [1.01, 0.91, 6.28, 7.79, 0.68, 5.50,", "self.dist_matrix[0, 0] expected[:2, 1] = self.dist_matrix[:2, 1] npt.assert_equal(dm.distance_matrix, expected) for", "np.nan) expected[0, 0] = self.dist_matrix[0, 0] expected[:2, 1] = self.dist_matrix[:2,", "5.24, 8.81, 7.90, 5.84, 8.90, 7.88, 3.37, 4.70, 6.94], [0.94,", "diag_indices_of(self.dist_matrix[:5, :5], diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5]) dm.shift_query(2) dm.shift_series(1)", "4.07, 4.67, 9.32, 5.09], [4.33, 4.99, 0.14, 2.79, 2.10, 6.26,", "0.14, 2.79, 2.10, 6.26, 9.40, 4.14, 5.53, 
4.26, 8.21, 5.91,", "5.53, 4.26, 8.21, 5.91, 6.83, 9.26, 6.19], [0.16, 9.05, 1.35,", "5, 5) dm.process_column(0, np.atleast_2d(self.dist_matrix[0, 0])) dm.process_column(1, np.atleast_2d(self.dist_matrix[:2, 1])) expected =", "dm = DistanceMatrix() dm.initialise(1, 5, 5) dm.process_diagonal(0, np.atleast_2d(self.dist_matrix[0, 0])) diag_ind", "test_process_column(self): dm = DistanceMatrix() self.mock_initialise(dm) for column in range(0, self.dist_matrix.shape[1]):", "8.34, 4.91, 5.46, 9.25, 9.78, 0.03, 5.64, 5.10, 3.58, 6.92],", "DistanceMatrix class TestContextualMatrixProfile(TestCase): def setUp(self): self.dist_matrix = np.array([ [8.67, 1.10,", "dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5]) dm.shift_query(2) dm.shift_series(1) expected = self.dist_matrix[2:7,", "np.atleast_2d(self.dist_matrix[0, 0])) diag_ind = diag_indices_of(self.dist_matrix[:3, :3], 1) dm.process_diagonal(1, np.atleast_2d(np.atleast_2d(self.dist_matrix[diag_ind]))) expected", "= self.dist_matrix[0, 1] expected[1, 2] = self.dist_matrix[1, 2] npt.assert_equal(dm.distance_matrix, expected)", "3.07, 5.55, 3.19, 8.16, 5.32, 9.01, 0.39, 9.], [4.67, 8.88,", "0.80, 9.30, 9.77, 4.71, 3.26, 7.29, 6.26]]) def mock_initialise(self, dm):", "1.06, 0.69, 3.67], [2.17, 8.37, 3.99, 4.28, 4.37, 2.86, 8.61,", "for column in range(0, 5): dm.process_column(column, np.atleast_2d(self.dist_matrix[:5, :5][:, column])) npt.assert_equal(dm.distance_matrix,", "DistanceMatrix() dm.initialise(1, 5, 5) dm.process_diagonal(0, np.atleast_2d(self.dist_matrix[0, 0])) diag_ind = diag_indices_of(self.dist_matrix[:3,", "5.91, 6.83, 9.26, 6.19], [0.16, 9.05, 1.35, 4.78, 7.01, 4.36,", "self.dist_matrix) def test_process_column_partial_calculation(self): dm = DistanceMatrix() self.mock_initialise(dm) correct = np.full_like(self.dist_matrix,", "4.36, 5.24, 8.81, 7.90, 5.84, 8.90, 7.88, 3.37, 4.70, 6.94],", "in range(0, 5): 
dm.process_column(column, np.atleast_2d(self.dist_matrix[:5, :5][:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5])", "1.10, 1.77, 1.26, 1.91, 4.29, 6.32, 4.24, 4.64, 5.06, 6.41,", "np.full((5, 5), np.nan) expected[0, 0] = self.dist_matrix[0, 0] expected[0, 1]", "0])) diag_ind = diag_indices_of(self.dist_matrix[:3, :3], 1) dm.process_diagonal(1, np.atleast_2d(np.atleast_2d(self.dist_matrix[diag_ind]))) expected =", "8.16, 5.32, 9.01, 0.39, 9.], [4.67, 8.88, 3.05, 3.06, 2.36,", "column in range(0, self.dist_matrix.shape[1]): dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix) def", "np.atleast_2d(self.dist_matrix[:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix) def test_process_column_partial_calculation(self): dm = DistanceMatrix() self.mock_initialise(dm)", "diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) correct[diag_ind] = self.dist_matrix[diag_ind] npt.assert_equal(dm.distance_matrix, correct) def test_process_column(self):", "= self.dist_matrix[1:5, 3:5] npt.assert_equal(dm.distance_matrix, correct) for column in range(0, 5):", "def mock_initialise(self, dm): dm.initialise(1, self.dist_matrix.shape[0], self.dist_matrix.shape[1]) def test_process_diagonal(self): dm =", "[2, 3, 4, 5, 10, 11, 12]: dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column]))", "self.dist_matrix.shape[0], self.dist_matrix.shape[1]) def test_process_diagonal(self): dm = DistanceMatrix() self.mock_initialise(dm) for diag", "in range(0, 5): dm.process_column(column, np.atleast_2d(self.dist_matrix[1:6, 3:8][:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[1:6, 3:8])", "8] npt.assert_equal(dm.distance_matrix, correct) def test_streaming_process_diagonal(self): dm = DistanceMatrix() dm.initialise(1, 5,", "6.92], [1.01, 0.91, 6.28, 7.79, 0.68, 5.50, 6.72, 5.11, 0.80,", "0.03, 5.64, 5.10, 3.58, 6.92], [1.01, 0.91, 6.28, 
7.79, 0.68,", "9.26, 6.19], [0.16, 9.05, 1.35, 4.78, 7.01, 4.36, 5.24, 8.81,", "11, 12]: dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column])) correct[:, column] = self.dist_matrix[:, column]", "range(-4,5): diag_ind = diag_indices_of(self.dist_matrix[:5, :5], diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5,", "4.67, 9.32, 5.09], [4.33, 4.99, 0.14, 2.79, 2.10, 6.26, 9.40,", "def test_process_diagonal_partial_calculation(self): dm = DistanceMatrix() self.mock_initialise(dm) correct = np.full_like(self.dist_matrix, np.nan,", "5.52, 8.55, 6.90, 0.24, 1.58, 4.26, 8.75, 3.71, 9.93, 8.33,", "3.26, 7.29, 6.26]]) def mock_initialise(self, dm): dm.initialise(1, self.dist_matrix.shape[0], self.dist_matrix.shape[1]) def", "column in range(0, 5): dm.process_column(column, np.atleast_2d(self.dist_matrix[:5, :5][:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5,", "2.79, 2.10, 6.26, 9.40, 4.14, 5.53, 4.26, 8.21, 5.91, 6.83,", "9.63, 1.95, 3.76, 3.61, 9.42, 5.56, 5.09, 7.07, 1.90, 4.78,", ":5][:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5]) dm.shift_query(1) dm.shift_series(3) correct = np.full((5,", "np.nan) correct[0:3, 0:4] = self.dist_matrix[3:6, 4:8] correct[:, 4] = self.dist_matrix[3:8,", "8.55, 6.90, 0.24, 1.58, 4.26, 8.75, 3.71, 9.93, 8.33, 0.38],", "6.26, 9.40, 4.14, 5.53, 4.26, 8.21, 5.91, 6.83, 9.26, 6.19],", "for diag in range(-8, self.dist_matrix.shape[1], 3): diag_ind = diag_indices_of(self.dist_matrix, diag)", "0:4] = self.dist_matrix[3:6, 4:8] correct[:, 4] = self.dist_matrix[3:8, 8] npt.assert_equal(dm.distance_matrix,", "expected = np.full((5, 5), np.nan) expected[0, 0] = self.dist_matrix[0, 0]", "dm.shift_query(2) dm.shift_series(1) expected = self.dist_matrix[2:7, 1:6].copy() expected[-2:, :] = np.nan", "5.84, 9.63, 1.95, 3.76, 3.61, 9.42, 5.56, 5.09, 7.07, 1.90,", "[2.17, 8.37, 3.99, 4.28, 4.37, 2.86, 8.61, 3.39, 
8.37, 6.95,", "8])) correct = np.full((5, 5), np.nan) correct[0:3, 0:4] = self.dist_matrix[3:6,", "correct[0:3, 0:4] = self.dist_matrix[3:6, 4:8] correct[:, 4] = self.dist_matrix[3:8, 8]", "[7.30, 5.84, 9.63, 1.95, 3.76, 3.61, 9.42, 5.56, 5.09, 7.07,", "self.dist_matrix[1:6, 3:8]) dm.shift_query(2) dm.shift_series(1) dm.process_column(4, np.atleast_2d(self.dist_matrix[3:8, 8])) correct = np.full((5,", "for diag in range(-4,5): diag_ind = diag_indices_of(self.dist_matrix[:5, :5], diag) dm.process_diagonal(diag,", "4.78, 1.06, 0.69, 3.67], [2.17, 8.37, 3.99, 4.28, 4.37, 2.86,", "npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5]) dm.shift_query(1) dm.shift_series(3) correct = np.full((5, 5), np.nan)", "column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[1:6, 3:8]) dm.shift_query(2) dm.shift_series(1) dm.process_column(4, np.atleast_2d(self.dist_matrix[3:8, 8])) correct", "dm.initialise(1, self.dist_matrix.shape[0], self.dist_matrix.shape[1]) def test_process_diagonal(self): dm = DistanceMatrix() self.mock_initialise(dm) for", "[9.90, 4.51, 2.11, 2.83, 5.52, 8.55, 6.90, 0.24, 1.58, 4.26,", "[0.16, 9.05, 1.35, 4.78, 7.01, 4.36, 5.24, 8.81, 7.90, 5.84,", "def test_process_column_partial_calculation(self): dm = DistanceMatrix() self.mock_initialise(dm) correct = np.full_like(self.dist_matrix, np.nan,", "range(0, 5): dm.process_column(column, np.atleast_2d(self.dist_matrix[1:6, 3:8][:, column])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[1:6, 3:8]) dm.shift_query(2)", "diag_ind = diag_indices_of(self.dist_matrix[:5, :5], diag) dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind])) npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5])", "self.dist_matrix[0, 1] expected[1, 2] = self.dist_matrix[1, 2] npt.assert_equal(dm.distance_matrix, expected) for", "= self.dist_matrix[3:6, 4:8] correct[:, 4] = self.dist_matrix[3:8, 8] npt.assert_equal(dm.distance_matrix, correct)", "5.50, 6.72, 5.11, 0.80, 9.30, 9.77, 4.71, 3.26, 
7.29, 6.26]])", "dm.process_diagonal(0, np.atleast_2d(self.dist_matrix[0, 0])) diag_ind = diag_indices_of(self.dist_matrix[:3, :3], 1) dm.process_diagonal(1, np.atleast_2d(np.atleast_2d(self.dist_matrix[diag_ind])))", "4.51, 2.11, 2.83, 5.52, 8.55, 6.90, 0.24, 1.58, 4.26, 8.75," ]
[ "RUNNING = \"running\" FREEZE = \"freeze\" SHUTDOWN = \"shutdown\" STOPPING", "with equal time # allocated. # Note that the time", "= \"ipv4\" ATTR_IPV6 = \"ipv6\" ATTR_ISSUES = \"issues\" ATTR_KERNEL =", "Enum): \"\"\"Startup types of Add-on.\"\"\" INITIALIZE = \"initialize\" SYSTEM =", "= \"io.hass.arch\" LABEL_MACHINE = \"io.hass.machine\" LABEL_TYPE = \"io.hass.type\" LABEL_VERSION =", "\"stable\" BETA = \"beta\" DEV = \"dev\" class CoreState(str, Enum):", "\"valid\" ATTR_VALUE = \"value\" ATTR_VERSION = \"version\" ATTR_VERSION_LATEST = \"version_latest\"", "= \"addons_custom_list\" ATTR_ADDONS_REPOSITORIES = \"addons_repositories\" ATTR_ADDRESS = \"address\" ATTR_ADDRESS_DATA =", "\"ingress_token\" ATTR_INGRESS_URL = \"ingress_url\" ATTR_INIT = \"init\" ATTR_INITIALIZE = \"initialize\"", "= \"usb\" ATTR_USER = \"user\" ATTR_USERNAME = \"username\" ATTR_UUID =", "= \"supported\" ATTR_SUPPORTED_ARCH = \"supported_arch\" ATTR_SYSTEM = \"system\" ATTR_JOURNALD =", "= \"snapshots\" ATTR_SOURCE = \"source\" ATTR_SQUASH = \"squash\" ATTR_SSD =", "= \"location\" ATTR_LOGGING = \"logging\" ATTR_LOGO = \"logo\" ATTR_LONG_DESCRIPTION =", "\"journald\" ATTR_TIMEOUT = \"timeout\" ATTR_TIMEZONE = \"timezone\" ATTR_TITLE = \"title\"", "= \"stable\" EXPERIMENTAL = \"experimental\" DEPRECATED = \"deprecated\" class AddonState(str,", "= ip_network(\"172.30.32.0/23\") DOCKER_NETWORK_RANGE = ip_network(\"172.30.33.0/24\") # This needs to match", "ROLE_HOMEASSISTANT, ROLE_BACKUP, ROLE_MANAGER, ROLE_ADMIN] class AddonBoot(str, Enum): \"\"\"Boot mode for", "= \"title\" ATTR_TMPFS = \"tmpfs\" ATTR_TOTP = \"totp\" ATTR_TRANSLATIONS =", "\"map\" ATTR_MEMORY_LIMIT = \"memory_limit\" ATTR_MEMORY_PERCENT = \"memory_percent\" ATTR_MEMORY_USAGE = \"memory_usage\"", "= \"HOMEASSISTANT_REPOSITORY\" ENV_SUPERVISOR_DEV = \"SUPERVISOR_DEV\" ENV_SUPERVISOR_MACHINE = \"SUPERVISOR_MACHINE\" ENV_SUPERVISOR_NAME =", "= \"running\" FREEZE = \"freeze\" SHUTDOWN = \"shutdown\" STOPPING =", "\"admin\" 
ROLE_ALL = [ROLE_DEFAULT, ROLE_HOMEASSISTANT, ROLE_BACKUP, ROLE_MANAGER, ROLE_ADMIN] class AddonBoot(str,", "EXPERIMENTAL = \"experimental\" DEPRECATED = \"deprecated\" class AddonState(str, Enum): \"\"\"State", "# Note that the time is multiplied by CPU count.", "= \"text/plain\" CONTENT_TYPE_URL = \"application/x-www-form-urlencoded\" COOKIE_INGRESS = \"ingress_session\" HEADER_TOKEN =", "the dockerd --cpu-rt-runtime= argument. DOCKER_CPU_RUNTIME_TOTAL = 950_000 # The rt", "= \"X-Supervisor-Token\" HEADER_TOKEN_OLD = \"X-Hassio-Key\" ENV_TIME = \"TZ\" ENV_TOKEN =", "= \"hostname\" ATTR_ICON = \"icon\" ATTR_ID = \"id\" ATTR_IMAGE =", "\"timezone\" ATTR_TITLE = \"title\" ATTR_TMPFS = \"tmpfs\" ATTR_TOTP = \"totp\"", "= \"addons\" MAP_BACKUP = \"backup\" MAP_SHARE = \"share\" MAP_MEDIA =", "= \"cpe\" ATTR_CPU_PERCENT = \"cpu_percent\" ATTR_CRYPTO = \"crypto\" ATTR_DATA =", "Path(\"/run/docker.sock\") RUN_SUPERVISOR_STATE = Path(\"/run/supervisor\") SYSTEMD_JOURNAL_PERSISTENT = Path(\"/var/log/journal\") SYSTEMD_JOURNAL_VOLATILE = Path(\"/run/log/journal\")", "= \"auth_api\" ATTR_AUTO_UPDATE = \"auto_update\" ATTR_AVAILABLE = \"available\" ATTR_BLK_READ =", "ROLE_ALL = [ROLE_DEFAULT, ROLE_HOMEASSISTANT, ROLE_BACKUP, ROLE_MANAGER, ROLE_ADMIN] class AddonBoot(str, Enum):", "add-on.\"\"\" STARTED = \"started\" STOPPED = \"stopped\" UNKNOWN = \"unknown\"", "feature.\"\"\" HASSOS = \"hassos\" HOSTNAME = \"hostname\" NETWORK = \"network\"", "\"SUPERVISOR_SHARE\" ENV_SUPERVISOR_CPU_RT = \"SUPERVISOR_CPU_RT\" REQUEST_FROM = \"HASSIO_FROM\" ATTR_ACCESS_TOKEN = \"access_token\"", "\"kernel_modules\" ATTR_LAST_BOOT = \"last_boot\" ATTR_LEGACY = \"legacy\" ATTR_LOCALS = \"locals\"", "class CoreState(str, Enum): \"\"\"Represent current loading state.\"\"\" INITIALIZE = \"initialize\"", "file for Supervisor.\"\"\" from enum import Enum from ipaddress import", "= \"network\" ATTR_NETWORK_DESCRIPTION = \"network_description\" ATTR_NETWORK_RX = \"network_rx\" ATTR_NETWORK_TX =", 
"ATTR_PANEL_ADMIN = \"panel_admin\" ATTR_PANEL_ICON = \"panel_icon\" ATTR_PANEL_TITLE = \"panel_title\" ATTR_PANELS", "\"address-data\" ATTR_ADMIN = \"admin\" ATTR_ADVANCED = \"advanced\" ATTR_APPARMOR = \"apparmor\"", "= \"panel_admin\" ATTR_PANEL_ICON = \"panel_icon\" ATTR_PANEL_TITLE = \"panel_title\" ATTR_PANELS =", "\"uart\" ATTR_UDEV = \"udev\" ATTR_UNHEALTHY = \"unhealthy\" ATTR_UNSAVED = \"unsaved\"", "ATTR_ENABLED = \"enabled\" ATTR_ENVIRONMENT = \"environment\" ATTR_EVENT = \"event\" ATTR_FEATURES", "ATTR_PARENT = \"parent\" ATTR_PASSWORD = \"password\" ATTR_PORT = \"port\" ATTR_PORTS", "\"wait_boot\" ATTR_WATCHDOG = \"watchdog\" ATTR_WEBUI = \"webui\" ATTR_WIFI = \"wifi\"", "\"docker\" ATTR_DOCKER_API = \"docker_api\" ATTR_DOCUMENTATION = \"documentation\" ATTR_DOMAINS = \"domains\"", "\"initialize\" SETUP = \"setup\" STARTUP = \"startup\" RUNNING = \"running\"", "= \"full_access\" ATTR_GATEWAY = \"gateway\" ATTR_GPIO = \"gpio\" ATTR_HASSIO_API =", "ATTR_HOST_IPC = \"host_ipc\" ATTR_HOST_NETWORK = \"host_network\" ATTR_HOST_PID = \"host_pid\" ATTR_HOSTNAME", "to 950/5*4 = 760ms in RT priority # on a", "FILE_HASSIO_DOCKER = Path(SUPERVISOR_DATA, \"docker.json\") FILE_HASSIO_HOMEASSISTANT = Path(SUPERVISOR_DATA, \"homeassistant.json\") FILE_HASSIO_INGRESS =", "= \"once\" class AddonStage(str, Enum): \"\"\"Stage types of add-on.\"\"\" STABLE", "= \"SUPERVISOR_NAME\" ENV_SUPERVISOR_SHARE = \"SUPERVISOR_SHARE\" ENV_SUPERVISOR_CPU_RT = \"SUPERVISOR_CPU_RT\" REQUEST_FROM =", "mode for the add-on.\"\"\" AUTO = \"auto\" MANUAL = \"manual\"", "= \"network\" REBOOT = \"reboot\" SERVICES = \"services\" SHUTDOWN =", "\"homeassistant\" FOLDER_SHARE = \"share\" FOLDER_ADDONS = \"addons/local\" FOLDER_SSL = \"ssl\"", "= \"gateway\" ATTR_GPIO = \"gpio\" ATTR_HASSIO_API = \"hassio_api\" ATTR_HASSIO_ROLE =", "\"watchdog\" ATTR_WEBUI = \"webui\" ATTR_WIFI = \"wifi\" ATTR_CONTENT_TRUST = \"content_trust\"", "= \"ssid\" ATTR_SSID = \"ssid\" ATTR_SSL = \"ssl\" ATTR_STAGE =", 
"Supervisor.\"\"\" from enum import Enum from ipaddress import ip_network from", "FILE_HASSIO_ADDONS = Path(SUPERVISOR_DATA, \"addons.json\") FILE_HASSIO_AUTH = Path(SUPERVISOR_DATA, \"auth.json\") FILE_HASSIO_CONFIG =", "\"flags\" ATTR_FOLDERS = \"folders\" ATTR_FREQUENCY = \"frequency\" ATTR_FULL_ACCESS = \"full_access\"", "= Path(\"/run/dbus/system_bus_socket\") SOCKET_DOCKER = Path(\"/run/docker.sock\") RUN_SUPERVISOR_STATE = Path(\"/run/supervisor\") SYSTEMD_JOURNAL_PERSISTENT =", "Path(\"/run/dbus/system_bus_socket\") SOCKET_DOCKER = Path(\"/run/docker.sock\") RUN_SUPERVISOR_STATE = Path(\"/run/supervisor\") SYSTEMD_JOURNAL_PERSISTENT = Path(\"/var/log/journal\")", "= \"error\" CRITICAL = \"critical\" class HostFeature(str, Enum): \"\"\"Host feature.\"\"\"", "ATTR_VALID = \"valid\" ATTR_VALUE = \"value\" ATTR_VERSION = \"version\" ATTR_VERSION_LATEST", "SYSTEM = \"system\" SERVICES = \"services\" APPLICATION = \"application\" ONCE", "= \"hassio_role\" ATTR_HASSOS = \"hassos\" ATTR_HEALTHY = \"healthy\" ATTR_HOMEASSISTANT =", "INFO = \"info\" WARNING = \"warning\" ERROR = \"error\" CRITICAL", "ENV_TOKEN = \"SUPERVISOR_TOKEN\" ENV_TOKEN_HASSIO = \"HASSIO_TOKEN\" ENV_HOMEASSISTANT_REPOSITORY = \"HOMEASSISTANT_REPOSITORY\" ENV_SUPERVISOR_DEV", "update channels.\"\"\" STABLE = \"stable\" BETA = \"beta\" DEV =", "= \"schema\" ATTR_SECURITY = \"security\" ATTR_SERIAL = \"serial\" ATTR_SERVERS =", "\"\"\"Stage types of add-on.\"\"\" STABLE = \"stable\" EXPERIMENTAL = \"experimental\"", "\"application/tar\" CONTENT_TYPE_TEXT = \"text/plain\" CONTENT_TYPE_URL = \"application/x-www-form-urlencoded\" COOKIE_INGRESS = \"ingress_session\"", "ATTR_HOMEASSISTANT_API = \"homeassistant_api\" ATTR_HOST = \"host\" ATTR_HOST_DBUS = \"host_dbus\" ATTR_HOST_INTERNET", "= \"disk_total\" ATTR_DISK_USED = \"disk_used\" ATTR_DNS = \"dns\" ATTR_DOCKER =", "\"available\" ATTR_BLK_READ = \"blk_read\" ATTR_BLK_WRITE = \"blk_write\" ATTR_BOARD = \"board\"", "\"config\" ATTR_CONFIGURATION = 
\"configuration\" ATTR_CONNECTED = \"connected\" ATTR_CONNECTIONS = \"connections\"", "\"features\" ATTR_FILENAME = \"filename\" ATTR_FLAGS = \"flags\" ATTR_FOLDERS = \"folders\"", "ATTR_EVENT = \"event\" ATTR_FEATURES = \"features\" ATTR_FILENAME = \"filename\" ATTR_FLAGS", "\"update_available\" ATTR_UPDATE_KEY = \"update_key\" ATTR_URL = \"url\" ATTR_USB = \"usb\"", "ATTR_SUPERVISOR_INTERNET = \"supervisor_internet\" ATTR_SUPPORTED = \"supported\" ATTR_SUPPORTED_ARCH = \"supported_arch\" ATTR_SYSTEM", "\"homeassistant_api\" ATTR_HOST = \"host\" ATTR_HOST_DBUS = \"host_dbus\" ATTR_HOST_INTERNET = \"host_internet\"", "by CPU count. This means that # a single container", "\"panel_icon\" ATTR_PANEL_TITLE = \"panel_title\" ATTR_PANELS = \"panels\" ATTR_PARENT = \"parent\"", "\"detached\" ATTR_DEVICES = \"devices\" ATTR_DEVICETREE = \"devicetree\" ATTR_DIAGNOSTICS = \"diagnostics\"", "\"\"\"Constants file for Supervisor.\"\"\" from enum import Enum from ipaddress", "\"stdin\" ATTR_STORAGE = \"storage\" ATTR_SUGGESTIONS = \"suggestions\" ATTR_SUPERVISOR = \"supervisor\"", "\"homeassistant\" ROLE_BACKUP = \"backup\" ROLE_MANAGER = \"manager\" ROLE_ADMIN = \"admin\"", "ATTR_ADDRESS = \"address\" ATTR_ADDRESS_DATA = \"address-data\" ATTR_ADMIN = \"admin\" ATTR_ADVANCED", "\"value\" ATTR_VERSION = \"version\" ATTR_VERSION_LATEST = \"version_latest\" ATTR_VIDEO = \"video\"", "= \"audio_input\" ATTR_AUDIO_OUTPUT = \"audio_output\" ATTR_AUTH = \"auth\" ATTR_AUTH_API =", "\"addon\" ATTR_ADDONS = \"addons\" ATTR_ADDONS_CUSTOM_LIST = \"addons_custom_list\" ATTR_ADDONS_REPOSITORIES = \"addons_repositories\"", "\"interfaces\" ATTR_IP_ADDRESS = \"ip_address\" ATTR_IPV4 = \"ipv4\" ATTR_IPV6 = \"ipv6\"", "level of system.\"\"\" DEBUG = \"debug\" INFO = \"info\" WARNING", "\"services\" APPLICATION = \"application\" ONCE = \"once\" class AddonStage(str, Enum):", "= \"deprecated\" class AddonState(str, Enum): \"\"\"State of add-on.\"\"\" STARTED =", "ATTR_NETWORK_TX = \"network_tx\" 
ATTR_OBSERVER = \"observer\" ATTR_OPERATING_SYSTEM = \"operating_system\" ATTR_OPTIONS", "= \"providers\" ATTR_PSK = \"psk\" ATTR_RATING = \"rating\" ATTR_REALTIME =", "= \"unsupported\" ATTR_UPDATE_AVAILABLE = \"update_available\" ATTR_UPDATE_KEY = \"update_key\" ATTR_URL =", "META_SUPERVISOR = \"supervisor\" JSON_DATA = \"data\" JSON_MESSAGE = \"message\" JSON_RESULT", "ATTR_DETACHED = \"detached\" ATTR_DEVICES = \"devices\" ATTR_DEVICETREE = \"devicetree\" ATTR_DIAGNOSTICS", "ATTR_VOLUME = \"volume\" ATTR_VPN = \"vpn\" ATTR_WAIT_BOOT = \"wait_boot\" ATTR_WATCHDOG", "\"experimental\" DEPRECATED = \"deprecated\" class AddonState(str, Enum): \"\"\"State of add-on.\"\"\"", "\"ingress.json\") FILE_HASSIO_SERVICES = Path(SUPERVISOR_DATA, \"services.json\") FILE_HASSIO_UPDATER = Path(SUPERVISOR_DATA, \"updater.json\") FILE_SUFFIX_CONFIGURATION", "ATTR_LOGO = \"logo\" ATTR_LONG_DESCRIPTION = \"long_description\" ATTR_MAC = \"mac\" ATTR_MACHINE", "\"prefix\" ATTR_PRIMARY = \"primary\" ATTR_PRIORITY = \"priority\" ATTR_PRIVILEGED = \"privileged\"", "# allocated. 
# Note that the time is multiplied by", "= \"unhealthy\" ATTR_UNSAVED = \"unsaved\" ATTR_UNSUPPORTED = \"unsupported\" ATTR_UPDATE_AVAILABLE =", "ARCH_AMD64, ARCH_I386] REPOSITORY_CORE = \"core\" REPOSITORY_LOCAL = \"local\" FOLDER_HOMEASSISTANT =", "\"hassos\" ATTR_HEALTHY = \"healthy\" ATTR_HOMEASSISTANT = \"homeassistant\" ATTR_HOMEASSISTANT_API = \"homeassistant_api\"", "= \"primary\" ATTR_PRIORITY = \"priority\" ATTR_PRIVILEGED = \"privileged\" ATTR_PROTECTED =", "ATTR_REALTIME = \"realtime\" ATTR_REFRESH_TOKEN = \"refresh_token\" ATTR_REGISTRIES = \"registries\" ATTR_REGISTRY", "ATTR_OPTIONS = \"options\" ATTR_OTA = \"ota\" ATTR_OUTPUT = \"output\" ATTR_PANEL_ADMIN", "ATTR_UPDATE_KEY = \"update_key\" ATTR_URL = \"url\" ATTR_USB = \"usb\" ATTR_USER", "\"ipv6\" ATTR_ISSUES = \"issues\" ATTR_KERNEL = \"kernel\" ATTR_KERNEL_MODULES = \"kernel_modules\"", "[ROLE_DEFAULT, ROLE_HOMEASSISTANT, ROLE_BACKUP, ROLE_MANAGER, ROLE_ADMIN] class AddonBoot(str, Enum): \"\"\"Boot mode", "\".json\"] MACHINE_ID = Path(\"/etc/machine-id\") SOCKET_DBUS = Path(\"/run/dbus/system_bus_socket\") SOCKET_DOCKER = Path(\"/run/docker.sock\")", "\"debug_block\" ATTR_DEFAULT = \"default\" ATTR_DEPLOYMENT = \"deployment\" ATTR_DESCRIPTON = \"description\"", "ARCH_I386 = \"i386\" ARCH_ALL = [ARCH_ARMHF, ARCH_ARMV7, ARCH_AARCH64, ARCH_AMD64, ARCH_I386]", "ATTR_FEATURES = \"features\" ATTR_FILENAME = \"filename\" ATTR_FLAGS = \"flags\" ATTR_FOLDERS", "\"username\" ATTR_UUID = \"uuid\" ATTR_VALID = \"valid\" ATTR_VALUE = \"value\"", "= \"mac\" ATTR_MACHINE = \"machine\" ATTR_MAINTAINER = \"maintainer\" ATTR_MAP =", "\"network_description\" ATTR_NETWORK_RX = \"network_rx\" ATTR_NETWORK_TX = \"network_tx\" ATTR_OBSERVER = \"observer\"", "needs to match the dockerd --cpu-rt-runtime= argument. 
DOCKER_CPU_RUNTIME_TOTAL = 950_000", "= \"access_token\" ATTR_ACCESSPOINTS = \"accesspoints\" ATTR_ACTIVE = \"active\" ATTR_ADDON =", "WARNING = \"warning\" ERROR = \"error\" CRITICAL = \"critical\" class", "ATTR_SQUASH = \"squash\" ATTR_SSD = \"ssid\" ATTR_SSID = \"ssid\" ATTR_SSL", "LABEL_MACHINE = \"io.hass.machine\" LABEL_TYPE = \"io.hass.type\" LABEL_VERSION = \"io.hass.version\" META_ADDON", "ATTR_AUTO_UPDATE = \"auto_update\" ATTR_AVAILABLE = \"available\" ATTR_BLK_READ = \"blk_read\" ATTR_BLK_WRITE", "= \"memory_limit\" ATTR_MEMORY_PERCENT = \"memory_percent\" ATTR_MEMORY_USAGE = \"memory_usage\" ATTR_MESSAGE =", "= \"core\" REPOSITORY_LOCAL = \"local\" FOLDER_HOMEASSISTANT = \"homeassistant\" FOLDER_SHARE =", "\"realtime\" ATTR_REFRESH_TOKEN = \"refresh_token\" ATTR_REGISTRIES = \"registries\" ATTR_REGISTRY = \"registry\"", "= \"unsaved\" ATTR_UNSUPPORTED = \"unsupported\" ATTR_UPDATE_AVAILABLE = \"update_available\" ATTR_UPDATE_KEY =", "\"boot\" ATTR_BRANCH = \"branch\" ATTR_BUILD = \"build\" ATTR_BUILD_FROM = \"build_from\"", "= \"audio_output\" ATTR_AUTH = \"auth\" ATTR_AUTH_API = \"auth_api\" ATTR_AUTO_UPDATE =", "NETWORK = \"network\" REBOOT = \"reboot\" SERVICES = \"services\" SHUTDOWN", "ATTR_DOCKER_API = \"docker_api\" ATTR_DOCUMENTATION = \"documentation\" ATTR_DOMAINS = \"domains\" ATTR_ENABLE", "ATTR_ADDON = \"addon\" ATTR_ADDONS = \"addons\" ATTR_ADDONS_CUSTOM_LIST = \"addons_custom_list\" ATTR_ADDONS_REPOSITORIES", "\"ports_description\" ATTR_PREFIX = \"prefix\" ATTR_PRIMARY = \"primary\" ATTR_PRIORITY = \"priority\"", "\"snapshots\" ATTR_SOURCE = \"source\" ATTR_SQUASH = \"squash\" ATTR_SSD = \"ssid\"", "= \"services\" APPLICATION = \"application\" ONCE = \"once\" class AddonStage(str,", "\"version\" ATTR_VERSION_LATEST = \"version_latest\" ATTR_VIDEO = \"video\" ATTR_VLAN = \"vlan\"", "ATTR_ACCESS_TOKEN = \"access_token\" ATTR_ACCESSPOINTS = \"accesspoints\" ATTR_ACTIVE = \"active\" ATTR_ADDON", "= \"supported_arch\" ATTR_SYSTEM = \"system\" 
ATTR_JOURNALD = \"journald\" ATTR_TIMEOUT =", "\"vlan\" ATTR_VOLUME = \"volume\" ATTR_VPN = \"vpn\" ATTR_WAIT_BOOT = \"wait_boot\"", "ATTR_OPERATING_SYSTEM = \"operating_system\" ATTR_OPTIONS = \"options\" ATTR_OTA = \"ota\" ATTR_OUTPUT", "\"legacy\" ATTR_LOCALS = \"locals\" ATTR_LOCATON = \"location\" ATTR_LOGGING = \"logging\"", "ATTR_CLI = \"cli\" ATTR_CONFIG = \"config\" ATTR_CONFIGURATION = \"configuration\" ATTR_CONNECTED", "\"icon\" ATTR_ID = \"id\" ATTR_IMAGE = \"image\" ATTR_IMAGES = \"images\"", "ATTR_OBSERVER = \"observer\" ATTR_OPERATING_SYSTEM = \"operating_system\" ATTR_OPTIONS = \"options\" ATTR_OTA", "\"build\" ATTR_BUILD_FROM = \"build_from\" ATTR_CARD = \"card\" ATTR_CHANGELOG = \"changelog\"", "= \"supervisor_internet\" ATTR_SUPPORTED = \"supported\" ATTR_SUPPORTED_ARCH = \"supported_arch\" ATTR_SYSTEM =", "= \"ok\" CONTENT_TYPE_BINARY = \"application/octet-stream\" CONTENT_TYPE_JSON = \"application/json\" CONTENT_TYPE_PNG =", "= \"detached\" ATTR_DEVICES = \"devices\" ATTR_DEVICETREE = \"devicetree\" ATTR_DIAGNOSTICS =", "\"suggestions\" ATTR_SUPERVISOR = \"supervisor\" ATTR_SUPERVISOR_INTERNET = \"supervisor_internet\" ATTR_SUPPORTED = \"supported\"", "ATTR_CONFIG = \"config\" ATTR_CONFIGURATION = \"configuration\" ATTR_CONNECTED = \"connected\" ATTR_CONNECTIONS", "ATTR_GPIO = \"gpio\" ATTR_HASSIO_API = \"hassio_api\" ATTR_HASSIO_ROLE = \"hassio_role\" ATTR_HASSOS", "\"address\" ATTR_ADDRESS_DATA = \"address-data\" ATTR_ADMIN = \"admin\" ATTR_ADVANCED = \"advanced\"", "= Path(\"/run/docker.sock\") RUN_SUPERVISOR_STATE = Path(\"/run/supervisor\") SYSTEMD_JOURNAL_PERSISTENT = Path(\"/var/log/journal\") SYSTEMD_JOURNAL_VOLATILE =", "\"last_boot\" ATTR_LEGACY = \"legacy\" ATTR_LOCALS = \"locals\" ATTR_LOCATON = \"location\"", "= \"default\" ATTR_DEPLOYMENT = \"deployment\" ATTR_DESCRIPTON = \"description\" ATTR_DETACHED =", "ATTR_PORTS_DESCRIPTION = \"ports_description\" ATTR_PREFIX = \"prefix\" ATTR_PRIMARY = \"primary\" ATTR_PRIORITY", 
"\"docker.json\") FILE_HASSIO_HOMEASSISTANT = Path(SUPERVISOR_DATA, \"homeassistant.json\") FILE_HASSIO_INGRESS = Path(SUPERVISOR_DATA, \"ingress.json\") FILE_HASSIO_SERVICES", "= \"valid\" ATTR_VALUE = \"value\" ATTR_VERSION = \"version\" ATTR_VERSION_LATEST =", "REPOSITORY_CORE = \"core\" REPOSITORY_LOCAL = \"local\" FOLDER_HOMEASSISTANT = \"homeassistant\" FOLDER_SHARE", "ATTR_DESCRIPTON = \"description\" ATTR_DETACHED = \"detached\" ATTR_DEVICES = \"devices\" ATTR_DEVICETREE", "ATTR_IMAGES = \"images\" ATTR_INDEX = \"index\" ATTR_INGRESS = \"ingress\" ATTR_INGRESS_ENTRY", "= Path(\"/run/supervisor\") SYSTEMD_JOURNAL_PERSISTENT = Path(\"/var/log/journal\") SYSTEMD_JOURNAL_VOLATILE = Path(\"/run/log/journal\") DOCKER_NETWORK =", "ATTR_INSTALLED = \"installed\" ATTR_INTERFACE = \"interface\" ATTR_INTERFACES = \"interfaces\" ATTR_IP_ADDRESS", "\"issues\" ATTR_KERNEL = \"kernel\" ATTR_KERNEL_MODULES = \"kernel_modules\" ATTR_LAST_BOOT = \"last_boot\"", "= Path(SUPERVISOR_DATA, \"config.json\") FILE_HASSIO_DISCOVERY = Path(SUPERVISOR_DATA, \"discovery.json\") FILE_HASSIO_DOCKER = Path(SUPERVISOR_DATA,", "\"SUPERVISOR_DEV\" ENV_SUPERVISOR_MACHINE = \"SUPERVISOR_MACHINE\" ENV_SUPERVISOR_NAME = \"SUPERVISOR_NAME\" ENV_SUPERVISOR_SHARE = \"SUPERVISOR_SHARE\"", "= Path(\"/run/log/journal\") DOCKER_NETWORK = \"hassio\" DOCKER_NETWORK_MASK = ip_network(\"172.30.32.0/23\") DOCKER_NETWORK_RANGE =", "FILE_HASSIO_HOMEASSISTANT = Path(SUPERVISOR_DATA, \"homeassistant.json\") FILE_HASSIO_INGRESS = Path(SUPERVISOR_DATA, \"ingress.json\") FILE_HASSIO_SERVICES =", "Enum from ipaddress import ip_network from pathlib import Path SUPERVISOR_VERSION", "\"devices\" ATTR_DEVICETREE = \"devicetree\" ATTR_DIAGNOSTICS = \"diagnostics\" ATTR_DISCOVERY = \"discovery\"", "= \"panels\" ATTR_PARENT = \"parent\" ATTR_PASSWORD = \"password\" ATTR_PORT =", "= \"chassis\" ATTR_CHECKS = \"checks\" ATTR_CLI = \"cli\" ATTR_CONFIG =", "ATTR_STORAGE = \"storage\" ATTR_SUGGESTIONS = \"suggestions\" 
ATTR_SUPERVISOR = \"supervisor\" ATTR_SUPERVISOR_INTERNET", "= \"system\" ATTR_JOURNALD = \"journald\" ATTR_TIMEOUT = \"timeout\" ATTR_TIMEZONE =", "ATTR_VLAN = \"vlan\" ATTR_VOLUME = \"volume\" ATTR_VPN = \"vpn\" ATTR_WAIT_BOOT", "= \"startup\" RUNNING = \"running\" FREEZE = \"freeze\" SHUTDOWN =", "CONTENT_TYPE_TAR = \"application/tar\" CONTENT_TYPE_TEXT = \"text/plain\" CONTENT_TYPE_URL = \"application/x-www-form-urlencoded\" COOKIE_INGRESS", "\"squash\" ATTR_SSD = \"ssid\" ATTR_SSID = \"ssid\" ATTR_SSL = \"ssl\"", "\"servers\" ATTR_SERVICE = \"service\" ATTR_SERVICES = \"services\" ATTR_SESSION = \"session\"", "ROLE_DEFAULT = \"default\" ROLE_HOMEASSISTANT = \"homeassistant\" ROLE_BACKUP = \"backup\" ROLE_MANAGER", "\"initialize\" SYSTEM = \"system\" SERVICES = \"services\" APPLICATION = \"application\"", "\"config.json\") FILE_HASSIO_DISCOVERY = Path(SUPERVISOR_DATA, \"discovery.json\") FILE_HASSIO_DOCKER = Path(SUPERVISOR_DATA, \"docker.json\") FILE_HASSIO_HOMEASSISTANT", "\"application\" ONCE = \"once\" class AddonStage(str, Enum): \"\"\"Stage types of", "\"domains\" ATTR_ENABLE = \"enable\" ATTR_ENABLED = \"enabled\" ATTR_ENVIRONMENT = \"environment\"", "# on a quad core system. DOCKER_CPU_RUNTIME_ALLOCATION = int(DOCKER_CPU_RUNTIME_TOTAL /", "= \"admin\" ATTR_ADVANCED = \"advanced\" ATTR_APPARMOR = \"apparmor\" ATTR_APPLICATION =", "hence we cannot allocate more # time than available! 
Support", "LogLevel(str, Enum): \"\"\"Logging level of system.\"\"\" DEBUG = \"debug\" INFO", "ATTR_CONNECTED = \"connected\" ATTR_CONNECTIONS = \"connections\" ATTR_CONTAINERS = \"containers\" ATTR_CPE", "= \"stage\" ATTR_STARTUP = \"startup\" ATTR_STATE = \"state\" ATTR_STATIC =", "= \"force_security\" PROVIDE_SERVICE = \"provide\" NEED_SERVICE = \"need\" WANT_SERVICE =", "= \"timezone\" ATTR_TITLE = \"title\" ATTR_TMPFS = \"tmpfs\" ATTR_TOTP =", "\"stage\" ATTR_STARTUP = \"startup\" ATTR_STATE = \"state\" ATTR_STATIC = \"static\"", "\"\"\"Logging level of system.\"\"\" DEBUG = \"debug\" INFO = \"info\"", "\"connections\" ATTR_CONTAINERS = \"containers\" ATTR_CPE = \"cpe\" ATTR_CPU_PERCENT = \"cpu_percent\"", "ATTR_SIGNAL = \"signal\" ATTR_SIZE = \"size\" ATTR_SLUG = \"slug\" ATTR_SNAPSHOT_EXCLUDE", "ATTR_SUPPORTED = \"supported\" ATTR_SUPPORTED_ARCH = \"supported_arch\" ATTR_SYSTEM = \"system\" ATTR_JOURNALD", "time # allocated. # Note that the time is multiplied", "ATTR_HASSIO_ROLE = \"hassio_role\" ATTR_HASSOS = \"hassos\" ATTR_HEALTHY = \"healthy\" ATTR_HOMEASSISTANT", "= \"disk\" ATTR_DISK_FREE = \"disk_free\" ATTR_DISK_LIFE_TIME = \"disk_life_time\" ATTR_DISK_TOTAL =", "\"ssid\" ATTR_SSL = \"ssl\" ATTR_STAGE = \"stage\" ATTR_STARTUP = \"startup\"", "\"ingress\" ATTR_INGRESS_ENTRY = \"ingress_entry\" ATTR_INGRESS_PANEL = \"ingress_panel\" ATTR_INGRESS_PORT = \"ingress_port\"", "\"message\" ATTR_METHOD = \"method\" ATTR_MODE = \"mode\" ATTR_MULTICAST = \"multicast\"", "\"vpn\" ATTR_WAIT_BOOT = \"wait_boot\" ATTR_WATCHDOG = \"watchdog\" ATTR_WEBUI = \"webui\"", "= \"blk_write\" ATTR_BOARD = \"board\" ATTR_BOOT = \"boot\" ATTR_BRANCH =", "= \"mode\" ATTR_MULTICAST = \"multicast\" ATTR_NAME = \"name\" ATTR_NAMESERVERS =", "ATTR_HASSOS = \"hassos\" ATTR_HEALTHY = \"healthy\" ATTR_HOMEASSISTANT = \"homeassistant\" ATTR_HOMEASSISTANT_API", "= \"servers\" ATTR_SERVICE = \"service\" ATTR_SERVICES = \"services\" ATTR_SESSION =", "class AddonStage(str, Enum): 
\"\"\"Stage types of add-on.\"\"\" STABLE = \"stable\"", "= \"advanced\" ATTR_APPARMOR = \"apparmor\" ATTR_APPLICATION = \"application\" ATTR_ARCH =", "ATTR_ENVIRONMENT = \"environment\" ATTR_EVENT = \"event\" ATTR_FEATURES = \"features\" ATTR_FILENAME", "\"privileged\" ATTR_PROTECTED = \"protected\" ATTR_PROVIDERS = \"providers\" ATTR_PSK = \"psk\"", "= \"registries\" ATTR_REGISTRY = \"registry\" ATTR_REPOSITORIES = \"repositories\" ATTR_REPOSITORY =", "FREEZE = \"freeze\" SHUTDOWN = \"shutdown\" STOPPING = \"stopping\" CLOSE", "MAP_ADDONS = \"addons\" MAP_BACKUP = \"backup\" MAP_SHARE = \"share\" MAP_MEDIA", "REPOSITORY_LOCAL = \"local\" FOLDER_HOMEASSISTANT = \"homeassistant\" FOLDER_SHARE = \"share\" FOLDER_ADDONS", "Path(SUPERVISOR_DATA, \"updater.json\") FILE_SUFFIX_CONFIGURATION = [\".yaml\", \".yml\", \".json\"] MACHINE_ID = Path(\"/etc/machine-id\")", "from enum import Enum from ipaddress import ip_network from pathlib", "from pathlib import Path SUPERVISOR_VERSION = \"DEV\" URL_HASSIO_ADDONS = \"https://github.com/home-assistant/addons\"", "ATTR_ADDONS_REPOSITORIES = \"addons_repositories\" ATTR_ADDRESS = \"address\" ATTR_ADDRESS_DATA = \"address-data\" ATTR_ADMIN", "\"labels\" ATTR_AUDIO = \"audio\" ATTR_AUDIO_INPUT = \"audio_input\" ATTR_AUDIO_OUTPUT = \"audio_output\"", "= \"date\" ATTR_DEBUG = \"debug\" ATTR_DEBUG_BLOCK = \"debug_block\" ATTR_DEFAULT =", "= \"host_dbus\" ATTR_HOST_INTERNET = \"host_internet\" ATTR_HOST_IPC = \"host_ipc\" ATTR_HOST_NETWORK =", "for the add-on.\"\"\" AUTO = \"auto\" MANUAL = \"manual\" class", "Enum): \"\"\"Stage types of add-on.\"\"\" STABLE = \"stable\" EXPERIMENTAL =", "ATTR_NAMESERVERS = \"nameservers\" ATTR_NETWORK = \"network\" ATTR_NETWORK_DESCRIPTION = \"network_description\" ATTR_NETWORK_RX", "\"memory_percent\" ATTR_MEMORY_USAGE = \"memory_usage\" ATTR_MESSAGE = \"message\" ATTR_METHOD = \"method\"", "= \"ingress_panel\" ATTR_INGRESS_PORT = \"ingress_port\" ATTR_INGRESS_TOKEN = \"ingress_token\" 
ATTR_INGRESS_URL =", "class HostFeature(str, Enum): \"\"\"Host feature.\"\"\" HASSOS = \"hassos\" HOSTNAME =", "\"enable\" ATTR_ENABLED = \"enabled\" ATTR_ENVIRONMENT = \"environment\" ATTR_EVENT = \"event\"", "= \"aes128\" SECURITY_PROFILE = \"profile\" SECURITY_DEFAULT = \"default\" SECURITY_DISABLE =", "import Path SUPERVISOR_VERSION = \"DEV\" URL_HASSIO_ADDONS = \"https://github.com/home-assistant/addons\" URL_HASSIO_APPARMOR =", "= \"update_available\" ATTR_UPDATE_KEY = \"update_key\" ATTR_URL = \"url\" ATTR_USB =", "ENV_SUPERVISOR_DEV = \"SUPERVISOR_DEV\" ENV_SUPERVISOR_MACHINE = \"SUPERVISOR_MACHINE\" ENV_SUPERVISOR_NAME = \"SUPERVISOR_NAME\" ENV_SUPERVISOR_SHARE", "= \"hassio\" DOCKER_NETWORK_MASK = ip_network(\"172.30.32.0/23\") DOCKER_NETWORK_RANGE = ip_network(\"172.30.33.0/24\") # This", "ATTR_BRANCH = \"branch\" ATTR_BUILD = \"build\" ATTR_BUILD_FROM = \"build_from\" ATTR_CARD", "\"auth.json\") FILE_HASSIO_CONFIG = Path(SUPERVISOR_DATA, \"config.json\") FILE_HASSIO_DISCOVERY = Path(SUPERVISOR_DATA, \"discovery.json\") FILE_HASSIO_DOCKER", "guarantees, hence we cannot allocate more # time than available!", "= \"images\" ATTR_INDEX = \"index\" ATTR_INGRESS = \"ingress\" ATTR_INGRESS_ENTRY =", "URL_HASSIO_ADDONS = \"https://github.com/home-assistant/addons\" URL_HASSIO_APPARMOR = \"https://version.home-assistant.io/apparmor.txt\" URL_HASSIO_VERSION = \"https://version.home-assistant.io/{channel}.json\" SUPERVISOR_DATA", "runtimes are guarantees, hence we cannot allocate more # time", "\"shutdown\" STOPPING = \"stopping\" CLOSE = \"close\" class LogLevel(str, Enum):", "= \"value\" ATTR_VERSION = \"version\" ATTR_VERSION_LATEST = \"version_latest\" ATTR_VIDEO =", "= \"supervisor\" ATTR_SUPERVISOR_INTERNET = \"supervisor_internet\" ATTR_SUPPORTED = \"supported\" ATTR_SUPPORTED_ARCH =", "Path(\"/etc/machine-id\") SOCKET_DBUS = Path(\"/run/dbus/system_bus_socket\") SOCKET_DOCKER = Path(\"/run/docker.sock\") RUN_SUPERVISOR_STATE = Path(\"/run/supervisor\")", "# 
This needs to match the dockerd --cpu-rt-runtime= argument. DOCKER_CPU_RUNTIME_TOTAL", "= \"diagnostics\" ATTR_DISCOVERY = \"discovery\" ATTR_DISK = \"disk\" ATTR_DISK_FREE =", "\"installed\" ATTR_INTERFACE = \"interface\" ATTR_INTERFACES = \"interfaces\" ATTR_IP_ADDRESS = \"ip_address\"", "ATTR_SNAPSHOTS = \"snapshots\" ATTR_SOURCE = \"source\" ATTR_SQUASH = \"squash\" ATTR_SSD", "we cannot allocate more # time than available! Support up", "\"force_security\" PROVIDE_SERVICE = \"provide\" NEED_SERVICE = \"need\" WANT_SERVICE = \"want\"", "= \"panel_icon\" ATTR_PANEL_TITLE = \"panel_title\" ATTR_PANELS = \"panels\" ATTR_PARENT =", "Path(SUPERVISOR_DATA, \"homeassistant.json\") FILE_HASSIO_INGRESS = Path(SUPERVISOR_DATA, \"ingress.json\") FILE_HASSIO_SERVICES = Path(SUPERVISOR_DATA, \"services.json\")", "= \"initialize\" ATTR_INPUT = \"input\" ATTR_INSTALLED = \"installed\" ATTR_INTERFACE =", "ATTR_INGRESS_ENTRY = \"ingress_entry\" ATTR_INGRESS_PANEL = \"ingress_panel\" ATTR_INGRESS_PORT = \"ingress_port\" ATTR_INGRESS_TOKEN", "SERVICES = \"services\" APPLICATION = \"application\" ONCE = \"once\" class", "\"cpu_percent\" ATTR_CRYPTO = \"crypto\" ATTR_DATA = \"data\" ATTR_DATE = \"date\"", "\"\"\"Startup types of Add-on.\"\"\" INITIALIZE = \"initialize\" SYSTEM = \"system\"", "\"debug\" INFO = \"info\" WARNING = \"warning\" ERROR = \"error\"", "\"schema\" ATTR_SECURITY = \"security\" ATTR_SERIAL = \"serial\" ATTR_SERVERS = \"servers\"", "Support up to 5 containers with equal time # allocated.", "ATTR_DEVICES = \"devices\" ATTR_DEVICETREE = \"devicetree\" ATTR_DIAGNOSTICS = \"diagnostics\" ATTR_DISCOVERY", "= \"privileged\" ATTR_PROTECTED = \"protected\" ATTR_PROVIDERS = \"providers\" ATTR_PSK =", "= \"features\" ATTR_FILENAME = \"filename\" ATTR_FLAGS = \"flags\" ATTR_FOLDERS =", "\"refresh_token\" ATTR_REGISTRIES = \"registries\" ATTR_REGISTRY = \"registry\" ATTR_REPOSITORIES = \"repositories\"", "ATTR_SCHEMA = \"schema\" ATTR_SECURITY = \"security\" ATTR_SERIAL = 
\"serial\" ATTR_SERVERS", "ATTR_UNHEALTHY = \"unhealthy\" ATTR_UNSAVED = \"unsaved\" ATTR_UNSUPPORTED = \"unsupported\" ATTR_UPDATE_AVAILABLE", "= \"beta\" DEV = \"dev\" class CoreState(str, Enum): \"\"\"Represent current", "[ARCH_ARMHF, ARCH_ARMV7, ARCH_AARCH64, ARCH_AMD64, ARCH_I386] REPOSITORY_CORE = \"core\" REPOSITORY_LOCAL =", "\"multicast\" ATTR_NAME = \"name\" ATTR_NAMESERVERS = \"nameservers\" ATTR_NETWORK = \"network\"", "\"output\" ATTR_PANEL_ADMIN = \"panel_admin\" ATTR_PANEL_ICON = \"panel_icon\" ATTR_PANEL_TITLE = \"panel_title\"", "= \"stopping\" CLOSE = \"close\" class LogLevel(str, Enum): \"\"\"Logging level", "\"index\" ATTR_INGRESS = \"ingress\" ATTR_INGRESS_ENTRY = \"ingress_entry\" ATTR_INGRESS_PANEL = \"ingress_panel\"", "= \"port\" ATTR_PORTS = \"ports\" ATTR_PORTS_DESCRIPTION = \"ports_description\" ATTR_PREFIX =", "INITIALIZE = \"initialize\" SYSTEM = \"system\" SERVICES = \"services\" APPLICATION", "= \"warning\" ERROR = \"error\" CRITICAL = \"critical\" class HostFeature(str,", "ATTR_KERNEL_MODULES = \"kernel_modules\" ATTR_LAST_BOOT = \"last_boot\" ATTR_LEGACY = \"legacy\" ATTR_LOCALS", "\"panel_title\" ATTR_PANELS = \"panels\" ATTR_PARENT = \"parent\" ATTR_PASSWORD = \"password\"", "= 760ms in RT priority # on a quad core", "ATTR_VERSION_LATEST = \"version_latest\" ATTR_VIDEO = \"video\" ATTR_VLAN = \"vlan\" ATTR_VOLUME", "= \"service\" ATTR_SERVICES = \"services\" ATTR_SESSION = \"session\" ATTR_SIGNAL =", "= \"ports_description\" ATTR_PREFIX = \"prefix\" ATTR_PRIMARY = \"primary\" ATTR_PRIORITY =", "= \"ip_address\" ATTR_IPV4 = \"ipv4\" ATTR_IPV6 = \"ipv6\" ATTR_ISSUES =", "\"frequency\" ATTR_FULL_ACCESS = \"full_access\" ATTR_GATEWAY = \"gateway\" ATTR_GPIO = \"gpio\"", "= \"map\" ATTR_MEMORY_LIMIT = \"memory_limit\" ATTR_MEMORY_PERCENT = \"memory_percent\" ATTR_MEMORY_USAGE =", "= \"default\" SECURITY_DISABLE = \"disable\" ROLE_DEFAULT = \"default\" ROLE_HOMEASSISTANT =", "\"full_access\" ATTR_GATEWAY = \"gateway\" ATTR_GPIO = 
\"gpio\" ATTR_HASSIO_API = \"hassio_api\"", "SUPERVISOR_VERSION = \"DEV\" URL_HASSIO_ADDONS = \"https://github.com/home-assistant/addons\" URL_HASSIO_APPARMOR = \"https://version.home-assistant.io/apparmor.txt\" URL_HASSIO_VERSION", "\"initialize\" ATTR_INPUT = \"input\" ATTR_INSTALLED = \"installed\" ATTR_INTERFACE = \"interface\"", "types of add-on.\"\"\" STABLE = \"stable\" EXPERIMENTAL = \"experimental\" DEPRECATED", "= \"active\" ATTR_ADDON = \"addon\" ATTR_ADDONS = \"addons\" ATTR_ADDONS_CUSTOM_LIST =", "\"ingress_session\" HEADER_TOKEN = \"X-Supervisor-Token\" HEADER_TOKEN_OLD = \"X-Hassio-Key\" ENV_TIME = \"TZ\"", "int(DOCKER_CPU_RUNTIME_TOTAL / 5) DNS_SUFFIX = \"local.hass.io\" LABEL_ARCH = \"io.hass.arch\" LABEL_MACHINE", "= \"branch\" ATTR_BUILD = \"build\" ATTR_BUILD_FROM = \"build_from\" ATTR_CARD =", "to 5 containers with equal time # allocated. # Note", "= \"security\" ATTR_SERIAL = \"serial\" ATTR_SERVERS = \"servers\" ATTR_SERVICE =", "HEADER_TOKEN = \"X-Supervisor-Token\" HEADER_TOKEN_OLD = \"X-Hassio-Key\" ENV_TIME = \"TZ\" ENV_TOKEN", "ATTR_INPUT = \"input\" ATTR_INSTALLED = \"installed\" ATTR_INTERFACE = \"interface\" ATTR_INTERFACES", "ATTR_LAST_BOOT = \"last_boot\" ATTR_LEGACY = \"legacy\" ATTR_LOCALS = \"locals\" ATTR_LOCATON", "= \"memory_usage\" ATTR_MESSAGE = \"message\" ATTR_METHOD = \"method\" ATTR_MODE =", "ATTR_RATING = \"rating\" ATTR_REALTIME = \"realtime\" ATTR_REFRESH_TOKEN = \"refresh_token\" ATTR_REGISTRIES", "import ip_network from pathlib import Path SUPERVISOR_VERSION = \"DEV\" URL_HASSIO_ADDONS", "add-on.\"\"\" AUTO = \"auto\" MANUAL = \"manual\" class AddonStartup(str, Enum):", "= \"initialize\" SYSTEM = \"system\" SERVICES = \"services\" APPLICATION =", "of add-on.\"\"\" STARTED = \"started\" STOPPED = \"stopped\" UNKNOWN =", "\"static\" ATTR_STDIN = \"stdin\" ATTR_STORAGE = \"storage\" ATTR_SUGGESTIONS = \"suggestions\"", "= \"configuration\" ATTR_CONNECTED = \"connected\" ATTR_CONNECTIONS = \"connections\" 
ATTR_CONTAINERS =", "ATTR_ADDONS = \"addons\" ATTR_ADDONS_CUSTOM_LIST = \"addons_custom_list\" ATTR_ADDONS_REPOSITORIES = \"addons_repositories\" ATTR_ADDRESS", "\"host\" ATTR_HOST_DBUS = \"host_dbus\" ATTR_HOST_INTERNET = \"host_internet\" ATTR_HOST_IPC = \"host_ipc\"", "ENV_SUPERVISOR_CPU_RT = \"SUPERVISOR_CPU_RT\" REQUEST_FROM = \"HASSIO_FROM\" ATTR_ACCESS_TOKEN = \"access_token\" ATTR_ACCESSPOINTS", "SHUTDOWN = \"shutdown\" STOPPING = \"stopping\" CLOSE = \"close\" class", "LABEL_ARCH = \"io.hass.arch\" LABEL_MACHINE = \"io.hass.machine\" LABEL_TYPE = \"io.hass.type\" LABEL_VERSION", "\"aes128\" SECURITY_PROFILE = \"profile\" SECURITY_DEFAULT = \"default\" SECURITY_DISABLE = \"disable\"", "ATTR_INGRESS_URL = \"ingress_url\" ATTR_INIT = \"init\" ATTR_INITIALIZE = \"initialize\" ATTR_INPUT", "equal time # allocated. # Note that the time is", "FILE_HASSIO_DISCOVERY = Path(SUPERVISOR_DATA, \"discovery.json\") FILE_HASSIO_DOCKER = Path(SUPERVISOR_DATA, \"docker.json\") FILE_HASSIO_HOMEASSISTANT =", "= Path(SUPERVISOR_DATA, \"auth.json\") FILE_HASSIO_CONFIG = Path(SUPERVISOR_DATA, \"config.json\") FILE_HASSIO_DISCOVERY = Path(SUPERVISOR_DATA,", "\"application/octet-stream\" CONTENT_TYPE_JSON = \"application/json\" CONTENT_TYPE_PNG = \"image/png\" CONTENT_TYPE_TAR = \"application/tar\"", "\"locals\" ATTR_LOCATON = \"location\" ATTR_LOGGING = \"logging\" ATTR_LOGO = \"logo\"", "\"repositories\" ATTR_REPOSITORY = \"repository\" ATTR_SCHEMA = \"schema\" ATTR_SECURITY = \"security\"", "ATTR_DEBUG_BLOCK = \"debug_block\" ATTR_DEFAULT = \"default\" ATTR_DEPLOYMENT = \"deployment\" ATTR_DESCRIPTON", "\"\"\"Host feature.\"\"\" HASSOS = \"hassos\" HOSTNAME = \"hostname\" NETWORK =", "Enum): \"\"\"State of add-on.\"\"\" STARTED = \"started\" STOPPED = \"stopped\"", "pathlib import Path SUPERVISOR_VERSION = \"DEV\" URL_HASSIO_ADDONS = \"https://github.com/home-assistant/addons\" URL_HASSIO_APPARMOR", "= Path(\"/data\") FILE_HASSIO_ADDONS = Path(SUPERVISOR_DATA, \"addons.json\") 
FILE_HASSIO_AUTH = Path(SUPERVISOR_DATA, \"auth.json\")", "= Path(SUPERVISOR_DATA, \"ingress.json\") FILE_HASSIO_SERVICES = Path(SUPERVISOR_DATA, \"services.json\") FILE_HASSIO_UPDATER = Path(SUPERVISOR_DATA,", "\"debug\" ATTR_DEBUG_BLOCK = \"debug_block\" ATTR_DEFAULT = \"default\" ATTR_DEPLOYMENT = \"deployment\"", "= \"documentation\" ATTR_DOMAINS = \"domains\" ATTR_ENABLE = \"enable\" ATTR_ENABLED =", "allocate more # time than available! Support up to 5", "\"timeout\" ATTR_TIMEZONE = \"timezone\" ATTR_TITLE = \"title\" ATTR_TMPFS = \"tmpfs\"", "\"panel_admin\" ATTR_PANEL_ICON = \"panel_icon\" ATTR_PANEL_TITLE = \"panel_title\" ATTR_PANELS = \"panels\"", "\"crypto\" ATTR_DATA = \"data\" ATTR_DATE = \"date\" ATTR_DEBUG = \"debug\"", "ATTR_PANEL_TITLE = \"panel_title\" ATTR_PANELS = \"panels\" ATTR_PARENT = \"parent\" ATTR_PASSWORD", "MAP_MEDIA = \"media\" ARCH_ARMHF = \"armhf\" ARCH_ARMV7 = \"armv7\" ARCH_AARCH64", "\"ssl\" FOLDER_MEDIA = \"media\" SNAPSHOT_FULL = \"full\" SNAPSHOT_PARTIAL = \"partial\"", "= \"auto\" MANUAL = \"manual\" class AddonStartup(str, Enum): \"\"\"Startup types", "\"system\" SERVICES = \"services\" APPLICATION = \"application\" ONCE = \"once\"", "\"audio\" ATTR_AUDIO_INPUT = \"audio_input\" ATTR_AUDIO_OUTPUT = \"audio_output\" ATTR_AUTH = \"auth\"", "= \"unknown\" ERROR = \"error\" class UpdateChannel(str, Enum): \"\"\"Core supported", "= \"SUPERVISOR_SHARE\" ENV_SUPERVISOR_CPU_RT = \"SUPERVISOR_CPU_RT\" REQUEST_FROM = \"HASSIO_FROM\" ATTR_ACCESS_TOKEN =", "\"auto_update\" ATTR_AVAILABLE = \"available\" ATTR_BLK_READ = \"blk_read\" ATTR_BLK_WRITE = \"blk_write\"", "\"full\" SNAPSHOT_PARTIAL = \"partial\" CRYPTO_AES128 = \"aes128\" SECURITY_PROFILE = \"profile\"", "to match the dockerd --cpu-rt-runtime= argument. 
DOCKER_CPU_RUNTIME_TOTAL = 950_000 #", "= [ARCH_ARMHF, ARCH_ARMV7, ARCH_AARCH64, ARCH_AMD64, ARCH_I386] REPOSITORY_CORE = \"core\" REPOSITORY_LOCAL", "= \"storage\" ATTR_SUGGESTIONS = \"suggestions\" ATTR_SUPERVISOR = \"supervisor\" ATTR_SUPERVISOR_INTERNET =", "ENV_SUPERVISOR_MACHINE = \"SUPERVISOR_MACHINE\" ENV_SUPERVISOR_NAME = \"SUPERVISOR_NAME\" ENV_SUPERVISOR_SHARE = \"SUPERVISOR_SHARE\" ENV_SUPERVISOR_CPU_RT", "ATTR_MAINTAINER = \"maintainer\" ATTR_MAP = \"map\" ATTR_MEMORY_LIMIT = \"memory_limit\" ATTR_MEMORY_PERCENT", "\"warning\" ERROR = \"error\" CRITICAL = \"critical\" class HostFeature(str, Enum):", "= \"gpio\" ATTR_HASSIO_API = \"hassio_api\" ATTR_HASSIO_ROLE = \"hassio_role\" ATTR_HASSOS =", "ATTR_REGISTRIES = \"registries\" ATTR_REGISTRY = \"registry\" ATTR_REPOSITORIES = \"repositories\" ATTR_REPOSITORY", "ATTR_BLK_READ = \"blk_read\" ATTR_BLK_WRITE = \"blk_write\" ATTR_BOARD = \"board\" ATTR_BOOT", "Path(\"/data\") FILE_HASSIO_ADDONS = Path(SUPERVISOR_DATA, \"addons.json\") FILE_HASSIO_AUTH = Path(SUPERVISOR_DATA, \"auth.json\") FILE_HASSIO_CONFIG", "ATTR_SYSTEM = \"system\" ATTR_JOURNALD = \"journald\" ATTR_TIMEOUT = \"timeout\" ATTR_TIMEZONE", "\"X-Supervisor-Token\" HEADER_TOKEN_OLD = \"X-Hassio-Key\" ENV_TIME = \"TZ\" ENV_TOKEN = \"SUPERVISOR_TOKEN\"", "ATTR_INIT = \"init\" ATTR_INITIALIZE = \"initialize\" ATTR_INPUT = \"input\" ATTR_INSTALLED", "\"data\" ATTR_DATE = \"date\" ATTR_DEBUG = \"debug\" ATTR_DEBUG_BLOCK = \"debug_block\"", "\"close\" class LogLevel(str, Enum): \"\"\"Logging level of system.\"\"\" DEBUG =", "= \"debug_block\" ATTR_DEFAULT = \"default\" ATTR_DEPLOYMENT = \"deployment\" ATTR_DESCRIPTON =", "ATTR_HOSTNAME = \"hostname\" ATTR_ICON = \"icon\" ATTR_ID = \"id\" ATTR_IMAGE", "CONTENT_TYPE_URL = \"application/x-www-form-urlencoded\" COOKIE_INGRESS = \"ingress_session\" HEADER_TOKEN = \"X-Supervisor-Token\" HEADER_TOKEN_OLD", "= \"update_key\" ATTR_URL = \"url\" ATTR_USB = \"usb\" ATTR_USER =", "= \"index\" ATTR_INGRESS = 
\"ingress\" ATTR_INGRESS_ENTRY = \"ingress_entry\" ATTR_INGRESS_PANEL =", "of add-on.\"\"\" STABLE = \"stable\" EXPERIMENTAL = \"experimental\" DEPRECATED =", "= \"long_description\" ATTR_MAC = \"mac\" ATTR_MACHINE = \"machine\" ATTR_MAINTAINER =", "\"panels\" ATTR_PARENT = \"parent\" ATTR_PASSWORD = \"password\" ATTR_PORT = \"port\"", "= \"disk_free\" ATTR_DISK_LIFE_TIME = \"disk_life_time\" ATTR_DISK_TOTAL = \"disk_total\" ATTR_DISK_USED =", "types of Add-on.\"\"\" INITIALIZE = \"initialize\" SYSTEM = \"system\" SERVICES", "= Path(SUPERVISOR_DATA, \"homeassistant.json\") FILE_HASSIO_INGRESS = Path(SUPERVISOR_DATA, \"ingress.json\") FILE_HASSIO_SERVICES = Path(SUPERVISOR_DATA,", "ATTR_ENABLE = \"enable\" ATTR_ENABLED = \"enabled\" ATTR_ENVIRONMENT = \"environment\" ATTR_EVENT", "ATTR_INTERFACES = \"interfaces\" ATTR_IP_ADDRESS = \"ip_address\" ATTR_IPV4 = \"ipv4\" ATTR_IPV6", "ATTR_STAGE = \"stage\" ATTR_STARTUP = \"startup\" ATTR_STATE = \"state\" ATTR_STATIC", "\"hassio_role\" ATTR_HASSOS = \"hassos\" ATTR_HEALTHY = \"healthy\" ATTR_HOMEASSISTANT = \"homeassistant\"", "= \"watchdog\" ATTR_WEBUI = \"webui\" ATTR_WIFI = \"wifi\" ATTR_CONTENT_TRUST =", "/ 5) DNS_SUFFIX = \"local.hass.io\" LABEL_ARCH = \"io.hass.arch\" LABEL_MACHINE =", "\"parent\" ATTR_PASSWORD = \"password\" ATTR_PORT = \"port\" ATTR_PORTS = \"ports\"", "\"local.hass.io\" LABEL_ARCH = \"io.hass.arch\" LABEL_MACHINE = \"io.hass.machine\" LABEL_TYPE = \"io.hass.type\"", "ATTR_REPOSITORIES = \"repositories\" ATTR_REPOSITORY = \"repository\" ATTR_SCHEMA = \"schema\" ATTR_SECURITY", "\"startup\" RUNNING = \"running\" FREEZE = \"freeze\" SHUTDOWN = \"shutdown\"", "INITIALIZE = \"initialize\" SETUP = \"setup\" STARTUP = \"startup\" RUNNING", "ATTR_FOLDERS = \"folders\" ATTR_FREQUENCY = \"frequency\" ATTR_FULL_ACCESS = \"full_access\" ATTR_GATEWAY", "ATTR_SERVICES = \"services\" ATTR_SESSION = \"session\" ATTR_SIGNAL = \"signal\" ATTR_SIZE", "= \"local\" FOLDER_HOMEASSISTANT = \"homeassistant\" 
FOLDER_SHARE = \"share\" FOLDER_ADDONS =", "UNKNOWN = \"unknown\" ERROR = \"error\" class UpdateChannel(str, Enum): \"\"\"Core", "\"board\" ATTR_BOOT = \"boot\" ATTR_BRANCH = \"branch\" ATTR_BUILD = \"build\"", "ATTR_IPV4 = \"ipv4\" ATTR_IPV6 = \"ipv6\" ATTR_ISSUES = \"issues\" ATTR_KERNEL", "= \"ingress_session\" HEADER_TOKEN = \"X-Supervisor-Token\" HEADER_TOKEN_OLD = \"X-Hassio-Key\" ENV_TIME =", "= \"manager\" ROLE_ADMIN = \"admin\" ROLE_ALL = [ROLE_DEFAULT, ROLE_HOMEASSISTANT, ROLE_BACKUP,", "ROLE_HOMEASSISTANT = \"homeassistant\" ROLE_BACKUP = \"backup\" ROLE_MANAGER = \"manager\" ROLE_ADMIN", "\"hassio\" DOCKER_NETWORK_MASK = ip_network(\"172.30.32.0/23\") DOCKER_NETWORK_RANGE = ip_network(\"172.30.33.0/24\") # This needs", "\"size\" ATTR_SLUG = \"slug\" ATTR_SNAPSHOT_EXCLUDE = \"snapshot_exclude\" ATTR_SNAPSHOTS = \"snapshots\"", "\"repository\" ATTR_SCHEMA = \"schema\" ATTR_SECURITY = \"security\" ATTR_SERIAL = \"serial\"", "= \"last_boot\" ATTR_LEGACY = \"legacy\" ATTR_LOCALS = \"locals\" ATTR_LOCATON =", "\"beta\" DEV = \"dev\" class CoreState(str, Enum): \"\"\"Represent current loading", "ATTR_FLAGS = \"flags\" ATTR_FOLDERS = \"folders\" ATTR_FREQUENCY = \"frequency\" ATTR_FULL_ACCESS", "= \"started\" STOPPED = \"stopped\" UNKNOWN = \"unknown\" ERROR =", "UpdateChannel(str, Enum): \"\"\"Core supported update channels.\"\"\" STABLE = \"stable\" BETA", "class UpdateChannel(str, Enum): \"\"\"Core supported update channels.\"\"\" STABLE = \"stable\"", "CONTENT_TYPE_TEXT = \"text/plain\" CONTENT_TYPE_URL = \"application/x-www-form-urlencoded\" COOKIE_INGRESS = \"ingress_session\" HEADER_TOKEN", "= \"memory_percent\" ATTR_MEMORY_USAGE = \"memory_usage\" ATTR_MESSAGE = \"message\" ATTR_METHOD =", "= \"cpu_percent\" ATTR_CRYPTO = \"crypto\" ATTR_DATA = \"data\" ATTR_DATE =", "ATTR_DOCUMENTATION = \"documentation\" ATTR_DOMAINS = \"domains\" ATTR_ENABLE = \"enable\" ATTR_ENABLED", "ATTR_BUILD = \"build\" ATTR_BUILD_FROM = \"build_from\" ATTR_CARD = \"card\" 
ATTR_CHANGELOG", "= \"version\" ATTR_VERSION_LATEST = \"version_latest\" ATTR_VIDEO = \"video\" ATTR_VLAN =", "RT priority # on a quad core system. DOCKER_CPU_RUNTIME_ALLOCATION =", "the add-on.\"\"\" AUTO = \"auto\" MANUAL = \"manual\" class AddonStartup(str,", "= \"environment\" ATTR_EVENT = \"event\" ATTR_FEATURES = \"features\" ATTR_FILENAME =", "= \"error\" class UpdateChannel(str, Enum): \"\"\"Core supported update channels.\"\"\" STABLE", "= \"SUPERVISOR_CPU_RT\" REQUEST_FROM = \"HASSIO_FROM\" ATTR_ACCESS_TOKEN = \"access_token\" ATTR_ACCESSPOINTS =", "ATTR_ACTIVE = \"active\" ATTR_ADDON = \"addon\" ATTR_ADDONS = \"addons\" ATTR_ADDONS_CUSTOM_LIST", "= \"auto_update\" ATTR_AVAILABLE = \"available\" ATTR_BLK_READ = \"blk_read\" ATTR_BLK_WRITE =", "= \"critical\" class HostFeature(str, Enum): \"\"\"Host feature.\"\"\" HASSOS = \"hassos\"", "= \"hostname\" NETWORK = \"network\" REBOOT = \"reboot\" SERVICES =", "5) DNS_SUFFIX = \"local.hass.io\" LABEL_ARCH = \"io.hass.arch\" LABEL_MACHINE = \"io.hass.machine\"", "= \"network_description\" ATTR_NETWORK_RX = \"network_rx\" ATTR_NETWORK_TX = \"network_tx\" ATTR_OBSERVER =", "allocated. # Note that the time is multiplied by CPU", "match the dockerd --cpu-rt-runtime= argument. 
DOCKER_CPU_RUNTIME_TOTAL = 950_000 # The", "ATTR_LABELS = \"labels\" ATTR_AUDIO = \"audio\" ATTR_AUDIO_INPUT = \"audio_input\" ATTR_AUDIO_OUTPUT", "\"ingress_entry\" ATTR_INGRESS_PANEL = \"ingress_panel\" ATTR_INGRESS_PORT = \"ingress_port\" ATTR_INGRESS_TOKEN = \"ingress_token\"", "of Add-on.\"\"\" INITIALIZE = \"initialize\" SYSTEM = \"system\" SERVICES =", "= \"setup\" STARTUP = \"startup\" RUNNING = \"running\" FREEZE =", "ATTR_CHANNEL = \"channel\" ATTR_CHASSIS = \"chassis\" ATTR_CHECKS = \"checks\" ATTR_CLI", "ATTR_SERVICE = \"service\" ATTR_SERVICES = \"services\" ATTR_SESSION = \"session\" ATTR_SIGNAL", "\"application\" ATTR_ARCH = \"arch\" ATTR_ARGS = \"args\" ATTR_LABELS = \"labels\"", "\"registry\" ATTR_REPOSITORIES = \"repositories\" ATTR_REPOSITORY = \"repository\" ATTR_SCHEMA = \"schema\"", "\"supported_arch\" ATTR_SYSTEM = \"system\" ATTR_JOURNALD = \"journald\" ATTR_TIMEOUT = \"timeout\"", "= \"ssl\" MAP_ADDONS = \"addons\" MAP_BACKUP = \"backup\" MAP_SHARE =", "Add-on.\"\"\" INITIALIZE = \"initialize\" SYSTEM = \"system\" SERVICES = \"services\"", "\"location\" ATTR_LOGGING = \"logging\" ATTR_LOGO = \"logo\" ATTR_LONG_DESCRIPTION = \"long_description\"", "Path(SUPERVISOR_DATA, \"services.json\") FILE_HASSIO_UPDATER = Path(SUPERVISOR_DATA, \"updater.json\") FILE_SUFFIX_CONFIGURATION = [\".yaml\", \".yml\",", "\"channel\" ATTR_CHASSIS = \"chassis\" ATTR_CHECKS = \"checks\" ATTR_CLI = \"cli\"", "\"supported\" ATTR_SUPPORTED_ARCH = \"supported_arch\" ATTR_SYSTEM = \"system\" ATTR_JOURNALD = \"journald\"", "= \"SUPERVISOR_MACHINE\" ENV_SUPERVISOR_NAME = \"SUPERVISOR_NAME\" ENV_SUPERVISOR_SHARE = \"SUPERVISOR_SHARE\" ENV_SUPERVISOR_CPU_RT =", "ATTR_PRIORITY = \"priority\" ATTR_PRIVILEGED = \"privileged\" ATTR_PROTECTED = \"protected\" ATTR_PROVIDERS", "= \"network_rx\" ATTR_NETWORK_TX = \"network_tx\" ATTR_OBSERVER = \"observer\" ATTR_OPERATING_SYSTEM =", "= \"HASSIO_FROM\" ATTR_ACCESS_TOKEN = \"access_token\" ATTR_ACCESSPOINTS = \"accesspoints\" 
ATTR_ACTIVE =", "Path(SUPERVISOR_DATA, \"ingress.json\") FILE_HASSIO_SERVICES = Path(SUPERVISOR_DATA, \"services.json\") FILE_HASSIO_UPDATER = Path(SUPERVISOR_DATA, \"updater.json\")", "= \"build_from\" ATTR_CARD = \"card\" ATTR_CHANGELOG = \"changelog\" ATTR_CHANNEL =", "\"ports\" ATTR_PORTS_DESCRIPTION = \"ports_description\" ATTR_PREFIX = \"prefix\" ATTR_PRIMARY = \"primary\"", "ATTR_REPOSITORY = \"repository\" ATTR_SCHEMA = \"schema\" ATTR_SECURITY = \"security\" ATTR_SERIAL", "\"default\" ROLE_HOMEASSISTANT = \"homeassistant\" ROLE_BACKUP = \"backup\" ROLE_MANAGER = \"manager\"", "\"ingress_port\" ATTR_INGRESS_TOKEN = \"ingress_token\" ATTR_INGRESS_URL = \"ingress_url\" ATTR_INIT = \"init\"", "ATTR_MULTICAST = \"multicast\" ATTR_NAME = \"name\" ATTR_NAMESERVERS = \"nameservers\" ATTR_NETWORK", "argument. DOCKER_CPU_RUNTIME_TOTAL = 950_000 # The rt runtimes are guarantees,", "\"date\" ATTR_DEBUG = \"debug\" ATTR_DEBUG_BLOCK = \"debug_block\" ATTR_DEFAULT = \"default\"", "ATTR_UNSUPPORTED = \"unsupported\" ATTR_UPDATE_AVAILABLE = \"update_available\" ATTR_UPDATE_KEY = \"update_key\" ATTR_URL", "RESULT_OK = \"ok\" CONTENT_TYPE_BINARY = \"application/octet-stream\" CONTENT_TYPE_JSON = \"application/json\" CONTENT_TYPE_PNG", "\"deprecated\" class AddonState(str, Enum): \"\"\"State of add-on.\"\"\" STARTED = \"started\"", "= \"share\" FOLDER_ADDONS = \"addons/local\" FOLDER_SSL = \"ssl\" FOLDER_MEDIA =", "APPLICATION = \"application\" ONCE = \"once\" class AddonStage(str, Enum): \"\"\"Stage", "= \"io.hass.type\" LABEL_VERSION = \"io.hass.version\" META_ADDON = \"addon\" META_HOMEASSISTANT =", "= \"application\" ATTR_ARCH = \"arch\" ATTR_ARGS = \"args\" ATTR_LABELS =", "\"disable\" ROLE_DEFAULT = \"default\" ROLE_HOMEASSISTANT = \"homeassistant\" ROLE_BACKUP = \"backup\"", "\"unknown\" ERROR = \"error\" class UpdateChannel(str, Enum): \"\"\"Core supported update", "\"ipv4\" ATTR_IPV6 = \"ipv6\" ATTR_ISSUES = \"issues\" ATTR_KERNEL = \"kernel\"", 
"Path(SUPERVISOR_DATA, \"discovery.json\") FILE_HASSIO_DOCKER = Path(SUPERVISOR_DATA, \"docker.json\") FILE_HASSIO_HOMEASSISTANT = Path(SUPERVISOR_DATA, \"homeassistant.json\")", "\"ip_address\" ATTR_IPV4 = \"ipv4\" ATTR_IPV6 = \"ipv6\" ATTR_ISSUES = \"issues\"", "= \"realtime\" ATTR_REFRESH_TOKEN = \"refresh_token\" ATTR_REGISTRIES = \"registries\" ATTR_REGISTRY =", "ATTR_UNSAVED = \"unsaved\" ATTR_UNSUPPORTED = \"unsupported\" ATTR_UPDATE_AVAILABLE = \"update_available\" ATTR_UPDATE_KEY", "\"SUPERVISOR_TOKEN\" ENV_TOKEN_HASSIO = \"HASSIO_TOKEN\" ENV_HOMEASSISTANT_REPOSITORY = \"HOMEASSISTANT_REPOSITORY\" ENV_SUPERVISOR_DEV = \"SUPERVISOR_DEV\"", "can schedule up to 950/5*4 = 760ms in RT priority", "ATTR_DISK_TOTAL = \"disk_total\" ATTR_DISK_USED = \"disk_used\" ATTR_DNS = \"dns\" ATTR_DOCKER", "= \"logging\" ATTR_LOGO = \"logo\" ATTR_LONG_DESCRIPTION = \"long_description\" ATTR_MAC =", "= \"type\" ATTR_UART = \"uart\" ATTR_UDEV = \"udev\" ATTR_UNHEALTHY =", "ATTR_VPN = \"vpn\" ATTR_WAIT_BOOT = \"wait_boot\" ATTR_WATCHDOG = \"watchdog\" ATTR_WEBUI", "= \"full\" SNAPSHOT_PARTIAL = \"partial\" CRYPTO_AES128 = \"aes128\" SECURITY_PROFILE =", "import Enum from ipaddress import ip_network from pathlib import Path", "950_000 # The rt runtimes are guarantees, hence we cannot", "up to 5 containers with equal time # allocated. 
#", "\"providers\" ATTR_PSK = \"psk\" ATTR_RATING = \"rating\" ATTR_REALTIME = \"realtime\"", "COOKIE_INGRESS = \"ingress_session\" HEADER_TOKEN = \"X-Supervisor-Token\" HEADER_TOKEN_OLD = \"X-Hassio-Key\" ENV_TIME", "ATTR_MACHINE = \"machine\" ATTR_MAINTAINER = \"maintainer\" ATTR_MAP = \"map\" ATTR_MEMORY_LIMIT", "\"build_from\" ATTR_CARD = \"card\" ATTR_CHANGELOG = \"changelog\" ATTR_CHANNEL = \"channel\"", "\"storage\" ATTR_SUGGESTIONS = \"suggestions\" ATTR_SUPERVISOR = \"supervisor\" ATTR_SUPERVISOR_INTERNET = \"supervisor_internet\"", "= \"application\" ONCE = \"once\" class AddonStage(str, Enum): \"\"\"Stage types", "\"hostname\" NETWORK = \"network\" REBOOT = \"reboot\" SERVICES = \"services\"", "= \"host_pid\" ATTR_HOSTNAME = \"hostname\" ATTR_ICON = \"icon\" ATTR_ID =", "DEBUG = \"debug\" INFO = \"info\" WARNING = \"warning\" ERROR", "\"addons_repositories\" ATTR_ADDRESS = \"address\" ATTR_ADDRESS_DATA = \"address-data\" ATTR_ADMIN = \"admin\"", "\"\"\"Core supported update channels.\"\"\" STABLE = \"stable\" BETA = \"beta\"", "= \"SUPERVISOR_TOKEN\" ENV_TOKEN_HASSIO = \"HASSIO_TOKEN\" ENV_HOMEASSISTANT_REPOSITORY = \"HOMEASSISTANT_REPOSITORY\" ENV_SUPERVISOR_DEV =", "\"interface\" ATTR_INTERFACES = \"interfaces\" ATTR_IP_ADDRESS = \"ip_address\" ATTR_IPV4 = \"ipv4\"", "ATTR_USER = \"user\" ATTR_USERNAME = \"username\" ATTR_UUID = \"uuid\" ATTR_VALID", "\"slug\" ATTR_SNAPSHOT_EXCLUDE = \"snapshot_exclude\" ATTR_SNAPSHOTS = \"snapshots\" ATTR_SOURCE = \"source\"", "= \"backup\" ROLE_MANAGER = \"manager\" ROLE_ADMIN = \"admin\" ROLE_ALL =", "HEADER_TOKEN_OLD = \"X-Hassio-Key\" ENV_TIME = \"TZ\" ENV_TOKEN = \"SUPERVISOR_TOKEN\" ENV_TOKEN_HASSIO", "ATTR_AUDIO_INPUT = \"audio_input\" ATTR_AUDIO_OUTPUT = \"audio_output\" ATTR_AUTH = \"auth\" ATTR_AUTH_API", "ATTR_INGRESS_TOKEN = \"ingress_token\" ATTR_INGRESS_URL = \"ingress_url\" ATTR_INIT = \"init\" ATTR_INITIALIZE", "ATTR_MODE = \"mode\" ATTR_MULTICAST = \"multicast\" ATTR_NAME = \"name\" ATTR_NAMESERVERS", 
"ATTR_PANELS = \"panels\" ATTR_PARENT = \"parent\" ATTR_PASSWORD = \"password\" ATTR_PORT", "= \"prefix\" ATTR_PRIMARY = \"primary\" ATTR_PRIORITY = \"priority\" ATTR_PRIVILEGED =", "ATTR_USERNAME = \"username\" ATTR_UUID = \"uuid\" ATTR_VALID = \"valid\" ATTR_VALUE", "ATTR_USB = \"usb\" ATTR_USER = \"user\" ATTR_USERNAME = \"username\" ATTR_UUID", "Path(SUPERVISOR_DATA, \"docker.json\") FILE_HASSIO_HOMEASSISTANT = Path(SUPERVISOR_DATA, \"homeassistant.json\") FILE_HASSIO_INGRESS = Path(SUPERVISOR_DATA, \"ingress.json\")", "DEV = \"dev\" class CoreState(str, Enum): \"\"\"Represent current loading state.\"\"\"", "\"https://github.com/home-assistant/addons\" URL_HASSIO_APPARMOR = \"https://version.home-assistant.io/apparmor.txt\" URL_HASSIO_VERSION = \"https://version.home-assistant.io/{channel}.json\" SUPERVISOR_DATA = Path(\"/data\")", "= \"connections\" ATTR_CONTAINERS = \"containers\" ATTR_CPE = \"cpe\" ATTR_CPU_PERCENT =", "= \"slug\" ATTR_SNAPSHOT_EXCLUDE = \"snapshot_exclude\" ATTR_SNAPSHOTS = \"snapshots\" ATTR_SOURCE =", "\"args\" ATTR_LABELS = \"labels\" ATTR_AUDIO = \"audio\" ATTR_AUDIO_INPUT = \"audio_input\"", "= \"default\" ROLE_HOMEASSISTANT = \"homeassistant\" ROLE_BACKUP = \"backup\" ROLE_MANAGER =", "\"armv7\" ARCH_AARCH64 = \"aarch64\" ARCH_AMD64 = \"amd64\" ARCH_I386 = \"i386\"", "\"aarch64\" ARCH_AMD64 = \"amd64\" ARCH_I386 = \"i386\" ARCH_ALL = [ARCH_ARMHF,", "This needs to match the dockerd --cpu-rt-runtime= argument. DOCKER_CPU_RUNTIME_TOTAL =", "\"disk_free\" ATTR_DISK_LIFE_TIME = \"disk_life_time\" ATTR_DISK_TOTAL = \"disk_total\" ATTR_DISK_USED = \"disk_used\"", "= \"dns\" ATTR_DOCKER = \"docker\" ATTR_DOCKER_API = \"docker_api\" ATTR_DOCUMENTATION =", "\"admin\" ATTR_ADVANCED = \"advanced\" ATTR_APPARMOR = \"apparmor\" ATTR_APPLICATION = \"application\"", "the time is multiplied by CPU count. 
This means that", "= \"args\" ATTR_LABELS = \"labels\" ATTR_AUDIO = \"audio\" ATTR_AUDIO_INPUT =", "ATTR_FULL_ACCESS = \"full_access\" ATTR_GATEWAY = \"gateway\" ATTR_GPIO = \"gpio\" ATTR_HASSIO_API", "= \"user\" ATTR_USERNAME = \"username\" ATTR_UUID = \"uuid\" ATTR_VALID =", "\"media\" ARCH_ARMHF = \"armhf\" ARCH_ARMV7 = \"armv7\" ARCH_AARCH64 = \"aarch64\"", "ARCH_ARMV7 = \"armv7\" ARCH_AARCH64 = \"aarch64\" ARCH_AMD64 = \"amd64\" ARCH_I386", "ROLE_BACKUP, ROLE_MANAGER, ROLE_ADMIN] class AddonBoot(str, Enum): \"\"\"Boot mode for the", "ENV_TIME = \"TZ\" ENV_TOKEN = \"SUPERVISOR_TOKEN\" ENV_TOKEN_HASSIO = \"HASSIO_TOKEN\" ENV_HOMEASSISTANT_REPOSITORY", "system. DOCKER_CPU_RUNTIME_ALLOCATION = int(DOCKER_CPU_RUNTIME_TOTAL / 5) DNS_SUFFIX = \"local.hass.io\" LABEL_ARCH", "= \"services\" ATTR_SESSION = \"session\" ATTR_SIGNAL = \"signal\" ATTR_SIZE =", "= \"disk_used\" ATTR_DNS = \"dns\" ATTR_DOCKER = \"docker\" ATTR_DOCKER_API =", "ATTR_ACCESSPOINTS = \"accesspoints\" ATTR_ACTIVE = \"active\" ATTR_ADDON = \"addon\" ATTR_ADDONS", "ATTR_UPDATE_AVAILABLE = \"update_available\" ATTR_UPDATE_KEY = \"update_key\" ATTR_URL = \"url\" ATTR_USB", "FILE_HASSIO_INGRESS = Path(SUPERVISOR_DATA, \"ingress.json\") FILE_HASSIO_SERVICES = Path(SUPERVISOR_DATA, \"services.json\") FILE_HASSIO_UPDATER =", "= Path(SUPERVISOR_DATA, \"services.json\") FILE_HASSIO_UPDATER = Path(SUPERVISOR_DATA, \"updater.json\") FILE_SUFFIX_CONFIGURATION = [\".yaml\",", "= \"legacy\" ATTR_LOCALS = \"locals\" ATTR_LOCATON = \"location\" ATTR_LOGGING =", "= \"devicetree\" ATTR_DIAGNOSTICS = \"diagnostics\" ATTR_DISCOVERY = \"discovery\" ATTR_DISK =", "= \"signal\" ATTR_SIZE = \"size\" ATTR_SLUG = \"slug\" ATTR_SNAPSHOT_EXCLUDE =", "\"error\" CRITICAL = \"critical\" class HostFeature(str, Enum): \"\"\"Host feature.\"\"\" HASSOS", "ATTR_AUDIO = \"audio\" ATTR_AUDIO_INPUT = \"audio_input\" ATTR_AUDIO_OUTPUT = \"audio_output\" ATTR_AUTH", "ATTR_ID = \"id\" ATTR_IMAGE = \"image\" ATTR_IMAGES = \"images\" 
ATTR_INDEX", "\"local\" FOLDER_HOMEASSISTANT = \"homeassistant\" FOLDER_SHARE = \"share\" FOLDER_ADDONS = \"addons/local\"", "\"dev\" class CoreState(str, Enum): \"\"\"Represent current loading state.\"\"\" INITIALIZE =", "\"image\" ATTR_IMAGES = \"images\" ATTR_INDEX = \"index\" ATTR_INGRESS = \"ingress\"", "\"default\" SECURITY_DISABLE = \"disable\" ROLE_DEFAULT = \"default\" ROLE_HOMEASSISTANT = \"homeassistant\"", "\"apparmor\" ATTR_APPLICATION = \"application\" ATTR_ARCH = \"arch\" ATTR_ARGS = \"args\"", "\"url\" ATTR_USB = \"usb\" ATTR_USER = \"user\" ATTR_USERNAME = \"username\"", "ATTR_HASSIO_API = \"hassio_api\" ATTR_HASSIO_ROLE = \"hassio_role\" ATTR_HASSOS = \"hassos\" ATTR_HEALTHY", "\"chassis\" ATTR_CHECKS = \"checks\" ATTR_CLI = \"cli\" ATTR_CONFIG = \"config\"", "\"supervisor\" ATTR_SUPERVISOR_INTERNET = \"supervisor_internet\" ATTR_SUPPORTED = \"supported\" ATTR_SUPPORTED_ARCH = \"supported_arch\"", "\".yml\", \".json\"] MACHINE_ID = Path(\"/etc/machine-id\") SOCKET_DBUS = Path(\"/run/dbus/system_bus_socket\") SOCKET_DOCKER =", "= \"available\" ATTR_BLK_READ = \"blk_read\" ATTR_BLK_WRITE = \"blk_write\" ATTR_BOARD =", "ATTR_DEFAULT = \"default\" ATTR_DEPLOYMENT = \"deployment\" ATTR_DESCRIPTON = \"description\" ATTR_DETACHED", "ATTR_KERNEL = \"kernel\" ATTR_KERNEL_MODULES = \"kernel_modules\" ATTR_LAST_BOOT = \"last_boot\" ATTR_LEGACY", "up to 950/5*4 = 760ms in RT priority # on", "PROVIDE_SERVICE = \"provide\" NEED_SERVICE = \"need\" WANT_SERVICE = \"want\" MAP_CONFIG", "ROLE_ADMIN] class AddonBoot(str, Enum): \"\"\"Boot mode for the add-on.\"\"\" AUTO", "= \"interfaces\" ATTR_IP_ADDRESS = \"ip_address\" ATTR_IPV4 = \"ipv4\" ATTR_IPV6 =", "CPU count. 
This means that # a single container can", "\"discovery.json\") FILE_HASSIO_DOCKER = Path(SUPERVISOR_DATA, \"docker.json\") FILE_HASSIO_HOMEASSISTANT = Path(SUPERVISOR_DATA, \"homeassistant.json\") FILE_HASSIO_INGRESS", "\"primary\" ATTR_PRIORITY = \"priority\" ATTR_PRIVILEGED = \"privileged\" ATTR_PROTECTED = \"protected\"", "= \"udev\" ATTR_UNHEALTHY = \"unhealthy\" ATTR_UNSAVED = \"unsaved\" ATTR_UNSUPPORTED =", "SUPERVISOR_DATA = Path(\"/data\") FILE_HASSIO_ADDONS = Path(SUPERVISOR_DATA, \"addons.json\") FILE_HASSIO_AUTH = Path(SUPERVISOR_DATA,", "= \"homeassistant\" FOLDER_SHARE = \"share\" FOLDER_ADDONS = \"addons/local\" FOLDER_SSL =", "= \"freeze\" SHUTDOWN = \"shutdown\" STOPPING = \"stopping\" CLOSE =", "BETA = \"beta\" DEV = \"dev\" class CoreState(str, Enum): \"\"\"Represent", "class LogLevel(str, Enum): \"\"\"Logging level of system.\"\"\" DEBUG = \"debug\"", "rt runtimes are guarantees, hence we cannot allocate more #", "AddonStage(str, Enum): \"\"\"Stage types of add-on.\"\"\" STABLE = \"stable\" EXPERIMENTAL", "Enum): \"\"\"Boot mode for the add-on.\"\"\" AUTO = \"auto\" MANUAL", "# a single container can schedule up to 950/5*4 =", "\"disk_total\" ATTR_DISK_USED = \"disk_used\" ATTR_DNS = \"dns\" ATTR_DOCKER = \"docker\"", "= \"addon\" META_HOMEASSISTANT = \"homeassistant\" META_SUPERVISOR = \"supervisor\" JSON_DATA =", "ATTR_MESSAGE = \"message\" ATTR_METHOD = \"method\" ATTR_MODE = \"mode\" ATTR_MULTICAST", "ATTR_INGRESS = \"ingress\" ATTR_INGRESS_ENTRY = \"ingress_entry\" ATTR_INGRESS_PANEL = \"ingress_panel\" ATTR_INGRESS_PORT", "ATTR_TMPFS = \"tmpfs\" ATTR_TOTP = \"totp\" ATTR_TRANSLATIONS = \"translations\" ATTR_TYPE", "FILE_HASSIO_CONFIG = Path(SUPERVISOR_DATA, \"config.json\") FILE_HASSIO_DISCOVERY = Path(SUPERVISOR_DATA, \"discovery.json\") FILE_HASSIO_DOCKER =", "JSON_DATA = \"data\" JSON_MESSAGE = \"message\" JSON_RESULT = \"result\" RESULT_ERROR", "ATTR_HOST_NETWORK = \"host_network\" ATTR_HOST_PID = \"host_pid\" ATTR_HOSTNAME = 
\"hostname\" ATTR_ICON", "a single container can schedule up to 950/5*4 = 760ms", "= \"application/json\" CONTENT_TYPE_PNG = \"image/png\" CONTENT_TYPE_TAR = \"application/tar\" CONTENT_TYPE_TEXT =", "ATTR_ARCH = \"arch\" ATTR_ARGS = \"args\" ATTR_LABELS = \"labels\" ATTR_AUDIO", "= \"init\" ATTR_INITIALIZE = \"initialize\" ATTR_INPUT = \"input\" ATTR_INSTALLED =", "ATTR_ISSUES = \"issues\" ATTR_KERNEL = \"kernel\" ATTR_KERNEL_MODULES = \"kernel_modules\" ATTR_LAST_BOOT", "= \"volume\" ATTR_VPN = \"vpn\" ATTR_WAIT_BOOT = \"wait_boot\" ATTR_WATCHDOG =", "= \"homeassistant\" ROLE_BACKUP = \"backup\" ROLE_MANAGER = \"manager\" ROLE_ADMIN =", "\"share\" MAP_MEDIA = \"media\" ARCH_ARMHF = \"armhf\" ARCH_ARMV7 = \"armv7\"", "ATTR_IP_ADDRESS = \"ip_address\" ATTR_IPV4 = \"ipv4\" ATTR_IPV6 = \"ipv6\" ATTR_ISSUES", "= \"network_tx\" ATTR_OBSERVER = \"observer\" ATTR_OPERATING_SYSTEM = \"operating_system\" ATTR_OPTIONS =", "ATTR_CHECKS = \"checks\" ATTR_CLI = \"cli\" ATTR_CONFIG = \"config\" ATTR_CONFIGURATION", "\"updater.json\") FILE_SUFFIX_CONFIGURATION = [\".yaml\", \".yml\", \".json\"] MACHINE_ID = Path(\"/etc/machine-id\") SOCKET_DBUS", "= \"image\" ATTR_IMAGES = \"images\" ATTR_INDEX = \"index\" ATTR_INGRESS =", "= \"hassos\" HOSTNAME = \"hostname\" NETWORK = \"network\" REBOOT =", "= \"armv7\" ARCH_AARCH64 = \"aarch64\" ARCH_AMD64 = \"amd64\" ARCH_I386 =", "ATTR_DATA = \"data\" ATTR_DATE = \"date\" ATTR_DEBUG = \"debug\" ATTR_DEBUG_BLOCK", "ATTR_OUTPUT = \"output\" ATTR_PANEL_ADMIN = \"panel_admin\" ATTR_PANEL_ICON = \"panel_icon\" ATTR_PANEL_TITLE", "= ip_network(\"172.30.33.0/24\") # This needs to match the dockerd --cpu-rt-runtime=", "= \"blk_read\" ATTR_BLK_WRITE = \"blk_write\" ATTR_BOARD = \"board\" ATTR_BOOT =", "= \"build\" ATTR_BUILD_FROM = \"build_from\" ATTR_CARD = \"card\" ATTR_CHANGELOG =", "\"diagnostics\" ATTR_DISCOVERY = \"discovery\" ATTR_DISK = \"disk\" ATTR_DISK_FREE = \"disk_free\"", "\"services.json\") FILE_HASSIO_UPDATER = Path(SUPERVISOR_DATA, 
\"updater.json\") FILE_SUFFIX_CONFIGURATION = [\".yaml\", \".yml\", \".json\"]", "\"gpio\" ATTR_HASSIO_API = \"hassio_api\" ATTR_HASSIO_ROLE = \"hassio_role\" ATTR_HASSOS = \"hassos\"", "ATTR_STDIN = \"stdin\" ATTR_STORAGE = \"storage\" ATTR_SUGGESTIONS = \"suggestions\" ATTR_SUPERVISOR", "ATTR_SOURCE = \"source\" ATTR_SQUASH = \"squash\" ATTR_SSD = \"ssid\" ATTR_SSID", "ATTR_CHASSIS = \"chassis\" ATTR_CHECKS = \"checks\" ATTR_CLI = \"cli\" ATTR_CONFIG", "ATTR_DEVICETREE = \"devicetree\" ATTR_DIAGNOSTICS = \"diagnostics\" ATTR_DISCOVERY = \"discovery\" ATTR_DISK", "\"share\" FOLDER_ADDONS = \"addons/local\" FOLDER_SSL = \"ssl\" FOLDER_MEDIA = \"media\"", "\"access_token\" ATTR_ACCESSPOINTS = \"accesspoints\" ATTR_ACTIVE = \"active\" ATTR_ADDON = \"addon\"", "\"options\" ATTR_OTA = \"ota\" ATTR_OUTPUT = \"output\" ATTR_PANEL_ADMIN = \"panel_admin\"", "ATTR_DISK_USED = \"disk_used\" ATTR_DNS = \"dns\" ATTR_DOCKER = \"docker\" ATTR_DOCKER_API", "ATTR_SNAPSHOT_EXCLUDE = \"snapshot_exclude\" ATTR_SNAPSHOTS = \"snapshots\" ATTR_SOURCE = \"source\" ATTR_SQUASH", "\"need\" WANT_SERVICE = \"want\" MAP_CONFIG = \"config\" MAP_SSL = \"ssl\"", "\"logo\" ATTR_LONG_DESCRIPTION = \"long_description\" ATTR_MAC = \"mac\" ATTR_MACHINE = \"machine\"", "= \"system\" SERVICES = \"services\" APPLICATION = \"application\" ONCE =", "ATTR_CHANGELOG = \"changelog\" ATTR_CHANNEL = \"channel\" ATTR_CHASSIS = \"chassis\" ATTR_CHECKS", "supported update channels.\"\"\" STABLE = \"stable\" BETA = \"beta\" DEV", "= \"stable\" BETA = \"beta\" DEV = \"dev\" class CoreState(str,", "\"healthy\" ATTR_HOMEASSISTANT = \"homeassistant\" ATTR_HOMEASSISTANT_API = \"homeassistant_api\" ATTR_HOST = \"host\"", "ATTR_DATE = \"date\" ATTR_DEBUG = \"debug\" ATTR_DEBUG_BLOCK = \"debug_block\" ATTR_DEFAULT", "CONTENT_TYPE_JSON = \"application/json\" CONTENT_TYPE_PNG = \"image/png\" CONTENT_TYPE_TAR = \"application/tar\" CONTENT_TYPE_TEXT", "= Path(\"/etc/machine-id\") SOCKET_DBUS = 
Path(\"/run/dbus/system_bus_socket\") SOCKET_DOCKER = Path(\"/run/docker.sock\") RUN_SUPERVISOR_STATE =", "add-on.\"\"\" STABLE = \"stable\" EXPERIMENTAL = \"experimental\" DEPRECATED = \"deprecated\"", "\"blk_write\" ATTR_BOARD = \"board\" ATTR_BOOT = \"boot\" ATTR_BRANCH = \"branch\"", "means that # a single container can schedule up to", "\"SUPERVISOR_NAME\" ENV_SUPERVISOR_SHARE = \"SUPERVISOR_SHARE\" ENV_SUPERVISOR_CPU_RT = \"SUPERVISOR_CPU_RT\" REQUEST_FROM = \"HASSIO_FROM\"", "ATTR_BOOT = \"boot\" ATTR_BRANCH = \"branch\" ATTR_BUILD = \"build\" ATTR_BUILD_FROM", "\"https://version.home-assistant.io/apparmor.txt\" URL_HASSIO_VERSION = \"https://version.home-assistant.io/{channel}.json\" SUPERVISOR_DATA = Path(\"/data\") FILE_HASSIO_ADDONS = Path(SUPERVISOR_DATA,", "\"image/png\" CONTENT_TYPE_TAR = \"application/tar\" CONTENT_TYPE_TEXT = \"text/plain\" CONTENT_TYPE_URL = \"application/x-www-form-urlencoded\"", "ATTR_AVAILABLE = \"available\" ATTR_BLK_READ = \"blk_read\" ATTR_BLK_WRITE = \"blk_write\" ATTR_BOARD", "\"documentation\" ATTR_DOMAINS = \"domains\" ATTR_ENABLE = \"enable\" ATTR_ENABLED = \"enabled\"", "AddonBoot(str, Enum): \"\"\"Boot mode for the add-on.\"\"\" AUTO = \"auto\"", "= \"cli\" ATTR_CONFIG = \"config\" ATTR_CONFIGURATION = \"configuration\" ATTR_CONNECTED =", "= \"logo\" ATTR_LONG_DESCRIPTION = \"long_description\" ATTR_MAC = \"mac\" ATTR_MACHINE =", "= \"translations\" ATTR_TYPE = \"type\" ATTR_UART = \"uart\" ATTR_UDEV =", "quad core system. 
DOCKER_CPU_RUNTIME_ALLOCATION = int(DOCKER_CPU_RUNTIME_TOTAL / 5) DNS_SUFFIX =", "= \"ssid\" ATTR_SSL = \"ssl\" ATTR_STAGE = \"stage\" ATTR_STARTUP =", "Enum): \"\"\"Logging level of system.\"\"\" DEBUG = \"debug\" INFO =", "LABEL_VERSION = \"io.hass.version\" META_ADDON = \"addon\" META_HOMEASSISTANT = \"homeassistant\" META_SUPERVISOR", "\"cpe\" ATTR_CPU_PERCENT = \"cpu_percent\" ATTR_CRYPTO = \"crypto\" ATTR_DATA = \"data\"", "\"info\" WARNING = \"warning\" ERROR = \"error\" CRITICAL = \"critical\"", "ATTR_WATCHDOG = \"watchdog\" ATTR_WEBUI = \"webui\" ATTR_WIFI = \"wifi\" ATTR_CONTENT_TRUST", "ATTR_TRANSLATIONS = \"translations\" ATTR_TYPE = \"type\" ATTR_UART = \"uart\" ATTR_UDEV", "= \"aarch64\" ARCH_AMD64 = \"amd64\" ARCH_I386 = \"i386\" ARCH_ALL =", "\"stopping\" CLOSE = \"close\" class LogLevel(str, Enum): \"\"\"Logging level of", "\"default\" ATTR_DEPLOYMENT = \"deployment\" ATTR_DESCRIPTON = \"description\" ATTR_DETACHED = \"detached\"", "= \"description\" ATTR_DETACHED = \"detached\" ATTR_DEVICES = \"devices\" ATTR_DEVICETREE =", "\"host_network\" ATTR_HOST_PID = \"host_pid\" ATTR_HOSTNAME = \"hostname\" ATTR_ICON = \"icon\"", "time than available! Support up to 5 containers with equal", "= \"io.hass.version\" META_ADDON = \"addon\" META_HOMEASSISTANT = \"homeassistant\" META_SUPERVISOR =", "on a quad core system. 
DOCKER_CPU_RUNTIME_ALLOCATION = int(DOCKER_CPU_RUNTIME_TOTAL / 5)", "ROLE_BACKUP = \"backup\" ROLE_MANAGER = \"manager\" ROLE_ADMIN = \"admin\" ROLE_ALL", "\"configuration\" ATTR_CONNECTED = \"connected\" ATTR_CONNECTIONS = \"connections\" ATTR_CONTAINERS = \"containers\"", "CONTENT_TYPE_BINARY = \"application/octet-stream\" CONTENT_TYPE_JSON = \"application/json\" CONTENT_TYPE_PNG = \"image/png\" CONTENT_TYPE_TAR", "ATTR_APPLICATION = \"application\" ATTR_ARCH = \"arch\" ATTR_ARGS = \"args\" ATTR_LABELS", "ipaddress import ip_network from pathlib import Path SUPERVISOR_VERSION = \"DEV\"", "= \"address-data\" ATTR_ADMIN = \"admin\" ATTR_ADVANCED = \"advanced\" ATTR_APPARMOR =", "5 containers with equal time # allocated. # Note that", "= \"X-Hassio-Key\" ENV_TIME = \"TZ\" ENV_TOKEN = \"SUPERVISOR_TOKEN\" ENV_TOKEN_HASSIO =", "ATTR_VIDEO = \"video\" ATTR_VLAN = \"vlan\" ATTR_VOLUME = \"volume\" ATTR_VPN", "\"branch\" ATTR_BUILD = \"build\" ATTR_BUILD_FROM = \"build_from\" ATTR_CARD = \"card\"", "= \"ssl\" FOLDER_MEDIA = \"media\" SNAPSHOT_FULL = \"full\" SNAPSHOT_PARTIAL =", "\"service\" ATTR_SERVICES = \"services\" ATTR_SESSION = \"session\" ATTR_SIGNAL = \"signal\"", "\"addons_custom_list\" ATTR_ADDONS_REPOSITORIES = \"addons_repositories\" ATTR_ADDRESS = \"address\" ATTR_ADDRESS_DATA = \"address-data\"", "ATTR_PROTECTED = \"protected\" ATTR_PROVIDERS = \"providers\" ATTR_PSK = \"psk\" ATTR_RATING", "\"TZ\" ENV_TOKEN = \"SUPERVISOR_TOKEN\" ENV_TOKEN_HASSIO = \"HASSIO_TOKEN\" ENV_HOMEASSISTANT_REPOSITORY = \"HOMEASSISTANT_REPOSITORY\"", "\"auth_api\" ATTR_AUTO_UPDATE = \"auto_update\" ATTR_AVAILABLE = \"available\" ATTR_BLK_READ = \"blk_read\"", "= \"DEV\" URL_HASSIO_ADDONS = \"https://github.com/home-assistant/addons\" URL_HASSIO_APPARMOR = \"https://version.home-assistant.io/apparmor.txt\" URL_HASSIO_VERSION =", "STARTUP = \"startup\" RUNNING = \"running\" FREEZE = \"freeze\" SHUTDOWN", "ATTR_IMAGE = \"image\" ATTR_IMAGES = \"images\" ATTR_INDEX = \"index\" 
ATTR_INGRESS", "ATTR_ARGS = \"args\" ATTR_LABELS = \"labels\" ATTR_AUDIO = \"audio\" ATTR_AUDIO_INPUT", "container can schedule up to 950/5*4 = 760ms in RT", "ATTR_PRIMARY = \"primary\" ATTR_PRIORITY = \"priority\" ATTR_PRIVILEGED = \"privileged\" ATTR_PROTECTED", "\"translations\" ATTR_TYPE = \"type\" ATTR_UART = \"uart\" ATTR_UDEV = \"udev\"", "\"supervisor\" JSON_DATA = \"data\" JSON_MESSAGE = \"message\" JSON_RESULT = \"result\"", "\"application/x-www-form-urlencoded\" COOKIE_INGRESS = \"ingress_session\" HEADER_TOKEN = \"X-Supervisor-Token\" HEADER_TOKEN_OLD = \"X-Hassio-Key\"", "ATTR_ADVANCED = \"advanced\" ATTR_APPARMOR = \"apparmor\" ATTR_APPLICATION = \"application\" ATTR_ARCH", "ATTR_WIFI = \"wifi\" ATTR_CONTENT_TRUST = \"content_trust\" ATTR_FORCE_SECURITY = \"force_security\" PROVIDE_SERVICE", "\"security\" ATTR_SERIAL = \"serial\" ATTR_SERVERS = \"servers\" ATTR_SERVICE = \"service\"", "= \"host_network\" ATTR_HOST_PID = \"host_pid\" ATTR_HOSTNAME = \"hostname\" ATTR_ICON =", "\"source\" ATTR_SQUASH = \"squash\" ATTR_SSD = \"ssid\" ATTR_SSID = \"ssid\"", "META_HOMEASSISTANT = \"homeassistant\" META_SUPERVISOR = \"supervisor\" JSON_DATA = \"data\" JSON_MESSAGE", "Note that the time is multiplied by CPU count. 
This", "FILE_HASSIO_UPDATER = Path(SUPERVISOR_DATA, \"updater.json\") FILE_SUFFIX_CONFIGURATION = [\".yaml\", \".yml\", \".json\"] MACHINE_ID", "= \"wait_boot\" ATTR_WATCHDOG = \"watchdog\" ATTR_WEBUI = \"webui\" ATTR_WIFI =", "\"disk\" ATTR_DISK_FREE = \"disk_free\" ATTR_DISK_LIFE_TIME = \"disk_life_time\" ATTR_DISK_TOTAL = \"disk_total\"", "\"host_pid\" ATTR_HOSTNAME = \"hostname\" ATTR_ICON = \"icon\" ATTR_ID = \"id\"", "= \"parent\" ATTR_PASSWORD = \"password\" ATTR_PORT = \"port\" ATTR_PORTS =", "\"content_trust\" ATTR_FORCE_SECURITY = \"force_security\" PROVIDE_SERVICE = \"provide\" NEED_SERVICE = \"need\"", "\"data\" JSON_MESSAGE = \"message\" JSON_RESULT = \"result\" RESULT_ERROR = \"error\"", "= \"backup\" MAP_SHARE = \"share\" MAP_MEDIA = \"media\" ARCH_ARMHF =", "\"audio_output\" ATTR_AUTH = \"auth\" ATTR_AUTH_API = \"auth_api\" ATTR_AUTO_UPDATE = \"auto_update\"", "= \"rating\" ATTR_REALTIME = \"realtime\" ATTR_REFRESH_TOKEN = \"refresh_token\" ATTR_REGISTRIES =", "ATTR_AUDIO_OUTPUT = \"audio_output\" ATTR_AUTH = \"auth\" ATTR_AUTH_API = \"auth_api\" ATTR_AUTO_UPDATE", "core system. 
DOCKER_CPU_RUNTIME_ALLOCATION = int(DOCKER_CPU_RUNTIME_TOTAL / 5) DNS_SUFFIX = \"local.hass.io\"", "ATTR_NETWORK_RX = \"network_rx\" ATTR_NETWORK_TX = \"network_tx\" ATTR_OBSERVER = \"observer\" ATTR_OPERATING_SYSTEM", "ATTR_STARTUP = \"startup\" ATTR_STATE = \"state\" ATTR_STATIC = \"static\" ATTR_STDIN", "that # a single container can schedule up to 950/5*4", "= \"enabled\" ATTR_ENVIRONMENT = \"environment\" ATTR_EVENT = \"event\" ATTR_FEATURES =", "STOPPED = \"stopped\" UNKNOWN = \"unknown\" ERROR = \"error\" class", "= \"labels\" ATTR_AUDIO = \"audio\" ATTR_AUDIO_INPUT = \"audio_input\" ATTR_AUDIO_OUTPUT =", "\"auth\" ATTR_AUTH_API = \"auth_api\" ATTR_AUTO_UPDATE = \"auto_update\" ATTR_AVAILABLE = \"available\"", "RESULT_ERROR = \"error\" RESULT_OK = \"ok\" CONTENT_TYPE_BINARY = \"application/octet-stream\" CONTENT_TYPE_JSON", "= \"https://version.home-assistant.io/apparmor.txt\" URL_HASSIO_VERSION = \"https://version.home-assistant.io/{channel}.json\" SUPERVISOR_DATA = Path(\"/data\") FILE_HASSIO_ADDONS =", "ATTR_PANEL_ICON = \"panel_icon\" ATTR_PANEL_TITLE = \"panel_title\" ATTR_PANELS = \"panels\" ATTR_PARENT", "\"cli\" ATTR_CONFIG = \"config\" ATTR_CONFIGURATION = \"configuration\" ATTR_CONNECTED = \"connected\"", "= \"source\" ATTR_SQUASH = \"squash\" ATTR_SSD = \"ssid\" ATTR_SSID =", "\"devicetree\" ATTR_DIAGNOSTICS = \"diagnostics\" ATTR_DISCOVERY = \"discovery\" ATTR_DISK = \"disk\"", "system.\"\"\" DEBUG = \"debug\" INFO = \"info\" WARNING = \"warning\"", "ATTR_DISK = \"disk\" ATTR_DISK_FREE = \"disk_free\" ATTR_DISK_LIFE_TIME = \"disk_life_time\" ATTR_DISK_TOTAL", "\"ok\" CONTENT_TYPE_BINARY = \"application/octet-stream\" CONTENT_TYPE_JSON = \"application/json\" CONTENT_TYPE_PNG = \"image/png\"", "\"backup\" MAP_SHARE = \"share\" MAP_MEDIA = \"media\" ARCH_ARMHF = \"armhf\"", "MANUAL = \"manual\" class AddonStartup(str, Enum): \"\"\"Startup types of Add-on.\"\"\"", "\"long_description\" ATTR_MAC = \"mac\" ATTR_MACHINE = \"machine\" ATTR_MAINTAINER = 
\"maintainer\"", "CLOSE = \"close\" class LogLevel(str, Enum): \"\"\"Logging level of system.\"\"\"", "HASSOS = \"hassos\" HOSTNAME = \"hostname\" NETWORK = \"network\" REBOOT", "\"state\" ATTR_STATIC = \"static\" ATTR_STDIN = \"stdin\" ATTR_STORAGE = \"storage\"", "= \"local.hass.io\" LABEL_ARCH = \"io.hass.arch\" LABEL_MACHINE = \"io.hass.machine\" LABEL_TYPE =", "= \"method\" ATTR_MODE = \"mode\" ATTR_MULTICAST = \"multicast\" ATTR_NAME =", "ATTR_TITLE = \"title\" ATTR_TMPFS = \"tmpfs\" ATTR_TOTP = \"totp\" ATTR_TRANSLATIONS", "\"active\" ATTR_ADDON = \"addon\" ATTR_ADDONS = \"addons\" ATTR_ADDONS_CUSTOM_LIST = \"addons_custom_list\"", "MAP_SSL = \"ssl\" MAP_ADDONS = \"addons\" MAP_BACKUP = \"backup\" MAP_SHARE", "\"io.hass.version\" META_ADDON = \"addon\" META_HOMEASSISTANT = \"homeassistant\" META_SUPERVISOR = \"supervisor\"", "= \"uuid\" ATTR_VALID = \"valid\" ATTR_VALUE = \"value\" ATTR_VERSION =", "class AddonState(str, Enum): \"\"\"State of add-on.\"\"\" STARTED = \"started\" STOPPED", "ATTR_CONFIGURATION = \"configuration\" ATTR_CONNECTED = \"connected\" ATTR_CONNECTIONS = \"connections\" ATTR_CONTAINERS", "ATTR_UART = \"uart\" ATTR_UDEV = \"udev\" ATTR_UNHEALTHY = \"unhealthy\" ATTR_UNSAVED", "class AddonStartup(str, Enum): \"\"\"Startup types of Add-on.\"\"\" INITIALIZE = \"initialize\"", "\"priority\" ATTR_PRIVILEGED = \"privileged\" ATTR_PROTECTED = \"protected\" ATTR_PROVIDERS = \"providers\"", "ATTR_NAME = \"name\" ATTR_NAMESERVERS = \"nameservers\" ATTR_NETWORK = \"network\" ATTR_NETWORK_DESCRIPTION", "= \"manual\" class AddonStartup(str, Enum): \"\"\"Startup types of Add-on.\"\"\" INITIALIZE", "\"\"\"Represent current loading state.\"\"\" INITIALIZE = \"initialize\" SETUP = \"setup\"", "= \"address\" ATTR_ADDRESS_DATA = \"address-data\" ATTR_ADMIN = \"admin\" ATTR_ADVANCED =", "\"update_key\" ATTR_URL = \"url\" ATTR_USB = \"usb\" ATTR_USER = \"user\"", "\"io.hass.arch\" LABEL_MACHINE = \"io.hass.machine\" LABEL_TYPE = \"io.hass.type\" 
LABEL_VERSION = \"io.hass.version\"", "\"disk_used\" ATTR_DNS = \"dns\" ATTR_DOCKER = \"docker\" ATTR_DOCKER_API = \"docker_api\"", "MAP_SHARE = \"share\" MAP_MEDIA = \"media\" ARCH_ARMHF = \"armhf\" ARCH_ARMV7", "\"core\" REPOSITORY_LOCAL = \"local\" FOLDER_HOMEASSISTANT = \"homeassistant\" FOLDER_SHARE = \"share\"", "schedule up to 950/5*4 = 760ms in RT priority #", "ATTR_REFRESH_TOKEN = \"refresh_token\" ATTR_REGISTRIES = \"registries\" ATTR_REGISTRY = \"registry\" ATTR_REPOSITORIES", "\"system\" ATTR_JOURNALD = \"journald\" ATTR_TIMEOUT = \"timeout\" ATTR_TIMEZONE = \"timezone\"", "ATTR_ADDONS_CUSTOM_LIST = \"addons_custom_list\" ATTR_ADDONS_REPOSITORIES = \"addons_repositories\" ATTR_ADDRESS = \"address\" ATTR_ADDRESS_DATA", "\"snapshot_exclude\" ATTR_SNAPSHOTS = \"snapshots\" ATTR_SOURCE = \"source\" ATTR_SQUASH = \"squash\"", "ATTR_BUILD_FROM = \"build_from\" ATTR_CARD = \"card\" ATTR_CHANGELOG = \"changelog\" ATTR_CHANNEL", "Enum): \"\"\"Core supported update channels.\"\"\" STABLE = \"stable\" BETA =", "= \"vlan\" ATTR_VOLUME = \"volume\" ATTR_VPN = \"vpn\" ATTR_WAIT_BOOT =", "FILE_HASSIO_SERVICES = Path(SUPERVISOR_DATA, \"services.json\") FILE_HASSIO_UPDATER = Path(SUPERVISOR_DATA, \"updater.json\") FILE_SUFFIX_CONFIGURATION =", "from ipaddress import ip_network from pathlib import Path SUPERVISOR_VERSION =", "= \"homeassistant\" ATTR_HOMEASSISTANT_API = \"homeassistant_api\" ATTR_HOST = \"host\" ATTR_HOST_DBUS =", "\"network_tx\" ATTR_OBSERVER = \"observer\" ATTR_OPERATING_SYSTEM = \"operating_system\" ATTR_OPTIONS = \"options\"", "\"config\" MAP_SSL = \"ssl\" MAP_ADDONS = \"addons\" MAP_BACKUP = \"backup\"", "ATTR_CONNECTIONS = \"connections\" ATTR_CONTAINERS = \"containers\" ATTR_CPE = \"cpe\" ATTR_CPU_PERCENT", "STARTED = \"started\" STOPPED = \"stopped\" UNKNOWN = \"unknown\" ERROR", "= \"protected\" ATTR_PROVIDERS = \"providers\" ATTR_PSK = \"psk\" ATTR_RATING =", "\"addon\" META_HOMEASSISTANT = \"homeassistant\" META_SUPERVISOR = \"supervisor\" 
JSON_DATA = \"data\"", "\"enabled\" ATTR_ENVIRONMENT = \"environment\" ATTR_EVENT = \"event\" ATTR_FEATURES = \"features\"", "= \"data\" JSON_MESSAGE = \"message\" JSON_RESULT = \"result\" RESULT_ERROR =", "for Supervisor.\"\"\" from enum import Enum from ipaddress import ip_network", "ATTR_HOMEASSISTANT = \"homeassistant\" ATTR_HOMEASSISTANT_API = \"homeassistant_api\" ATTR_HOST = \"host\" ATTR_HOST_DBUS", "= int(DOCKER_CPU_RUNTIME_TOTAL / 5) DNS_SUFFIX = \"local.hass.io\" LABEL_ARCH = \"io.hass.arch\"", "= \"application/tar\" CONTENT_TYPE_TEXT = \"text/plain\" CONTENT_TYPE_URL = \"application/x-www-form-urlencoded\" COOKIE_INGRESS =", "= \"https://version.home-assistant.io/{channel}.json\" SUPERVISOR_DATA = Path(\"/data\") FILE_HASSIO_ADDONS = Path(SUPERVISOR_DATA, \"addons.json\") FILE_HASSIO_AUTH", "that the time is multiplied by CPU count. This means", "ATTR_MEMORY_USAGE = \"memory_usage\" ATTR_MESSAGE = \"message\" ATTR_METHOD = \"method\" ATTR_MODE", "= \"addons\" ATTR_ADDONS_CUSTOM_LIST = \"addons_custom_list\" ATTR_ADDONS_REPOSITORIES = \"addons_repositories\" ATTR_ADDRESS =", "\"title\" ATTR_TMPFS = \"tmpfs\" ATTR_TOTP = \"totp\" ATTR_TRANSLATIONS = \"translations\"", "ROLE_MANAGER = \"manager\" ROLE_ADMIN = \"admin\" ROLE_ALL = [ROLE_DEFAULT, ROLE_HOMEASSISTANT,", "DOCKER_CPU_RUNTIME_TOTAL = 950_000 # The rt runtimes are guarantees, hence", "= \"experimental\" DEPRECATED = \"deprecated\" class AddonState(str, Enum): \"\"\"State of", "than available! 
Support up to 5 containers with equal time", "= \"close\" class LogLevel(str, Enum): \"\"\"Logging level of system.\"\"\" DEBUG", "= \"channel\" ATTR_CHASSIS = \"chassis\" ATTR_CHECKS = \"checks\" ATTR_CLI =", "= \"armhf\" ARCH_ARMV7 = \"armv7\" ARCH_AARCH64 = \"aarch64\" ARCH_AMD64 =", "\"accesspoints\" ATTR_ACTIVE = \"active\" ATTR_ADDON = \"addon\" ATTR_ADDONS = \"addons\"", "DNS_SUFFIX = \"local.hass.io\" LABEL_ARCH = \"io.hass.arch\" LABEL_MACHINE = \"io.hass.machine\" LABEL_TYPE", "\"stopped\" UNKNOWN = \"unknown\" ERROR = \"error\" class UpdateChannel(str, Enum):", "= \"host_ipc\" ATTR_HOST_NETWORK = \"host_network\" ATTR_HOST_PID = \"host_pid\" ATTR_HOSTNAME =", "Enum): \"\"\"Host feature.\"\"\" HASSOS = \"hassos\" HOSTNAME = \"hostname\" NETWORK", "\"memory_limit\" ATTR_MEMORY_PERCENT = \"memory_percent\" ATTR_MEMORY_USAGE = \"memory_usage\" ATTR_MESSAGE = \"message\"", "= \"repositories\" ATTR_REPOSITORY = \"repository\" ATTR_SCHEMA = \"schema\" ATTR_SECURITY =", "= \"message\" JSON_RESULT = \"result\" RESULT_ERROR = \"error\" RESULT_OK =", "ATTR_HOST_INTERNET = \"host_internet\" ATTR_HOST_IPC = \"host_ipc\" ATTR_HOST_NETWORK = \"host_network\" ATTR_HOST_PID", "\"armhf\" ARCH_ARMV7 = \"armv7\" ARCH_AARCH64 = \"aarch64\" ARCH_AMD64 = \"amd64\"", "= \"refresh_token\" ATTR_REGISTRIES = \"registries\" ATTR_REGISTRY = \"registry\" ATTR_REPOSITORIES =", "\"addons/local\" FOLDER_SSL = \"ssl\" FOLDER_MEDIA = \"media\" SNAPSHOT_FULL = \"full\"", "= \"stopped\" UNKNOWN = \"unknown\" ERROR = \"error\" class UpdateChannel(str,", "ATTR_INTERFACE = \"interface\" ATTR_INTERFACES = \"interfaces\" ATTR_IP_ADDRESS = \"ip_address\" ATTR_IPV4", "ATTR_PASSWORD = \"password\" ATTR_PORT = \"port\" ATTR_PORTS = \"ports\" ATTR_PORTS_DESCRIPTION", "ATTR_SUPPORTED_ARCH = \"supported_arch\" ATTR_SYSTEM = \"system\" ATTR_JOURNALD = \"journald\" ATTR_TIMEOUT", "\"protected\" ATTR_PROVIDERS = \"providers\" ATTR_PSK = \"psk\" ATTR_RATING = \"rating\"", "ATTR_ADMIN = \"admin\" 
ATTR_ADVANCED = \"advanced\" ATTR_APPARMOR = \"apparmor\" ATTR_APPLICATION", "= \"audio\" ATTR_AUDIO_INPUT = \"audio_input\" ATTR_AUDIO_OUTPUT = \"audio_output\" ATTR_AUTH =", "channels.\"\"\" STABLE = \"stable\" BETA = \"beta\" DEV = \"dev\"", "\"connected\" ATTR_CONNECTIONS = \"connections\" ATTR_CONTAINERS = \"containers\" ATTR_CPE = \"cpe\"", "loading state.\"\"\" INITIALIZE = \"initialize\" SETUP = \"setup\" STARTUP =", "# The rt runtimes are guarantees, hence we cannot allocate", "ATTR_OTA = \"ota\" ATTR_OUTPUT = \"output\" ATTR_PANEL_ADMIN = \"panel_admin\" ATTR_PANEL_ICON", "\"serial\" ATTR_SERVERS = \"servers\" ATTR_SERVICE = \"service\" ATTR_SERVICES = \"services\"", "= \"addon\" ATTR_ADDONS = \"addons\" ATTR_ADDONS_CUSTOM_LIST = \"addons_custom_list\" ATTR_ADDONS_REPOSITORIES =", "= [ROLE_DEFAULT, ROLE_HOMEASSISTANT, ROLE_BACKUP, ROLE_MANAGER, ROLE_ADMIN] class AddonBoot(str, Enum): \"\"\"Boot", "\"HASSIO_FROM\" ATTR_ACCESS_TOKEN = \"access_token\" ATTR_ACCESSPOINTS = \"accesspoints\" ATTR_ACTIVE = \"active\"", "ATTR_URL = \"url\" ATTR_USB = \"usb\" ATTR_USER = \"user\" ATTR_USERNAME", "ATTR_INGRESS_PANEL = \"ingress_panel\" ATTR_INGRESS_PORT = \"ingress_port\" ATTR_INGRESS_TOKEN = \"ingress_token\" ATTR_INGRESS_URL", "\"volume\" ATTR_VPN = \"vpn\" ATTR_WAIT_BOOT = \"wait_boot\" ATTR_WATCHDOG = \"watchdog\"", "= \"share\" MAP_MEDIA = \"media\" ARCH_ARMHF = \"armhf\" ARCH_ARMV7 =", "\"DEV\" URL_HASSIO_ADDONS = \"https://github.com/home-assistant/addons\" URL_HASSIO_APPARMOR = \"https://version.home-assistant.io/apparmor.txt\" URL_HASSIO_VERSION = \"https://version.home-assistant.io/{channel}.json\"", "--cpu-rt-runtime= argument. 
DOCKER_CPU_RUNTIME_TOTAL = 950_000 # The rt runtimes are", "= \"admin\" ROLE_ALL = [ROLE_DEFAULT, ROLE_HOMEASSISTANT, ROLE_BACKUP, ROLE_MANAGER, ROLE_ADMIN] class", "\"user\" ATTR_USERNAME = \"username\" ATTR_UUID = \"uuid\" ATTR_VALID = \"valid\"", "DEPRECATED = \"deprecated\" class AddonState(str, Enum): \"\"\"State of add-on.\"\"\" STARTED", "= \"journald\" ATTR_TIMEOUT = \"timeout\" ATTR_TIMEZONE = \"timezone\" ATTR_TITLE =", "= \"host_internet\" ATTR_HOST_IPC = \"host_ipc\" ATTR_HOST_NETWORK = \"host_network\" ATTR_HOST_PID =", "ATTR_PRIVILEGED = \"privileged\" ATTR_PROTECTED = \"protected\" ATTR_PROVIDERS = \"providers\" ATTR_PSK", "= \"event\" ATTR_FEATURES = \"features\" ATTR_FILENAME = \"filename\" ATTR_FLAGS =", "\"arch\" ATTR_ARGS = \"args\" ATTR_LABELS = \"labels\" ATTR_AUDIO = \"audio\"", "= \"username\" ATTR_UUID = \"uuid\" ATTR_VALID = \"valid\" ATTR_VALUE =", "\"ssl\" ATTR_STAGE = \"stage\" ATTR_STARTUP = \"startup\" ATTR_STATE = \"state\"", "\"error\" class UpdateChannel(str, Enum): \"\"\"Core supported update channels.\"\"\" STABLE =", "FILE_HASSIO_AUTH = Path(SUPERVISOR_DATA, \"auth.json\") FILE_HASSIO_CONFIG = Path(SUPERVISOR_DATA, \"config.json\") FILE_HASSIO_DISCOVERY =", "\"io.hass.type\" LABEL_VERSION = \"io.hass.version\" META_ADDON = \"addon\" META_HOMEASSISTANT = \"homeassistant\"", "enum import Enum from ipaddress import ip_network from pathlib import", "\"udev\" ATTR_UNHEALTHY = \"unhealthy\" ATTR_UNSAVED = \"unsaved\" ATTR_UNSUPPORTED = \"unsupported\"", "\"changelog\" ATTR_CHANNEL = \"channel\" ATTR_CHASSIS = \"chassis\" ATTR_CHECKS = \"checks\"", "= \"serial\" ATTR_SERVERS = \"servers\" ATTR_SERVICE = \"service\" ATTR_SERVICES =", "= \"host\" ATTR_HOST_DBUS = \"host_dbus\" ATTR_HOST_INTERNET = \"host_internet\" ATTR_HOST_IPC =", "= \"id\" ATTR_IMAGE = \"image\" ATTR_IMAGES = \"images\" ATTR_INDEX =", "ATTR_DEPLOYMENT = \"deployment\" ATTR_DESCRIPTON = \"description\" ATTR_DETACHED = \"detached\" ATTR_DEVICES", "\"deployment\" 
ATTR_DESCRIPTON = \"description\" ATTR_DETACHED = \"detached\" ATTR_DEVICES = \"devices\"", "\"usb\" ATTR_USER = \"user\" ATTR_USERNAME = \"username\" ATTR_UUID = \"uuid\"", "# time than available! Support up to 5 containers with", "MACHINE_ID = Path(\"/etc/machine-id\") SOCKET_DBUS = Path(\"/run/dbus/system_bus_socket\") SOCKET_DOCKER = Path(\"/run/docker.sock\") RUN_SUPERVISOR_STATE", "= \"ingress_port\" ATTR_INGRESS_TOKEN = \"ingress_token\" ATTR_INGRESS_URL = \"ingress_url\" ATTR_INIT =", "= \"content_trust\" ATTR_FORCE_SECURITY = \"force_security\" PROVIDE_SERVICE = \"provide\" NEED_SERVICE =", "\"kernel\" ATTR_KERNEL_MODULES = \"kernel_modules\" ATTR_LAST_BOOT = \"last_boot\" ATTR_LEGACY = \"legacy\"", "\"advanced\" ATTR_APPARMOR = \"apparmor\" ATTR_APPLICATION = \"application\" ATTR_ARCH = \"arch\"", "ATTR_DOCKER = \"docker\" ATTR_DOCKER_API = \"docker_api\" ATTR_DOCUMENTATION = \"documentation\" ATTR_DOMAINS", "The rt runtimes are guarantees, hence we cannot allocate more", "\"logging\" ATTR_LOGO = \"logo\" ATTR_LONG_DESCRIPTION = \"long_description\" ATTR_MAC = \"mac\"", "ATTR_GATEWAY = \"gateway\" ATTR_GPIO = \"gpio\" ATTR_HASSIO_API = \"hassio_api\" ATTR_HASSIO_ROLE", "= \"url\" ATTR_USB = \"usb\" ATTR_USER = \"user\" ATTR_USERNAME =", "Path(SUPERVISOR_DATA, \"addons.json\") FILE_HASSIO_AUTH = Path(SUPERVISOR_DATA, \"auth.json\") FILE_HASSIO_CONFIG = Path(SUPERVISOR_DATA, \"config.json\")", "\"text/plain\" CONTENT_TYPE_URL = \"application/x-www-form-urlencoded\" COOKIE_INGRESS = \"ingress_session\" HEADER_TOKEN = \"X-Supervisor-Token\"", "ATTR_PORT = \"port\" ATTR_PORTS = \"ports\" ATTR_PORTS_DESCRIPTION = \"ports_description\" ATTR_PREFIX", "ATTR_SSD = \"ssid\" ATTR_SSID = \"ssid\" ATTR_SSL = \"ssl\" ATTR_STAGE", "\"mode\" ATTR_MULTICAST = \"multicast\" ATTR_NAME = \"name\" ATTR_NAMESERVERS = \"nameservers\"", "ATTR_VERSION = \"version\" ATTR_VERSION_LATEST = \"version_latest\" ATTR_VIDEO = \"video\" ATTR_VLAN", "\"auto\" MANUAL = \"manual\" class 
AddonStartup(str, Enum): \"\"\"Startup types of", "= \"discovery\" ATTR_DISK = \"disk\" ATTR_DISK_FREE = \"disk_free\" ATTR_DISK_LIFE_TIME =", "\"SUPERVISOR_CPU_RT\" REQUEST_FROM = \"HASSIO_FROM\" ATTR_ACCESS_TOKEN = \"access_token\" ATTR_ACCESSPOINTS = \"accesspoints\"", "\"result\" RESULT_ERROR = \"error\" RESULT_OK = \"ok\" CONTENT_TYPE_BINARY = \"application/octet-stream\"", "STABLE = \"stable\" BETA = \"beta\" DEV = \"dev\" class", "Enum): \"\"\"Represent current loading state.\"\"\" INITIALIZE = \"initialize\" SETUP =", "\"setup\" STARTUP = \"startup\" RUNNING = \"running\" FREEZE = \"freeze\"", "= \"homeassistant\" META_SUPERVISOR = \"supervisor\" JSON_DATA = \"data\" JSON_MESSAGE =", "ip_network(\"172.30.32.0/23\") DOCKER_NETWORK_RANGE = ip_network(\"172.30.33.0/24\") # This needs to match the", "= \"config\" ATTR_CONFIGURATION = \"configuration\" ATTR_CONNECTED = \"connected\" ATTR_CONNECTIONS =", "Path(\"/run/supervisor\") SYSTEMD_JOURNAL_PERSISTENT = Path(\"/var/log/journal\") SYSTEMD_JOURNAL_VOLATILE = Path(\"/run/log/journal\") DOCKER_NETWORK = \"hassio\"", "= \"nameservers\" ATTR_NETWORK = \"network\" ATTR_NETWORK_DESCRIPTION = \"network_description\" ATTR_NETWORK_RX =", "\"hostname\" ATTR_ICON = \"icon\" ATTR_ID = \"id\" ATTR_IMAGE = \"image\"", "SYSTEMD_JOURNAL_PERSISTENT = Path(\"/var/log/journal\") SYSTEMD_JOURNAL_VOLATILE = Path(\"/run/log/journal\") DOCKER_NETWORK = \"hassio\" DOCKER_NETWORK_MASK", "ARCH_AARCH64 = \"aarch64\" ARCH_AMD64 = \"amd64\" ARCH_I386 = \"i386\" ARCH_ALL", "= \"ingress_token\" ATTR_INGRESS_URL = \"ingress_url\" ATTR_INIT = \"init\" ATTR_INITIALIZE =", "SYSTEMD_JOURNAL_VOLATILE = Path(\"/run/log/journal\") DOCKER_NETWORK = \"hassio\" DOCKER_NETWORK_MASK = ip_network(\"172.30.32.0/23\") DOCKER_NETWORK_RANGE", "ATTR_SERIAL = \"serial\" ATTR_SERVERS = \"servers\" ATTR_SERVICE = \"service\" ATTR_SERVICES", "ATTR_DNS = \"dns\" ATTR_DOCKER = \"docker\" ATTR_DOCKER_API = \"docker_api\" ATTR_DOCUMENTATION", "= \"ssl\" ATTR_STAGE = 
\"stage\" ATTR_STARTUP = \"startup\" ATTR_STATE =", "ATTR_METHOD = \"method\" ATTR_MODE = \"mode\" ATTR_MULTICAST = \"multicast\" ATTR_NAME", "ATTR_NETWORK_DESCRIPTION = \"network_description\" ATTR_NETWORK_RX = \"network_rx\" ATTR_NETWORK_TX = \"network_tx\" ATTR_OBSERVER", "FILE_SUFFIX_CONFIGURATION = [\".yaml\", \".yml\", \".json\"] MACHINE_ID = Path(\"/etc/machine-id\") SOCKET_DBUS =", "\"startup\" ATTR_STATE = \"state\" ATTR_STATIC = \"static\" ATTR_STDIN = \"stdin\"", "= \"connected\" ATTR_CONNECTIONS = \"connections\" ATTR_CONTAINERS = \"containers\" ATTR_CPE =", "\"ssid\" ATTR_SSID = \"ssid\" ATTR_SSL = \"ssl\" ATTR_STAGE = \"stage\"", "= \"debug\" INFO = \"info\" WARNING = \"warning\" ERROR =", "= \"frequency\" ATTR_FULL_ACCESS = \"full_access\" ATTR_GATEWAY = \"gateway\" ATTR_GPIO =", "ATTR_HOST_DBUS = \"host_dbus\" ATTR_HOST_INTERNET = \"host_internet\" ATTR_HOST_IPC = \"host_ipc\" ATTR_HOST_NETWORK", "ATTR_NETWORK = \"network\" ATTR_NETWORK_DESCRIPTION = \"network_description\" ATTR_NETWORK_RX = \"network_rx\" ATTR_NETWORK_TX", "a quad core system. 
DOCKER_CPU_RUNTIME_ALLOCATION = int(DOCKER_CPU_RUNTIME_TOTAL / 5) DNS_SUFFIX", "ATTR_PSK = \"psk\" ATTR_RATING = \"rating\" ATTR_REALTIME = \"realtime\" ATTR_REFRESH_TOKEN", "= \"squash\" ATTR_SSD = \"ssid\" ATTR_SSID = \"ssid\" ATTR_SSL =", "ROLE_ADMIN = \"admin\" ROLE_ALL = [ROLE_DEFAULT, ROLE_HOMEASSISTANT, ROLE_BACKUP, ROLE_MANAGER, ROLE_ADMIN]", "ENV_TOKEN_HASSIO = \"HASSIO_TOKEN\" ENV_HOMEASSISTANT_REPOSITORY = \"HOMEASSISTANT_REPOSITORY\" ENV_SUPERVISOR_DEV = \"SUPERVISOR_DEV\" ENV_SUPERVISOR_MACHINE", "= \"disk_life_time\" ATTR_DISK_TOTAL = \"disk_total\" ATTR_DISK_USED = \"disk_used\" ATTR_DNS =", "= \"shutdown\" STOPPING = \"stopping\" CLOSE = \"close\" class LogLevel(str,", "= Path(SUPERVISOR_DATA, \"addons.json\") FILE_HASSIO_AUTH = Path(SUPERVISOR_DATA, \"auth.json\") FILE_HASSIO_CONFIG = Path(SUPERVISOR_DATA,", "= \"ports\" ATTR_PORTS_DESCRIPTION = \"ports_description\" ATTR_PREFIX = \"prefix\" ATTR_PRIMARY =", "ATTR_PROVIDERS = \"providers\" ATTR_PSK = \"psk\" ATTR_RATING = \"rating\" ATTR_REALTIME", "Path(SUPERVISOR_DATA, \"config.json\") FILE_HASSIO_DISCOVERY = Path(SUPERVISOR_DATA, \"discovery.json\") FILE_HASSIO_DOCKER = Path(SUPERVISOR_DATA, \"docker.json\")", "DOCKER_NETWORK = \"hassio\" DOCKER_NETWORK_MASK = ip_network(\"172.30.32.0/23\") DOCKER_NETWORK_RANGE = ip_network(\"172.30.33.0/24\") #", "= \"enable\" ATTR_ENABLED = \"enabled\" ATTR_ENVIRONMENT = \"environment\" ATTR_EVENT =", "\"application/json\" CONTENT_TYPE_PNG = \"image/png\" CONTENT_TYPE_TAR = \"application/tar\" CONTENT_TYPE_TEXT = \"text/plain\"", "\"registries\" ATTR_REGISTRY = \"registry\" ATTR_REPOSITORIES = \"repositories\" ATTR_REPOSITORY = \"repository\"", "ATTR_WAIT_BOOT = \"wait_boot\" ATTR_WATCHDOG = \"watchdog\" ATTR_WEBUI = \"webui\" ATTR_WIFI", "= \"interface\" ATTR_INTERFACES = \"interfaces\" ATTR_IP_ADDRESS = \"ip_address\" ATTR_IPV4 =", "\"nameservers\" ATTR_NETWORK = \"network\" ATTR_NETWORK_DESCRIPTION = \"network_description\" ATTR_NETWORK_RX = 
\"network_rx\"", "ATTR_FILENAME = \"filename\" ATTR_FLAGS = \"flags\" ATTR_FOLDERS = \"folders\" ATTR_FREQUENCY", "= [\".yaml\", \".yml\", \".json\"] MACHINE_ID = Path(\"/etc/machine-id\") SOCKET_DBUS = Path(\"/run/dbus/system_bus_socket\")", "= \"https://github.com/home-assistant/addons\" URL_HASSIO_APPARMOR = \"https://version.home-assistant.io/apparmor.txt\" URL_HASSIO_VERSION = \"https://version.home-assistant.io/{channel}.json\" SUPERVISOR_DATA =", "\"video\" ATTR_VLAN = \"vlan\" ATTR_VOLUME = \"volume\" ATTR_VPN = \"vpn\"", "available! Support up to 5 containers with equal time #", "ATTR_HOST_PID = \"host_pid\" ATTR_HOSTNAME = \"hostname\" ATTR_ICON = \"icon\" ATTR_ID", "ATTR_MEMORY_PERCENT = \"memory_percent\" ATTR_MEMORY_USAGE = \"memory_usage\" ATTR_MESSAGE = \"message\" ATTR_METHOD", "ATTR_INGRESS_PORT = \"ingress_port\" ATTR_INGRESS_TOKEN = \"ingress_token\" ATTR_INGRESS_URL = \"ingress_url\" ATTR_INIT", "LABEL_TYPE = \"io.hass.type\" LABEL_VERSION = \"io.hass.version\" META_ADDON = \"addon\" META_HOMEASSISTANT", "DOCKER_NETWORK_RANGE = ip_network(\"172.30.33.0/24\") # This needs to match the dockerd", "\"homeassistant\" META_SUPERVISOR = \"supervisor\" JSON_DATA = \"data\" JSON_MESSAGE = \"message\"", "= \"locals\" ATTR_LOCATON = \"location\" ATTR_LOGGING = \"logging\" ATTR_LOGO =", "\"description\" ATTR_DETACHED = \"detached\" ATTR_DEVICES = \"devices\" ATTR_DEVICETREE = \"devicetree\"", "\"want\" MAP_CONFIG = \"config\" MAP_SSL = \"ssl\" MAP_ADDONS = \"addons\"", "ATTR_CARD = \"card\" ATTR_CHANGELOG = \"changelog\" ATTR_CHANNEL = \"channel\" ATTR_CHASSIS", "ATTR_SERVERS = \"servers\" ATTR_SERVICE = \"service\" ATTR_SERVICES = \"services\" ATTR_SESSION", "AUTO = \"auto\" MANUAL = \"manual\" class AddonStartup(str, Enum): \"\"\"Startup", "= \"video\" ATTR_VLAN = \"vlan\" ATTR_VOLUME = \"volume\" ATTR_VPN =", "ATTR_TIMEZONE = \"timezone\" ATTR_TITLE = \"title\" ATTR_TMPFS = \"tmpfs\" ATTR_TOTP", "\"docker_api\" ATTR_DOCUMENTATION = \"documentation\" 
ATTR_DOMAINS = \"domains\" ATTR_ENABLE = \"enable\"", "ATTR_AUTH_API = \"auth_api\" ATTR_AUTO_UPDATE = \"auto_update\" ATTR_AVAILABLE = \"available\" ATTR_BLK_READ", "= \"addons/local\" FOLDER_SSL = \"ssl\" FOLDER_MEDIA = \"media\" SNAPSHOT_FULL =", "= \"wifi\" ATTR_CONTENT_TRUST = \"content_trust\" ATTR_FORCE_SECURITY = \"force_security\" PROVIDE_SERVICE =", "= \"repository\" ATTR_SCHEMA = \"schema\" ATTR_SECURITY = \"security\" ATTR_SERIAL =", "ENV_SUPERVISOR_SHARE = \"SUPERVISOR_SHARE\" ENV_SUPERVISOR_CPU_RT = \"SUPERVISOR_CPU_RT\" REQUEST_FROM = \"HASSIO_FROM\" ATTR_ACCESS_TOKEN", "= \"domains\" ATTR_ENABLE = \"enable\" ATTR_ENABLED = \"enabled\" ATTR_ENVIRONMENT =", "= \"ingress_url\" ATTR_INIT = \"init\" ATTR_INITIALIZE = \"initialize\" ATTR_INPUT =", "= \"error\" RESULT_OK = \"ok\" CONTENT_TYPE_BINARY = \"application/octet-stream\" CONTENT_TYPE_JSON =", "= \"ingress\" ATTR_INGRESS_ENTRY = \"ingress_entry\" ATTR_INGRESS_PANEL = \"ingress_panel\" ATTR_INGRESS_PORT =", "ARCH_AMD64 = \"amd64\" ARCH_I386 = \"i386\" ARCH_ALL = [ARCH_ARMHF, ARCH_ARMV7,", "= \"partial\" CRYPTO_AES128 = \"aes128\" SECURITY_PROFILE = \"profile\" SECURITY_DEFAULT =", "= \"auth\" ATTR_AUTH_API = \"auth_api\" ATTR_AUTO_UPDATE = \"auto_update\" ATTR_AVAILABLE =", "= \"disable\" ROLE_DEFAULT = \"default\" ROLE_HOMEASSISTANT = \"homeassistant\" ROLE_BACKUP =", "ATTR_SUPERVISOR = \"supervisor\" ATTR_SUPERVISOR_INTERNET = \"supervisor_internet\" ATTR_SUPPORTED = \"supported\" ATTR_SUPPORTED_ARCH", "= Path(SUPERVISOR_DATA, \"docker.json\") FILE_HASSIO_HOMEASSISTANT = Path(SUPERVISOR_DATA, \"homeassistant.json\") FILE_HASSIO_INGRESS = Path(SUPERVISOR_DATA,", "ATTR_SESSION = \"session\" ATTR_SIGNAL = \"signal\" ATTR_SIZE = \"size\" ATTR_SLUG", "\"memory_usage\" ATTR_MESSAGE = \"message\" ATTR_METHOD = \"method\" ATTR_MODE = \"mode\"", "ip_network(\"172.30.33.0/24\") # This needs to match the dockerd --cpu-rt-runtime= argument.", "ATTR_TYPE = \"type\" ATTR_UART = \"uart\" ATTR_UDEV = \"udev\" 
ATTR_UNHEALTHY", "ATTR_FREQUENCY = \"frequency\" ATTR_FULL_ACCESS = \"full_access\" ATTR_GATEWAY = \"gateway\" ATTR_GPIO", "\"partial\" CRYPTO_AES128 = \"aes128\" SECURITY_PROFILE = \"profile\" SECURITY_DEFAULT = \"default\"", "multiplied by CPU count. This means that # a single", "= \"folders\" ATTR_FREQUENCY = \"frequency\" ATTR_FULL_ACCESS = \"full_access\" ATTR_GATEWAY =", "= \"priority\" ATTR_PRIVILEGED = \"privileged\" ATTR_PROTECTED = \"protected\" ATTR_PROVIDERS =", "\"card\" ATTR_CHANGELOG = \"changelog\" ATTR_CHANNEL = \"channel\" ATTR_CHASSIS = \"chassis\"", "\"amd64\" ARCH_I386 = \"i386\" ARCH_ALL = [ARCH_ARMHF, ARCH_ARMV7, ARCH_AARCH64, ARCH_AMD64,", "META_ADDON = \"addon\" META_HOMEASSISTANT = \"homeassistant\" META_SUPERVISOR = \"supervisor\" JSON_DATA", "\"event\" ATTR_FEATURES = \"features\" ATTR_FILENAME = \"filename\" ATTR_FLAGS = \"flags\"", "\"HOMEASSISTANT_REPOSITORY\" ENV_SUPERVISOR_DEV = \"SUPERVISOR_DEV\" ENV_SUPERVISOR_MACHINE = \"SUPERVISOR_MACHINE\" ENV_SUPERVISOR_NAME = \"SUPERVISOR_NAME\"", "= \"healthy\" ATTR_HOMEASSISTANT = \"homeassistant\" ATTR_HOMEASSISTANT_API = \"homeassistant_api\" ATTR_HOST =", "\"audio_input\" ATTR_AUDIO_OUTPUT = \"audio_output\" ATTR_AUTH = \"auth\" ATTR_AUTH_API = \"auth_api\"", "= \"webui\" ATTR_WIFI = \"wifi\" ATTR_CONTENT_TRUST = \"content_trust\" ATTR_FORCE_SECURITY =", "= \"deployment\" ATTR_DESCRIPTON = \"description\" ATTR_DETACHED = \"detached\" ATTR_DEVICES =", "= \"kernel\" ATTR_KERNEL_MODULES = \"kernel_modules\" ATTR_LAST_BOOT = \"last_boot\" ATTR_LEGACY =", "\"homeassistant\" ATTR_HOMEASSISTANT_API = \"homeassistant_api\" ATTR_HOST = \"host\" ATTR_HOST_DBUS = \"host_dbus\"", "ATTR_UDEV = \"udev\" ATTR_UNHEALTHY = \"unhealthy\" ATTR_UNSAVED = \"unsaved\" ATTR_UNSUPPORTED", "\"addons\" MAP_BACKUP = \"backup\" MAP_SHARE = \"share\" MAP_MEDIA = \"media\"", "ERROR = \"error\" CRITICAL = \"critical\" class HostFeature(str, Enum): \"\"\"Host", "ATTR_TOTP = \"totp\" ATTR_TRANSLATIONS = \"translations\" 
ATTR_TYPE = \"type\" ATTR_UART", "= \"name\" ATTR_NAMESERVERS = \"nameservers\" ATTR_NETWORK = \"network\" ATTR_NETWORK_DESCRIPTION =", "= \"options\" ATTR_OTA = \"ota\" ATTR_OUTPUT = \"output\" ATTR_PANEL_ADMIN =", "\"psk\" ATTR_RATING = \"rating\" ATTR_REALTIME = \"realtime\" ATTR_REFRESH_TOKEN = \"refresh_token\"", "\"io.hass.machine\" LABEL_TYPE = \"io.hass.type\" LABEL_VERSION = \"io.hass.version\" META_ADDON = \"addon\"", "ATTR_REGISTRY = \"registry\" ATTR_REPOSITORIES = \"repositories\" ATTR_REPOSITORY = \"repository\" ATTR_SCHEMA", "\"discovery\" ATTR_DISK = \"disk\" ATTR_DISK_FREE = \"disk_free\" ATTR_DISK_LIFE_TIME = \"disk_life_time\"", "\"profile\" SECURITY_DEFAULT = \"default\" SECURITY_DISABLE = \"disable\" ROLE_DEFAULT = \"default\"", "\"version_latest\" ATTR_VIDEO = \"video\" ATTR_VLAN = \"vlan\" ATTR_VOLUME = \"volume\"", "= \"filename\" ATTR_FLAGS = \"flags\" ATTR_FOLDERS = \"folders\" ATTR_FREQUENCY =", "ATTR_HEALTHY = \"healthy\" ATTR_HOMEASSISTANT = \"homeassistant\" ATTR_HOMEASSISTANT_API = \"homeassistant_api\" ATTR_HOST", "\"network\" ATTR_NETWORK_DESCRIPTION = \"network_description\" ATTR_NETWORK_RX = \"network_rx\" ATTR_NETWORK_TX = \"network_tx\"", "FOLDER_MEDIA = \"media\" SNAPSHOT_FULL = \"full\" SNAPSHOT_PARTIAL = \"partial\" CRYPTO_AES128", "JSON_MESSAGE = \"message\" JSON_RESULT = \"result\" RESULT_ERROR = \"error\" RESULT_OK", "single container can schedule up to 950/5*4 = 760ms in", "ATTR_SLUG = \"slug\" ATTR_SNAPSHOT_EXCLUDE = \"snapshot_exclude\" ATTR_SNAPSHOTS = \"snapshots\" ATTR_SOURCE", "SNAPSHOT_PARTIAL = \"partial\" CRYPTO_AES128 = \"aes128\" SECURITY_PROFILE = \"profile\" SECURITY_DEFAULT", "\"started\" STOPPED = \"stopped\" UNKNOWN = \"unknown\" ERROR = \"error\"", "dockerd --cpu-rt-runtime= argument. 
DOCKER_CPU_RUNTIME_TOTAL = 950_000 # The rt runtimes", "= \"need\" WANT_SERVICE = \"want\" MAP_CONFIG = \"config\" MAP_SSL =", "HOSTNAME = \"hostname\" NETWORK = \"network\" REBOOT = \"reboot\" SERVICES", "= \"addons_repositories\" ATTR_ADDRESS = \"address\" ATTR_ADDRESS_DATA = \"address-data\" ATTR_ADMIN =", "\"password\" ATTR_PORT = \"port\" ATTR_PORTS = \"ports\" ATTR_PORTS_DESCRIPTION = \"ports_description\"", "= \"debug\" ATTR_DEBUG_BLOCK = \"debug_block\" ATTR_DEFAULT = \"default\" ATTR_DEPLOYMENT =", "FOLDER_HOMEASSISTANT = \"homeassistant\" FOLDER_SHARE = \"share\" FOLDER_ADDONS = \"addons/local\" FOLDER_SSL", "ATTR_ICON = \"icon\" ATTR_ID = \"id\" ATTR_IMAGE = \"image\" ATTR_IMAGES", "= \"want\" MAP_CONFIG = \"config\" MAP_SSL = \"ssl\" MAP_ADDONS =", "\"id\" ATTR_IMAGE = \"image\" ATTR_IMAGES = \"images\" ATTR_INDEX = \"index\"", "HostFeature(str, Enum): \"\"\"Host feature.\"\"\" HASSOS = \"hassos\" HOSTNAME = \"hostname\"", "ATTR_LONG_DESCRIPTION = \"long_description\" ATTR_MAC = \"mac\" ATTR_MACHINE = \"machine\" ATTR_MAINTAINER", "SECURITY_DEFAULT = \"default\" SECURITY_DISABLE = \"disable\" ROLE_DEFAULT = \"default\" ROLE_HOMEASSISTANT", "= \"maintainer\" ATTR_MAP = \"map\" ATTR_MEMORY_LIMIT = \"memory_limit\" ATTR_MEMORY_PERCENT =", "\"\"\"Boot mode for the add-on.\"\"\" AUTO = \"auto\" MANUAL =", "\"supervisor_internet\" ATTR_SUPPORTED = \"supported\" ATTR_SUPPORTED_ARCH = \"supported_arch\" ATTR_SYSTEM = \"system\"", "JSON_RESULT = \"result\" RESULT_ERROR = \"error\" RESULT_OK = \"ok\" CONTENT_TYPE_BINARY", "\"stable\" EXPERIMENTAL = \"experimental\" DEPRECATED = \"deprecated\" class AddonState(str, Enum):", "ATTR_CRYPTO = \"crypto\" ATTR_DATA = \"data\" ATTR_DATE = \"date\" ATTR_DEBUG", "SOCKET_DBUS = Path(\"/run/dbus/system_bus_socket\") SOCKET_DOCKER = Path(\"/run/docker.sock\") RUN_SUPERVISOR_STATE = Path(\"/run/supervisor\") SYSTEMD_JOURNAL_PERSISTENT", "current loading state.\"\"\" INITIALIZE = \"initialize\" SETUP = \"setup\" STARTUP", "= 
\"hassos\" ATTR_HEALTHY = \"healthy\" ATTR_HOMEASSISTANT = \"homeassistant\" ATTR_HOMEASSISTANT_API =", "of system.\"\"\" DEBUG = \"debug\" INFO = \"info\" WARNING =", "\"network_rx\" ATTR_NETWORK_TX = \"network_tx\" ATTR_OBSERVER = \"observer\" ATTR_OPERATING_SYSTEM = \"operating_system\"", "ATTR_BOARD = \"board\" ATTR_BOOT = \"boot\" ATTR_BRANCH = \"branch\" ATTR_BUILD", "= \"stdin\" ATTR_STORAGE = \"storage\" ATTR_SUGGESTIONS = \"suggestions\" ATTR_SUPERVISOR =", "ATTR_INDEX = \"index\" ATTR_INGRESS = \"ingress\" ATTR_INGRESS_ENTRY = \"ingress_entry\" ATTR_INGRESS_PANEL", "ATTR_UUID = \"uuid\" ATTR_VALID = \"valid\" ATTR_VALUE = \"value\" ATTR_VERSION", "\"maintainer\" ATTR_MAP = \"map\" ATTR_MEMORY_LIMIT = \"memory_limit\" ATTR_MEMORY_PERCENT = \"memory_percent\"", "= \"dev\" class CoreState(str, Enum): \"\"\"Represent current loading state.\"\"\" INITIALIZE", "URL_HASSIO_VERSION = \"https://version.home-assistant.io/{channel}.json\" SUPERVISOR_DATA = Path(\"/data\") FILE_HASSIO_ADDONS = Path(SUPERVISOR_DATA, \"addons.json\")", "ATTR_WEBUI = \"webui\" ATTR_WIFI = \"wifi\" ATTR_CONTENT_TRUST = \"content_trust\" ATTR_FORCE_SECURITY", "\"name\" ATTR_NAMESERVERS = \"nameservers\" ATTR_NETWORK = \"network\" ATTR_NETWORK_DESCRIPTION = \"network_description\"", "Path(\"/var/log/journal\") SYSTEMD_JOURNAL_VOLATILE = Path(\"/run/log/journal\") DOCKER_NETWORK = \"hassio\" DOCKER_NETWORK_MASK = ip_network(\"172.30.32.0/23\")", "\"folders\" ATTR_FREQUENCY = \"frequency\" ATTR_FULL_ACCESS = \"full_access\" ATTR_GATEWAY = \"gateway\"", "= \"ota\" ATTR_OUTPUT = \"output\" ATTR_PANEL_ADMIN = \"panel_admin\" ATTR_PANEL_ICON =", "ATTR_JOURNALD = \"journald\" ATTR_TIMEOUT = \"timeout\" ATTR_TIMEZONE = \"timezone\" ATTR_TITLE", "\"critical\" class HostFeature(str, Enum): \"\"\"Host feature.\"\"\" HASSOS = \"hassos\" HOSTNAME", "= \"application/x-www-form-urlencoded\" COOKIE_INGRESS = \"ingress_session\" HEADER_TOKEN = \"X-Supervisor-Token\" HEADER_TOKEN_OLD =", "CONTENT_TYPE_PNG = 
\"image/png\" CONTENT_TYPE_TAR = \"application/tar\" CONTENT_TYPE_TEXT = \"text/plain\" CONTENT_TYPE_URL", "ATTR_CONTAINERS = \"containers\" ATTR_CPE = \"cpe\" ATTR_CPU_PERCENT = \"cpu_percent\" ATTR_CRYPTO", "ATTR_CPU_PERCENT = \"cpu_percent\" ATTR_CRYPTO = \"crypto\" ATTR_DATA = \"data\" ATTR_DATE", "= Path(SUPERVISOR_DATA, \"discovery.json\") FILE_HASSIO_DOCKER = Path(SUPERVISOR_DATA, \"docker.json\") FILE_HASSIO_HOMEASSISTANT = Path(SUPERVISOR_DATA,", "ARCH_ARMHF = \"armhf\" ARCH_ARMV7 = \"armv7\" ARCH_AARCH64 = \"aarch64\" ARCH_AMD64", "priority # on a quad core system. DOCKER_CPU_RUNTIME_ALLOCATION = int(DOCKER_CPU_RUNTIME_TOTAL", "[\".yaml\", \".yml\", \".json\"] MACHINE_ID = Path(\"/etc/machine-id\") SOCKET_DBUS = Path(\"/run/dbus/system_bus_socket\") SOCKET_DOCKER", "= \"size\" ATTR_SLUG = \"slug\" ATTR_SNAPSHOT_EXCLUDE = \"snapshot_exclude\" ATTR_SNAPSHOTS =", "= \"vpn\" ATTR_WAIT_BOOT = \"wait_boot\" ATTR_WATCHDOG = \"watchdog\" ATTR_WEBUI =", "DOCKER_CPU_RUNTIME_ALLOCATION = int(DOCKER_CPU_RUNTIME_TOTAL / 5) DNS_SUFFIX = \"local.hass.io\" LABEL_ARCH =", "\"webui\" ATTR_WIFI = \"wifi\" ATTR_CONTENT_TRUST = \"content_trust\" ATTR_FORCE_SECURITY = \"force_security\"", "= Path(\"/var/log/journal\") SYSTEMD_JOURNAL_VOLATILE = Path(\"/run/log/journal\") DOCKER_NETWORK = \"hassio\" DOCKER_NETWORK_MASK =", "\"containers\" ATTR_CPE = \"cpe\" ATTR_CPU_PERCENT = \"cpu_percent\" ATTR_CRYPTO = \"crypto\"", "\"disk_life_time\" ATTR_DISK_TOTAL = \"disk_total\" ATTR_DISK_USED = \"disk_used\" ATTR_DNS = \"dns\"", "ATTR_TIMEOUT = \"timeout\" ATTR_TIMEZONE = \"timezone\" ATTR_TITLE = \"title\" ATTR_TMPFS", "ATTR_VALUE = \"value\" ATTR_VERSION = \"version\" ATTR_VERSION_LATEST = \"version_latest\" ATTR_VIDEO", "= \"application/octet-stream\" CONTENT_TYPE_JSON = \"application/json\" CONTENT_TYPE_PNG = \"image/png\" CONTENT_TYPE_TAR =", "= Path(SUPERVISOR_DATA, \"updater.json\") FILE_SUFFIX_CONFIGURATION = [\".yaml\", \".yml\", \".json\"] MACHINE_ID =", "This means that # a 
single container can schedule up", "760ms in RT priority # on a quad core system.", "= \"password\" ATTR_PORT = \"port\" ATTR_PORTS = \"ports\" ATTR_PORTS_DESCRIPTION =", "ATTR_IPV6 = \"ipv6\" ATTR_ISSUES = \"issues\" ATTR_KERNEL = \"kernel\" ATTR_KERNEL_MODULES", "= \"HASSIO_TOKEN\" ENV_HOMEASSISTANT_REPOSITORY = \"HOMEASSISTANT_REPOSITORY\" ENV_SUPERVISOR_DEV = \"SUPERVISOR_DEV\" ENV_SUPERVISOR_MACHINE =", "\"tmpfs\" ATTR_TOTP = \"totp\" ATTR_TRANSLATIONS = \"translations\" ATTR_TYPE = \"type\"", "ATTR_ADDRESS_DATA = \"address-data\" ATTR_ADMIN = \"admin\" ATTR_ADVANCED = \"advanced\" ATTR_APPARMOR", "AddonState(str, Enum): \"\"\"State of add-on.\"\"\" STARTED = \"started\" STOPPED =", "\"ota\" ATTR_OUTPUT = \"output\" ATTR_PANEL_ADMIN = \"panel_admin\" ATTR_PANEL_ICON = \"panel_icon\"", "= \"provide\" NEED_SERVICE = \"need\" WANT_SERVICE = \"want\" MAP_CONFIG =", "ATTR_CONTENT_TRUST = \"content_trust\" ATTR_FORCE_SECURITY = \"force_security\" PROVIDE_SERVICE = \"provide\" NEED_SERVICE", "\"backup\" ROLE_MANAGER = \"manager\" ROLE_ADMIN = \"admin\" ROLE_ALL = [ROLE_DEFAULT,", "ROLE_MANAGER, ROLE_ADMIN] class AddonBoot(str, Enum): \"\"\"Boot mode for the add-on.\"\"\"", "\"unsaved\" ATTR_UNSUPPORTED = \"unsupported\" ATTR_UPDATE_AVAILABLE = \"update_available\" ATTR_UPDATE_KEY = \"update_key\"", "cannot allocate more # time than available! 
Support up to", "ENV_SUPERVISOR_NAME = \"SUPERVISOR_NAME\" ENV_SUPERVISOR_SHARE = \"SUPERVISOR_SHARE\" ENV_SUPERVISOR_CPU_RT = \"SUPERVISOR_CPU_RT\" REQUEST_FROM", "\"running\" FREEZE = \"freeze\" SHUTDOWN = \"shutdown\" STOPPING = \"stopping\"", "ATTR_STATE = \"state\" ATTR_STATIC = \"static\" ATTR_STDIN = \"stdin\" ATTR_STORAGE", "\"\"\"State of add-on.\"\"\" STARTED = \"started\" STOPPED = \"stopped\" UNKNOWN", "ATTR_LOCATON = \"location\" ATTR_LOGGING = \"logging\" ATTR_LOGO = \"logo\" ATTR_LONG_DESCRIPTION", "FOLDER_ADDONS = \"addons/local\" FOLDER_SSL = \"ssl\" FOLDER_MEDIA = \"media\" SNAPSHOT_FULL", "\"uuid\" ATTR_VALID = \"valid\" ATTR_VALUE = \"value\" ATTR_VERSION = \"version\"", "\"session\" ATTR_SIGNAL = \"signal\" ATTR_SIZE = \"size\" ATTR_SLUG = \"slug\"", "\"machine\" ATTR_MAINTAINER = \"maintainer\" ATTR_MAP = \"map\" ATTR_MEMORY_LIMIT = \"memory_limit\"", "= \"profile\" SECURITY_DEFAULT = \"default\" SECURITY_DISABLE = \"disable\" ROLE_DEFAULT =", "ARCH_I386] REPOSITORY_CORE = \"core\" REPOSITORY_LOCAL = \"local\" FOLDER_HOMEASSISTANT = \"homeassistant\"", "\"hassos\" HOSTNAME = \"hostname\" NETWORK = \"network\" REBOOT = \"reboot\"", "= \"static\" ATTR_STDIN = \"stdin\" ATTR_STORAGE = \"storage\" ATTR_SUGGESTIONS =", "state.\"\"\" INITIALIZE = \"initialize\" SETUP = \"setup\" STARTUP = \"startup\"", "\"checks\" ATTR_CLI = \"cli\" ATTR_CONFIG = \"config\" ATTR_CONFIGURATION = \"configuration\"", "= \"suggestions\" ATTR_SUPERVISOR = \"supervisor\" ATTR_SUPERVISOR_INTERNET = \"supervisor_internet\" ATTR_SUPPORTED =", "= \"result\" RESULT_ERROR = \"error\" RESULT_OK = \"ok\" CONTENT_TYPE_BINARY =", "= \"multicast\" ATTR_NAME = \"name\" ATTR_NAMESERVERS = \"nameservers\" ATTR_NETWORK =", "\"unsupported\" ATTR_UPDATE_AVAILABLE = \"update_available\" ATTR_UPDATE_KEY = \"update_key\" ATTR_URL = \"url\"", "= \"media\" ARCH_ARMHF = \"armhf\" ARCH_ARMV7 = \"armv7\" ARCH_AARCH64 =", "ATTR_SIZE = \"size\" ATTR_SLUG = \"slug\" ATTR_SNAPSHOT_EXCLUDE = 
\"snapshot_exclude\" ATTR_SNAPSHOTS", "\"once\" class AddonStage(str, Enum): \"\"\"Stage types of add-on.\"\"\" STABLE =", "= \"snapshot_exclude\" ATTR_SNAPSHOTS = \"snapshots\" ATTR_SOURCE = \"source\" ATTR_SQUASH =", "ARCH_ALL = [ARCH_ARMHF, ARCH_ARMV7, ARCH_AARCH64, ARCH_AMD64, ARCH_I386] REPOSITORY_CORE = \"core\"", "= \"io.hass.machine\" LABEL_TYPE = \"io.hass.type\" LABEL_VERSION = \"io.hass.version\" META_ADDON =", "\"mac\" ATTR_MACHINE = \"machine\" ATTR_MAINTAINER = \"maintainer\" ATTR_MAP = \"map\"", "Path SUPERVISOR_VERSION = \"DEV\" URL_HASSIO_ADDONS = \"https://github.com/home-assistant/addons\" URL_HASSIO_APPARMOR = \"https://version.home-assistant.io/apparmor.txt\"", "ATTR_BLK_WRITE = \"blk_write\" ATTR_BOARD = \"board\" ATTR_BOOT = \"boot\" ATTR_BRANCH", "NEED_SERVICE = \"need\" WANT_SERVICE = \"want\" MAP_CONFIG = \"config\" MAP_SSL", "\"totp\" ATTR_TRANSLATIONS = \"translations\" ATTR_TYPE = \"type\" ATTR_UART = \"uart\"", "\"input\" ATTR_INSTALLED = \"installed\" ATTR_INTERFACE = \"interface\" ATTR_INTERFACES = \"interfaces\"", "= \"timeout\" ATTR_TIMEZONE = \"timezone\" ATTR_TITLE = \"title\" ATTR_TMPFS =", "ATTR_PORTS = \"ports\" ATTR_PORTS_DESCRIPTION = \"ports_description\" ATTR_PREFIX = \"prefix\" ATTR_PRIMARY", "\"wifi\" ATTR_CONTENT_TRUST = \"content_trust\" ATTR_FORCE_SECURITY = \"force_security\" PROVIDE_SERVICE = \"provide\"", "Path(SUPERVISOR_DATA, \"auth.json\") FILE_HASSIO_CONFIG = Path(SUPERVISOR_DATA, \"config.json\") FILE_HASSIO_DISCOVERY = Path(SUPERVISOR_DATA, \"discovery.json\")", "ATTR_DOMAINS = \"domains\" ATTR_ENABLE = \"enable\" ATTR_ENABLED = \"enabled\" ATTR_ENVIRONMENT", "\"environment\" ATTR_EVENT = \"event\" ATTR_FEATURES = \"features\" ATTR_FILENAME = \"filename\"", "\"filename\" ATTR_FLAGS = \"flags\" ATTR_FOLDERS = \"folders\" ATTR_FREQUENCY = \"frequency\"", "\"ingress_panel\" ATTR_INGRESS_PORT = \"ingress_port\" ATTR_INGRESS_TOKEN = \"ingress_token\" ATTR_INGRESS_URL = \"ingress_url\"", "\"unhealthy\" ATTR_UNSAVED 
= \"unsaved\" ATTR_UNSUPPORTED = \"unsupported\" ATTR_UPDATE_AVAILABLE = \"update_available\"", "AddonStartup(str, Enum): \"\"\"Startup types of Add-on.\"\"\" INITIALIZE = \"initialize\" SYSTEM", "ONCE = \"once\" class AddonStage(str, Enum): \"\"\"Stage types of add-on.\"\"\"", "URL_HASSIO_APPARMOR = \"https://version.home-assistant.io/apparmor.txt\" URL_HASSIO_VERSION = \"https://version.home-assistant.io/{channel}.json\" SUPERVISOR_DATA = Path(\"/data\") FILE_HASSIO_ADDONS", "ATTR_SECURITY = \"security\" ATTR_SERIAL = \"serial\" ATTR_SERVERS = \"servers\" ATTR_SERVICE", "= \"version_latest\" ATTR_VIDEO = \"video\" ATTR_VLAN = \"vlan\" ATTR_VOLUME =", "class AddonBoot(str, Enum): \"\"\"Boot mode for the add-on.\"\"\" AUTO =", "are guarantees, hence we cannot allocate more # time than", "ATTR_DISK_LIFE_TIME = \"disk_life_time\" ATTR_DISK_TOTAL = \"disk_total\" ATTR_DISK_USED = \"disk_used\" ATTR_DNS", "= \"i386\" ARCH_ALL = [ARCH_ARMHF, ARCH_ARMV7, ARCH_AARCH64, ARCH_AMD64, ARCH_I386] REPOSITORY_CORE", "\"freeze\" SHUTDOWN = \"shutdown\" STOPPING = \"stopping\" CLOSE = \"close\"", "= \"flags\" ATTR_FOLDERS = \"folders\" ATTR_FREQUENCY = \"frequency\" ATTR_FULL_ACCESS =", "\"host_dbus\" ATTR_HOST_INTERNET = \"host_internet\" ATTR_HOST_IPC = \"host_ipc\" ATTR_HOST_NETWORK = \"host_network\"", "= \"amd64\" ARCH_I386 = \"i386\" ARCH_ALL = [ARCH_ARMHF, ARCH_ARMV7, ARCH_AARCH64,", "count. 
This means that # a single container can schedule", "FOLDER_SSL = \"ssl\" FOLDER_MEDIA = \"media\" SNAPSHOT_FULL = \"full\" SNAPSHOT_PARTIAL", "\"operating_system\" ATTR_OPTIONS = \"options\" ATTR_OTA = \"ota\" ATTR_OUTPUT = \"output\"", "ATTR_MAC = \"mac\" ATTR_MACHINE = \"machine\" ATTR_MAINTAINER = \"maintainer\" ATTR_MAP", "STABLE = \"stable\" EXPERIMENTAL = \"experimental\" DEPRECATED = \"deprecated\" class", "WANT_SERVICE = \"want\" MAP_CONFIG = \"config\" MAP_SSL = \"ssl\" MAP_ADDONS", "= \"TZ\" ENV_TOKEN = \"SUPERVISOR_TOKEN\" ENV_TOKEN_HASSIO = \"HASSIO_TOKEN\" ENV_HOMEASSISTANT_REPOSITORY =", "more # time than available! Support up to 5 containers", "ENV_HOMEASSISTANT_REPOSITORY = \"HOMEASSISTANT_REPOSITORY\" ENV_SUPERVISOR_DEV = \"SUPERVISOR_DEV\" ENV_SUPERVISOR_MACHINE = \"SUPERVISOR_MACHINE\" ENV_SUPERVISOR_NAME", "ATTR_DIAGNOSTICS = \"diagnostics\" ATTR_DISCOVERY = \"discovery\" ATTR_DISK = \"disk\" ATTR_DISK_FREE", "\"method\" ATTR_MODE = \"mode\" ATTR_MULTICAST = \"multicast\" ATTR_NAME = \"name\"", "\"services\" ATTR_SESSION = \"session\" ATTR_SIGNAL = \"signal\" ATTR_SIZE = \"size\"", "SECURITY_DISABLE = \"disable\" ROLE_DEFAULT = \"default\" ROLE_HOMEASSISTANT = \"homeassistant\" ROLE_BACKUP", "\"ingress_url\" ATTR_INIT = \"init\" ATTR_INITIALIZE = \"initialize\" ATTR_INPUT = \"input\"", "= \"homeassistant_api\" ATTR_HOST = \"host\" ATTR_HOST_DBUS = \"host_dbus\" ATTR_HOST_INTERNET =", "= \"devices\" ATTR_DEVICETREE = \"devicetree\" ATTR_DIAGNOSTICS = \"diagnostics\" ATTR_DISCOVERY =", "= \"machine\" ATTR_MAINTAINER = \"maintainer\" ATTR_MAP = \"map\" ATTR_MEMORY_LIMIT =", "= \"observer\" ATTR_OPERATING_SYSTEM = \"operating_system\" ATTR_OPTIONS = \"options\" ATTR_OTA =", "ATTR_APPARMOR = \"apparmor\" ATTR_APPLICATION = \"application\" ATTR_ARCH = \"arch\" ATTR_ARGS", "= \"data\" ATTR_DATE = \"date\" ATTR_DEBUG = \"debug\" ATTR_DEBUG_BLOCK =", "ATTR_DEBUG = \"debug\" ATTR_DEBUG_BLOCK = \"debug_block\" ATTR_DEFAULT = \"default\" ATTR_DEPLOYMENT", 
"= \"psk\" ATTR_RATING = \"rating\" ATTR_REALTIME = \"realtime\" ATTR_REFRESH_TOKEN =", "ATTR_SSID = \"ssid\" ATTR_SSL = \"ssl\" ATTR_STAGE = \"stage\" ATTR_STARTUP", "DOCKER_NETWORK_MASK = ip_network(\"172.30.32.0/23\") DOCKER_NETWORK_RANGE = ip_network(\"172.30.33.0/24\") # This needs to", "ATTR_MEMORY_LIMIT = \"memory_limit\" ATTR_MEMORY_PERCENT = \"memory_percent\" ATTR_MEMORY_USAGE = \"memory_usage\" ATTR_MESSAGE", "= \"tmpfs\" ATTR_TOTP = \"totp\" ATTR_TRANSLATIONS = \"translations\" ATTR_TYPE =", "ATTR_FORCE_SECURITY = \"force_security\" PROVIDE_SERVICE = \"provide\" NEED_SERVICE = \"need\" WANT_SERVICE", "MAP_BACKUP = \"backup\" MAP_SHARE = \"share\" MAP_MEDIA = \"media\" ARCH_ARMHF", "= \"output\" ATTR_PANEL_ADMIN = \"panel_admin\" ATTR_PANEL_ICON = \"panel_icon\" ATTR_PANEL_TITLE =", "SOCKET_DOCKER = Path(\"/run/docker.sock\") RUN_SUPERVISOR_STATE = Path(\"/run/supervisor\") SYSTEMD_JOURNAL_PERSISTENT = Path(\"/var/log/journal\") SYSTEMD_JOURNAL_VOLATILE", "= \"ipv6\" ATTR_ISSUES = \"issues\" ATTR_KERNEL = \"kernel\" ATTR_KERNEL_MODULES =", "= \"panel_title\" ATTR_PANELS = \"panels\" ATTR_PARENT = \"parent\" ATTR_PASSWORD =", "\"host_ipc\" ATTR_HOST_NETWORK = \"host_network\" ATTR_HOST_PID = \"host_pid\" ATTR_HOSTNAME = \"hostname\"", "\"observer\" ATTR_OPERATING_SYSTEM = \"operating_system\" ATTR_OPTIONS = \"options\" ATTR_OTA = \"ota\"", "= \"uart\" ATTR_UDEV = \"udev\" ATTR_UNHEALTHY = \"unhealthy\" ATTR_UNSAVED =", "= \"config\" MAP_SSL = \"ssl\" MAP_ADDONS = \"addons\" MAP_BACKUP =", "= \"SUPERVISOR_DEV\" ENV_SUPERVISOR_MACHINE = \"SUPERVISOR_MACHINE\" ENV_SUPERVISOR_NAME = \"SUPERVISOR_NAME\" ENV_SUPERVISOR_SHARE =", "= \"totp\" ATTR_TRANSLATIONS = \"translations\" ATTR_TYPE = \"type\" ATTR_UART =", "CoreState(str, Enum): \"\"\"Represent current loading state.\"\"\" INITIALIZE = \"initialize\" SETUP", "= \"image/png\" CONTENT_TYPE_TAR = \"application/tar\" CONTENT_TYPE_TEXT = \"text/plain\" CONTENT_TYPE_URL =", "STOPPING = \"stopping\" CLOSE = 
\"close\" class LogLevel(str, Enum): \"\"\"Logging", "\"dns\" ATTR_DOCKER = \"docker\" ATTR_DOCKER_API = \"docker_api\" ATTR_DOCUMENTATION = \"documentation\"", "= \"icon\" ATTR_ID = \"id\" ATTR_IMAGE = \"image\" ATTR_IMAGES =", "\"ssl\" MAP_ADDONS = \"addons\" MAP_BACKUP = \"backup\" MAP_SHARE = \"share\"", "\"hassio_api\" ATTR_HASSIO_ROLE = \"hassio_role\" ATTR_HASSOS = \"hassos\" ATTR_HEALTHY = \"healthy\"", "\"HASSIO_TOKEN\" ENV_HOMEASSISTANT_REPOSITORY = \"HOMEASSISTANT_REPOSITORY\" ENV_SUPERVISOR_DEV = \"SUPERVISOR_DEV\" ENV_SUPERVISOR_MACHINE = \"SUPERVISOR_MACHINE\"", "= \"startup\" ATTR_STATE = \"state\" ATTR_STATIC = \"static\" ATTR_STDIN =", "\"type\" ATTR_UART = \"uart\" ATTR_UDEV = \"udev\" ATTR_UNHEALTHY = \"unhealthy\"", "= \"board\" ATTR_BOOT = \"boot\" ATTR_BRANCH = \"branch\" ATTR_BUILD =", "ATTR_DISK_FREE = \"disk_free\" ATTR_DISK_LIFE_TIME = \"disk_life_time\" ATTR_DISK_TOTAL = \"disk_total\" ATTR_DISK_USED", "containers with equal time # allocated. # Note that the", "\"init\" ATTR_INITIALIZE = \"initialize\" ATTR_INPUT = \"input\" ATTR_INSTALLED = \"installed\"", "= \"input\" ATTR_INSTALLED = \"installed\" ATTR_INTERFACE = \"interface\" ATTR_INTERFACES =", "= \"message\" ATTR_METHOD = \"method\" ATTR_MODE = \"mode\" ATTR_MULTICAST =", "ATTR_STATIC = \"static\" ATTR_STDIN = \"stdin\" ATTR_STORAGE = \"storage\" ATTR_SUGGESTIONS", "= \"kernel_modules\" ATTR_LAST_BOOT = \"last_boot\" ATTR_LEGACY = \"legacy\" ATTR_LOCALS =", "ATTR_SUGGESTIONS = \"suggestions\" ATTR_SUPERVISOR = \"supervisor\" ATTR_SUPERVISOR_INTERNET = \"supervisor_internet\" ATTR_SUPPORTED", "MAP_CONFIG = \"config\" MAP_SSL = \"ssl\" MAP_ADDONS = \"addons\" MAP_BACKUP", "ARCH_ARMV7, ARCH_AARCH64, ARCH_AMD64, ARCH_I386] REPOSITORY_CORE = \"core\" REPOSITORY_LOCAL = \"local\"", "\"blk_read\" ATTR_BLK_WRITE = \"blk_write\" ATTR_BOARD = \"board\" ATTR_BOOT = \"boot\"", "= \"checks\" ATTR_CLI = \"cli\" ATTR_CONFIG = \"config\" ATTR_CONFIGURATION =", "\"SUPERVISOR_MACHINE\" 
ENV_SUPERVISOR_NAME = \"SUPERVISOR_NAME\" ENV_SUPERVISOR_SHARE = \"SUPERVISOR_SHARE\" ENV_SUPERVISOR_CPU_RT = \"SUPERVISOR_CPU_RT\"", "\"gateway\" ATTR_GPIO = \"gpio\" ATTR_HASSIO_API = \"hassio_api\" ATTR_HASSIO_ROLE = \"hassio_role\"", "\"manager\" ROLE_ADMIN = \"admin\" ROLE_ALL = [ROLE_DEFAULT, ROLE_HOMEASSISTANT, ROLE_BACKUP, ROLE_MANAGER,", "= \"registry\" ATTR_REPOSITORIES = \"repositories\" ATTR_REPOSITORY = \"repository\" ATTR_SCHEMA =", "CRITICAL = \"critical\" class HostFeature(str, Enum): \"\"\"Host feature.\"\"\" HASSOS =", "in RT priority # on a quad core system. DOCKER_CPU_RUNTIME_ALLOCATION", "= \"state\" ATTR_STATIC = \"static\" ATTR_STDIN = \"stdin\" ATTR_STORAGE =", "= \"session\" ATTR_SIGNAL = \"signal\" ATTR_SIZE = \"size\" ATTR_SLUG =", "ATTR_SSL = \"ssl\" ATTR_STAGE = \"stage\" ATTR_STARTUP = \"startup\" ATTR_STATE", "= \"accesspoints\" ATTR_ACTIVE = \"active\" ATTR_ADDON = \"addon\" ATTR_ADDONS =", "= \"card\" ATTR_CHANGELOG = \"changelog\" ATTR_CHANNEL = \"channel\" ATTR_CHASSIS =", "= \"crypto\" ATTR_DATA = \"data\" ATTR_DATE = \"date\" ATTR_DEBUG =", "\"images\" ATTR_INDEX = \"index\" ATTR_INGRESS = \"ingress\" ATTR_INGRESS_ENTRY = \"ingress_entry\"", "ERROR = \"error\" class UpdateChannel(str, Enum): \"\"\"Core supported update channels.\"\"\"", "\"X-Hassio-Key\" ENV_TIME = \"TZ\" ENV_TOKEN = \"SUPERVISOR_TOKEN\" ENV_TOKEN_HASSIO = \"HASSIO_TOKEN\"", "ip_network from pathlib import Path SUPERVISOR_VERSION = \"DEV\" URL_HASSIO_ADDONS =", "= \"containers\" ATTR_CPE = \"cpe\" ATTR_CPU_PERCENT = \"cpu_percent\" ATTR_CRYPTO =", "= \"installed\" ATTR_INTERFACE = \"interface\" ATTR_INTERFACES = \"interfaces\" ATTR_IP_ADDRESS =", "\"message\" JSON_RESULT = \"result\" RESULT_ERROR = \"error\" RESULT_OK = \"ok\"", "\"homeassistant.json\") FILE_HASSIO_INGRESS = Path(SUPERVISOR_DATA, \"ingress.json\") FILE_HASSIO_SERVICES = Path(SUPERVISOR_DATA, \"services.json\") FILE_HASSIO_UPDATER", "time is multiplied by CPU count. 
This means that #", "\"provide\" NEED_SERVICE = \"need\" WANT_SERVICE = \"want\" MAP_CONFIG = \"config\"", "ATTR_PREFIX = \"prefix\" ATTR_PRIMARY = \"primary\" ATTR_PRIORITY = \"priority\" ATTR_PRIVILEGED", "= \"docker\" ATTR_DOCKER_API = \"docker_api\" ATTR_DOCUMENTATION = \"documentation\" ATTR_DOMAINS =", "950/5*4 = 760ms in RT priority # on a quad", "\"signal\" ATTR_SIZE = \"size\" ATTR_SLUG = \"slug\" ATTR_SNAPSHOT_EXCLUDE = \"snapshot_exclude\"", "ARCH_AARCH64, ARCH_AMD64, ARCH_I386] REPOSITORY_CORE = \"core\" REPOSITORY_LOCAL = \"local\" FOLDER_HOMEASSISTANT", "\"media\" SNAPSHOT_FULL = \"full\" SNAPSHOT_PARTIAL = \"partial\" CRYPTO_AES128 = \"aes128\"", "\"error\" RESULT_OK = \"ok\" CONTENT_TYPE_BINARY = \"application/octet-stream\" CONTENT_TYPE_JSON = \"application/json\"", "\"https://version.home-assistant.io/{channel}.json\" SUPERVISOR_DATA = Path(\"/data\") FILE_HASSIO_ADDONS = Path(SUPERVISOR_DATA, \"addons.json\") FILE_HASSIO_AUTH =", "\"host_internet\" ATTR_HOST_IPC = \"host_ipc\" ATTR_HOST_NETWORK = \"host_network\" ATTR_HOST_PID = \"host_pid\"", "= \"ingress_entry\" ATTR_INGRESS_PANEL = \"ingress_panel\" ATTR_INGRESS_PORT = \"ingress_port\" ATTR_INGRESS_TOKEN =", "ATTR_INITIALIZE = \"initialize\" ATTR_INPUT = \"input\" ATTR_INSTALLED = \"installed\" ATTR_INTERFACE", "\"i386\" ARCH_ALL = [ARCH_ARMHF, ARCH_ARMV7, ARCH_AARCH64, ARCH_AMD64, ARCH_I386] REPOSITORY_CORE =", "CRYPTO_AES128 = \"aes128\" SECURITY_PROFILE = \"profile\" SECURITY_DEFAULT = \"default\" SECURITY_DISABLE", "is multiplied by CPU count. 
This means that # a", "= \"media\" SNAPSHOT_FULL = \"full\" SNAPSHOT_PARTIAL = \"partial\" CRYPTO_AES128 =", "ATTR_CPE = \"cpe\" ATTR_CPU_PERCENT = \"cpu_percent\" ATTR_CRYPTO = \"crypto\" ATTR_DATA", "SECURITY_PROFILE = \"profile\" SECURITY_DEFAULT = \"default\" SECURITY_DISABLE = \"disable\" ROLE_DEFAULT", "Path(\"/run/log/journal\") DOCKER_NETWORK = \"hassio\" DOCKER_NETWORK_MASK = ip_network(\"172.30.32.0/23\") DOCKER_NETWORK_RANGE = ip_network(\"172.30.33.0/24\")", "= \"arch\" ATTR_ARGS = \"args\" ATTR_LABELS = \"labels\" ATTR_AUDIO =", "= \"issues\" ATTR_KERNEL = \"kernel\" ATTR_KERNEL_MODULES = \"kernel_modules\" ATTR_LAST_BOOT =", "FOLDER_SHARE = \"share\" FOLDER_ADDONS = \"addons/local\" FOLDER_SSL = \"ssl\" FOLDER_MEDIA", "\"manual\" class AddonStartup(str, Enum): \"\"\"Startup types of Add-on.\"\"\" INITIALIZE =", "\"network\" REBOOT = \"reboot\" SERVICES = \"services\" SHUTDOWN = \"shutdown\"", "ATTR_LEGACY = \"legacy\" ATTR_LOCALS = \"locals\" ATTR_LOCATON = \"location\" ATTR_LOGGING", "ATTR_AUTH = \"auth\" ATTR_AUTH_API = \"auth_api\" ATTR_AUTO_UPDATE = \"auto_update\" ATTR_AVAILABLE", "REQUEST_FROM = \"HASSIO_FROM\" ATTR_ACCESS_TOKEN = \"access_token\" ATTR_ACCESSPOINTS = \"accesspoints\" ATTR_ACTIVE", "ATTR_DISCOVERY = \"discovery\" ATTR_DISK = \"disk\" ATTR_DISK_FREE = \"disk_free\" ATTR_DISK_LIFE_TIME", "\"addons.json\") FILE_HASSIO_AUTH = Path(SUPERVISOR_DATA, \"auth.json\") FILE_HASSIO_CONFIG = Path(SUPERVISOR_DATA, \"config.json\") FILE_HASSIO_DISCOVERY", "= \"changelog\" ATTR_CHANNEL = \"channel\" ATTR_CHASSIS = \"chassis\" ATTR_CHECKS =", "ATTR_LOGGING = \"logging\" ATTR_LOGO = \"logo\" ATTR_LONG_DESCRIPTION = \"long_description\" ATTR_MAC", "ATTR_HOST = \"host\" ATTR_HOST_DBUS = \"host_dbus\" ATTR_HOST_INTERNET = \"host_internet\" ATTR_HOST_IPC", "ATTR_LOCALS = \"locals\" ATTR_LOCATON = \"location\" ATTR_LOGGING = \"logging\" ATTR_LOGO", "= \"supervisor\" JSON_DATA = \"data\" JSON_MESSAGE = \"message\" JSON_RESULT =", "= \"operating_system\" 
ATTR_OPTIONS = \"options\" ATTR_OTA = \"ota\" ATTR_OUTPUT =", "\"port\" ATTR_PORTS = \"ports\" ATTR_PORTS_DESCRIPTION = \"ports_description\" ATTR_PREFIX = \"prefix\"", "\"rating\" ATTR_REALTIME = \"realtime\" ATTR_REFRESH_TOKEN = \"refresh_token\" ATTR_REGISTRIES = \"registries\"", "RUN_SUPERVISOR_STATE = Path(\"/run/supervisor\") SYSTEMD_JOURNAL_PERSISTENT = Path(\"/var/log/journal\") SYSTEMD_JOURNAL_VOLATILE = Path(\"/run/log/journal\") DOCKER_NETWORK", "= 950_000 # The rt runtimes are guarantees, hence we", "= \"initialize\" SETUP = \"setup\" STARTUP = \"startup\" RUNNING =", "= \"info\" WARNING = \"warning\" ERROR = \"error\" CRITICAL =", "= \"apparmor\" ATTR_APPLICATION = \"application\" ATTR_ARCH = \"arch\" ATTR_ARGS =", "= \"hassio_api\" ATTR_HASSIO_ROLE = \"hassio_role\" ATTR_HASSOS = \"hassos\" ATTR_HEALTHY =", "SETUP = \"setup\" STARTUP = \"startup\" RUNNING = \"running\" FREEZE", "= \"boot\" ATTR_BRANCH = \"branch\" ATTR_BUILD = \"build\" ATTR_BUILD_FROM =", "\"addons\" ATTR_ADDONS_CUSTOM_LIST = \"addons_custom_list\" ATTR_ADDONS_REPOSITORIES = \"addons_repositories\" ATTR_ADDRESS = \"address\"", "ATTR_MAP = \"map\" ATTR_MEMORY_LIMIT = \"memory_limit\" ATTR_MEMORY_PERCENT = \"memory_percent\" ATTR_MEMORY_USAGE", "SNAPSHOT_FULL = \"full\" SNAPSHOT_PARTIAL = \"partial\" CRYPTO_AES128 = \"aes128\" SECURITY_PROFILE", "= \"docker_api\" ATTR_DOCUMENTATION = \"documentation\" ATTR_DOMAINS = \"domains\" ATTR_ENABLE =" ]
[ "self.icon = '.' self.color = choice(self.colors) self.world.add_agent(self) def die(self): \"\"\"", "as oneself. \"\"\" return [agent for agent in self.world.grid[self.coords]['agents'] if", "x in range(self.coords[0] - radius, self.coords[0] + radius + 1)", "coords != self.coords: xdif = coords[0] - self.coords[0] ydif =", "[agent for agent in self.world.grid[self.coords]['agents'] if agent is not self]", "all layers are returned. \"\"\" if layer is not None:", "and respective attributes within a distance of the agent. \"\"\"", "sig.parameters.values() if param.kind == param.POSITIONAL_OR_KEYWORD] filtered_dict = {filter_key: self.__dict__[filter_key] for", "the distance (in cells) from the agent to a pair", "inspect.signature(self.__init__) filter_keys = [param.name for param in sig.parameters.values() if param.kind", "a number of degrees to the left. \"\"\" self.direction =", "the same parameters as oneself. \"\"\" sig = inspect.signature(self.__init__) filter_keys", "def face_towards(self, coords: Tuple): \"\"\" Turns the agent's direction towards", "+ 1) if (self.get_distance((x, y)) <= radius and not self.world.grid[self.world.to_torus((x,", "n_steps: int = 1): \"\"\" Moves the agent one cell", "+ angle) % 360) def random_walk(self, n_steps: int = 1):", "<= radius and not self.world.grid[self.world.to_torus((x, y))] ['agents'])} else: neighborhood =", "(self.get_distance((x, y)) <= radius and (x, y) in self.world.grid)} return", "all agents located on the same cell as oneself. \"\"\"", "angle: int = 90): \"\"\" Rotates the agent's direction a", "radius, self.coords[1] + radius + 1) if (self.get_distance((x, y)) <=", "the agent at each step of the simulation. \"\"\" raise", "cells_in_radius(self, radius: int) -> Dict: \"\"\" Returns all cells and", "no agents on them) and respective attributes within a distance", "list is provided, all agents are evaluated. 
\"\"\" if agents", "[self.world.agents[_id] for _id in self.world.agents] dists = {agent: self.get_distance(agent.coords) for", "coordinates. \"\"\" if coords != self.coords: xdif = coords[0] -", "the agent is. If no layer is specified, the values", "Union class Agent(metaclass=ABCMeta): \"\"\" Class to represent an agent in", "+ angle) % 360) def forward(self, n_steps: int = 1):", "def forward(self, n_steps: int = 1): \"\"\" Moves the agent", "values of all layers are returned. \"\"\" if layer is", "for coords in neighborhood for agent in self.world.grid[coords]['agents'] if agent", "- self.coords[1]))) def cells_in_radius(self, radius: int) -> Dict: \"\"\" Returns", "coords[1] - self.coords[1] dist = hypot(xdif, ydif) angle = degrees(asin(ydif", "filter_keys} return self.__class__(**filtered_dict) def move_to(self, coords: Tuple): \"\"\" Places the", "the left. \"\"\" self.direction = round((self.direction + angle) % 360)", "_id = 0 colors = ['blue', 'brown', 'cyan', 'gray', 'green',", "radius + 1) if (self.get_distance((x, y)) <= radius and (x,", "agent to a pair of coordinates. \"\"\" x, y =", "\"\"\" Moves the agent one cell forward in a random", "a random direction for a number of times. \"\"\" for", "model. \"\"\" _id = 0 colors = ['blue', 'brown', 'cyan',", "def turn_left(self, angle: int = 90): \"\"\" Rotates the agent's", "def hatch(self): \"\"\" Creates an agent and initializes it with", "direction a number of degrees to the right. \"\"\" self.direction", "degrees to the right. 
\"\"\" self.direction = round((self.direction - angle)", "ydif = coords[1] - self.coords[1] dist = hypot(xdif, ydif) angle", "= [param.name for param in sig.parameters.values() if param.kind == param.POSITIONAL_OR_KEYWORD]", "angle = degrees(asin(ydif / dist)) if xdif < 0: self.direction", "Tuple) -> int: \"\"\" Returns the distance (in cells) from", "neighbors = [agent for coords in neighborhood for agent in", "= round((self.direction - angle) % 360) def turn_left(self, angle: int", "number of times. \"\"\" for i in range(n_steps): self.turn_right(randint(0, 360))", "for filter_key in filter_keys} return self.__class__(**filtered_dict) def move_to(self, coords: Tuple):", "If no list is provided, all agents are evaluated. \"\"\"", "(x, y) in self.world.grid and not self.world.grid[(x, y)]['agents'])} return neighborhood", "grid. \"\"\" self.world.remove_from_grid(self) self.coords = coords self.world.place_on_grid(self) def cell_here(self, layer", "else: self.direction = round((360 + angle) % 360) def random_walk(self,", "in range(n_steps): self.turn_right(randint(0, 360)) self.forward() @abstractmethod def step(self): \"\"\" Methods", "self.world.grid[self.coords]['agents'] if agent is not self] def nearest_agent(self, agents: List", "360) def random_walk(self, n_steps: int = 1): \"\"\" Moves the", "get_distance(self, coords: Tuple) -> int: \"\"\" Returns the distance (in", "self.world.agents] dists = {agent: self.get_distance(agent.coords) for agent in agents if", "- 1), randint(0, self.world.height - 1)) self.direction = 90 self.breed", "= self.cells_in_radius(radius) neighbors = [agent for coords in neighborhood for", "key=dists.get) def agents_in_radius(self, radius: int): \"\"\" Returns all agents within", "world, coords: Tuple = None): self._id = Agent._id Agent._id +=", "= round((360 + angle) % 360) def random_walk(self, n_steps: int", "= hypot(xdif, ydif) angle = degrees(asin(ydif / dist)) if xdif", "% 360) def random_walk(self, n_steps: int = 1): \"\"\" 
Moves", "agent is not self} return min(dists, key=dists.get) def turn_right(self, angle:", "randint(0, self.world.height - 1)) self.direction = 90 self.breed = self.__class__.__name__.lower()", "= choice(self.colors) self.world.add_agent(self) def die(self): \"\"\" Remove the agent from", "(x, y) in self.world.grid: self.move_to((x, y)) def face_towards(self, coords: Tuple):", "the agent. \"\"\" if self.world.torus: neighborhood = {self.world.to_torus((x, y)): self.world.grid[self.world.to_torus((x,", "self.world.width - 1), randint(0, self.world.height - 1)) self.direction = 90", "to represent an agent in an agent-based model. \"\"\" _id", "\"\"\" Given a list or dictionary of cells, returns the", "angle) % 360) def random_walk(self, n_steps: int = 1): \"\"\"", "else: neighborhood = {(x, y): self.world.grid[(x, y)] for x in", "dictionary of cells, returns the coordinates of the cell that", "asin, cos, radians, degrees from abc import ABCMeta, abstractmethod from", "range(self.coords[1] - radius, self.coords[1] + radius + 1) if self.get_distance((x,", "= [agent for coords in neighborhood for agent in self.world.grid[coords]['agents']", "a layer in the model's grid for the cell where", "neighborhood = {self.world.to_torus((x, y)): self.world.grid[self.world.to_torus((x, y))] for x in range(self.coords[0]", "x, y = coords return round(hypot((x - self.coords[0]), (y -", "agents_in_radius(self, radius: int): \"\"\" Returns all agents within a distance", "\"\"\" if layer is not None: return self.world.grid[self.coords][layer] else: return", "+ cos(radians(self.direction)) * n_steps) y = round(self.coords[1] + sin(radians(self.direction)) *", "coordinates. 
\"\"\" x, y = coords return round(hypot((x - self.coords[0]),", "1) for y in range(self.coords[1] - radius, self.coords[1] + radius", "not None: return self.world.grid[self.coords][layer] else: return self.world.grid[self.coords] def get_distance(self, coords:", "random import randint, choice from typing import Dict, List, Tuple,", "of coordinates. \"\"\" x, y = coords return round(hypot((x -", "that is nearest to the agent. \"\"\" dists = {cell:", "xdif < 0: self.direction = round(180 - angle) else: self.direction", "to the left. \"\"\" self.direction = round((self.direction + angle) %", "not self] return neighbors def agents_here(self) -> List: \"\"\" Returns", "def agents_in_radius(self, radius: int): \"\"\" Returns all agents within a", "Turns the agent's direction towards a given pair of coordinates.", "min(dists, key=dists.get) def agents_in_radius(self, radius: int): \"\"\" Returns all agents", "n_steps) y = round(self.coords[1] + sin(radians(self.direction)) * n_steps) if self.world.torus:", "= round(180 - angle) else: self.direction = round((360 + angle)", "in range(self.coords[1] - radius, self.coords[1] + radius + 1) if", "that is nearest to oneself. If no list is provided,", "no layer is specified, the values of all layers are", "for a number of times. \"\"\" for i in range(n_steps):", "- self.coords[1] dist = hypot(xdif, ydif) angle = degrees(asin(ydif /", "- radius, self.coords[1] + radius + 1) if (self.get_distance((x, y))", "if (self.get_distance((x, y)) <= radius and not self.world.grid[self.world.to_torus((x, y))] ['agents'])}", "a pair of coordinates. \"\"\" x, y = coords return", "\"\"\" del self.world.agents[self._id] self.world.grid[self.coords]['agents'].remove(self) del self def hatch(self): \"\"\" Creates", "Moves the agent one cell forward in a random direction", "self.coords[1] dist = hypot(xdif, ydif) angle = degrees(asin(ydif / dist))", "int) -> Dict: \"\"\" Returns all empty cells (with no", "is nearest to oneself. 
If no list is provided, all", "move_to(self, coords: Tuple): \"\"\" Places the agent in a different", "self.world.grid[(x, y)] for x in range(self.coords[0] - radius, self.coords[0] +", "n_steps) if self.world.torus: self.move_to(self.world.to_torus((x, y))) elif (x, y) in self.world.grid:", "\"\"\" sig = inspect.signature(self.__init__) filter_keys = [param.name for param in", "cell forward in a random direction for a number of", "neighbors def agents_here(self) -> List: \"\"\" Returns all agents located", "for cell in cells} return min(dists, key=dists.get) def agents_in_radius(self, radius:", "in agents if agent is not self} return min(dists, key=dists.get)", "in sig.parameters.values() if param.kind == param.POSITIONAL_OR_KEYWORD] filtered_dict = {filter_key: self.__dict__[filter_key]", "model's grid for the cell where the agent is. If", "in self.world.grid: self.move_to((x, y)) def face_towards(self, coords: Tuple): \"\"\" Turns", "* n_steps) y = round(self.coords[1] + sin(radians(self.direction)) * n_steps) if", "- 1)) self.direction = 90 self.breed = self.__class__.__name__.lower() self.icon =", "\"\"\" if coords != self.coords: xdif = coords[0] - self.coords[0]", "layers are returned. \"\"\" if layer is not None: return", "agent in self.world.grid[self.coords]['agents'] if agent is not self] def nearest_agent(self,", "all agents are evaluated. \"\"\" if agents is None: agents", "Returns all agents within a distance of oneself. \"\"\" neighborhood", "currently facing. 
\"\"\" x = round(self.coords[0] + cos(radians(self.direction)) * n_steps)", "round(self.coords[0] + cos(radians(self.direction)) * n_steps) y = round(self.coords[1] + sin(radians(self.direction))", "(with no agents on them) and respective attributes within a", "cells, returns the coordinates of the cell that is nearest", "radius: int) -> Dict: \"\"\" Returns all empty cells (with", "agents: List = None): \"\"\" Given a list of agents,", "from typing import Dict, List, Tuple, Union class Agent(metaclass=ABCMeta): \"\"\"", "0: self.direction = round(180 - angle) else: self.direction = round((360", "= Agent._id Agent._id += 1 self.world = world self.coords =", "different cell of the world grid. \"\"\" self.world.remove_from_grid(self) self.coords =", "in self.world.grid[self.coords]['agents'] if agent is not self] def nearest_agent(self, agents:", "self.direction = round(180 - angle) else: self.direction = round((360 +", "self def hatch(self): \"\"\" Creates an agent and initializes it", "self.get_distance((x, y)) <= radius} else: neighborhood = {(x, y): self.world.grid[(x,", "= 90 self.breed = self.__class__.__name__.lower() self.icon = '.' self.color =", "if self.world.torus: self.move_to(self.world.to_torus((x, y))) elif (x, y) in self.world.grid: self.move_to((x,", "nearest_agent(self, agents: List = None): \"\"\" Given a list of", "cell in cells} return min(dists, key=dists.get) def agents_in_radius(self, radius: int):", "return neighbors def agents_here(self) -> List: \"\"\" Returns all agents", "agent's direction a number of degrees to the right. \"\"\"", "coords self.world.place_on_grid(self) def cell_here(self, layer = None): \"\"\" Returns the", "agents located on the same cell as oneself. 
\"\"\" return", "= degrees(asin(ydif / dist)) if xdif < 0: self.direction =", "if coords != self.coords: xdif = coords[0] - self.coords[0] ydif", "x = round(self.coords[0] + cos(radians(self.direction)) * n_steps) y = round(self.coords[1]", "1) if (self.get_distance((x, y)) <= radius and (x, y) in", "of all layers are returned. \"\"\" if layer is not", "a number of degrees to the right. \"\"\" self.direction =", "(randint(0, self.world.width - 1), randint(0, self.world.height - 1)) self.direction =", "returned. \"\"\" if layer is not None: return self.world.grid[self.coords][layer] else:", "self.world.grid)} return neighborhood def empty_cells_in_radius(self, radius: int) -> Dict: \"\"\"", "def nearest_cell(self, cells: Union[List, Dict]) -> Tuple: \"\"\" Given a", "= coords or (randint(0, self.world.width - 1), randint(0, self.world.height -", "return [agent for agent in self.world.grid[self.coords]['agents'] if agent is not", "self.get_distance(agent.coords) for agent in agents if agent is not self}", "on the same cell as oneself. \"\"\" return [agent for", "the value of a layer in the model's grid for", "abc import ABCMeta, abstractmethod from random import randint, choice from", "y))] for x in range(self.coords[0] - radius, self.coords[0] + radius", "self.direction = 90 self.breed = self.__class__.__name__.lower() self.icon = '.' self.color", "= None): \"\"\" Given a list of agents, returns the", "self.world.grid[self.coords][layer] else: return self.world.grid[self.coords] def get_distance(self, coords: Tuple) -> int:", "i in range(n_steps): self.turn_right(randint(0, 360)) self.forward() @abstractmethod def step(self): \"\"\"", "of oneself. \"\"\" neighborhood = self.cells_in_radius(radius) neighbors = [agent for", "the agent one cell forward in a random direction for", "hypot(xdif, ydif) angle = degrees(asin(ydif / dist)) if xdif <", "right. 
\"\"\" self.direction = round((self.direction - angle) % 360) def", "for x in range(self.coords[0] - radius, self.coords[0] + radius +", "self.world.place_on_grid(self) def cell_here(self, layer = None): \"\"\" Returns the value", "self.world.torus: neighborhood = {self.world.to_torus((x, y)): self.world.grid[self.world.to_torus((x, y))] for x in", "{agent: self.get_distance(agent.coords) for agent in agents if agent is not", "self.world.torus: self.move_to(self.world.to_torus((x, y))) elif (x, y) in self.world.grid: self.move_to((x, y))", "represent an agent in an agent-based model. \"\"\" _id =", "import ABCMeta, abstractmethod from random import randint, choice from typing", "coords: Tuple) -> int: \"\"\" Returns the distance (in cells)", "an agent-based model. \"\"\" _id = 0 colors = ['blue',", "return neighborhood def empty_cells_in_radius(self, radius: int) -> Dict: \"\"\" Returns", "-> Dict: \"\"\" Returns all empty cells (with no agents", "for _id in self.world.agents] dists = {agent: self.get_distance(agent.coords) for agent", "= self.__class__.__name__.lower() self.icon = '.' self.color = choice(self.colors) self.world.add_agent(self) def", "\"\"\" Rotates the agent's direction a number of degrees to", "a number of cells forward in the direction it is", "inspect from math import hypot, sin, asin, cos, radians, degrees", "'pink', 'purple', 'red', 'yellow'] def __init__(self, world, coords: Tuple =", "- radius, self.coords[1] + radius + 1) if self.get_distance((x, y))", "cells: Union[List, Dict]) -> Tuple: \"\"\" Given a list or", "agents = [self.world.agents[_id] for _id in self.world.agents] dists = {agent:", "['agents'])} else: neighborhood = {(x, y): self.world.grid[(x, y)] for x", "radius + 1) if self.get_distance((x, y)) <= radius} else: neighborhood", "the agent from the world. 
\"\"\" del self.world.agents[self._id] self.world.grid[self.coords]['agents'].remove(self) del", "else: return self.world.grid[self.coords] def get_distance(self, coords: Tuple) -> int: \"\"\"", "left. \"\"\" self.direction = round((self.direction + angle) % 360) def", "<gh_stars>0 import inspect from math import hypot, sin, asin, cos,", "attributes within a distance of the agent. \"\"\" if self.world.torus:", "of agents, returns the agent that is nearest to oneself.", "if (self.get_distance((x, y)) <= radius and (x, y) in self.world.grid", "def move_to(self, coords: Tuple): \"\"\" Places the agent in a", "'.' self.color = choice(self.colors) self.world.add_agent(self) def die(self): \"\"\" Remove the", "radius, self.coords[1] + radius + 1) if self.get_distance((x, y)) <=", "1), randint(0, self.world.height - 1)) self.direction = 90 self.breed =", "self.coords[0]), (y - self.coords[1]))) def cells_in_radius(self, radius: int) -> Dict:", "\"\"\" neighborhood = self.cells_in_radius(radius) neighbors = [agent for coords in", "is currently facing. \"\"\" x = round(self.coords[0] + cos(radians(self.direction)) *", "angle) % 360) def turn_left(self, angle: int = 90): \"\"\"", "dist)) if xdif < 0: self.direction = round(180 - angle)", "Dict: \"\"\" Returns all empty cells (with no agents on", "'green', 'magenta', 'orange', 'pink', 'purple', 'red', 'yellow'] def __init__(self, world,", "== param.POSITIONAL_OR_KEYWORD] filtered_dict = {filter_key: self.__dict__[filter_key] for filter_key in filter_keys}", "for agent in self.world.grid[coords]['agents'] if agent is not self] return", "degrees from abc import ABCMeta, abstractmethod from random import randint,", "dists = {agent: self.get_distance(agent.coords) for agent in agents if agent", "\"\"\" Returns the distance (in cells) from the agent to", "in cells} return min(dists, key=dists.get) def agents_in_radius(self, radius: int): \"\"\"", "within a distance of oneself. 
\"\"\" neighborhood = self.cells_in_radius(radius) neighbors", "\"\"\" dists = {cell: self.get_distance(cell) for cell in cells} return", "xdif = coords[0] - self.coords[0] ydif = coords[1] - self.coords[1]", "round(180 - angle) else: self.direction = round((360 + angle) %", "oneself. If no list is provided, all agents are evaluated.", "performed by the agent at each step of the simulation.", "\"\"\" Turns the agent's direction towards a given pair of", "of a layer in the model's grid for the cell", "self.get_distance(cell) for cell in cells} return min(dists, key=dists.get) def agents_in_radius(self,", "\"\"\" self.direction = round((self.direction - angle) % 360) def turn_left(self,", "not self.world.grid[(x, y)]['agents'])} return neighborhood def nearest_cell(self, cells: Union[List, Dict])", "\"\"\" Creates an agent and initializes it with the same", "in a different cell of the world grid. \"\"\" self.world.remove_from_grid(self)", "List = None): \"\"\" Given a list of agents, returns", "None): self._id = Agent._id Agent._id += 1 self.world = world", "world grid. \"\"\" self.world.remove_from_grid(self) self.coords = coords self.world.place_on_grid(self) def cell_here(self,", "90): \"\"\" Rotates the agent's direction a number of degrees", "1): \"\"\" Moves the agent a number of cells forward", "towards a given pair of coordinates. \"\"\" if coords !=", "< 0: self.direction = round(180 - angle) else: self.direction =", "an agent in an agent-based model. \"\"\" _id = 0", "the agent. \"\"\" dists = {cell: self.get_distance(cell) for cell in", "hypot, sin, asin, cos, radians, degrees from abc import ABCMeta,", "round((360 + angle) % 360) def random_walk(self, n_steps: int =", "int): \"\"\" Returns all agents within a distance of oneself.", "to be performed by the agent at each step of", "in the direction it is currently facing. 
\"\"\" x =", "Returns all empty cells (with no agents on them) and", "self.coords = coords self.world.place_on_grid(self) def cell_here(self, layer = None): \"\"\"", "agent one cell forward in a random direction for a", "self.direction = round((self.direction + angle) % 360) def forward(self, n_steps:", "if self.get_distance((x, y)) <= radius} else: neighborhood = {(x, y):", "+ radius + 1) if (self.get_distance((x, y)) <= radius and", "round((self.direction + angle) % 360) def forward(self, n_steps: int =", "of degrees to the right. \"\"\" self.direction = round((self.direction -", "param.kind == param.POSITIONAL_OR_KEYWORD] filtered_dict = {filter_key: self.__dict__[filter_key] for filter_key in", "evaluated. \"\"\" if agents is None: agents = [self.world.agents[_id] for", "layer is not None: return self.world.grid[self.coords][layer] else: return self.world.grid[self.coords] def", "facing. \"\"\" x = round(self.coords[0] + cos(radians(self.direction)) * n_steps) y", "self.coords[1]))) def cells_in_radius(self, radius: int) -> Dict: \"\"\" Returns all", "round((self.direction - angle) % 360) def turn_left(self, angle: int =", "self.__dict__[filter_key] for filter_key in filter_keys} return self.__class__(**filtered_dict) def move_to(self, coords:", "elif (x, y) in self.world.grid: self.move_to((x, y)) def face_towards(self, coords:", "!= self.coords: xdif = coords[0] - self.coords[0] ydif = coords[1]", "agent that is nearest to oneself. 
If no list is", "Agent._id += 1 self.world = world self.coords = coords or", "choice from typing import Dict, List, Tuple, Union class Agent(metaclass=ABCMeta):", "and not self.world.grid[(x, y)]['agents'])} return neighborhood def nearest_cell(self, cells: Union[List,", "if self.world.torus: neighborhood = {self.world.to_torus((x, y)): self.world.grid[self.world.to_torus((x, y))] for x", "\"\"\" x = round(self.coords[0] + cos(radians(self.direction)) * n_steps) y =", "round(self.coords[1] + sin(radians(self.direction)) * n_steps) if self.world.torus: self.move_to(self.world.to_torus((x, y))) elif", "self.direction = round((360 + angle) % 360) def random_walk(self, n_steps:", "def empty_cells_in_radius(self, radius: int) -> Dict: \"\"\" Returns all empty", "from random import randint, choice from typing import Dict, List,", "self.world.grid: self.move_to((x, y)) def face_towards(self, coords: Tuple): \"\"\" Turns the", "dist = hypot(xdif, ydif) angle = degrees(asin(ydif / dist)) if", "Returns the value of a layer in the model's grid", "self.world.grid[(x, y)]['agents'])} return neighborhood def nearest_cell(self, cells: Union[List, Dict]) ->", "-> Tuple: \"\"\" Given a list or dictionary of cells,", "/ dist)) if xdif < 0: self.direction = round(180 -", "them) and respective attributes within a distance of the agent.", "= 0 colors = ['blue', 'brown', 'cyan', 'gray', 'green', 'magenta',", "90 self.breed = self.__class__.__name__.lower() self.icon = '.' 
self.color = choice(self.colors)", "Tuple): \"\"\" Places the agent in a different cell of", "self.coords[1] + radius + 1) if self.get_distance((x, y)) <= radius}", "and (x, y) in self.world.grid and not self.world.grid[(x, y)]['agents'])} return", "in self.world.grid and not self.world.grid[(x, y)]['agents'])} return neighborhood def nearest_cell(self,", "self.coords: xdif = coords[0] - self.coords[0] ydif = coords[1] -", "or dictionary of cells, returns the coordinates of the cell", "in self.world.grid)} return neighborhood def empty_cells_in_radius(self, radius: int) -> Dict:", "range(self.coords[1] - radius, self.coords[1] + radius + 1) if (self.get_distance((x,", "Dict: \"\"\" Returns all cells and respective attributes within a", "Tuple, Union class Agent(metaclass=ABCMeta): \"\"\" Class to represent an agent", "agent a number of cells forward in the direction it", "radius + 1) for y in range(self.coords[1] - radius, self.coords[1]", "radius and (x, y) in self.world.grid and not self.world.grid[(x, y)]['agents'])}", "\"\"\" Class to represent an agent in an agent-based model.", "1) if self.get_distance((x, y)) <= radius} else: neighborhood = {(x,", "to a pair of coordinates. \"\"\" x, y = coords", "{(x, y): self.world.grid[(x, y)] for x in range(self.coords[0] - radius,", "respective attributes within a distance of the agent. 
\"\"\" if", "= coords[0] - self.coords[0] ydif = coords[1] - self.coords[1] dist", "param in sig.parameters.values() if param.kind == param.POSITIONAL_OR_KEYWORD] filtered_dict = {filter_key:", "Tuple: \"\"\" Given a list or dictionary of cells, returns", "_id in self.world.agents] dists = {agent: self.get_distance(agent.coords) for agent in", "agent and initializes it with the same parameters as oneself.", "+ 1) if self.get_distance((x, y)) <= radius} else: neighborhood =", "empty cells (with no agents on them) and respective attributes", "neighborhood = self.cells_in_radius(radius) neighbors = [agent for coords in neighborhood", "+ 1) if (self.get_distance((x, y)) <= radius and (x, y)", "given pair of coordinates. \"\"\" if coords != self.coords: xdif", "= inspect.signature(self.__init__) filter_keys = [param.name for param in sig.parameters.values() if", "neighborhood = {(x, y): self.world.grid[(x, y)] for x in range(self.coords[0]", "int = 1): \"\"\" Moves the agent a number of", "import inspect from math import hypot, sin, asin, cos, radians,", "the right. \"\"\" self.direction = round((self.direction - angle) % 360)", "is not self] return neighbors def agents_here(self) -> List: \"\"\"", "Creates an agent and initializes it with the same parameters", "Given a list of agents, returns the agent that is", "in self.world.agents] dists = {agent: self.get_distance(agent.coords) for agent in agents", "oneself. \"\"\" sig = inspect.signature(self.__init__) filter_keys = [param.name for param", "if xdif < 0: self.direction = round(180 - angle) else:", "the agent's direction a number of degrees to the left.", "radius + 1) if (self.get_distance((x, y)) <= radius and not", "self.world.grid[self.world.to_torus((x, y))] for x in range(self.coords[0] - radius, self.coords[0] +", "def cell_here(self, layer = None): \"\"\" Returns the value of", "nearest to the agent. 
\"\"\" dists = {cell: self.get_distance(cell) for", "Methods to be performed by the agent at each step", "<= radius and (x, y) in self.world.grid and not self.world.grid[(x,", "cos(radians(self.direction)) * n_steps) y = round(self.coords[1] + sin(radians(self.direction)) * n_steps)", "radius, self.coords[0] + radius + 1) for y in range(self.coords[1]", "return self.world.grid[self.coords][layer] else: return self.world.grid[self.coords] def get_distance(self, coords: Tuple) ->", "1): \"\"\" Moves the agent one cell forward in a", "pair of coordinates. \"\"\" x, y = coords return round(hypot((x", "self.world.grid[self.coords] def get_distance(self, coords: Tuple) -> int: \"\"\" Returns the", "pair of coordinates. \"\"\" if coords != self.coords: xdif =", "in an agent-based model. \"\"\" _id = 0 colors =", "agent is. If no layer is specified, the values of", "nearest to oneself. If no list is provided, all agents", "= '.' self.color = choice(self.colors) self.world.add_agent(self) def die(self): \"\"\" Remove", "\"\"\" Returns all empty cells (with no agents on them)", "self.world.add_agent(self) def die(self): \"\"\" Remove the agent from the world.", "def cells_in_radius(self, radius: int) -> Dict: \"\"\" Returns all cells", "not self] def nearest_agent(self, agents: List = None): \"\"\" Given", "= coords return round(hypot((x - self.coords[0]), (y - self.coords[1]))) def", "if agents is None: agents = [self.world.agents[_id] for _id in", "direction it is currently facing. \"\"\" x = round(self.coords[0] +", "all cells and respective attributes within a distance of the", "= 90): \"\"\" Rotates the agent's direction a number of", "self.direction = round((self.direction - angle) % 360) def turn_left(self, angle:", "range(n_steps): self.turn_right(randint(0, 360)) self.forward() @abstractmethod def step(self): \"\"\" Methods to", "def __init__(self, world, coords: Tuple = None): self._id = Agent._id", "agents within a distance of oneself. 
\"\"\" neighborhood = self.cells_in_radius(radius)", "{cell: self.get_distance(cell) for cell in cells} return min(dists, key=dists.get) def", "forward in a random direction for a number of times.", "self.world.grid and not self.world.grid[(x, y)]['agents'])} return neighborhood def nearest_cell(self, cells:", "the model's grid for the cell where the agent is.", "= world self.coords = coords or (randint(0, self.world.width - 1),", "turn_left(self, angle: int = 90): \"\"\" Rotates the agent's direction", "of coordinates. \"\"\" if coords != self.coords: xdif = coords[0]", "agent at each step of the simulation. \"\"\" raise NotImplementedError", "-> List: \"\"\" Returns all agents located on the same", "neighborhood for agent in self.world.grid[coords]['agents'] if agent is not self]", "agent. \"\"\" dists = {cell: self.get_distance(cell) for cell in cells}", "= 1): \"\"\" Moves the agent a number of cells", "self] return neighbors def agents_here(self) -> List: \"\"\" Returns all", "self.move_to((x, y)) def face_towards(self, coords: Tuple): \"\"\" Turns the agent's", "number of degrees to the right. \"\"\" self.direction = round((self.direction", "of the world grid. \"\"\" self.world.remove_from_grid(self) self.coords = coords self.world.place_on_grid(self)", "(in cells) from the agent to a pair of coordinates.", "is provided, all agents are evaluated. \"\"\" if agents is", "specified, the values of all layers are returned. \"\"\" if", "the same cell as oneself. \"\"\" return [agent for agent", "all agents within a distance of oneself. 
\"\"\" neighborhood =", "def turn_right(self, angle: int = 90): \"\"\" Rotates the agent's", "= ['blue', 'brown', 'cyan', 'gray', 'green', 'magenta', 'orange', 'pink', 'purple',", "['blue', 'brown', 'cyan', 'gray', 'green', 'magenta', 'orange', 'pink', 'purple', 'red',", "= None): self._id = Agent._id Agent._id += 1 self.world =", "agent is not self] def nearest_agent(self, agents: List = None):", "is None: agents = [self.world.agents[_id] for _id in self.world.agents] dists", "cells forward in the direction it is currently facing. \"\"\"", "list of agents, returns the agent that is nearest to", "None: agents = [self.world.agents[_id] for _id in self.world.agents] dists =", "neighborhood def empty_cells_in_radius(self, radius: int) -> Dict: \"\"\" Returns all", "be performed by the agent at each step of the", "forward in the direction it is currently facing. \"\"\" x", "the world grid. \"\"\" self.world.remove_from_grid(self) self.coords = coords self.world.place_on_grid(self) def", "+ 1) for y in range(self.coords[1] - radius, self.coords[1] +", "of cells, returns the coordinates of the cell that is", "Returns the distance (in cells) from the agent to a", "agent in self.world.grid[coords]['agents'] if agent is not self] return neighbors", "the values of all layers are returned. \"\"\" if layer", "angle) else: self.direction = round((360 + angle) % 360) def", "colors = ['blue', 'brown', 'cyan', 'gray', 'green', 'magenta', 'orange', 'pink',", "sig = inspect.signature(self.__init__) filter_keys = [param.name for param in sig.parameters.values()", "def random_walk(self, n_steps: int = 1): \"\"\" Moves the agent", "360) def forward(self, n_steps: int = 1): \"\"\" Moves the", "1 self.world = world self.coords = coords or (randint(0, self.world.width", "0 colors = ['blue', 'brown', 'cyan', 'gray', 'green', 'magenta', 'orange',", "Agent(metaclass=ABCMeta): \"\"\" Class to represent an agent in an agent-based", "it with the same parameters as oneself. 
\"\"\" sig =", "return self.__class__(**filtered_dict) def move_to(self, coords: Tuple): \"\"\" Places the agent", "distance of the agent. \"\"\" if self.world.torus: neighborhood = {self.world.to_torus((x,", "the agent a number of cells forward in the direction", "is not self} return min(dists, key=dists.get) def turn_right(self, angle: int", "as oneself. \"\"\" sig = inspect.signature(self.__init__) filter_keys = [param.name for", "y) in self.world.grid: self.move_to((x, y)) def face_towards(self, coords: Tuple): \"\"\"", "radians, degrees from abc import ABCMeta, abstractmethod from random import", "y in range(self.coords[1] - radius, self.coords[1] + radius + 1)", "oneself. \"\"\" return [agent for agent in self.world.grid[self.coords]['agents'] if agent", "self} return min(dists, key=dists.get) def turn_right(self, angle: int = 90):", "cell where the agent is. If no layer is specified,", "cell that is nearest to the agent. \"\"\" dists =", "returns the agent that is nearest to oneself. If no", "y)) <= radius and (x, y) in self.world.grid)} return neighborhood", "self.world.agents[self._id] self.world.grid[self.coords]['agents'].remove(self) del self def hatch(self): \"\"\" Creates an agent", "self.cells_in_radius(radius) neighbors = [agent for coords in neighborhood for agent", "- angle) else: self.direction = round((360 + angle) % 360)", "if agent is not self] return neighbors def agents_here(self) ->", "1)) self.direction = 90 self.breed = self.__class__.__name__.lower() self.icon = '.'", "[agent for coords in neighborhood for agent in self.world.grid[coords]['agents'] if", "self.coords[1] + radius + 1) if (self.get_distance((x, y)) <= radius", "None: return self.world.grid[self.coords][layer] else: return self.world.grid[self.coords] def get_distance(self, coords: Tuple)", "a number of times. 
\"\"\" for i in range(n_steps): self.turn_right(randint(0,", "Union[List, Dict]) -> Tuple: \"\"\" Given a list or dictionary", "None): \"\"\" Given a list of agents, returns the agent", "y) in self.world.grid and not self.world.grid[(x, y)]['agents'])} return neighborhood def", "of cells forward in the direction it is currently facing.", "'purple', 'red', 'yellow'] def __init__(self, world, coords: Tuple = None):", "filter_key in filter_keys} return self.__class__(**filtered_dict) def move_to(self, coords: Tuple): \"\"\"", "Class to represent an agent in an agent-based model. \"\"\"", "\"\"\" Places the agent in a different cell of the", "a given pair of coordinates. \"\"\" if coords != self.coords:", "and (x, y) in self.world.grid)} return neighborhood def empty_cells_in_radius(self, radius:", "y)] for x in range(self.coords[0] - radius, self.coords[0] + radius", "self.__class__.__name__.lower() self.icon = '.' self.color = choice(self.colors) self.world.add_agent(self) def die(self):", "the agent in a different cell of the world grid.", "same cell as oneself. \"\"\" return [agent for agent in", "y = round(self.coords[1] + sin(radians(self.direction)) * n_steps) if self.world.torus: self.move_to(self.world.to_torus((x,", "not self.world.grid[self.world.to_torus((x, y))] ['agents'])} else: neighborhood = {(x, y): self.world.grid[(x,", "distance of oneself. \"\"\" neighborhood = self.cells_in_radius(radius) neighbors = [agent", "coords return round(hypot((x - self.coords[0]), (y - self.coords[1]))) def cells_in_radius(self,", "List: \"\"\" Returns all agents located on the same cell", "+= 1 self.world = world self.coords = coords or (randint(0,", "\"\"\" Remove the agent from the world. 
\"\"\" del self.world.agents[self._id]", "return neighborhood def nearest_cell(self, cells: Union[List, Dict]) -> Tuple: \"\"\"", "not self} return min(dists, key=dists.get) def turn_right(self, angle: int =", "cells} return min(dists, key=dists.get) def agents_in_radius(self, radius: int): \"\"\" Returns", "provided, all agents are evaluated. \"\"\" if agents is None:", "the direction it is currently facing. \"\"\" x = round(self.coords[0]", "the cell where the agent is. If no layer is", "Dict, List, Tuple, Union class Agent(metaclass=ABCMeta): \"\"\" Class to represent", "in neighborhood for agent in self.world.grid[coords]['agents'] if agent is not", "randint, choice from typing import Dict, List, Tuple, Union class", "agents, returns the agent that is nearest to oneself. If", "die(self): \"\"\" Remove the agent from the world. \"\"\" del", "if agent is not self] def nearest_agent(self, agents: List =", "forward(self, n_steps: int = 1): \"\"\" Moves the agent a", "None): \"\"\" Returns the value of a layer in the", "all empty cells (with no agents on them) and respective", "return min(dists, key=dists.get) def turn_right(self, angle: int = 90): \"\"\"", "for the cell where the agent is. If no layer", "returns the coordinates of the cell that is nearest to", "% 360) def turn_left(self, angle: int = 90): \"\"\" Rotates", "cell_here(self, layer = None): \"\"\" Returns the value of a", "return min(dists, key=dists.get) def agents_in_radius(self, radius: int): \"\"\" Returns all", "times. 
\"\"\" for i in range(n_steps): self.turn_right(randint(0, 360)) self.forward() @abstractmethod", "cos, radians, degrees from abc import ABCMeta, abstractmethod from random", "round(hypot((x - self.coords[0]), (y - self.coords[1]))) def cells_in_radius(self, radius: int)", "coords: Tuple): \"\"\" Places the agent in a different cell", "-> Dict: \"\"\" Returns all cells and respective attributes within", "nearest_cell(self, cells: Union[List, Dict]) -> Tuple: \"\"\" Given a list", "\"\"\" Moves the agent a number of cells forward in", "from the world. \"\"\" del self.world.agents[self._id] self.world.grid[self.coords]['agents'].remove(self) del self def", "self.world.grid[self.world.to_torus((x, y))] ['agents'])} else: neighborhood = {(x, y): self.world.grid[(x, y)]", "def nearest_agent(self, agents: List = None): \"\"\" Given a list", "'gray', 'green', 'magenta', 'orange', 'pink', 'purple', 'red', 'yellow'] def __init__(self,", "coords or (randint(0, self.world.width - 1), randint(0, self.world.height - 1))", "- self.coords[0] ydif = coords[1] - self.coords[1] dist = hypot(xdif,", "is not self] def nearest_agent(self, agents: List = None): \"\"\"", "agent-based model. \"\"\" _id = 0 colors = ['blue', 'brown',", "= {agent: self.get_distance(agent.coords) for agent in agents if agent is", "is nearest to the agent. \"\"\" dists = {cell: self.get_distance(cell)", "layer in the model's grid for the cell where the", "of degrees to the left. \"\"\" self.direction = round((self.direction +", "value of a layer in the model's grid for the", "= round(self.coords[1] + sin(radians(self.direction)) * n_steps) if self.world.torus: self.move_to(self.world.to_torus((x, y)))", "cells (with no agents on them) and respective attributes within", "self.world.grid[self.coords]['agents'].remove(self) del self def hatch(self): \"\"\" Creates an agent and", "the agent that is nearest to oneself. If no list", "oneself. 
\"\"\" neighborhood = self.cells_in_radius(radius) neighbors = [agent for coords", "min(dists, key=dists.get) def turn_right(self, angle: int = 90): \"\"\" Rotates", "cell of the world grid. \"\"\" self.world.remove_from_grid(self) self.coords = coords", "= {(x, y): self.world.grid[(x, y)] for x in range(self.coords[0] -", "agents_here(self) -> List: \"\"\" Returns all agents located on the", "\"\"\" if agents is None: agents = [self.world.agents[_id] for _id", "Dict]) -> Tuple: \"\"\" Given a list or dictionary of", "self.world.remove_from_grid(self) self.coords = coords self.world.place_on_grid(self) def cell_here(self, layer = None):", "for agent in self.world.grid[self.coords]['agents'] if agent is not self] def", "y))) elif (x, y) in self.world.grid: self.move_to((x, y)) def face_towards(self,", "{filter_key: self.__dict__[filter_key] for filter_key in filter_keys} return self.__class__(**filtered_dict) def move_to(self,", "def get_distance(self, coords: Tuple) -> int: \"\"\" Returns the distance", "int) -> Dict: \"\"\" Returns all cells and respective attributes", "coords[0] - self.coords[0] ydif = coords[1] - self.coords[1] dist =", "List, Tuple, Union class Agent(metaclass=ABCMeta): \"\"\" Class to represent an", "Given a list or dictionary of cells, returns the coordinates", "Places the agent in a different cell of the world", "def agents_here(self) -> List: \"\"\" Returns all agents located on", "ydif) angle = degrees(asin(ydif / dist)) if xdif < 0:", "= coords self.world.place_on_grid(self) def cell_here(self, layer = None): \"\"\" Returns", "- radius, self.coords[0] + radius + 1) for y in", "in a random direction for a number of times. \"\"\"", "the agent's direction towards a given pair of coordinates. \"\"\"", "in filter_keys} return self.__class__(**filtered_dict) def move_to(self, coords: Tuple): \"\"\" Places", "the cell that is nearest to the agent. 
\"\"\" dists", "= coords[1] - self.coords[1] dist = hypot(xdif, ydif) angle =", "1) if (self.get_distance((x, y)) <= radius and not self.world.grid[self.world.to_torus((x, y))]", "ABCMeta, abstractmethod from random import randint, choice from typing import", "filter_keys = [param.name for param in sig.parameters.values() if param.kind ==", "'magenta', 'orange', 'pink', 'purple', 'red', 'yellow'] def __init__(self, world, coords:", "\"\"\" for i in range(n_steps): self.turn_right(randint(0, 360)) self.forward() @abstractmethod def", "y))] ['agents'])} else: neighborhood = {(x, y): self.world.grid[(x, y)] for", "class Agent(metaclass=ABCMeta): \"\"\" Class to represent an agent in an", "if layer is not None: return self.world.grid[self.coords][layer] else: return self.world.grid[self.coords]", "to oneself. If no list is provided, all agents are", "number of cells forward in the direction it is currently", "y = coords return round(hypot((x - self.coords[0]), (y - self.coords[1])))", "Returns all agents located on the same cell as oneself.", "import randint, choice from typing import Dict, List, Tuple, Union", "agent in a different cell of the world grid. \"\"\"", "initializes it with the same parameters as oneself. \"\"\" sig", "if param.kind == param.POSITIONAL_OR_KEYWORD] filtered_dict = {filter_key: self.__dict__[filter_key] for filter_key", "same parameters as oneself. \"\"\" sig = inspect.signature(self.__init__) filter_keys =", "n_steps: int = 1): \"\"\" Moves the agent a number", "of the cell that is nearest to the agent. \"\"\"", "typing import Dict, List, Tuple, Union class Agent(metaclass=ABCMeta): \"\"\" Class", "list or dictionary of cells, returns the coordinates of the", "direction a number of degrees to the left. 
\"\"\" self.direction", "self.coords = coords or (randint(0, self.world.width - 1), randint(0, self.world.height", "return round(hypot((x - self.coords[0]), (y - self.coords[1]))) def cells_in_radius(self, radius:", "and not self.world.grid[self.world.to_torus((x, y))] ['agents'])} else: neighborhood = {(x, y):", "y)) <= radius and not self.world.grid[self.world.to_torus((x, y))] ['agents'])} else: neighborhood", "def step(self): \"\"\" Methods to be performed by the agent", "choice(self.colors) self.world.add_agent(self) def die(self): \"\"\" Remove the agent from the", "del self def hatch(self): \"\"\" Creates an agent and initializes", "filtered_dict = {filter_key: self.__dict__[filter_key] for filter_key in filter_keys} return self.__class__(**filtered_dict)", "y)) <= radius and (x, y) in self.world.grid and not", "layer is specified, the values of all layers are returned.", "Returns all cells and respective attributes within a distance of", "turn_right(self, angle: int = 90): \"\"\" Rotates the agent's direction", "radius} else: neighborhood = {(x, y): self.world.grid[(x, y)] for x", "to the right. \"\"\" self.direction = round((self.direction - angle) %", "= {filter_key: self.__dict__[filter_key] for filter_key in filter_keys} return self.__class__(**filtered_dict) def", "in range(self.coords[0] - radius, self.coords[0] + radius + 1) for", "<= radius and (x, y) in self.world.grid)} return neighborhood def", "world. 
\"\"\" del self.world.agents[self._id] self.world.grid[self.coords]['agents'].remove(self) del self def hatch(self): \"\"\"", "key=dists.get) def turn_right(self, angle: int = 90): \"\"\" Rotates the", "\"\"\" x, y = coords return round(hypot((x - self.coords[0]), (y", "[param.name for param in sig.parameters.values() if param.kind == param.POSITIONAL_OR_KEYWORD] filtered_dict", "Tuple = None): self._id = Agent._id Agent._id += 1 self.world", "int: \"\"\" Returns the distance (in cells) from the agent", "degrees(asin(ydif / dist)) if xdif < 0: self.direction = round(180", "direction towards a given pair of coordinates. \"\"\" if coords", "self.forward() @abstractmethod def step(self): \"\"\" Methods to be performed by", "for agent in agents if agent is not self} return", "to the agent. \"\"\" dists = {cell: self.get_distance(cell) for cell", "\"\"\" Given a list of agents, returns the agent that", "'red', 'yellow'] def __init__(self, world, coords: Tuple = None): self._id", "if (self.get_distance((x, y)) <= radius and (x, y) in self.world.grid)}", "face_towards(self, coords: Tuple): \"\"\" Turns the agent's direction towards a", "coords in neighborhood for agent in self.world.grid[coords]['agents'] if agent is", "a distance of oneself. \"\"\" neighborhood = self.cells_in_radius(radius) neighbors =", "y)) def face_towards(self, coords: Tuple): \"\"\" Turns the agent's direction", "self.coords[0] ydif = coords[1] - self.coords[1] dist = hypot(xdif, ydif)", "degrees to the left. \"\"\" self.direction = round((self.direction + angle)", "self.world.grid[coords]['agents'] if agent is not self] return neighbors def agents_here(self)", "located on the same cell as oneself. \"\"\" return [agent", "agent's direction a number of degrees to the left. 
\"\"\"", "\"\"\" self.world.remove_from_grid(self) self.coords = coords self.world.place_on_grid(self) def cell_here(self, layer =", "= None): \"\"\" Returns the value of a layer in", "the coordinates of the cell that is nearest to the", "= {self.world.to_torus((x, y)): self.world.grid[self.world.to_torus((x, y))] for x in range(self.coords[0] -", "agent is not self] return neighbors def agents_here(self) -> List:", "sin(radians(self.direction)) * n_steps) if self.world.torus: self.move_to(self.world.to_torus((x, y))) elif (x, y)", "% 360) def forward(self, n_steps: int = 1): \"\"\" Moves", "dists = {cell: self.get_distance(cell) for cell in cells} return min(dists,", "are returned. \"\"\" if layer is not None: return self.world.grid[self.coords][layer]", "\"\"\" return [agent for agent in self.world.grid[self.coords]['agents'] if agent is", "a list or dictionary of cells, returns the coordinates of", "\"\"\" _id = 0 colors = ['blue', 'brown', 'cyan', 'gray',", "- angle) % 360) def turn_left(self, angle: int = 90):", "radius and (x, y) in self.world.grid)} return neighborhood def empty_cells_in_radius(self,", "Rotates the agent's direction a number of degrees to the", "where the agent is. If no layer is specified, the", "360) def turn_left(self, angle: int = 90): \"\"\" Rotates the", "a different cell of the world grid. \"\"\" self.world.remove_from_grid(self) self.coords", "= round((self.direction + angle) % 360) def forward(self, n_steps: int", "self.breed = self.__class__.__name__.lower() self.icon = '.' self.color = choice(self.colors) self.world.add_agent(self)", "y) in self.world.grid)} return neighborhood def empty_cells_in_radius(self, radius: int) ->", "in self.world.grid[coords]['agents'] if agent is not self] return neighbors def", "Remove the agent from the world. 
\"\"\" del self.world.agents[self._id] self.world.grid[self.coords]['agents'].remove(self)", "on them) and respective attributes within a distance of the", "\"\"\" self.direction = round((self.direction + angle) % 360) def forward(self,", "parameters as oneself. \"\"\" sig = inspect.signature(self.__init__) filter_keys = [param.name", "is specified, the values of all layers are returned. \"\"\"", "a distance of the agent. \"\"\" if self.world.torus: neighborhood =", "an agent and initializes it with the same parameters as", "= {cell: self.get_distance(cell) for cell in cells} return min(dists, key=dists.get)", "agents is None: agents = [self.world.agents[_id] for _id in self.world.agents]", "Moves the agent a number of cells forward in the", "\"\"\" Methods to be performed by the agent at each", "= round(self.coords[0] + cos(radians(self.direction)) * n_steps) y = round(self.coords[1] +", "range(self.coords[0] - radius, self.coords[0] + radius + 1) for y", "'yellow'] def __init__(self, world, coords: Tuple = None): self._id =", "distance (in cells) from the agent to a pair of", "agent. \"\"\" if self.world.torus: neighborhood = {self.world.to_torus((x, y)): self.world.grid[self.world.to_torus((x, y))]", "param.POSITIONAL_OR_KEYWORD] filtered_dict = {filter_key: self.__dict__[filter_key] for filter_key in filter_keys} return", "if agent is not self} return min(dists, key=dists.get) def turn_right(self,", "and initializes it with the same parameters as oneself. \"\"\"", "or (randint(0, self.world.width - 1), randint(0, self.world.height - 1)) self.direction", "the world. \"\"\" del self.world.agents[self._id] self.world.grid[self.coords]['agents'].remove(self) del self def hatch(self):", "-> int: \"\"\" Returns the distance (in cells) from the", "world self.coords = coords or (randint(0, self.world.width - 1), randint(0,", "from abc import ABCMeta, abstractmethod from random import randint, choice", "of the agent. 
\"\"\" if self.world.torus: neighborhood = {self.world.to_torus((x, y)):", "for y in range(self.coords[1] - radius, self.coords[1] + radius +", "the agent's direction a number of degrees to the right.", "'cyan', 'gray', 'green', 'magenta', 'orange', 'pink', 'purple', 'red', 'yellow'] def", "return self.world.grid[self.coords] def get_distance(self, coords: Tuple) -> int: \"\"\" Returns", "\"\"\" Returns all agents located on the same cell as", "within a distance of the agent. \"\"\" if self.world.torus: neighborhood", "\"\"\" Returns the value of a layer in the model's", "self.__class__(**filtered_dict) def move_to(self, coords: Tuple): \"\"\" Places the agent in", "y): self.world.grid[(x, y)] for x in range(self.coords[0] - radius, self.coords[0]", "'orange', 'pink', 'purple', 'red', 'yellow'] def __init__(self, world, coords: Tuple", "math import hypot, sin, asin, cos, radians, degrees from abc", "def die(self): \"\"\" Remove the agent from the world. \"\"\"", "(self.get_distance((x, y)) <= radius and (x, y) in self.world.grid and", "the agent to a pair of coordinates. \"\"\" x, y", "+ radius + 1) if self.get_distance((x, y)) <= radius} else:", "abstractmethod from random import randint, choice from typing import Dict,", "is. 
If no layer is specified, the values of all", "+ sin(radians(self.direction)) * n_steps) if self.world.torus: self.move_to(self.world.to_torus((x, y))) elif (x,", "hatch(self): \"\"\" Creates an agent and initializes it with the", "a list of agents, returns the agent that is nearest", "is not None: return self.world.grid[self.coords][layer] else: return self.world.grid[self.coords] def get_distance(self,", "\"\"\" if self.world.torus: neighborhood = {self.world.to_torus((x, y)): self.world.grid[self.world.to_torus((x, y))] for", "y)): self.world.grid[self.world.to_torus((x, y))] for x in range(self.coords[0] - radius, self.coords[0]", "radius and not self.world.grid[self.world.to_torus((x, y))] ['agents'])} else: neighborhood = {(x,", "are evaluated. \"\"\" if agents is None: agents = [self.world.agents[_id]", "of times. \"\"\" for i in range(n_steps): self.turn_right(randint(0, 360)) self.forward()", "coords: Tuple): \"\"\" Turns the agent's direction towards a given", "Agent._id Agent._id += 1 self.world = world self.coords = coords", "from the agent to a pair of coordinates. \"\"\" x,", "y)) <= radius} else: neighborhood = {(x, y): self.world.grid[(x, y)]", "__init__(self, world, coords: Tuple = None): self._id = Agent._id Agent._id", "cells) from the agent to a pair of coordinates. \"\"\"", "agents if agent is not self} return min(dists, key=dists.get) def", "coords: Tuple = None): self._id = Agent._id Agent._id += 1", "\"\"\" Returns all agents within a distance of oneself. 
\"\"\"", "(self.get_distance((x, y)) <= radius and not self.world.grid[self.world.to_torus((x, y))] ['agents'])} else:", "= 1): \"\"\" Moves the agent one cell forward in", "import Dict, List, Tuple, Union class Agent(metaclass=ABCMeta): \"\"\" Class to", "radius: int) -> Dict: \"\"\" Returns all cells and respective", "import hypot, sin, asin, cos, radians, degrees from abc import", "y)]['agents'])} return neighborhood def nearest_cell(self, cells: Union[List, Dict]) -> Tuple:", "Tuple): \"\"\" Turns the agent's direction towards a given pair", "self.color = choice(self.colors) self.world.add_agent(self) def die(self): \"\"\" Remove the agent", "no list is provided, all agents are evaluated. \"\"\" if", "neighborhood def nearest_cell(self, cells: Union[List, Dict]) -> Tuple: \"\"\" Given", "radius: int): \"\"\" Returns all agents within a distance of", "self.world.height - 1)) self.direction = 90 self.breed = self.__class__.__name__.lower() self.icon", "layer = None): \"\"\" Returns the value of a layer", "{self.world.to_torus((x, y)): self.world.grid[self.world.to_torus((x, y))] for x in range(self.coords[0] - radius,", "agent in agents if agent is not self} return min(dists,", "self.move_to(self.world.to_torus((x, y))) elif (x, y) in self.world.grid: self.move_to((x, y)) def", "cell as oneself. \"\"\" return [agent for agent in self.world.grid[self.coords]['agents']", "by the agent at each step of the simulation. \"\"\"", "from math import hypot, sin, asin, cos, radians, degrees from", "agent's direction towards a given pair of coordinates. \"\"\" if", "random direction for a number of times. \"\"\" for i", "one cell forward in a random direction for a number", "direction for a number of times. 
\"\"\" for i in", "cells and respective attributes within a distance of the agent.", "coordinates of the cell that is nearest to the agent.", "self] def nearest_agent(self, agents: List = None): \"\"\" Given a", "self._id = Agent._id Agent._id += 1 self.world = world self.coords", "it is currently facing. \"\"\" x = round(self.coords[0] + cos(radians(self.direction))", "with the same parameters as oneself. \"\"\" sig = inspect.signature(self.__init__)", "agent in an agent-based model. \"\"\" _id = 0 colors", "random_walk(self, n_steps: int = 1): \"\"\" Moves the agent one", "self.world = world self.coords = coords or (randint(0, self.world.width -", "empty_cells_in_radius(self, radius: int) -> Dict: \"\"\" Returns all empty cells", "agents are evaluated. \"\"\" if agents is None: agents =", "for i in range(n_steps): self.turn_right(randint(0, 360)) self.forward() @abstractmethod def step(self):", "<= radius} else: neighborhood = {(x, y): self.world.grid[(x, y)] for", "* n_steps) if self.world.torus: self.move_to(self.world.to_torus((x, y))) elif (x, y) in", "for param in sig.parameters.values() if param.kind == param.POSITIONAL_OR_KEYWORD] filtered_dict =", "- self.coords[0]), (y - self.coords[1]))) def cells_in_radius(self, radius: int) ->", "+ radius + 1) for y in range(self.coords[1] - radius,", "int = 90): \"\"\" Rotates the agent's direction a number", "'brown', 'cyan', 'gray', 'green', 'magenta', 'orange', 'pink', 'purple', 'red', 'yellow']", "step(self): \"\"\" Methods to be performed by the agent at", "grid for the cell where the agent is. 
If no", "sin, asin, cos, radians, degrees from abc import ABCMeta, abstractmethod", "del self.world.agents[self._id] self.world.grid[self.coords]['agents'].remove(self) del self def hatch(self): \"\"\" Creates an", "= [self.world.agents[_id] for _id in self.world.agents] dists = {agent: self.get_distance(agent.coords)", "angle) % 360) def forward(self, n_steps: int = 1): \"\"\"", "(y - self.coords[1]))) def cells_in_radius(self, radius: int) -> Dict: \"\"\"", "(x, y) in self.world.grid)} return neighborhood def empty_cells_in_radius(self, radius: int)", "number of degrees to the left. \"\"\" self.direction = round((self.direction", "int = 1): \"\"\" Moves the agent one cell forward", "agent from the world. \"\"\" del self.world.agents[self._id] self.world.grid[self.coords]['agents'].remove(self) del self", "self.coords[0] + radius + 1) for y in range(self.coords[1] -", "in the model's grid for the cell where the agent", "@abstractmethod def step(self): \"\"\" Methods to be performed by the", "If no layer is specified, the values of all layers", "agents on them) and respective attributes within a distance of", "360)) self.forward() @abstractmethod def step(self): \"\"\" Methods to be performed", "\"\"\" Returns all cells and respective attributes within a distance", "self.turn_right(randint(0, 360)) self.forward() @abstractmethod def step(self): \"\"\" Methods to be" ]
[ "forward(self, x): x_pool1 = self.pool1(x) x_pool2 = self.pool2(x_pool1) x_pool3 =", "proj3 = F.upsample(tmp3, size=(19, 19), mode='bilinear') proj = torch.cat([proj2, proj3],", "convert3], dim=1) pred3 = self.merge3(pred3) new_sources.append(pred3) for prediction in sources:", "1024, kernel_size=1) layers += [pool5, conv6, nn.ReLU(inplace=False), conv7, nn.ReLU(inplace=False)] return", "= nn.MaxPool2d(kernel_size=3, stride=1, padding=1) conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6,", "= nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6) conv7 = nn.Conv2d(1024, 1024,", "= out_planes self.relu = nn.ReLU(inplace=False) self.single_branch = nn.Sequential( ConvBlock(in_planes, out_planes,", "convs for generate the lsn features self.icn1 = LSN_init(3, 512,", "= out_planes self.single_branch = nn.Sequential( ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride,", "== '.pkl' or '.pth': print('Loading weights into state dict...') self.load_state_dict(torch.load(base_file))", "current level self.agent1 = ConvBlock(512, 256, kernel_size=1, stride=1) self.agent2 =", "3), stride=stride, padding=1) ) def forward(self, x): x = self.relu(x)", "q = self.Norm4(self.dsc3(w) + x) sources.append(q) elif k == 5", "= self.base[k](x) conv7_bn = self.ibn2(x) x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip) p", "kernel_size=3,stride=1)] return layers extras = { '300': [1024, 'S', 512,", "ConvBlock(512, 512, kernel_size=3, stride=1, padding=1) self.ibn1 = IBN(512, bn=True) self.ibn2", "self.num_classes), ) return output def load_weights(self, base_file): other, ext =", "def __init__(self, in_planes, out_planes, stride=1, padding=(1, 1)): super(Ds_Conv, self).__init__() self.out_channels", "output = ( loc.view(loc.size(0), -1, 4), conf.view(conf.size(0), -1, self.num_classes), )", "conf = list() new_sources = list() # apply lds to", "F.upsample(tmp1, size=(38, 38), mode='bilinear') proj2 = F.upsample(tmp2, size=(38, 38), mode='bilinear')", "= 
nn.ModuleList(base) self.lds = LDS() # convs for merging the", "import torch.nn as nn import os import torch.nn.functional as F", "conf.append(c(x).permute(0, 2, 3, 1).contiguous()) loc = torch.cat([o.view(o.size(0), -1) for o", "= ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False) def forward(self, x): out1", "forward(self, x): x = self.relu(x) out = self.single_branch(x) return out", "num_classes): loc_layers = [] conf_layers = [] vgg_source = [1,", "enumerate(self.extras): x = v(x) if k == 0: x_pool3_skip, x_pool3_icn", "inter_planes, kernel_size=(3, 3), stride=stride, padding=1), ConvBlock(inter_planes, inter_planes, kernel_size=1, stride=1), ConvBlock(inter_planes,", "out class LRFNet(nn.Module): \"\"\"LRFNet for object detection The network is", "nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v),", "phase: test: list of concat outputs from: 1: softmax layers,", "padding=1)] conf_layers += [nn.Conv2d(v.out_channels, cfg[i] * num_classes, kernel_size=3, padding=1)] i", "nn.Sequential( ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1) ) def forward(self,", "num_classes): super(LRFNet, self).__init__() self.phase = phase self.num_classes = num_classes self.size", "ConvBlock(512, 256, kernel_size=1, stride=1) # convs to reduce the feature", "cfg: if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] elif", "out_planes, stride=1, padding=(1, 1)): super(Ds_Conv, self).__init__() self.out_channels = out_planes self.single_branch", "and ssd features self.Norm1 = Relu_Conv(512, 512, stride=1) self.Norm2 =", "== 5 or k == 7: sources.append(x) else: pass #", "image size is not supported!\") return return LRFNet(phase, size, *multibox(size,", "def load_weights(self, base_file): other, ext = os.path.splitext(base_file) if ext ==", "# The conv8 level proj3 = F.upsample(tmp3, size=(10, 10), mode='bilinear')", "vgg up to conv4_3 for k in range(22): x =", "300: 
print(\"Error: The input image size is not supported!\") return", "x): out = self.single_branch(x) return out class Relu_Conv(nn.Module): def __init__(self,", "if k == 0: loc_layers += [nn.Conv2d(512, cfg[k] * 4,", "stride=1, padding=1) conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6) conv7", "layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)] else: layers += [One_Three_Conv(in_channels,", "either 300 or 512 extras: extra layers that feed to", "'S', 256]} def multibox(size, vgg, extra_layers, cfg, num_classes): loc_layers =", "LSN_later(nn.Module): def __init__(self, in_planes, out_planes, stride=1): super(LSN_later, self).__init__() self.out_channels =", "= { '300': [1024, 'S', 512, 'S', 256]} def multibox(size,", "= self.base[34](x) # apply extra layers and cache source layer", "layer outputs for k, v in enumerate(self.extras): x = v(x)", "on phase: test: list of concat outputs from: 1: softmax", "None: x = self.bn(x) if self.relu is not None: x", "localization layers, Shape: [batch,num_priors*4] 3: priorbox layers, Shape: [2,num_priors*4] train:", "= list() loc = list() conf = list() new_sources =", "concat outputs from: 1: confidence layers, Shape: [batch*num_priors,num_classes] 2: localization", "class IBN(nn.Module): def __init__(self, out_planes, bn=True): super(IBN, self).__init__() self.out_channels =", "# convs to reduce the feature dimensions of current level", "padding=(1, 1)) self.dsc2 = Ds_Conv(1024, 512, stride=2, padding=(1, 1)) self.dsc3", "image or batch of images. Shape: [batch,3,300,300]. Return: Depending on", "self.relu = nn.ReLU(inplace=False) if relu else None def forward(self, x):", "= Relu_Conv(256, 256, stride=1) # convs for generate the lsn", "new_sources.append(pred3) for prediction in sources: new_sources.append(prediction) # apply multibox head", "the forward features into lower dimension. 
tmp1 = self.proj1(p) tmp2", "pred3 = torch.cat([agent3, convert3], dim=1) pred3 = self.merge3(pred3) new_sources.append(pred3) for", "to conv4_3 for k in range(22): x = self.base[k](x) conv4_3_bn", "self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0) self.pool2 = nn.MaxPool2d(kernel_size=(2, 2),", "forward(self, x): out = self.single_branch(x) return out class LRFNet(nn.Module): \"\"\"LRFNet", "self.proj2 = ConvBlock(512, 128, kernel_size=1, stride=1) self.proj3 = ConvBlock(256, 128,", "38), mode='bilinear') proj = torch.cat([proj1, proj2, proj3], dim=1) agent1 =", "cfg[i] * 4, kernel_size=3, padding=1)] conf_layers += [nn.Conv2d(v.out_channels, cfg[i] *", "6, 6, 6, 4, 4]} def build_net(phase, size=300, num_classes=81): if", "256 and size == 512: layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2),", "scores 2) conv2d for localization predictions 3) associated priorbox layer", "to reduce the feature dimensions of other levels self.proj1 =", "F class LDS(nn.Module): def __init__(self,): super(LDS, self).__init__() self.pool1 = nn.MaxPool2d(kernel_size=(2,", "= self.icn3(x_pool2_skip) w = self.Norm3(self.dsc2(p) + x * x_pool3_icn) elif", "padding=1)] else: loc_layers += [nn.Conv2d(vgg[v].out_channels, cfg[k] * 4, kernel_size=3, padding=1)]", "size=(10, 10), mode='bilinear') proj = proj3 agent3 = self.agent3(w) convert3", "layers, Shape: [2,num_priors*4] \"\"\" sources = list() loc = list()", "phase: (string) Can be \"test\" or \"train\" base: VGG16 layers", "512, stride=1) self.Norm4 = Relu_Conv(256, 256, stride=1) # convs for", "self.phase == \"test\": output = ( loc.view(loc.size(0), -1, 4), #", "kernel_size=1, stride=1, relu=False) def forward(self, x): out1 = self.part_a(x) out2", "size # vgg network self.base = nn.ModuleList(base) self.lds = LDS()", "= IBN(512, bn=True) self.ibn2 = IBN(1024, bn=True) self.relu = nn.ReLU(inplace=False)", "4, kernel_size=3, padding=1)] conf_layers +=[nn.Conv2d(512, cfg[k] * num_classes, kernel_size=3, 
padding=1)]", "of current level self.agent1 = ConvBlock(512, 256, kernel_size=1, stride=1) self.agent2", "if self.bn is not None: x = self.bn(x) return x", "kernel_size=1) # convs to merge the features of the current", "stride=1) # convs to reduce the feature dimensions of other", "'300': [6, 6, 6, 6, 4, 4]} def build_net(phase, size=300,", "the lsn and ssd features self.Norm1 = Relu_Conv(512, 512, stride=1)", "default bounding boxes specific to the layer's feature map size.", "architecture. Each multibox layer branches into 1) conv2d for class", "stride=2, padding=0) self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=1) def forward(self,", "1024, stride=2, padding=(1, 1)) self.dsc2 = Ds_Conv(1024, 512, stride=2, padding=(1,", "self.icn1(x_pool) s = self.Norm1(conv4_3_bn * x_pool1_icn) # apply vgg up", "ConvBlock(1024, 1024, kernel_size=3, stride=1, padding=1) self.merge3 = ConvBlock(512, 512, kernel_size=3,", "pred2 = self.merge2(pred2) new_sources.append(pred2) # The conv8 level proj3 =", "else: layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)] in_channels = v", "self.pool2(x_pool1) x_pool3 = self.pool3(x_pool2) return x_pool3 class ConvBlock(nn.Module): def __init__(self,", ") else: output = ( loc.view(loc.size(0), -1, 4), conf.view(conf.size(0), -1,", "stride=2, padding=(1, 1)) self.dsc3 = Ds_Conv(512, 256, stride=2, padding=(1, 1))", "and higher level features self.merge1 = ConvBlock(512, 512, kernel_size=3, stride=1,", "Can be \"test\" or \"train\" base: VGG16 layers for input,", "proj = torch.cat([proj1, proj2, proj3], dim=1) agent1 = self.agent1(s) convert1", "[] vgg_source = [1, -2] for k, v in enumerate(vgg_source):", "fc7 for k in range(22, 34): x = self.base[k](x) conv7_bn", "= out_planes inter_planes = out_planes // 4 self.part_a = ConvBlock(in_planes,", "kernel_size=1,stride=1)] layers += [ConvBlock(128, 256, kernel_size=3,stride=1)] layers += [ConvBlock(256, 128,", "padding=1) self.merge3 = 
ConvBlock(512, 512, kernel_size=3, stride=1, padding=1) self.ibn1 =", "self.conf = nn.ModuleList(head[1]) if self.phase == 'test': self.softmax = nn.Softmax()", "i += 1 return vgg, extra_layers, (loc_layers, conf_layers) mbox =", "nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.bn =", "v in enumerate(vgg_source): if k == 0: loc_layers += [nn.Conv2d(512,", "padding=1)] conf_layers +=[nn.Conv2d(512, cfg[k] * num_classes, kernel_size=3, padding=1)] else: loc_layers", "elif k == 2: q = self.Norm4(self.dsc3(w) + x) sources.append(q)", "= self.relu(x) return x class LSN_init(nn.Module): def __init__(self, in_planes, out_planes,", "self.relu = nn.ReLU(inplace=False) self.extras = nn.ModuleList(extras) self.loc = nn.ModuleList(head[0]) self.conf", "torch.cat([o.view(o.size(0), -1) for o in loc], 1) conf = torch.cat([o.view(o.size(0),", "num_classes, kernel_size=3, padding=1)] i += 1 return vgg, extra_layers, (loc_layers,", "out_planes inter_planes = out_planes // 4 self.part_a = nn.Sequential( ConvBlock(in_planes,", "None def forward(self, x): if self.bn is not None: x", "None self.relu = nn.ReLU(inplace=False) if relu else None def forward(self,", "= self.proj3(q) # The conv4_3 level proj1 = F.upsample(tmp1, size=(38,", "apply multibox head to source layers for (x, l, c)", "if bn else None def forward(self, x): if self.bn is", "size=(38, 38), mode='bilinear') proj2 = F.upsample(tmp2, size=(38, 38), mode='bilinear') proj3", "= nn.Sequential( ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=padding, relu=False) )", "self.base[k](x) conv4_3_bn = self.ibn1(x) x_pool1_skip, x_pool1_icn = self.icn1(x_pool) s =", "= self.single_branch(x) return out class Relu_Conv(nn.Module): def __init__(self, in_planes, out_planes,", "128, kernel_size=1,stride=1)] layers += [ConvBlock(128, 256, kernel_size=3,stride=1)] return layers extras", "other levels self.convert1 = 
ConvBlock(384, 256, kernel_size=1) self.convert2 = ConvBlock(256,", "self.Norm2(self.dsc1(s) + conv7_bn * x_pool2_icn) x = self.base[34](x) # apply", "x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip) w = self.Norm3(self.dsc2(p) + x *", "stride=1) self.icn2 = LSN_later(128, 1024, stride=2) self.icn3 = LSN_later(256, 512,", "if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=False)] else: layers +=", "layer to produce default bounding boxes specific to the layer's", "o in loc], 1) conf = torch.cat([o.view(o.size(0), -1) for o", "self.softmax = nn.Softmax() def forward(self, x): \"\"\"Applies network layers and", "self.icn3(x_pool2_skip) w = self.Norm3(self.dsc2(p) + x * x_pool3_icn) elif k", "self.lds(x) # apply vgg up to conv4_3 for k in", "to produce default bounding boxes specific to the layer's feature", "out_planes, stride=1): super(LSN_init, self).__init__() self.out_channels = out_planes inter_planes = out_planes", "v == 'C': layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)] else: conv2d", "self.icn2(x_pool1_skip) p = self.Norm2(self.dsc1(s) + conv7_bn * x_pool2_icn) x =", "v == 'S': if in_channels == 256 and size ==", "stride=stride, padding=1, relu=False) ) def forward(self, x): out = self.single_branch(x)", "512, kernel_size=1, stride=1) self.agent3 = ConvBlock(512, 256, kernel_size=1, stride=1) #", "vgg network self.base = nn.ModuleList(base) self.lds = LDS() # convs", "batch_norm=False): layers = [] in_channels = i for v in", "levels self.convert1 = ConvBlock(384, 256, kernel_size=1) self.convert2 = ConvBlock(256, 512,", "[nn.MaxPool2d(kernel_size=2, stride=2)] elif v == 'C': layers += [nn.MaxPool2d(kernel_size=2, stride=2,", "num_classes=81): if size != 300: print(\"Error: The input image size", "layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)] else: conv2d = nn.Conv2d(in_channels, v,", "+= [ConvBlock(128, 256, kernel_size=3,stride=1)] return layers extras = { '300':", "mbox = { '300': [6, 6, 6, 6, 4, 
4]}", "Shape: [batch,3,300,300]. Return: Depending on phase: test: list of concat", "up to conv4_3 for k in range(22): x = self.base[k](x)", "x_pool1_skip, x_pool1_icn = self.icn1(x_pool) s = self.Norm1(conv4_3_bn * x_pool1_icn) #", "self).__init__() self.out_channels = out_planes self.single_branch = nn.Sequential( ConvBlock(in_planes, out_planes, kernel_size=(3,", "self.convert2 = ConvBlock(256, 512, kernel_size=1) self.convert3 = ConvBlock(128, 256, kernel_size=1)", "0) or (k > indicator+1 and k % 2 !=", "Ds_Conv(512, 1024, stride=2, padding=(1, 1)) self.dsc2 = Ds_Conv(1024, 512, stride=2,", "= self.proj2(w) tmp3 = self.proj3(q) # The conv4_3 level proj1", "self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None", "128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512,", "[2,num_priors*4] train: list of concat outputs from: 1: confidence layers,", "kernel_size=(3, 3), stride=stride, padding=padding, relu=False) ) def forward(self, x): out", "convs to reduce the feature dimensions of other levels self.convert1", "out_planes, stride=1): super(Relu_Conv, self).__init__() self.out_channels = out_planes self.relu = nn.ReLU(inplace=False)", "out class Ds_Conv(nn.Module): def __init__(self, in_planes, out_planes, stride=1, padding=(1, 1)):", "or k == 7: sources.append(x) else: pass # project the", "[nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)] else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)", "stride=1) # convs for generate the lsn features self.icn1 =", "[One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)] in_channels = v layers += [ConvBlock(256,", "-1) for o in loc], 1) conf = torch.cat([o.view(o.size(0), -1)", "features self.icn1 = LSN_init(3, 512, stride=1) self.icn2 = LSN_later(128, 1024,", "agent1 = self.agent1(s) convert1 = self.convert1(proj) pred1 = torch.cat([agent1, convert1],", "project the forward features into lower dimension. 
tmp1 = self.proj1(p)", "new_sources.append(pred2) # The conv8 level proj3 = F.upsample(tmp3, size=(10, 10),", "'300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256,", "2), stride=2, padding=0) self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=1) def", "7: sources.append(x) else: pass # project the forward features into", "num_classes, kernel_size=3, padding=1)] else: loc_layers += [nn.Conv2d(vgg[v].out_channels, cfg[k] * 4,", "def add_extras(size, cfg, i, batch_norm=False): # Extra layers added to", "ConvBlock(256, 128, kernel_size=1, stride=1) # convs to reduce the feature", "x = self.base[34](x) # apply extra layers and cache source", "proj = torch.cat([proj2, proj3], dim=1) agent2 = self.agent2(p) convert2 =", "for (x, l, c) in zip(new_sources, self.loc, self.conf): loc.append(l(x).permute(0, 2,", "or (k > indicator+1 and k % 2 != 0):", "size=300, num_classes=81): if size != 300: print(\"Error: The input image", "2, 3, 1).contiguous()) loc = torch.cat([o.view(o.size(0), -1) for o in", "x class One_Three_Conv(nn.Module): def __init__(self, in_planes, out_planes, stride=1): super(One_Three_Conv, self).__init__()", "# apply extra layers and cache source layer outputs for", "(loc_layers, conf_layers) mbox = { '300': [6, 6, 6, 6,", "range(22): x = self.base[k](x) conv4_3_bn = self.ibn1(x) x_pool1_skip, x_pool1_icn =", "x_pool2_icn) x = self.base[34](x) # apply extra layers and cache", "512: layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)] else: layers +=", "input image or batch of images. Shape: [batch,3,300,300]. 
Return: Depending", "head, num_classes): super(LRFNet, self).__init__() self.phase = phase self.num_classes = num_classes", "padding=1), ConvBlock(inter_planes, inter_planes, kernel_size=1, stride=1), ConvBlock(inter_planes, inter_planes, kernel_size=(3, 3), stride=stride,", "= self.bn(x) if self.relu is not None: x = self.relu(x)", "kernel_size=3, stride=1, padding=1) self.ibn1 = IBN(512, bn=True) self.ibn2 = IBN(1024,", "x class LSN_init(nn.Module): def __init__(self, in_planes, out_planes, stride=1): super(LSN_init, self).__init__()", "= i for v in cfg: if v == 'M':", "is not supported!\") return return LRFNet(phase, size, *multibox(size, vgg(base[str(size)], 3),", "conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6) conv7 = nn.Conv2d(1024,", "None def forward(self, x): x = self.conv(x) if self.bn is", "nn.ReLU(inplace=False)] in_channels = v pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) conv6", "'S', 512, 'S', 256]} def multibox(size, vgg, extra_layers, cfg, num_classes):", "agent3 = self.agent3(w) convert3 = self.convert3(proj) pred3 = torch.cat([agent3, convert3],", "= LSN_later(128, 1024, stride=2) self.icn3 = LSN_later(256, 512, stride=2) #", "F.upsample(tmp3, size=(10, 10), mode='bilinear') proj = proj3 agent3 = self.agent3(w)", "size of either 300 or 512 extras: extra layers that", "return output def load_weights(self, base_file): other, ext = os.path.splitext(base_file) if", "super(LRFNet, self).__init__() self.phase = phase self.num_classes = num_classes self.size =", ") self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False) def forward(self,", "= num_classes self.size = size # vgg network self.base =", "v in enumerate(cfg): if in_channels != 'S': if v ==", "LRFNet(phase, size, *multibox(size, vgg(base[str(size)], 3), add_extras(size, extras[str(size)], 1024), mbox[str(size)], num_classes),", "x) sources.append(q) elif k == 5 or k == 7:", "2 indicator = 3 for k, v in enumerate(extra_layers): if", 
"== 256 and size == 512: layers += [One_Three_Conv(in_channels, cfg[k+1],", "super(Relu_Conv, self).__init__() self.out_channels = out_planes self.relu = nn.ReLU(inplace=False) self.single_branch =", "self.phase == 'test': self.softmax = nn.Softmax() def forward(self, x): \"\"\"Applies", "__init__(self, in_planes, out_planes, stride=1): super(LSN_later, self).__init__() self.out_channels = out_planes inter_planes", "num_classes, kernel_size=3, padding=1)] i = 2 indicator = 3 for", "self.dsc2 = Ds_Conv(1024, 512, stride=2, padding=(1, 1)) self.dsc3 = Ds_Conv(512,", "map size. Args: phase: (string) Can be \"test\" or \"train\"", "= F.upsample(tmp3, size=(10, 10), mode='bilinear') proj = proj3 agent3 =", "indicator+1 and k % 2 != 0): loc_layers += [nn.Conv2d(v.out_channels,", "+= [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)] in_channels = v layers +=", "def forward(self, x): out1 = self.part_a(x) out2 = self.part_b(out1) return", "= IBN(1024, bn=True) self.relu = nn.ReLU(inplace=False) self.extras = nn.ModuleList(extras) self.loc", "x): out1 = self.part_a(x) out2 = self.part_b(out1) return out1, out2", "Ds_Conv(nn.Module): def __init__(self, in_planes, out_planes, stride=1, padding=(1, 1)): super(Ds_Conv, self).__init__()", "x_pool2_icn = self.icn2(x_pool1_skip) p = self.Norm2(self.dsc1(s) + conv7_bn * x_pool2_icn)", "super(LSN_later, self).__init__() self.out_channels = out_planes inter_planes = out_planes // 4", "256, 256, 256, 'C', 512, 512, 512, 'M', 512, 512,", "kernel_size=3, padding=1)] conf_layers +=[nn.Conv2d(512, cfg[k] * num_classes, kernel_size=3, padding=1)] else:", "[conv2d, nn.ReLU(inplace=False)] in_channels = v pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)", "in_planes, out_planes, stride=1): super(Relu_Conv, self).__init__() self.out_channels = out_planes self.relu =", "stride=1, padding=(1, 1)): super(Ds_Conv, self).__init__() self.out_channels = out_planes self.single_branch =", "Args: x: input image or batch 
of images. Shape: [batch,3,300,300].", "256, kernel_size=1, stride=1) # convs to reduce the feature dimensions", "= self.bn(x) return x class One_Three_Conv(nn.Module): def __init__(self, in_planes, out_planes,", "1)): super(Ds_Conv, self).__init__() self.out_channels = out_planes self.single_branch = nn.Sequential( ConvBlock(in_planes,", "layers for input, size of either 300 or 512 extras:", "conv7, nn.ReLU(inplace=False)] return layers base = { '300': [64, 64,", "boxes specific to the layer's feature map size. Args: phase:", "k == 5 or k == 7: sources.append(x) else: pass", "v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] elif v ==", "+=[nn.Conv2d(512, cfg[k] * num_classes, kernel_size=3, padding=1)] else: loc_layers += [nn.Conv2d(vgg[v].out_channels,", "self.single_branch(x) return out class Relu_Conv(nn.Module): def __init__(self, in_planes, out_planes, stride=1):", "object detection The network is based on the SSD architecture.", "to the layer's feature map size. Args: phase: (string) Can", "self).__init__() self.out_channels = out_planes inter_planes = in_planes // 4 self.single_branch", "import torch import torch.nn as nn import os import torch.nn.functional", "or 512 extras: extra layers that feed to multibox loc", "other, ext = os.path.splitext(base_file) if ext == '.pkl' or '.pth':", "ConvBlock(in_planes, inter_planes, kernel_size=1, stride=1), ConvBlock(inter_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1,", "ConvBlock(1024, 512, kernel_size=1, stride=1) self.agent3 = ConvBlock(512, 256, kernel_size=1, stride=1)", "= ( loc.view(loc.size(0), -1, 4), # loc preds self.softmax(conf.view(-1, self.num_classes)),", "conv4_3 level proj1 = F.upsample(tmp1, size=(38, 38), mode='bilinear') proj2 =", "up to fc7 for k in range(22, 34): x =", "input image(s) x. 
Args: x: input image or batch of", "else: loc_layers += [nn.Conv2d(vgg[v].out_channels, cfg[k] * 4, kernel_size=3, padding=1)] conf_layers", "= phase self.num_classes = num_classes self.size = size # vgg", "branches into 1) conv2d for class conf scores 2) conv2d", "self.base[k](x) conv7_bn = self.ibn2(x) x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip) p =", "= self.single_branch(x) return out class LRFNet(nn.Module): \"\"\"LRFNet for object detection", "[nn.Conv2d(vgg[v].out_channels, cfg[k] * num_classes, kernel_size=3, padding=1)] i = 2 indicator", "class conf scores 2) conv2d for localization predictions 3) associated", "out = self.single_branch(x) return out class Ds_Conv(nn.Module): def __init__(self, in_planes,", "SSD architecture. Each multibox layer branches into 1) conv2d for", "def __init__(self, in_planes, out_planes, stride=1): super(Relu_Conv, self).__init__() self.out_channels = out_planes", "batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=False)] else: layers += [conv2d,", "x): if self.bn is not None: x = self.bn(x) return", "layers += [ConvBlock(128, 256, kernel_size=3,stride=1)] return layers extras = {", "512, stride=1) self.icn2 = LSN_later(128, 1024, stride=2) self.icn3 = LSN_later(256,", "convert2 = self.convert2(proj) pred2 = torch.cat([agent2, convert2], dim=1) pred2 =", "nn.ModuleList(head[1]) if self.phase == 'test': self.softmax = nn.Softmax() def forward(self,", "= Ds_Conv(1024, 512, stride=2, padding=(1, 1)) self.dsc3 = Ds_Conv(512, 256,", "[] in_channels = i flag = False for k, v", "enumerate(extra_layers): if (k < indicator+1 and k % 2 ==", "loc_layers += [nn.Conv2d(vgg[v].out_channels, cfg[k] * 4, kernel_size=3, padding=1)] conf_layers +=", "self.merge2(pred2) new_sources.append(pred2) # The conv8 level proj3 = F.upsample(tmp3, size=(10,", "512, kernel_size=3, stride=1, padding=1) self.ibn1 = IBN(512, bn=True) self.ibn2 =", "or \"train\" base: VGG16 layers for input, size of either", "extra_layers, cfg, 
num_classes): loc_layers = [] conf_layers = [] vgg_source", "# vgg network self.base = nn.ModuleList(base) self.lds = LDS() #", "self.Norm4(self.dsc3(w) + x) sources.append(q) elif k == 5 or k", "= ConvBlock(512, 512, kernel_size=3, stride=1, padding=1) self.merge2 = ConvBlock(1024, 1024,", "the SSD architecture. Each multibox layer branches into 1) conv2d", "layers, Shape: [2,num_priors*4] train: list of concat outputs from: 1:", "size == 512: layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)] else:", "x = self.bn(x) if self.relu is not None: x =", "padding=1, relu=False) ) def forward(self, x): out = self.single_branch(x) return", "specific to the layer's feature map size. Args: phase: (string)", "dimension. tmp1 = self.proj1(p) tmp2 = self.proj2(w) tmp3 = self.proj3(q)", "Each multibox layer branches into 1) conv2d for class conf", "self.num_classes)), # conf preds ) else: output = ( loc.view(loc.size(0),", "64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512,", "128, kernel_size=1, stride=1) self.proj2 = ConvBlock(512, 128, kernel_size=1, stride=1) self.proj3", "proj3 agent3 = self.agent3(w) convert3 = self.convert3(proj) pred3 = torch.cat([agent3,", "# apply vgg up to fc7 for k in range(22,", "x): \"\"\"Applies network layers and ops on input image(s) x.", "+= [nn.Conv2d(vgg[v].out_channels, cfg[k] * num_classes, kernel_size=3, padding=1)] i = 2", "stride=2) # convs with s=2 to downsample the features self.dsc1", "LSN_init(nn.Module): def __init__(self, in_planes, out_planes, stride=1): super(LSN_init, self).__init__() self.out_channels =", "= Relu_Conv(512, 512, stride=1) self.Norm2 = Relu_Conv(1024, 1024, stride=1) self.Norm3", "self.dsc1 = Ds_Conv(512, 1024, stride=2, padding=(1, 1)) self.dsc2 = Ds_Conv(1024,", ".pkl files supported.') def vgg(cfg, i, batch_norm=False): layers = []", "padding=1) ) def forward(self, x): x = self.relu(x) out =", "preds ) else: output = ( loc.view(loc.size(0), -1, 4), conf.view(conf.size(0),", "== 
0: loc_layers += [nn.Conv2d(512, cfg[k] * 4, kernel_size=3, padding=1)]", "in_channels = v pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) conv6 =", "conf conv layers \"\"\" def __init__(self, phase, size, base, extras,", "cache source layer outputs for k, v in enumerate(self.extras): x", "stride=1) self.proj3 = ConvBlock(256, 128, kernel_size=1, stride=1) # convs to", "ConvBlock(512, 256, kernel_size=1, stride=1) self.agent2 = ConvBlock(1024, 512, kernel_size=1, stride=1)", "x = self.base[k](x) conv7_bn = self.ibn2(x) x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip)", "IBN(1024, bn=True) self.relu = nn.ReLU(inplace=False) self.extras = nn.ModuleList(extras) self.loc =", "for input, size of either 300 or 512 extras: extra", "in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):", "source layers for (x, l, c) in zip(new_sources, self.loc, self.conf):", "!= 0): loc_layers += [nn.Conv2d(v.out_channels, cfg[i] * 4, kernel_size=3, padding=1)]", "loc_layers += [nn.Conv2d(512, cfg[k] * 4, kernel_size=3, padding=1)] conf_layers +=[nn.Conv2d(512,", "= self.single_branch(x) return out class Ds_Conv(nn.Module): def __init__(self, in_planes, out_planes,", "k, v in enumerate(self.extras): x = v(x) if k ==", "self.merge3 = ConvBlock(512, 512, kernel_size=3, stride=1, padding=1) self.ibn1 = IBN(512,", "to the initial image x_pool = self.lds(x) # apply vgg", "256, stride=2, padding=(1, 1)) # convs to reduce the feature", "= self.icn1(x_pool) s = self.Norm1(conv4_3_bn * x_pool1_icn) # apply vgg", "[1, -2] for k, v in enumerate(vgg_source): if k ==", "Args: phase: (string) Can be \"test\" or \"train\" base: VGG16", "loc_layers += [nn.Conv2d(v.out_channels, cfg[i] * 4, kernel_size=3, padding=1)] conf_layers +=", "stride=1, padding=1) self.merge3 = ConvBlock(512, 512, kernel_size=3, stride=1, padding=1) self.ibn1", "s = self.Norm1(conv4_3_bn * x_pool1_icn) # apply vgg up to", "dimensions of current level self.agent1 
= ConvBlock(512, 256, kernel_size=1, stride=1)", "self.single_branch(x) return out class Ds_Conv(nn.Module): def __init__(self, in_planes, out_planes, stride=1,", "stride=1, padding=1) self.ibn1 = IBN(512, bn=True) self.ibn2 = IBN(1024, bn=True)", "# apply vgg up to conv4_3 for k in range(22):", "priorbox layers, Shape: [2,num_priors*4] \"\"\" sources = list() loc =", "'S': if in_channels == 256 and size == 512: layers", "padding=1) self.ibn1 = IBN(512, bn=True) self.ibn2 = IBN(1024, bn=True) self.relu", "__init__(self,): super(LDS, self).__init__() self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0) self.pool2", "self.convert3 = ConvBlock(128, 256, kernel_size=1) # convs to merge the", "stride=stride, padding=1) ) def forward(self, x): x = self.relu(x) out", "proj3 = F.upsample(tmp3, size=(38, 38), mode='bilinear') proj = torch.cat([proj1, proj2,", "nn.ReLU(inplace=False)] else: layers += [conv2d, nn.ReLU(inplace=False)] in_channels = v pool5", "out_planes, bn=True): super(IBN, self).__init__() self.out_channels = out_planes self.bn = nn.BatchNorm2d(out_planes,", "nn.ReLU(inplace=False) self.single_branch = nn.Sequential( ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1)", "if in_channels != 'S': if v == 'S': if in_channels", "inter_planes, kernel_size=(3, 3), stride=stride, padding=1) ) self.part_b = ConvBlock(inter_planes, out_planes,", "Relu_Conv(512, 512, stride=1) self.Norm4 = Relu_Conv(256, 256, stride=1) # convs", "Relu_Conv(1024, 1024, stride=1) self.Norm3 = Relu_Conv(512, 512, stride=1) self.Norm4 =", "out_planes, stride=1): super(One_Three_Conv, self).__init__() self.out_channels = out_planes inter_planes = in_planes", "for k in range(22): x = self.base[k](x) conv4_3_bn = self.ibn1(x)", "\"train\" base: VGG16 layers for input, size of either 300", "self.conv(x) if self.bn is not None: x = self.bn(x) if", "6, 6, 4, 4]} def build_net(phase, size=300, num_classes=81): if size", "x_pool = self.lds(x) # apply vgg up 
to conv4_3 for", "level self.agent1 = ConvBlock(512, 256, kernel_size=1, stride=1) self.agent2 = ConvBlock(1024,", "the feature dimensions of other levels self.convert1 = ConvBlock(384, 256,", "stride=1): super(One_Three_Conv, self).__init__() self.out_channels = out_planes inter_planes = in_planes //", "= self.Norm2(self.dsc1(s) + conv7_bn * x_pool2_icn) x = self.base[34](x) #", "stride=stride, padding=1) ) self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False)", "out_planes, kernel_size=(3, 3), stride=stride, padding=1, relu=False) ) def forward(self, x):", "> indicator+1 and k % 2 != 0): loc_layers +=", "self.agent1(s) convert1 = self.convert1(proj) pred1 = torch.cat([agent1, convert1], dim=1) pred1", "self.ibn2(x) x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip) p = self.Norm2(self.dsc1(s) + conv7_bn", "super(One_Three_Conv, self).__init__() self.out_channels = out_planes inter_planes = in_planes // 4", "-1, 4), conf.view(conf.size(0), -1, self.num_classes), ) return output def load_weights(self,", "< indicator+1 and k % 2 == 0) or (k", "pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) conv6 = nn.Conv2d(512, 1024, kernel_size=3,", "= [] in_channels = i for v in cfg: if", "self.out_channels = out_planes inter_planes = out_planes // 4 self.part_a =", "bn=True, bias=False): super(ConvBlock, self).__init__() self.out_channels = out_planes self.conv = nn.Conv2d(in_planes,", "kernel_size=1, stride=1) self.proj2 = ConvBlock(512, 128, kernel_size=1, stride=1) self.proj3 =", "= self.ibn2(x) x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip) p = self.Norm2(self.dsc1(s) +", "def __init__(self, out_planes, bn=True): super(IBN, self).__init__() self.out_channels = out_planes self.bn", "convert1], dim=1) pred1 = self.merge1(pred1) new_sources.append(pred1) # The fc_7 level", "-1) for o in conf], 1) if self.phase == \"test\":", "vgg up to fc7 for k in range(22, 34): x", "kernel_size=3, padding=1)] i = 2 indicator = 3 for k,", "size. 
Args: phase: (string) Can be \"test\" or \"train\" base:", "= self.part_a(x) out2 = self.part_b(out1) return out1, out2 class IBN(nn.Module):", "= 2 indicator = 3 for k, v in enumerate(extra_layers):", "class LDS(nn.Module): def __init__(self,): super(LDS, self).__init__() self.pool1 = nn.MaxPool2d(kernel_size=(2, 2),", "apply extra layers and cache source layer outputs for k,", "__init__(self, in_planes, out_planes, stride=1): super(LSN_init, self).__init__() self.out_channels = out_planes inter_planes", "out_planes // 4 self.part_a = ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride,", "+= [nn.Conv2d(vgg[v].out_channels, cfg[k] * 4, kernel_size=3, padding=1)] conf_layers += [nn.Conv2d(vgg[v].out_channels,", "with s=2 to downsample the features self.dsc1 = Ds_Conv(512, 1024,", "print('Finished!') else: print('Sorry only .pth and .pkl files supported.') def", "class LSN_init(nn.Module): def __init__(self, in_planes, out_planes, stride=1): super(LSN_init, self).__init__() self.out_channels", "VGG for feature scaling layers = [] in_channels = i", "for k, v in enumerate(extra_layers): if (k < indicator+1 and", "x = self.conv(x) if self.bn is not None: x =", "i, batch_norm=False): # Extra layers added to VGG for feature", "torch.cat([proj2, proj3], dim=1) agent2 = self.agent2(p) convert2 = self.convert2(proj) pred2", "and .pkl files supported.') def vgg(cfg, i, batch_norm=False): layers =", "add_extras(size, cfg, i, batch_norm=False): # Extra layers added to VGG", "= 3 for k, v in enumerate(extra_layers): if (k <", "self.proj1 = ConvBlock(1024, 128, kernel_size=1, stride=1) self.proj2 = ConvBlock(512, 128,", "out_planes, kernel_size=(3, 3), stride=stride, padding=1) ) def forward(self, x): x", "self.load_state_dict(torch.load(base_file)) print('Finished!') else: print('Sorry only .pth and .pkl files supported.')", "cfg, num_classes): loc_layers = [] conf_layers = [] vgg_source =", "= torch.cat([proj1, proj2, proj3], dim=1) agent1 = self.agent1(s) 
convert1 =", "= F.upsample(tmp2, size=(19, 19), mode='bilinear') proj3 = F.upsample(tmp3, size=(19, 19),", "if self.phase == \"test\": output = ( loc.view(loc.size(0), -1, 4),", "ConvBlock(512, 512, kernel_size=3, stride=1, padding=1) self.merge2 = ConvBlock(1024, 1024, kernel_size=3,", "self.icn3 = LSN_later(256, 512, stride=2) # convs with s=2 to", "cfg[k+1], stride=2), nn.ReLU(inplace=False)] in_channels = v layers += [ConvBlock(256, 128,", "The network is based on the SSD architecture. Each multibox", "conf_layers += [nn.Conv2d(vgg[v].out_channels, cfg[k] * num_classes, kernel_size=3, padding=1)] i =", "= nn.Softmax() def forward(self, x): \"\"\"Applies network layers and ops", "+ x) sources.append(q) elif k == 5 or k ==", "out2 = self.part_b(out1) return out1, out2 class IBN(nn.Module): def __init__(self,", "feature dimensions of current level self.agent1 = ConvBlock(512, 256, kernel_size=1,", "(x, l, c) in zip(new_sources, self.loc, self.conf): loc.append(l(x).permute(0, 2, 3,", "__init__(self, in_planes, out_planes, stride=1, padding=(1, 1)): super(Ds_Conv, self).__init__() self.out_channels =", "self.agent2(p) convert2 = self.convert2(proj) pred2 = torch.cat([agent2, convert2], dim=1) pred2", "= self.pool1(x) x_pool2 = self.pool2(x_pool1) x_pool3 = self.pool3(x_pool2) return x_pool3", "self.part_b(out1) return out1, out2 class LSN_later(nn.Module): def __init__(self, in_planes, out_planes,", "= ConvBlock(1024, 512, kernel_size=1, stride=1) self.agent3 = ConvBlock(512, 256, kernel_size=1,", "1)) self.dsc3 = Ds_Conv(512, 256, stride=2, padding=(1, 1)) # convs", "head to source layers for (x, l, c) in zip(new_sources,", "dim=1) pred2 = self.merge2(pred2) new_sources.append(pred2) # The conv8 level proj3", "= ConvBlock(256, 128, kernel_size=1, stride=1) # convs to reduce the", "= out_planes self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation,", "nn.ReLU(inplace=False)] else: layers += 
[One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)] in_channels =", "nn.ReLU(inplace=False), conv7, nn.ReLU(inplace=False)] return layers base = { '300': [64,", "conv4_3 for k in range(22): x = self.base[k](x) conv4_3_bn =", "out2 = self.part_b(out1) return out1, out2 class LSN_later(nn.Module): def __init__(self,", "added to VGG for feature scaling layers = [] in_channels", "= ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1) self.part_b = ConvBlock(inter_planes,", "= self.part_b(out1) return out1, out2 class LSN_later(nn.Module): def __init__(self, in_planes,", "predictions 3) associated priorbox layer to produce default bounding boxes", "bn else None self.relu = nn.ReLU(inplace=False) if relu else None", "and cache source layer outputs for k, v in enumerate(self.extras):", "layers, Shape: [batch*num_priors,num_classes] 2: localization layers, Shape: [batch,num_priors*4] 3: priorbox", "ConvBlock(256, 512, kernel_size=1) self.convert3 = ConvBlock(128, 256, kernel_size=1) # convs", "-1, self.num_classes), ) return output def load_weights(self, base_file): other, ext", "512, 'S', 256]} def multibox(size, vgg, extra_layers, cfg, num_classes): loc_layers", "kernel_size=3, padding=1)] else: loc_layers += [nn.Conv2d(vgg[v].out_channels, cfg[k] * 4, kernel_size=3,", "and ops on input image(s) x. 
Args: x: input image", "512, stride=2, padding=(1, 1)) self.dsc3 = Ds_Conv(512, 256, stride=2, padding=(1,", ") return output def load_weights(self, base_file): other, ext = os.path.splitext(base_file)", "= [1, -2] for k, v in enumerate(vgg_source): if k", "38), mode='bilinear') proj3 = F.upsample(tmp3, size=(38, 38), mode='bilinear') proj =", "if bn else None self.relu = nn.ReLU(inplace=False) if relu else", "if relu else None def forward(self, x): x = self.conv(x)", "self.extras = nn.ModuleList(extras) self.loc = nn.ModuleList(head[0]) self.conf = nn.ModuleList(head[1]) if", "lsn features self.icn1 = LSN_init(3, 512, stride=1) self.icn2 = LSN_later(128,", "mode='bilinear') proj2 = F.upsample(tmp2, size=(38, 38), mode='bilinear') proj3 = F.upsample(tmp3,", "[nn.Conv2d(vgg[v].out_channels, cfg[k] * 4, kernel_size=3, padding=1)] conf_layers += [nn.Conv2d(vgg[v].out_channels, cfg[k]", "stride=1) self.Norm4 = Relu_Conv(256, 256, stride=1) # convs for generate", "def multibox(size, vgg, extra_layers, cfg, num_classes): loc_layers = [] conf_layers", "VGG16 layers for input, size of either 300 or 512", "for k, v in enumerate(self.extras): x = v(x) if k", "[pool5, conv6, nn.ReLU(inplace=False), conv7, nn.ReLU(inplace=False)] return layers base = {", "self.convert1(proj) pred1 = torch.cat([agent1, convert1], dim=1) pred1 = self.merge1(pred1) new_sources.append(pred1)", "= proj3 agent3 = self.agent3(w) convert3 = self.convert3(proj) pred3 =", "from: 1: softmax layers, Shape: [batch*num_priors,num_classes] 2: localization layers, Shape:", "padding=1)] i += 1 return vgg, extra_layers, (loc_layers, conf_layers) mbox", "merging the lsn and ssd features self.Norm1 = Relu_Conv(512, 512,", "enumerate(cfg): if in_channels != 'S': if v == 'S': if", "LSN_init(3, 512, stride=1) self.icn2 = LSN_later(128, 1024, stride=2) self.icn3 =", "for localization predictions 3) associated priorbox layer to produce default", "nn.ModuleList(base) self.lds = LDS() # convs for merging the lsn", "128, 
'M', 256, 256, 256, 'C', 512, 512, 512, 'M',", "out_planes inter_planes = out_planes // 4 self.part_a = ConvBlock(in_planes, inter_planes,", "output = ( loc.view(loc.size(0), -1, 4), # loc preds self.softmax(conf.view(-1,", "and conf conv layers \"\"\" def __init__(self, phase, size, base,", "multibox layer branches into 1) conv2d for class conf scores", "into state dict...') self.load_state_dict(torch.load(base_file)) print('Finished!') else: print('Sorry only .pth and", "stride=2), nn.ReLU(inplace=False)] else: layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)] in_channels", "super(LSN_init, self).__init__() self.out_channels = out_planes inter_planes = out_planes // 4", "IBN(512, bn=True) self.ibn2 = IBN(1024, bn=True) self.relu = nn.ReLU(inplace=False) self.extras", "# convs to merge the features of the current and", "padding=padding, dilation=dilation, groups=groups, bias=bias) self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True)", "The conv4_3 level proj1 = F.upsample(tmp1, size=(38, 38), mode='bilinear') proj2", "(k > indicator+1 and k % 2 != 0): loc_layers", "x_pool1_icn = self.icn1(x_pool) s = self.Norm1(conv4_3_bn * x_pool1_icn) # apply", "self.conf): loc.append(l(x).permute(0, 2, 3, 1).contiguous()) conf.append(c(x).permute(0, 2, 3, 1).contiguous()) loc", "s=2 to downsample the features self.dsc1 = Ds_Conv(512, 1024, stride=2,", "sources: new_sources.append(prediction) # apply multibox head to source layers for", "extras = { '300': [1024, 'S', 512, 'S', 256]} def", "conv7_bn = self.ibn2(x) x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip) p = self.Norm2(self.dsc1(s)", "pred1 = torch.cat([agent1, convert1], dim=1) pred1 = self.merge1(pred1) new_sources.append(pred1) #", "apply vgg up to conv4_3 for k in range(22): x", "for class conf scores 2) conv2d for localization predictions 3)", "ConvBlock(128, 256, kernel_size=1) # convs to merge the features of", "+= [nn.MaxPool2d(kernel_size=2, stride=2, 
ceil_mode=True)] else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3,", "stride=1, padding=1) self.merge2 = ConvBlock(1024, 1024, kernel_size=3, stride=1, padding=1) self.merge3", "loc_layers = [] conf_layers = [] vgg_source = [1, -2]", "stride=1), ConvBlock(inter_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1) ) self.part_b =", "# convs to reduce the feature dimensions of other levels", "= torch.cat([agent2, convert2], dim=1) pred2 = self.merge2(pred2) new_sources.append(pred2) # The", "tmp2 = self.proj2(w) tmp3 = self.proj3(q) # The conv4_3 level", "concat outputs from: 1: softmax layers, Shape: [batch*num_priors,num_classes] 2: localization", "higher level features self.merge1 = ConvBlock(512, 512, kernel_size=3, stride=1, padding=1)", "3, 1).contiguous()) conf.append(c(x).permute(0, 2, 3, 1).contiguous()) loc = torch.cat([o.view(o.size(0), -1)", "4), # loc preds self.softmax(conf.view(-1, self.num_classes)), # conf preds )", "One_Three_Conv(nn.Module): def __init__(self, in_planes, out_planes, stride=1): super(One_Three_Conv, self).__init__() self.out_channels =", "dilation=6) conv7 = nn.Conv2d(1024, 1024, kernel_size=1) layers += [pool5, conv6,", "= nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=1) def forward(self, x): x_pool1 =", "4]} def build_net(phase, size=300, num_classes=81): if size != 300: print(\"Error:", "kernel_size=1, stride=1), ConvBlock(inter_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1) ) self.part_b", "dim=1) pred3 = self.merge3(pred3) new_sources.append(pred3) for prediction in sources: new_sources.append(prediction)", "kernel_size=1, stride=1) # convs to reduce the feature dimensions of", "= ConvBlock(128, 256, kernel_size=1) # convs to merge the features", "in range(22, 34): x = self.base[k](x) conv7_bn = self.ibn2(x) x_pool2_skip,", "= F.upsample(tmp1, size=(38, 38), mode='bilinear') proj2 = F.upsample(tmp2, size=(38, 38),", "def forward(self, x): x_pool1 = self.pool1(x) x_pool2 = 
self.pool2(x_pool1) x_pool3", "forward features into lower dimension. tmp1 = self.proj1(p) tmp2 =", "train: list of concat outputs from: 1: confidence layers, Shape:", "convs to reduce the feature dimensions of current level self.agent1", "supported.') def vgg(cfg, i, batch_norm=False): layers = [] in_channels =", "self.part_a = nn.Sequential( ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1), ConvBlock(inter_planes,", "= F.upsample(tmp3, size=(38, 38), mode='bilinear') proj = torch.cat([proj1, proj2, proj3],", "vgg, extra_layers, (loc_layers, conf_layers) mbox = { '300': [6, 6,", "x): out = self.single_branch(x) return out class LRFNet(nn.Module): \"\"\"LRFNet for", "conf preds ) else: output = ( loc.view(loc.size(0), -1, 4),", "torch.cat([agent3, convert3], dim=1) pred3 = self.merge3(pred3) new_sources.append(pred3) for prediction in", "out_planes, kernel_size=1, stride=1, relu=False) def forward(self, x): out1 = self.part_a(x)", "lower dimension. tmp1 = self.proj1(p) tmp2 = self.proj2(w) tmp3 =", "+= [ConvBlock(128, 256, kernel_size=3,stride=1)] layers += [ConvBlock(256, 128, kernel_size=1,stride=1)] layers", "list() new_sources = list() # apply lds to the initial", "( loc.view(loc.size(0), -1, 4), conf.view(conf.size(0), -1, self.num_classes), ) return output", "19), mode='bilinear') proj = torch.cat([proj2, proj3], dim=1) agent2 = self.agent2(p)", "* 4, kernel_size=3, padding=1)] conf_layers +=[nn.Conv2d(512, cfg[k] * num_classes, kernel_size=3,", "dict...') self.load_state_dict(torch.load(base_file)) print('Finished!') else: print('Sorry only .pth and .pkl files", "def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True,", "into 1) conv2d for class conf scores 2) conv2d for", "be \"test\" or \"train\" base: VGG16 layers for input, size", "convs with s=2 to downsample the features self.dsc1 = Ds_Conv(512,", "build_net(phase, size=300, num_classes=81): if size != 300: 
print(\"Error: The input", "nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=1) def forward(self, x): x_pool1 = self.pool1(x)", "= self.merge2(pred2) new_sources.append(pred2) # The conv8 level proj3 = F.upsample(tmp3,", "= v layers += [ConvBlock(256, 128, kernel_size=1,stride=1)] layers += [ConvBlock(128,", "* 4, kernel_size=3, padding=1)] conf_layers += [nn.Conv2d(v.out_channels, cfg[i] * num_classes,", "mode='bilinear') proj = torch.cat([proj1, proj2, proj3], dim=1) agent1 = self.agent1(s)", "if v == 'S': if in_channels == 256 and size", "self.proj2(w) tmp3 = self.proj3(q) # The conv4_3 level proj1 =", "[1024, 'S', 512, 'S', 256]} def multibox(size, vgg, extra_layers, cfg,", "\"test\" or \"train\" base: VGG16 layers for input, size of", "level proj1 = F.upsample(tmp1, size=(38, 38), mode='bilinear') proj2 = F.upsample(tmp2,", "== 'test': self.softmax = nn.Softmax() def forward(self, x): \"\"\"Applies network", "level proj2 = F.upsample(tmp2, size=(19, 19), mode='bilinear') proj3 = F.upsample(tmp3,", "loc preds self.softmax(conf.view(-1, self.num_classes)), # conf preds ) else: output", "{ '300': [6, 6, 6, 6, 4, 4]} def build_net(phase,", "= nn.Conv2d(1024, 1024, kernel_size=1) layers += [pool5, conv6, nn.ReLU(inplace=False), conv7,", "__init__(self, out_planes, bn=True): super(IBN, self).__init__() self.out_channels = out_planes self.bn =", "vgg(cfg, i, batch_norm=False): layers = [] in_channels = i for", "momentum=0.01, affine=True) if bn else None def forward(self, x): if", "nn.Softmax() def forward(self, x): \"\"\"Applies network layers and ops on", "to source layers for (x, l, c) in zip(new_sources, self.loc,", "ConvBlock(inter_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1) ) self.part_b = ConvBlock(inter_planes,", "1).contiguous()) conf.append(c(x).permute(0, 2, 3, 1).contiguous()) loc = torch.cat([o.view(o.size(0), -1) for", "self.proj3 = ConvBlock(256, 128, kernel_size=1, stride=1) # convs to reduce", "dimensions of other levels 
self.convert1 = ConvBlock(384, 256, kernel_size=1) self.convert2", "'M', 256, 256, 256, 'C', 512, 512, 512, 'M', 512,", "+= [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)] else: layers += [One_Three_Conv(in_channels, cfg[k+1],", "list() loc = list() conf = list() new_sources = list()", "= self.pool2(x_pool1) x_pool3 = self.pool3(x_pool2) return x_pool3 class ConvBlock(nn.Module): def", "[batch,num_priors*4] 3: priorbox layers, Shape: [2,num_priors*4] train: list of concat", "apply lds to the initial image x_pool = self.lds(x) #", "self.proj1(p) tmp2 = self.proj2(w) tmp3 = self.proj3(q) # The conv4_3", "= self.Norm3(self.dsc2(p) + x * x_pool3_icn) elif k == 2:", "kernel_size=3,stride=1)] layers += [ConvBlock(256, 128, kernel_size=1,stride=1)] layers += [ConvBlock(128, 256,", "kernel_size=1,stride=1)] layers += [ConvBlock(128, 256, kernel_size=3,stride=1)] return layers extras =", "# convs for merging the lsn and ssd features self.Norm1", "inter_planes, kernel_size=1, stride=1), ConvBlock(inter_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1, relu=False)", "levels self.proj1 = ConvBlock(1024, 128, kernel_size=1, stride=1) self.proj2 = ConvBlock(512,", "convs to reduce the feature dimensions of other levels self.proj1", "ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1) ) def forward(self, x):", "affine=True) if bn else None def forward(self, x): if self.bn", "stride=1), ConvBlock(inter_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1, relu=False) ) def", "= self.agent2(p) convert2 = self.convert2(proj) pred2 = torch.cat([agent2, convert2], dim=1)", "'C': layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)] else: conv2d = nn.Conv2d(in_channels,", "k in range(22): x = self.base[k](x) conv4_3_bn = self.ibn1(x) x_pool1_skip,", "False for k, v in enumerate(cfg): if in_channels != 'S':", "def vgg(cfg, i, batch_norm=False): layers = [] in_channels = i", "for k, v in 
enumerate(vgg_source): if k == 0: loc_layers", "padding=1) self.merge2 = ConvBlock(1024, 1024, kernel_size=3, stride=1, padding=1) self.merge3 =", "self.bn(x) return x class One_Three_Conv(nn.Module): def __init__(self, in_planes, out_planes, stride=1):", "conv8 level proj3 = F.upsample(tmp3, size=(10, 10), mode='bilinear') proj =", "return LRFNet(phase, size, *multibox(size, vgg(base[str(size)], 3), add_extras(size, extras[str(size)], 1024), mbox[str(size)],", "x): x = self.relu(x) out = self.single_branch(x) return out class", "loc.append(l(x).permute(0, 2, 3, 1).contiguous()) conf.append(c(x).permute(0, 2, 3, 1).contiguous()) loc =", "stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01,", "= self.convert3(proj) pred3 = torch.cat([agent3, convert3], dim=1) pred3 = self.merge3(pred3)", "forward(self, x): x = self.conv(x) if self.bn is not None:", "stride=1): super(LSN_later, self).__init__() self.out_channels = out_planes inter_planes = out_planes //", "+= [nn.Conv2d(512, cfg[k] * 4, kernel_size=3, padding=1)] conf_layers +=[nn.Conv2d(512, cfg[k]", "[ConvBlock(128, 256, kernel_size=3,stride=1)] layers += [ConvBlock(256, 128, kernel_size=1,stride=1)] layers +=", "input, size of either 300 or 512 extras: extra layers", "loc = list() conf = list() new_sources = list() #", "if self.relu is not None: x = self.relu(x) return x", "= self.Norm4(self.dsc3(w) + x) sources.append(q) elif k == 5 or", "pred3 = self.merge3(pred3) new_sources.append(pred3) for prediction in sources: new_sources.append(prediction) #", "kernel_size=(3, 3), stride=stride, padding=1), ConvBlock(inter_planes, inter_planes, kernel_size=1, stride=1), ConvBlock(inter_planes, inter_planes,", "# convs with s=2 to downsample the features self.dsc1 =", "list() # apply lds to the initial image x_pool =", "\"multibox head\" consists of loc and conf conv layers \"\"\"", "proj2 = F.upsample(tmp2, size=(19, 19), 
mode='bilinear') proj3 = F.upsample(tmp3, size=(19,", "'.pkl' or '.pth': print('Loading weights into state dict...') self.load_state_dict(torch.load(base_file)) print('Finished!')", "torch.nn as nn import os import torch.nn.functional as F class", "convs for merging the lsn and ssd features self.Norm1 =", "k % 2 != 0): loc_layers += [nn.Conv2d(v.out_channels, cfg[i] *", "nn.ReLU(inplace=False) if relu else None def forward(self, x): x =", "bias=False): super(ConvBlock, self).__init__() self.out_channels = out_planes self.conv = nn.Conv2d(in_planes, out_planes,", "pred2 = torch.cat([agent2, convert2], dim=1) pred2 = self.merge2(pred2) new_sources.append(pred2) #", "stride=1) self.Norm3 = Relu_Conv(512, 512, stride=1) self.Norm4 = Relu_Conv(256, 256,", "self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0) self.pool3 = nn.MaxPool2d(kernel_size=(2, 2),", "self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False) def forward(self, x):", "detection The network is based on the SSD architecture. 
Each", "3, 1).contiguous()) loc = torch.cat([o.view(o.size(0), -1) for o in loc],", "multibox(size, vgg, extra_layers, cfg, num_classes): loc_layers = [] conf_layers =", "size=(19, 19), mode='bilinear') proj = torch.cat([proj2, proj3], dim=1) agent2 =", "self).__init__() self.out_channels = out_planes self.relu = nn.ReLU(inplace=False) self.single_branch = nn.Sequential(", "features self.merge1 = ConvBlock(512, 512, kernel_size=3, stride=1, padding=1) self.merge2 =", "out_planes // 4 self.part_a = nn.Sequential( ConvBlock(in_planes, inter_planes, kernel_size=(3, 3),", "return vgg, extra_layers, (loc_layers, conf_layers) mbox = { '300': [6,", "padding=1)] i = 2 indicator = 3 for k, v", "conf_layers) mbox = { '300': [6, 6, 6, 6, 4,", "l, c) in zip(new_sources, self.loc, self.conf): loc.append(l(x).permute(0, 2, 3, 1).contiguous())", "// 4 self.single_branch = nn.Sequential( ConvBlock(in_planes, inter_planes, kernel_size=1, stride=1), ConvBlock(inter_planes,", "extras, head, num_classes): super(LRFNet, self).__init__() self.phase = phase self.num_classes =", "is not None: x = self.bn(x) return x class One_Three_Conv(nn.Module):", "convert2], dim=1) pred2 = self.merge2(pred2) new_sources.append(pred2) # The conv8 level", "k in range(22, 34): x = self.base[k](x) conv7_bn = self.ibn2(x)", "forward(self, x): out1 = self.part_a(x) out2 = self.part_b(out1) return out1,", "that feed to multibox loc and conf layers head: \"multibox", "self).__init__() self.out_channels = out_planes self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True)", "in range(22): x = self.base[k](x) conv4_3_bn = self.ibn1(x) x_pool1_skip, x_pool1_icn", "== 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] elif v == 'C':", "else None def forward(self, x): x = self.conv(x) if self.bn", "out2 class IBN(nn.Module): def __init__(self, out_planes, bn=True): super(IBN, self).__init__() self.out_channels", "for object detection The network is based on the SSD", "nn.ModuleList(head[0]) 
self.conf = nn.ModuleList(head[1]) if self.phase == 'test': self.softmax =", "None: x = self.bn(x) return x class One_Three_Conv(nn.Module): def __init__(self,", "= self.icn2(x_pool1_skip) p = self.Norm2(self.dsc1(s) + conv7_bn * x_pool2_icn) x", "= F.upsample(tmp2, size=(38, 38), mode='bilinear') proj3 = F.upsample(tmp3, size=(38, 38),", "self.convert2(proj) pred2 = torch.cat([agent2, convert2], dim=1) pred2 = self.merge2(pred2) new_sources.append(pred2)", "dilation=dilation, groups=groups, bias=bias) self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if", "layers = [] in_channels = i for v in cfg:", "to reduce the feature dimensions of current level self.agent1 =", "nn.ReLU(inplace=False)] in_channels = v layers += [ConvBlock(256, 128, kernel_size=1,stride=1)] layers", "% 2 == 0) or (k > indicator+1 and k", "def __init__(self, in_planes, out_planes, stride=1): super(One_Three_Conv, self).__init__() self.out_channels = out_planes", "groups=groups, bias=bias) self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn", "self.bn is not None: x = self.bn(x) return x class", "stride=stride, padding=padding, relu=False) ) def forward(self, x): out = self.single_branch(x)", "conf_layers += [nn.Conv2d(v.out_channels, cfg[i] * num_classes, kernel_size=3, padding=1)] i +=", "2 != 0): loc_layers += [nn.Conv2d(v.out_channels, cfg[i] * 4, kernel_size=3,", "out_planes, kernel_size=(3, 3), stride=stride, padding=padding, relu=False) ) def forward(self, x):", "= { '300': [6, 6, 6, 6, 4, 4]} def", "6, 4, 4]} def build_net(phase, size=300, num_classes=81): if size !=", "is not None: x = self.relu(x) return x class LSN_init(nn.Module):", "current and higher level features self.merge1 = ConvBlock(512, 512, kernel_size=3,", "= size # vgg network self.base = nn.ModuleList(base) self.lds =", "= LDS() # convs for merging the lsn and ssd", "kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) 
self.bn = nn.BatchNorm2d(out_planes, eps=1e-5,", "for prediction in sources: new_sources.append(prediction) # apply multibox head to", "[ConvBlock(256, 128, kernel_size=1,stride=1)] layers += [ConvBlock(128, 256, kernel_size=3,stride=1)] layers +=", "torch import torch.nn as nn import os import torch.nn.functional as", "eps=1e-5, momentum=0.01, affine=True) if bn else None def forward(self, x):", "images. Shape: [batch,3,300,300]. Return: Depending on phase: test: list of", "conf_layers = [] vgg_source = [1, -2] for k, v", "128, kernel_size=1, stride=1) # convs to reduce the feature dimensions", "layers += [nn.MaxPool2d(kernel_size=2, stride=2)] elif v == 'C': layers +=", "stride=2, padding=1) def forward(self, x): x_pool1 = self.pool1(x) x_pool2 =", "reduce the feature dimensions of other levels self.proj1 = ConvBlock(1024,", "= False for k, v in enumerate(cfg): if in_channels !=", "i for v in cfg: if v == 'M': layers", "kernel_size=(3, 3), stride=stride, padding=1) ) def forward(self, x): x =", "256, kernel_size=1) # convs to merge the features of the", "3), stride=stride, padding=padding, relu=False) ) def forward(self, x): out =", "# convs for generate the lsn features self.icn1 = LSN_init(3,", "else: layers += [conv2d, nn.ReLU(inplace=False)] in_channels = v pool5 =", "the current and higher level features self.merge1 = ConvBlock(512, 512,", "the features of the current and higher level features self.merge1", "__init__(self, in_planes, out_planes, stride=1): super(Relu_Conv, self).__init__() self.out_channels = out_planes self.relu", "layer branches into 1) conv2d for class conf scores 2)", "= nn.Sequential( ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1), ConvBlock(inter_planes, inter_planes,", "x): x = self.conv(x) if self.bn is not None: x", "super(ConvBlock, self).__init__() self.out_channels = out_planes self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,", "the feature dimensions of current level 
self.agent1 = ConvBlock(512, 256,", "forward(self, x): \"\"\"Applies network layers and ops on input image(s)", "in_planes, out_planes, stride=1, padding=(1, 1)): super(Ds_Conv, self).__init__() self.out_channels = out_planes", "conf], 1) if self.phase == \"test\": output = ( loc.view(loc.size(0),", "512, stride=2) # convs with s=2 to downsample the features", "loc.view(loc.size(0), -1, 4), # loc preds self.softmax(conf.view(-1, self.num_classes)), # conf", "os import torch.nn.functional as F class LDS(nn.Module): def __init__(self,): super(LDS,", "stride=1): super(LSN_init, self).__init__() self.out_channels = out_planes inter_planes = out_planes //", "into lower dimension. tmp1 = self.proj1(p) tmp2 = self.proj2(w) tmp3", "not None: x = self.bn(x) if self.relu is not None:", "= LSN_init(3, 512, stride=1) self.icn2 = LSN_later(128, 1024, stride=2) self.icn3", "= self.ibn1(x) x_pool1_skip, x_pool1_icn = self.icn1(x_pool) s = self.Norm1(conv4_3_bn *", "is not None: x = self.bn(x) if self.relu is not", "out2 class LSN_later(nn.Module): def __init__(self, in_planes, out_planes, stride=1): super(LSN_later, self).__init__()", "256, kernel_size=1) self.convert2 = ConvBlock(256, 512, kernel_size=1) self.convert3 = ConvBlock(128,", "def forward(self, x): if self.bn is not None: x =", "network is based on the SSD architecture. Each multibox layer", "\"\"\"Applies network layers and ops on input image(s) x. 
Args:", "bias=bias) self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else", "out_planes inter_planes = in_planes // 4 self.single_branch = nn.Sequential( ConvBlock(in_planes,", "v(x) if k == 0: x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip) w", "512, kernel_size=3, stride=1, padding=1) self.merge2 = ConvBlock(1024, 1024, kernel_size=3, stride=1,", "= self.convert2(proj) pred2 = torch.cat([agent2, convert2], dim=1) pred2 = self.merge2(pred2)", "i, batch_norm=False): layers = [] in_channels = i for v", "self.relu is not None: x = self.relu(x) return x class", "ext == '.pkl' or '.pth': print('Loading weights into state dict...')", "if self.phase == 'test': self.softmax = nn.Softmax() def forward(self, x):", "mode='bilinear') proj = torch.cat([proj2, proj3], dim=1) agent2 = self.agent2(p) convert2", "= torch.cat([o.view(o.size(0), -1) for o in loc], 1) conf =", "level features self.merge1 = ConvBlock(512, 512, kernel_size=3, stride=1, padding=1) self.merge2", "image(s) x. 
Args: x: input image or batch of images.", "nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6) conv7 = nn.Conv2d(1024, 1024, kernel_size=1)", "feed to multibox loc and conf layers head: \"multibox head\"", "= Relu_Conv(1024, 1024, stride=1) self.Norm3 = Relu_Conv(512, 512, stride=1) self.Norm4", "for o in loc], 1) conf = torch.cat([o.view(o.size(0), -1) for", "nn.Sequential( ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=padding, relu=False) ) def", "features self.dsc1 = Ds_Conv(512, 1024, stride=2, padding=(1, 1)) self.dsc2 =", "state dict...') self.load_state_dict(torch.load(base_file)) print('Finished!') else: print('Sorry only .pth and .pkl", "layers, Shape: [batch,num_priors*4] 3: priorbox layers, Shape: [2,num_priors*4] \"\"\" sources", "self.ibn2 = IBN(1024, bn=True) self.relu = nn.ReLU(inplace=False) self.extras = nn.ModuleList(extras)", "loc], 1) conf = torch.cat([o.view(o.size(0), -1) for o in conf],", "4, kernel_size=3, padding=1)] conf_layers += [nn.Conv2d(v.out_channels, cfg[i] * num_classes, kernel_size=3,", "conv2d for class conf scores 2) conv2d for localization predictions", "ConvBlock(nn.Module): def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1,", "4 self.part_a = ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1) self.part_b", "300 or 512 extras: extra layers that feed to multibox", "stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False): super(ConvBlock, self).__init__() self.out_channels", "to multibox loc and conf layers head: \"multibox head\" consists", "= ConvBlock(512, 256, kernel_size=1, stride=1) # convs to reduce the", "layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=False)] else: layers += [conv2d, nn.ReLU(inplace=False)]", "self.icn1 = LSN_init(3, 512, stride=1) self.icn2 = LSN_later(128, 1024, stride=2)", "the layer's feature map size. 
Args: phase: (string) Can be", "output def load_weights(self, base_file): other, ext = os.path.splitext(base_file) if ext", "self.loc = nn.ModuleList(head[0]) self.conf = nn.ModuleList(head[1]) if self.phase == 'test':", "self.out_channels = out_planes inter_planes = in_planes // 4 self.single_branch =", "class LSN_later(nn.Module): def __init__(self, in_planes, out_planes, stride=1): super(LSN_later, self).__init__() self.out_channels", "= nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.bn", "in_planes, out_planes, stride=1): super(LSN_later, self).__init__() self.out_channels = out_planes inter_planes =", "padding=1) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=False)] else: layers", "cfg, i, batch_norm=False): # Extra layers added to VGG for", "batch_norm=False): # Extra layers added to VGG for feature scaling", "dilation=1, groups=1, relu=True, bn=True, bias=False): super(ConvBlock, self).__init__() self.out_channels = out_planes", "3), stride=stride, padding=1) ) self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1,", "padding=(1, 1)) self.dsc3 = Ds_Conv(512, 256, stride=2, padding=(1, 1)) #", "i = 2 indicator = 3 for k, v in", "mode='bilinear') proj3 = F.upsample(tmp3, size=(19, 19), mode='bilinear') proj = torch.cat([proj2,", "dim=1) agent2 = self.agent2(p) convert2 = self.convert2(proj) pred2 = torch.cat([agent2,", "+= [ConvBlock(256, 128, kernel_size=1,stride=1)] layers += [ConvBlock(128, 256, kernel_size=3,stride=1)] return", "* 4, kernel_size=3, padding=1)] conf_layers += [nn.Conv2d(vgg[v].out_channels, cfg[k] * num_classes,", "return out1, out2 class LSN_later(nn.Module): def __init__(self, in_planes, out_planes, stride=1):", "relu=True, bn=True, bias=False): super(ConvBlock, self).__init__() self.out_channels = out_planes self.conv =", "prediction in sources: new_sources.append(prediction) # apply multibox head to 
source", "pass # project the forward features into lower dimension. tmp1", "= { '300': [64, 64, 'M', 128, 128, 'M', 256,", "[] conf_layers = [] vgg_source = [1, -2] for k,", "LDS(nn.Module): def __init__(self,): super(LDS, self).__init__() self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2,", "outputs from: 1: softmax layers, Shape: [batch*num_priors,num_classes] 2: localization layers,", "\"\"\"LRFNet for object detection The network is based on the", "def __init__(self, in_planes, out_planes, stride=1): super(LSN_init, self).__init__() self.out_channels = out_planes", "+= [nn.Conv2d(v.out_channels, cfg[i] * 4, kernel_size=3, padding=1)] conf_layers += [nn.Conv2d(v.out_channels,", "in loc], 1) conf = torch.cat([o.view(o.size(0), -1) for o in", "forward(self, x): if self.bn is not None: x = self.bn(x)", "Shape: [batch,num_priors*4] 3: priorbox layers, Shape: [2,num_priors*4] train: list of", "[64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C',", "* num_classes, kernel_size=3, padding=1)] i += 1 return vgg, extra_layers,", "convs to merge the features of the current and higher", "Shape: [2,num_priors*4] train: list of concat outputs from: 1: confidence", "size is not supported!\") return return LRFNet(phase, size, *multibox(size, vgg(base[str(size)],", "self.single_branch = nn.Sequential( ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1) )", "of other levels self.proj1 = ConvBlock(1024, 128, kernel_size=1, stride=1) self.proj2", "size != 300: print(\"Error: The input image size is not", "4 self.part_a = nn.Sequential( ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1),", "(string) Can be \"test\" or \"train\" base: VGG16 layers for", "1) conv2d for class conf scores 2) conv2d for localization", "kernel_size=1, stride=1) self.agent2 = ConvBlock(1024, 512, kernel_size=1, stride=1) self.agent3 =", "fc_7 level proj2 = F.upsample(tmp2, size=(19, 19), mode='bilinear') proj3 =", "self.ibn1(x) x_pool1_skip, 
x_pool1_icn = self.icn1(x_pool) s = self.Norm1(conv4_3_bn * x_pool1_icn)", "softmax layers, Shape: [batch*num_priors,num_classes] 2: localization layers, Shape: [batch,num_priors*4] 3:", "== 'C': layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)] else: conv2d =", "groups=1, relu=True, bn=True, bias=False): super(ConvBlock, self).__init__() self.out_channels = out_planes self.conv", "class LRFNet(nn.Module): \"\"\"LRFNet for object detection The network is based", "to merge the features of the current and higher level", "nn.Sequential( ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1), ConvBlock(inter_planes, inter_planes, kernel_size=1,", "v in cfg: if v == 'M': layers += [nn.MaxPool2d(kernel_size=2,", "self.relu(x) out = self.single_branch(x) return out class Ds_Conv(nn.Module): def __init__(self,", "layers += [conv2d, nn.ReLU(inplace=False)] in_channels = v pool5 = nn.MaxPool2d(kernel_size=3,", "feature map size. Args: phase: (string) Can be \"test\" or", "out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False): super(ConvBlock,", "cfg[i] * num_classes, kernel_size=3, padding=1)] i += 1 return vgg,", "in_planes, out_planes, stride=1): super(One_Three_Conv, self).__init__() self.out_channels = out_planes inter_planes =", "F.upsample(tmp3, size=(38, 38), mode='bilinear') proj = torch.cat([proj1, proj2, proj3], dim=1)", "= self.merge3(pred3) new_sources.append(pred3) for prediction in sources: new_sources.append(prediction) # apply", "ConvBlock(inter_planes, inter_planes, kernel_size=1, stride=1), ConvBlock(inter_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1)", "convert3 = self.convert3(proj) pred3 = torch.cat([agent3, convert3], dim=1) pred3 =", "inter_planes = out_planes // 4 self.part_a = ConvBlock(in_planes, inter_planes, kernel_size=(3,", "0): loc_layers += [nn.Conv2d(v.out_channels, cfg[i] * 4, kernel_size=3, padding=1)] conf_layers", "return out 
class Ds_Conv(nn.Module): def __init__(self, in_planes, out_planes, stride=1, padding=(1,", "self.Norm1(conv4_3_bn * x_pool1_icn) # apply vgg up to fc7 for", "== 0: x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip) w = self.Norm3(self.dsc2(p) +", "nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None def forward(self,", "if size != 300: print(\"Error: The input image size is", "self.merge3(pred3) new_sources.append(pred3) for prediction in sources: new_sources.append(prediction) # apply multibox", "+= [ConvBlock(256, 128, kernel_size=1,stride=1)] layers += [ConvBlock(128, 256, kernel_size=3,stride=1)] layers", "conf.view(conf.size(0), -1, self.num_classes), ) return output def load_weights(self, base_file): other,", "== 0) or (k > indicator+1 and k % 2", "not None: x = self.relu(x) return x class LSN_init(nn.Module): def", "self.Norm1 = Relu_Conv(512, 512, stride=1) self.Norm2 = Relu_Conv(1024, 1024, stride=1)", "padding=1)] conf_layers += [nn.Conv2d(vgg[v].out_channels, cfg[k] * num_classes, kernel_size=3, padding=1)] i", "in_planes // 4 self.single_branch = nn.Sequential( ConvBlock(in_planes, inter_planes, kernel_size=1, stride=1),", "relu else None def forward(self, x): x = self.conv(x) if", "= os.path.splitext(base_file) if ext == '.pkl' or '.pth': print('Loading weights", "proj1 = F.upsample(tmp1, size=(38, 38), mode='bilinear') proj2 = F.upsample(tmp2, size=(38,", "size, *multibox(size, vgg(base[str(size)], 3), add_extras(size, extras[str(size)], 1024), mbox[str(size)], num_classes), num_classes)", "def __init__(self, in_planes, out_planes, stride=1): super(LSN_later, self).__init__() self.out_channels = out_planes", "in conf], 1) if self.phase == \"test\": output = (", "nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0) self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=1)", "out = self.single_branch(x) return out class Relu_Conv(nn.Module): def __init__(self, in_planes,", "= [] in_channels = i flag = False for 
k,", "'S': if v == 'S': if in_channels == 256 and", "kernel_size=1) layers += [pool5, conv6, nn.ReLU(inplace=False), conv7, nn.ReLU(inplace=False)] return layers", "nn import os import torch.nn.functional as F class LDS(nn.Module): def", "34): x = self.base[k](x) conv7_bn = self.ibn2(x) x_pool2_skip, x_pool2_icn =", "x = self.relu(x) out = self.single_branch(x) return out class Ds_Conv(nn.Module):", "nn.BatchNorm2d(v), nn.ReLU(inplace=False)] else: layers += [conv2d, nn.ReLU(inplace=False)] in_channels = v", "kernel_size=3, padding=1)] conf_layers += [nn.Conv2d(v.out_channels, cfg[i] * num_classes, kernel_size=3, padding=1)]", "[2,num_priors*4] \"\"\" sources = list() loc = list() conf =", "layer's feature map size. Args: phase: (string) Can be \"test\"", "is based on the SSD architecture. Each multibox layer branches", "self.Norm4 = Relu_Conv(256, 256, stride=1) # convs for generate the", ") def forward(self, x): out = self.single_branch(x) return out class", "padding=0) self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0) self.pool3 = nn.MaxPool2d(kernel_size=(2,", "loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1) conf", "self.softmax(conf.view(-1, self.num_classes)), # conf preds ) else: output = (", "scaling layers = [] in_channels = i flag = False", "[nn.Conv2d(512, cfg[k] * 4, kernel_size=3, padding=1)] conf_layers +=[nn.Conv2d(512, cfg[k] *", "x_pool3 = self.pool3(x_pool2) return x_pool3 class ConvBlock(nn.Module): def __init__(self, in_planes,", "outputs for k, v in enumerate(self.extras): x = v(x) if", "cfg[k] * num_classes, kernel_size=3, padding=1)] i = 2 indicator =", "indicator+1 and k % 2 == 0) or (k >", "the lsn features self.icn1 = LSN_init(3, 512, stride=1) self.icn2 =", "stride=2, padding=(1, 1)) self.dsc2 = Ds_Conv(1024, 512, stride=2, padding=(1, 1))", "self.size = size # vgg network self.base = nn.ModuleList(base) self.lds", "vgg_source = [1, -2] for k, v in enumerate(vgg_source): if", "1024, kernel_size=3, stride=1, 
padding=1) self.merge3 = ConvBlock(512, 512, kernel_size=3, stride=1,", "= nn.ModuleList(head[1]) if self.phase == 'test': self.softmax = nn.Softmax() def", "c) in zip(new_sources, self.loc, self.conf): loc.append(l(x).permute(0, 2, 3, 1).contiguous()) conf.append(c(x).permute(0,", "k, v in enumerate(cfg): if in_channels != 'S': if v", "* x_pool1_icn) # apply vgg up to fc7 for k", "feature scaling layers = [] in_channels = i flag =", "= out_planes inter_planes = in_planes // 4 self.single_branch = nn.Sequential(", "2: localization layers, Shape: [batch,num_priors*4] 3: priorbox layers, Shape: [2,num_priors*4]", "38), mode='bilinear') proj2 = F.upsample(tmp2, size=(38, 38), mode='bilinear') proj3 =", "out class Relu_Conv(nn.Module): def __init__(self, in_planes, out_planes, stride=1): super(Relu_Conv, self).__init__()", "x = self.bn(x) return x class One_Three_Conv(nn.Module): def __init__(self, in_planes,", "lsn and ssd features self.Norm1 = Relu_Conv(512, 512, stride=1) self.Norm2", "conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if batch_norm: layers +=", "+ x * x_pool3_icn) elif k == 2: q =", "self.icn2 = LSN_later(128, 1024, stride=2) self.icn3 = LSN_later(256, 512, stride=2)", "Relu_Conv(nn.Module): def __init__(self, in_planes, out_planes, stride=1): super(Relu_Conv, self).__init__() self.out_channels =", "network layers and ops on input image(s) x. 
Args: x:", "out_planes self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups,", "x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip) p = self.Norm2(self.dsc1(s) + conv7_bn *", "import os import torch.nn.functional as F class LDS(nn.Module): def __init__(self,):", "not None: x = self.bn(x) return x class One_Three_Conv(nn.Module): def", "x_pool2 = self.pool2(x_pool1) x_pool3 = self.pool3(x_pool2) return x_pool3 class ConvBlock(nn.Module):", "= Ds_Conv(512, 1024, stride=2, padding=(1, 1)) self.dsc2 = Ds_Conv(1024, 512,", "def build_net(phase, size=300, num_classes=81): if size != 300: print(\"Error: The", "layers \"\"\" def __init__(self, phase, size, base, extras, head, num_classes):", "= nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if batch_norm: layers += [conv2d,", "= self.base[k](x) conv4_3_bn = self.ibn1(x) x_pool1_skip, x_pool1_icn = self.icn1(x_pool) s", "= nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0) self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=2,", "if self.bn is not None: x = self.bn(x) if self.relu", "return out class Relu_Conv(nn.Module): def __init__(self, in_planes, out_planes, stride=1): super(Relu_Conv,", "conf scores 2) conv2d for localization predictions 3) associated priorbox", "'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] elif v == 'C': layers", "stride=stride, padding=1) self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False) def", "kernel_size=3, padding=1) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=False)] else:", "return out1, out2 class IBN(nn.Module): def __init__(self, out_planes, bn=True): super(IBN,", "= out_planes inter_planes = out_planes // 4 self.part_a = nn.Sequential(", "'test': self.softmax = nn.Softmax() def forward(self, x): \"\"\"Applies network layers", "generate the lsn features self.icn1 = LSN_init(3, 512, stride=1) self.icn2", "= self.conv(x) if self.bn 
is not None: x = self.bn(x)", "!= 'S': if v == 'S': if in_channels == 256", "256, stride=1) # convs for generate the lsn features self.icn1", "in_planes, out_planes, stride=1): super(LSN_init, self).__init__() self.out_channels = out_planes inter_planes =", "agent2 = self.agent2(p) convert2 = self.convert2(proj) pred2 = torch.cat([agent2, convert2],", "localization predictions 3) associated priorbox layer to produce default bounding", "nn.ReLU(inplace=False)] return layers base = { '300': [64, 64, 'M',", "= torch.cat([agent1, convert1], dim=1) pred1 = self.merge1(pred1) new_sources.append(pred1) # The", "2 == 0) or (k > indicator+1 and k %", "conv2d for localization predictions 3) associated priorbox layer to produce", "'300': [1024, 'S', 512, 'S', 256]} def multibox(size, vgg, extra_layers,", "== 7: sources.append(x) else: pass # project the forward features", "kernel_size=1) self.convert3 = ConvBlock(128, 256, kernel_size=1) # convs to merge", "of the current and higher level features self.merge1 = ConvBlock(512,", "self.out_channels = out_planes self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding,", "padding=1) def forward(self, x): x_pool1 = self.pool1(x) x_pool2 = self.pool2(x_pool1)", "out = self.single_branch(x) return out class LRFNet(nn.Module): \"\"\"LRFNet for object", "as nn import os import torch.nn.functional as F class LDS(nn.Module):", "= ConvBlock(512, 128, kernel_size=1, stride=1) self.proj3 = ConvBlock(256, 128, kernel_size=1,", "stride=2) self.icn3 = LSN_later(256, 512, stride=2) # convs with s=2", "= out_planes self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn", "apply vgg up to fc7 for k in range(22, 34):", "self.Norm3(self.dsc2(p) + x * x_pool3_icn) elif k == 2: q", "produce default bounding boxes specific to the layer's feature map", "* x_pool2_icn) x = self.base[34](x) # apply extra layers and", "* num_classes, kernel_size=3, padding=1)] i = 2 indicator = 3", 
"2: q = self.Norm4(self.dsc3(w) + x) sources.append(q) elif k ==", "preds self.softmax(conf.view(-1, self.num_classes)), # conf preds ) else: output =", "in enumerate(self.extras): x = v(x) if k == 0: x_pool3_skip,", "print('Sorry only .pth and .pkl files supported.') def vgg(cfg, i,", "features self.Norm1 = Relu_Conv(512, 512, stride=1) self.Norm2 = Relu_Conv(1024, 1024,", "localization layers, Shape: [batch,num_priors*4] 3: priorbox layers, Shape: [2,num_priors*4] \"\"\"", "or '.pth': print('Loading weights into state dict...') self.load_state_dict(torch.load(base_file)) print('Finished!') else:", "0: loc_layers += [nn.Conv2d(512, cfg[k] * 4, kernel_size=3, padding=1)] conf_layers", "torch.cat([agent1, convert1], dim=1) pred1 = self.merge1(pred1) new_sources.append(pred1) # The fc_7", "= nn.ReLU(inplace=False) self.extras = nn.ModuleList(extras) self.loc = nn.ModuleList(head[0]) self.conf =", "LRFNet(nn.Module): \"\"\"LRFNet for object detection The network is based on", "Ds_Conv(512, 256, stride=2, padding=(1, 1)) # convs to reduce the", "512, 512, 512]} def add_extras(size, cfg, i, batch_norm=False): # Extra", "None: x = self.relu(x) return x class LSN_init(nn.Module): def __init__(self,", "of images. Shape: [batch,3,300,300]. 
Return: Depending on phase: test: list", "v in enumerate(extra_layers): if (k < indicator+1 and k %", "+ conv7_bn * x_pool2_icn) x = self.base[34](x) # apply extra", "self.dsc3 = Ds_Conv(512, 256, stride=2, padding=(1, 1)) # convs to", "forward(self, x): out = self.single_branch(x) return out class Relu_Conv(nn.Module): def", "= self.pool3(x_pool2) return x_pool3 class ConvBlock(nn.Module): def __init__(self, in_planes, out_planes,", "= self.merge1(pred1) new_sources.append(pred1) # The fc_7 level proj2 = F.upsample(tmp2,", "tmp1 = self.proj1(p) tmp2 = self.proj2(w) tmp3 = self.proj3(q) #", ".pth and .pkl files supported.') def vgg(cfg, i, batch_norm=False): layers", "padding=1) self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False) def forward(self,", "import torch.nn.functional as F class LDS(nn.Module): def __init__(self,): super(LDS, self).__init__()", "2), stride=2, padding=0) self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0) self.pool3", "class One_Three_Conv(nn.Module): def __init__(self, in_planes, out_planes, stride=1): super(One_Three_Conv, self).__init__() self.out_channels", "self).__init__() self.phase = phase self.num_classes = num_classes self.size = size", "out_planes self.single_branch = nn.Sequential( ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=padding,", "padding=0) self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=1) def forward(self, x):", "relu=False) def forward(self, x): out1 = self.part_a(x) out2 = self.part_b(out1)", "bounding boxes specific to the layer's feature map size. 
Args:", "if in_channels == 256 and size == 512: layers +=", "pred1 = self.merge1(pred1) new_sources.append(pred1) # The fc_7 level proj2 =", "= [] conf_layers = [] vgg_source = [1, -2] for", "The fc_7 level proj2 = F.upsample(tmp2, size=(19, 19), mode='bilinear') proj3", "self).__init__() self.out_channels = out_planes self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,", "self.single_branch(x) return out class LRFNet(nn.Module): \"\"\"LRFNet for object detection The", "vgg, extra_layers, cfg, num_classes): loc_layers = [] conf_layers = []", "ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=padding, relu=False) ) def forward(self,", "self.agent3(w) convert3 = self.convert3(proj) pred3 = torch.cat([agent3, convert3], dim=1) pred3", "3), stride=stride, padding=1) self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False)", "only .pth and .pkl files supported.') def vgg(cfg, i, batch_norm=False):", "head\" consists of loc and conf conv layers \"\"\" def", "reduce the feature dimensions of other levels self.convert1 = ConvBlock(384,", "( loc.view(loc.size(0), -1, 4), # loc preds self.softmax(conf.view(-1, self.num_classes)), #", "-2] for k, v in enumerate(vgg_source): if k == 0:", "w = self.Norm3(self.dsc2(p) + x * x_pool3_icn) elif k ==", "kernel_size=1, stride=1), ConvBlock(inter_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1, relu=False) )", "inter_planes, kernel_size=1, stride=1), ConvBlock(inter_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1) )", "to downsample the features self.dsc1 = Ds_Conv(512, 1024, stride=2, padding=(1,", "[batch*num_priors,num_classes] 2: localization layers, Shape: [batch,num_priors*4] 3: priorbox layers, Shape:", "self.convert3(proj) pred3 = torch.cat([agent3, convert3], dim=1) pred3 = self.merge3(pred3) new_sources.append(pred3)", "stride=2), nn.ReLU(inplace=False)] in_channels = v layers += [ConvBlock(256, 128, 
kernel_size=1,stride=1)]", "ssd features self.Norm1 = Relu_Conv(512, 512, stride=1) self.Norm2 = Relu_Conv(1024,", "torch.cat([agent2, convert2], dim=1) pred2 = self.merge2(pred2) new_sources.append(pred2) # The conv8", "class Ds_Conv(nn.Module): def __init__(self, in_planes, out_planes, stride=1, padding=(1, 1)): super(Ds_Conv,", "self.Norm3 = Relu_Conv(512, 512, stride=1) self.Norm4 = Relu_Conv(256, 256, stride=1)", "= nn.ReLU(inplace=False) self.single_branch = nn.Sequential( ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride,", "nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0) self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)", "outputs from: 1: confidence layers, Shape: [batch*num_priors,num_classes] 2: localization layers,", "= v pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) conv6 = nn.Conv2d(512,", "+= [nn.Conv2d(v.out_channels, cfg[i] * num_classes, kernel_size=3, padding=1)] i += 1", "== 'S': if in_channels == 256 and size == 512:", "of either 300 or 512 extras: extra layers that feed", "sources.append(x) else: pass # project the forward features into lower", "= self.agent1(s) convert1 = self.convert1(proj) pred1 = torch.cat([agent1, convert1], dim=1)", "layers extras = { '300': [1024, 'S', 512, 'S', 256]}", "padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False): super(ConvBlock, self).__init__() self.out_channels =", "os.path.splitext(base_file) if ext == '.pkl' or '.pth': print('Loading weights into", "sources = list() loc = list() conf = list() new_sources", "The input image size is not supported!\") return return LRFNet(phase,", "Relu_Conv(512, 512, stride=1) self.Norm2 = Relu_Conv(1024, 1024, stride=1) self.Norm3 =", "self.relu(x) return x class LSN_init(nn.Module): def __init__(self, in_planes, out_planes, stride=1):", "3) associated priorbox layer to produce default bounding boxes specific", "stride=1) self.proj2 = ConvBlock(512, 128, kernel_size=1, stride=1) self.proj3 = ConvBlock(256,", 
"print(\"Error: The input image size is not supported!\") return return", "stride=2, padding=(1, 1)) # convs to reduce the feature dimensions", "x = self.relu(x) return x class LSN_init(nn.Module): def __init__(self, in_planes,", "[batch,3,300,300]. Return: Depending on phase: test: list of concat outputs", "3), stride=stride, padding=1), ConvBlock(inter_planes, inter_planes, kernel_size=1, stride=1), ConvBlock(inter_planes, inter_planes, kernel_size=(3,", "self.part_b(out1) return out1, out2 class IBN(nn.Module): def __init__(self, out_planes, bn=True):", "self.loc, self.conf): loc.append(l(x).permute(0, 2, 3, 1).contiguous()) conf.append(c(x).permute(0, 2, 3, 1).contiguous())", "loc and conf conv layers \"\"\" def __init__(self, phase, size,", "256, kernel_size=3,stride=1)] layers += [ConvBlock(256, 128, kernel_size=1,stride=1)] layers += [ConvBlock(128,", "and conf layers head: \"multibox head\" consists of loc and", "conv6, nn.ReLU(inplace=False), conv7, nn.ReLU(inplace=False)] return layers base = { '300':", "Shape: [batch,num_priors*4] 3: priorbox layers, Shape: [2,num_priors*4] \"\"\" sources =", "self.pool1(x) x_pool2 = self.pool2(x_pool1) x_pool3 = self.pool3(x_pool2) return x_pool3 class", "Relu_Conv(256, 256, stride=1) # convs for generate the lsn features", "The conv8 level proj3 = F.upsample(tmp3, size=(10, 10), mode='bilinear') proj", "return layers base = { '300': [64, 64, 'M', 128,", "= nn.ReLU(inplace=False) if relu else None def forward(self, x): x", "base, extras, head, num_classes): super(LRFNet, self).__init__() self.phase = phase self.num_classes", "k == 2: q = self.Norm4(self.dsc3(w) + x) sources.append(q) elif", "supported!\") return return LRFNet(phase, size, *multibox(size, vgg(base[str(size)], 3), add_extras(size, extras[str(size)],", "phase, size, base, extras, head, num_classes): super(LRFNet, self).__init__() self.phase =", "self).__init__() self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0) self.pool2 = 
nn.MaxPool2d(kernel_size=(2,", "in_channels = i flag = False for k, v in", "v, kernel_size=3, padding=1) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=False)]", "= ( loc.view(loc.size(0), -1, 4), conf.view(conf.size(0), -1, self.num_classes), ) return", "self.agent1 = ConvBlock(512, 256, kernel_size=1, stride=1) self.agent2 = ConvBlock(1024, 512,", "{ '300': [64, 64, 'M', 128, 128, 'M', 256, 256,", "extra layers that feed to multibox loc and conf layers", "self.phase = phase self.num_classes = num_classes self.size = size #", "stride=2, padding=0) self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0) self.pool3 =", "features of the current and higher level features self.merge1 =", "(k < indicator+1 and k % 2 == 0) or", "* x_pool3_icn) elif k == 2: q = self.Norm4(self.dsc3(w) +", "elif k == 5 or k == 7: sources.append(x) else:", "proj3], dim=1) agent1 = self.agent1(s) convert1 = self.convert1(proj) pred1 =", "mode='bilinear') proj = proj3 agent3 = self.agent3(w) convert3 = self.convert3(proj)", "k % 2 == 0) or (k > indicator+1 and", "if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] elif v", "as F class LDS(nn.Module): def __init__(self,): super(LDS, self).__init__() self.pool1 =", "lds to the initial image x_pool = self.lds(x) # apply", "out_planes self.relu = nn.ReLU(inplace=False) self.single_branch = nn.Sequential( ConvBlock(in_planes, out_planes, kernel_size=(3,", "and k % 2 != 0): loc_layers += [nn.Conv2d(v.out_channels, cfg[i]", "self.agent3 = ConvBlock(512, 256, kernel_size=1, stride=1) # convs to reduce", "= torch.cat([o.view(o.size(0), -1) for o in conf], 1) if self.phase", "return return LRFNet(phase, size, *multibox(size, vgg(base[str(size)], 3), add_extras(size, extras[str(size)], 1024),", "2) conv2d for localization predictions 3) associated priorbox layer to", "256]} def multibox(size, vgg, extra_layers, cfg, num_classes): loc_layers = []", "layers head: \"multibox head\" consists of loc and conf conv", 
"512, 512]} def add_extras(size, cfg, i, batch_norm=False): # Extra layers", "512, 'M', 512, 512, 512]} def add_extras(size, cfg, i, batch_norm=False):", "+= [pool5, conv6, nn.ReLU(inplace=False), conv7, nn.ReLU(inplace=False)] return layers base =", "= LSN_later(256, 512, stride=2) # convs with s=2 to downsample", "else: print('Sorry only .pth and .pkl files supported.') def vgg(cfg,", "10), mode='bilinear') proj = proj3 agent3 = self.agent3(w) convert3 =", "1: confidence layers, Shape: [batch*num_priors,num_classes] 2: localization layers, Shape: [batch,num_priors*4]", "not supported!\") return return LRFNet(phase, size, *multibox(size, vgg(base[str(size)], 3), add_extras(size,", "mode='bilinear') proj3 = F.upsample(tmp3, size=(38, 38), mode='bilinear') proj = torch.cat([proj1,", "sources.append(q) elif k == 5 or k == 7: sources.append(x)", "nn.MaxPool2d(kernel_size=3, stride=1, padding=1) conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)", "256, 'C', 512, 512, 512, 'M', 512, 512, 512]} def", "LSN_later(256, 512, stride=2) # convs with s=2 to downsample the", "network self.base = nn.ModuleList(base) self.lds = LDS() # convs for", "= nn.Sequential( ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1) ) def", "padding=1) ) self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False) def", "conv4_3_bn = self.ibn1(x) x_pool1_skip, x_pool1_icn = self.icn1(x_pool) s = self.Norm1(conv4_3_bn", "self.agent2 = ConvBlock(1024, 512, kernel_size=1, stride=1) self.agent3 = ConvBlock(512, 256,", "for feature scaling layers = [] in_channels = i flag", "bn else None def forward(self, x): if self.bn is not", "\"\"\" sources = list() loc = list() conf = list()", "cfg[k+1], stride=2), nn.ReLU(inplace=False)] else: layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)]", "return out class LRFNet(nn.Module): \"\"\"LRFNet for object detection The network", "[conv2d, nn.BatchNorm2d(v), 
nn.ReLU(inplace=False)] else: layers += [conv2d, nn.ReLU(inplace=False)] in_channels =", "for k, v in enumerate(cfg): if in_channels != 'S': if", "x: input image or batch of images. Shape: [batch,3,300,300]. Return:", "[nn.Conv2d(v.out_channels, cfg[i] * 4, kernel_size=3, padding=1)] conf_layers += [nn.Conv2d(v.out_channels, cfg[i]", "4 self.single_branch = nn.Sequential( ConvBlock(in_planes, inter_planes, kernel_size=1, stride=1), ConvBlock(inter_planes, out_planes,", "the feature dimensions of other levels self.proj1 = ConvBlock(1024, 128,", "in_channels = v layers += [ConvBlock(256, 128, kernel_size=1,stride=1)] layers +=", "torch.cat([o.view(o.size(0), -1) for o in conf], 1) if self.phase ==", "range(22, 34): x = self.base[k](x) conv7_bn = self.ibn2(x) x_pool2_skip, x_pool2_icn", "1)) # convs to reduce the feature dimensions of current", "# The fc_7 level proj2 = F.upsample(tmp2, size=(19, 19), mode='bilinear')", "kernel_size=3, stride=1, padding=1) self.merge3 = ConvBlock(512, 512, kernel_size=3, stride=1, padding=1)", "x_pool3_icn) elif k == 2: q = self.Norm4(self.dsc3(w) + x)", "3 for k, v in enumerate(extra_layers): if (k < indicator+1", "level proj3 = F.upsample(tmp3, size=(10, 10), mode='bilinear') proj = proj3", "affine=True) if bn else None self.relu = nn.ReLU(inplace=False) if relu", "!= 300: print(\"Error: The input image size is not supported!\")", "merge the features of the current and higher level features", "batch of images. Shape: [batch,3,300,300]. 
Return: Depending on phase: test:", "for v in cfg: if v == 'M': layers +=", "self.part_a = ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1) self.part_b =", "in_channels != 'S': if v == 'S': if in_channels ==", "def forward(self, x): x = self.conv(x) if self.bn is not", "source layer outputs for k, v in enumerate(self.extras): x =", "layers += [ConvBlock(256, 128, kernel_size=1,stride=1)] layers += [ConvBlock(128, 256, kernel_size=3,stride=1)]", "kernel_size=3, padding=1)] conf_layers += [nn.Conv2d(vgg[v].out_channels, cfg[k] * num_classes, kernel_size=3, padding=1)]", "inter_planes = out_planes // 4 self.part_a = nn.Sequential( ConvBlock(in_planes, inter_planes,", "confidence layers, Shape: [batch*num_priors,num_classes] 2: localization layers, Shape: [batch,num_priors*4] 3:", "nn.Sequential( ConvBlock(in_planes, inter_planes, kernel_size=1, stride=1), ConvBlock(inter_planes, out_planes, kernel_size=(3, 3), stride=stride,", "of concat outputs from: 1: confidence layers, Shape: [batch*num_priors,num_classes] 2:", "stride=1): super(Relu_Conv, self).__init__() self.out_channels = out_planes self.relu = nn.ReLU(inplace=False) self.single_branch", "size=(38, 38), mode='bilinear') proj = torch.cat([proj1, proj2, proj3], dim=1) agent1", "0: x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip) w = self.Norm3(self.dsc2(p) + x", "of other levels self.convert1 = ConvBlock(384, 256, kernel_size=1) self.convert2 =", "Shape: [batch*num_priors,num_classes] 2: localization layers, Shape: [batch,num_priors*4] 3: priorbox layers,", "1024, stride=2) self.icn3 = LSN_later(256, 512, stride=2) # convs with", "kernel_size=1) self.convert2 = ConvBlock(256, 512, kernel_size=1) self.convert3 = ConvBlock(128, 256,", "v in enumerate(self.extras): x = v(x) if k == 0:", "= self.part_b(out1) return out1, out2 class IBN(nn.Module): def __init__(self, out_planes,", "out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, 
bias=bias) self.bn = nn.BatchNorm2d(out_planes,", "* num_classes, kernel_size=3, padding=1)] else: loc_layers += [nn.Conv2d(vgg[v].out_channels, cfg[k] *", "conv7 = nn.Conv2d(1024, 1024, kernel_size=1) layers += [pool5, conv6, nn.ReLU(inplace=False),", "512, kernel_size=1) self.convert3 = ConvBlock(128, 256, kernel_size=1) # convs to", "= ConvBlock(256, 512, kernel_size=1) self.convert3 = ConvBlock(128, 256, kernel_size=1) #", "ConvBlock(384, 256, kernel_size=1) self.convert2 = ConvBlock(256, 512, kernel_size=1) self.convert3 =", "= self.relu(x) out = self.single_branch(x) return out class Ds_Conv(nn.Module): def", "ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1) self.part_b = ConvBlock(inter_planes, out_planes,", "super(LDS, self).__init__() self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0) self.pool2 =", "= list() # apply lds to the initial image x_pool", "from: 1: confidence layers, Shape: [batch*num_priors,num_classes] 2: localization layers, Shape:", "self.part_a(x) out2 = self.part_b(out1) return out1, out2 class LSN_later(nn.Module): def", "k == 7: sources.append(x) else: pass # project the forward", "padding=(1, 1)): super(Ds_Conv, self).__init__() self.out_channels = out_planes self.single_branch = nn.Sequential(", "def forward(self, x): out = self.single_branch(x) return out class LRFNet(nn.Module):", "Ds_Conv(1024, 512, stride=2, padding=(1, 1)) self.dsc3 = Ds_Conv(512, 256, stride=2,", "x_pool3_icn = self.icn3(x_pool2_skip) w = self.Norm3(self.dsc2(p) + x * x_pool3_icn)", "self.relu = nn.ReLU(inplace=False) self.single_branch = nn.Sequential( ConvBlock(in_planes, out_planes, kernel_size=(3, 3),", "priorbox layer to produce default bounding boxes specific to the", "dim=1) agent1 = self.agent1(s) convert1 = self.convert1(proj) pred1 = torch.cat([agent1,", "kernel_size=1, stride=1) self.agent3 = ConvBlock(512, 256, kernel_size=1, stride=1) # convs", "3: priorbox layers, Shape: [2,num_priors*4] train: list 
of concat outputs", "= out_planes // 4 self.part_a = ConvBlock(in_planes, inter_planes, kernel_size=(3, 3),", "list() conf = list() new_sources = list() # apply lds", "# The conv4_3 level proj1 = F.upsample(tmp1, size=(38, 38), mode='bilinear')", "nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None self.relu =", "class Relu_Conv(nn.Module): def __init__(self, in_planes, out_planes, stride=1): super(Relu_Conv, self).__init__() self.out_channels", "{ '300': [1024, 'S', 512, 'S', 256]} def multibox(size, vgg,", "layers for (x, l, c) in zip(new_sources, self.loc, self.conf): loc.append(l(x).permute(0,", "x. Args: x: input image or batch of images. Shape:", "weights into state dict...') self.load_state_dict(torch.load(base_file)) print('Finished!') else: print('Sorry only .pth", "def forward(self, x): x = self.relu(x) out = self.single_branch(x) return", "stride=1) self.Norm2 = Relu_Conv(1024, 1024, stride=1) self.Norm3 = Relu_Conv(512, 512,", "= torch.cat([agent3, convert3], dim=1) pred3 = self.merge3(pred3) new_sources.append(pred3) for prediction", "zip(new_sources, self.loc, self.conf): loc.append(l(x).permute(0, 2, 3, 1).contiguous()) conf.append(c(x).permute(0, 2, 3,", "size=(19, 19), mode='bilinear') proj3 = F.upsample(tmp3, size=(19, 19), mode='bilinear') proj", "x_pool1 = self.pool1(x) x_pool2 = self.pool2(x_pool1) x_pool3 = self.pool3(x_pool2) return", "bn=True) self.ibn2 = IBN(1024, bn=True) self.relu = nn.ReLU(inplace=False) self.extras =", "image x_pool = self.lds(x) # apply vgg up to conv4_3", "# project the forward features into lower dimension. 
tmp1 =", "feature dimensions of other levels self.convert1 = ConvBlock(384, 256, kernel_size=1)", "layers, Shape: [batch,num_priors*4] 3: priorbox layers, Shape: [2,num_priors*4] train: list", "inter_planes = in_planes // 4 self.single_branch = nn.Sequential( ConvBlock(in_planes, inter_planes,", "stride=1) self.agent2 = ConvBlock(1024, 512, kernel_size=1, stride=1) self.agent3 = ConvBlock(512,", "kernel_size=1, stride=1) self.proj3 = ConvBlock(256, 128, kernel_size=1, stride=1) # convs", "in cfg: if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)]", "new_sources.append(pred1) # The fc_7 level proj2 = F.upsample(tmp2, size=(19, 19),", "of concat outputs from: 1: softmax layers, Shape: [batch*num_priors,num_classes] 2:", "+= [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=False)] else: layers += [conv2d, nn.ReLU(inplace=False)] in_channels", "= in_planes // 4 self.single_branch = nn.Sequential( ConvBlock(in_planes, inter_planes, kernel_size=1,", "else: output = ( loc.view(loc.size(0), -1, 4), conf.view(conf.size(0), -1, self.num_classes),", "proj2 = F.upsample(tmp2, size=(38, 38), mode='bilinear') proj3 = F.upsample(tmp3, size=(38,", "[ConvBlock(128, 256, kernel_size=3,stride=1)] return layers extras = { '300': [1024,", "i flag = False for k, v in enumerate(cfg): if", "[6, 6, 6, 6, 4, 4]} def build_net(phase, size=300, num_classes=81):", "conf layers head: \"multibox head\" consists of loc and conf", "out_planes self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else", "p = self.Norm2(self.dsc1(s) + conv7_bn * x_pool2_icn) x = self.base[34](x)", "and size == 512: layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)]", "== 2: q = self.Norm4(self.dsc3(w) + x) sources.append(q) elif k", "size=(38, 38), mode='bilinear') proj3 = F.upsample(tmp3, size=(38, 38), mode='bilinear') proj", "padding=1) conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6) conv7 =", "super(Ds_Conv, self).__init__() 
self.out_channels = out_planes self.single_branch = nn.Sequential( ConvBlock(in_planes, out_planes,", "stride=2, ceil_mode=True)] else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if", "enumerate(vgg_source): if k == 0: loc_layers += [nn.Conv2d(512, cfg[k] *", "cfg[k] * num_classes, kernel_size=3, padding=1)] else: loc_layers += [nn.Conv2d(vgg[v].out_channels, cfg[k]", "def __init__(self, phase, size, base, extras, head, num_classes): super(LRFNet, self).__init__()", "on input image(s) x. Args: x: input image or batch", "feature dimensions of other levels self.proj1 = ConvBlock(1024, 128, kernel_size=1,", "def forward(self, x): \"\"\"Applies network layers and ops on input", "kernel_size=(3, 3), stride=stride, padding=1, relu=False) ) def forward(self, x): out", "dim=1) pred1 = self.merge1(pred1) new_sources.append(pred1) # The fc_7 level proj2", "eps=1e-5, momentum=0.01, affine=True) if bn else None self.relu = nn.ReLU(inplace=False)", "= Ds_Conv(512, 256, stride=2, padding=(1, 1)) # convs to reduce", "self.out_channels = out_planes self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if", "= F.upsample(tmp3, size=(19, 19), mode='bilinear') proj = torch.cat([proj2, proj3], dim=1)", "= nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None def", "# loc preds self.softmax(conf.view(-1, self.num_classes)), # conf preds ) else:", "1024, stride=1) self.Norm3 = Relu_Conv(512, 512, stride=1) self.Norm4 = Relu_Conv(256,", "kernel_size=3, stride=1, padding=1) self.merge2 = ConvBlock(1024, 1024, kernel_size=3, stride=1, padding=1)", "+= [nn.MaxPool2d(kernel_size=2, stride=2)] elif v == 'C': layers += [nn.MaxPool2d(kernel_size=2,", "= v(x) if k == 0: x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip)", "in_channels == 256 and size == 512: layers += [One_Three_Conv(in_channels,", "ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False) def forward(self, x): out1 =", "super(IBN, self).__init__() 
self.out_channels = out_planes self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01,", "num_classes self.size = size # vgg network self.base = nn.ModuleList(base)", "Return: Depending on phase: test: list of concat outputs from:", "Depending on phase: test: list of concat outputs from: 1:", "\"test\": output = ( loc.view(loc.size(0), -1, 4), # loc preds", "base = { '300': [64, 64, 'M', 128, 128, 'M',", "priorbox layers, Shape: [2,num_priors*4] train: list of concat outputs from:", "bn=True) self.relu = nn.ReLU(inplace=False) self.extras = nn.ModuleList(extras) self.loc = nn.ModuleList(head[0])", "k, v in enumerate(extra_layers): if (k < indicator+1 and k", "3: priorbox layers, Shape: [2,num_priors*4] \"\"\" sources = list() loc", "128, kernel_size=1,stride=1)] layers += [ConvBlock(128, 256, kernel_size=3,stride=1)] layers += [ConvBlock(256,", "layers = [] in_channels = i flag = False for", "self.out_channels = out_planes self.single_branch = nn.Sequential( ConvBlock(in_planes, out_planes, kernel_size=(3, 3),", "4, kernel_size=3, padding=1)] conf_layers += [nn.Conv2d(vgg[v].out_channels, cfg[k] * num_classes, kernel_size=3,", "self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=1) def forward(self, x): x_pool1", "associated priorbox layer to produce default bounding boxes specific to", "= list() conf = list() new_sources = list() # apply", "// 4 self.part_a = ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1)", "= ConvBlock(384, 256, kernel_size=1) self.convert2 = ConvBlock(256, 512, kernel_size=1) self.convert3", "if k == 0: x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip) w =", ") def forward(self, x): x = self.relu(x) out = self.single_branch(x)", "__init__(self, phase, size, base, extras, head, num_classes): super(LRFNet, self).__init__() self.phase", "new_sources.append(prediction) # apply multibox head to source layers for (x,", "x_pool1_icn) # apply vgg up to fc7 for k in", "phase self.num_classes = 
num_classes self.size = size # vgg network", "layers and cache source layer outputs for k, v in", "or batch of images. Shape: [batch,3,300,300]. Return: Depending on phase:", "cfg[k] * 4, kernel_size=3, padding=1)] conf_layers += [nn.Conv2d(vgg[v].out_channels, cfg[k] *", "1) conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)", "class ConvBlock(nn.Module): def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1,", "self.bn(x) if self.relu is not None: x = self.relu(x) return", "in enumerate(extra_layers): if (k < indicator+1 and k % 2", "inter_planes, kernel_size=(3, 3), stride=stride, padding=1) self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1,", "kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False): super(ConvBlock, self).__init__()", "return layers extras = { '300': [1024, 'S', 512, 'S',", "self.Norm2 = Relu_Conv(1024, 1024, stride=1) self.Norm3 = Relu_Conv(512, 512, stride=1)", "= ConvBlock(1024, 1024, kernel_size=3, stride=1, padding=1) self.merge3 = ConvBlock(512, 512,", "[] in_channels = i for v in cfg: if v", "else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if batch_norm: layers", "head: \"multibox head\" consists of loc and conf conv layers", "conf_layers +=[nn.Conv2d(512, cfg[k] * num_classes, kernel_size=3, padding=1)] else: loc_layers +=", "+= 1 return vgg, extra_layers, (loc_layers, conf_layers) mbox = {", "x * x_pool3_icn) elif k == 2: q = self.Norm4(self.dsc3(w)", "def __init__(self,): super(LDS, self).__init__() self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)", "tmp3 = self.proj3(q) # The conv4_3 level proj1 = F.upsample(tmp1,", "layers += [pool5, conv6, nn.ReLU(inplace=False), conv7, nn.ReLU(inplace=False)] return layers base", "4, 4]} def build_net(phase, size=300, num_classes=81): if size != 300:", "cfg[k] * 4, kernel_size=3, padding=1)] conf_layers +=[nn.Conv2d(512, cfg[k] * num_classes,", "self.merge1 = ConvBlock(512, 
512, kernel_size=3, stride=1, padding=1) self.merge2 = ConvBlock(1024,", "load_weights(self, base_file): other, ext = os.path.splitext(base_file) if ext == '.pkl'", "dimensions of other levels self.proj1 = ConvBlock(1024, 128, kernel_size=1, stride=1)", "for o in conf], 1) if self.phase == \"test\": output", "256, kernel_size=1, stride=1) self.agent2 = ConvBlock(1024, 512, kernel_size=1, stride=1) self.agent3", "ConvBlock(inter_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1, relu=False) ) def forward(self,", "1: softmax layers, Shape: [batch*num_priors,num_classes] 2: localization layers, Shape: [batch,num_priors*4]", "= self.agent3(w) convert3 = self.convert3(proj) pred3 = torch.cat([agent3, convert3], dim=1)", "padding=(1, 1)) # convs to reduce the feature dimensions of", "__init__(self, in_planes, out_planes, stride=1): super(One_Three_Conv, self).__init__() self.out_channels = out_planes inter_planes", "4), conf.view(conf.size(0), -1, self.num_classes), ) return output def load_weights(self, base_file):", "self.lds = LDS() # convs for merging the lsn and", "x = v(x) if k == 0: x_pool3_skip, x_pool3_icn =", "list of concat outputs from: 1: confidence layers, Shape: [batch*num_priors,num_classes]", "= i flag = False for k, v in enumerate(cfg):", "1)) self.dsc2 = Ds_Conv(1024, 512, stride=2, padding=(1, 1)) self.dsc3 =", "self.single_branch = nn.Sequential( ConvBlock(in_planes, inter_planes, kernel_size=1, stride=1), ConvBlock(inter_planes, out_planes, kernel_size=(3,", "\"\"\" def __init__(self, phase, size, base, extras, head, num_classes): super(LRFNet,", "ops on input image(s) x. 
Args: x: input image or", "= torch.cat([proj2, proj3], dim=1) agent2 = self.agent2(p) convert2 = self.convert2(proj)", "new_sources = list() # apply lds to the initial image", "self.convert1 = ConvBlock(384, 256, kernel_size=1) self.convert2 = ConvBlock(256, 512, kernel_size=1)", "512 extras: extra layers that feed to multibox loc and", "nn.Conv2d(1024, 1024, kernel_size=1) layers += [pool5, conv6, nn.ReLU(inplace=False), conv7, nn.ReLU(inplace=False)]", "'M', 512, 512, 512]} def add_extras(size, cfg, i, batch_norm=False): #", "to VGG for feature scaling layers = [] in_channels =", "'.pth': print('Loading weights into state dict...') self.load_state_dict(torch.load(base_file)) print('Finished!') else: print('Sorry", "# apply multibox head to source layers for (x, l,", "self.base = nn.ModuleList(base) self.lds = LDS() # convs for merging", "multibox head to source layers for (x, l, c) in", "256, 256, 'C', 512, 512, 512, 'M', 512, 512, 512]}", "loc and conf layers head: \"multibox head\" consists of loc", "nn.ModuleList(extras) self.loc = nn.ModuleList(head[0]) self.conf = nn.ModuleList(head[1]) if self.phase ==", "test: list of concat outputs from: 1: softmax layers, Shape:", "self.pool3(x_pool2) return x_pool3 class ConvBlock(nn.Module): def __init__(self, in_planes, out_planes, kernel_size,", "= nn.ModuleList(extras) self.loc = nn.ModuleList(head[0]) self.conf = nn.ModuleList(head[1]) if self.phase", "of loc and conf conv layers \"\"\" def __init__(self, phase,", "proj2, proj3], dim=1) agent1 = self.agent1(s) convert1 = self.convert1(proj) pred1", "self.base[34](x) # apply extra layers and cache source layer outputs", "o in conf], 1) if self.phase == \"test\": output =", "def forward(self, x): out = self.single_branch(x) return out class Relu_Conv(nn.Module):", "1).contiguous()) loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)", "F.upsample(tmp2, size=(19, 19), mode='bilinear') proj3 = F.upsample(tmp3, size=(19, 19), mode='bilinear')", 
"loc.view(loc.size(0), -1, 4), conf.view(conf.size(0), -1, self.num_classes), ) return output def", "torch.nn.functional as F class LDS(nn.Module): def __init__(self,): super(LDS, self).__init__() self.pool1", "Shape: [2,num_priors*4] \"\"\" sources = list() loc = list() conf", "[nn.Conv2d(v.out_channels, cfg[i] * num_classes, kernel_size=3, padding=1)] i += 1 return", "= list() new_sources = list() # apply lds to the", "indicator = 3 for k, v in enumerate(extra_layers): if (k", "downsample the features self.dsc1 = Ds_Conv(512, 1024, stride=2, padding=(1, 1))", "flag = False for k, v in enumerate(cfg): if in_channels", "else None def forward(self, x): if self.bn is not None:", "if ext == '.pkl' or '.pth': print('Loading weights into state", "other levels self.proj1 = ConvBlock(1024, 128, kernel_size=1, stride=1) self.proj2 =", "momentum=0.01, affine=True) if bn else None self.relu = nn.ReLU(inplace=False) if", "the features self.dsc1 = Ds_Conv(512, 1024, stride=2, padding=(1, 1)) self.dsc2", "ConvBlock(1024, 128, kernel_size=1, stride=1) self.proj2 = ConvBlock(512, 128, kernel_size=1, stride=1)", "to reduce the feature dimensions of other levels self.convert1 =", "else None self.relu = nn.ReLU(inplace=False) if relu else None def", "return x class LSN_init(nn.Module): def __init__(self, in_planes, out_planes, stride=1): super(LSN_init,", "% 2 != 0): loc_layers += [nn.Conv2d(v.out_channels, cfg[i] * 4,", "conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1) if", "base: VGG16 layers for input, size of either 300 or", "proj = proj3 agent3 = self.agent3(w) convert3 = self.convert3(proj) pred3", "x_pool3 class ConvBlock(nn.Module): def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0,", "v pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) conv6 = nn.Conv2d(512, 1024,", "list of concat outputs from: 1: softmax layers, Shape: [batch*num_priors,num_classes]", "1 return vgg, extra_layers, (loc_layers, conf_layers) mbox = { '300':", "= [] 
vgg_source = [1, -2] for k, v in", "extra_layers, (loc_layers, conf_layers) mbox = { '300': [6, 6, 6,", "= out_planes // 4 self.part_a = nn.Sequential( ConvBlock(in_planes, inter_planes, kernel_size=(3,", "self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)", "[batch,num_priors*4] 3: priorbox layers, Shape: [2,num_priors*4] \"\"\" sources = list()", "= nn.ModuleList(head[0]) self.conf = nn.ModuleList(head[1]) if self.phase == 'test': self.softmax", "= nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0) self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2,", "v layers += [ConvBlock(256, 128, kernel_size=1,stride=1)] layers += [ConvBlock(128, 256,", "padding=6, dilation=6) conv7 = nn.Conv2d(1024, 1024, kernel_size=1) layers += [pool5,", "ConvBlock(512, 128, kernel_size=1, stride=1) self.proj3 = ConvBlock(256, 128, kernel_size=1, stride=1)", "layers and ops on input image(s) x. Args: x: input", "= ConvBlock(512, 512, kernel_size=3, stride=1, padding=1) self.ibn1 = IBN(512, bn=True)", "input image size is not supported!\") return return LRFNet(phase, size,", "self.part_a(x) out2 = self.part_b(out1) return out1, out2 class IBN(nn.Module): def", "ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1), ConvBlock(inter_planes, inter_planes, kernel_size=1, stride=1),", "layers added to VGG for feature scaling layers = []", "torch.cat([proj1, proj2, proj3], dim=1) agent1 = self.agent1(s) convert1 = self.convert1(proj)", "in sources: new_sources.append(prediction) # apply multibox head to source layers", "128, kernel_size=1, stride=1) self.proj3 = ConvBlock(256, 128, kernel_size=1, stride=1) #", "= nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None self.relu", "# conf preds ) else: output = ( loc.view(loc.size(0), -1,", "for k in range(22, 34): x = self.base[k](x) conv7_bn =", "and k % 2 == 0) or (k > indicator+1", "= 
self.part_a(x) out2 = self.part_b(out1) return out1, out2 class LSN_later(nn.Module):", "consists of loc and conf conv layers \"\"\" def __init__(self,", "F.upsample(tmp3, size=(19, 19), mode='bilinear') proj = torch.cat([proj2, proj3], dim=1) agent2", "= self.proj1(p) tmp2 = self.proj2(w) tmp3 = self.proj3(q) # The", "kernel_size=3, padding=6, dilation=6) conv7 = nn.Conv2d(1024, 1024, kernel_size=1) layers +=", "relu=False) ) def forward(self, x): out = self.single_branch(x) return out", "3), stride=stride, padding=1, relu=False) ) def forward(self, x): out =", "512, 512, 512, 'M', 512, 512, 512]} def add_extras(size, cfg,", "ext = os.path.splitext(base_file) if ext == '.pkl' or '.pth': print('Loading", "512, stride=1) self.Norm2 = Relu_Conv(1024, 1024, stride=1) self.Norm3 = Relu_Conv(512,", "in enumerate(vgg_source): if k == 0: loc_layers += [nn.Conv2d(512, cfg[k]", "2, 3, 1).contiguous()) conf.append(c(x).permute(0, 2, 3, 1).contiguous()) loc = torch.cat([o.view(o.size(0),", "layers += [ConvBlock(128, 256, kernel_size=3,stride=1)] layers += [ConvBlock(256, 128, kernel_size=1,stride=1)]", "= self.lds(x) # apply vgg up to conv4_3 for k", "layers that feed to multibox loc and conf layers head:", "initial image x_pool = self.lds(x) # apply vgg up to", "out1, out2 class LSN_later(nn.Module): def __init__(self, in_planes, out_planes, stride=1): super(LSN_later,", "kernel_size=(3, 3), stride=stride, padding=1) self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1,", "= ConvBlock(1024, 128, kernel_size=1, stride=1) self.proj2 = ConvBlock(512, 128, kernel_size=1,", "'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512,", "else: pass # project the forward features into lower dimension.", "LDS() # convs for merging the lsn and ssd features", "-1, 4), # loc preds self.softmax(conf.view(-1, self.num_classes)), # conf preds", "for merging the lsn and ssd features self.Norm1 = Relu_Conv(512,", "out1 = self.part_a(x) out2 = self.part_b(out1) return out1, out2 
class", "in_channels = i for v in cfg: if v ==", "features into lower dimension. tmp1 = self.proj1(p) tmp2 = self.proj2(w)", "on the SSD architecture. Each multibox layer branches into 1)", "multibox loc and conf layers head: \"multibox head\" consists of", "for generate the lsn features self.icn1 = LSN_init(3, 512, stride=1)", "2), stride=2, padding=1) def forward(self, x): x_pool1 = self.pool1(x) x_pool2", "conv layers \"\"\" def __init__(self, phase, size, base, extras, head,", "self).__init__() self.out_channels = out_planes inter_planes = out_planes // 4 self.part_a", "ceil_mode=True)] else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if batch_norm:", "= nn.Sequential( ConvBlock(in_planes, inter_planes, kernel_size=1, stride=1), ConvBlock(inter_planes, out_planes, kernel_size=(3, 3),", "self.num_classes = num_classes self.size = size # vgg network self.base", "nn.ReLU(inplace=False) self.extras = nn.ModuleList(extras) self.loc = nn.ModuleList(head[0]) self.conf = nn.ModuleList(head[1])", "Extra layers added to VGG for feature scaling layers =", "= ConvBlock(512, 256, kernel_size=1, stride=1) self.agent2 = ConvBlock(1024, 512, kernel_size=1,", "# apply lds to the initial image x_pool = self.lds(x)", "x = self.base[k](x) conv4_3_bn = self.ibn1(x) x_pool1_skip, x_pool1_icn = self.icn1(x_pool)", "based on the SSD architecture. 
Each multibox layer branches into", "k == 0: loc_layers += [nn.Conv2d(512, cfg[k] * 4, kernel_size=3,", "padding=padding, relu=False) ) def forward(self, x): out = self.single_branch(x) return", "__init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True,", "reduce the feature dimensions of current level self.agent1 = ConvBlock(512,", "stride=1, relu=False) def forward(self, x): out1 = self.part_a(x) out2 =", "1024, kernel_size=3, padding=6, dilation=6) conv7 = nn.Conv2d(1024, 1024, kernel_size=1) layers", "'C', 512, 512, 512, 'M', 512, 512, 512]} def add_extras(size,", "proj3], dim=1) agent2 = self.agent2(p) convert2 = self.convert2(proj) pred2 =", "self.bn is not None: x = self.bn(x) if self.relu is", "layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)] in_channels = v layers", "elif v == 'C': layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)] else:", "== 512: layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)] else: layers", "base_file): other, ext = os.path.splitext(base_file) if ext == '.pkl' or", "the initial image x_pool = self.lds(x) # apply vgg up", "stride=2)] elif v == 'C': layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]", "out1, out2 class IBN(nn.Module): def __init__(self, out_planes, bn=True): super(IBN, self).__init__()", "layers base = { '300': [64, 64, 'M', 128, 128,", "= Relu_Conv(512, 512, stride=1) self.Norm4 = Relu_Conv(256, 256, stride=1) #", "return x class One_Three_Conv(nn.Module): def __init__(self, in_planes, out_planes, stride=1): super(One_Three_Conv,", "// 4 self.part_a = nn.Sequential( ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride,", "return x_pool3 class ConvBlock(nn.Module): def __init__(self, in_planes, out_planes, kernel_size, stride=1,", "kernel_size=(3, 3), stride=stride, padding=1) ) self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1,", 
"self.ibn1 = IBN(512, bn=True) self.ibn2 = IBN(1024, bn=True) self.relu =", "= self.Norm1(conv4_3_bn * x_pool1_icn) # apply vgg up to fc7", "out_planes, stride=1): super(LSN_later, self).__init__() self.out_channels = out_planes inter_planes = out_planes", "x): x_pool1 = self.pool1(x) x_pool2 = self.pool2(x_pool1) x_pool3 = self.pool3(x_pool2)", "self.out_channels = out_planes self.relu = nn.ReLU(inplace=False) self.single_branch = nn.Sequential( ConvBlock(in_planes,", "kernel_size=3, padding=1)] i += 1 return vgg, extra_layers, (loc_layers, conf_layers)", "[One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)] else: layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2),", "to fc7 for k in range(22, 34): x = self.base[k](x)", "# Extra layers added to VGG for feature scaling layers", "extras: extra layers that feed to multibox loc and conf", "size, base, extras, head, num_classes): super(LRFNet, self).__init__() self.phase = phase", "k, v in enumerate(vgg_source): if k == 0: loc_layers +=", "proj3 = F.upsample(tmp3, size=(10, 10), mode='bilinear') proj = proj3 agent3", "[ConvBlock(256, 128, kernel_size=1,stride=1)] layers += [ConvBlock(128, 256, kernel_size=3,stride=1)] return layers", "F.upsample(tmp2, size=(38, 38), mode='bilinear') proj3 = F.upsample(tmp3, size=(38, 38), mode='bilinear')", "== \"test\": output = ( loc.view(loc.size(0), -1, 4), # loc", "self.merge1(pred1) new_sources.append(pred1) # The fc_7 level proj2 = F.upsample(tmp2, size=(19,", "self.merge2 = ConvBlock(1024, 1024, kernel_size=3, stride=1, padding=1) self.merge3 = ConvBlock(512,", "IBN(nn.Module): def __init__(self, out_planes, bn=True): super(IBN, self).__init__() self.out_channels = out_planes", "5 or k == 7: sources.append(x) else: pass # project", "if (k < indicator+1 and k % 2 == 0)", "256, kernel_size=3,stride=1)] return layers extras = { '300': [1024, 'S',", "conv7_bn * x_pool2_icn) x = self.base[34](x) # apply extra layers", "stride=1) self.agent3 = 
ConvBlock(512, 256, kernel_size=1, stride=1) # convs to", "LSN_later(128, 1024, stride=2) self.icn3 = LSN_later(256, 512, stride=2) # convs", "extra layers and cache source layer outputs for k, v", "files supported.') def vgg(cfg, i, batch_norm=False): layers = [] in_channels", "in zip(new_sources, self.loc, self.conf): loc.append(l(x).permute(0, 2, 3, 1).contiguous()) conf.append(c(x).permute(0, 2,", "convert1 = self.convert1(proj) pred1 = torch.cat([agent1, convert1], dim=1) pred1 =", "stride=stride, padding=1), ConvBlock(inter_planes, inter_planes, kernel_size=1, stride=1), ConvBlock(inter_planes, inter_planes, kernel_size=(3, 3),", "print('Loading weights into state dict...') self.load_state_dict(torch.load(base_file)) print('Finished!') else: print('Sorry only", "in enumerate(cfg): if in_channels != 'S': if v == 'S':", "512, 512, 'M', 512, 512, 512]} def add_extras(size, cfg, i,", "512]} def add_extras(size, cfg, i, batch_norm=False): # Extra layers added", "= self.convert1(proj) pred1 = torch.cat([agent1, convert1], dim=1) pred1 = self.merge1(pred1)", "self.single_branch = nn.Sequential( ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=padding, relu=False)", "self.proj3(q) # The conv4_3 level proj1 = F.upsample(tmp1, size=(38, 38),", "k == 0: x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip) w = self.Norm3(self.dsc2(p)", "bn=True): super(IBN, self).__init__() self.out_channels = out_planes self.bn = nn.BatchNorm2d(out_planes, eps=1e-5,", "19), mode='bilinear') proj3 = F.upsample(tmp3, size=(19, 19), mode='bilinear') proj =", "+= [conv2d, nn.ReLU(inplace=False)] in_channels = v pool5 = nn.MaxPool2d(kernel_size=3, stride=1,", "1) if self.phase == \"test\": output = ( loc.view(loc.size(0), -1," ]
[ "Image(width=w, height=h, background=Color('red')) as t: r = wpi.exportpixels(t, 0, 0,", "0.5*t.quantum_range, channel='red') save(t, f, True) def test_separate_channel(self): f = wpi.separate_channel", "f(t) save(t, f) with self.rose.clone() as t: f(t, channel='red') save(t,", "f(t, 'password') save(t, f) def test_deskew(self): f = wpi.deskew with", "-30, 0) save(t, f) with self.rose.clone() as t: f(t, -30,", "f(t) save(t, f) def test_clippath(self): # NOTE: result is always", "= t.quantum_range f(t, int(rng * 0.05), int(rng * 0.95)) save(t,", "f(t, 30, 10, 45) save(t, f) with self.logo.clone() as t:", "self.grad.clone() as t: f(t, 'gaussian') save(t, f) with self.grad.clone() as", "Drawing() as d: text = 'check' d.font = 'Arial' d.font_size", "def test_constitute(self): f = wpi.constitute with Image() as t: w", "3, 3) save(t, f) with self.rose.clone() as t: f(t, 3,", "test_sparsecolor(self): f = wpi.sparsecolor with Image(width=100, height=100, background=Color('black')) as t:", "with self.logo.clone() as t: f(t, 'dilate', 1, 'Diamond') save(t, f)", "with Image(width=h, height=h, background=white) as w: wpi.add(t, b) # add", "def tearDownClass(self): self.rose.destroy() self.grad.destroy() self.logo.destroy() self.text.destroy() self.text_a.destroy() def test_adaptiveblur(self): f", "= wpi.clippath with self.rose.clone() as t: f(t, '#1', True) save(t,", "5) as q: save(q, f, ext='.gif') def test_morphology(self): f =", "wpi.randomthreshold with self.text_a.clone() as t: rng = t.quantum_range f(t, int(rng", "self.logo.clone() as t: # I couldn't build on Windows... 
f(t,", "height=100) as p: p.rotate(90) with self.grad.clone() as t: f(t, p)", "columns = 3 for i in range(rows * columns): wpi.add(dst,", "with self.logo.clone() as t: f(t, Color('red'), Color('blue'), 1.0, False) save(t,", "20) save(t, f) def test_selectiveblur(self): f = wpi.selectiveblur with self.logo.clone()", "as t: f(t, 3, True) save(t, f) def test_raiseimage(self): f", "True) def test_cyclecolormap(self): f = wpi.cyclecolormap with self.logo.clone() as t:", "save(q, f) def test_comment(self): f = wpi.comment with self.grad.clone() as", "0.95), channel='red') save(t, f, True) def test_remap(self): f = wpi.remap", "1.0, ] with self.rose.clone() as t: f(t, 3, 3, kernel)", "f(t, 'gaussian', channel='red') save(t, f, True) def test_affinetransform(self): f =", "Image(width=50, height=50, background=Color('red')) as p: wpi.add(t, p) with Image(width=25, height=25,", "f) def test_enhance(self): f = wpi.enhance with Image(filename='plasma:', width=100, height=100)", "f(t, 20, 20, 0.5*t.quantum_range, channel='red') save(t, f, True) def test_separate_channel(self):", "tmpfile = 'tmp.png' with Image(width=w, height=h, background=Color('white')) as p: with", "save(t, f, True) def test_polaroid(self): f = wpi.polaroid with self.logo.clone()", "2/16, 1/16, 2/16, 4/16, 2/16, 1/16, 2/16, 1/16] with self.rose.clone()", "text, height=h) print('calcSuitableImagesize[H]: ', fontsize) self.assertTrue(fontsize > 0) if __name__", "0, channel='red') save(t, f, True) def test_blur(self): f = wpi.blur", "makeletter(letter, w, h): img = Image(width=w, height=h) with Drawing() as", "size) self.assertTrue(size[0] > 0 and size[1] > 0) def test_fontsize(self):", "= wpi.statistic with self.rose.clone() as t: f(t, 'gradient', 4, 4)", "True) def test_autolevel(self): f = wpi.autolevel with self.rose.clone() as t:", "f = wpi.blackthreshold with self.grad.clone() as t: f(t, Color('gray(50%)')) save(t,", "test_fft(self): f = wpi.forwardfouriertransform # require IM build option 
'--with-fftw'", "t: with self.rose.clone() as p: with f(t, p) as q:", "0.0, 0.0, 0.0, 0.0, 0.0, 1.0 ] f(t, 5, 5,", "# TODO: more useful code with self.rose.clone() as t: f(t)", "self.grad.clone() as t: f(t, 'o4x4,3,3', channel='red') save(t, f, True) def", "3.0) save(t, f) with self.rose.clone() as t: f(t, 5.0, 3.0,", "f(t, 10, 10) save(t, f) def test_rotationalblur(self): f = wpi.rotationalblur", "+ ext # print(path) img.save(filename=path) class CheckImage(unittest.TestCase): @classmethod def setUpClass(self):", "int(rng * 0.05), int(rng * 0.95), channel='red') save(t, f, True)", "def test_texture(self): f = wpi.texture with Image(width=300, height=200) as t:", "R + G + B with f(t, channel) as q:", "10, 45) save(t, f) with self.logo.clone() as t: f(t, 30,", "with self.grad.clone() as t: f(t, 'gaussian', channel='red') save(t, f, True)", "f = wpi.morph color = Color('white') with self.rose.clone() as t:", "True) def test_shave(self): f = wpi.shave with self.logo.clone() as t:", "1) save(t, f) def test_chop(self): f = wpi.chop with self.grad.clone()", "test_extent(self): f = wpi.extent with self.rose.clone() as t: t.gravity =", "w = 1 h = 1 channels = 'RGB' with", "with Image() as t: w = 100 h = 100", "wpi.texture with Image(width=300, height=200) as t: with self.rose.clone() as p:", "= wpi.colordecisionlist with self.rose.clone() as t: f(t, xml) save(t, f)", "height=200) as t: with self.rose.clone() as p: with f(t, p)", "wand.image import Image from wand.drawing import Drawing from wand.color import", "True) c.destroy() def test_comparelayer(self): f = wpi.comparelayer with Image() as", "Image() as t: w = 100 h = 100 black", "test_contrast(self): f = wpi.contrast with self.rose.clone() as t: f(t, False)", "f(t, True, 45, 135) save(t, f) def test_shadow(self): f =", "0, 10) save(t, f) def test_sigmoidalcontrast(self): f = wpi.sigmoidalcontrast with", "save(t, f, True) def test_orderedposterize(self): f = wpi.orderedposterize with self.grad.clone()", "t: f(t, 
'password') save(t, f) f = wpi.decipher f(t, 'password')", "as t: f(t, Color('red'), 0, 10) save(t, f) def test_sigmoidalcontrast(self):", "with Image(width=t.width, height=t.height, background=color) as p: wpi.add(t, p) wpi.setfirstiterator(t) wpi.setdelay(t,", "'red') save(t, f) def test_sepiatone(self): f = wpi.sepiatone with self.rose.clone()", "finally: os.remove(tmpfile) def test_stereo(self): f = wpi.stereo with self.rose.clone() as", "int(rng * 0.05), int(rng * 0.95)) save(t, f) with self.text_a.clone()", "Color('rgb'), Color('gray(25%)')) save(t, f) def test_vignette(self): f = wpi.vignette with", "self.rose.clone() as t: f(t, p, channel='red') save(t, f, True) def", "1, 1, w, h, 'RGB', 'char', b) save(t, f) def", "with makeletter('A', 50, 30) as a: with makeletter('B', 50, 30)", "def test_stegano(self): f = wpi.stegano with self.rose.clone() as t: w", "@classmethod def setUpClass(self): os.mkdir(tmpdir) self.rose = Image(filename='rose:') self.grad = Image(filename='gradient:',", "as p: with Drawing() as d: d.gravity = 'center' d.fill_color", "d.font = 'Arial' fontsize = calcSuitableFontsize(d, text, width=w) print('calcSuitableImagesize[W]: ',", "0.5*t.quantum_range) # TODO: find an skewed image as sample save(t,", "f) def test_fft(self): f = wpi.forwardfouriertransform # require IM build", "2/16, 4/16, 2/16, 1/16, 2/16, 1/16] with self.rose.clone() as t:", "<ColorCorrectionCollection xmlns=\"urn:ASC:CDL:v1.2\"> <ColorCorrection id=\"cc03345\"> <SOPNode> <Slope> 0.9 1.2 0.5 </Slope>", "5) save(t, f) with self.rose.clone() as t: f(t, 5, 5,", "True) def test_oilpaint(self): f = wpi.oilpaint with self.rose.clone() as t:", "f) def test_autogamma(self): f = wpi.autogamma with self.rose.clone() as t:", "as q: save(q, f) def test_swirl(self): f = wpi.swirl with", "self.rose.clone() as t: f(t, 1.0) save(t, f) def test_importpixels(self): f", "with self.rose.clone() as t: f(t, 3, 3, channel='red') save(t, f,", "def test_adaptivethreshold(self): f = 
wpi.adaptivethreshold with self.logo.clone() as t: f(t,", "1.0, 0.0, 0.0, 1.0, 100, 100, 0.0, 1.0, 1.0, 1.0])", "2 columns = 3 for i in range(rows * columns):", "p) with Image(width=25, height=25, background=Color('green1')) as q: for i in", "= wpi.colormatrix with self.logo.clone() as t: kernel = [ 0.5,", "a) wpi.add(t, b) wpi.setfirstiterator(t) with f(t, False, -3) as p:", "as q: wpi.setsizeoffset(q, w, h, offset) q.read(filename='stegano:' + tmpfile) save(q,", "= wpi.shadow with self.text.clone() as t: with self.text.clone() as p:", "+ \"_ch\" + ext else: path = tmpdir + function.__name__", "with f(t) as p: save(p, f) def test_colordecisionlist(self): xml =", "self.logo.clone() as t: f(t, 'dilate', 1, 'Diamond', channel='red') save(t, f,", "f(t, 'default_channels', 'bilinear', [0, 0, 1.0, 0.0, 0.0, 1.0, 100,", "wpi.clippath with self.rose.clone() as t: f(t, '#1', True) save(t, f)", "f(t, int(rng * 0.05), int(rng * 0.95)) save(t, f) with", "as t: f(t, 30, 10, 45) save(t, f) with self.logo.clone()", "5, 5) save(t, f) with self.rose.clone() as t: f(t, 5,", "f(t, Color('red'), Color('blue'), 1.0, False) save(t, f) with self.logo.clone() as", "wpi.blur(p, 0, 1) wpi.add(t, p) with f(t) as p: save(p,", "f = wpi.adaptivethreshold with self.logo.clone() as t: f(t, 20, 20,", "xml) save(t, f) def test_colorize(self): f = wpi.colorize with self.grad.clone()", "self.rose.clone() as t: f(t, channel='red') save(t, f, True) def test_autolevel(self):", "def test_orderedposterize(self): f = wpi.orderedposterize with self.grad.clone() as t: f(t,", "img = Image(width=w, height=h) with Drawing() as d: d.font =", "p, offset) as q: q.save(filename=tmpfile) try: with Image() as q:", "as dst: rows = 2 columns = 3 for i", "with Drawing() as d: text = 'check' d.font = 'Arial'", "with Drawing() as d: d.gravity = 'center' d.fill_color = Color('black')", "p: p.rotate(90) with self.grad.clone() as t: f(t, p) save(t, f)", "p, channel='red') save(t, f, True) def test_implode(self): f = 
wpi.implode", "f(t, -30, 0) save(t, f) with self.rose.clone() as t: f(t,", "f(t, 2.0) save(t, f) def test_opaquepaint(self): f = wpi.opaquepaint with", "r: save(r, f, ext='.gif') def test_constitute(self): f = wpi.constitute with", "True) def test_coalesce(self): # TODO: input optimized .gif file. f", "as t: f(t, 3, 3, kernel, channel='red') save(t, f, True)", "save(t, f) def test_adaptivesharpen(self): f = wpi.adaptivesharpen with self.rose.clone() as", "\"15x15+3+3\" mode = \"frame\" with Drawing() as d: with f(dst,", "f = wpi.filterimage kernel = [ # Sobel filter -1.0,", "t: f(t, 40, 200) save(t, f) def test_whitethreshold(self): f =", "test_wave(self): f = wpi.wave with self.grad.clone() as t: f(t, 40,", "f = wpi.swirl with self.rose.clone() as t: f(t, 180) save(t,", "f = wpi.minify with self.rose.clone() as t: f(t) save(t, f)", "with self.text.clone() as p: p.negate() f(p, 100, 2, 10, 10)", "background=Color('red')) as p: wpi.add(t, p) with Image(width=25, height=25, background=Color('green1')) as", "int(0.1*t.quantum_range)) save(t, f) def test_addnoise(self): f = wpi.addnoise with self.grad.clone()", "0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 ]", "def test_edge(self): f = wpi.edge with self.logo.clone() as t: f(t,", "= Image(width=w, height=h) with Drawing() as d: d.font = 'Arial'", "save(t, f) def test_shade(self): f = wpi.shade with self.logo.clone() as", "h): img = Image(width=w, height=h) with Drawing() as d: d.font", "= Image(width=70, height=60) with Drawing() as draw: draw.font = 'Arial'", "as t: w = 100 h = 100 black =", "wpi.add(t, p) wpi.setfirstiterator(t) wpi.setdelay(t, 60) with f(t, 5) as q:", "f) def test_raiseimage(self): f = wpi.raiseimage with self.rose.clone() as t:", "def test_brightnesscontrast(self): f = wpi.brightnesscontrast with self.rose.clone() as t: f(t,", "p: with self.rose.clone() as t: f(t, p) save(t, f) with", "text = 'check' d.font = 'Arial' d.font_size = 36 size", "f) def test_clut(self): f = wpi.clut with 
Image(filename='gradient:red-blue', width=1, height=100)", "test_clippath(self): # NOTE: result is always FAILED. f = wpi.clippath", "t: f(t, 100, 100) save(t, f) def test_tint(self): f =", "0.0, 0.0, 2.0, 0.0, 0.0]) f(t, d) # not work", "def test_localcontrast(self): f = wpi.localcontrast with self.logo.clone() as t: f(t,", "0.0) self.assertEqual(r[2], 0.0) def test_extent(self): f = wpi.extent with self.rose.clone()", "1 + 2 + 4 # R + G +", "as t: with t.clone() as p: (c, d) = f(t,", "f = wpi.clippath with self.rose.clone() as t: f(t, '#1', True)", "f, True) def test_scale(self): f = wpi.scale with self.rose.clone() as", "= tmpdir + function.__name__ + ext # print(path) img.save(filename=path) class", "t: f(t, int(t.width*1.5), int(t.height*2.0)) save(t, f) def test_adaptivesharpen(self): f =", "] f(t, 5, 5, kernel) save(t, f) def test_combine(self): f", "tmpdir + function.__name__ + \"_ch\" + ext else: path =", "0, 3, channel='red') save(t, f, True) def test_charcoal(self): f =", "with self.logo.clone() as t: f(t, Color('red'), Color('blue'), 1.0, False, channel='blue')", "t.clone() as p: (c, d) = f(t, p, metric='absolute', channel='red')", "f, True) def test_motionblur(self): f = wpi.motionblur with self.logo.clone() as", "t: f(t) save(t, f) def test_minify(self): f = wpi.minify with", "5, 5, channel='red') save(t, f, True) def test_adaptivethreshold(self): f =", "f(t, 'gradient', 4, 4, channel='red') save(t, f, True) def test_stegano(self):", "test_edge(self): f = wpi.edge with self.logo.clone() as t: f(t, 3)", "'overlay') save(t, f) def test_sharpen(self): f = wpi.sharpen with self.rose.clone()", "wpi.comment with self.grad.clone() as t: f(t, 'hello') save(t, f) def", "d.font = 'Arial' d.font_size = 24 d.gravity = 'center' d.text(0,", "', size) self.assertTrue(size[0] > 0 and size[1] > 0) def", "Image(width=4, height=4, background=Color('red')) as t: w = 2 h =", "f(t, p, channel='red') save(t, f, True) def test_implode(self): f =", "b) save(t, f) def 
test_contrast(self): f = wpi.contrast with self.rose.clone()", "self.logo.save(filename=tmpdir + 'logo.png') self.text.save(filename=tmpdir + 'text.png') self.text_a.save(filename=tmpdir + 'a.png') @classmethod", "f = wpi.resample with self.rose.clone() as t: dpi = 72", "with self.rose.clone() as t: f(t, 3, 3) save(t, f) with", "as t: f(t, 3, 3, kernel) save(t, f) with self.rose.clone()", "wpi.roll with self.rose.clone() as t: f(t, 10, 10) save(t, f)", "255, 0, 0, 0, 255, 0] f(t, 1, 1, w,", "Drawing() as d: d.font = 'Arial' d.font_size = 24 d.gravity", "as t: f(t, xml) save(t, f) def test_colorize(self): f =", "def test_equalize(self): f = wpi.equalize with self.rose.clone() as t: f(t)", "save(q, f) def test_thumbnail(self): f = wpi.thumbnail with self.logo.clone() as", "f = wpi.motionblur with self.logo.clone() as t: f(t, 30, 10,", "False) save(t, f) with self.logo.clone() as t: f(t, Color('red'), Color('blue'),", "p: for i in range(5): wpi.blur(p, 0, 1) wpi.add(t, p)", "Image(width=70, height=60) with Drawing() as draw: draw.font = 'Arial' draw.font_size", "save(t, f) def test_shear(self): f = wpi.shear with self.grad.clone() as", "Image() as dst: rows = 2 columns = 3 for", "self.assertEqual(r[2], 0.0) def test_extent(self): f = wpi.extent with self.rose.clone() as", "as t: with makeletter('A', 50, 30) as a: with makeletter('B',", "kernel = [ 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.5,", "f = wpi.colorize with self.grad.clone() as t: f(t, Color('red'), Color('gray(25%)'))", "Image(filename='label:Confirm', width=200, height=60) self.text_a = Image(width=70, height=60) with Drawing() as", "ext='.gif') def test_constitute(self): f = wpi.constitute with Image() as t:", "= 'center' draw.fill_color = Color('white') draw.stroke_color = Color('black') draw.text(0, 0,", "'Diamond') save(t, f) with self.logo.clone() as t: f(t, 'dilate', 1,", "as d: d.font = 'Arial' d.font_size = 24 d.gravity =", "True) def test_remap(self): f = wpi.remap with self.logo.clone() as t:", "save(p, f) 
def test_colordecisionlist(self): xml = \"\"\" <ColorCorrectionCollection xmlns=\"urn:ASC:CDL:v1.2\"> <ColorCorrection", "wpi.despeckle with self.rose.clone() as t: # TODO: add speckle noise", "height=50, background=Color('red')) as p: wpi.add(t, p) with Image(width=25, height=25, background=Color('green1'))", "B with f(t, channel) as q: save(q, f) def test_comment(self):", "def test_scale(self): f = wpi.scale with self.rose.clone() as t: f(t,", "f(t, 5, 1) save(t, f) def test_chop(self): f = wpi.chop", "45, 135) save(t, f) def test_shadow(self): f = wpi.shadow with", "wpi.morphology with self.logo.clone() as t: f(t, 'dilate', 1, 'Diamond') save(t,", "20, 0.5*t.quantum_range) save(t, f) with self.logo.clone() as t: f(t, 20,", "self.logo.clone() as t: kernel = [ 0.5, 0.0, 0.0, 0.0,", "f) with self.rose.clone() as t: f(t, 3, 3, channel='red') save(t,", "= Color('black') draw.text(0, 0, 'A') draw(self.text_a) self.rose.save(filename=tmpdir + 'rose.png') self.grad.save(filename=tmpdir", "save(t, f) def test_clamp(self): f = wpi.clamp # TODO: more", "as t: f(t, 0.5*t.quantum_range) save(t, f) def test_shade(self): f =", "wpi.opaquepaint with self.logo.clone() as t: f(t, Color('red'), Color('blue'), 1.0, False)", "as t: f(t, 10, 10, 45) save(t, f) def test_smush(self):", "rows) thumb = \"80x50+4+3\" frame = \"15x15+3+3\" mode = \"frame\"", "test_morphology(self): f = wpi.morphology with self.logo.clone() as t: f(t, 'dilate',", "f) def test_shadow(self): f = wpi.shadow with self.text.clone() as t:", "height=h) with Drawing() as d: d.font = 'Arial' d.font_size =", "self.rose.clone() as t: f(t) save(t, f) def test_montage(self): f =", "ext='.gif') def test_morphology(self): f = wpi.morphology with self.logo.clone() as t:", "always FAILED. 
f = wpi.clippath with self.rose.clone() as t: f(t,", "wpi.exportpixels(t, 0, 0, w, h, channels, 'double') self.assertEqual(r[0], 1.0) self.assertEqual(r[1],", "wpi.vignette with self.logo.clone() as t: wpi.minify(t) t.background_color = Color('black') f(t,", "t: f(t, 3, True) save(t, f) def test_raiseimage(self): f =", "def test_randomthreshold(self): f = wpi.randomthreshold with self.text_a.clone() as t: rng", "Drawing() as d: d.affine([2.0, 0.0, 0.0, 2.0, 0.0, 0.0]) f(t,", "f) def test_vignette(self): f = wpi.vignette with self.logo.clone() as t:", "p) as q: save(q, f) def test_thumbnail(self): f = wpi.thumbnail", "f(t, p, metric='absolute', channel='red') save(c, f, True) c.destroy() def test_comparelayer(self):", "t: kernel = [ 0.5, 0.0, 0.0, 0.0, 0.0, 0.0,", "channel='red') save(t, f, True) def test_floodfillpaint(self): f = wpi.floodfillpaint with", "wpi.add(t, b) # add image for red channel wpi.add(t, b)", "= 3 for i in range(rows * columns): wpi.add(dst, base)", "@classmethod def tearDownClass(self): self.rose.destroy() self.grad.destroy() self.logo.destroy() self.text.destroy() self.text_a.destroy() def test_adaptiveblur(self):", "f) def test_shade(self): f = wpi.shade with self.logo.clone() as t:", "# TODO: add speckle noise f(t) save(t, f) def test_edge(self):", "True) def test_blackthreshold(self): f = wpi.blackthreshold with self.grad.clone() as t:", "os import unittest tmpdir = '_tmp/' def save(img, function, channel=False,", "channel='green') save(t, f, True) def test_coalesce(self): # TODO: input optimized", "f = wpi.scale with self.rose.clone() as t: f(t, t.width*2, t.height*2)", "save(t, f, True) def test_affinetransform(self): f = wpi.affinetransform with self.rose.clone()", "wpi.add(t, p) with Image(width=25, height=25, background=Color('green1')) as q: for i", "self.logo.clone() as t: with self.rose.clone() as p: f(t, p, 'nodither')", "test_raiseimage(self): f = wpi.raiseimage with self.rose.clone() as t: f(t, 10,", "f(t, 0, 3) save(t, f) def 
test_enhance(self): f = wpi.enhance", "with self.rose.clone() as t: f(t, xml) save(t, f) def test_colorize(self):", "def test_floodfillpaint(self): f = wpi.floodfillpaint with self.logo.clone() as t: f(t,", "t.quantum_range f(t, int(rng * 0.05), int(rng * 0.95), channel='red') save(t,", "t: f(t, 180) save(t, f) def test_texture(self): f = wpi.texture", "letter) d(img) return img with Image() as t: with makeletter('A',", "2, 10, 10) t.composite_channel('default_channels', p, 'overlay') save(t, f) def test_sharpen(self):", "height=40, background=Color('black')) as t: f(t, 0.5*t.quantum_range) # TODO: find an", "f(t, True, 3, 3) save(t, f) with self.rose.clone() as t:", "= 2 h = 2 b = [0, 0, 0,", "p, 'nodither') save(t, f) def test_resample(self): f = wpi.resample with", "= Color('blue') f(t, -10, -10, t.width+20, t.height+20) save(t, f) def", "couldn't build on Windows... f(t, True) save(t, f) # includes", "h, 'RGB', 'char', b) save(t, f) def test_label(self): f =", "0.10*t.quantum_range, Color('white'), 0, 0) save(t, f) def test_fft(self): f =", "import Color import wandplus.image as wpi from wandplus.textutil import calcSuitableFontsize,", "p.negate() f(p, 100, 2, 10, 10) t.composite_channel('default_channels', p, 'overlay') save(t,", "mag: with t.sequence[1].clone() as phase: wpi.blur(mag, 0, 0.5) # as", "wpi.shade with self.logo.clone() as t: f(t, True, 45, 135) save(t,", "= wpi.adaptiveblur with self.rose.clone() as t: f(t, 5.0, 3.0) save(t,", "with self.text.clone() as t: with self.text.clone() as p: p.negate() f(p,", "0, w, h, channels, 'double') self.assertEqual(r[0], 1.0) self.assertEqual(r[1], 0.0) self.assertEqual(r[2],", "calcSuitableFontsize(d, text, width=w) print('calcSuitableImagesize[W]: ', fontsize) self.assertTrue(fontsize > 0) fontsize", "= 50 draw.gravity = 'center' draw.fill_color = Color('white') draw.stroke_color =", "w: wpi.add(t, b) # add image for red channel wpi.add(t,", "t.sequence[1].clone() as phase: wpi.blur(mag, 0, 0.5) # as 
degradation t2", "as t: f(t, p, channel='green') save(t, f, True) def test_coalesce(self):", "= wpi.encipher with self.rose.clone() as t: f(t, 'password') save(t, f)", "with self.logo.clone() as t: f(t, 5, 30) save(t, f) def", "h = 40 offset = 15 tmpfile = 'tmp.png' with", "as t: f(t, Color('gray(50%)')) save(t, f) class CheckTextUtil(unittest.TestCase): def test_imagesize(self):", "'nodither') save(t, f) def test_resample(self): f = wpi.resample with self.rose.clone()", "f) def test_colordecisionlist(self): xml = \"\"\" <ColorCorrectionCollection xmlns=\"urn:ASC:CDL:v1.2\"> <ColorCorrection id=\"cc03345\">", "t: f(t, 'o4x4,3,3', channel='red') save(t, f, True) def test_polaroid(self): f", "with self.rose.clone() as t: f(t, 5, 1) save(t, f) def", "t.height//2, 20, 20) save(t, f) def test_sparsecolor(self): f = wpi.sparsecolor", "with self.grad.clone() as t: f(t, Color('red'), 0, 10) save(t, f)", "* 0.05), int(rng * 0.95), channel='red') save(t, f, True) def", "'Arial' fontsize = calcSuitableFontsize(d, text, width=w) print('calcSuitableImagesize[W]: ', fontsize) self.assertTrue(fontsize", "# includes two images(magnitude&phase) f = wpi.inversefouriertransform with t.sequence[0].clone() as", "wpi.adaptiveresize with self.rose.clone() as t: f(t, int(t.width*1.5), int(t.height*2.0)) save(t, f)", "f) with self.rose.clone() as t: f(t, True, 3, 3, channel='red')", "with self.rose.clone() as t: f(t, 180) save(t, f) def test_texture(self):", "= 'Arial' draw.font_size = 50 draw.gravity = 'center' draw.fill_color =", "t: with self.rose.clone() as p: for i in range(5): wpi.blur(p,", "index pointer channel = 1 + 2 + 4 #", "self.rose.clone() as t: f(t, 5.0, 3.0, channel='red') save(t, f, True)", "t: f(t, 'dilate', 1, 'Diamond', channel='red') save(t, f, True) def", "green channel wpi.add(t, w) # add image for blue channel", "wpi.chop with self.grad.clone() as t: t.gravity = 'north_west' f(t, 0,", "f(t, channel) as q: save(q, f) def test_comment(self): f =", "as q: save(q, f, 
ext='.gif') def test_morphology(self): f = wpi.morphology", "0.0) def test_extent(self): f = wpi.extent with self.rose.clone() as t:", "Image() as t: w = 2 h = 2 b", "f) def test_haldclut(self): f = wpi.haldclut # TODO: more useful", "Color('white') draw.stroke_color = Color('black') draw.text(0, 0, 'A') draw(self.text_a) self.rose.save(filename=tmpdir +", "0.0, 0.0, 0.0, 0.0, 1.0 ] f(t, 5, 5, kernel)", "f = wpi.decipher f(t, 'password') save(t, f) def test_deskew(self): f", "Color('red'), Color('blue'), 1.0, False, channel='blue') save(t, f, True) def test_orderedposterize(self):", "p, metric='absolute', channel='red') save(c, f, True) c.destroy() def test_comparelayer(self): f", "f = wpi.clamp # TODO: more useful code with self.rose.clone()", "save(c, f) c.destroy() with self.rose.clone() as t: with t.clone() as", "includes two images(magnitude&phase) f = wpi.inversefouriertransform with t.sequence[0].clone() as mag:", "t: f(t, 0.5) save(t, f) def test_brightnesscontrast(self): f = wpi.brightnesscontrast", "f) def test_solarize(self): f = wpi.solarize with self.rose.clone() as t:", "wpi.setfirstiterator(t) wpi.setdelay(t, 60) with f(t, 5) as q: save(q, f,", "base) tile = \"{0}x{1}+0+0\".format(columns, rows) thumb = \"80x50+4+3\" frame =", "as t: f(t, 180) save(t, f) def test_texture(self): f =", "Image(width=25, height=25, background=Color('green1')) as q: for i in range(4): with", "f, True) def test_adaptivethreshold(self): f = wpi.adaptivethreshold with self.logo.clone() as", "f = wpi.importpixels with Image(width=4, height=4, background=Color('red')) as t: w", "with self.rose.clone() as t: f(t, 3, 3, kernel) save(t, f)", "d: d.font = 'Arial' d.font_size = 24 d.gravity = 'center'", "as t: f(t, p) save(t, f) with self.grad.clone() as t:", "metric='absolute', channel='red') save(c, f, True) c.destroy() def test_comparelayer(self): f =", "20, int(0.1*t.quantum_range)) save(t, f) def test_addnoise(self): f = wpi.addnoise with", "self.rose.clone() as t: w = 50 h = 
40 offset", "255, 255, 0, 0, 0, 255, 0] f(t, w, h,", "0.8 1.5 </Power> </SOPNode> <SATNode> <Saturation> 0.85 </Saturation> </SATNode> </ColorCorrection>", "for red channel wpi.add(t, b) # add image for green", "test_emboss(self): f = wpi.emboss with self.logo.clone() as t: f(t, 0,", "self.text_a.save(filename=tmpdir + 'a.png') @classmethod def tearDownClass(self): self.rose.destroy() self.grad.destroy() self.logo.destroy() self.text.destroy()", "f(t, 180) save(t, f) def test_texture(self): f = wpi.texture with", "p: (c, d) = f(t, p, metric='absolute') save(c, f) c.destroy()", "30, 10, 45) save(t, f) with self.logo.clone() as t: f(t,", "with self.rose.clone() as t: f(t, -30, 0) save(t, f) with", "= wpi.roll with self.rose.clone() as t: f(t, 10, 10) save(t,", "save(t, f, True) def test_autolevel(self): f = wpi.autolevel with self.rose.clone()", "2 b = [0, 0, 0, 255, 255, 255, 255,", "[0, 0, 1.0, 0.0, 0.0, 1.0, 100, 100, 0.0, 1.0,", "f(t, 'gaussian') save(t, f) with self.grad.clone() as t: f(t, 'gaussian',", "f) def test_colorize(self): f = wpi.colorize with self.grad.clone() as t:", "0 and size[1] > 0) def test_fontsize(self): w = 100", "test_scale(self): f = wpi.scale with self.rose.clone() as t: f(t, t.width*2,", "0.0, 1.0, -2.0, 0.0, 2.0, -1.0, 0.0, 1.0, ] with", "os.mkdir(tmpdir) self.rose = Image(filename='rose:') self.grad = Image(filename='gradient:', width=400, height=400) self.logo", "3, kernel, channel='red') save(t, f, True) def test_cyclecolormap(self): f =", "Image from wand.drawing import Drawing from wand.color import Color import", "self.logo.clone() as t: f(t, Color('red'), Color('blue'), 1.0, False) save(t, f)", "'gaussian', channel='red') save(t, f, True) def test_affinetransform(self): f = wpi.affinetransform", "24 d.gravity = 'center' d.text(0, 0, letter) d(img) return img", "a: with makeletter('B', 50, 30) as b: wpi.add(t, a) wpi.add(t,", "with self.rose.clone() as t: f(t, False) save(t, f) def test_convolve(self):", "t: with 
Image(width=t.width, height=t.height, background=color) as p: wpi.add(t, p) wpi.setfirstiterator(t)", "thumb = \"80x50+4+3\" frame = \"15x15+3+3\" mode = \"frame\" with", "save(t, f) def test_resample(self): f = wpi.resample with self.rose.clone() as", "draw.stroke_color = Color('black') draw.text(0, 0, 'A') draw(self.text_a) self.rose.save(filename=tmpdir + 'rose.png')", "as t: f(t) save(t, f) def test_equalize(self): f = wpi.equalize", "+ 4 # R + G + B with f(t,", "= wpi.shade with self.logo.clone() as t: f(t, True, 45, 135)", "'compareany') as r: save(r, f, ext='.gif') def test_constitute(self): f =", "from wand.color import Color import wandplus.image as wpi from wandplus.textutil", "# print(path) img.save(filename=path) class CheckImage(unittest.TestCase): @classmethod def setUpClass(self): os.mkdir(tmpdir) self.rose", "def test_resample(self): f = wpi.resample with self.rose.clone() as t: dpi", "def test_spread(self): f = wpi.spread with self.logo.clone() as t: f(t,", "def test_clut(self): f = wpi.clut with Image(filename='gradient:red-blue', width=1, height=100) as", "as t: f(t, True, 3, 3, channel='red') save(t, f, True)", "f = wpi.blueshift with self.logo.clone() as t: f(t, 0.5) save(t,", "100) save(t, f) def test_shear(self): f = wpi.shear with self.grad.clone()", "offset) as q: q.save(filename=tmpfile) try: with Image() as q: wpi.setsizeoffset(q,", "as t: f(t, 5, 5) save(t, f) with self.rose.clone() as", "with self.rose.clone() as t: f(t, p) save(t, f) with self.rose.clone()", "-1.0, 0.0, 1.0, ] with self.rose.clone() as t: f(t, 3,", "as t: f(t, 20, 20, 0.5*t.quantum_range) save(t, f) with self.logo.clone()", "wpi.add(t, b) # add image for green channel wpi.add(t, w)", "def makeletter(letter, w, h): img = Image(width=w, height=h) with Drawing()", "3, channel='red') save(t, f, True) def test_charcoal(self): f = wpi.charcoal", "= 'check' d.font = 'Arial' fontsize = calcSuitableFontsize(d, text, width=w)", "f(t, Color('green'), 0.10*t.quantum_range, 
Color('white'), 0, 0) save(t, f) def test_fft(self):", "t.width+20, t.height+20) save(t, f) def test_filterimage(self): f = wpi.filterimage kernel", "with Drawing() as draw: draw.font = 'Arial' draw.font_size = 50", "channel wpi.setfirstiterator(t) # rewind the index pointer channel = 1", "t.height+20) save(t, f) def test_filterimage(self): f = wpi.filterimage kernel =", "</Slope> <Offset> 0.4 -0.5 0.6 </Offset> <Power> 1.0 0.8 1.5", "self.grad.clone() as t: f(t, p) save(t, f) with self.grad.clone() as", "0.05), int(rng * 0.95)) save(t, f) with self.text_a.clone() as t:", "w, h, 'RGB', 'char', b) save(t, f) def test_contrast(self): f", "sample save(t, f) def test_despeckle(self): f = wpi.despeckle with self.rose.clone()", "t: f(t, 30, 10, 45, channel='red') save(t, f, True) def", "t: r = wpi.exportpixels(t, 0, 0, w, h, channels, 'double')", "def test_motionblur(self): f = wpi.motionblur with self.logo.clone() as t: f(t,", "as t: f(t, 0, 3) save(t, f) def test_enhance(self): f", "45, channel='red') save(t, f, True) def test_scale(self): f = wpi.scale", "def test_montage(self): f = wpi.montage with self.rose.clone() as base: with", "as t: with self.rose.clone() as p: with f(t, p) as", "as t: f(t, 0.4*t.quantum_range) save(t, f) with self.rose.clone() as t:", "function.__name__ + \"_ch\" + ext else: path = tmpdir +", "self.rose.save(filename=tmpdir + 'rose.png') self.grad.save(filename=tmpdir + 'grad.png') self.logo.save(filename=tmpdir + 'logo.png') self.text.save(filename=tmpdir", "f(t, 5) save(t, f) def test_cipher(self): f = wpi.encipher with", "1) wpi.add(t, p) with f(t) as p: save(p, f) def", "with self.rose.clone() as t: f(t, 3, kernel) save(t, f) with", "f = wpi.solarize with self.rose.clone() as t: f(t, 0.4*t.quantum_range) save(t,", "t: f(t, 0, 3) save(t, f) def test_enhance(self): f =", "d.text(0, 0, letter) d(img) return img with Image() as t:", "self.rose.clone() as t: f(t, 0.5*t.quantum_range) save(t, f) def test_shade(self): f", "test_autogamma(self): f 
= wpi.autogamma with self.rose.clone() as t: f(t) save(t,", "import calcSuitableFontsize, calcSuitableImagesize import os import unittest tmpdir = '_tmp/'", "with self.rose.clone() as t: f(t, int(t.width*1.5), int(t.height*2.0)) save(t, f) def", "p: wpi.add(t, p) with Image(width=25, height=25, background=Color('green1')) as q: for", "as t: with self.text.clone() as p: p.negate() f(p, 100, 2,", "with self.logo.clone() as t: f(t, 0, 3) save(t, f) def", "f, True) def test_blackthreshold(self): f = wpi.blackthreshold with self.grad.clone() as", "CheckImage(unittest.TestCase): @classmethod def setUpClass(self): os.mkdir(tmpdir) self.rose = Image(filename='rose:') self.grad =", "= wpi.coalesce with Image() as t: with self.rose.clone() as p:", "f(t, 0, 3, channel='red') save(t, f, True) def test_charcoal(self): f", "as t: t.gravity = 'center' t.background_color = Color('blue') f(t, -10,", "30) save(t, f) def test_magnify(self): f = wpi.magnify with self.rose.clone()", "', fontsize) self.assertTrue(fontsize > 0) fontsize = calcSuitableFontsize(d, text, height=h)", "self.rose.clone() as t: f(t, 'red') save(t, f) def test_sepiatone(self): f", "with makeletter('B', 50, 30) as b: wpi.add(t, a) wpi.add(t, b)", "rewind the index pointer channel = 1 + 2 +", "Color('blue') f(t, -10, -10, t.width+20, t.height+20) save(t, f) def test_filterimage(self):", "wpi.minify with self.rose.clone() as t: f(t) save(t, f) def test_montage(self):", "with f(t, p) as q: save(q, f) def test_thumbnail(self): f", "0.5*t.quantum_range) save(t, f) def test_shade(self): f = wpi.shade with self.logo.clone()", "t: with self.text.clone() as p: p.negate() f(p, 100, 2, 10,", "def test_chop(self): f = wpi.chop with self.grad.clone() as t: t.gravity", "# R + G + B with f(t, channel) as", "0.4 -0.5 0.6 </Offset> <Power> 1.0 0.8 1.5 </Power> </SOPNode>", "save(t, f) f = wpi.decipher f(t, 'password') save(t, f) def", "'rose.png') self.grad.save(filename=tmpdir + 'grad.png') self.logo.save(filename=tmpdir + 
'logo.png') self.text.save(filename=tmpdir + 'text.png')", "def test_fontsize(self): w = 100 h = 100 with Drawing()", "4, channel='red') save(t, f, True) def test_stegano(self): f = wpi.stegano", "100, 100) save(t, f) def test_tint(self): f = wpi.tint with", "as w: wpi.add(t, b) # add image for red channel", "Color('blue'), 1.0, False) save(t, f) with self.logo.clone() as t: f(t,", "f) def test_sharpen(self): f = wpi.sharpen with self.rose.clone() as t:", "background=Color('black')) as t: f(t, 'default_channels', 'bilinear', [0, 0, 1.0, 0.0,", "as t: f(t, Color('red'), Color('blue'), 1.0, False, channel='blue') save(t, f,", "def test_selectiveblur(self): f = wpi.selectiveblur with self.logo.clone() as t: f(t,", "as t: f(t, 45, channel='red') save(t, f, True) def test_scale(self):", "self.grad.clone() as t: f(t, 'gaussian', channel='red') save(t, f, True) def", "save(t, f) def test_spread(self): f = wpi.spread with self.logo.clone() as", "color = Color('white') with self.rose.clone() as t: with Image(width=t.width, height=t.height,", "t: f(t, 3, kernel) save(t, f) with self.rose.clone() as t:", "red channel wpi.add(t, b) # add image for green channel", "t: f(t, 10, 10) save(t, f) def test_rotationalblur(self): f =", "optimized .gif file. 
f = wpi.coalesce with Image() as t:", "as t: f(t, 'password') save(t, f) f = wpi.decipher f(t,", "t: f(t) save(t, f) def test_montage(self): f = wpi.montage with", "f = wpi.thumbnail with self.logo.clone() as t: f(t, 100, 100)", "with self.logo.clone() as t: # I couldn't build on Windows...", "self.rose.clone() as t: f(t, 3, 3, kernel, channel='red') save(t, f,", "def test_sparsecolor(self): f = wpi.sparsecolor with Image(width=100, height=100, background=Color('black')) as", "with t.sequence[0].clone() as mag: with t.sequence[1].clone() as phase: wpi.blur(mag, 0,", "range(rows * columns): wpi.add(dst, base) tile = \"{0}x{1}+0+0\".format(columns, rows) thumb", "f(t, t.width*2, t.height*2) save(t, f) def test_segment(self): f = wpi.segment", "d) # not work correctly (IM<6.9.9-36) save(t, f) def test_autogamma(self):", "True) def test_adaptivethreshold(self): f = wpi.adaptivethreshold with self.logo.clone() as t:", "+ G + B with f(t, channel) as q: save(q,", "def test_compare(self): f = wpi.compare with self.rose.clone() as t: with", "TODO: more useful code with Image(filename='hald:12') as p: with self.rose.clone()", "f(t, 45) save(t, f) with self.rose.clone() as t: f(t, 45,", "3, 3, channel='red') save(t, f, True) def test_sketch(self): f =", "0, 0, w, h, channels, 'double') self.assertEqual(r[0], 1.0) self.assertEqual(r[1], 0.0)", "= 2 b = [0, 0, 0, 255, 255, 255,", "3) save(t, f) with self.rose.clone() as t: f(t, 3, 3,", "save(t, f) def test_autogamma(self): f = wpi.autogamma with self.rose.clone() as", "f) with self.rose.clone() as t: f(t, p, channel='red') save(t, f,", "channel='red') save(t, f, True) def test_adaptivethreshold(self): f = wpi.adaptivethreshold with", "f(t, 0, 10, 20, 20) save(t, f) def test_wave(self): f", "q: q.save(filename=tmpfile) try: with Image() as q: wpi.setsizeoffset(q, w, h,", "with Image(filename='gradient:red-blue', width=1, height=100) as p: p.rotate(90) with self.grad.clone() as", "save(t, f) def test_localcontrast(self): f = 
wpi.localcontrast with self.logo.clone() as", "= Color('black') d.text(0, 0, 'Watch\\nthe\\nPidgeon') d(p) with f(t, p, offset)", "save(t, f) with self.rose.clone() as t: f(t, p, channel='red') save(t,", "with self.grad.clone() as t: f(t, 40, 200) save(t, f) def", "test_label(self): f = wpi.label with self.rose.clone() as t: f(t, 'hello')", "wpi.sparsecolor with Image(width=100, height=100, background=Color('black')) as t: f(t, 'default_channels', 'bilinear',", "with self.rose.clone() as t: dpi = 72 * 2 f(t,", "Color('black') white = Color('white') with Image(width=w, height=w, background=black) as b:", "</SOPNode> <SATNode> <Saturation> 0.85 </Saturation> </SATNode> </ColorCorrection> </ColorCorrectionCollection> \"\"\" f", "wpi.edge with self.logo.clone() as t: f(t, 3) save(t, f) def", "= wpi.enhance with Image(filename='plasma:', width=100, height=100) as t: f(t) save(t,", "True) def test_charcoal(self): f = wpi.charcoal with self.rose.clone() as t:", "f(t, 'gradient', 4, 4) save(t, f) with self.rose.clone() as t:", "t: f(t, 'rgb', False, 5, 20) save(t, f) def test_selectiveblur(self):", "channel='red') save(t, f, True) def test_affinetransform(self): f = wpi.affinetransform with", "test_stereo(self): f = wpi.stereo with self.rose.clone() as t: with self.rose.clone()", "mag f(t2, phase, True) save(t2, f) def test_haldclut(self): f =", "save(t, f) with self.rose.clone() as t: f(t, 5.0, 3.0, channel='red')", "f = wpi.edge with self.logo.clone() as t: f(t, 3) save(t,", "channel='red') save(t, f, True) def test_scale(self): f = wpi.scale with", "def test_clip(self): # NOTE: result is always FAILED. 
f =", "'RGB', 'char', b) save(t, f) def test_contrast(self): f = wpi.contrast", "3, 3, kernel, channel='red') save(t, f, True) def test_floodfillpaint(self): f", "5) save(t, f) def test_cipher(self): f = wpi.encipher with self.rose.clone()", "wpi.splice with self.rose.clone() as t: t.gravity = 'center' f(t, t.width//2,", "q: wpi.setsizeoffset(q, w, h, offset) q.read(filename='stegano:' + tmpfile) save(q, f)", "self.text.clone() as t: with self.text.clone() as p: p.negate() f(p, 100,", "f = wpi.clut with Image(filename='gradient:red-blue', width=1, height=100) as p: p.rotate(90)", "test_chop(self): f = wpi.chop with self.grad.clone() as t: t.gravity =", "test_implode(self): f = wpi.implode with self.rose.clone() as t: f(t, 1.0)", "result is always FAILED. f = wpi.clippath with self.rose.clone() as", "True) save(t, f) def test_randomthreshold(self): f = wpi.randomthreshold with self.text_a.clone()", "(c, d) = f(t, p, metric='absolute') save(c, f) c.destroy() with", "Color('black') d.text(0, 0, 'Watch\\nthe\\nPidgeon') d(p) with f(t, p, offset) as", "True) save(t, f) def test_clut(self): f = wpi.clut with Image(filename='gradient:red-blue',", "0) save(t, f) def test_fft(self): f = wpi.forwardfouriertransform # require", "= Color('white') with self.rose.clone() as t: with Image(width=t.width, height=t.height, background=color)", "self.logo.clone() as t: f(t, 3) save(t, f) def test_emboss(self): f", "def test_magnify(self): f = wpi.magnify with self.rose.clone() as t: f(t)", "dpi = 72 * 2 f(t, dpi, dpi, 'lanczos', 1.0)", "f, True) def test_polaroid(self): f = wpi.polaroid with self.logo.clone() as", "'gradient', 4, 4) save(t, f) with self.rose.clone() as t: f(t,", "d.affine([2.0, 0.0, 0.0, 2.0, 0.0, 0.0]) f(t, d) # not", "wpi.resetpage(qq, 5*(i+1), 5*(i+1)) wpi.add(t, qq) with f(t, 'compareany') as r:", "wpi.minify(t) t.background_color = Color('black') f(t, 0, 10, 20, 20) save(t,", "= wpi.adaptiveresize with self.rose.clone() as t: f(t, int(t.width*1.5), 
int(t.height*2.0)) save(t,", "= wpi.chop with self.grad.clone() as t: t.gravity = 'north_west' f(t,", "noise f(t) save(t, f) def test_edge(self): f = wpi.edge with", "f) with self.text_a.clone() as t: rng = t.quantum_range f(t, int(rng", "test_affinetransform(self): f = wpi.affinetransform with self.rose.clone() as t: with Drawing()", "f = wpi.colormatrix with self.logo.clone() as t: kernel = [", "def test_emboss(self): f = wpi.emboss with self.logo.clone() as t: f(t,", "f = wpi.morphology with self.logo.clone() as t: f(t, 'dilate', 1,", "+ function.__name__ + ext # print(path) img.save(filename=path) class CheckImage(unittest.TestCase): @classmethod", "10) t.composite_channel('default_channels', p, 'overlay') save(t, f) def test_sharpen(self): f =", "f(t, 'red') save(t, f) def test_sepiatone(self): f = wpi.sepiatone with", "wpi.enhance with Image(filename='plasma:', width=100, height=100) as t: f(t) save(t, f)", "wpi.extent with self.rose.clone() as t: t.gravity = 'center' t.background_color =", "as t: f(t, -30, 0) save(t, f) with self.rose.clone() as", "save(t, f) with self.grad.clone() as t: f(t, p, channel='green') save(t,", "channel='red') save(t, f, True) def test_cyclecolormap(self): f = wpi.cyclecolormap with", "w = 100 h = 100 with Drawing() as d:", "= wpi.label with self.rose.clone() as t: f(t, 'hello') save(t, f)", "f) def test_colormatrix(self): f = wpi.colormatrix with self.logo.clone() as t:", "with Image() as dst: rows = 2 columns = 3", "test_roll(self): f = wpi.roll with self.rose.clone() as t: f(t, 10,", "I couldn't build on Windows... 
f(t, True) save(t, f) #", "= wpi.montage with self.rose.clone() as base: with Image() as dst:", "def test_cyclecolormap(self): f = wpi.cyclecolormap with self.logo.clone() as t: f(t,", "self.rose.clone() as t: f(t, xml) save(t, f) def test_colorize(self): f", "t.quantum_range f(t, int(rng * 0.05), int(rng * 0.95)) save(t, f)", "with self.rose.clone() as t: f(t, True, 3, 3) save(t, f)", "width=200, height=60) self.text_a = Image(width=70, height=60) with Drawing() as draw:", "save(t, f) def test_statistic(self): f = wpi.statistic with self.rose.clone() as", "<reponame>chromia/wandplus<gh_stars>0 #!/usr/bin/env python from wand.image import Image from wand.drawing import", "f = wpi.statistic with self.rose.clone() as t: f(t, 'gradient', 4,", "save(t, f) def test_sparsecolor(self): f = wpi.sparsecolor with Image(width=100, height=100,", "test_blur(self): f = wpi.blur with self.rose.clone() as t: f(t, 0,", "f) def test_wave(self): f = wpi.wave with self.grad.clone() as t:", "with Image() as t: w = 2 h = 2", "= \"frame\" with Drawing() as d: with f(dst, d, tile,", "b: wpi.add(t, a) wpi.add(t, b) wpi.setfirstiterator(t) with f(t, False, -3)", "(IM<6.9.9-36) save(t, f) def test_autogamma(self): f = wpi.autogamma with self.rose.clone()", "with self.grad.clone() as t: f(t, Color('red'), Color('gray(25%)')) save(t, f) def", "t: dpi = 72 * 2 f(t, dpi, dpi, 'lanczos',", "f(t, channel='red') save(t, f, True) def test_autolevel(self): f = wpi.autolevel", "channel='red') save(t, f, True) def test_splice(self): f = wpi.splice with", "] with self.rose.clone() as t: f(t, 3, 3, kernel) save(t,", "self.rose.clone() as t: f(t) save(t, f) def test_clippath(self): # NOTE:", "self.rose.clone() as t: f(t, '#1', True) save(t, f) def test_clut(self):", "self.rose.clone() as t: f(t, 0, 3) save(t, f) with self.rose.clone()", "# as degradation t2 = mag f(t2, phase, True) save(t2,", "f = wpi.tint with self.rose.clone() as t: f(t, Color('rgb'), Color('gray(25%)'))", "height=400) self.logo = 
Image(filename='logo:') self.text = Image(filename='label:Confirm', width=200, height=60) self.text_a", "makeletter('B', 50, 30) as b: wpi.add(t, a) wpi.add(t, b) wpi.setfirstiterator(t)", "'gaussian') save(t, f) with self.grad.clone() as t: f(t, 'gaussian', channel='red')", "self.rose.clone() as t: f(t) save(t, f) with self.rose.clone() as t:", "= wpi.filterimage kernel = [ # Sobel filter -1.0, 0.0,", "45) save(t, f) with self.rose.clone() as t: f(t, 45, channel='red')", "# I couldn't build on Windows... f(t, True) save(t, f)", "t: f(t, channel='red') save(t, f, True) def test_blackthreshold(self): f =", "channel wpi.add(t, b) # add image for green channel wpi.add(t,", "0.0, 0.0, 1.0, 100, 100, 0.0, 1.0, 1.0, 1.0]) save(t,", "Color('black') f(t, 0, 10, 20, 20) save(t, f) def test_wave(self):", "0.0, 0.0, 1.0 ] f(t, 5, 5, kernel) save(t, f)", "= t.quantum_range f(t, int(rng * 0.05), int(rng * 0.95), channel='red')", "wpi.motionblur with self.logo.clone() as t: f(t, 30, 10, 45) save(t,", "f(t, d) # not work correctly (IM<6.9.9-36) save(t, f) def", "f(t, 0, 00, 200, 200) save(t, f) def test_clamp(self): f", "0, 255, 255, 255, 255, 0, 0, 0, 255, 0]", "wpi.rotationalblur with self.rose.clone() as t: f(t, 45) save(t, f) with", "test_vignette(self): f = wpi.vignette with self.logo.clone() as t: wpi.minify(t) t.background_color", "t: f(t, p, channel='red') save(t, f, True) def test_implode(self): f", "def test_imagesize(self): with Drawing() as d: text = 'check' d.font", "def test_label(self): f = wpi.label with self.rose.clone() as t: f(t,", "as t: t.gravity = 'center' f(t, t.width//2, t.height//2, 20, 20)", "from wand.image import Image from wand.drawing import Drawing from wand.color", "def test_separate_channel(self): f = wpi.separate_channel with self.rose.clone() as t: f(t,", "f = wpi.chop with self.grad.clone() as t: t.gravity = 'north_west'", "base: with Image() as dst: rows = 2 columns =", "p.rotate(90) with self.grad.clone() as t: f(t, p) save(t, f) with", "= 2 
columns = 3 for i in range(rows *", "def test_wave(self): f = wpi.wave with self.grad.clone() as t: f(t,", "self.rose.clone() as t: f(t, 10, 10, 10, 10, True) save(t,", "</ColorCorrectionCollection> \"\"\" f = wpi.colordecisionlist with self.rose.clone() as t: f(t,", "as t: f(t, t.width*2, t.height*2) save(t, f) def test_segment(self): f", "test_stegano(self): f = wpi.stegano with self.rose.clone() as t: w =", "with Image(width=50, height=50, background=Color('red')) as p: wpi.add(t, p) with Image(width=25,", "0.0, 1.0, ] with self.rose.clone() as t: f(t, 3, 3,", "f(t, 'o4x4,3,3', channel='red') save(t, f, True) def test_polaroid(self): f =", "def save(img, function, channel=False, ext='.png'): if channel: path = tmpdir", "t: f(t, p, channel='green') save(t, f, True) def test_coalesce(self): #", "as t: f(t, 3, 3, channel='red') save(t, f, True) def", "= '_tmp/' def save(img, function, channel=False, ext='.png'): if channel: path", "f) with self.logo.clone() as t: f(t, 30, 10, 45, channel='red')", "t.composite_channel('default_channels', p, 'overlay') save(t, f) def test_sharpen(self): f = wpi.sharpen", "def test_importpixels(self): f = wpi.importpixels with Image(width=4, height=4, background=Color('red')) as", "f) def test_randomthreshold(self): f = wpi.randomthreshold with self.text_a.clone() as t:", "0, 1.0, 0.0, 0.0, 1.0, 100, 100, 0.0, 1.0, 1.0,", "with t.sequence[1].clone() as phase: wpi.blur(mag, 0, 0.5) # as degradation", "two images(magnitude&phase) f = wpi.inversefouriertransform with t.sequence[0].clone() as mag: with", "def test_filterimage(self): f = wpi.filterimage kernel = [ # Sobel", "f = wpi.polaroid with self.logo.clone() as t: with Drawing() as", "self.rose.clone() as t: dpi = 72 * 2 f(t, dpi,", "= wpi.clut with Image(filename='gradient:red-blue', width=1, height=100) as p: p.rotate(90) with", "as t: f(t, 5, 30) save(t, f) def test_magnify(self): f", "with self.logo.clone() as t: f(t, 5) save(t, f) def test_cipher(self):", "as t: f(t, 'hello') 
save(t, f) def test_compare(self): f =", "[ # Sobel filter -1.0, 0.0, 1.0, -2.0, 0.0, 2.0,", "def test_smush(self): f = wpi.smush def makeletter(letter, w, h): img", "'bilinear', [0, 0, 1.0, 0.0, 0.0, 1.0, 100, 100, 0.0,", "as d: d.affine([2.0, 0.0, 0.0, 2.0, 0.0, 0.0]) f(t, d)", "as d: with f(dst, d, tile, thumb, mode, frame) as", "* columns): wpi.add(dst, base) tile = \"{0}x{1}+0+0\".format(columns, rows) thumb =", "t: f(t, 'gradient', 4, 4) save(t, f) with self.rose.clone() as", "self.rose.clone() as t: f(t, 5, 5, channel='red') save(t, f, True)", "'rgb', False, 5, 20) save(t, f) def test_selectiveblur(self): f =", "3.0, channel='red') save(t, f, True) def test_adaptiveresize(self): f = wpi.adaptiveresize", "= wpi.morph color = Color('white') with self.rose.clone() as t: with", "self.rose.clone() as t: f(t, 'gradient', 4, 4) save(t, f) with", "Color import wandplus.image as wpi from wandplus.textutil import calcSuitableFontsize, calcSuitableImagesize", "f) def test_tint(self): f = wpi.tint with self.rose.clone() as t:", "wpi.blueshift with self.logo.clone() as t: f(t, 0.5) save(t, f) def", "self.logo.clone() as t: with Drawing() as d: f(t, d, 1.0)", "background=Color('red')) as t: w = 2 h = 2 b", "as t: f(t, p, channel='red') save(t, f, True) def test_implode(self):", "100, 0.0, 1.0, 1.0, 1.0]) save(t, f) def test_spread(self): f", "f) def test_convolve(self): f = wpi.convolve kernel = [1/16, 2/16,", "code with self.rose.clone() as t: f(t) save(t, f) with self.rose.clone()", "save(t, f) def test_combine(self): f = wpi.combine with Image() as", "self.logo.clone() as t: f(t, Color('green'), 0.10*t.quantum_range, Color('white'), 0, 0) save(t,", "filter -1.0, 0.0, 1.0, -2.0, 0.0, 2.0, -1.0, 0.0, 1.0,", "0.5) # as degradation t2 = mag f(t2, phase, True)", "* 0.95), channel='red') save(t, f, True) def test_remap(self): f =", "as t: f(t, 1.0) save(t, f) def test_importpixels(self): f =", "f(t, 1.0) save(t, f) def test_importpixels(self): f = wpi.importpixels with", 
"save(t, f) def test_fft(self): f = wpi.forwardfouriertransform # require IM", "have an image which has clipping path with self.rose.clone() as", "as t: f(t, 'o4x4,3,3', channel='red') save(t, f, True) def test_polaroid(self):", "1.0 ] f(t, 5, 5, kernel) save(t, f) def test_combine(self):", "+ 'text.png') self.text_a.save(filename=tmpdir + 'a.png') @classmethod def tearDownClass(self): self.rose.destroy() self.grad.destroy()", "= wpi.sketch with self.logo.clone() as t: f(t, 10, 10, 45)", "test_shade(self): f = wpi.shade with self.logo.clone() as t: f(t, True,", "save(r, f, ext='.gif') def test_constitute(self): f = wpi.constitute with Image()", "self.rose.clone() as t: with t.clone() as p: (c, d) =", "Image() as t: with self.rose.clone() as p: for i in", "t: t.gravity = 'north_west' f(t, 0, 00, 200, 200) save(t,", "self.text_a.clone() as t: rng = t.quantum_range f(t, int(rng * 0.05),", "wpi.clut with Image(filename='gradient:red-blue', width=1, height=100) as p: p.rotate(90) with self.grad.clone()", "f) def test_montage(self): f = wpi.montage with self.rose.clone() as base:", "save(t, f, True) def test_scale(self): f = wpi.scale with self.rose.clone()", "channel='red') save(t, f, True) def test_shave(self): f = wpi.shave with", "= Image(filename='rose:') self.grad = Image(filename='gradient:', width=400, height=400) self.logo = Image(filename='logo:')", "self.text_a = Image(width=70, height=60) with Drawing() as draw: draw.font =", "with q.clone() as qq: wpi.resetpage(qq, 5*(i+1), 5*(i+1)) wpi.add(t, qq) with", "save(t, f) def test_randomthreshold(self): f = wpi.randomthreshold with self.text_a.clone() as", "255, 0] f(t, w, h, 'RGB', 'char', b) save(t, f)", "channel='red') save(t, f, True) def test_adaptiveresize(self): f = wpi.adaptiveresize with", "for blue channel wpi.setfirstiterator(t) # rewind the index pointer channel", "d.gravity = 'center' d.text(0, 0, letter) d(img) return img with", "self.rose.clone() as p: p.negate() with f(t, p) as q: save(q,", 
"channel='red') save(t, f, True) def test_charcoal(self): f = wpi.charcoal with", "with Image(filename='plasma:', width=100, height=100) as t: f(t) save(t, f) def", "with Image() as q: wpi.setsizeoffset(q, w, h, offset) q.read(filename='stegano:' +", "0, 1) wpi.add(t, p) with f(t) as p: save(p, f)", "= wpi.emboss with self.logo.clone() as t: f(t, 0, 3) save(t,", "t: f(t, 'o4x4,3,3') save(t, f) with self.grad.clone() as t: f(t,", "white = Color('white') with Image(width=w, height=w, background=black) as b: with", "with self.logo.clone() as t: f(t, 'rgb', False, 5, 20) save(t,", "f = wpi.constitute with Image() as t: w = 2", "fontsize = calcSuitableFontsize(d, text, width=w) print('calcSuitableImagesize[W]: ', fontsize) self.assertTrue(fontsize >", "with self.logo.clone() as t: f(t, 30, 10, 45) save(t, f)", "wpi.setfirstiterator(t) with f(t, False, -3) as p: save(p, f) def", "f = wpi.raiseimage with self.rose.clone() as t: f(t, 10, 10,", "20, 20) save(t, f) def test_wave(self): f = wpi.wave with", "f(t, 3, True) save(t, f) def test_raiseimage(self): f = wpi.raiseimage", "self.rose.clone() as t: f(t, 10, 10) save(t, f) def test_rotationalblur(self):", "wpi.selectiveblur with self.logo.clone() as t: f(t, 20, 20, 0.5*t.quantum_range) save(t,", "test_swirl(self): f = wpi.swirl with self.rose.clone() as t: f(t, 180)", "test_sigmoidalcontrast(self): f = wpi.sigmoidalcontrast with self.rose.clone() as t: f(t, True,", "save(t, f, True) def test_floodfillpaint(self): f = wpi.floodfillpaint with self.logo.clone()", "'center' d.text(0, 0, letter) d(img) return img with Image() as", "[1/16, 2/16, 1/16, 2/16, 4/16, 2/16, 1/16, 2/16, 1/16] with", "with self.rose.clone() as t: f(t, 0.4*t.quantum_range, channel='red') save(t, f, True)", "height=4, background=Color('red')) as t: w = 2 h = 2", "self.rose.clone() as t: f(t, 'gradient', 4, 4, channel='red') save(t, f,", "t: f(t, -30, 0) save(t, f) with self.rose.clone() as t:", "= 'RGB' with Image(width=w, height=h, 
background=Color('red')) as t: r =", "test_colordecisionlist(self): xml = \"\"\" <ColorCorrectionCollection xmlns=\"urn:ASC:CDL:v1.2\"> <ColorCorrection id=\"cc03345\"> <SOPNode> <Slope>", "= \"\"\" <ColorCorrectionCollection xmlns=\"urn:ASC:CDL:v1.2\"> <ColorCorrection id=\"cc03345\"> <SOPNode> <Slope> 0.9 1.2", "with self.rose.clone() as t: f(t, 5.0, 3.0) save(t, f) with", "= wpi.wave with self.grad.clone() as t: f(t, 40, 200) save(t,", "f(t, channel='red') save(t, f, True) def test_clip(self): # NOTE: result", "f, True) def test_floodfillpaint(self): f = wpi.floodfillpaint with self.logo.clone() as", "= wpi.implode with self.rose.clone() as t: f(t, 1.0) save(t, f)", "don't have an image which has clipping path with self.rose.clone()", "f, ext='.gif') def test_constitute(self): f = wpi.constitute with Image() as", "f = wpi.adaptiveresize with self.rose.clone() as t: f(t, int(t.width*1.5), int(t.height*2.0))", "= Image(filename='logo:') self.text = Image(filename='label:Confirm', width=200, height=60) self.text_a = Image(width=70,", "offset = 15 tmpfile = 'tmp.png' with Image(width=w, height=h, background=Color('white'))", "save(t, f) with self.rose.clone() as t: f(t, 'gradient', 4, 4,", "f(t, 0.5*t.quantum_range) save(t, f) def test_shade(self): f = wpi.shade with", "'o4x4,3,3', channel='red') save(t, f, True) def test_polaroid(self): f = wpi.polaroid", "with self.rose.clone() as t: f(t, 'red') save(t, f) def test_sepiatone(self):", "0.5*t.quantum_range) save(t, f) with self.logo.clone() as t: f(t, 20, 20,", "def test_oilpaint(self): f = wpi.oilpaint with self.rose.clone() as t: f(t,", "save(t, f) def test_colormatrix(self): f = wpi.colormatrix with self.logo.clone() as", "f(t, 'hello') save(t, f) def test_compare(self): f = wpi.compare with", "f = wpi.localcontrast with self.logo.clone() as t: f(t, 5, 30)", "d: f(t, d, 1.0) save(t, f) def test_posterize(self): f =", "test_coalesce(self): # TODO: input optimized .gif file. 
f = wpi.coalesce", "f(t2, phase, True) save(t2, f) def test_haldclut(self): f = wpi.haldclut", "with self.rose.clone() as t: f(t, -30, 0, channel='red') save(t, f,", "f) with self.rose.clone() as t: f(t, 0, 3, channel='red') save(t,", "p) with f(t) as p: save(p, f) def test_colordecisionlist(self): xml", "Color('gray(50%)')) save(t, f) class CheckTextUtil(unittest.TestCase): def test_imagesize(self): with Drawing() as", "with t.clone() as p: (c, d) = f(t, p, metric='absolute',", "t.background_color = Color('blue') f(t, -10, -10, t.width+20, t.height+20) save(t, f)", "with Drawing() as d: with f(dst, d, tile, thumb, mode,", "f(t, -10, -10, t.width+20, t.height+20) save(t, f) def test_filterimage(self): f", "f(dst, d, tile, thumb, mode, frame) as result: save(result, f)", "channel='red') save(t, f, True) def test_blur(self): f = wpi.blur with", "0.0, 0.0, 0.0, 0.0, 1.5, 0.0, 0.0, 0.0, 0.0, 0.0,", "Image(filename='logo:') self.text = Image(filename='label:Confirm', width=200, height=60) self.text_a = Image(width=70, height=60)", "as t: f(t, -30, 0, channel='red') save(t, f, True) def", "f(t, Color('rgb'), Color('gray(25%)')) save(t, f) def test_vignette(self): f = wpi.vignette", "save(t, f) def test_cipher(self): f = wpi.encipher with self.rose.clone() as", "f(t) as p: save(p, f) def test_colordecisionlist(self): xml = \"\"\"", "test_rotationalblur(self): f = wpi.rotationalblur with self.rose.clone() as t: f(t, 45)", "True) def test_implode(self): f = wpi.implode with self.rose.clone() as t:", "t: f(t, 45, channel='red') save(t, f, True) def test_scale(self): f", "0, letter) d(img) return img with Image() as t: with", "= Image(filename='label:Confirm', width=200, height=60) self.text_a = Image(width=70, height=60) with Drawing()", "as q: save(q, f) def test_thumbnail(self): f = wpi.thumbnail with", "def test_vignette(self): f = wpi.vignette with self.logo.clone() as t: wpi.minify(t)", "with self.rose.clone() as t: f(t, 'gradient', 4, 4, channel='red') save(t,", 
"wpi.encipher with self.rose.clone() as t: f(t, 'password') save(t, f) f", "with self.rose.clone() as p: for i in range(5): wpi.blur(p, 0,", "dpi, 'lanczos', 1.0) save(t, f) def test_roll(self): f = wpi.roll", "f) with self.logo.clone() as t: f(t, Color('red'), Color('blue'), 1.0, False,", "f(t, 5.0, 3.0) save(t, f) with self.rose.clone() as t: f(t,", "0.85 </Saturation> </SATNode> </ColorCorrection> </ColorCorrectionCollection> \"\"\" f = wpi.colordecisionlist with", "with self.logo.clone() as t: f(t, 20) save(t, f) def test_statistic(self):", "not work correctly (IM<6.9.9-36) save(t, f) def test_autogamma(self): f =", "0.0, 0.0, 1.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0,", "= wpi.addnoise with self.grad.clone() as t: f(t, 'gaussian') save(t, f)", "= wpi.segment with self.logo.clone() as t: f(t, 'rgb', False, 5,", "f) with self.rose.clone() as t: f(t, -30, 0, channel='red') save(t,", "45) save(t, f) with self.logo.clone() as t: f(t, 30, 10,", "as t: f(t, 0.5) save(t, f) def test_brightnesscontrast(self): f =", "True) def test_separate_channel(self): f = wpi.separate_channel with self.rose.clone() as t:", "as t: f(t, False) save(t, f) def test_convolve(self): f =", "0.0, 1.0, 1.0, 1.0]) save(t, f) def test_spread(self): f =", "def test_solarize(self): f = wpi.solarize with self.rose.clone() as t: f(t,", "def test_colorize(self): f = wpi.colorize with self.grad.clone() as t: f(t,", "with Drawing() as d: d.font = 'Arial' d.font_size = 24", "channels, 'double') self.assertEqual(r[0], 1.0) self.assertEqual(r[1], 0.0) self.assertEqual(r[2], 0.0) def test_extent(self):", "self.logo.clone() as t: f(t, 0.5) save(t, f) def test_brightnesscontrast(self): f", "2 h = 2 b = [0, 0, 0, 255,", "int(t.height*2.0)) save(t, f) def test_adaptivesharpen(self): f = wpi.adaptivesharpen with self.rose.clone()", "f) def test_brightnesscontrast(self): f = wpi.brightnesscontrast with self.rose.clone() as t:", "as t: w = 2 h = 2 b =", "t: f(t, 'dilate', 1, 'Diamond') save(t, f) with 
self.logo.clone() as", "wpi.cyclecolormap with self.logo.clone() as t: f(t, 5) save(t, f) def", "3) save(t, f) def test_emboss(self): f = wpi.emboss with self.logo.clone()", "save(t, f) def test_posterize(self): f = wpi.posterize with self.rose.clone() as", "background=color) as p: wpi.add(t, p) wpi.setfirstiterator(t) wpi.setdelay(t, 60) with f(t,", "save(t, f) def test_vignette(self): f = wpi.vignette with self.logo.clone() as", "= wpi.clip # I don't have an image which has", "= wpi.inversefouriertransform with t.sequence[0].clone() as mag: with t.sequence[1].clone() as phase:", "f, True) def test_charcoal(self): f = wpi.charcoal with self.rose.clone() as", "f = wpi.sketch with self.logo.clone() as t: f(t, 10, 10,", "f(t, 0, 3) save(t, f) with self.rose.clone() as t: f(t,", "with f(t, 5) as q: save(q, f, ext='.gif') def test_morphology(self):", "wpi.statistic with self.rose.clone() as t: f(t, 'gradient', 4, 4) save(t,", "height=t.height, background=color) as p: wpi.add(t, p) wpi.setfirstiterator(t) wpi.setdelay(t, 60) with", "4, 4) save(t, f) with self.rose.clone() as t: f(t, 'gradient',", "0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 ] f(t,", "channels = 'RGB' with Image(width=w, height=h, background=Color('red')) as t: r", "for i in range(4): with q.clone() as qq: wpi.resetpage(qq, 5*(i+1),", "= wpi.comparelayer with Image() as t: with Image(width=50, height=50, background=Color('red'))", "0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0,", "self.rose.clone() as t: f(t, 5, 5) save(t, f) with self.rose.clone()", "def test_fft(self): f = wpi.forwardfouriertransform # require IM build option", "h, 'RGB', 'char', b) save(t, f) def test_contrast(self): f =", "10, 10, 10, True) save(t, f) def test_randomthreshold(self): f =", "def test_shadow(self): f = wpi.shadow with self.text.clone() as t: with", "height=w, background=black) as b: with Image(width=h, height=h, background=white) as w:", "test_montage(self): f = wpi.montage with self.rose.clone() as base: with Image()", 
"self.rose.clone() as t: f(t, 5.0, 3.0) save(t, f) with self.rose.clone()", "15 tmpfile = 'tmp.png' with Image(width=w, height=h, background=Color('white')) as p:", "save(t, f) with self.logo.clone() as t: f(t, 30, 10, 45,", "import Image from wand.drawing import Drawing from wand.color import Color", "mode, frame) as result: save(result, f) def test_morph(self): f =", "which has clipping path with self.rose.clone() as t: f(t) save(t,", "as qq: wpi.resetpage(qq, 5*(i+1), 5*(i+1)) wpi.add(t, qq) with f(t, 'compareany')", "= wpi.importpixels with Image(width=4, height=4, background=Color('red')) as t: w =", "'center' d.fill_color = Color('black') d.text(0, 0, 'Watch\\nthe\\nPidgeon') d(p) with f(t,", "path with self.rose.clone() as t: f(t) save(t, f) def test_clippath(self):", "t: f(t, 'hello') save(t, f) def test_compare(self): f = wpi.compare", "f = wpi.shave with self.logo.clone() as t: f(t, 100, 100)", "with f(t, p) as q: save(q, f) def test_swirl(self): f", "= wpi.colorize with self.grad.clone() as t: f(t, Color('red'), Color('gray(25%)')) save(t,", "p: p.negate() f(p, 100, 2, 10, 10) t.composite_channel('default_channels', p, 'overlay')", "50, 30) as a: with makeletter('B', 50, 30) as b:", "t2 = mag f(t2, phase, True) save(t2, f) def test_haldclut(self):", "kernel = [ # Sobel filter -1.0, 0.0, 1.0, -2.0,", "0.0, 2.0, -1.0, 0.0, 1.0, ] with self.rose.clone() as t:", "= f(t, p, metric='absolute', channel='red') save(c, f, True) c.destroy() def", "f(t, 0.4*t.quantum_range) save(t, f) with self.rose.clone() as t: f(t, 0.4*t.quantum_range,", "with self.grad.clone() as t: f(t, 'hello') save(t, f) def test_compare(self):", "f = wpi.autolevel with self.rose.clone() as t: f(t) save(t, f)", "f(p, 100, 2, 10, 10) t.composite_channel('default_channels', p, 'overlay') save(t, f)", "save(t, f) with self.rose.clone() as t: f(t, 0.4*t.quantum_range, channel='red') save(t,", "f = wpi.contrast with self.rose.clone() as t: f(t, False) save(t,", "save(q, f) except Exception: raise 
finally: os.remove(tmpfile) def test_stereo(self): f", "False) save(t, f) def test_convolve(self): f = wpi.convolve kernel =", "as t: f(t, Color('rgb'), Color('gray(25%)')) save(t, f) def test_vignette(self): f", "self.logo.destroy() self.text.destroy() self.text_a.destroy() def test_adaptiveblur(self): f = wpi.adaptiveblur with self.rose.clone()", "t: f(t, '#1', True) save(t, f) def test_clut(self): f =", "as t: f(t) save(t, f) def test_montage(self): f = wpi.montage", "<Slope> 0.9 1.2 0.5 </Slope> <Offset> 0.4 -0.5 0.6 </Offset>", "self.rose.clone() as t: f(t, channel='red') save(t, f, True) def test_clip(self):", "work correctly (IM<6.9.9-36) save(t, f) def test_autogamma(self): f = wpi.autogamma", "wpi.label with self.rose.clone() as t: f(t, 'hello') save(t, f) def", "f(t, 3, 3, channel='red') save(t, f, True) def test_shave(self): f", "save(t, f) with self.rose.clone() as t: f(t, 0, 3, channel='red')", "save(t, f) def test_deskew(self): f = wpi.deskew with Image(width=80, height=40,", "as t: f(t, int(t.width*1.5), int(t.height*2.0)) save(t, f) def test_adaptivesharpen(self): f", "'check' d.font = 'Arial' fontsize = calcSuitableFontsize(d, text, width=w) print('calcSuitableImagesize[W]:", "f) def test_combine(self): f = wpi.combine with Image() as t:", "True) def test_orderedposterize(self): f = wpi.orderedposterize with self.grad.clone() as t:", "as phase: wpi.blur(mag, 0, 0.5) # as degradation t2 =", "with self.rose.clone() as t: f(t, 1.0) save(t, f) def test_importpixels(self):", "as t: f(t, Color('gray(50%)')) save(t, f) def test_blueshift(self): f =", "with self.rose.clone() as t: t.gravity = 'center' f(t, t.width//2, t.height//2,", "Image(width=w, height=h, background=Color('white')) as p: with Drawing() as d: d.gravity", "wpi.affinetransform with self.rose.clone() as t: with Drawing() as d: d.affine([2.0,", "self.rose.clone() as t: f(t, 180) save(t, f) def test_texture(self): f", "f = wpi.opaquepaint with self.logo.clone() as t: f(t, Color('red'), 
Color('blue'),", "test_segment(self): f = wpi.segment with self.logo.clone() as t: f(t, 'rgb',", "t: w = 2 h = 2 b = [0,", "2 f(t, dpi, dpi, 'lanczos', 1.0) save(t, f) def test_roll(self):", "wpi.filterimage kernel = [ # Sobel filter -1.0, 0.0, 1.0,", "f(t, 45, channel='red') save(t, f, True) def test_scale(self): f =", "f, True) def test_splice(self): f = wpi.splice with self.rose.clone() as", "+ 'logo.png') self.text.save(filename=tmpdir + 'text.png') self.text_a.save(filename=tmpdir + 'a.png') @classmethod def", "f) def test_sparsecolor(self): f = wpi.sparsecolor with Image(width=100, height=100, background=Color('black'))", "w, h, offset) q.read(filename='stegano:' + tmpfile) save(q, f) except Exception:", "f, True) def test_coalesce(self): # TODO: input optimized .gif file.", "'char', b) save(t, f) def test_contrast(self): f = wpi.contrast with", "wpi.solarize with self.rose.clone() as t: f(t, 0.4*t.quantum_range) save(t, f) with", "with f(t, False, -3) as p: save(p, f) def test_solarize(self):", "os.remove(tmpfile) def test_stereo(self): f = wpi.stereo with self.rose.clone() as t:", "f) def test_compare(self): f = wpi.compare with self.rose.clone() as t:", "def test_shade(self): f = wpi.shade with self.logo.clone() as t: f(t,", "wpi.resample with self.rose.clone() as t: dpi = 72 * 2", "as t: f(t, 0, 3) save(t, f) with self.rose.clone() as", "i in range(5): wpi.blur(p, 0, 1) wpi.add(t, p) with f(t)", "channel) as q: save(q, f) def test_comment(self): f = wpi.comment", "test_charcoal(self): f = wpi.charcoal with self.rose.clone() as t: f(t, 5,", "wpi.segment with self.logo.clone() as t: f(t, 'rgb', False, 5, 20)", "as t: f(t, 'gaussian', channel='red') save(t, f, True) def test_affinetransform(self):", "72 * 2 f(t, dpi, dpi, 'lanczos', 1.0) save(t, f)", "= wpi.extent with self.rose.clone() as t: t.gravity = 'center' t.background_color", "Image() as t: with makeletter('A', 50, 30) as a: with", "test_smush(self): f = wpi.smush def makeletter(letter, w, h): img 
=", "= \"80x50+4+3\" frame = \"15x15+3+3\" mode = \"frame\" with Drawing()", "add image for blue channel wpi.setfirstiterator(t) # rewind the index", "'Diamond', channel='red') save(t, f, True) def test_motionblur(self): f = wpi.motionblur", "True) def test_sketch(self): f = wpi.sketch with self.logo.clone() as t:", "height=h, background=white) as w: wpi.add(t, b) # add image for", "def test_combine(self): f = wpi.combine with Image() as t: w", "f = wpi.remap with self.logo.clone() as t: with self.rose.clone() as", "</Saturation> </SATNode> </ColorCorrection> </ColorCorrectionCollection> \"\"\" f = wpi.colordecisionlist with self.rose.clone()", "+ function.__name__ + \"_ch\" + ext else: path = tmpdir", "5, 5, kernel) save(t, f) def test_combine(self): f = wpi.combine", "wpi.clamp # TODO: more useful code with self.rose.clone() as t:", "wpi.inversefouriertransform with t.sequence[0].clone() as mag: with t.sequence[1].clone() as phase: wpi.blur(mag,", "True) def test_polaroid(self): f = wpi.polaroid with self.logo.clone() as t:", "as t: f(t, 10, 10, 10, 10, True) save(t, f)", "f) def test_whitethreshold(self): f = wpi.whitethreshold with self.grad.clone() as t:", "speckle noise f(t) save(t, f) def test_edge(self): f = wpi.edge", "test_deskew(self): f = wpi.deskew with Image(width=80, height=40, background=Color('black')) as t:", "t: t.gravity = 'center' f(t, t.width//2, t.height//2, 20, 20) save(t,", "0] f(t, 1, 1, w, h, 'RGB', 'char', b) save(t,", "self.rose.clone() as t: f(t, True, 3, 3, channel='red') save(t, f,", "save(t, f, True) def test_cyclecolormap(self): f = wpi.cyclecolormap with self.logo.clone()", "= wpi.whitethreshold with self.grad.clone() as t: f(t, Color('gray(50%)')) save(t, f)", "height=60) self.text_a = Image(width=70, height=60) with Drawing() as draw: draw.font", "as t: f(t, 0.4*t.quantum_range, channel='red') save(t, f, True) def test_splice(self):", "save(t, f) def test_despeckle(self): f = wpi.despeckle with self.rose.clone() as", 
"test_adaptiveresize(self): f = wpi.adaptiveresize with self.rose.clone() as t: f(t, int(t.width*1.5),", "f = wpi.affinetransform with self.rose.clone() as t: with Drawing() as", "f = wpi.haldclut # TODO: more useful code with Image(filename='hald:12')", "t: f(t, 1.0) save(t, f) def test_importpixels(self): f = wpi.importpixels", "t: f(t, 30, 10, 45) save(t, f) with self.logo.clone() as", "wpi.setsizeoffset(q, w, h, offset) q.read(filename='stegano:' + tmpfile) save(q, f) except", "as t: with Drawing() as d: f(t, d, 1.0) save(t,", "ext else: path = tmpdir + function.__name__ + ext #", "with self.rose.clone() as t: f(t, 0.5*t.quantum_range) save(t, f) def test_shade(self):", "</SATNode> </ColorCorrection> </ColorCorrectionCollection> \"\"\" f = wpi.colordecisionlist with self.rose.clone() as", "t: f(t, 'default_channels', 'bilinear', [0, 0, 1.0, 0.0, 0.0, 1.0,", "def test_adaptiveresize(self): f = wpi.adaptiveresize with self.rose.clone() as t: f(t,", "f(t, 20, 20, int(0.1*t.quantum_range)) save(t, f) def test_addnoise(self): f =", "in range(5): wpi.blur(p, 0, 1) wpi.add(t, p) with f(t) as", "10, 45, channel='red') save(t, f, True) def test_oilpaint(self): f =", "p: p.negate() with f(t, p) as q: save(q, f) def", "True) def test_clip(self): # NOTE: result is always FAILED. 
f", "with t.clone() as p: (c, d) = f(t, p, metric='absolute')", "\"80x50+4+3\" frame = \"15x15+3+3\" mode = \"frame\" with Drawing() as", "save(img, function, channel=False, ext='.png'): if channel: path = tmpdir +", "f(t, 20) save(t, f) def test_statistic(self): f = wpi.statistic with", "the index pointer channel = 1 + 2 + 4", "100 h = 100 with Drawing() as d: text =", "t: f(t, 0, 3) save(t, f) with self.rose.clone() as t:", "f(t, t.width//2, t.height//2, 20, 20) save(t, f) def test_sparsecolor(self): f", "\"_ch\" + ext else: path = tmpdir + function.__name__ +", "0.0, 1.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0,", "as t: f(t, 40, 200) save(t, f) def test_whitethreshold(self): f", "with self.logo.clone() as t: f(t, 10, 10, 45) save(t, f)", "f = wpi.emboss with self.logo.clone() as t: f(t, 0, 3)", "f) def test_clippath(self): # NOTE: result is always FAILED. f", "save(t, f) def test_emboss(self): f = wpi.emboss with self.logo.clone() as", "as t: with self.rose.clone() as p: f(t, p, 'nodither') save(t,", "t: f(t, 5, 5) save(t, f) with self.rose.clone() as t:", "wpi.smush def makeletter(letter, w, h): img = Image(width=w, height=h) with", "3, 3, channel='red') save(t, f, True) def test_shave(self): f =", "with Drawing() as d: f(t, d, 1.0) save(t, f) def", "def test_comparelayer(self): f = wpi.comparelayer with Image() as t: with", "t: f(t, 100, 100) save(t, f) def test_shear(self): f =", "[0, 0, 0, 255, 255, 255, 255, 0, 0, 0,", "as b: with Image(width=h, height=h, background=white) as w: wpi.add(t, b)", "self.rose.clone() as t: t.gravity = 'center' f(t, t.width//2, t.height//2, 20,", "= 24 d.gravity = 'center' d.text(0, 0, letter) d(img) return", "'hello') save(t, f) def test_localcontrast(self): f = wpi.localcontrast with self.logo.clone()", "Image(width=80, height=40, background=Color('black')) as t: f(t, 0.5*t.quantum_range) # TODO: find", "= wpi.orderedposterize with self.grad.clone() as t: f(t, 'o4x4,3,3') save(t, f)", "f) with self.rose.clone() as t: f(t, 5, 
5, channel='red') save(t,", "wpi.whitethreshold with self.grad.clone() as t: f(t, Color('gray(50%)')) save(t, f) class", "self.logo.clone() as t: f(t, 'dilate', 1, 'Diamond') save(t, f) with", "= 'Arial' d.font_size = 36 size = calcSuitableImagesize(d, text) print('calcSuitableImagesize:", "Color('white') with self.rose.clone() as t: with Image(width=t.width, height=t.height, background=color) as", "50, 30) as b: wpi.add(t, a) wpi.add(t, b) wpi.setfirstiterator(t) with", "save(t, f) def test_selectiveblur(self): f = wpi.selectiveblur with self.logo.clone() as", "f(t, p) as q: save(q, f) def test_thumbnail(self): f =", "t: f(t, 45) save(t, f) with self.rose.clone() as t: f(t,", "t: f(t, 0.4*t.quantum_range, channel='red') save(t, f, True) def test_splice(self): f", "test_tint(self): f = wpi.tint with self.rose.clone() as t: f(t, Color('rgb'),", "as t: f(t, 20) save(t, f) def test_statistic(self): f =", "f(t, 3, 3) save(t, f) with self.rose.clone() as t: f(t,", "save(t, f) with self.grad.clone() as t: f(t, 'gaussian', channel='red') save(t,", "= wpi.oilpaint with self.rose.clone() as t: f(t, 2.0) save(t, f)", "test_randomthreshold(self): f = wpi.randomthreshold with self.text_a.clone() as t: rng =", "with self.logo.clone() as t: f(t, Color('green'), 0.10*t.quantum_range, Color('white'), 0, 0)", "wand.color import Color import wandplus.image as wpi from wandplus.textutil import", "# I don't have an image which has clipping path", "1.2 0.5 </Slope> <Offset> 0.4 -0.5 0.6 </Offset> <Power> 1.0", "= wpi.opaquepaint with self.logo.clone() as t: f(t, Color('red'), Color('blue'), 1.0,", "wpi.deskew with Image(width=80, height=40, background=Color('black')) as t: f(t, 0.5*t.quantum_range) #", "self.logo.clone() as t: f(t, 20) save(t, f) def test_statistic(self): f", "f = wpi.equalize with self.rose.clone() as t: f(t) save(t, f)", "rng = t.quantum_range f(t, int(rng * 0.05), int(rng * 0.95),", "save(result, f) def test_morph(self): f = wpi.morph color = Color('white')", 
"wpi.haldclut # TODO: more useful code with Image(filename='hald:12') as p:", "with self.rose.clone() as t: f(t, 'gradient', 4, 4) save(t, f)", "text, width=w) print('calcSuitableImagesize[W]: ', fontsize) self.assertTrue(fontsize > 0) fontsize =", "= wpi.swirl with self.rose.clone() as t: f(t, 180) save(t, f)", "f, True) def test_autolevel(self): f = wpi.autolevel with self.rose.clone() as", "= wpi.floodfillpaint with self.logo.clone() as t: f(t, Color('green'), 0.10*t.quantum_range, Color('white'),", "f = wpi.stereo with self.rose.clone() as t: with self.rose.clone() as", "as t: with Image(width=t.width, height=t.height, background=color) as p: wpi.add(t, p)", "images(magnitude&phase) f = wpi.inversefouriertransform with t.sequence[0].clone() as mag: with t.sequence[1].clone()", "def test_whitethreshold(self): f = wpi.whitethreshold with self.grad.clone() as t: f(t,", "= wpi.brightnesscontrast with self.rose.clone() as t: f(t, -30, 0) save(t,", "f = wpi.shade with self.logo.clone() as t: f(t, True, 45,", "t: rng = t.quantum_range f(t, int(rng * 0.05), int(rng *", "print('calcSuitableImagesize[W]: ', fontsize) self.assertTrue(fontsize > 0) fontsize = calcSuitableFontsize(d, text,", "import unittest tmpdir = '_tmp/' def save(img, function, channel=False, ext='.png'):", "as q: for i in range(4): with q.clone() as qq:", "with self.logo.clone() as t: wpi.minify(t) t.background_color = Color('black') f(t, 0,", "def test_autogamma(self): f = wpi.autogamma with self.rose.clone() as t: f(t)", "0.0, 0.0, 0.0, 1.0 ] f(t, 5, 5, kernel) save(t,", "f(t, 3, kernel, channel='red') save(t, f, True) def test_cyclecolormap(self): f", "self.rose.clone() as t: f(t, 2.0) save(t, f) def test_opaquepaint(self): f", "255, 0, 0, 0, 255, 0] f(t, w, h, 'RGB',", "self.rose.destroy() self.grad.destroy() self.logo.destroy() self.text.destroy() self.text_a.destroy() def test_adaptiveblur(self): f = wpi.adaptiveblur", "black = Color('black') white = Color('white') with Image(width=w, height=w, 
background=black)", "4 # R + G + B with f(t, channel)", "> 0) fontsize = calcSuitableFontsize(d, text, height=h) print('calcSuitableImagesize[H]: ', fontsize)", "t: f(t, 3) save(t, f) def test_emboss(self): f = wpi.emboss", "= wpi.selectiveblur with self.logo.clone() as t: f(t, 20, 20, 0.5*t.quantum_range)", "wpi.add(t, b) wpi.setfirstiterator(t) with f(t, False, -3) as p: save(p,", "f) def test_segment(self): f = wpi.segment with self.logo.clone() as t:", "save(t, f) def test_roll(self): f = wpi.roll with self.rose.clone() as", "'tmp.png' with Image(width=w, height=h, background=Color('white')) as p: with Drawing() as", "'logo.png') self.text.save(filename=tmpdir + 'text.png') self.text_a.save(filename=tmpdir + 'a.png') @classmethod def tearDownClass(self):", "</Offset> <Power> 1.0 0.8 1.5 </Power> </SOPNode> <SATNode> <Saturation> 0.85", "<Offset> 0.4 -0.5 0.6 </Offset> <Power> 1.0 0.8 1.5 </Power>", "channel='red') save(t, f, True) def test_motionblur(self): f = wpi.motionblur with", "= 'Arial' fontsize = calcSuitableFontsize(d, text, width=w) print('calcSuitableImagesize[W]: ', fontsize)", "t: with self.rose.clone() as p: p.negate() with f(t, p) as", "size[1] > 0) def test_fontsize(self): w = 100 h =", "5.0, 3.0) save(t, f) with self.rose.clone() as t: f(t, 5.0,", "fontsize = calcSuitableFontsize(d, text, height=h) print('calcSuitableImagesize[H]: ', fontsize) self.assertTrue(fontsize >", "def test_blueshift(self): f = wpi.blueshift with self.logo.clone() as t: f(t,", "with Image(filename='hald:12') as p: with self.rose.clone() as t: f(t, p)", "f = wpi.posterize with self.rose.clone() as t: f(t, 3, True)", "wpi.oilpaint with self.rose.clone() as t: f(t, 2.0) save(t, f) def", "f(t, 40, 200) save(t, f) def test_whitethreshold(self): f = wpi.whitethreshold", "calcSuitableImagesize(d, text) print('calcSuitableImagesize: ', size) self.assertTrue(size[0] > 0 and size[1]", "f) def test_cipher(self): f = wpi.encipher with self.rose.clone() as t:", "3) save(t, f) def 
test_enhance(self): f = wpi.enhance with Image(filename='plasma:',", "f(t, p) save(t, f) with self.grad.clone() as t: f(t, p,", "degradation t2 = mag f(t2, phase, True) save(t2, f) def", "1.0]) save(t, f) def test_spread(self): f = wpi.spread with self.logo.clone()", "f) def test_clamp(self): f = wpi.clamp # TODO: more useful", "3, 3, kernel) save(t, f) with self.rose.clone() as t: f(t,", "t: f(t, 20, 20, int(0.1*t.quantum_range)) save(t, f) def test_addnoise(self): f", "f = wpi.oilpaint with self.rose.clone() as t: f(t, 2.0) save(t,", "test_shave(self): f = wpi.shave with self.logo.clone() as t: f(t, 100,", "channel='blue') save(t, f, True) def test_orderedposterize(self): f = wpi.orderedposterize with", "with self.grad.clone() as t: f(t, 'o4x4,3,3', channel='red') save(t, f, True)", "3, kernel, channel='red') save(t, f, True) def test_floodfillpaint(self): f =", "5.0, 3.0, channel='red') save(t, f, True) def test_adaptiveresize(self): f =", "as p: save(p, f) def test_colordecisionlist(self): xml = \"\"\" <ColorCorrectionCollection", "test_solarize(self): f = wpi.solarize with self.rose.clone() as t: f(t, 0.4*t.quantum_range)", "self.grad.clone() as t: f(t, 40, 200) save(t, f) def test_whitethreshold(self):", "t: with t.clone() as p: (c, d) = f(t, p,", "background=Color('red')) as t: r = wpi.exportpixels(t, 0, 0, w, h,", "10, 10) save(t, f) def test_rotationalblur(self): f = wpi.rotationalblur with", "f) def test_label(self): f = wpi.label with self.rose.clone() as t:", "f = wpi.clip # I don't have an image which", "for green channel wpi.add(t, w) # add image for blue", "kernel) save(t, f) def test_combine(self): f = wpi.combine with Image()", "0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0,", "# add image for green channel wpi.add(t, w) # add", "wpi.morph color = Color('white') with self.rose.clone() as t: with Image(width=t.width,", "channel='red') save(t, f, True) def test_remap(self): f = wpi.remap with", "f = wpi.floodfillpaint with self.logo.clone() as t: 
f(t, Color('green'), 0.10*t.quantum_range,", "= wpi.blueshift with self.logo.clone() as t: f(t, 0.5) save(t, f)", "def test_sharpen(self): f = wpi.sharpen with self.rose.clone() as t: f(t,", "channel='red') save(t, f, True) def test_stegano(self): f = wpi.stegano with", "50 h = 40 offset = 15 tmpfile = 'tmp.png'", "f) def test_thumbnail(self): f = wpi.thumbnail with self.logo.clone() as t:", "'center' t.background_color = Color('blue') f(t, -10, -10, t.width+20, t.height+20) save(t,", "= [ 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.5, 0.0,", "t: f(t, 3, 3, kernel) save(t, f) with self.rose.clone() as", "d: d.gravity = 'center' d.fill_color = Color('black') d.text(0, 0, 'Watch\\nthe\\nPidgeon')", "f) def test_magnify(self): f = wpi.magnify with self.rose.clone() as t:", "# add image for blue channel wpi.setfirstiterator(t) # rewind the", "from wand.drawing import Drawing from wand.color import Color import wandplus.image", "t: wpi.minify(t) t.background_color = Color('black') f(t, 0, 10, 20, 20)", "int(rng * 0.95), channel='red') save(t, f, True) def test_remap(self): f", "= wpi.tint with self.rose.clone() as t: f(t, Color('rgb'), Color('gray(25%)')) save(t,", "= 15 tmpfile = 'tmp.png' with Image(width=w, height=h, background=Color('white')) as", "1/16] with self.rose.clone() as t: f(t, 3, kernel) save(t, f)", "t: f(t, 3, 3) save(t, f) with self.rose.clone() as t:", "t: f(t, 5, 5, channel='red') save(t, f, True) def test_adaptivethreshold(self):", "with self.rose.clone() as t: f(t, 45) save(t, f) with self.rose.clone()", "self.rose.clone() as t: f(t, channel='red') save(t, f, True) def test_blackthreshold(self):", "d(img) return img with Image() as t: with makeletter('A', 50,", "r = wpi.exportpixels(t, 0, 0, w, h, channels, 'double') self.assertEqual(r[0],", "with self.rose.clone() as t: f(t, 0, 3) save(t, f) with", "t: w = 50 h = 40 offset = 15", "save(p, f) def test_solarize(self): f = wpi.solarize with self.rose.clone() as", "f) def test_emboss(self): f = wpi.emboss with 
self.logo.clone() as t:", "test_posterize(self): f = wpi.posterize with self.rose.clone() as t: f(t, 3,", "range(4): with q.clone() as qq: wpi.resetpage(qq, 5*(i+1), 5*(i+1)) wpi.add(t, qq)", "with self.rose.clone() as t: f(t, p, channel='red') save(t, f, True)", "save(t, f, True) def test_motionblur(self): f = wpi.motionblur with self.logo.clone()", "1.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0,", "'default_channels', 'bilinear', [0, 0, 1.0, 0.0, 0.0, 1.0, 100, 100,", "0, 'Watch\\nthe\\nPidgeon') d(p) with f(t, p, offset) as q: q.save(filename=tmpfile)", "range(5): wpi.blur(p, 0, 1) wpi.add(t, p) with f(t) as p:", "10, 10, 10, 10, True) save(t, f) def test_randomthreshold(self): f", "save(t, f) with self.rose.clone() as t: f(t, 45, channel='red') save(t,", "self.logo.clone() as t: f(t, 100, 100) save(t, f) def test_tint(self):", "add image for green channel wpi.add(t, w) # add image", "True) def test_blur(self): f = wpi.blur with self.rose.clone() as t:", "= wpi.despeckle with self.rose.clone() as t: # TODO: add speckle", "t: f(t, Color('green'), 0.10*t.quantum_range, Color('white'), 0, 0) save(t, f) def", "t: f(t, 0.5*t.quantum_range) # TODO: find an skewed image as", "as q: q.save(filename=tmpfile) try: with Image() as q: wpi.setsizeoffset(q, w,", "self.rose.clone() as p: with f(t, p) as q: save(q, f)", "self.rose.clone() as t: f(t, True, 3, 3) save(t, f) with", "save(t, f) def test_edge(self): f = wpi.edge with self.logo.clone() as", "0.0]) f(t, d) # not work correctly (IM<6.9.9-36) save(t, f)", "self.rose.clone() as t: with Image(width=t.width, height=t.height, background=color) as p: wpi.add(t,", "f(t, 30, 10, 45, channel='red') save(t, f, True) def test_oilpaint(self):", "self.rose.clone() as base: with Image() as dst: rows = 2", "as p: p.negate() with f(t, p) as q: save(q, f)", "self.assertTrue(fontsize > 0) fontsize = calcSuitableFontsize(d, text, height=h) print('calcSuitableImagesize[H]: ',", "f(t, channel='red') save(t, f, True) def 
test_blackthreshold(self): f = wpi.blackthreshold", "with Image() as t: with Image(width=50, height=50, background=Color('red')) as p:", "f) with self.rose.clone() as t: f(t, 3, kernel, channel='red') save(t,", "= wpi.texture with Image(width=300, height=200) as t: with self.rose.clone() as", "def test_stereo(self): f = wpi.stereo with self.rose.clone() as t: with", "Color('black') draw.text(0, 0, 'A') draw(self.text_a) self.rose.save(filename=tmpdir + 'rose.png') self.grad.save(filename=tmpdir +", "2.0) save(t, f) def test_opaquepaint(self): f = wpi.opaquepaint with self.logo.clone()", "test_orderedposterize(self): f = wpi.orderedposterize with self.grad.clone() as t: f(t, 'o4x4,3,3')", "dst: rows = 2 columns = 3 for i in", "20, 20, 0.5*t.quantum_range, channel='red') save(t, f, True) def test_separate_channel(self): f", "image for red channel wpi.add(t, b) # add image for", "text = 'check' d.font = 'Arial' fontsize = calcSuitableFontsize(d, text,", "self.rose.clone() as t: with self.rose.clone() as p: p.negate() with f(t,", "save(c, f, True) c.destroy() def test_comparelayer(self): f = wpi.comparelayer with", "# TODO: find an skewed image as sample save(t, f)", "t: f(t, 20) save(t, f) def test_statistic(self): f = wpi.statistic", "f(t, 100, 100) save(t, f) def test_shear(self): f = wpi.shear", "self.rose.clone() as t: f(t, 3, kernel, channel='red') save(t, f, True)", "test_convolve(self): f = wpi.convolve kernel = [1/16, 2/16, 1/16, 2/16,", "wpi.remap with self.logo.clone() as t: with self.rose.clone() as p: f(t,", "with Image() as t: with makeletter('A', 50, 30) as a:", "# TODO: more useful code with Image(filename='hald:12') as p: with", "text) print('calcSuitableImagesize: ', size) self.assertTrue(size[0] > 0 and size[1] >", "save(t, f) def test_enhance(self): f = wpi.enhance with Image(filename='plasma:', width=100,", "= 72 * 2 f(t, dpi, dpi, 'lanczos', 1.0) save(t,", "with self.logo.clone() as t: f(t, 20, 20, 0.5*t.quantum_range, channel='red') save(t,", "t: 
with Drawing() as d: f(t, d, 1.0) save(t, f)", "def test_shear(self): f = wpi.shear with self.grad.clone() as t: f(t,", "0.5 </Slope> <Offset> 0.4 -0.5 0.6 </Offset> <Power> 1.0 0.8", "wpi.clip # I don't have an image which has clipping", "save(t, f) def test_convolve(self): f = wpi.convolve kernel = [1/16,", "as t: f(t, Color('red'), Color('gray(25%)')) save(t, f) def test_colormatrix(self): f", "save(t, f) def test_shadow(self): f = wpi.shadow with self.text.clone() as", "0.0, 0.0]) f(t, d) # not work correctly (IM<6.9.9-36) save(t,", "f, True) def test_blur(self): f = wpi.blur with self.rose.clone() as", "test_filterimage(self): f = wpi.filterimage kernel = [ # Sobel filter", "False, -3) as p: save(p, f) def test_solarize(self): f =", "0) fontsize = calcSuitableFontsize(d, text, height=h) print('calcSuitableImagesize[H]: ', fontsize) self.assertTrue(fontsize", "except Exception: raise finally: os.remove(tmpfile) def test_stereo(self): f = wpi.stereo", "False, 5, 20) save(t, f) def test_selectiveblur(self): f = wpi.selectiveblur", "save(t, f) def test_whitethreshold(self): f = wpi.whitethreshold with self.grad.clone() as", "Exception: raise finally: os.remove(tmpfile) def test_stereo(self): f = wpi.stereo with", "Color('white'), 0, 0) save(t, f) def test_fft(self): f = wpi.forwardfouriertransform", "test_enhance(self): f = wpi.enhance with Image(filename='plasma:', width=100, height=100) as t:", "0, 0, 0, 255, 0] f(t, w, h, 'RGB', 'char',", "as t: f(t, Color('green'), 0.10*t.quantum_range, Color('white'), 0, 0) save(t, f)", "3 for i in range(rows * columns): wpi.add(dst, base) tile", "with self.rose.clone() as t: f(t, Color('rgb'), Color('gray(25%)')) save(t, f) def", "10, 10, True) save(t, f) def test_randomthreshold(self): f = wpi.randomthreshold", "save(t, f) def test_opaquepaint(self): f = wpi.opaquepaint with self.logo.clone() as", "w, h): img = Image(width=w, height=h) with Drawing() as d:", "= wpi.decipher f(t, 'password') save(t, f) def test_deskew(self): 
f =", "= 100 h = 100 with Drawing() as d: text", "test_combine(self): f = wpi.combine with Image() as t: w =", "f, True) c.destroy() def test_comparelayer(self): f = wpi.comparelayer with Image()", "as t: f(t, Color('red'), Color('blue'), 1.0, False) save(t, f) with", "test_blackthreshold(self): f = wpi.blackthreshold with self.grad.clone() as t: f(t, Color('gray(50%)'))", "def test_sigmoidalcontrast(self): f = wpi.sigmoidalcontrast with self.rose.clone() as t: f(t,", "save(t, f) def test_equalize(self): f = wpi.equalize with self.rose.clone() as", "= wpi.rotationalblur with self.rose.clone() as t: f(t, 45) save(t, f)", "f(t, d, 1.0) save(t, f) def test_posterize(self): f = wpi.posterize", "with Image(width=25, height=25, background=Color('green1')) as q: for i in range(4):", "True) def test_exportpixels(self): w = 1 h = 1 channels", "= wpi.scale with self.rose.clone() as t: f(t, t.width*2, t.height*2) save(t,", "f) def test_shear(self): f = wpi.shear with self.grad.clone() as t:", "'RGB', 'char', b) save(t, f) def test_label(self): f = wpi.label", "save(t, f) def test_minify(self): f = wpi.minify with self.rose.clone() as", "with Drawing() as d: d.affine([2.0, 0.0, 0.0, 2.0, 0.0, 0.0])", "f(t, False, -3) as p: save(p, f) def test_solarize(self): f", "'text.png') self.text_a.save(filename=tmpdir + 'a.png') @classmethod def tearDownClass(self): self.rose.destroy() self.grad.destroy() self.logo.destroy()", "= 50 h = 40 offset = 15 tmpfile =", "= wpi.haldclut # TODO: more useful code with Image(filename='hald:12') as", "channel='red') save(t, f, True) def test_oilpaint(self): f = wpi.oilpaint with", "d.font_size = 36 size = calcSuitableImagesize(d, text) print('calcSuitableImagesize: ', size)", "self.logo.clone() as t: f(t, 30, 10, 45) save(t, f) with", "f = wpi.brightnesscontrast with self.rose.clone() as t: f(t, -30, 0)", "f(t, False) save(t, f) def test_convolve(self): f = wpi.convolve kernel", "f) def test_resample(self): f = wpi.resample with self.rose.clone() as 
t:", "self.rose.clone() as p: f(t, p, 'nodither') save(t, f) def test_resample(self):", "t.width//2, t.height//2, 20, 20) save(t, f) def test_sparsecolor(self): f =", "self.text.destroy() self.text_a.destroy() def test_adaptiveblur(self): f = wpi.adaptiveblur with self.rose.clone() as", "test_localcontrast(self): f = wpi.localcontrast with self.logo.clone() as t: f(t, 5,", "test_texture(self): f = wpi.texture with Image(width=300, height=200) as t: with", "'A') draw(self.text_a) self.rose.save(filename=tmpdir + 'rose.png') self.grad.save(filename=tmpdir + 'grad.png') self.logo.save(filename=tmpdir +", "channel: path = tmpdir + function.__name__ + \"_ch\" + ext", "has clipping path with self.rose.clone() as t: f(t) save(t, f)", "<Saturation> 0.85 </Saturation> </SATNode> </ColorCorrection> </ColorCorrectionCollection> \"\"\" f = wpi.colordecisionlist", "with self.rose.clone() as t: f(t, 2.0) save(t, f) def test_opaquepaint(self):", "1.0) self.assertEqual(r[1], 0.0) self.assertEqual(r[2], 0.0) def test_extent(self): f = wpi.extent", "with self.rose.clone() as base: with Image() as dst: rows =", "wpi.equalize with self.rose.clone() as t: f(t) save(t, f) with self.rose.clone()", "as r: save(r, f, ext='.gif') def test_constitute(self): f = wpi.constitute", "f) def test_posterize(self): f = wpi.posterize with self.rose.clone() as t:", "wpi.add(t, a) wpi.add(t, b) wpi.setfirstiterator(t) with f(t, False, -3) as", "30) as b: wpi.add(t, a) wpi.add(t, b) wpi.setfirstiterator(t) with f(t,", "= wpi.remap with self.logo.clone() as t: with self.rose.clone() as p:", "3, 3) save(t, f) with self.rose.clone() as t: f(t, True,", "= wpi.constitute with Image() as t: w = 2 h", "t.clone() as p: (c, d) = f(t, p, metric='absolute') save(c,", "as t: f(t, channel='red') save(t, f, True) def test_exportpixels(self): w", "<SATNode> <Saturation> 0.85 </Saturation> </SATNode> </ColorCorrection> </ColorCorrectionCollection> \"\"\" f =", "3, channel='red') save(t, f, True) def test_shave(self): f 
= wpi.shave", "> 0 and size[1] > 0) def test_fontsize(self): w =", "test_shadow(self): f = wpi.shadow with self.text.clone() as t: with self.text.clone()", "p) save(t, f) with self.grad.clone() as t: f(t, p, channel='green')", "as t: with Image(width=50, height=50, background=Color('red')) as p: wpi.add(t, p)", "f(t, int(rng * 0.05), int(rng * 0.95), channel='red') save(t, f,", "with self.rose.clone() as t: f(t, t.width*2, t.height*2) save(t, f) def", "1.0, 1.0, 1.0]) save(t, f) def test_spread(self): f = wpi.spread", "f = wpi.shear with self.grad.clone() as t: f(t, Color('red'), 0,", "w = 50 h = 40 offset = 15 tmpfile", "100, 100, 0.0, 1.0, 1.0, 1.0]) save(t, f) def test_spread(self):", "= wpi.sigmoidalcontrast with self.rose.clone() as t: f(t, True, 3, 3)", "5*(i+1)) wpi.add(t, qq) with f(t, 'compareany') as r: save(r, f,", "as p: (c, d) = f(t, p, metric='absolute', channel='red') save(c,", "on Windows... f(t, True) save(t, f) # includes two images(magnitude&phase)", "p, metric='absolute') save(c, f) c.destroy() with self.rose.clone() as t: with", "'center' draw.fill_color = Color('white') draw.stroke_color = Color('black') draw.text(0, 0, 'A')", "= wpi.exportpixels(t, 0, 0, w, h, channels, 'double') self.assertEqual(r[0], 1.0)", "f) def test_blueshift(self): f = wpi.blueshift with self.logo.clone() as t:", "save(t, f) def test_magnify(self): f = wpi.magnify with self.rose.clone() as", "0, 10, 20, 20) save(t, f) def test_wave(self): f =", "def test_swirl(self): f = wpi.swirl with self.rose.clone() as t: f(t,", "as p: p.rotate(90) with self.grad.clone() as t: f(t, p) save(t,", "t: f(t, 'gradient', 4, 4, channel='red') save(t, f, True) def", "test_clut(self): f = wpi.clut with Image(filename='gradient:red-blue', width=1, height=100) as p:", "height=100) as t: f(t) save(t, f) def test_equalize(self): f =", "200, 200) save(t, f) def test_clamp(self): f = wpi.clamp #", "c.destroy() with self.rose.clone() as t: with t.clone() as p: (c,", "self.grad.clone() as t: f(t, 
Color('gray(50%)')) save(t, f) def test_blueshift(self): f", "wpi.stereo with self.rose.clone() as t: with self.rose.clone() as p: p.negate()", "p: wpi.add(t, p) wpi.setfirstiterator(t) wpi.setdelay(t, 60) with f(t, 5) as", "as t: f(t, 0.5*t.quantum_range) # TODO: find an skewed image", "f) def test_comment(self): f = wpi.comment with self.grad.clone() as t:", "t: f(t, 2.0) save(t, f) def test_opaquepaint(self): f = wpi.opaquepaint", "save(t, f) with self.rose.clone() as t: f(t, 3, 3, channel='red')", "as p: save(p, f) def test_solarize(self): f = wpi.solarize with", "with self.rose.clone() as t: w = 50 h = 40", "print('calcSuitableImagesize[H]: ', fontsize) self.assertTrue(fontsize > 0) if __name__ == '__main__':", "= 'center' f(t, t.width//2, t.height//2, 20, 20) save(t, f) def", "save(t, f) def test_clut(self): f = wpi.clut with Image(filename='gradient:red-blue', width=1,", "f) def test_smush(self): f = wpi.smush def makeletter(letter, w, h):", "save(t, f) def test_chop(self): f = wpi.chop with self.grad.clone() as", "save(t, f, True) def test_oilpaint(self): f = wpi.oilpaint with self.rose.clone()", "qq: wpi.resetpage(qq, 5*(i+1), 5*(i+1)) wpi.add(t, qq) with f(t, 'compareany') as", "wpi.montage with self.rose.clone() as base: with Image() as dst: rows", "self.rose.clone() as t: f(t, 3, 3, channel='red') save(t, f, True)", "t: t.gravity = 'center' t.background_color = Color('blue') f(t, -10, -10,", "h = 1 channels = 'RGB' with Image(width=w, height=h, background=Color('red'))", "= f(t, p, metric='absolute') save(c, f) c.destroy() with self.rose.clone() as", "t: f(t, 20, 20, 0.5*t.quantum_range) save(t, f) with self.logo.clone() as", "f(t, 10, 10, 45) save(t, f) def test_smush(self): f =", "f = wpi.texture with Image(width=300, height=200) as t: with self.rose.clone()", "test_separate_channel(self): f = wpi.separate_channel with self.rose.clone() as t: f(t, 'red')", "build on Windows... 
f(t, True) save(t, f) # includes two", "20) save(t, f) def test_wave(self): f = wpi.wave with self.grad.clone()", "2.0, 0.0, 0.0]) f(t, d) # not work correctly (IM<6.9.9-36)", "TODO: input optimized .gif file. f = wpi.coalesce with Image()", "wpi.blackthreshold with self.grad.clone() as t: f(t, Color('gray(50%)')) save(t, f) def", "test_clamp(self): f = wpi.clamp # TODO: more useful code with", "= 1 h = 1 channels = 'RGB' with Image(width=w,", "'a.png') @classmethod def tearDownClass(self): self.rose.destroy() self.grad.destroy() self.logo.destroy() self.text.destroy() self.text_a.destroy() def", "self.grad.clone() as t: f(t, Color('red'), Color('gray(25%)')) save(t, f) def test_colormatrix(self):", "f(t, 0.4*t.quantum_range, channel='red') save(t, f, True) def test_splice(self): f =", "= 'tmp.png' with Image(width=w, height=h, background=Color('white')) as p: with Drawing()", "wpi.wave with self.grad.clone() as t: f(t, 40, 200) save(t, f)", "f(t, Color('gray(50%)')) save(t, f) class CheckTextUtil(unittest.TestCase): def test_imagesize(self): with Drawing()", "as p: for i in range(5): wpi.blur(p, 0, 1) wpi.add(t,", "height=h) print('calcSuitableImagesize[H]: ', fontsize) self.assertTrue(fontsize > 0) if __name__ ==", "t: f(t, 0.4*t.quantum_range) save(t, f) with self.rose.clone() as t: f(t,", "save(t, f, True) def test_sketch(self): f = wpi.sketch with self.logo.clone()", "channel='red') save(t, f, True) def test_exportpixels(self): w = 1 h", "f) def test_adaptivesharpen(self): f = wpi.adaptivesharpen with self.rose.clone() as t:", "save(t, f, True) def test_blur(self): f = wpi.blur with self.rose.clone()", "= wpi.localcontrast with self.logo.clone() as t: f(t, 5, 30) save(t,", "0.0, 1.0, 100, 100, 0.0, 1.0, 1.0, 1.0]) save(t, f)", "1.0) save(t, f) def test_importpixels(self): f = wpi.importpixels with Image(width=4,", "= wpi.autogamma with self.rose.clone() as t: f(t) save(t, f) with", "as t: f(t, 5, 1) save(t, f) def test_chop(self): f", "= 100 black = 
Color('black') white = Color('white') with Image(width=w,", "as mag: with t.sequence[1].clone() as phase: wpi.blur(mag, 0, 0.5) #", "width=w) print('calcSuitableImagesize[W]: ', fontsize) self.assertTrue(fontsize > 0) fontsize = calcSuitableFontsize(d,", "with Image(width=100, height=100, background=Color('black')) as t: f(t, 'default_channels', 'bilinear', [0,", "f) with self.grad.clone() as t: f(t, 'o4x4,3,3', channel='red') save(t, f,", "save(t, f) def test_sigmoidalcontrast(self): f = wpi.sigmoidalcontrast with self.rose.clone() as", "test_adaptivesharpen(self): f = wpi.adaptivesharpen with self.rose.clone() as t: f(t, 5,", "f(t, w, h, 'RGB', 'char', b) save(t, f) def test_contrast(self):", "add image for red channel wpi.add(t, b) # add image", "p: (c, d) = f(t, p, metric='absolute', channel='red') save(c, f,", "4) save(t, f) with self.rose.clone() as t: f(t, 'gradient', 4,", "f) def test_despeckle(self): f = wpi.despeckle with self.rose.clone() as t:", "f(t, '#1', True) save(t, f) def test_clut(self): f = wpi.clut", "save(q, f, ext='.gif') def test_morphology(self): f = wpi.morphology with self.logo.clone()", "-10, -10, t.width+20, t.height+20) save(t, f) def test_filterimage(self): f =", "d) = f(t, p, metric='absolute', channel='red') save(c, f, True) c.destroy()", "class CheckImage(unittest.TestCase): @classmethod def setUpClass(self): os.mkdir(tmpdir) self.rose = Image(filename='rose:') self.grad", "wpi.autogamma with self.rose.clone() as t: f(t) save(t, f) with self.rose.clone()", "with self.logo.clone() as t: f(t, 'dilate', 1, 'Diamond', channel='red') save(t,", "as d: text = 'check' d.font = 'Arial' d.font_size =", "0, 0) save(t, f) def test_fft(self): f = wpi.forwardfouriertransform #", "int(t.width*1.5), int(t.height*2.0)) save(t, f) def test_adaptivesharpen(self): f = wpi.adaptivesharpen with", "self.rose.clone() as t: f(t, -30, 0) save(t, f) with self.rose.clone()", "t: with self.rose.clone() as p: f(t, p, 'nodither') save(t, f)", "self.logo.clone() 
as t: f(t, 5, 30) save(t, f) def test_magnify(self):", "Color('gray(25%)')) save(t, f) def test_colormatrix(self): f = wpi.colormatrix with self.logo.clone()", "draw.gravity = 'center' draw.fill_color = Color('white') draw.stroke_color = Color('black') draw.text(0,", "kernel) save(t, f) with self.rose.clone() as t: f(t, 3, kernel,", "test_sepiatone(self): f = wpi.sepiatone with self.rose.clone() as t: f(t, 0.5*t.quantum_range)", "d(p) with f(t, p, offset) as q: q.save(filename=tmpfile) try: with", "channel='red') save(t, f, True) def test_blackthreshold(self): f = wpi.blackthreshold with", "f, True) def test_clip(self): # NOTE: result is always FAILED.", "def test_comment(self): f = wpi.comment with self.grad.clone() as t: f(t,", "f, True) def test_cyclecolormap(self): f = wpi.cyclecolormap with self.logo.clone() as", "5*(i+1), 5*(i+1)) wpi.add(t, qq) with f(t, 'compareany') as r: save(r,", "4, 4, channel='red') save(t, f, True) def test_stegano(self): f =", "draw.font_size = 50 draw.gravity = 'center' draw.fill_color = Color('white') draw.stroke_color", "2 + 4 # R + G + B with", "t: f(t, Color('red'), Color('gray(25%)')) save(t, f) def test_colormatrix(self): f =", "Windows... f(t, True) save(t, f) # includes two images(magnitude&phase) f", "FAILED. 
f = wpi.clippath with self.rose.clone() as t: f(t, '#1',", "p: f(t, p, 'nodither') save(t, f) def test_resample(self): f =", "t: f(t, False) save(t, f) def test_convolve(self): f = wpi.convolve", "0, 0, 0, 255, 0] f(t, 1, 1, w, h,", "10) save(t, f) def test_rotationalblur(self): f = wpi.rotationalblur with self.rose.clone()", "as t: f(t, channel='red') save(t, f, True) def test_blackthreshold(self): f", "as p: with f(t, p) as q: save(q, f) def", "f) with self.rose.clone() as t: f(t, 45, channel='red') save(t, f,", "f, True) def test_remap(self): f = wpi.remap with self.logo.clone() as", "int(rng * 0.95)) save(t, f) with self.text_a.clone() as t: rng", "f = wpi.orderedposterize with self.grad.clone() as t: f(t, 'o4x4,3,3') save(t,", "0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,", "0) save(t, f) with self.rose.clone() as t: f(t, -30, 0,", "as base: with Image() as dst: rows = 2 columns", "test_remap(self): f = wpi.remap with self.logo.clone() as t: with self.rose.clone()", "Image() as t: with Image(width=50, height=50, background=Color('red')) as p: wpi.add(t,", "t: f(t, True, 3, 3, channel='red') save(t, f, True) def", "as t: f(t, 'gaussian') save(t, f) with self.grad.clone() as t:", "def test_thumbnail(self): f = wpi.thumbnail with self.logo.clone() as t: f(t,", "def test_deskew(self): f = wpi.deskew with Image(width=80, height=40, background=Color('black')) as", "phase: wpi.blur(mag, 0, 0.5) # as degradation t2 = mag", "5, kernel) save(t, f) def test_combine(self): f = wpi.combine with", "f = wpi.colordecisionlist with self.rose.clone() as t: f(t, xml) save(t,", "2.0, -1.0, 0.0, 1.0, ] with self.rose.clone() as t: f(t,", "as p: with self.rose.clone() as t: f(t, p) save(t, f)", "with f(dst, d, tile, thumb, mode, frame) as result: save(result,", "as wpi from wandplus.textutil import calcSuitableFontsize, calcSuitableImagesize import os import", "= wpi.stegano with self.rose.clone() as t: w = 50 h", "+ ext else: path = tmpdir + function.__name__ + ext", "f(t, 
True, 3, 3, channel='red') save(t, f, True) def test_sketch(self):", "test_thumbnail(self): f = wpi.thumbnail with self.logo.clone() as t: f(t, 100,", "t: f(t, p) save(t, f) with self.rose.clone() as t: f(t,", "wpi.raiseimage with self.rose.clone() as t: f(t, 10, 10, 10, 10,", "def test_rotationalblur(self): f = wpi.rotationalblur with self.rose.clone() as t: f(t,", "# add image for red channel wpi.add(t, b) # add", "True) def test_floodfillpaint(self): f = wpi.floodfillpaint with self.logo.clone() as t:", "t: f(t, xml) save(t, f) def test_colorize(self): f = wpi.colorize", "with self.rose.clone() as t: f(t) save(t, f) def test_minify(self): f", "self.text = Image(filename='label:Confirm', width=200, height=60) self.text_a = Image(width=70, height=60) with", "0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0", "t: f(t, 20, 20, 0.5*t.quantum_range, channel='red') save(t, f, True) def", "', fontsize) self.assertTrue(fontsize > 0) if __name__ == '__main__': unittest.main()", "tmpdir = '_tmp/' def save(img, function, channel=False, ext='.png'): if channel:", "TODO: find an skewed image as sample save(t, f) def", "0.0, 0.0, 0.0, 1.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5,", "self.assertEqual(r[1], 0.0) self.assertEqual(r[2], 0.0) def test_extent(self): f = wpi.extent with", "f = wpi.label with self.rose.clone() as t: f(t, 'hello') save(t,", "def setUpClass(self): os.mkdir(tmpdir) self.rose = Image(filename='rose:') self.grad = Image(filename='gradient:', width=400,", "Image(filename='gradient:red-blue', width=1, height=100) as p: p.rotate(90) with self.grad.clone() as t:", "b) # add image for green channel wpi.add(t, w) #", "1.0) save(t, f) def test_roll(self): f = wpi.roll with self.rose.clone()", "f = wpi.adaptiveblur with self.rose.clone() as t: f(t, 5.0, 3.0)", "self.grad.clone() as t: f(t, 'o4x4,3,3') save(t, f) with self.grad.clone() as", "save(t, f) with self.rose.clone() as t: f(t, channel='red') save(t, f,", "f = wpi.rotationalblur with self.rose.clone() as t: f(t, 45) 
save(t,", "wpi.contrast with self.rose.clone() as t: f(t, False) save(t, f) def", "as t: f(t) save(t, f) def test_minify(self): f = wpi.minify", "save(t, f) def test_brightnesscontrast(self): f = wpi.brightnesscontrast with self.rose.clone() as", "f) def test_chop(self): f = wpi.chop with self.grad.clone() as t:", "import os import unittest tmpdir = '_tmp/' def save(img, function,", "def test_despeckle(self): f = wpi.despeckle with self.rose.clone() as t: #", "t: f(t, 5, 30) save(t, f) def test_magnify(self): f =", "self.rose.clone() as t: f(t, 0.4*t.quantum_range, channel='red') save(t, f, True) def", "p, 'overlay') save(t, f) def test_sharpen(self): f = wpi.sharpen with", "as t: f(t, 'hello') save(t, f) def test_localcontrast(self): f =", "with self.grad.clone() as t: f(t, 'gaussian') save(t, f) with self.grad.clone()", "python from wand.image import Image from wand.drawing import Drawing from", "if channel: path = tmpdir + function.__name__ + \"_ch\" +", "test_colorize(self): f = wpi.colorize with self.grad.clone() as t: f(t, Color('red'),", "= wpi.resample with self.rose.clone() as t: dpi = 72 *", "= wpi.convolve kernel = [1/16, 2/16, 1/16, 2/16, 4/16, 2/16,", "f = wpi.whitethreshold with self.grad.clone() as t: f(t, Color('gray(50%)')) save(t,", "0, 'A') draw(self.text_a) self.rose.save(filename=tmpdir + 'rose.png') self.grad.save(filename=tmpdir + 'grad.png') self.logo.save(filename=tmpdir", "True) save(t, f) # includes two images(magnitude&phase) f = wpi.inversefouriertransform", "as t: f(t, 'gradient', 4, 4) save(t, f) with self.rose.clone()", "True, 3, 3, channel='red') save(t, f, True) def test_sketch(self): f", "wpi.scale with self.rose.clone() as t: f(t, t.width*2, t.height*2) save(t, f)", "img with Image() as t: with makeletter('A', 50, 30) as", "20, 20) save(t, f) def test_sparsecolor(self): f = wpi.sparsecolor with", "t: f(t, channel='red') save(t, f, True) def test_clip(self): # NOTE:", "1, w, h, 'RGB', 'char', b) save(t, f) def test_label(self):", 
"f(t, p, metric='absolute') save(c, f) c.destroy() with self.rose.clone() as t:", "with Image(width=300, height=200) as t: with self.rose.clone() as p: with", "True) def test_affinetransform(self): f = wpi.affinetransform with self.rose.clone() as t:", "2/16, 1/16, 2/16, 1/16] with self.rose.clone() as t: f(t, 3,", "with self.text_a.clone() as t: rng = t.quantum_range f(t, int(rng *", "f(t, 10, 10, 10, 10, True) save(t, f) def test_randomthreshold(self):", "= [1/16, 2/16, 1/16, 2/16, 4/16, 2/16, 1/16, 2/16, 1/16]", "calcSuitableImagesize import os import unittest tmpdir = '_tmp/' def save(img,", "as t: f(t, 3, 3) save(t, f) with self.rose.clone() as", "width=1, height=100) as p: p.rotate(90) with self.grad.clone() as t: f(t,", "True, 3, 3) save(t, f) with self.rose.clone() as t: f(t,", "f = wpi.comment with self.grad.clone() as t: f(t, 'hello') save(t,", "'hello') save(t, f) def test_compare(self): f = wpi.compare with self.rose.clone()", "wpi.importpixels with Image(width=4, height=4, background=Color('red')) as t: w = 2", "self.logo = Image(filename='logo:') self.text = Image(filename='label:Confirm', width=200, height=60) self.text_a =", "as p: p.negate() f(p, 100, 2, 10, 10) t.composite_channel('default_channels', p,", "100, 2, 10, 10) t.composite_channel('default_channels', p, 'overlay') save(t, f) def", "as t: f(t, 100, 100) save(t, f) def test_shear(self): f", "= wpi.stereo with self.rose.clone() as t: with self.rose.clone() as p:", "as t: f(t, 100, 100) save(t, f) def test_tint(self): f", "draw.font = 'Arial' draw.font_size = 50 draw.gravity = 'center' draw.fill_color", "w = 2 h = 2 b = [0, 0,", "f) f = wpi.decipher f(t, 'password') save(t, f) def test_deskew(self):", "t: f(t, 3, 3, kernel, channel='red') save(t, f, True) def", "* 2 f(t, dpi, dpi, 'lanczos', 1.0) save(t, f) def", "Drawing from wand.color import Color import wandplus.image as wpi from", "test_magnify(self): f = wpi.magnify with self.rose.clone() as t: f(t) save(t,", "0.6 </Offset> <Power> 
1.0 0.8 1.5 </Power> </SOPNode> <SATNode> <Saturation>", "b: with Image(width=h, height=h, background=white) as w: wpi.add(t, b) #", "more useful code with self.rose.clone() as t: f(t) save(t, f)", "f(t, p) save(t, f) with self.rose.clone() as t: f(t, p,", "self.rose.clone() as t: f(t, False) save(t, f) def test_convolve(self): f", "+ 'rose.png') self.grad.save(filename=tmpdir + 'grad.png') self.logo.save(filename=tmpdir + 'logo.png') self.text.save(filename=tmpdir +", "t: f(t, Color('gray(50%)')) save(t, f) def test_blueshift(self): f = wpi.blueshift", "= wpi.morphology with self.logo.clone() as t: f(t, 'dilate', 1, 'Diamond')", "self.logo.clone() as t: f(t, 'rgb', False, 5, 20) save(t, f)", "= wpi.deskew with Image(width=80, height=40, background=Color('black')) as t: f(t, 0.5*t.quantum_range)", "test_selectiveblur(self): f = wpi.selectiveblur with self.logo.clone() as t: f(t, 20,", "wpi.addnoise with self.grad.clone() as t: f(t, 'gaussian') save(t, f) with", "3, True) save(t, f) def test_raiseimage(self): f = wpi.raiseimage with", "def test_exportpixels(self): w = 1 h = 1 channels =", "f = wpi.roll with self.rose.clone() as t: f(t, 10, 10)", "ext='.png'): if channel: path = tmpdir + function.__name__ + \"_ch\"", "self.rose.clone() as p: for i in range(5): wpi.blur(p, 0, 1)", "f, True) def test_orderedposterize(self): f = wpi.orderedposterize with self.grad.clone() as", "# TODO: input optimized .gif file. 
f = wpi.coalesce with", "t.height*2) save(t, f) def test_segment(self): f = wpi.segment with self.logo.clone()", "height=100, background=Color('black')) as t: f(t, 'default_channels', 'bilinear', [0, 0, 1.0,", "+ 2 + 4 # R + G + B", "self.logo.clone() as t: f(t, 5) save(t, f) def test_cipher(self): f", "save(t, f, True) def test_charcoal(self): f = wpi.charcoal with self.rose.clone()", "f, True) def test_affinetransform(self): f = wpi.affinetransform with self.rose.clone() as", "t: f(t, 10, 10, 10, 10, True) save(t, f) def", "f = wpi.sharpen with self.rose.clone() as t: f(t, 3, 3)", "wpi.sigmoidalcontrast with self.rose.clone() as t: f(t, True, 3, 3) save(t,", "as p: wpi.add(t, p) with Image(width=25, height=25, background=Color('green1')) as q:", "= [ # Sobel filter -1.0, 0.0, 1.0, -2.0, 0.0,", "as t: f(t, 0, 3, channel='red') save(t, f, True) def", "def test_minify(self): f = wpi.minify with self.rose.clone() as t: f(t)", "'char', b) save(t, f) def test_label(self): f = wpi.label with", "f) def test_morph(self): f = wpi.morph color = Color('white') with", "= wpi.raiseimage with self.rose.clone() as t: f(t, 10, 10, 10,", "save(t, f, True) def test_separate_channel(self): f = wpi.separate_channel with self.rose.clone()", "t: f(t, 0, 3, channel='red') save(t, f, True) def test_charcoal(self):", "t: f(t, p) save(t, f) with self.grad.clone() as t: f(t,", "qq) with f(t, 'compareany') as r: save(r, f, ext='.gif') def", "f) def test_sigmoidalcontrast(self): f = wpi.sigmoidalcontrast with self.rose.clone() as t:", "as t: # TODO: add speckle noise f(t) save(t, f)", "f(t) save(t, f) def test_equalize(self): f = wpi.equalize with self.rose.clone()", "t: f(t, 'red') save(t, f) def test_sepiatone(self): f = wpi.sepiatone", "t.gravity = 'center' f(t, t.width//2, t.height//2, 20, 20) save(t, f)", "channel wpi.add(t, w) # add image for blue channel wpi.setfirstiterator(t)", "1.0, 100, 100, 0.0, 1.0, 1.0, 1.0]) save(t, f) def", "self.rose.clone() as t: f(t, channel='red') save(t, 
f, True) def test_exportpixels(self):", "def test_segment(self): f = wpi.segment with self.logo.clone() as t: f(t,", "\"{0}x{1}+0+0\".format(columns, rows) thumb = \"80x50+4+3\" frame = \"15x15+3+3\" mode =", "correctly (IM<6.9.9-36) save(t, f) def test_autogamma(self): f = wpi.autogamma with", "'dilate', 1, 'Diamond') save(t, f) with self.logo.clone() as t: f(t,", "rows = 2 columns = 3 for i in range(rows", "as t: f(t, True, 45, 135) save(t, f) def test_shadow(self):", "= calcSuitableFontsize(d, text, height=h) print('calcSuitableImagesize[H]: ', fontsize) self.assertTrue(fontsize > 0)", "channel=False, ext='.png'): if channel: path = tmpdir + function.__name__ +", "= Color('white') draw.stroke_color = Color('black') draw.text(0, 0, 'A') draw(self.text_a) self.rose.save(filename=tmpdir", "test_adaptiveblur(self): f = wpi.adaptiveblur with self.rose.clone() as t: f(t, 5.0,", "as t: f(t, 5, 5, channel='red') save(t, f, True) def", "from wandplus.textutil import calcSuitableFontsize, calcSuitableImagesize import os import unittest tmpdir", "as t: with self.rose.clone() as p: for i in range(5):", "save(t, f) def test_label(self): f = wpi.label with self.rose.clone() as", "f, True) def test_stegano(self): f = wpi.stegano with self.rose.clone() as", "3, kernel) save(t, f) with self.rose.clone() as t: f(t, 3,", "self.grad.save(filename=tmpdir + 'grad.png') self.logo.save(filename=tmpdir + 'logo.png') self.text.save(filename=tmpdir + 'text.png') self.text_a.save(filename=tmpdir", "test_sketch(self): f = wpi.sketch with self.logo.clone() as t: f(t, 10,", "= wpi.vignette with self.logo.clone() as t: wpi.minify(t) t.background_color = Color('black')", "b) save(t, f) def test_label(self): f = wpi.label with self.rose.clone()", "0) def test_fontsize(self): w = 100 h = 100 with", "5, 30) save(t, f) def test_magnify(self): f = wpi.magnify with", "d: text = 'check' d.font = 'Arial' d.font_size = 36", "</Power> </SOPNode> <SATNode> <Saturation> 0.85 </Saturation> </SATNode> 
</ColorCorrection> </ColorCorrectionCollection> \"\"\"", "Drawing() as d: d.gravity = 'center' d.fill_color = Color('black') d.text(0,", "t: w = 100 h = 100 black = Color('black')", "b = [0, 0, 0, 255, 255, 255, 255, 0,", "f(t, 0.5) save(t, f) def test_brightnesscontrast(self): f = wpi.brightnesscontrast with", "f = wpi.compare with self.rose.clone() as t: with t.clone() as", "channel='red') save(t, f, True) def test_sketch(self): f = wpi.sketch with", "100) save(t, f) def test_tint(self): f = wpi.tint with self.rose.clone()", "f = wpi.vignette with self.logo.clone() as t: wpi.minify(t) t.background_color =", "f) def test_sepiatone(self): f = wpi.sepiatone with self.rose.clone() as t:", "100, 100) save(t, f) def test_shear(self): f = wpi.shear with", "d.text(0, 0, 'Watch\\nthe\\nPidgeon') d(p) with f(t, p, offset) as q:", "d: d.affine([2.0, 0.0, 0.0, 2.0, 0.0, 0.0]) f(t, d) #", "with self.grad.clone() as t: t.gravity = 'north_west' f(t, 0, 00,", "wpi.autolevel with self.rose.clone() as t: f(t) save(t, f) with self.rose.clone()", "test_exportpixels(self): w = 1 h = 1 channels = 'RGB'", "f, ext='.gif') def test_morphology(self): f = wpi.morphology with self.logo.clone() as", "</ColorCorrection> </ColorCorrectionCollection> \"\"\" f = wpi.colordecisionlist with self.rose.clone() as t:", "wpi.adaptiveblur with self.rose.clone() as t: f(t, 5.0, 3.0) save(t, f)", "255, 255, 255, 255, 0, 0, 0, 255, 0] f(t,", "with f(t, p, offset) as q: q.save(filename=tmpfile) try: with Image()", "f(t, Color('gray(50%)')) save(t, f) def test_blueshift(self): f = wpi.blueshift with", "self.logo.clone() as t: f(t, 30, 10, 45, channel='red') save(t, f,", "save(t, f) with self.rose.clone() as t: f(t, -30, 0, channel='red')", "= wpi.shave with self.logo.clone() as t: f(t, 100, 100) save(t,", "10, True) save(t, f) def test_randomthreshold(self): f = wpi.randomthreshold with", "f(t, p, channel='green') save(t, f, True) def test_coalesce(self): # TODO:", "'lanczos', 1.0) save(t, f) def 
test_roll(self): f = wpi.roll with", "test_cyclecolormap(self): f = wpi.cyclecolormap with self.logo.clone() as t: f(t, 5)", "3) save(t, f) with self.rose.clone() as t: f(t, 0, 3,", "5, channel='red') save(t, f, True) def test_adaptivethreshold(self): f = wpi.adaptivethreshold", "save(t, f) def test_importpixels(self): f = wpi.importpixels with Image(width=4, height=4,", "w, h, 'RGB', 'char', b) save(t, f) def test_label(self): f", "def test_blur(self): f = wpi.blur with self.rose.clone() as t: f(t,", "tmpfile) save(q, f) except Exception: raise finally: os.remove(tmpfile) def test_stereo(self):", "image for green channel wpi.add(t, w) # add image for", "True) def test_adaptiveresize(self): f = wpi.adaptiveresize with self.rose.clone() as t:", "with self.logo.clone() as t: f(t, 100, 100) save(t, f) def", "'_tmp/' def save(img, function, channel=False, ext='.png'): if channel: path =", "as t: f(t, channel='red') save(t, f, True) def test_clip(self): #", "wpi.separate_channel with self.rose.clone() as t: f(t, 'red') save(t, f) def", "f) with self.logo.clone() as t: f(t, 20, 20, 0.5*t.quantum_range, channel='red')", "def test_opaquepaint(self): f = wpi.opaquepaint with self.logo.clone() as t: f(t,", "self.logo.clone() as t: f(t, 20, 20, int(0.1*t.quantum_range)) save(t, f) def", "= Image(filename='gradient:', width=400, height=400) self.logo = Image(filename='logo:') self.text = Image(filename='label:Confirm',", "in range(4): with q.clone() as qq: wpi.resetpage(qq, 5*(i+1), 5*(i+1)) wpi.add(t,", "f(t, 5, 30) save(t, f) def test_magnify(self): f = wpi.magnify", "save(t, f) with self.rose.clone() as t: f(t, True, 3, 3,", "f = wpi.inversefouriertransform with t.sequence[0].clone() as mag: with t.sequence[1].clone() as", "f(t, 20, 20, 0.5*t.quantum_range) save(t, f) with self.logo.clone() as t:", "f(t, channel='red') save(t, f, True) def test_exportpixels(self): w = 1", "1.0 0.8 1.5 </Power> </SOPNode> <SATNode> <Saturation> 0.85 </Saturation> </SATNode>", "-0.5 0.6 
</Offset> <Power> 1.0 0.8 1.5 </Power> </SOPNode> <SATNode>", "'--with-fftw' with self.logo.clone() as t: # I couldn't build on", "def test_addnoise(self): f = wpi.addnoise with self.grad.clone() as t: f(t,", "test_comment(self): f = wpi.comment with self.grad.clone() as t: f(t, 'hello')", "wpi.blur with self.rose.clone() as t: f(t, 0, 3) save(t, f)", "Image(width=t.width, height=t.height, background=color) as p: wpi.add(t, p) wpi.setfirstiterator(t) wpi.setdelay(t, 60)", "0, 255, 0] f(t, w, h, 'RGB', 'char', b) save(t,", "\"\"\" f = wpi.colordecisionlist with self.rose.clone() as t: f(t, xml)", "clipping path with self.rose.clone() as t: f(t) save(t, f) def", "with self.rose.clone() as p: p.negate() with f(t, p) as q:", "0, 3) save(t, f) def test_enhance(self): f = wpi.enhance with", "test_sharpen(self): f = wpi.sharpen with self.rose.clone() as t: f(t, 3,", "as t: f(t, 30, 10, 45, channel='red') save(t, f, True)", "image which has clipping path with self.rose.clone() as t: f(t)", "(c, d) = f(t, p, metric='absolute', channel='red') save(c, f, True)", "save(t, f) def test_blueshift(self): f = wpi.blueshift with self.logo.clone() as", "f) def test_localcontrast(self): f = wpi.localcontrast with self.logo.clone() as t:", "d.gravity = 'center' d.fill_color = Color('black') d.text(0, 0, 'Watch\\nthe\\nPidgeon') d(p)", "0.4*t.quantum_range, channel='red') save(t, f, True) def test_splice(self): f = wpi.splice", "background=Color('black')) as t: f(t, 0.5*t.quantum_range) # TODO: find an skewed", "print('calcSuitableImagesize: ', size) self.assertTrue(size[0] > 0 and size[1] > 0)", "rng = t.quantum_range f(t, int(rng * 0.05), int(rng * 0.95))", "try: with Image() as q: wpi.setsizeoffset(q, w, h, offset) q.read(filename='stegano:'", "wand.drawing import Drawing from wand.color import Color import wandplus.image as", "f = wpi.blur with self.rose.clone() as t: f(t, 0, 3)", "save(t, f) with self.logo.clone() as t: f(t, 20, 20, 0.5*t.quantum_range,", "f(t, 5.0, 3.0, 
channel='red') save(t, f, True) def test_adaptiveresize(self): f", "save(t, f) def test_montage(self): f = wpi.montage with self.rose.clone() as", "as t: f(t, 'red') save(t, f) def test_sepiatone(self): f =", "with self.rose.clone() as t: f(t, 'password') save(t, f) f =", "Image() as q: wpi.setsizeoffset(q, w, h, offset) q.read(filename='stegano:' + tmpfile)", "result is always FAILED. f = wpi.clip # I don't", "f) with self.rose.clone() as t: f(t, 5.0, 3.0, channel='red') save(t,", "f) def test_deskew(self): f = wpi.deskew with Image(width=80, height=40, background=Color('black'))", "test_whitethreshold(self): f = wpi.whitethreshold with self.grad.clone() as t: f(t, Color('gray(50%)'))", "30, 10, 45, channel='red') save(t, f, True) def test_oilpaint(self): f", "q: save(q, f) def test_comment(self): f = wpi.comment with self.grad.clone()", "# NOTE: result is always FAILED. f = wpi.clip #", "test_resample(self): f = wpi.resample with self.rose.clone() as t: dpi =", "f(t, 0.5*t.quantum_range) # TODO: find an skewed image as sample", "import wandplus.image as wpi from wandplus.textutil import calcSuitableFontsize, calcSuitableImagesize import", "d) = f(t, p, metric='absolute') save(c, f) c.destroy() with self.rose.clone()", "f = wpi.segment with self.logo.clone() as t: f(t, 'rgb', False,", "as t: f(t, 'default_channels', 'bilinear', [0, 0, 1.0, 0.0, 0.0,", "self.rose.clone() as t: f(t, 3, kernel) save(t, f) with self.rose.clone()", "as p: wpi.add(t, p) wpi.setfirstiterator(t) wpi.setdelay(t, 60) with f(t, 5)", "f(t, Color('red'), Color('blue'), 1.0, False, channel='blue') save(t, f, True) def", "f(t, Color('red'), 0, 10) save(t, f) def test_sigmoidalcontrast(self): f =", "f) with self.rose.clone() as t: f(t, 0.4*t.quantum_range, channel='red') save(t, f,", "Color('red'), Color('gray(25%)')) save(t, f) def test_colormatrix(self): f = wpi.colormatrix with", "t: with Drawing() as d: d.affine([2.0, 0.0, 0.0, 2.0, 0.0,", "+ 'grad.png') self.logo.save(filename=tmpdir + 
'logo.png') self.text.save(filename=tmpdir + 'text.png') self.text_a.save(filename=tmpdir +", "f) def test_importpixels(self): f = wpi.importpixels with Image(width=4, height=4, background=Color('red'))", "0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,", "self.rose.clone() as t: f(t) save(t, f) def test_minify(self): f =", "q: for i in range(4): with q.clone() as qq: wpi.resetpage(qq,", "wpi.tint with self.rose.clone() as t: f(t, Color('rgb'), Color('gray(25%)')) save(t, f)", "save(t, f) with self.rose.clone() as t: f(t, 3, 3, kernel,", "'Arial' d.font_size = 24 d.gravity = 'center' d.text(0, 0, letter)", "with self.logo.clone() as t: f(t, 30, 10, 45, channel='red') save(t,", "def test_sketch(self): f = wpi.sketch with self.logo.clone() as t: f(t,", "f = wpi.adaptivesharpen with self.rose.clone() as t: f(t, 5, 5)", "mode = \"frame\" with Drawing() as d: with f(dst, d,", "with self.rose.clone() as p: f(t, p, 'nodither') save(t, f) def", "self.text_a.destroy() def test_adaptiveblur(self): f = wpi.adaptiveblur with self.rose.clone() as t:", "self.grad.clone() as t: f(t, p, channel='green') save(t, f, True) def", "= wpi.motionblur with self.logo.clone() as t: f(t, 30, 10, 45)", "'Watch\\nthe\\nPidgeon') d(p) with f(t, p, offset) as q: q.save(filename=tmpfile) try:", "with self.grad.clone() as t: f(t, Color('gray(50%)')) save(t, f) class CheckTextUtil(unittest.TestCase):", "f, True) def test_separate_channel(self): f = wpi.separate_channel with self.rose.clone() as", "+ tmpfile) save(q, f) except Exception: raise finally: os.remove(tmpfile) def", "p: with f(t, p) as q: save(q, f) def test_thumbnail(self):", "require IM build option '--with-fftw' with self.logo.clone() as t: #", "with self.rose.clone() as t: t.gravity = 'center' t.background_color = Color('blue')", "save(t, f, True) def test_exportpixels(self): w = 1 h =", "wpi.blur(mag, 0, 0.5) # as degradation t2 = mag f(t2,", "f(t, 1, 1, w, h, 'RGB', 'char', b) save(t, f)", "t: f(t, True, 3, 3) save(t, f) with 
self.rose.clone() as", "50 draw.gravity = 'center' draw.fill_color = Color('white') draw.stroke_color = Color('black')", "Drawing() as draw: draw.font = 'Arial' draw.font_size = 50 draw.gravity", "q.clone() as qq: wpi.resetpage(qq, 5*(i+1), 5*(i+1)) wpi.add(t, qq) with f(t,", "wpi.constitute with Image() as t: w = 2 h =", "f) except Exception: raise finally: os.remove(tmpfile) def test_stereo(self): f =", "save(t, f) def test_wave(self): f = wpi.wave with self.grad.clone() as", "save(t, f) def test_filterimage(self): f = wpi.filterimage kernel = [", "= wpi.magnify with self.rose.clone() as t: f(t) save(t, f) def", "as t: f(t, 5.0, 3.0, channel='red') save(t, f, True) def", "def test_polaroid(self): f = wpi.polaroid with self.logo.clone() as t: with", "self.rose.clone() as t: f(t, 3, 3, kernel) save(t, f) with", "= wpi.contrast with self.rose.clone() as t: f(t, False) save(t, f)", "self.rose.clone() as t: f(t, 0, 3, channel='red') save(t, f, True)", "save(q, f) def test_swirl(self): f = wpi.swirl with self.rose.clone() as", "# require IM build option '--with-fftw' with self.logo.clone() as t:", "code with Image(filename='hald:12') as p: with self.rose.clone() as t: f(t,", "f = wpi.shadow with self.text.clone() as t: with self.text.clone() as", "size = calcSuitableImagesize(d, text) print('calcSuitableImagesize: ', size) self.assertTrue(size[0] > 0", "= \"{0}x{1}+0+0\".format(columns, rows) thumb = \"80x50+4+3\" frame = \"15x15+3+3\" mode", "as t: f(t, 2.0) save(t, f) def test_opaquepaint(self): f =", "self.logo.clone() as t: wpi.minify(t) t.background_color = Color('black') f(t, 0, 10,", "wpi.charcoal with self.rose.clone() as t: f(t, 5, 1) save(t, f)", "45) save(t, f) def test_smush(self): f = wpi.smush def makeletter(letter,", "test_shear(self): f = wpi.shear with self.grad.clone() as t: f(t, Color('red'),", "= wpi.separate_channel with self.rose.clone() as t: f(t, 'red') save(t, f)", "-30, 0, channel='red') save(t, f, True) def test_blur(self): f =", "path = 
tmpdir + function.__name__ + ext # print(path) img.save(filename=path)", "00, 200, 200) save(t, f) def test_clamp(self): f = wpi.clamp", "save(t, f) def test_contrast(self): f = wpi.contrast with self.rose.clone() as", "f(t, p, 'nodither') save(t, f) def test_resample(self): f = wpi.resample", "f = wpi.spread with self.logo.clone() as t: f(t, 20) save(t,", "'grad.png') self.logo.save(filename=tmpdir + 'logo.png') self.text.save(filename=tmpdir + 'text.png') self.text_a.save(filename=tmpdir + 'a.png')", "wpi.forwardfouriertransform # require IM build option '--with-fftw' with self.logo.clone() as", "1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 ] f(t, 5,", "0, 0, 255, 0] f(t, 1, 1, w, h, 'RGB',", "d, tile, thumb, mode, frame) as result: save(result, f) def", "IM build option '--with-fftw' with self.logo.clone() as t: # I", "10) save(t, f) def test_sigmoidalcontrast(self): f = wpi.sigmoidalcontrast with self.rose.clone()", "save(t, f, True) def test_remap(self): f = wpi.remap with self.logo.clone()", "with self.logo.clone() as t: f(t, 20, 20, int(0.1*t.quantum_range)) save(t, f)", "raise finally: os.remove(tmpfile) def test_stereo(self): f = wpi.stereo with self.rose.clone()", "function, channel=False, ext='.png'): if channel: path = tmpdir + function.__name__", "f) def test_spread(self): f = wpi.spread with self.logo.clone() as t:", "wpi.emboss with self.logo.clone() as t: f(t, 0, 3) save(t, f)", "wpi.shear with self.grad.clone() as t: f(t, Color('red'), 0, 10) save(t,", "wandplus.image as wpi from wandplus.textutil import calcSuitableFontsize, calcSuitableImagesize import os", "as t: f(t, channel='red') save(t, f, True) def test_autolevel(self): f", "self.rose.clone() as t: f(t, 'password') save(t, f) f = wpi.decipher", "= \"15x15+3+3\" mode = \"frame\" with Drawing() as d: with", "img.save(filename=path) class CheckImage(unittest.TestCase): @classmethod def setUpClass(self): os.mkdir(tmpdir) self.rose = Image(filename='rose:')", "test_blueshift(self): f = wpi.blueshift with 
self.logo.clone() as t: f(t, 0.5)", "test_statistic(self): f = wpi.statistic with self.rose.clone() as t: f(t, 'gradient',", "find an skewed image as sample save(t, f) def test_despeckle(self):", "as t: f(t, 3) save(t, f) def test_emboss(self): f =", "as t: r = wpi.exportpixels(t, 0, 0, w, h, channels,", "add speckle noise f(t) save(t, f) def test_edge(self): f =", "i in range(rows * columns): wpi.add(dst, base) tile = \"{0}x{1}+0+0\".format(columns,", "= wpi.thumbnail with self.logo.clone() as t: f(t, 100, 100) save(t,", "unittest tmpdir = '_tmp/' def save(img, function, channel=False, ext='.png'): if", "test_despeckle(self): f = wpi.despeckle with self.rose.clone() as t: # TODO:", "f = wpi.splice with self.rose.clone() as t: t.gravity = 'center'", "class CheckTextUtil(unittest.TestCase): def test_imagesize(self): with Drawing() as d: text =", "f = wpi.deskew with Image(width=80, height=40, background=Color('black')) as t: f(t,", "f(t, 5) as q: save(q, f, ext='.gif') def test_morphology(self): f", "def test_posterize(self): f = wpi.posterize with self.rose.clone() as t: f(t,", "f(t) save(t, f) def test_minify(self): f = wpi.minify with self.rose.clone()", "= wpi.randomthreshold with self.text_a.clone() as t: rng = t.quantum_range f(t,", "Image(filename='gradient:', width=400, height=400) self.logo = Image(filename='logo:') self.text = Image(filename='label:Confirm', width=200,", "p, channel='green') save(t, f, True) def test_coalesce(self): # TODO: input", "1, 'Diamond') save(t, f) with self.logo.clone() as t: f(t, 'dilate',", "with self.rose.clone() as t: with self.rose.clone() as p: p.negate() with", "and size[1] > 0) def test_fontsize(self): w = 100 h", "f = wpi.implode with self.rose.clone() as t: f(t, 1.0) save(t,", "wpi.stegano with self.rose.clone() as t: w = 50 h =", "as t: t.gravity = 'north_west' f(t, 0, 00, 200, 200)", "as degradation t2 = mag f(t2, phase, True) save(t2, f)", "with self.rose.clone() as t: f(t) save(t, f) def test_montage(self): f", "30) 
as a: with makeletter('B', 50, 30) as b: wpi.add(t,", "for i in range(rows * columns): wpi.add(dst, base) tile =", "as result: save(result, f) def test_morph(self): f = wpi.morph color", "as t: with self.rose.clone() as p: p.negate() with f(t, p)", "40 offset = 15 tmpfile = 'tmp.png' with Image(width=w, height=h,", "with self.rose.clone() as t: # TODO: add speckle noise f(t)", "self.rose.clone() as t: f(t, 5, 1) save(t, f) def test_chop(self):", "self.rose.clone() as t: f(t, 'hello') save(t, f) def test_localcontrast(self): f", "= wpi.sharpen with self.rose.clone() as t: f(t, 3, 3) save(t,", "with f(t, 'compareany') as r: save(r, f, ext='.gif') def test_constitute(self):", "is always FAILED. f = wpi.clippath with self.rose.clone() as t:", "def test_colormatrix(self): f = wpi.colormatrix with self.logo.clone() as t: kernel", "0.0, 0.0, 0.0, 0.0, 0.0, 1.5, 0.0, 0.0, 0.0, 0.0,", "as p: f(t, p, 'nodither') save(t, f) def test_resample(self): f", "t: f(t, 10, 10, 45) save(t, f) def test_smush(self): f", "= 36 size = calcSuitableImagesize(d, text) print('calcSuitableImagesize: ', size) self.assertTrue(size[0]", "with self.rose.clone() as p: with f(t, p) as q: save(q,", "f, True) def test_oilpaint(self): f = wpi.oilpaint with self.rose.clone() as", "with self.rose.clone() as t: f(t, 3, kernel, channel='red') save(t, f,", "5, 1) save(t, f) def test_chop(self): f = wpi.chop with", "= wpi.shear with self.grad.clone() as t: f(t, Color('red'), 0, 10)", "thumb, mode, frame) as result: save(result, f) def test_morph(self): f", "as t: f(t, 3, kernel, channel='red') save(t, f, True) def", "background=Color('white')) as p: with Drawing() as d: d.gravity = 'center'", "test_brightnesscontrast(self): f = wpi.brightnesscontrast with self.rose.clone() as t: f(t, -30,", "= mag f(t2, phase, True) save(t2, f) def test_haldclut(self): f", "more useful code with Image(filename='hald:12') as p: with self.rose.clone() as", "channel = 1 + 2 + 4 # R +", "wpi.comparelayer with Image() as t: with 
Image(width=50, height=50, background=Color('red')) as", "f(t, 3, 3, kernel, channel='red') save(t, f, True) def test_floodfillpaint(self):", "= tmpdir + function.__name__ + \"_ch\" + ext else: path", "wpi.add(t, qq) with f(t, 'compareany') as r: save(r, f, ext='.gif')", "calcSuitableFontsize(d, text, height=h) print('calcSuitableImagesize[H]: ', fontsize) self.assertTrue(fontsize > 0) if", "wpi.sketch with self.logo.clone() as t: f(t, 10, 10, 45) save(t,", "0, 0, 255, 0] f(t, w, h, 'RGB', 'char', b)", "height=h, background=Color('red')) as t: r = wpi.exportpixels(t, 0, 0, w,", "255, 255, 255, 0, 0, 0, 255, 0] f(t, 1,", "= wpi.equalize with self.rose.clone() as t: f(t) save(t, f) with", "frame = \"15x15+3+3\" mode = \"frame\" with Drawing() as d:", "f(t, p, offset) as q: q.save(filename=tmpfile) try: with Image() as", "test_spread(self): f = wpi.spread with self.logo.clone() as t: f(t, 20)", "q: save(q, f, ext='.gif') def test_morphology(self): f = wpi.morphology with", "255, 255, 255, 0, 0, 0, 255, 0] f(t, w,", "offset) q.read(filename='stegano:' + tmpfile) save(q, f) except Exception: raise finally:", "test_motionblur(self): f = wpi.motionblur with self.logo.clone() as t: f(t, 30,", "save(t, f) def test_sepiatone(self): f = wpi.sepiatone with self.rose.clone() as", "with Image() as t: with self.rose.clone() as p: for i", "with self.rose.clone() as t: f(t) save(t, f) def test_clippath(self): #", "i in range(4): with q.clone() as qq: wpi.resetpage(qq, 5*(i+1), 5*(i+1))", "save(t, f) def test_smush(self): f = wpi.smush def makeletter(letter, w,", "self.grad.clone() as t: t.gravity = 'north_west' f(t, 0, 00, 200,", "test_morph(self): f = wpi.morph color = Color('white') with self.rose.clone() as", "True) def test_stegano(self): f = wpi.stegano with self.rose.clone() as t:", "Color('white') with Image(width=w, height=w, background=black) as b: with Image(width=h, height=h,", "p) wpi.setfirstiterator(t) wpi.setdelay(t, 60) with f(t, 5) as q: save(q,", "as t: dpi = 72 * 
2 f(t, dpi, dpi,", "test_constitute(self): f = wpi.constitute with Image() as t: w =", "f = wpi.separate_channel with self.rose.clone() as t: f(t, 'red') save(t,", "<Power> 1.0 0.8 1.5 </Power> </SOPNode> <SATNode> <Saturation> 0.85 </Saturation>", "tmpdir + function.__name__ + ext # print(path) img.save(filename=path) class CheckImage(unittest.TestCase):", "as b: wpi.add(t, a) wpi.add(t, b) wpi.setfirstiterator(t) with f(t, False,", "f = wpi.despeckle with self.rose.clone() as t: # TODO: add", "Image(width=300, height=200) as t: with self.rose.clone() as p: with f(t,", "with self.rose.clone() as t: f(t, 3, True) save(t, f) def", "f = wpi.selectiveblur with self.logo.clone() as t: f(t, 20, 20,", "self.assertEqual(r[0], 1.0) self.assertEqual(r[1], 0.0) self.assertEqual(r[2], 0.0) def test_extent(self): f =", "'o4x4,3,3') save(t, f) with self.grad.clone() as t: f(t, 'o4x4,3,3', channel='red')", "w = 100 h = 100 black = Color('black') white", "fontsize) self.assertTrue(fontsize > 0) fontsize = calcSuitableFontsize(d, text, height=h) print('calcSuitableImagesize[H]:", "'password') save(t, f) def test_deskew(self): f = wpi.deskew with Image(width=80,", "background=white) as w: wpi.add(t, b) # add image for red", "pointer channel = 1 + 2 + 4 # R", "kernel, channel='red') save(t, f, True) def test_floodfillpaint(self): f = wpi.floodfillpaint", "f = wpi.enhance with Image(filename='plasma:', width=100, height=100) as t: f(t)", "tile = \"{0}x{1}+0+0\".format(columns, rows) thumb = \"80x50+4+3\" frame = \"15x15+3+3\"", "-10, t.width+20, t.height+20) save(t, f) def test_filterimage(self): f = wpi.filterimage", "= wpi.posterize with self.rose.clone() as t: f(t, 3, True) save(t,", "test_comparelayer(self): f = wpi.comparelayer with Image() as t: with Image(width=50,", "def test_autolevel(self): f = wpi.autolevel with self.rose.clone() as t: f(t)", "is always FAILED. 
f = wpi.clip # I don't have", "test_opaquepaint(self): f = wpi.opaquepaint with self.logo.clone() as t: f(t, Color('red'),", "40, 200) save(t, f) def test_whitethreshold(self): f = wpi.whitethreshold with", "t: f(t, 3, 3, channel='red') save(t, f, True) def test_shave(self):", "p.negate() with f(t, p) as q: save(q, f) def test_swirl(self):", "save(t, f) with self.logo.clone() as t: f(t, 'dilate', 1, 'Diamond',", "= 1 channels = 'RGB' with Image(width=w, height=h, background=Color('red')) as", "Color('gray(50%)')) save(t, f) def test_blueshift(self): f = wpi.blueshift with self.logo.clone()", "= wpi.autolevel with self.rose.clone() as t: f(t) save(t, f) with", "20, 20, 0.5*t.quantum_range) save(t, f) with self.logo.clone() as t: f(t,", "self.grad.clone() as t: f(t, Color('gray(50%)')) save(t, f) class CheckTextUtil(unittest.TestCase): def", "f) def test_filterimage(self): f = wpi.filterimage kernel = [ #", "self.rose.clone() as t: f(t, 3, True) save(t, f) def test_raiseimage(self):", "True) def test_motionblur(self): f = wpi.motionblur with self.logo.clone() as t:", "'password') save(t, f) f = wpi.decipher f(t, 'password') save(t, f)", "test_minify(self): f = wpi.minify with self.rose.clone() as t: f(t) save(t,", "'Arial' d.font_size = 36 size = calcSuitableImagesize(d, text) print('calcSuitableImagesize: ',", "0.9 1.2 0.5 </Slope> <Offset> 0.4 -0.5 0.6 </Offset> <Power>", "'double') self.assertEqual(r[0], 1.0) self.assertEqual(r[1], 0.0) self.assertEqual(r[2], 0.0) def test_extent(self): f", "with self.rose.clone() as t: with Image(width=t.width, height=t.height, background=color) as p:", "t: f(t, 0.5*t.quantum_range) save(t, f) def test_shade(self): f = wpi.shade", "t: f(t, 5) save(t, f) def test_cipher(self): f = wpi.encipher", "def test_affinetransform(self): f = wpi.affinetransform with self.rose.clone() as t: with", "save(t, f) def test_colorize(self): f = wpi.colorize with self.grad.clone() as", "columns): wpi.add(dst, base) tile = 
\"{0}x{1}+0+0\".format(columns, rows) thumb = \"80x50+4+3\"", "True) save(t, f) def test_raiseimage(self): f = wpi.raiseimage with self.rose.clone()", "100 with Drawing() as d: text = 'check' d.font =", "180) save(t, f) def test_texture(self): f = wpi.texture with Image(width=300,", "TODO: more useful code with self.rose.clone() as t: f(t) save(t,", "= wpi.forwardfouriertransform # require IM build option '--with-fftw' with self.logo.clone()", "test_compare(self): f = wpi.compare with self.rose.clone() as t: with t.clone()", "100 black = Color('black') white = Color('white') with Image(width=w, height=w,", "wpi.orderedposterize with self.grad.clone() as t: f(t, 'o4x4,3,3') save(t, f) with", "def test_enhance(self): f = wpi.enhance with Image(filename='plasma:', width=100, height=100) as", "f(t, 'o4x4,3,3') save(t, f) with self.grad.clone() as t: f(t, 'o4x4,3,3',", "self.logo.clone() as t: f(t, 20, 20, 0.5*t.quantum_range) save(t, f) with", "wpi.add(t, p) with f(t) as p: save(p, f) def test_colordecisionlist(self):", "f) def test_edge(self): f = wpi.edge with self.logo.clone() as t:", "f(t, True) save(t, f) # includes two images(magnitude&phase) f =", "def test_statistic(self): f = wpi.statistic with self.rose.clone() as t: f(t,", "h = 100 with Drawing() as d: text = 'check'", "def test_coalesce(self): # TODO: input optimized .gif file. f =", "def test_extent(self): f = wpi.extent with self.rose.clone() as t: t.gravity", "always FAILED. 
f = wpi.clip # I don't have an", "= calcSuitableImagesize(d, text) print('calcSuitableImagesize: ', size) self.assertTrue(size[0] > 0 and", "wpi.colormatrix with self.logo.clone() as t: kernel = [ 0.5, 0.0,", "wpi.compare with self.rose.clone() as t: with t.clone() as p: (c,", "0, 0, 255, 255, 255, 255, 0, 0, 0, 255,", "10, 10, 45) save(t, f) def test_smush(self): f = wpi.smush", "0.5) save(t, f) def test_brightnesscontrast(self): f = wpi.brightnesscontrast with self.rose.clone()", "<ColorCorrection id=\"cc03345\"> <SOPNode> <Slope> 0.9 1.2 0.5 </Slope> <Offset> 0.4", "wpi.adaptivesharpen with self.rose.clone() as t: f(t, 5, 5) save(t, f)", "f(t, -30, 0, channel='red') save(t, f, True) def test_blur(self): f", "f(t, 'dilate', 1, 'Diamond', channel='red') save(t, f, True) def test_motionblur(self):", "+ B with f(t, channel) as q: save(q, f) def", "3) save(t, f) with self.rose.clone() as t: f(t, True, 3,", "t: f(t, Color('rgb'), Color('gray(25%)')) save(t, f) def test_vignette(self): f =", "10, 10) t.composite_channel('default_channels', p, 'overlay') save(t, f) def test_sharpen(self): f", "wpi from wandplus.textutil import calcSuitableFontsize, calcSuitableImagesize import os import unittest", "t: f(t, Color('red'), Color('blue'), 1.0, False) save(t, f) with self.logo.clone()", "= 'center' d.fill_color = Color('black') d.text(0, 0, 'Watch\\nthe\\nPidgeon') d(p) with", "= Color('white') with Image(width=w, height=w, background=black) as b: with Image(width=h,", "20, 20, int(0.1*t.quantum_range)) save(t, f) def test_addnoise(self): f = wpi.addnoise", "Color('green'), 0.10*t.quantum_range, Color('white'), 0, 0) save(t, f) def test_fft(self): f", "b) wpi.setfirstiterator(t) with f(t, False, -3) as p: save(p, f)", "wpi.shave with self.logo.clone() as t: f(t, 100, 100) save(t, f)", "with Image(width=w, height=h, background=Color('red')) as t: r = wpi.exportpixels(t, 0,", "20) save(t, f) def test_sparsecolor(self): f = wpi.sparsecolor with Image(width=100,", "save(t, f, 
True) def test_adaptivethreshold(self): f = wpi.adaptivethreshold with self.logo.clone()", "as draw: draw.font = 'Arial' draw.font_size = 50 draw.gravity =", "= wpi.adaptivesharpen with self.rose.clone() as t: f(t, 5, 5) save(t,", "def test_morphology(self): f = wpi.morphology with self.logo.clone() as t: f(t,", "save(t, f) def test_segment(self): f = wpi.segment with self.logo.clone() as", "f(t, 'hello') save(t, f) def test_localcontrast(self): f = wpi.localcontrast with", "with self.rose.clone() as t: f(t, 5, 5, channel='red') save(t, f,", "0.0, 2.0, 0.0, 0.0]) f(t, d) # not work correctly", "with self.grad.clone() as t: f(t, p, channel='green') save(t, f, True)", "phase, True) save(t2, f) def test_haldclut(self): f = wpi.haldclut #", "20) save(t, f) def test_statistic(self): f = wpi.statistic with self.rose.clone()", "f) def test_equalize(self): f = wpi.equalize with self.rose.clone() as t:", "f, True) def test_implode(self): f = wpi.implode with self.rose.clone() as", "width=100, height=100) as t: f(t) save(t, f) def test_equalize(self): f", "an skewed image as sample save(t, f) def test_despeckle(self): f", "d, 1.0) save(t, f) def test_posterize(self): f = wpi.posterize with", "1, 'Diamond', channel='red') save(t, f, True) def test_motionblur(self): f =", "save(t, f) with self.rose.clone() as t: f(t, 3, kernel, channel='red')", "f) def test_addnoise(self): f = wpi.addnoise with self.grad.clone() as t:", "test_colormatrix(self): f = wpi.colormatrix with self.logo.clone() as t: kernel =", "as d: d.gravity = 'center' d.fill_color = Color('black') d.text(0, 0,", "Color('red'), Color('blue'), 1.0, False) save(t, f) with self.logo.clone() as t:", "test_equalize(self): f = wpi.equalize with self.rose.clone() as t: f(t) save(t,", "def test_splice(self): f = wpi.splice with self.rose.clone() as t: t.gravity", "f = wpi.wave with self.grad.clone() as t: f(t, 40, 200)", "as d: text = 'check' d.font = 'Arial' fontsize =", "f) with self.grad.clone() as t: f(t, 'gaussian', 
channel='red') save(t, f,", "as t: rng = t.quantum_range f(t, int(rng * 0.05), int(rng", "with self.logo.clone() as t: f(t, 20, 20, 0.5*t.quantum_range) save(t, f)", "with self.rose.clone() as t: f(t, True, 3, 3, channel='red') save(t,", "f) with self.rose.clone() as t: f(t, 'gradient', 4, 4, channel='red')", "t: f(t, 5, 1) save(t, f) def test_chop(self): f =", "wpi.coalesce with Image() as t: with self.rose.clone() as p: for", "c.destroy() def test_comparelayer(self): f = wpi.comparelayer with Image() as t:", "with self.rose.clone() as t: f(t, 3, 3, kernel, channel='red') save(t,", "save(t, f, True) def test_implode(self): f = wpi.implode with self.rose.clone()", "with self.rose.clone() as t: f(t, 'hello') save(t, f) def test_localcontrast(self):", "def test_remap(self): f = wpi.remap with self.logo.clone() as t: with", "wpi.thumbnail with self.logo.clone() as t: f(t, 100, 100) save(t, f)", "def test_convolve(self): f = wpi.convolve kernel = [1/16, 2/16, 1/16,", "useful code with self.rose.clone() as t: f(t) save(t, f) with", "f, True) def test_exportpixels(self): w = 1 h = 1", "Image(width=100, height=100, background=Color('black')) as t: f(t, 'default_channels', 'bilinear', [0, 0,", "1.0, False) save(t, f) with self.logo.clone() as t: f(t, Color('red'),", "= wpi.charcoal with self.rose.clone() as t: f(t, 5, 1) save(t,", "wpi.floodfillpaint with self.logo.clone() as t: f(t, Color('green'), 0.10*t.quantum_range, Color('white'), 0,", "100 h = 100 black = Color('black') white = Color('white')", "self.text.save(filename=tmpdir + 'text.png') self.text_a.save(filename=tmpdir + 'a.png') @classmethod def tearDownClass(self): self.rose.destroy()", "self.assertTrue(size[0] > 0 and size[1] > 0) def test_fontsize(self): w", "self.logo.clone() as t: f(t, Color('red'), Color('blue'), 1.0, False, channel='blue') save(t,", "with self.rose.clone() as t: f(t, 45, channel='red') save(t, f, True)", "wpi.sharpen with self.rose.clone() as t: f(t, 3, 3) save(t, f)", "> 0) def 
test_fontsize(self): w = 100 h = 100", "p: save(p, f) def test_solarize(self): f = wpi.solarize with self.rose.clone()", "with self.logo.clone() as t: with Drawing() as d: f(t, d,", "with self.logo.clone() as t: with self.rose.clone() as p: f(t, p,", "t: # I couldn't build on Windows... f(t, True) save(t,", "0.0, 1.0 ] f(t, 5, 5, kernel) save(t, f) def", "with self.logo.clone() as t: f(t, 0.5) save(t, f) def test_brightnesscontrast(self):", "'#1', True) save(t, f) def test_clut(self): f = wpi.clut with", "d: with f(dst, d, tile, thumb, mode, frame) as result:", "0, 3) save(t, f) with self.rose.clone() as t: f(t, 0,", "d.font_size = 24 d.gravity = 'center' d.text(0, 0, letter) d(img)", "d: text = 'check' d.font = 'Arial' fontsize = calcSuitableFontsize(d,", "self.rose.clone() as t: f(t, 3, 3) save(t, f) with self.rose.clone()", "channel='red') save(t, f, True) def test_autolevel(self): f = wpi.autolevel with", "save(t, f) # includes two images(magnitude&phase) f = wpi.inversefouriertransform with", "f = wpi.comparelayer with Image() as t: with Image(width=50, height=50,", "f, True) def test_sketch(self): f = wpi.sketch with self.logo.clone() as", "save(t, f) with self.text_a.clone() as t: rng = t.quantum_range f(t,", "f(t, 100, 100) save(t, f) def test_tint(self): f = wpi.tint", "test_autolevel(self): f = wpi.autolevel with self.rose.clone() as t: f(t) save(t,", "= [0, 0, 0, 255, 255, 255, 255, 0, 0,", "self.rose.clone() as t: f(t, -30, 0, channel='red') save(t, f, True)", "useful code with Image(filename='hald:12') as p: with self.rose.clone() as t:", "as t: w = 50 h = 40 offset =", "= 'center' t.background_color = Color('blue') f(t, -10, -10, t.width+20, t.height+20)", "wpi.colorize with self.grad.clone() as t: f(t, Color('red'), Color('gray(25%)')) save(t, f)", "with self.grad.clone() as t: f(t, 'o4x4,3,3') save(t, f) with self.grad.clone()", "b) # add image for red channel wpi.add(t, b) #", "Image(filename='rose:') self.grad = Image(filename='gradient:', 
width=400, height=400) self.logo = Image(filename='logo:') self.text", "save(t, f, True) def test_blackthreshold(self): f = wpi.blackthreshold with self.grad.clone()", "0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,", "True) save(t2, f) def test_haldclut(self): f = wpi.haldclut # TODO:", "Image(width=w, height=h) with Drawing() as d: d.font = 'Arial' d.font_size", "1 channels = 'RGB' with Image(width=w, height=h, background=Color('red')) as t:", "channel='red') save(t, f, True) def test_polaroid(self): f = wpi.polaroid with", "= wpi.comment with self.grad.clone() as t: f(t, 'hello') save(t, f)", "20, 0.5*t.quantum_range, channel='red') save(t, f, True) def test_separate_channel(self): f =", "f(t) save(t, f) def test_montage(self): f = wpi.montage with self.rose.clone()", "as t: f(t, 20, 20, int(0.1*t.quantum_range)) save(t, f) def test_addnoise(self):", "= 'center' d.text(0, 0, letter) d(img) return img with Image()", "1/16, 2/16, 1/16] with self.rose.clone() as t: f(t, 3, kernel)", "wpi.shadow with self.text.clone() as t: with self.text.clone() as p: p.negate()", "# rewind the index pointer channel = 1 + 2", "= wpi.sepiatone with self.rose.clone() as t: f(t, 0.5*t.quantum_range) save(t, f)", "0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0,", "width=400, height=400) self.logo = Image(filename='logo:') self.text = Image(filename='label:Confirm', width=200, height=60)", "wpi.swirl with self.rose.clone() as t: f(t, 180) save(t, f) def", "135) save(t, f) def test_shadow(self): f = wpi.shadow with self.text.clone()", "with self.rose.clone() as t: f(t, 5.0, 3.0, channel='red') save(t, f,", "h, offset) q.read(filename='stegano:' + tmpfile) save(q, f) except Exception: raise", "* 0.05), int(rng * 0.95)) save(t, f) with self.text_a.clone() as", "t: f(t, Color('red'), Color('blue'), 1.0, False, channel='blue') save(t, f, True)", "as a: with makeletter('B', 50, 30) as b: wpi.add(t, a)", "f) def test_rotationalblur(self): f = wpi.rotationalblur with self.rose.clone() as 
t:", "channel='red') save(t, f, True) def test_clip(self): # NOTE: result is", "save(t, f) def test_addnoise(self): f = wpi.addnoise with self.grad.clone() as", "q.save(filename=tmpfile) try: with Image() as q: wpi.setsizeoffset(q, w, h, offset)", "self.grad.clone() as t: f(t, 'hello') save(t, f) def test_compare(self): f", "as t: wpi.minify(t) t.background_color = Color('black') f(t, 0, 10, 20,", "G + B with f(t, channel) as q: save(q, f)", "with self.rose.clone() as t: f(t, '#1', True) save(t, f) def", "t: f(t, Color('red'), 0, 10) save(t, f) def test_sigmoidalcontrast(self): f", "t: f(t, 'gaussian', channel='red') save(t, f, True) def test_affinetransform(self): f", "draw(self.text_a) self.rose.save(filename=tmpdir + 'rose.png') self.grad.save(filename=tmpdir + 'grad.png') self.logo.save(filename=tmpdir + 'logo.png')", "+ 'a.png') @classmethod def tearDownClass(self): self.rose.destroy() self.grad.destroy() self.logo.destroy() self.text.destroy() self.text_a.destroy()", "save(t, f, True) def test_stegano(self): f = wpi.stegano with self.rose.clone()", "255, 255, 0, 0, 0, 255, 0] f(t, 1, 1,", "wpi.polaroid with self.logo.clone() as t: with Drawing() as d: f(t,", "d.font = 'Arial' d.font_size = 36 size = calcSuitableImagesize(d, text)", "Color('blue'), 1.0, False, channel='blue') save(t, f, True) def test_orderedposterize(self): f", "= 100 with Drawing() as d: text = 'check' d.font", "as t: f(t, 45) save(t, f) with self.rose.clone() as t:", "wpi.spread with self.logo.clone() as t: f(t, 20) save(t, f) def", "xmlns=\"urn:ASC:CDL:v1.2\"> <ColorCorrection id=\"cc03345\"> <SOPNode> <Slope> 0.9 1.2 0.5 </Slope> <Offset>", "f) def test_minify(self): f = wpi.minify with self.rose.clone() as t:", "wpi.add(dst, base) tile = \"{0}x{1}+0+0\".format(columns, rows) thumb = \"80x50+4+3\" frame", "f = wpi.coalesce with Image() as t: with self.rose.clone() as", "wpi.adaptivethreshold with self.logo.clone() as t: f(t, 20, 20, int(0.1*t.quantum_range)) save(t,", "as t: f(t, 
'gradient', 4, 4, channel='red') save(t, f, True)", "Drawing() as d: with f(dst, d, tile, thumb, mode, frame)", "Sobel filter -1.0, 0.0, 1.0, -2.0, 0.0, 2.0, -1.0, 0.0,", "f(t, 5, 5, kernel) save(t, f) def test_combine(self): f =", "# Sobel filter -1.0, 0.0, 1.0, -2.0, 0.0, 2.0, -1.0,", "wpi.localcontrast with self.logo.clone() as t: f(t, 5, 30) save(t, f)", "result: save(result, f) def test_morph(self): f = wpi.morph color =", "as t: f(t, 'dilate', 1, 'Diamond', channel='red') save(t, f, True)", "f = wpi.addnoise with self.grad.clone() as t: f(t, 'gaussian') save(t,", "self.rose.clone() as t: f(t, int(t.width*1.5), int(t.height*2.0)) save(t, f) def test_adaptivesharpen(self):", "# not work correctly (IM<6.9.9-36) save(t, f) def test_autogamma(self): f", "f = wpi.stegano with self.rose.clone() as t: w = 50", "= 'check' d.font = 'Arial' d.font_size = 36 size =", "xml = \"\"\" <ColorCorrectionCollection xmlns=\"urn:ASC:CDL:v1.2\"> <ColorCorrection id=\"cc03345\"> <SOPNode> <Slope> 0.9", "as t: f(t, 'rgb', False, 5, 20) save(t, f) def", "height=h, background=Color('white')) as p: with Drawing() as d: d.gravity =", "f(t, 'compareany') as r: save(r, f, ext='.gif') def test_constitute(self): f", "0, 00, 200, 200) save(t, f) def test_clamp(self): f =", "with self.rose.clone() as t: f(t, channel='red') save(t, f, True) def", "t: f(t, -30, 0, channel='red') save(t, f, True) def test_blur(self):", "id=\"cc03345\"> <SOPNode> <Slope> 0.9 1.2 0.5 </Slope> <Offset> 0.4 -0.5", "p: with Drawing() as d: d.gravity = 'center' d.fill_color =", "True) def test_scale(self): f = wpi.scale with self.rose.clone() as t:", "save(t, f, True) def test_clip(self): # NOTE: result is always", "1.0, 1.0]) save(t, f) def test_spread(self): f = wpi.spread with", "test_imagesize(self): with Drawing() as d: text = 'check' d.font =", "save(t, f) def test_raiseimage(self): f = wpi.raiseimage with self.rose.clone() as", "1.0) save(t, f) def test_posterize(self): f = wpi.posterize with self.rose.clone()", 
"as t: f(t, 20, 20, 0.5*t.quantum_range, channel='red') save(t, f, True)", "self.logo.clone() as t: f(t, 100, 100) save(t, f) def test_shear(self):", "= wpi.minify with self.rose.clone() as t: f(t) save(t, f) def", "wpi.add(t, w) # add image for blue channel wpi.setfirstiterator(t) #", "def test_raiseimage(self): f = wpi.raiseimage with self.rose.clone() as t: f(t,", "1 h = 1 channels = 'RGB' with Image(width=w, height=h,", "self.grad.destroy() self.logo.destroy() self.text.destroy() self.text_a.destroy() def test_adaptiveblur(self): f = wpi.adaptiveblur with", "wpi.combine with Image() as t: w = 100 h =", "200) save(t, f) def test_clamp(self): f = wpi.clamp # TODO:", "w, h, channels, 'double') self.assertEqual(r[0], 1.0) self.assertEqual(r[1], 0.0) self.assertEqual(r[2], 0.0)", "3, channel='red') save(t, f, True) def test_sketch(self): f = wpi.sketch", "with self.grad.clone() as t: f(t, p) save(t, f) with self.grad.clone()", "d.fill_color = Color('black') d.text(0, 0, 'Watch\\nthe\\nPidgeon') d(p) with f(t, p,", "kernel = [1/16, 2/16, 1/16, 2/16, 4/16, 2/16, 1/16, 2/16,", "t: with Image(width=50, height=50, background=Color('red')) as p: wpi.add(t, p) with", "= wpi.smush def makeletter(letter, w, h): img = Image(width=w, height=h)", "def test_contrast(self): f = wpi.contrast with self.rose.clone() as t: f(t,", "t.gravity = 'north_west' f(t, 0, 00, 200, 200) save(t, f)", "= 'Arial' d.font_size = 24 d.gravity = 'center' d.text(0, 0,", "p) as q: save(q, f) def test_swirl(self): f = wpi.swirl", "function.__name__ + ext # print(path) img.save(filename=path) class CheckImage(unittest.TestCase): @classmethod def", "test_splice(self): f = wpi.splice with self.rose.clone() as t: t.gravity =", "an image which has clipping path with self.rose.clone() as t:", "5, 20) save(t, f) def test_selectiveblur(self): f = wpi.selectiveblur with", "t.sequence[0].clone() as mag: with t.sequence[1].clone() as phase: wpi.blur(mag, 0, 0.5)", "= 1 + 2 + 4 # R + G", "= Color('black') white = 
Color('white') with Image(width=w, height=w, background=black) as", "f = wpi.sigmoidalcontrast with self.rose.clone() as t: f(t, True, 3,", "self.rose.clone() as t: f(t, 0.4*t.quantum_range) save(t, f) with self.rose.clone() as", "wpi.decipher f(t, 'password') save(t, f) def test_deskew(self): f = wpi.deskew", "10, 20, 20) save(t, f) def test_wave(self): f = wpi.wave", "with self.logo.clone() as t: kernel = [ 0.5, 0.0, 0.0,", "t: f(t, 5.0, 3.0) save(t, f) with self.rose.clone() as t:", "setUpClass(self): os.mkdir(tmpdir) self.rose = Image(filename='rose:') self.grad = Image(filename='gradient:', width=400, height=400)", "test_cipher(self): f = wpi.encipher with self.rose.clone() as t: f(t, 'password')", "f(t) save(t, f) def test_edge(self): f = wpi.edge with self.logo.clone()", "test_fontsize(self): w = 100 h = 100 with Drawing() as", "wpi.sepiatone with self.rose.clone() as t: f(t, 0.5*t.quantum_range) save(t, f) def", "0.95)) save(t, f) with self.text_a.clone() as t: rng = t.quantum_range", "save(t, f, True) def test_shave(self): f = wpi.shave with self.logo.clone()", "as t: f(t, 5.0, 3.0) save(t, f) with self.rose.clone() as", "Image(width=h, height=h, background=white) as w: wpi.add(t, b) # add image", "in range(rows * columns): wpi.add(dst, base) tile = \"{0}x{1}+0+0\".format(columns, rows)", "f = wpi.sparsecolor with Image(width=100, height=100, background=Color('black')) as t: f(t,", "f) with self.rose.clone() as t: f(t, 3, 3, kernel, channel='red')", "Color('red'), 0, 10) save(t, f) def test_sigmoidalcontrast(self): f = wpi.sigmoidalcontrast", "wpi.convolve kernel = [1/16, 2/16, 1/16, 2/16, 4/16, 2/16, 1/16,", "f = wpi.charcoal with self.rose.clone() as t: f(t, 5, 1)", "save(t, f) with self.logo.clone() as t: f(t, Color('red'), Color('blue'), 1.0,", "= wpi.blackthreshold with self.grad.clone() as t: f(t, Color('gray(50%)')) save(t, f)", "= wpi.affinetransform with self.rose.clone() as t: with Drawing() as d:", "f = wpi.montage with self.rose.clone() as 
base: with Image() as", "calcSuitableFontsize, calcSuitableImagesize import os import unittest tmpdir = '_tmp/' def", "60) with f(t, 5) as q: save(q, f, ext='.gif') def", "wpi.brightnesscontrast with self.rose.clone() as t: f(t, -30, 0) save(t, f)", "f) def test_selectiveblur(self): f = wpi.selectiveblur with self.logo.clone() as t:", "-3) as p: save(p, f) def test_solarize(self): f = wpi.solarize", "as d: f(t, d, 1.0) save(t, f) def test_posterize(self): f", "self.rose = Image(filename='rose:') self.grad = Image(filename='gradient:', width=400, height=400) self.logo =", "t: f(t, Color('gray(50%)')) save(t, f) class CheckTextUtil(unittest.TestCase): def test_imagesize(self): with", "metric='absolute') save(c, f) c.destroy() with self.rose.clone() as t: with t.clone()", "\"frame\" with Drawing() as d: with f(dst, d, tile, thumb,", "import Drawing from wand.color import Color import wandplus.image as wpi", "2/16, 1/16] with self.rose.clone() as t: f(t, 3, kernel) save(t,", "self.rose.clone() as t: # TODO: add speckle noise f(t) save(t,", "def test_charcoal(self): f = wpi.charcoal with self.rose.clone() as t: f(t,", "as sample save(t, f) def test_despeckle(self): f = wpi.despeckle with", "0, 255, 0] f(t, 1, 1, w, h, 'RGB', 'char',", "image for blue channel wpi.setfirstiterator(t) # rewind the index pointer", "self.logo.clone() as t: f(t, 0, 3) save(t, f) def test_enhance(self):", "with self.rose.clone() as t: with Drawing() as d: d.affine([2.0, 0.0,", "'RGB' with Image(width=w, height=h, background=Color('red')) as t: r = wpi.exportpixels(t,", "save(t, f) class CheckTextUtil(unittest.TestCase): def test_imagesize(self): with Drawing() as d:", "path = tmpdir + function.__name__ + \"_ch\" + ext else:", "1.0, -2.0, 0.0, 2.0, -1.0, 0.0, 1.0, ] with self.rose.clone()", "#!/usr/bin/env python from wand.image import Image from wand.drawing import Drawing", "as t: kernel = [ 0.5, 0.0, 0.0, 0.0, 0.0,", "wandplus.textutil import calcSuitableFontsize, calcSuitableImagesize 
import os import unittest tmpdir =", "h = 2 b = [0, 0, 0, 255, 255,", "q: save(q, f) def test_thumbnail(self): f = wpi.thumbnail with self.logo.clone()", "0, 0.5) # as degradation t2 = mag f(t2, phase,", "f(t, dpi, dpi, 'lanczos', 1.0) save(t, f) def test_roll(self): f", "f(t, int(t.width*1.5), int(t.height*2.0)) save(t, f) def test_adaptivesharpen(self): f = wpi.adaptivesharpen", "self.rose.clone() as t: f(t, p) save(t, f) with self.rose.clone() as", "save(t, f) def test_texture(self): f = wpi.texture with Image(width=300, height=200)", "def test_adaptivesharpen(self): f = wpi.adaptivesharpen with self.rose.clone() as t: f(t,", "f) c.destroy() with self.rose.clone() as t: with t.clone() as p:", "save(t, f) with self.grad.clone() as t: f(t, 'o4x4,3,3', channel='red') save(t,", "def test_sepiatone(self): f = wpi.sepiatone with self.rose.clone() as t: f(t,", "f) def test_texture(self): f = wpi.texture with Image(width=300, height=200) as", "= calcSuitableFontsize(d, text, width=w) print('calcSuitableImagesize[W]: ', fontsize) self.assertTrue(fontsize > 0)", "with f(t, channel) as q: save(q, f) def test_comment(self): f", "self.logo.clone() as t: f(t, 20, 20, 0.5*t.quantum_range, channel='red') save(t, f,", "save(t, f, True) def test_coalesce(self): # TODO: input optimized .gif", "= wpi.compare with self.rose.clone() as t: with t.clone() as p:", "Image(width=w, height=w, background=black) as b: with Image(width=h, height=h, background=white) as", "'north_west' f(t, 0, 00, 200, 200) save(t, f) def test_clamp(self):", "ext # print(path) img.save(filename=path) class CheckImage(unittest.TestCase): @classmethod def setUpClass(self): os.mkdir(tmpdir)", "= wpi.adaptivethreshold with self.logo.clone() as t: f(t, 20, 20, int(0.1*t.quantum_range))", "0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0,", "height=25, background=Color('green1')) as q: for i in range(4): with q.clone()", "test_importpixels(self): f = wpi.importpixels with Image(width=4, height=4, 
background=Color('red')) as t:", "with self.rose.clone() as t: f(t, 5, 5) save(t, f) with", "f = wpi.smush def makeletter(letter, w, h): img = Image(width=w,", "'center' f(t, t.width//2, t.height//2, 20, 20) save(t, f) def test_sparsecolor(self):", "wpi.posterize with self.rose.clone() as t: f(t, 3, True) save(t, f)", "f(t, p) as q: save(q, f) def test_swirl(self): f =", "save(t, f) def test_clippath(self): # NOTE: result is always FAILED.", "as t: f(t, p) save(t, f) with self.rose.clone() as t:", "as t: f(t, True, 3, 3) save(t, f) with self.rose.clone()", "as t: f(t, 'o4x4,3,3') save(t, f) with self.grad.clone() as t:", "p: save(p, f) def test_colordecisionlist(self): xml = \"\"\" <ColorCorrectionCollection xmlns=\"urn:ASC:CDL:v1.2\">", "height=60) with Drawing() as draw: draw.font = 'Arial' draw.font_size =", "draw.text(0, 0, 'A') draw(self.text_a) self.rose.save(filename=tmpdir + 'rose.png') self.grad.save(filename=tmpdir + 'grad.png')", "Drawing() as d: f(t, d, 1.0) save(t, f) def test_posterize(self):", "f(t, Color('red'), Color('gray(25%)')) save(t, f) def test_colormatrix(self): f = wpi.colormatrix", "self.rose.clone() as t: with Drawing() as d: d.affine([2.0, 0.0, 0.0,", "def test_haldclut(self): f = wpi.haldclut # TODO: more useful code", "def test_shave(self): f = wpi.shave with self.logo.clone() as t: f(t,", "self.grad.clone() as t: f(t, Color('red'), 0, 10) save(t, f) def", "NOTE: result is always FAILED. 
f = wpi.clip # I", "Image(filename='hald:12') as p: with self.rose.clone() as t: f(t, p) save(t,", "def test_tint(self): f = wpi.tint with self.rose.clone() as t: f(t,", "as t: f(t) save(t, f) with self.rose.clone() as t: f(t,", "self.rose.clone() as t: f(t, 45, channel='red') save(t, f, True) def", "f(t, 3, 3, kernel) save(t, f) with self.rose.clone() as t:", "self.rose.clone() as t: f(t, Color('rgb'), Color('gray(25%)')) save(t, f) def test_vignette(self):", "= 'north_west' f(t, 0, 00, 200, 200) save(t, f) def", "f) def test_swirl(self): f = wpi.swirl with self.rose.clone() as t:", "as p: (c, d) = f(t, p, metric='absolute') save(c, f)", "wpi.magnify with self.rose.clone() as t: f(t) save(t, f) def test_minify(self):", "with self.grad.clone() as t: f(t, Color('gray(50%)')) save(t, f) def test_blueshift(self):", "t: f(t, 'gaussian') save(t, f) with self.grad.clone() as t: f(t,", "0.05), int(rng * 0.95), channel='red') save(t, f, True) def test_remap(self):", "f(t, 'dilate', 1, 'Diamond') save(t, f) with self.logo.clone() as t:", "t: f(t, 'hello') save(t, f) def test_localcontrast(self): f = wpi.localcontrast", "dpi, dpi, 'lanczos', 1.0) save(t, f) def test_roll(self): f =", "with self.rose.clone() as t: with t.clone() as p: (c, d)", "tile, thumb, mode, frame) as result: save(result, f) def test_morph(self):", "as t: f(t, 3, kernel) save(t, f) with self.rose.clone() as", "10, 45) save(t, f) def test_smush(self): f = wpi.smush def", "file. f = wpi.coalesce with Image() as t: with self.rose.clone()", "'Arial' draw.font_size = 50 draw.gravity = 'center' draw.fill_color = Color('white')", "f = wpi.extent with self.rose.clone() as t: t.gravity = 'center'", "p) save(t, f) with self.rose.clone() as t: f(t, p, channel='red')", "NOTE: result is always FAILED. 
f = wpi.clippath with self.rose.clone()", "with Image(width=80, height=40, background=Color('black')) as t: f(t, 0.5*t.quantum_range) # TODO:", "channel='red') save(t, f, True) def test_implode(self): f = wpi.implode with", "wpi.setdelay(t, 60) with f(t, 5) as q: save(q, f, ext='.gif')", "q.read(filename='stegano:' + tmpfile) save(q, f) except Exception: raise finally: os.remove(tmpfile)", "with self.rose.clone() as t: f(t) save(t, f) with self.rose.clone() as", "with self.rose.clone() as t: f(t, 0.4*t.quantum_range) save(t, f) with self.rose.clone()", "with Image(width=w, height=h, background=Color('white')) as p: with Drawing() as d:", "f(t, 5, 5) save(t, f) with self.rose.clone() as t: f(t,", "self.rose.clone() as t: t.gravity = 'center' t.background_color = Color('blue') f(t,", "-2.0, 0.0, 2.0, -1.0, 0.0, 1.0, ] with self.rose.clone() as", "f(t, 'rgb', False, 5, 20) save(t, f) def test_selectiveblur(self): f", "f(t, xml) save(t, f) def test_colorize(self): f = wpi.colorize with", "def test_clamp(self): f = wpi.clamp # TODO: more useful code", "t: f(t, 5.0, 3.0, channel='red') save(t, f, True) def test_adaptiveresize(self):", "I don't have an image which has clipping path with", "1.5 </Power> </SOPNode> <SATNode> <Saturation> 0.85 </Saturation> </SATNode> </ColorCorrection> </ColorCorrectionCollection>", "with Image(width=4, height=4, background=Color('red')) as t: w = 2 h", "t.gravity = 'center' t.background_color = Color('blue') f(t, -10, -10, t.width+20,", "# NOTE: result is always FAILED. f = wpi.clippath with", "Image(filename='plasma:', width=100, height=100) as t: f(t) save(t, f) def test_equalize(self):", "wpi.implode with self.rose.clone() as t: f(t, 1.0) save(t, f) def", "200) save(t, f) def test_whitethreshold(self): f = wpi.whitethreshold with self.grad.clone()", "f) class CheckTextUtil(unittest.TestCase): def test_imagesize(self): with Drawing() as d: text", "FAILED. 
f = wpi.clip # I don't have an image", "tearDownClass(self): self.rose.destroy() self.grad.destroy() self.logo.destroy() self.text.destroy() self.text_a.destroy() def test_adaptiveblur(self): f =", "= wpi.edge with self.logo.clone() as t: f(t, 3) save(t, f)", "frame) as result: save(result, f) def test_morph(self): f = wpi.morph", "255, 0] f(t, 1, 1, w, h, 'RGB', 'char', b)", "= wpi.splice with self.rose.clone() as t: t.gravity = 'center' f(t,", "def test_implode(self): f = wpi.implode with self.rose.clone() as t: f(t,", "test_addnoise(self): f = wpi.addnoise with self.grad.clone() as t: f(t, 'gaussian')", "-1.0, 0.0, 1.0, -2.0, 0.0, 2.0, -1.0, 0.0, 1.0, ]", "self.logo.clone() as t: f(t, True, 45, 135) save(t, f) def", "f = wpi.convolve kernel = [1/16, 2/16, 1/16, 2/16, 4/16,", "\"\"\" <ColorCorrectionCollection xmlns=\"urn:ASC:CDL:v1.2\"> <ColorCorrection id=\"cc03345\"> <SOPNode> <Slope> 0.9 1.2 0.5", "test_floodfillpaint(self): f = wpi.floodfillpaint with self.logo.clone() as t: f(t, Color('green'),", "save(t, f) def test_tint(self): f = wpi.tint with self.rose.clone() as", "def test_blackthreshold(self): f = wpi.blackthreshold with self.grad.clone() as t: f(t,", "f, True) def test_shave(self): f = wpi.shave with self.logo.clone() as", "channel='red') save(t, f, True) def test_separate_channel(self): f = wpi.separate_channel with", "t: f(t) save(t, f) with self.rose.clone() as t: f(t, channel='red')", "save(t, f, True) def test_splice(self): f = wpi.splice with self.rose.clone()", "h, channels, 'double') self.assertEqual(r[0], 1.0) self.assertEqual(r[1], 0.0) self.assertEqual(r[2], 0.0) def", "= wpi.polaroid with self.logo.clone() as t: with Drawing() as d:", "0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0,", "with self.rose.clone() as t: f(t, 10, 10, 10, 10, True)", "test_adaptivethreshold(self): f = wpi.adaptivethreshold with self.logo.clone() as t: f(t, 20,", "self.logo.clone() as t: f(t, 10, 10, 45) save(t, f) def", "0] f(t, w, h, 'RGB', 'char', b) save(t, 
f) def", "t: f(t, t.width*2, t.height*2) save(t, f) def test_segment(self): f =", "Drawing() as d: text = 'check' d.font = 'Arial' fontsize", "= 100 h = 100 black = Color('black') white =", "return img with Image() as t: with makeletter('A', 50, 30)", "f = wpi.magnify with self.rose.clone() as t: f(t) save(t, f)", "blue channel wpi.setfirstiterator(t) # rewind the index pointer channel =", "True, 45, 135) save(t, f) def test_shadow(self): f = wpi.shadow", "0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.5, 0.0, 0.0, 0.0,", "kernel) save(t, f) with self.rose.clone() as t: f(t, 3, 3,", "def test_roll(self): f = wpi.roll with self.rose.clone() as t: f(t,", "w) # add image for blue channel wpi.setfirstiterator(t) # rewind", "f) def test_statistic(self): f = wpi.statistic with self.rose.clone() as t:", "as t: f(t, 10, 10) save(t, f) def test_rotationalblur(self): f", "f) # includes two images(magnitude&phase) f = wpi.inversefouriertransform with t.sequence[0].clone()", "as t: f(t, '#1', True) save(t, f) def test_clut(self): f", "build option '--with-fftw' with self.logo.clone() as t: # I couldn't", "save(t, f) def test_rotationalblur(self): f = wpi.rotationalblur with self.rose.clone() as", "t.background_color = Color('black') f(t, 0, 10, 20, 20) save(t, f)", "as q: save(q, f) def test_comment(self): f = wpi.comment with", "self.rose.clone() as t: f(t, 45) save(t, f) with self.rose.clone() as", "makeletter('A', 50, 30) as a: with makeletter('B', 50, 30) as", "skewed image as sample save(t, f) def test_despeckle(self): f =", "save(t, f) with self.rose.clone() as t: f(t, 5, 5, channel='red')", "'check' d.font = 'Arial' d.font_size = 36 size = calcSuitableImagesize(d,", "t: f(t, channel='red') save(t, f, True) def test_autolevel(self): f =", "print(path) img.save(filename=path) class CheckImage(unittest.TestCase): @classmethod def setUpClass(self): os.mkdir(tmpdir) self.rose =", "f) with self.logo.clone() as t: f(t, 'dilate', 1, 'Diamond', channel='red')", "f) def test_roll(self): f = 
wpi.roll with self.rose.clone() as t:", ".gif file. f = wpi.coalesce with Image() as t: with", "save(t2, f) def test_haldclut(self): f = wpi.haldclut # TODO: more", "f(t, 'password') save(t, f) f = wpi.decipher f(t, 'password') save(t,", "[ 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 1.5, 0.0, 0.0,", "input optimized .gif file. f = wpi.coalesce with Image() as", "36 size = calcSuitableImagesize(d, text) print('calcSuitableImagesize: ', size) self.assertTrue(size[0] >", "t: f(t) save(t, f) def test_clippath(self): # NOTE: result is", "as t: f(t) save(t, f) def test_clippath(self): # NOTE: result", "with self.logo.clone() as t: f(t, True, 45, 135) save(t, f)", "t: f(t, channel='red') save(t, f, True) def test_exportpixels(self): w =", "test_haldclut(self): f = wpi.haldclut # TODO: more useful code with", "test_oilpaint(self): f = wpi.oilpaint with self.rose.clone() as t: f(t, 2.0)", "self.rose.clone() as t: f(t, t.width*2, t.height*2) save(t, f) def test_segment(self):", "= wpi.solarize with self.rose.clone() as t: f(t, 0.4*t.quantum_range) save(t, f)", "draw.fill_color = Color('white') draw.stroke_color = Color('black') draw.text(0, 0, 'A') draw(self.text_a)", "def test_colordecisionlist(self): xml = \"\"\" <ColorCorrectionCollection xmlns=\"urn:ASC:CDL:v1.2\"> <ColorCorrection id=\"cc03345\"> <SOPNode>", "f = wpi.sepiatone with self.rose.clone() as t: f(t, 0.5*t.quantum_range) save(t,", "t: f(t, True, 45, 135) save(t, f) def test_shadow(self): f", "= wpi.clamp # TODO: more useful code with self.rose.clone() as", "with self.rose.clone() as t: f(t, 10, 10) save(t, f) def", "save(t, f, True) def test_adaptiveresize(self): f = wpi.adaptiveresize with self.rose.clone()", "f(t, 5, 5, channel='red') save(t, f, True) def test_adaptivethreshold(self): f", "f = wpi.autogamma with self.rose.clone() as t: f(t) save(t, f)", "kernel, channel='red') save(t, f, True) def test_cyclecolormap(self): f = wpi.cyclecolormap", "= Color('black') f(t, 0, 10, 20, 20) save(t, f) def", 
"CheckTextUtil(unittest.TestCase): def test_imagesize(self): with Drawing() as d: text = 'check'", "def test_cipher(self): f = wpi.encipher with self.rose.clone() as t: f(t,", "False, channel='blue') save(t, f, True) def test_orderedposterize(self): f = wpi.orderedposterize", "Color('gray(25%)')) save(t, f) def test_vignette(self): f = wpi.vignette with self.logo.clone()", "def test_morph(self): f = wpi.morph color = Color('white') with self.rose.clone()", "0.4*t.quantum_range) save(t, f) with self.rose.clone() as t: f(t, 0.4*t.quantum_range, channel='red')", "True) def test_splice(self): f = wpi.splice with self.rose.clone() as t:", "test_polaroid(self): f = wpi.polaroid with self.logo.clone() as t: with Drawing()", "wpi.setfirstiterator(t) # rewind the index pointer channel = 1 +", "image as sample save(t, f) def test_despeckle(self): f = wpi.despeckle", "f, True) def test_adaptiveresize(self): f = wpi.adaptiveresize with self.rose.clone() as", "'gradient', 4, 4, channel='red') save(t, f, True) def test_stegano(self): f", "45, channel='red') save(t, f, True) def test_oilpaint(self): f = wpi.oilpaint", "wpi.colordecisionlist with self.rose.clone() as t: f(t, xml) save(t, f) def", "channel='red') save(c, f, True) c.destroy() def test_comparelayer(self): f = wpi.comparelayer", "else: path = tmpdir + function.__name__ + ext # print(path)", "f(t, 3) save(t, f) def test_emboss(self): f = wpi.emboss with", "f = wpi.encipher with self.rose.clone() as t: f(t, 'password') save(t,", "h = 100 black = Color('black') white = Color('white') with", "self.grad = Image(filename='gradient:', width=400, height=400) self.logo = Image(filename='logo:') self.text =", "with self.rose.clone() as t: f(t, 0, 3, channel='red') save(t, f,", "as t: f(t, 'dilate', 1, 'Diamond') save(t, f) with self.logo.clone()", "def test_adaptiveblur(self): f = wpi.adaptiveblur with self.rose.clone() as t: f(t,", "4/16, 2/16, 1/16, 2/16, 1/16] with self.rose.clone() as t: f(t,", "t: with makeletter('A', 50, 
30) as a: with makeletter('B', 50,", "TODO: add speckle noise f(t) save(t, f) def test_edge(self): f", "f) def test_contrast(self): f = wpi.contrast with self.rose.clone() as t:", "1.0, False, channel='blue') save(t, f, True) def test_orderedposterize(self): f =", "as t: f(t, 5) save(t, f) def test_cipher(self): f =", "* 0.95)) save(t, f) with self.text_a.clone() as t: rng =", "= wpi.spread with self.logo.clone() as t: f(t, 20) save(t, f)", "for i in range(5): wpi.blur(p, 0, 1) wpi.add(t, p) with", "f(t, 3, kernel) save(t, f) with self.rose.clone() as t: f(t,", "= wpi.sparsecolor with Image(width=100, height=100, background=Color('black')) as t: f(t, 'default_channels',", "save(t, f) def test_compare(self): f = wpi.compare with self.rose.clone() as", "option '--with-fftw' with self.logo.clone() as t: # I couldn't build", "t: f(t) save(t, f) def test_equalize(self): f = wpi.equalize with", "background=black) as b: with Image(width=h, height=h, background=white) as w: wpi.add(t,", "t: # TODO: add speckle noise f(t) save(t, f) def", "'dilate', 1, 'Diamond', channel='red') save(t, f, True) def test_motionblur(self): f", "f = wpi.randomthreshold with self.text_a.clone() as t: rng = t.quantum_range", "= wpi.blur with self.rose.clone() as t: f(t, 0, 3) save(t,", "t.width*2, t.height*2) save(t, f) def test_segment(self): f = wpi.segment with", "with self.logo.clone() as t: f(t, 3) save(t, f) def test_emboss(self):", "test_clip(self): # NOTE: result is always FAILED. 
f = wpi.clip", "with Image(width=w, height=w, background=black) as b: with Image(width=h, height=h, background=white)", "f) with self.grad.clone() as t: f(t, p, channel='green') save(t, f,", "= wpi.cyclecolormap with self.logo.clone() as t: f(t, 5) save(t, f)", "q: save(q, f) def test_swirl(self): f = wpi.swirl with self.rose.clone()", "f = wpi.combine with Image() as t: w = 100", "t: f(t, 3, kernel, channel='red') save(t, f, True) def test_cyclecolormap(self):", "f = wpi.cyclecolormap with self.logo.clone() as t: f(t, 5) save(t,", "f = wpi.forwardfouriertransform # require IM build option '--with-fftw' with", "= wpi.combine with Image() as t: w = 100 h", "draw: draw.font = 'Arial' draw.font_size = 50 draw.gravity = 'center'", "= 40 offset = 15 tmpfile = 'tmp.png' with Image(width=w,", "background=Color('green1')) as q: for i in range(4): with q.clone() as", "f) def test_opaquepaint(self): f = wpi.opaquepaint with self.logo.clone() as t:", "as t: # I couldn't build on Windows... f(t, True)", "self.text.clone() as p: p.negate() f(p, 100, 2, 10, 10) t.composite_channel('default_channels',", "save(t, f) def test_sharpen(self): f = wpi.sharpen with self.rose.clone() as", "f) with self.rose.clone() as t: f(t, channel='red') save(t, f, True)", "<SOPNode> <Slope> 0.9 1.2 0.5 </Slope> <Offset> 0.4 -0.5 0.6", "as t: with Drawing() as d: d.affine([2.0, 0.0, 0.0, 2.0,", "def test_clippath(self): # NOTE: result is always FAILED. f =", "1/16, 2/16, 4/16, 2/16, 1/16, 2/16, 1/16] with self.rose.clone() as" ]
[ "half precision assert ek.allclose(normals[0:3], [-1, 0, 0]) assert ek.allclose(normals[3:6], [-1,", "m.parameters_changed() assert str(m) == \"\"\"Mesh[ name = \"MyMesh\", bbox =", "mitsuba.python.test.util import fresolver_append_path from mitsuba.python.util import traverse def test01_create_mesh(variant_scalar_rgb): from", "== \"\"\"Mesh[ name = \"MyMesh\", bbox = BoundingBox3f[ min =", "from mitsuba.core.xml import load_string def test(): shape = load_string(\"\"\" <shape", "value=\"data/triangle.ply\"/> <boolean name=\"face_normals\" value=\"true\"/> </shape> \"\"\") positions = m.vertex_positions_buffer() faces", "(uv0, uv2, uv3) = [texcoords[i*2:(i+1)*2] for i in [0, 2,", "= BoundingBox3f[ min = [0, 0, 0], max = [0,", "m.add_attribute(\"vertex_color\", 3)[:] = [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0,", "positions = m.vertex_positions_buffer() faces = m.faces_buffer() assert not m.has_vertex_normals() assert", "<shape type=\"{0}\" version=\"2.0.0\"> <string name=\"filename\" value=\"resources/data/tests/{0}/cbox_smallbox.{0}\"/> </shape> \"\"\".format(mesh_format)) positions =", "type=\"ply\" version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle.ply\"/> </shape> \"\"\") normals = shape.vertex_normals_buffer()", "faces = [24 B of face data], disable_vertex_normals = 0,", "<shape type=\"ply\" version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle_face_colors.ply\"/> </shape> \"\"\") assert str(m)", "0, -a, 1, 0, a, 1, 0, -b, 0, 1,", "= Mesh(\"MyMesh\", 5, 2, has_vertex_normals=True) vertices = m.vertex_positions_buffer() normals =", "0] m.parameters_changed() assert str(m) == \"\"\"Mesh[ name = \"MyMesh\", bbox", "<shape type=\"{0}\" version=\"2.0.0\"> <string name=\"filename\" value=\"resources/data/tests/{0}/rectangle_{1}.{0}\" /> <boolean name=\"face_normals\" value=\"{2}\"", "3 floats ] ]\"\"\" def test08_mesh_add_attribute(variant_scalar_rgb): from mitsuba.core import Struct,", "= Vector3f(0.0, 1.0, 0.0) angle_0 = ek.pi / 2.0 
angle_1", "from mitsuba.core.xml import load_string \"\"\"Checks(automatic) vertex normal computation for a", "for a PLY file that doesn't have them.\"\"\" shape =", "= load_string(\"\"\" <shape type=\"ply\" version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle.ply\"/> </shape> \"\"\")", "ek.allclose(n, [0.0, 1.0, 0.0]) return fresolver_append_path(test)() @fresolver_append_path def test07_ply_stored_attribute(variant_scalar_rgb): from", "</shape> \"\"\".format(mesh_format, features, str(face_normals).lower())) assert shape.has_vertex_normals() == (not face_normals) positions", "from mitsuba.core.xml import load_string \"\"\"Tests the OBJ and PLY loaders", "load_string(\"\"\" <shape type=\"ply\" version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle_face_colors.ply\"/> </shape> \"\"\") assert", "Mesh(\"MyMesh\", 3, 2) m.vertex_positions_buffer()[:] = [0.0, 0.0, 0.0, 1.0, 0.2,", "from mitsuba.python.util import traverse def test01_create_mesh(variant_scalar_rgb): from mitsuba.core import Struct,", "as np \"\"\"Tests the weighting scheme that is used to", "mesh_format in [\"obj\", \"ply\"]: shape = load_string(\"\"\" <shape type=\"{0}\" version=\"2.0.0\">", "== \"\"\"PLYMesh[ name = \"triangle_face_colors.ply\", bbox = BoundingBox3f[ min =", "[72 B of vertex data], face_count = 2, faces =", "def test04_normal_weighting_scheme(variant_scalar_rgb): from mitsuba.core import Struct, float_dtype, Vector3f from mitsuba.render", "atol=1e-3) assert ek.allclose(uv3, [0.950589, 1-0.689127], atol=1e-3) else: assert ek.allclose(uv0, [0.950589,", "4] m.recompute_vertex_normals() for i in range(5): assert ek.allclose(normals[i*3:(i+1)*3], n[:, i],", "surface_area = 0, mesh attributes = [ face_color: 3 floats", "1.0, 0.5 vertices[:] = [0, 0, 0, -a, 1, 0,", "5.0) n2 = n0 * angle_0 + n1 * angle_1", "a simple example.\"\"\" for mesh_format in [\"obj\", \"ply\"]: shape =", "a PLY file that doesn't have them.\"\"\" shape = load_string(\"\"\"", "atol=1e-3) assert 
ek.allclose(uv2, [0.025105, 1-0.689127], atol=1e-3) assert ek.allclose(uv3, [0.950589, 1-0.689127],", "mitsuba.core import Vector3f from mitsuba.core.xml import load_string m = load_string(\"\"\"", "bbox = BoundingBox3f[ min = [0, 0, 0], max =", "import fresolver_append_path from mitsuba.python.util import traverse def test01_create_mesh(variant_scalar_rgb): from mitsuba.core", "'uv', 'normals_uv']) @pytest.mark.parametrize('face_normals', [True, False]) def test06_load_various_features(variant_scalar_rgb, mesh_format, features, face_normals):", "vertex normal computation for a PLY file that doesn't have", "1] ], vertex_count = 3, vertices = [72 B of", "n0 * angle_0 + n1 * angle_1 n2 /= ek.norm(n2)", "1, 2, 0] m.parameters_changed() assert str(m) == \"\"\"Mesh[ name =", "]\"\"\" def test08_mesh_add_attribute(variant_scalar_rgb): from mitsuba.core import Struct, float_dtype from mitsuba.render", "assert ek.allclose(normals[6:9], [-1, 0, 0]) def test04_normal_weighting_scheme(variant_scalar_rgb): from mitsuba.core import", "\"\"\") positions = m.vertex_positions_buffer() faces = m.faces_buffer() assert not m.has_vertex_normals()", "that doesn't have them.\"\"\" shape = load_string(\"\"\" <shape type=\"ply\" version=\"0.5.0\">", "= [ face_color: 3 floats ] ]\"\"\" def test08_mesh_add_attribute(variant_scalar_rgb): from", "= [0.0, 0.0, 0.0, 1.0, 0.2, 0.0, 0.2, 1.0, 0.0]", "etc. 
\"\"\" from mitsuba.core.xml import load_string def test(): shape =", "@fresolver_append_path def test07_ply_stored_attribute(variant_scalar_rgb): from mitsuba.core import Vector3f from mitsuba.core.xml import", "normals = m.vertex_normals_buffer() a, b = 1.0, 0.5 vertices[:] =", "test03_ply_computed_normals(variant_scalar_rgb): from mitsuba.core import Vector3f from mitsuba.core.xml import load_string \"\"\"Checks(automatic)", "] ]\"\"\" def test08_mesh_add_attribute(variant_scalar_rgb): from mitsuba.core import Struct, float_dtype from", "1.0, 0.0] assert str(m) == \"\"\"Mesh[ name = \"MyMesh\", bbox", "= ek.acos(3.0 / 5.0) n2 = n0 * angle_0 +", "is flipped. if mesh_format in ['obj', 'serialized']: assert ek.allclose(uv0, [0.950589,", "(v0, v2, v3) = [positions[i*3:(i+1)*3] for i in [0, 2,", "from mitsuba.core import Vector3f from mitsuba.core.xml import load_string \"\"\"Checks(automatic) vertex", "-b, 0, 1, b, 0, 1] n0 = Vector3f(0.0, 0.0,", "test02_ply_triangle(variant_scalar_rgb): from mitsuba.core import UInt32, Vector3f from mitsuba.core.xml import load_string", "* angle_0 + n1 * angle_1 n2 /= ek.norm(n2) n", "def test06_load_various_features(variant_scalar_rgb, mesh_format, features, face_normals): \"\"\"Tests the OBJ & PLY", "Vector3f(0.0, 1.0, 0.0) angle_0 = ek.pi / 2.0 angle_1 =", "\"\"\"Mesh[ name = \"MyMesh\", bbox = BoundingBox3f[ min = [0,", "m.has_vertex_normals() assert ek.slices(positions) == 9 assert ek.allclose(positions[0:3], [0, 0, 0])", "from mitsuba.core import Struct, float_dtype from mitsuba.render import Mesh m", "5, 2, has_vertex_normals=True) vertices = m.vertex_positions_buffer() normals = m.vertex_normals_buffer() a,", "assert ek.allclose(positions[6:9], [0, 1, 0]) assert ek.slices(faces) == 3 assert", "ek.allclose(positions[3:6], [0, 0, 1]) assert ek.allclose(positions[6:9], [0, 1, 0]) assert", "0, a, 1, 0, -b, 0, 1, b, 0, 1]", "</shape> \"\"\".format(mesh_format)) positions = shape.vertex_positions_buffer() faces = 
shape.faces_buffer() assert shape.has_vertex_normals()", "= 0.96, mesh attributes = [ vertex_color: 3 floats ]", "load_string m = load_string(\"\"\" <shape type=\"ply\" version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle.ply\"/>", "v2, v3) = [positions[i*3:(i+1)*3] for i in [0, 2, 3]]", "[0, 0, 0, -a, 1, 0, a, 1, 0, -b,", "0], max = [1, 1, 0] ], vertex_count = 3,", "generated from OBJ), UV.y is flipped. if mesh_format in ['obj',", "else: assert ek.allclose(uv0, [0.950589, 0.988416], atol=1e-3) assert ek.allclose(uv2, [0.025105, 0.689127],", "[0, 1, 1] ], vertex_count = 3, vertices = [72", "== UInt32(2) @fresolver_append_path def test03_ply_computed_normals(variant_scalar_rgb): from mitsuba.core import Vector3f from", "data], face_count = 1, faces = [24 B of face", "36 assert ek.allclose(faces[6:9], [4, 5, 6]) assert ek.allclose(positions[:5], [130, 165,", "</shape> \"\"\") normals = shape.vertex_normals_buffer() assert shape.has_vertex_normals() # Normals are", "1.0, 0.0] m.faces_buffer()[:] = [0, 1, 2, 1, 2, 0]", "[72 B of vertex data], face_count = 1, faces =", "PLY loaders on a simple example.\"\"\" for mesh_format in [\"obj\",", "UVs, etc. \"\"\" from mitsuba.core.xml import load_string def test(): shape", "are stored in half precision assert ek.allclose(normals[0:3], [-1, 0, 0])", "Float32 as Float from mitsuba.python.test.util import fresolver_append_path from mitsuba.python.util import", "m = Mesh(\"MyMesh\", 5, 2, has_vertex_normals=True) vertices = m.vertex_positions_buffer() normals", "= [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0]", "[0, 0, 0], max = [1, 1, 0] ], vertex_count", "angle_1 n2 /= ek.norm(n2) n = np.vstack([n2, n0, n0, n1,", "OBJs (and .serialized generated from OBJ), UV.y is flipped. 
if", "m = Mesh(\"MyMesh\", 3, 2) m.vertex_positions_buffer()[:] = [0.0, 0.0, 0.0,", "version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle_face_colors.ply\"/> </shape> \"\"\") assert str(m) == \"\"\"PLYMesh[", "have them.\"\"\" shape = load_string(\"\"\" <shape type=\"ply\" version=\"0.5.0\"> <string name=\"filename\"", "in range(5): assert ek.allclose(normals[i*3:(i+1)*3], n[:, i], 5e-4) @fresolver_append_path def test05_load_simple_mesh(variant_scalar_rgb):", "value=\"resources/data/tests/{0}/cbox_smallbox.{0}\"/> </shape> \"\"\".format(mesh_format)) positions = shape.vertex_positions_buffer() faces = shape.faces_buffer() assert", "m.vertex_positions_buffer()[:] = [0.0, 0.0, 0.0, 1.0, 0.2, 0.0, 0.2, 1.0,", "assert faces[0] == UInt32(0) assert faces[1] == UInt32(1) assert faces[2]", "in [0, 2, 3]]: assert ek.allclose(n, [0.0, 1.0, 0.0]) return", "np \"\"\"Tests the weighting scheme that is used to compute", "0] ], vertex_count = 3, vertices = [36 B of", ".serialized generated from OBJ), UV.y is flipped. 
if mesh_format in", "vertex_count = 3, vertices = [72 B of vertex data],", "mitsuba.core import Vector3f from mitsuba.core.xml import load_string \"\"\"Checks(automatic) vertex normal", "/ 5.0) n2 = n0 * angle_0 + n1 *", "[0.950589, 1-0.988416], atol=1e-3) assert ek.allclose(uv2, [0.025105, 1-0.689127], atol=1e-3) assert ek.allclose(uv3,", "atol=1e-3) assert ek.allclose(uv2, [0.025105, 0.689127], atol=1e-3) assert ek.allclose(uv3, [0.950589, 0.689127],", "0]) assert ek.slices(faces) == 3 assert faces[0] == UInt32(0) assert", "ek.allclose(normals[6:9], [-1, 0, 0]) def test04_normal_weighting_scheme(variant_scalar_rgb): from mitsuba.core import Struct,", "mitsuba.core import Struct, float_dtype from mitsuba.render import Mesh m =", "str(m) == \"\"\"Mesh[ name = \"MyMesh\", bbox = BoundingBox3f[ min", "0], max = [0, 1, 1] ], vertex_count = 3,", "</shape> \"\"\") positions = m.vertex_positions_buffer() faces = m.faces_buffer() assert not", "<boolean name=\"face_normals\" value=\"{2}\" /> </shape> \"\"\".format(mesh_format, features, str(face_normals).lower())) assert shape.has_vertex_normals()", "import load_string m = load_string(\"\"\" <shape type=\"ply\" version=\"0.5.0\"> <string name=\"filename\"", "0, 1, b, 0, 1] n0 = Vector3f(0.0, 0.0, -1.0)", "str(m) == \"\"\"PLYMesh[ name = \"triangle_face_colors.ply\", bbox = BoundingBox3f[ min", "\"\"\"Tests the OBJ & PLY loaders with combinations of vertex", "/ face normals, presence and absence of UVs, etc. 
\"\"\"", "min = [0, 0, 0], max = [0, 1, 1]", "PLY loaders with combinations of vertex / face normals, presence", "0] m.parameters_changed() m.add_attribute(\"vertex_color\", 3)[:] = [0.0, 1.0, 1.0, 0.0, 0.0,", "def test02_ply_triangle(variant_scalar_rgb): from mitsuba.core import UInt32, Vector3f from mitsuba.core.xml import", "= [0, 1, 2, 1, 2, 0] m.parameters_changed() assert str(m)", "load_string \"\"\"Checks(automatic) vertex normal computation for a PLY file that", "shape.has_vertex_normals() == (not face_normals) positions = shape.vertex_positions_buffer() normals = shape.vertex_normals_buffer()", "if 'uv' in features: assert shape.has_vertex_texcoords() (uv0, uv2, uv3) =", "== UInt32(0) assert faces[1] == UInt32(1) assert faces[2] == UInt32(2)", "[positions[i*3:(i+1)*3] for i in [0, 2, 3]] assert ek.allclose(v0, [-2.85,", "1.0, 0.2, 0.0, 0.2, 1.0, 0.0] m.faces_buffer()[:] = [0, 1,", "disable_vertex_normals = 0, surface_area = 0.96 ]\"\"\" @fresolver_append_path def test02_ply_triangle(variant_scalar_rgb):", "'normals_uv']) @pytest.mark.parametrize('face_normals', [True, False]) def test06_load_various_features(variant_scalar_rgb, mesh_format, features, face_normals): \"\"\"Tests", "name=\"filename\" value=\"resources/data/tests/{0}/rectangle_{1}.{0}\" /> <boolean name=\"face_normals\" value=\"{2}\" /> </shape> \"\"\".format(mesh_format, features,", "[0, 2, 3]]: assert ek.allclose(n, [0.0, 1.0, 0.0]) return fresolver_append_path(test)()", "[ 2.85, 0.0, -7.600000], atol=1e-3) if 'uv' in features: assert", "assert ek.slices(positions) == 72 assert ek.slices(faces) == 36 assert ek.allclose(faces[6:9],", "[1, 1, 0] ], vertex_count = 3, vertices = [72", "name=\"filename\" value=\"data/triangle.ply\"/> <boolean name=\"face_normals\" value=\"true\"/> </shape> \"\"\") positions = m.vertex_positions_buffer()", "vertices[:] = [0, 0, 0, -a, 1, 0, a, 1,", "ek.allclose(uv3, [0.950589, 1-0.689127], atol=1e-3) else: assert ek.allclose(uv0, [0.950589, 0.988416], atol=1e-3)", 
"2, faces = [24 B of face data], disable_vertex_normals =", "shape.faces_buffer() (v0, v2, v3) = [positions[i*3:(i+1)*3] for i in [0,", "0]) assert ek.allclose(normals[3:6], [-1, 0, 0]) assert ek.allclose(normals[6:9], [-1, 0,", "data], disable_vertex_normals = 0, surface_area = 0, mesh attributes =", "0.96, mesh attributes = [ vertex_color: 3 floats ] ]\"\"\"", "np.vstack([n2, n0, n0, n1, n1]).transpose() m.faces_buffer()[:] = [0, 1, 2,", "faces[0] == UInt32(0) assert faces[1] == UInt32(1) assert faces[2] ==", "value=\"data/triangle_face_colors.ply\"/> </shape> \"\"\") assert str(m) == \"\"\"PLYMesh[ name = \"triangle_face_colors.ply\",", "82, 165]) @pytest.mark.parametrize('mesh_format', ['obj', 'ply', 'serialized']) @pytest.mark.parametrize('features', ['normals', 'uv', 'normals_uv'])", "\"\"\") normals = shape.vertex_normals_buffer() assert shape.has_vertex_normals() # Normals are stored", "0.0, 1.0, 0.2, 0.0, 0.2, 1.0, 0.0] m.faces_buffer()[:] = [0,", "ek.allclose(uv3, [0.950589, 0.689127], atol=1e-3) if shape.has_vertex_normals(): for n in [normals[i*3:(i+1)*3]", "test05_load_simple_mesh(variant_scalar_rgb): from mitsuba.core.xml import load_string \"\"\"Tests the OBJ and PLY", "Struct, float_dtype, Vector3f from mitsuba.render import Mesh import numpy as", "], vertex_count = 3, vertices = [72 B of vertex", "doesn't have them.\"\"\" shape = load_string(\"\"\" <shape type=\"ply\" version=\"0.5.0\"> <string", "name=\"filename\" value=\"data/triangle.ply\"/> </shape> \"\"\") normals = shape.vertex_normals_buffer() assert shape.has_vertex_normals() #", "normals = shape.vertex_normals_buffer() texcoords = shape.vertex_texcoords_buffer() faces = shape.faces_buffer() (v0,", "face_count = 2, faces = [24 B of face data],", "= 0, surface_area = 0.96, mesh attributes = [ vertex_color:", "ek.allclose(positions[:5], [130, 165, 65, 82, 165]) @pytest.mark.parametrize('mesh_format', ['obj', 'ply', 'serialized'])", "72 assert ek.slices(faces) == 36 assert 
ek.allclose(faces[6:9], [4, 5, 6])", "in [0, 2, 3]] assert ek.allclose(v0, [-2.85, 0.0, -7.600000], atol=1e-3)", "ek.allclose(uv2, [0.025105, 0.689127], atol=1e-3) assert ek.allclose(uv3, [0.950589, 0.689127], atol=1e-3) if", "3, 4] m.recompute_vertex_normals() for i in range(5): assert ek.allclose(normals[i*3:(i+1)*3], n[:,", "\"\"\".format(mesh_format)) positions = shape.vertex_positions_buffer() faces = shape.faces_buffer() assert shape.has_vertex_normals() assert", "= 0, mesh attributes = [ face_color: 3 floats ]", "assert shape.has_vertex_normals() == (not face_normals) positions = shape.vertex_positions_buffer() normals =", "float_dtype from mitsuba.render import Mesh m = Mesh(\"MyMesh\", 3, 2)", "from mitsuba.render import Mesh import numpy as np \"\"\"Tests the", "texcoords = shape.vertex_texcoords_buffer() faces = shape.faces_buffer() (v0, v2, v3) =", "simple example.\"\"\" for mesh_format in [\"obj\", \"ply\"]: shape = load_string(\"\"\"", "0.689127], atol=1e-3) if shape.has_vertex_normals(): for n in [normals[i*3:(i+1)*3] for i", "<string name=\"filename\" value=\"data/triangle.ply\"/> <boolean name=\"face_normals\" value=\"true\"/> </shape> \"\"\") positions =", "ek.acos(3.0 / 5.0) n2 = n0 * angle_0 + n1", "UInt32(2) @fresolver_append_path def test03_ply_computed_normals(variant_scalar_rgb): from mitsuba.core import Vector3f from mitsuba.core.xml", "1, 0, a, 1, 0, -b, 0, 1, b, 0,", "def test01_create_mesh(variant_scalar_rgb): from mitsuba.core import Struct, float_dtype from mitsuba.render import", "n = np.vstack([n2, n0, n0, n1, n1]).transpose() m.faces_buffer()[:] = [0,", "0.988416], atol=1e-3) assert ek.allclose(uv2, [0.025105, 0.689127], atol=1e-3) assert ek.allclose(uv3, [0.950589,", "import Vector3f from mitsuba.core.xml import load_string m = load_string(\"\"\" <shape", "0.0, 0.0, 1.0, 1.0, 0.0] assert str(m) == \"\"\"Mesh[ name", "= \"MyMesh\", bbox = BoundingBox3f[ min = [0, 0, 0],", "0, surface_area = 0.96, mesh attributes = [ vertex_color: 
3", "= 1, faces = [24 B of face data], disable_vertex_normals", "\"triangle_face_colors.ply\", bbox = BoundingBox3f[ min = [0, 0, 0], max", "assert ek.allclose(v3, [ 2.85, 0.0, -7.600000], atol=1e-3) if 'uv' in", "uv3) = [texcoords[i*2:(i+1)*2] for i in [0, 2, 3]] #", "1, 2, 1, 2, 0] m.parameters_changed() m.add_attribute(\"vertex_color\", 3)[:] = [0.0,", "max = [1, 1, 0] ], vertex_count = 3, vertices", "shape.has_vertex_texcoords() (uv0, uv2, uv3) = [texcoords[i*2:(i+1)*2] for i in [0,", "from enoki.dynamic import Float32 as Float from mitsuba.python.test.util import fresolver_append_path", "1.0, 0.0) angle_0 = ek.pi / 2.0 angle_1 = ek.acos(3.0", "assert ek.allclose(uv3, [0.950589, 1-0.689127], atol=1e-3) else: assert ek.allclose(uv0, [0.950589, 0.988416],", "face data], disable_vertex_normals = 0, surface_area = 0.96, mesh attributes", "features, face_normals): \"\"\"Tests the OBJ & PLY loaders with combinations", "# Normals are stored in half precision assert ek.allclose(normals[0:3], [-1,", "load_string(\"\"\" <shape type=\"{0}\" version=\"2.0.0\"> <string name=\"filename\" value=\"resources/data/tests/{0}/rectangle_{1}.{0}\" /> <boolean name=\"face_normals\"", "for n in [normals[i*3:(i+1)*3] for i in [0, 2, 3]]:", "['obj', 'serialized']: assert ek.allclose(uv0, [0.950589, 1-0.988416], atol=1e-3) assert ek.allclose(uv2, [0.025105,", "B of vertex data], face_count = 1, faces = [24", "= Vector3f(0.0, 0.0, -1.0) n1 = Vector3f(0.0, 1.0, 0.0) angle_0", "-1.0) n1 = Vector3f(0.0, 1.0, 0.0) angle_0 = ek.pi /", "v3) = [positions[i*3:(i+1)*3] for i in [0, 2, 3]] assert", "from mitsuba.core import Vector3f from mitsuba.core.xml import load_string m =", "test01_create_mesh(variant_scalar_rgb): from mitsuba.core import Struct, float_dtype from mitsuba.render import Mesh", "version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle.ply\"/> <boolean name=\"face_normals\" value=\"true\"/> </shape> \"\"\") positions", "and absence of UVs, etc. 
\"\"\" from mitsuba.core.xml import load_string", "precision assert ek.allclose(normals[0:3], [-1, 0, 0]) assert ek.allclose(normals[3:6], [-1, 0,", "\"\"\"Tests the OBJ and PLY loaders on a simple example.\"\"\"", "from mitsuba.core import UInt32, Vector3f from mitsuba.core.xml import load_string m", "= [0, 0, 0], max = [1, 1, 0] ],", "type=\"ply\" version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle.ply\"/> <boolean name=\"face_normals\" value=\"true\"/> </shape> \"\"\")", "max = [0, 1, 1] ], vertex_count = 3, vertices", "3 assert faces[0] == UInt32(0) assert faces[1] == UInt32(1) assert", "/ 2.0 angle_1 = ek.acos(3.0 / 5.0) n2 = n0", "# For OBJs (and .serialized generated from OBJ), UV.y is", "i in [0, 2, 3]] # For OBJs (and .serialized", "2, has_vertex_normals=True) vertices = m.vertex_positions_buffer() normals = m.vertex_normals_buffer() a, b", "== 72 assert ek.slices(faces) == 36 assert ek.allclose(faces[6:9], [4, 5,", "0, -b, 0, 1, b, 0, 1] n0 = Vector3f(0.0,", "load_string def test(): shape = load_string(\"\"\" <shape type=\"{0}\" version=\"2.0.0\"> <string", "of UVs, etc. 
\"\"\" from mitsuba.core.xml import load_string def test():", "1-0.988416], atol=1e-3) assert ek.allclose(uv2, [0.025105, 1-0.689127], atol=1e-3) assert ek.allclose(uv3, [0.950589,", "n2 /= ek.norm(n2) n = np.vstack([n2, n0, n0, n1, n1]).transpose()", "positions = shape.vertex_positions_buffer() normals = shape.vertex_normals_buffer() texcoords = shape.vertex_texcoords_buffer() faces", "(not face_normals) positions = shape.vertex_positions_buffer() normals = shape.vertex_normals_buffer() texcoords =", "ek.norm(n2) n = np.vstack([n2, n0, n0, n1, n1]).transpose() m.faces_buffer()[:] =", "\"\"\" from mitsuba.core.xml import load_string def test(): shape = load_string(\"\"\"", "test07_ply_stored_attribute(variant_scalar_rgb): from mitsuba.core import Vector3f from mitsuba.core.xml import load_string m", "0.5 vertices[:] = [0, 0, 0, -a, 1, 0, a,", "assert ek.allclose(v0, [-2.85, 0.0, -7.600000], atol=1e-3) assert ek.allclose(v2, [ 2.85,", "0.2, 1.0, 0.0] m.faces_buffer()[:] = [0, 1, 2, 1, 2,", "= load_string(\"\"\" <shape type=\"ply\" version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle.ply\"/> <boolean name=\"face_normals\"", "[1, 1, 0] ], vertex_count = 3, vertices = [36", "atol=1e-3) assert ek.allclose(uv3, [0.950589, 0.689127], atol=1e-3) if shape.has_vertex_normals(): for n", "vertex data], face_count = 1, faces = [24 B of", "ek.allclose(normals[3:6], [-1, 0, 0]) assert ek.allclose(normals[6:9], [-1, 0, 0]) def", "atol=1e-3) if 'uv' in features: assert shape.has_vertex_texcoords() (uv0, uv2, uv3)", "vertex data], face_count = 2, faces = [24 B of", "B of face data], disable_vertex_normals = 0, surface_area = 0.96", "assert ek.allclose(normals[i*3:(i+1)*3], n[:, i], 5e-4) @fresolver_append_path def test05_load_simple_mesh(variant_scalar_rgb): from mitsuba.core.xml", "5e-4) @fresolver_append_path def test05_load_simple_mesh(variant_scalar_rgb): from mitsuba.core.xml import load_string \"\"\"Tests the", "loaders on a simple example.\"\"\" for mesh_format 
in [\"obj\", \"ply\"]:", "m = load_string(\"\"\" <shape type=\"ply\" version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle.ply\"/> <boolean", "= 0, surface_area = 0.96 ]\"\"\" @fresolver_append_path def test02_ply_triangle(variant_scalar_rgb): from", "as Float from mitsuba.python.test.util import fresolver_append_path from mitsuba.python.util import traverse", "m.faces_buffer()[:] = [0, 1, 2, 1, 2, 0] m.parameters_changed() m.add_attribute(\"vertex_color\",", "ek.allclose(uv0, [0.950589, 0.988416], atol=1e-3) assert ek.allclose(uv2, [0.025105, 0.689127], atol=1e-3) assert", "\"\"\"Checks(automatic) vertex normal computation for a PLY file that doesn't", "atol=1e-3) if shape.has_vertex_normals(): for n in [normals[i*3:(i+1)*3] for i in", "has_vertex_normals=True) vertices = m.vertex_positions_buffer() normals = m.vertex_normals_buffer() a, b =", "import load_string \"\"\"Tests the OBJ and PLY loaders on a", "str(face_normals).lower())) assert shape.has_vertex_normals() == (not face_normals) positions = shape.vertex_positions_buffer() normals", "load_string(\"\"\" <shape type=\"ply\" version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle.ply\"/> <boolean name=\"face_normals\" value=\"true\"/>", "assert ek.allclose(uv0, [0.950589, 1-0.988416], atol=1e-3) assert ek.allclose(uv2, [0.025105, 1-0.689127], atol=1e-3)", "test08_mesh_add_attribute(variant_scalar_rgb): from mitsuba.core import Struct, float_dtype from mitsuba.render import Mesh", "</shape> \"\"\") assert str(m) == \"\"\"PLYMesh[ name = \"triangle_face_colors.ply\", bbox", "@fresolver_append_path def test05_load_simple_mesh(variant_scalar_rgb): from mitsuba.core.xml import load_string \"\"\"Tests the OBJ", "import Float32 as Float from mitsuba.python.test.util import fresolver_append_path from mitsuba.python.util", "* angle_1 n2 /= ek.norm(n2) n = np.vstack([n2, n0, n0,", "165, 65, 82, 165]) @pytest.mark.parametrize('mesh_format', ['obj', 'ply', 'serialized']) 
@pytest.mark.parametrize('features', ['normals',", "OBJ), UV.y is flipped. if mesh_format in ['obj', 'serialized']: assert", "from mitsuba.core import Struct, float_dtype, Vector3f from mitsuba.render import Mesh", "0, mesh attributes = [ face_color: 3 floats ] ]\"\"\"", "shape.vertex_positions_buffer() normals = shape.vertex_normals_buffer() texcoords = shape.vertex_texcoords_buffer() faces = shape.faces_buffer()", "0, 1]) assert ek.allclose(positions[6:9], [0, 1, 0]) assert ek.slices(faces) ==", "for i in [0, 2, 3]] # For OBJs (and", "assert shape.has_vertex_normals() assert ek.slices(positions) == 72 assert ek.slices(faces) == 36", "i], 5e-4) @fresolver_append_path def test05_load_simple_mesh(variant_scalar_rgb): from mitsuba.core.xml import load_string \"\"\"Tests", "= 0, surface_area = 0, mesh attributes = [ face_color:", "import enoki as ek from enoki.dynamic import Float32 as Float", "0, surface_area = 0.96 ]\"\"\" @fresolver_append_path def test02_ply_triangle(variant_scalar_rgb): from mitsuba.core", "2.85, 0.0, 0.599999], atol=1e-3) assert ek.allclose(v3, [ 2.85, 0.0, -7.600000],", "3, 2) m.vertex_positions_buffer()[:] = [0.0, 0.0, 0.0, 1.0, 0.2, 0.0,", "OBJ and PLY loaders on a simple example.\"\"\" for mesh_format", "-7.600000], atol=1e-3) if 'uv' in features: assert shape.has_vertex_texcoords() (uv0, uv2,", "= shape.faces_buffer() assert shape.has_vertex_normals() assert ek.slices(positions) == 72 assert ek.slices(faces)", "= 3, vertices = [36 B of vertex data], face_count", "[36 B of vertex data], face_count = 2, faces =", "2, 0, 3, 4] m.recompute_vertex_normals() for i in range(5): assert", "test04_normal_weighting_scheme(variant_scalar_rgb): from mitsuba.core import Struct, float_dtype, Vector3f from mitsuba.render import", "n[:, i], 5e-4) @fresolver_append_path def test05_load_simple_mesh(variant_scalar_rgb): from mitsuba.core.xml import load_string", "[ 2.85, 0.0, 0.599999], atol=1e-3) assert ek.allclose(v3, [ 2.85, 0.0,", "assert ek.allclose(uv2, 
[0.025105, 1-0.689127], atol=1e-3) assert ek.allclose(uv3, [0.950589, 1-0.689127], atol=1e-3)", "[0.950589, 1-0.689127], atol=1e-3) else: assert ek.allclose(uv0, [0.950589, 0.988416], atol=1e-3) assert", "atol=1e-3) assert ek.allclose(v3, [ 2.85, 0.0, -7.600000], atol=1e-3) if 'uv'", "assert ek.allclose(normals[3:6], [-1, 0, 0]) assert ek.allclose(normals[6:9], [-1, 0, 0])", "import Struct, float_dtype, Vector3f from mitsuba.render import Mesh import numpy", "value=\"resources/data/tests/{0}/rectangle_{1}.{0}\" /> <boolean name=\"face_normals\" value=\"{2}\" /> </shape> \"\"\".format(mesh_format, features, str(face_normals).lower()))", "type=\"{0}\" version=\"2.0.0\"> <string name=\"filename\" value=\"resources/data/tests/{0}/cbox_smallbox.{0}\"/> </shape> \"\"\".format(mesh_format)) positions = shape.vertex_positions_buffer()", "for i in [0, 2, 3]] assert ek.allclose(v0, [-2.85, 0.0,", "version=\"2.0.0\"> <string name=\"filename\" value=\"resources/data/tests/{0}/cbox_smallbox.{0}\"/> </shape> \"\"\".format(mesh_format)) positions = shape.vertex_positions_buffer() faces", "vertices = [36 B of vertex data], face_count = 2,", "65, 82, 165]) @pytest.mark.parametrize('mesh_format', ['obj', 'ply', 'serialized']) @pytest.mark.parametrize('features', ['normals', 'uv',", "disable_vertex_normals = 0, surface_area = 0.96, mesh attributes = [", "= m.faces_buffer() assert not m.has_vertex_normals() assert ek.slices(positions) == 9 assert", "b = 1.0, 0.5 vertices[:] = [0, 0, 0, -a,", "= load_string(\"\"\" <shape type=\"{0}\" version=\"2.0.0\"> <string name=\"filename\" value=\"resources/data/tests/{0}/rectangle_{1}.{0}\" /> <boolean", "PLY file that doesn't have them.\"\"\" shape = load_string(\"\"\" <shape", "= shape.vertex_normals_buffer() assert shape.has_vertex_normals() # Normals are stored in half", "return fresolver_append_path(test)() @fresolver_append_path def test07_ply_stored_attribute(variant_scalar_rgb): from mitsuba.core import Vector3f from", "surface 
normals.\"\"\" m = Mesh(\"MyMesh\", 5, 2, has_vertex_normals=True) vertices =", "import numpy as np \"\"\"Tests the weighting scheme that is", "n1 * angle_1 n2 /= ek.norm(n2) n = np.vstack([n2, n0,", "shape = load_string(\"\"\" <shape type=\"{0}\" version=\"2.0.0\"> <string name=\"filename\" value=\"resources/data/tests/{0}/cbox_smallbox.{0}\"/> </shape>", "assert ek.allclose(n, [0.0, 1.0, 0.0]) return fresolver_append_path(test)() @fresolver_append_path def test07_ply_stored_attribute(variant_scalar_rgb):", "= Mesh(\"MyMesh\", 3, 2) m.vertex_positions_buffer()[:] = [0.0, 0.0, 0.0, 1.0,", "BoundingBox3f[ min = [0, 0, 0], max = [0, 1,", "a, 1, 0, -b, 0, 1, b, 0, 1] n0", "'uv' in features: assert shape.has_vertex_texcoords() (uv0, uv2, uv3) = [texcoords[i*2:(i+1)*2]", "[0.950589, 0.988416], atol=1e-3) assert ek.allclose(uv2, [0.025105, 0.689127], atol=1e-3) assert ek.allclose(uv3,", "value=\"true\"/> </shape> \"\"\") positions = m.vertex_positions_buffer() faces = m.faces_buffer() assert", "as ek from enoki.dynamic import Float32 as Float from mitsuba.python.test.util", "n2 = n0 * angle_0 + n1 * angle_1 n2", "assert ek.allclose(normals[0:3], [-1, 0, 0]) assert ek.allclose(normals[3:6], [-1, 0, 0])", "UInt32(1) assert faces[2] == UInt32(2) @fresolver_append_path def test03_ply_computed_normals(variant_scalar_rgb): from mitsuba.core", "[ face_color: 3 floats ] ]\"\"\" def test08_mesh_add_attribute(variant_scalar_rgb): from mitsuba.core", "= [0, 1, 2, 1, 2, 0] m.parameters_changed() m.add_attribute(\"vertex_color\", 3)[:]", "UInt32, Vector3f from mitsuba.core.xml import load_string m = load_string(\"\"\" <shape", "import UInt32, Vector3f from mitsuba.core.xml import load_string m = load_string(\"\"\"", "= 3, vertices = [72 B of vertex data], face_count", "atol=1e-3) else: assert ek.allclose(uv0, [0.950589, 0.988416], atol=1e-3) assert ek.allclose(uv2, [0.025105,", "3)[:] = [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0,", "@pytest.mark.parametrize('mesh_format', ['obj', 'ply', 
'serialized']) @pytest.mark.parametrize('features', ['normals', 'uv', 'normals_uv']) @pytest.mark.parametrize('face_normals', [True,", "faces = shape.faces_buffer() (v0, v2, v3) = [positions[i*3:(i+1)*3] for i", "Vector3f(0.0, 0.0, -1.0) n1 = Vector3f(0.0, 1.0, 0.0) angle_0 =", "faces[1] == UInt32(1) assert faces[2] == UInt32(2) @fresolver_append_path def test03_ply_computed_normals(variant_scalar_rgb):", "ek.allclose(normals[0:3], [-1, 0, 0]) assert ek.allclose(normals[3:6], [-1, 0, 0]) assert", "for i in [0, 2, 3]]: assert ek.allclose(n, [0.0, 1.0,", "ek from enoki.dynamic import Float32 as Float from mitsuba.python.test.util import", "min = [0, 0, 0], max = [1, 1, 0]", "mitsuba.core import UInt32, Vector3f from mitsuba.core.xml import load_string m =", "face data], disable_vertex_normals = 0, surface_area = 0.96 ]\"\"\" @fresolver_append_path", "[-1, 0, 0]) def test04_normal_weighting_scheme(variant_scalar_rgb): from mitsuba.core import Struct, float_dtype,", "/= ek.norm(n2) n = np.vstack([n2, n0, n0, n1, n1]).transpose() m.faces_buffer()[:]", "\"\"\"Tests the weighting scheme that is used to compute surface", "angle_0 + n1 * angle_1 n2 /= ek.norm(n2) n =", "float_dtype, Vector3f from mitsuba.render import Mesh import numpy as np", "assert not m.has_vertex_normals() assert ek.slices(positions) == 9 assert ek.allclose(positions[0:3], [0,", "1, 0]) assert ek.slices(faces) == 3 assert faces[0] == UInt32(0)", "combinations of vertex / face normals, presence and absence of", "n1]).transpose() m.faces_buffer()[:] = [0, 1, 2, 0, 3, 4] m.recompute_vertex_normals()", "floats ] ]\"\"\" def test08_mesh_add_attribute(variant_scalar_rgb): from mitsuba.core import Struct, float_dtype", "[4, 5, 6]) assert ek.allclose(positions[:5], [130, 165, 65, 82, 165])", "\"\"\"PLYMesh[ name = \"triangle_face_colors.ply\", bbox = BoundingBox3f[ min = [0,", "0]) assert ek.allclose(normals[6:9], [-1, 0, 0]) def test04_normal_weighting_scheme(variant_scalar_rgb): from mitsuba.core", 
"atol=1e-3) assert ek.allclose(v2, [ 2.85, 0.0, 0.599999], atol=1e-3) assert ek.allclose(v3,", "Mesh m = Mesh(\"MyMesh\", 3, 2) m.vertex_positions_buffer()[:] = [0.0, 0.0,", "[0, 1, 2, 0, 3, 4] m.recompute_vertex_normals() for i in", "value=\"{2}\" /> </shape> \"\"\".format(mesh_format, features, str(face_normals).lower())) assert shape.has_vertex_normals() == (not", "= [24 B of face data], disable_vertex_normals = 0, surface_area", "from mitsuba.python.test.util import fresolver_append_path from mitsuba.python.util import traverse def test01_create_mesh(variant_scalar_rgb):", "B of face data], disable_vertex_normals = 0, surface_area = 0.96,", "b, 0, 1] n0 = Vector3f(0.0, 0.0, -1.0) n1 =", "1, b, 0, 1] n0 = Vector3f(0.0, 0.0, -1.0) n1", "0.0, 0.0, 1.0, 0.2, 0.0, 0.2, 1.0, 0.0] m.faces_buffer()[:] =", "presence and absence of UVs, etc. \"\"\" from mitsuba.core.xml import", "= shape.vertex_positions_buffer() faces = shape.faces_buffer() assert shape.has_vertex_normals() assert ek.slices(positions) ==", "mesh attributes = [ face_color: 3 floats ] ]\"\"\" def", "enoki as ek from enoki.dynamic import Float32 as Float from", "assert str(m) == \"\"\"Mesh[ name = \"MyMesh\", bbox = BoundingBox3f[", "= [0, 0, 0, -a, 1, 0, a, 1, 0,", "name=\"filename\" value=\"resources/data/tests/{0}/cbox_smallbox.{0}\"/> </shape> \"\"\".format(mesh_format)) positions = shape.vertex_positions_buffer() faces = shape.faces_buffer()", "= ek.pi / 2.0 angle_1 = ek.acos(3.0 / 5.0) n2", "[130, 165, 65, 82, 165]) @pytest.mark.parametrize('mesh_format', ['obj', 'ply', 'serialized']) @pytest.mark.parametrize('features',", "file that doesn't have them.\"\"\" shape = load_string(\"\"\" <shape type=\"ply\"", "1, 2, 0] m.parameters_changed() m.add_attribute(\"vertex_color\", 3)[:] = [0.0, 1.0, 1.0,", "0.0]) return fresolver_append_path(test)() @fresolver_append_path def test07_ply_stored_attribute(variant_scalar_rgb): from mitsuba.core import Vector3f", "fresolver_append_path(test)() 
@fresolver_append_path def test07_ply_stored_attribute(variant_scalar_rgb): from mitsuba.core import Vector3f from mitsuba.core.xml", "def test(): shape = load_string(\"\"\" <shape type=\"{0}\" version=\"2.0.0\"> <string name=\"filename\"", "fresolver_append_path from mitsuba.python.util import traverse def test01_create_mesh(variant_scalar_rgb): from mitsuba.core import", "surface_area = 0.96 ]\"\"\" @fresolver_append_path def test02_ply_triangle(variant_scalar_rgb): from mitsuba.core import", "[24 B of face data], disable_vertex_normals = 0, surface_area =", "of vertex data], face_count = 2, faces = [24 B", "1, 0] ], vertex_count = 3, vertices = [72 B", "assert faces[1] == UInt32(1) assert faces[2] == UInt32(2) @fresolver_append_path def", "mitsuba.core import Struct, float_dtype, Vector3f from mitsuba.render import Mesh import", "3, vertices = [36 B of vertex data], face_count =", "data], disable_vertex_normals = 0, surface_area = 0.96, mesh attributes =", "import traverse def test01_create_mesh(variant_scalar_rgb): from mitsuba.core import Struct, float_dtype from", "n0 = Vector3f(0.0, 0.0, -1.0) n1 = Vector3f(0.0, 1.0, 0.0)", "import pytest import enoki as ek from enoki.dynamic import Float32", "0, 0]) assert ek.allclose(positions[3:6], [0, 0, 1]) assert ek.allclose(positions[6:9], [0,", "m.vertex_positions_buffer() faces = m.faces_buffer() assert not m.has_vertex_normals() assert ek.slices(positions) ==", "name = \"triangle_face_colors.ply\", bbox = BoundingBox3f[ min = [0, 0,", "shape.vertex_normals_buffer() assert shape.has_vertex_normals() # Normals are stored in half precision", "mesh_format, features, face_normals): \"\"\"Tests the OBJ & PLY loaders with", "data], face_count = 2, faces = [24 B of face", "-a, 1, 0, a, 1, 0, -b, 0, 1, b,", "[texcoords[i*2:(i+1)*2] for i in [0, 2, 3]] # For OBJs", "\"MyMesh\", bbox = BoundingBox3f[ min = [0, 0, 0], max", "Mesh import numpy as np \"\"\"Tests the weighting scheme that", "mitsuba.render import Mesh m = 
Mesh(\"MyMesh\", 3, 2) m.vertex_positions_buffer()[:] =", "]\"\"\" @fresolver_append_path def test02_ply_triangle(variant_scalar_rgb): from mitsuba.core import UInt32, Vector3f from", "vertex_count = 3, vertices = [36 B of vertex data],", "ek.allclose(v2, [ 2.85, 0.0, 0.599999], atol=1e-3) assert ek.allclose(v3, [ 2.85,", "face normals, presence and absence of UVs, etc. \"\"\" from", "[-1, 0, 0]) assert ek.allclose(normals[6:9], [-1, 0, 0]) def test04_normal_weighting_scheme(variant_scalar_rgb):", "ek.allclose(v3, [ 2.85, 0.0, -7.600000], atol=1e-3) if 'uv' in features:", "enoki.dynamic import Float32 as Float from mitsuba.python.test.util import fresolver_append_path from", "B of vertex data], face_count = 2, faces = [24", "['normals', 'uv', 'normals_uv']) @pytest.mark.parametrize('face_normals', [True, False]) def test06_load_various_features(variant_scalar_rgb, mesh_format, features,", "= [0, 0, 0], max = [0, 1, 1] ],", "m.faces_buffer() assert not m.has_vertex_normals() assert ek.slices(positions) == 9 assert ek.allclose(positions[0:3],", "weighting scheme that is used to compute surface normals.\"\"\" m", "faces[2] == UInt32(2) @fresolver_append_path def test03_ply_computed_normals(variant_scalar_rgb): from mitsuba.core import Vector3f", "= n0 * angle_0 + n1 * angle_1 n2 /=", "disable_vertex_normals = 0, surface_area = 0, mesh attributes = [", "of face data], disable_vertex_normals = 0, surface_area = 0.96, mesh", "], vertex_count = 3, vertices = [36 B of vertex", "== UInt32(1) assert faces[2] == UInt32(2) @fresolver_append_path def test03_ply_computed_normals(variant_scalar_rgb): from", "= 1.0, 0.5 vertices[:] = [0, 0, 0, -a, 1,", "= [36 B of vertex data], face_count = 2, faces", "assert faces[2] == UInt32(2) @fresolver_append_path def test03_ply_computed_normals(variant_scalar_rgb): from mitsuba.core import", "type=\"ply\" version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle_face_colors.ply\"/> </shape> \"\"\") assert str(m) ==", "<shape 
type=\"ply\" version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle.ply\"/> <boolean name=\"face_normals\" value=\"true\"/> </shape>", "def test03_ply_computed_normals(variant_scalar_rgb): from mitsuba.core import Vector3f from mitsuba.core.xml import load_string", "[0.950589, 0.689127], atol=1e-3) if shape.has_vertex_normals(): for n in [normals[i*3:(i+1)*3] for", "shape.has_vertex_normals(): for n in [normals[i*3:(i+1)*3] for i in [0, 2,", "shape.has_vertex_normals() # Normals are stored in half precision assert ek.allclose(normals[0:3],", "mitsuba.core.xml import load_string \"\"\"Checks(automatic) vertex normal computation for a PLY", "example.\"\"\" for mesh_format in [\"obj\", \"ply\"]: shape = load_string(\"\"\" <shape", "type=\"{0}\" version=\"2.0.0\"> <string name=\"filename\" value=\"resources/data/tests/{0}/rectangle_{1}.{0}\" /> <boolean name=\"face_normals\" value=\"{2}\" />", "2, 0] m.parameters_changed() m.add_attribute(\"vertex_color\", 3)[:] = [0.0, 1.0, 1.0, 0.0,", "for i in range(5): assert ek.allclose(normals[i*3:(i+1)*3], n[:, i], 5e-4) @fresolver_append_path", "import Mesh m = Mesh(\"MyMesh\", 3, 2) m.vertex_positions_buffer()[:] = [0.0,", "<boolean name=\"face_normals\" value=\"true\"/> </shape> \"\"\") positions = m.vertex_positions_buffer() faces =", "m.parameters_changed() m.add_attribute(\"vertex_color\", 3)[:] = [0.0, 1.0, 1.0, 0.0, 0.0, 0.0,", "and PLY loaders on a simple example.\"\"\" for mesh_format in", "assert shape.has_vertex_texcoords() (uv0, uv2, uv3) = [texcoords[i*2:(i+1)*2] for i in", "@pytest.mark.parametrize('features', ['normals', 'uv', 'normals_uv']) @pytest.mark.parametrize('face_normals', [True, False]) def test06_load_various_features(variant_scalar_rgb, mesh_format,", "1-0.689127], atol=1e-3) assert ek.allclose(uv3, [0.950589, 1-0.689127], atol=1e-3) else: assert ek.allclose(uv0,", "with combinations of vertex / face normals, presence and absence", "m = load_string(\"\"\" <shape type=\"ply\" 
version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle_face_colors.ply\"/> </shape>", "assert ek.slices(positions) == 9 assert ek.allclose(positions[0:3], [0, 0, 0]) assert", "face data], disable_vertex_normals = 0, surface_area = 0, mesh attributes", "scheme that is used to compute surface normals.\"\"\" m =", "1, faces = [24 B of face data], disable_vertex_normals =", "3]] assert ek.allclose(v0, [-2.85, 0.0, -7.600000], atol=1e-3) assert ek.allclose(v2, [", "shape = load_string(\"\"\" <shape type=\"ply\" version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle.ply\"/> </shape>", "/> <boolean name=\"face_normals\" value=\"{2}\" /> </shape> \"\"\".format(mesh_format, features, str(face_normals).lower())) assert", "/> </shape> \"\"\".format(mesh_format, features, str(face_normals).lower())) assert shape.has_vertex_normals() == (not face_normals)", "version=\"2.0.0\"> <string name=\"filename\" value=\"resources/data/tests/{0}/rectangle_{1}.{0}\" /> <boolean name=\"face_normals\" value=\"{2}\" /> </shape>", "0.0) angle_0 = ek.pi / 2.0 angle_1 = ek.acos(3.0 /", "2.85, 0.0, -7.600000], atol=1e-3) if 'uv' in features: assert shape.has_vertex_texcoords()", "1.0, 0.0]) return fresolver_append_path(test)() @fresolver_append_path def test07_ply_stored_attribute(variant_scalar_rgb): from mitsuba.core import", "0, 1] n0 = Vector3f(0.0, 0.0, -1.0) n1 = Vector3f(0.0,", "ek.allclose(positions[6:9], [0, 1, 0]) assert ek.slices(faces) == 3 assert faces[0]", "ek.allclose(v0, [-2.85, 0.0, -7.600000], atol=1e-3) assert ek.allclose(v2, [ 2.85, 0.0,", "normal computation for a PLY file that doesn't have them.\"\"\"", "n0, n0, n1, n1]).transpose() m.faces_buffer()[:] = [0, 1, 2, 0,", "[0.025105, 0.689127], atol=1e-3) assert ek.allclose(uv3, [0.950589, 0.689127], atol=1e-3) if shape.has_vertex_normals():", "the OBJ & PLY loaders with combinations of vertex /", "(and .serialized generated from OBJ), UV.y is flipped. 
if mesh_format", "mitsuba.render import Mesh import numpy as np \"\"\"Tests the weighting", "shape.has_vertex_normals() assert ek.slices(positions) == 72 assert ek.slices(faces) == 36 assert", "0.0, -7.600000], atol=1e-3) assert ek.allclose(v2, [ 2.85, 0.0, 0.599999], atol=1e-3)", "m.faces_buffer()[:] = [0, 1, 2, 1, 2, 0] m.parameters_changed() assert", "@pytest.mark.parametrize('face_normals', [True, False]) def test06_load_various_features(variant_scalar_rgb, mesh_format, features, face_normals): \"\"\"Tests the", "Struct, float_dtype from mitsuba.render import Mesh m = Mesh(\"MyMesh\", 3,", "range(5): assert ek.allclose(normals[i*3:(i+1)*3], n[:, i], 5e-4) @fresolver_append_path def test05_load_simple_mesh(variant_scalar_rgb): from", "n0, n1, n1]).transpose() m.faces_buffer()[:] = [0, 1, 2, 0, 3,", "0.0, -1.0) n1 = Vector3f(0.0, 1.0, 0.0) angle_0 = ek.pi", "0.689127], atol=1e-3) assert ek.allclose(uv3, [0.950589, 0.689127], atol=1e-3) if shape.has_vertex_normals(): for", "0, 0], max = [1, 1, 0] ], vertex_count =", "the weighting scheme that is used to compute surface normals.\"\"\"", "= m.vertex_positions_buffer() faces = m.faces_buffer() assert not m.has_vertex_normals() assert ek.slices(positions)", "loaders with combinations of vertex / face normals, presence and", "= shape.vertex_positions_buffer() normals = shape.vertex_normals_buffer() texcoords = shape.vertex_texcoords_buffer() faces =", "= \"triangle_face_colors.ply\", bbox = BoundingBox3f[ min = [0, 0, 0],", "= np.vstack([n2, n0, n0, n1, n1]).transpose() m.faces_buffer()[:] = [0, 1,", "mitsuba.core.xml import load_string def test(): shape = load_string(\"\"\" <shape type=\"{0}\"", "ek.slices(positions) == 9 assert ek.allclose(positions[0:3], [0, 0, 0]) assert ek.allclose(positions[3:6],", "mitsuba.core.xml import load_string \"\"\"Tests the OBJ and PLY loaders on", "[-2.85, 0.0, -7.600000], atol=1e-3) assert ek.allclose(v2, [ 2.85, 0.0, 0.599999],", "ek.allclose(uv2, [0.025105, 1-0.689127], atol=1e-3) 
assert ek.allclose(uv3, [0.950589, 1-0.689127], atol=1e-3) else:", "face_color: 3 floats ] ]\"\"\" def test08_mesh_add_attribute(variant_scalar_rgb): from mitsuba.core import", "2, 1, 2, 0] m.parameters_changed() assert str(m) == \"\"\"Mesh[ name", "import Mesh import numpy as np \"\"\"Tests the weighting scheme", "the OBJ and PLY loaders on a simple example.\"\"\" for", "3, vertices = [72 B of vertex data], face_count =", "load_string(\"\"\" <shape type=\"{0}\" version=\"2.0.0\"> <string name=\"filename\" value=\"resources/data/tests/{0}/cbox_smallbox.{0}\"/> </shape> \"\"\".format(mesh_format)) positions", "ek.allclose(normals[i*3:(i+1)*3], n[:, i], 5e-4) @fresolver_append_path def test05_load_simple_mesh(variant_scalar_rgb): from mitsuba.core.xml import", "1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0] assert str(m) ==", "shape.faces_buffer() assert shape.has_vertex_normals() assert ek.slices(positions) == 72 assert ek.slices(faces) ==", "normals = shape.vertex_normals_buffer() assert shape.has_vertex_normals() # Normals are stored in", "m.vertex_positions_buffer() normals = m.vertex_normals_buffer() a, b = 1.0, 0.5 vertices[:]", "0] ], vertex_count = 3, vertices = [72 B of", "import load_string \"\"\"Checks(automatic) vertex normal computation for a PLY file", "0.0, 0.599999], atol=1e-3) assert ek.allclose(v3, [ 2.85, 0.0, -7.600000], atol=1e-3)", "2, 1, 2, 0] m.parameters_changed() m.add_attribute(\"vertex_color\", 3)[:] = [0.0, 1.0,", "0.0, 0.0, 0.0, 1.0, 1.0, 0.0] assert str(m) == \"\"\"Mesh[", "m.vertex_normals_buffer() a, b = 1.0, 0.5 vertices[:] = [0, 0,", "'ply', 'serialized']) @pytest.mark.parametrize('features', ['normals', 'uv', 'normals_uv']) @pytest.mark.parametrize('face_normals', [True, False]) def", "& PLY loaders with combinations of vertex / face normals,", "1-0.689127], atol=1e-3) else: assert ek.allclose(uv0, [0.950589, 0.988416], atol=1e-3) assert ek.allclose(uv2,", "= [0, 1, 2, 0, 3, 4] m.recompute_vertex_normals() for i", "2, 3]] # For OBJs (and 
.serialized generated from OBJ),", "ek.allclose(uv0, [0.950589, 1-0.988416], atol=1e-3) assert ek.allclose(uv2, [0.025105, 1-0.689127], atol=1e-3) assert", "compute surface normals.\"\"\" m = Mesh(\"MyMesh\", 5, 2, has_vertex_normals=True) vertices", "uv2, uv3) = [texcoords[i*2:(i+1)*2] for i in [0, 2, 3]]", "= BoundingBox3f[ min = [0, 0, 0], max = [1,", "name=\"face_normals\" value=\"{2}\" /> </shape> \"\"\".format(mesh_format, features, str(face_normals).lower())) assert shape.has_vertex_normals() ==", "0.599999], atol=1e-3) assert ek.allclose(v3, [ 2.85, 0.0, -7.600000], atol=1e-3) if", "normals, presence and absence of UVs, etc. \"\"\" from mitsuba.core.xml", "0, 3, 4] m.recompute_vertex_normals() for i in range(5): assert ek.allclose(normals[i*3:(i+1)*3],", "Mesh(\"MyMesh\", 5, 2, has_vertex_normals=True) vertices = m.vertex_positions_buffer() normals = m.vertex_normals_buffer()", "i in [0, 2, 3]]: assert ek.allclose(n, [0.0, 1.0, 0.0])", "<string name=\"filename\" value=\"data/triangle.ply\"/> </shape> \"\"\") normals = shape.vertex_normals_buffer() assert shape.has_vertex_normals()", "[0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0] assert", "not m.has_vertex_normals() assert ek.slices(positions) == 9 assert ek.allclose(positions[0:3], [0, 0,", "[0, 0, 1]) assert ek.allclose(positions[6:9], [0, 1, 0]) assert ek.slices(faces)", "vertices = [72 B of vertex data], face_count = 2,", "used to compute surface normals.\"\"\" m = Mesh(\"MyMesh\", 5, 2,", "<string name=\"filename\" value=\"data/triangle_face_colors.ply\"/> </shape> \"\"\") assert str(m) == \"\"\"PLYMesh[ name", "0, 0, -a, 1, 0, a, 1, 0, -b, 0,", "1, 2, 1, 2, 0] m.parameters_changed() assert str(m) == \"\"\"Mesh[", "n1, n1]).transpose() m.faces_buffer()[:] = [0, 1, 2, 0, 3, 4]", "assert str(m) == \"\"\"PLYMesh[ name = \"triangle_face_colors.ply\", bbox = BoundingBox3f[", "For OBJs (and .serialized generated from OBJ), UV.y is flipped.", "ek.allclose(faces[6:9], [4, 5, 6]) assert 
ek.allclose(positions[:5], [130, 165, 65, 82,", "that is used to compute surface normals.\"\"\" m = Mesh(\"MyMesh\",", "'serialized']) @pytest.mark.parametrize('features', ['normals', 'uv', 'normals_uv']) @pytest.mark.parametrize('face_normals', [True, False]) def test06_load_various_features(variant_scalar_rgb,", "[True, False]) def test06_load_various_features(variant_scalar_rgb, mesh_format, features, face_normals): \"\"\"Tests the OBJ", "@fresolver_append_path def test03_ply_computed_normals(variant_scalar_rgb): from mitsuba.core import Vector3f from mitsuba.core.xml import", "computation for a PLY file that doesn't have them.\"\"\" shape", "shape = load_string(\"\"\" <shape type=\"{0}\" version=\"2.0.0\"> <string name=\"filename\" value=\"resources/data/tests/{0}/rectangle_{1}.{0}\" />", "0, 0]) def test04_normal_weighting_scheme(variant_scalar_rgb): from mitsuba.core import Struct, float_dtype, Vector3f", "== (not face_normals) positions = shape.vertex_positions_buffer() normals = shape.vertex_normals_buffer() texcoords", "from mitsuba.render import Mesh m = Mesh(\"MyMesh\", 3, 2) m.vertex_positions_buffer()[:]", "is used to compute surface normals.\"\"\" m = Mesh(\"MyMesh\", 5,", "0.0, 0.2, 1.0, 0.0] m.faces_buffer()[:] = [0, 1, 2, 1,", "= m.vertex_positions_buffer() normals = m.vertex_normals_buffer() a, b = 1.0, 0.5", "name=\"face_normals\" value=\"true\"/> </shape> \"\"\") positions = m.vertex_positions_buffer() faces = m.faces_buffer()", "name = \"MyMesh\", bbox = BoundingBox3f[ min = [0, 0,", "mesh_format in ['obj', 'serialized']: assert ek.allclose(uv0, [0.950589, 1-0.988416], atol=1e-3) assert", "UInt32(0) assert faces[1] == UInt32(1) assert faces[2] == UInt32(2) @fresolver_append_path", "+ n1 * angle_1 n2 /= ek.norm(n2) n = np.vstack([n2,", "ek.slices(faces) == 3 assert faces[0] == UInt32(0) assert faces[1] ==", "flipped. 
if mesh_format in ['obj', 'serialized']: assert ek.allclose(uv0, [0.950589, 1-0.988416],", "1, 1] ], vertex_count = 3, vertices = [72 B", "assert ek.allclose(uv3, [0.950589, 0.689127], atol=1e-3) if shape.has_vertex_normals(): for n in", "= shape.faces_buffer() (v0, v2, v3) = [positions[i*3:(i+1)*3] for i in", "faces = shape.faces_buffer() assert shape.has_vertex_normals() assert ek.slices(positions) == 72 assert", "= [0, 1, 1] ], vertex_count = 3, vertices =", "3]] # For OBJs (and .serialized generated from OBJ), UV.y", "vertex / face normals, presence and absence of UVs, etc.", "assert ek.allclose(faces[6:9], [4, 5, 6]) assert ek.allclose(positions[:5], [130, 165, 65,", "[0.025105, 1-0.689127], atol=1e-3) assert ek.allclose(uv3, [0.950589, 1-0.689127], atol=1e-3) else: assert", "= [72 B of vertex data], face_count = 1, faces", "[0.0, 0.0, 0.0, 1.0, 0.2, 0.0, 0.2, 1.0, 0.0] m.faces_buffer()[:]", "assert ek.allclose(positions[:5], [130, 165, 65, 82, 165]) @pytest.mark.parametrize('mesh_format', ['obj', 'ply',", "value=\"data/triangle.ply\"/> </shape> \"\"\") normals = shape.vertex_normals_buffer() assert shape.has_vertex_normals() # Normals", "= [72 B of vertex data], face_count = 2, faces", "import load_string def test(): shape = load_string(\"\"\" <shape type=\"{0}\" version=\"2.0.0\">", "1.0, 1.0, 0.0] assert str(m) == \"\"\"Mesh[ name = \"MyMesh\",", "Vector3f from mitsuba.core.xml import load_string m = load_string(\"\"\" <shape type=\"ply\"", "== 3 assert faces[0] == UInt32(0) assert faces[1] == UInt32(1)", "= [positions[i*3:(i+1)*3] for i in [0, 2, 3]] assert ek.allclose(v0,", "[0, 1, 0]) assert ek.slices(faces) == 3 assert faces[0] ==", "vertices = m.vertex_positions_buffer() normals = m.vertex_normals_buffer() a, b = 1.0,", "['obj', 'ply', 'serialized']) @pytest.mark.parametrize('features', ['normals', 'uv', 'normals_uv']) @pytest.mark.parametrize('face_normals', [True, False])", "shape.vertex_texcoords_buffer() faces = shape.faces_buffer() (v0, v2, v3) = 
[positions[i*3:(i+1)*3] for", "pytest import enoki as ek from enoki.dynamic import Float32 as", "== 9 assert ek.allclose(positions[0:3], [0, 0, 0]) assert ek.allclose(positions[3:6], [0,", "UV.y is flipped. if mesh_format in ['obj', 'serialized']: assert ek.allclose(uv0,", "== 36 assert ek.allclose(faces[6:9], [4, 5, 6]) assert ek.allclose(positions[:5], [130,", "<string name=\"filename\" value=\"resources/data/tests/{0}/cbox_smallbox.{0}\"/> </shape> \"\"\".format(mesh_format)) positions = shape.vertex_positions_buffer() faces =", "assert shape.has_vertex_normals() # Normals are stored in half precision assert", "5, 6]) assert ek.allclose(positions[:5], [130, 165, 65, 82, 165]) @pytest.mark.parametrize('mesh_format',", "\"\"\".format(mesh_format, features, str(face_normals).lower())) assert shape.has_vertex_normals() == (not face_normals) positions =", "if mesh_format in ['obj', 'serialized']: assert ek.allclose(uv0, [0.950589, 1-0.988416], atol=1e-3)", "2, 0] m.parameters_changed() assert str(m) == \"\"\"Mesh[ name = \"MyMesh\",", "assert ek.allclose(v2, [ 2.85, 0.0, 0.599999], atol=1e-3) assert ek.allclose(v3, [", "to compute surface normals.\"\"\" m = Mesh(\"MyMesh\", 5, 2, has_vertex_normals=True)", "test(): shape = load_string(\"\"\" <shape type=\"{0}\" version=\"2.0.0\"> <string name=\"filename\" value=\"resources/data/tests/{0}/rectangle_{1}.{0}\"", "0.2, 0.0, 0.2, 1.0, 0.0] m.faces_buffer()[:] = [0, 1, 2,", "angle_0 = ek.pi / 2.0 angle_1 = ek.acos(3.0 / 5.0)", "2) m.vertex_positions_buffer()[:] = [0.0, 0.0, 0.0, 1.0, 0.2, 0.0, 0.2,", "= m.vertex_normals_buffer() a, b = 1.0, 0.5 vertices[:] = [0,", "= shape.vertex_texcoords_buffer() faces = shape.faces_buffer() (v0, v2, v3) = [positions[i*3:(i+1)*3]", "n in [normals[i*3:(i+1)*3] for i in [0, 2, 3]]: assert", "0, surface_area = 0, mesh attributes = [ face_color: 3", "load_string(\"\"\" <shape type=\"ply\" version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle.ply\"/> </shape> \"\"\") normals", 
"of face data], disable_vertex_normals = 0, surface_area = 0, mesh", "2.0 angle_1 = ek.acos(3.0 / 5.0) n2 = n0 *", "= [texcoords[i*2:(i+1)*2] for i in [0, 2, 3]] # For", "\"\"\") assert str(m) == \"\"\"PLYMesh[ name = \"triangle_face_colors.ply\", bbox =", "def test05_load_simple_mesh(variant_scalar_rgb): from mitsuba.core.xml import load_string \"\"\"Tests the OBJ and", "1]) assert ek.allclose(positions[6:9], [0, 1, 0]) assert ek.slices(faces) == 3", "[0, 0, 0], max = [0, 1, 1] ], vertex_count", "assert ek.slices(faces) == 3 assert faces[0] == UInt32(0) assert faces[1]", "features, str(face_normals).lower())) assert shape.has_vertex_normals() == (not face_normals) positions = shape.vertex_positions_buffer()", "Float from mitsuba.python.test.util import fresolver_append_path from mitsuba.python.util import traverse def", "6]) assert ek.allclose(positions[:5], [130, 165, 65, 82, 165]) @pytest.mark.parametrize('mesh_format', ['obj',", "0, 0]) assert ek.allclose(normals[6:9], [-1, 0, 0]) def test04_normal_weighting_scheme(variant_scalar_rgb): from", "m.faces_buffer()[:] = [0, 1, 2, 0, 3, 4] m.recompute_vertex_normals() for", "[0, 0, 0]) assert ek.allclose(positions[3:6], [0, 0, 1]) assert ek.allclose(positions[6:9],", "face_normals) positions = shape.vertex_positions_buffer() normals = shape.vertex_normals_buffer() texcoords = shape.vertex_texcoords_buffer()", "def test07_ply_stored_attribute(variant_scalar_rgb): from mitsuba.core import Vector3f from mitsuba.core.xml import load_string", "numpy as np \"\"\"Tests the weighting scheme that is used", "stored in half precision assert ek.allclose(normals[0:3], [-1, 0, 0]) assert", "[0.0, 1.0, 0.0]) return fresolver_append_path(test)() @fresolver_append_path def test07_ply_stored_attribute(variant_scalar_rgb): from mitsuba.core", "absence of UVs, etc. 
\"\"\" from mitsuba.core.xml import load_string def", "0.0] m.faces_buffer()[:] = [0, 1, 2, 1, 2, 0] m.parameters_changed()", "\"ply\"]: shape = load_string(\"\"\" <shape type=\"{0}\" version=\"2.0.0\"> <string name=\"filename\" value=\"resources/data/tests/{0}/cbox_smallbox.{0}\"/>", "2, 3]] assert ek.allclose(v0, [-2.85, 0.0, -7.600000], atol=1e-3) assert ek.allclose(v2,", "load_string \"\"\"Tests the OBJ and PLY loaders on a simple", "features: assert shape.has_vertex_texcoords() (uv0, uv2, uv3) = [texcoords[i*2:(i+1)*2] for i", "0]) def test04_normal_weighting_scheme(variant_scalar_rgb): from mitsuba.core import Struct, float_dtype, Vector3f from", "n1 = Vector3f(0.0, 1.0, 0.0) angle_0 = ek.pi / 2.0", "attributes = [ face_color: 3 floats ] ]\"\"\" def test08_mesh_add_attribute(variant_scalar_rgb):", "face_count = 1, faces = [24 B of face data],", "of vertex / face normals, presence and absence of UVs,", "assert ek.allclose(positions[0:3], [0, 0, 0]) assert ek.allclose(positions[3:6], [0, 0, 1])", "0.0] assert str(m) == \"\"\"Mesh[ name = \"MyMesh\", bbox =", "of face data], disable_vertex_normals = 0, surface_area = 0.96 ]\"\"\"", "i in [0, 2, 3]] assert ek.allclose(v0, [-2.85, 0.0, -7.600000],", "i in range(5): assert ek.allclose(normals[i*3:(i+1)*3], n[:, i], 5e-4) @fresolver_append_path def", "of vertex data], face_count = 1, faces = [24 B", "load_string m = load_string(\"\"\" <shape type=\"ply\" version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle_face_colors.ply\"/>", "= load_string(\"\"\" <shape type=\"{0}\" version=\"2.0.0\"> <string name=\"filename\" value=\"resources/data/tests/{0}/cbox_smallbox.{0}\"/> </shape> \"\"\".format(mesh_format))", "in ['obj', 'serialized']: assert ek.allclose(uv0, [0.950589, 1-0.988416], atol=1e-3) assert ek.allclose(uv2,", "Vector3f from mitsuba.core.xml import load_string \"\"\"Checks(automatic) vertex normal computation for", "'serialized']: assert ek.allclose(uv0, [0.950589, 1-0.988416], atol=1e-3) assert 
ek.allclose(uv2, [0.025105, 1-0.689127],", "165]) @pytest.mark.parametrize('mesh_format', ['obj', 'ply', 'serialized']) @pytest.mark.parametrize('features', ['normals', 'uv', 'normals_uv']) @pytest.mark.parametrize('face_normals',", "assert ek.allclose(uv2, [0.025105, 0.689127], atol=1e-3) assert ek.allclose(uv3, [0.950589, 0.689127], atol=1e-3)", "1] n0 = Vector3f(0.0, 0.0, -1.0) n1 = Vector3f(0.0, 1.0,", "= 2, faces = [24 B of face data], disable_vertex_normals", "from OBJ), UV.y is flipped. if mesh_format in ['obj', 'serialized']:", "import mitsuba import pytest import enoki as ek from enoki.dynamic", "if shape.has_vertex_normals(): for n in [normals[i*3:(i+1)*3] for i in [0,", "face_normals): \"\"\"Tests the OBJ & PLY loaders with combinations of", "B of face data], disable_vertex_normals = 0, surface_area = 0,", "Vector3f from mitsuba.render import Mesh import numpy as np \"\"\"Tests", "False]) def test06_load_various_features(variant_scalar_rgb, mesh_format, features, face_normals): \"\"\"Tests the OBJ &", "BoundingBox3f[ min = [0, 0, 0], max = [1, 1,", "in [\"obj\", \"ply\"]: shape = load_string(\"\"\" <shape type=\"{0}\" version=\"2.0.0\"> <string", "assert ek.allclose(uv0, [0.950589, 0.988416], atol=1e-3) assert ek.allclose(uv2, [0.025105, 0.689127], atol=1e-3)", "OBJ & PLY loaders with combinations of vertex / face", "for mesh_format in [\"obj\", \"ply\"]: shape = load_string(\"\"\" <shape type=\"{0}\"", "in [normals[i*3:(i+1)*3] for i in [0, 2, 3]]: assert ek.allclose(n,", "a, b = 1.0, 0.5 vertices[:] = [0, 0, 0,", "from mitsuba.core.xml import load_string m = load_string(\"\"\" <shape type=\"ply\" version=\"0.5.0\">", "<gh_stars>1-10 import mitsuba import pytest import enoki as ek from", "1, 2, 0, 3, 4] m.recompute_vertex_normals() for i in range(5):", "= load_string(\"\"\" <shape type=\"ply\" version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle_face_colors.ply\"/> </shape> \"\"\")", "1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0] assert str(m)", 
"[0, 1, 2, 1, 2, 0] m.parameters_changed() m.add_attribute(\"vertex_color\", 3)[:] =", "ek.slices(faces) == 36 assert ek.allclose(faces[6:9], [4, 5, 6]) assert ek.allclose(positions[:5],", "= [1, 1, 0] ], vertex_count = 3, vertices =", "shape.vertex_positions_buffer() faces = shape.faces_buffer() assert shape.has_vertex_normals() assert ek.slices(positions) == 72", "<string name=\"filename\" value=\"resources/data/tests/{0}/rectangle_{1}.{0}\" /> <boolean name=\"face_normals\" value=\"{2}\" /> </shape> \"\"\".format(mesh_format,", "test06_load_various_features(variant_scalar_rgb, mesh_format, features, face_normals): \"\"\"Tests the OBJ & PLY loaders", "angle_1 = ek.acos(3.0 / 5.0) n2 = n0 * angle_0", "2, 3]]: assert ek.allclose(n, [0.0, 1.0, 0.0]) return fresolver_append_path(test)() @fresolver_append_path", "assert ek.slices(faces) == 36 assert ek.allclose(faces[6:9], [4, 5, 6]) assert", "0.0, -7.600000], atol=1e-3) if 'uv' in features: assert shape.has_vertex_texcoords() (uv0,", "= shape.vertex_normals_buffer() texcoords = shape.vertex_texcoords_buffer() faces = shape.faces_buffer() (v0, v2,", "ek.slices(positions) == 72 assert ek.slices(faces) == 36 assert ek.allclose(faces[6:9], [4,", "assert ek.allclose(positions[3:6], [0, 0, 1]) assert ek.allclose(positions[6:9], [0, 1, 0])", "[0, 1, 2, 1, 2, 0] m.parameters_changed() assert str(m) ==", "0]) assert ek.allclose(positions[3:6], [0, 0, 1]) assert ek.allclose(positions[6:9], [0, 1,", "[\"obj\", \"ply\"]: shape = load_string(\"\"\" <shape type=\"{0}\" version=\"2.0.0\"> <string name=\"filename\"", "3]]: assert ek.allclose(n, [0.0, 1.0, 0.0]) return fresolver_append_path(test)() @fresolver_append_path def", "positions = shape.vertex_positions_buffer() faces = shape.faces_buffer() assert shape.has_vertex_normals() assert ek.slices(positions)", "0.96 ]\"\"\" @fresolver_append_path def test02_ply_triangle(variant_scalar_rgb): from mitsuba.core import UInt32, Vector3f", "import Struct, float_dtype from mitsuba.render 
import Mesh m = Mesh(\"MyMesh\",", "0.0, 1.0, 1.0, 0.0] assert str(m) == \"\"\"Mesh[ name =", "version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle.ply\"/> </shape> \"\"\") normals = shape.vertex_normals_buffer() assert", "mitsuba import pytest import enoki as ek from enoki.dynamic import", "[normals[i*3:(i+1)*3] for i in [0, 2, 3]]: assert ek.allclose(n, [0.0,", "[0, 2, 3]] assert ek.allclose(v0, [-2.85, 0.0, -7.600000], atol=1e-3) assert", "0, 0]) assert ek.allclose(normals[3:6], [-1, 0, 0]) assert ek.allclose(normals[6:9], [-1,", "data], disable_vertex_normals = 0, surface_area = 0.96 ]\"\"\" @fresolver_append_path def", "on a simple example.\"\"\" for mesh_format in [\"obj\", \"ply\"]: shape", "in [0, 2, 3]] # For OBJs (and .serialized generated", "name=\"filename\" value=\"data/triangle_face_colors.ply\"/> </shape> \"\"\") assert str(m) == \"\"\"PLYMesh[ name =", "9 assert ek.allclose(positions[0:3], [0, 0, 0]) assert ek.allclose(positions[3:6], [0, 0,", "Normals are stored in half precision assert ek.allclose(normals[0:3], [-1, 0,", "1, 0, -b, 0, 1, b, 0, 1] n0 =", "in features: assert shape.has_vertex_texcoords() (uv0, uv2, uv3) = [texcoords[i*2:(i+1)*2] for", "mitsuba.python.util import traverse def test01_create_mesh(variant_scalar_rgb): from mitsuba.core import Struct, float_dtype", "faces = m.faces_buffer() assert not m.has_vertex_normals() assert ek.slices(positions) == 9", "[-1, 0, 0]) assert ek.allclose(normals[3:6], [-1, 0, 0]) assert ek.allclose(normals[6:9],", "def test08_mesh_add_attribute(variant_scalar_rgb): from mitsuba.core import Struct, float_dtype from mitsuba.render import", "in half precision assert ek.allclose(normals[0:3], [-1, 0, 0]) assert ek.allclose(normals[3:6],", "mitsuba.core.xml import load_string m = load_string(\"\"\" <shape type=\"ply\" version=\"0.5.0\"> <string", "= 0.96 ]\"\"\" @fresolver_append_path def test02_ply_triangle(variant_scalar_rgb): from mitsuba.core import UInt32,", "shape.vertex_normals_buffer() 
texcoords = shape.vertex_texcoords_buffer() faces = shape.faces_buffer() (v0, v2, v3)", "0, 0], max = [0, 1, 1] ], vertex_count =", "m.recompute_vertex_normals() for i in range(5): assert ek.allclose(normals[i*3:(i+1)*3], n[:, i], 5e-4)", "[0, 2, 3]] # For OBJs (and .serialized generated from", "normals.\"\"\" m = Mesh(\"MyMesh\", 5, 2, has_vertex_normals=True) vertices = m.vertex_positions_buffer()", "import Vector3f from mitsuba.core.xml import load_string \"\"\"Checks(automatic) vertex normal computation", "surface_area = 0.96, mesh attributes = [ vertex_color: 3 floats", "vertices = [72 B of vertex data], face_count = 1,", "traverse def test01_create_mesh(variant_scalar_rgb): from mitsuba.core import Struct, float_dtype from mitsuba.render", "@fresolver_append_path def test02_ply_triangle(variant_scalar_rgb): from mitsuba.core import UInt32, Vector3f from mitsuba.core.xml", "-7.600000], atol=1e-3) assert ek.allclose(v2, [ 2.85, 0.0, 0.599999], atol=1e-3) assert", "ek.allclose(positions[0:3], [0, 0, 0]) assert ek.allclose(positions[3:6], [0, 0, 1]) assert", "<shape type=\"ply\" version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle.ply\"/> </shape> \"\"\") normals =", "them.\"\"\" shape = load_string(\"\"\" <shape type=\"ply\" version=\"0.5.0\"> <string name=\"filename\" value=\"data/triangle.ply\"/>", "1, 0] ], vertex_count = 3, vertices = [36 B", "ek.pi / 2.0 angle_1 = ek.acos(3.0 / 5.0) n2 =" ]
[ "map, next, oct, open, pow, range, round, str, super, zip)", "range, round, str, super, zip) from ...._utils import send_session_request from", "class Group(PortalEndpointBase): @property def id(self): return self._pdata[\"id\"] @property def _url_full(self):", "return \"{0}/{1}\".format(self._url_base, self.id) def __init__(self, requests_session, url_base, id): super().__init__(requests_session, url_base)", "round, str, super, zip) from ...._utils import send_session_request from ..._PortalEndpointBase", "hex, input, int, map, next, oct, open, pow, range, round,", "...._utils import send_session_request from ..._PortalEndpointBase import PortalEndpointBase from .CreateUpdateGroupParams import", "__init__(self, requests_session, url_base, id): super().__init__(requests_session, url_base) self._pdata = {\"id\": id}", "input, int, map, next, oct, open, pow, range, round, str,", "isinstance( update_group_params, CreateUpdateGroupParams) else update_group_params.copy() if not \"clearEmptyFields\" in update_group_params:", "@property def _url_full(self): return \"{0}/{1}\".format(self._url_base, self.id) def __init__(self, requests_session, url_base,", "\"\"\" Gets the properties of the item. \"\"\" return self._get()", "not \"clearEmptyFields\" in update_group_params: update_group_params[\"clearEmptyFields\"] = clear_empty_fields r = self._create_operation_request(self,", "item. \"\"\" return self._get() def update(self, update_group_params, clear_empty_fields=False): \"\"\" Updates", "def get_properties(self): \"\"\" Gets the properties of the item. 
\"\"\"", "next, oct, open, pow, range, round, str, super, zip) from", "if not \"clearEmptyFields\" in update_group_params: update_group_params[\"clearEmptyFields\"] = clear_empty_fields r =", "\"\"\" return self._get() def update(self, update_group_params, clear_empty_fields=False): \"\"\" Updates the", "oct, open, pow, range, round, str, super, zip) from ...._utils", "\"\"\" update_group_params = update_group_params._get_params() if isinstance( update_group_params, CreateUpdateGroupParams) else update_group_params.copy()", "import send_session_request from ..._PortalEndpointBase import PortalEndpointBase from .CreateUpdateGroupParams import CreateUpdateGroupParams", "= update_group_params._get_params() if isinstance( update_group_params, CreateUpdateGroupParams) else update_group_params.copy() if not", "url_base) self._pdata = {\"id\": id} def get_properties(self): \"\"\" Gets the", "return self._pdata[\"id\"] @property def _url_full(self): return \"{0}/{1}\".format(self._url_base, self.id) def __init__(self,", "id(self): return self._pdata[\"id\"] @property def _url_full(self): return \"{0}/{1}\".format(self._url_base, self.id) def", "dict, filter, hex, input, int, map, next, oct, open, pow,", "update_group_params, clear_empty_fields=False): \"\"\" Updates the group properties. 
\"\"\" update_group_params =", "update_group_params._get_params() if isinstance( update_group_params, CreateUpdateGroupParams) else update_group_params.copy() if not \"clearEmptyFields\"", "return self._get() def update(self, update_group_params, clear_empty_fields=False): \"\"\" Updates the group", "requests_session, url_base, id): super().__init__(requests_session, url_base) self._pdata = {\"id\": id} def", "print_function, unicode_literals) from builtins import (ascii, bytes, chr, dict, filter,", "update_group_params = update_group_params._get_params() if isinstance( update_group_params, CreateUpdateGroupParams) else update_group_params.copy() if", "str, super, zip) from ...._utils import send_session_request from ..._PortalEndpointBase import", "if isinstance( update_group_params, CreateUpdateGroupParams) else update_group_params.copy() if not \"clearEmptyFields\" in", "pow, range, round, str, super, zip) from ...._utils import send_session_request", "unicode_literals) from builtins import (ascii, bytes, chr, dict, filter, hex,", "{\"id\": id} def get_properties(self): \"\"\" Gets the properties of the", "properties. \"\"\" update_group_params = update_group_params._get_params() if isinstance( update_group_params, CreateUpdateGroupParams) else", "import CreateUpdateGroupParams class Group(PortalEndpointBase): @property def id(self): return self._pdata[\"id\"] @property", "the item. 
\"\"\" return self._get() def update(self, update_group_params, clear_empty_fields=False): \"\"\"", "self.id) def __init__(self, requests_session, url_base, id): super().__init__(requests_session, url_base) self._pdata =", "\"clearEmptyFields\" in update_group_params: update_group_params[\"clearEmptyFields\"] = clear_empty_fields r = self._create_operation_request(self, \"update\",", "filter, hex, input, int, map, next, oct, open, pow, range,", "__future__ import (absolute_import, division, print_function, unicode_literals) from builtins import (ascii,", "def id(self): return self._pdata[\"id\"] @property def _url_full(self): return \"{0}/{1}\".format(self._url_base, self.id)", "get_properties(self): \"\"\" Gets the properties of the item. \"\"\" return", "import (ascii, bytes, chr, dict, filter, hex, input, int, map,", "int, map, next, oct, open, pow, range, round, str, super,", "from ..._PortalEndpointBase import PortalEndpointBase from .CreateUpdateGroupParams import CreateUpdateGroupParams class Group(PortalEndpointBase):", "PortalEndpointBase from .CreateUpdateGroupParams import CreateUpdateGroupParams class Group(PortalEndpointBase): @property def id(self):", "@property def id(self): return self._pdata[\"id\"] @property def _url_full(self): return \"{0}/{1}\".format(self._url_base,", "update_group_params.copy() if not \"clearEmptyFields\" in update_group_params: update_group_params[\"clearEmptyFields\"] = clear_empty_fields r", "else update_group_params.copy() if not \"clearEmptyFields\" in update_group_params: update_group_params[\"clearEmptyFields\"] = clear_empty_fields", "self._pdata[\"id\"] @property def _url_full(self): return \"{0}/{1}\".format(self._url_base, self.id) def __init__(self, requests_session,", "\"\"\" Updates the group properties. 
\"\"\" update_group_params = update_group_params._get_params() if", "in update_group_params: update_group_params[\"clearEmptyFields\"] = clear_empty_fields r = self._create_operation_request(self, \"update\", method=\"POST\",", "Gets the properties of the item. \"\"\" return self._get() def", "group properties. \"\"\" update_group_params = update_group_params._get_params() if isinstance( update_group_params, CreateUpdateGroupParams)", "id): super().__init__(requests_session, url_base) self._pdata = {\"id\": id} def get_properties(self): \"\"\"", "of the item. \"\"\" return self._get() def update(self, update_group_params, clear_empty_fields=False):", "from __future__ import (absolute_import, division, print_function, unicode_literals) from builtins import", "properties of the item. \"\"\" return self._get() def update(self, update_group_params,", "import (absolute_import, division, print_function, unicode_literals) from builtins import (ascii, bytes,", "def _url_full(self): return \"{0}/{1}\".format(self._url_base, self.id) def __init__(self, requests_session, url_base, id):", "self._pdata = {\"id\": id} def get_properties(self): \"\"\" Gets the properties", "\"{0}/{1}\".format(self._url_base, self.id) def __init__(self, requests_session, url_base, id): super().__init__(requests_session, url_base) self._pdata", "chr, dict, filter, hex, input, int, map, next, oct, open,", "update_group_params: update_group_params[\"clearEmptyFields\"] = clear_empty_fields r = self._create_operation_request(self, \"update\", method=\"POST\", data=update_group_params)", "builtins import (ascii, bytes, chr, dict, filter, hex, input, int,", "from .CreateUpdateGroupParams import CreateUpdateGroupParams class Group(PortalEndpointBase): @property def id(self): return", "division, print_function, unicode_literals) from builtins import (ascii, bytes, chr, dict,", "from builtins import (ascii, bytes, chr, dict, filter, hex, input,", "bytes, chr, dict, filter, hex, input, int, map, next, 
oct,", "..._PortalEndpointBase import PortalEndpointBase from .CreateUpdateGroupParams import CreateUpdateGroupParams class Group(PortalEndpointBase): @property", "super, zip) from ...._utils import send_session_request from ..._PortalEndpointBase import PortalEndpointBase", "(ascii, bytes, chr, dict, filter, hex, input, int, map, next,", "send_session_request from ..._PortalEndpointBase import PortalEndpointBase from .CreateUpdateGroupParams import CreateUpdateGroupParams class", "(absolute_import, division, print_function, unicode_literals) from builtins import (ascii, bytes, chr,", "update(self, update_group_params, clear_empty_fields=False): \"\"\" Updates the group properties. \"\"\" update_group_params", "= clear_empty_fields r = self._create_operation_request(self, \"update\", method=\"POST\", data=update_group_params) return send_session_request(self._session,", "CreateUpdateGroupParams class Group(PortalEndpointBase): @property def id(self): return self._pdata[\"id\"] @property def", "the group properties. \"\"\" update_group_params = update_group_params._get_params() if isinstance( update_group_params,", "Updates the group properties. 
\"\"\" update_group_params = update_group_params._get_params() if isinstance(", "clear_empty_fields r = self._create_operation_request(self, \"update\", method=\"POST\", data=update_group_params) return send_session_request(self._session, r).json()", "from ...._utils import send_session_request from ..._PortalEndpointBase import PortalEndpointBase from .CreateUpdateGroupParams", ".CreateUpdateGroupParams import CreateUpdateGroupParams class Group(PortalEndpointBase): @property def id(self): return self._pdata[\"id\"]", "self._get() def update(self, update_group_params, clear_empty_fields=False): \"\"\" Updates the group properties.", "= {\"id\": id} def get_properties(self): \"\"\" Gets the properties of", "url_base, id): super().__init__(requests_session, url_base) self._pdata = {\"id\": id} def get_properties(self):", "update_group_params[\"clearEmptyFields\"] = clear_empty_fields r = self._create_operation_request(self, \"update\", method=\"POST\", data=update_group_params) return", "open, pow, range, round, str, super, zip) from ...._utils import", "id} def get_properties(self): \"\"\" Gets the properties of the item.", "the properties of the item. \"\"\" return self._get() def update(self,", "Group(PortalEndpointBase): @property def id(self): return self._pdata[\"id\"] @property def _url_full(self): return", "def __init__(self, requests_session, url_base, id): super().__init__(requests_session, url_base) self._pdata = {\"id\":", "import PortalEndpointBase from .CreateUpdateGroupParams import CreateUpdateGroupParams class Group(PortalEndpointBase): @property def", "super().__init__(requests_session, url_base) self._pdata = {\"id\": id} def get_properties(self): \"\"\" Gets", "def update(self, update_group_params, clear_empty_fields=False): \"\"\" Updates the group properties. 
\"\"\"", "_url_full(self): return \"{0}/{1}\".format(self._url_base, self.id) def __init__(self, requests_session, url_base, id): super().__init__(requests_session,", "update_group_params, CreateUpdateGroupParams) else update_group_params.copy() if not \"clearEmptyFields\" in update_group_params: update_group_params[\"clearEmptyFields\"]", "clear_empty_fields=False): \"\"\" Updates the group properties. \"\"\" update_group_params = update_group_params._get_params()", "zip) from ...._utils import send_session_request from ..._PortalEndpointBase import PortalEndpointBase from", "CreateUpdateGroupParams) else update_group_params.copy() if not \"clearEmptyFields\" in update_group_params: update_group_params[\"clearEmptyFields\"] =" ]
# Generated by Django 2.1.7 on 2019-08-09 09:36

from django.db import migrations, models


def migrate_public_event(apps, schema_editor):
    """Migrate options previously with no contents (displayed as "Other:")
    to a new contents ("other"). The field containing these options is in
    CommonRequest abstract model, implemented in WorkshopRequest,
    WorkshopInquiryRequest, and SelfOrganizedSubmission models."""
    # Always fetch the historical model state, never the live model classes,
    # so this migration keeps working as the models evolve.
    WorkshopRequest = apps.get_model('workshops', 'WorkshopRequest')
    WorkshopInquiryRequest = apps.get_model('extrequests', 'WorkshopInquiryRequest')
    SelfOrganizedSubmission = apps.get_model('extrequests', 'SelfOrganizedSubmission')
    # Rewrite the empty "Other:" sentinel value to the explicit 'other' key
    # on every model that implements the CommonRequest abstract field.
    WorkshopRequest.objects.filter(public_event="") \
        .update(public_event="other")
    WorkshopInquiryRequest.objects.filter(public_event="") \
        .update(public_event="other")
    SelfOrganizedSubmission.objects.filter(public_event="") \
        .update(public_event="other")


class Migration(migrations.Migration):

    dependencies = [
        ('workshops', '0190_auto_20190728_1118'),
        ('extrequests', '0008_auto_20190809_1004'),
    ]

    operations = [
        migrations.AlterField(
            model_name='workshoprequest',
            name='host_responsibilities',
            field=models.BooleanField(default=False, verbose_name='I understand <a href="https://docs.carpentries.org/topic_folders/hosts_instructors/hosts_instructors_checklist.html#host-checklist">the responsibilities of the workshop host</a>, including recruiting local helpers to support the workshop (1 helper for every 8-10 learners).'),
        ),
        migrations.AlterField(
            model_name='workshoprequest',
            name='requested_workshop_types',
            field=models.ManyToManyField(help_text='If your learners are new to programming and primarily interested in working with data, Data Carpentry is likely the best choice. If your learners are interested in learning more about programming, including version control and automation, Software Carpentry is likely the best match. If your learners are people working in library and information related roles interested in learning data and software skills, Library Carpentry is the best choice. Please visit the <a href="https://software-carpentry.org/lessons/">Software Carpentry lessons page</a>, <a href="http://www.datacarpentry.org/lessons/">Data Carpentry lessons page</a>, or the <a href="https://librarycarpentry.org/lessons/">Library Carpentry lessons page</a> for more information about any of our lessons.', limit_choices_to={'active': True}, to='workshops.Curriculum', verbose_name='Which Carpentries workshop are you requesting?'),
        ),
        migrations.AlterField(
            model_name='workshoprequest',
            name='scholarship_circumstances',
            field=models.TextField(blank=True, help_text='Required only if you request a scholarship.', verbose_name='Please explain the circumstances for your scholarship request and let us know what budget you have towards The Carpentries workshop fees.'),
        ),
        migrations.AlterField(
            model_name='workshoprequest',
            name='public_event',
            field=models.CharField(blank=True, choices=[('invite', 'This event is open to learners by invitation only.'), ('closed', 'This event is open to learners inside of my institution.'), ('public', 'This event is open to learners outside of my institution.'), ('other', 'Other:')], default='', help_text='Many of our workshops restrict registration to learners from the hosting institution. If your workshop will be open to registrants outside of your institution please let us know below.', max_length=20, verbose_name='Is this workshop open to the public?'),
        ),
        # Fixed: supply a no-op reverse so the migration is reversible;
        # without reverse_code, RunPython makes backwards migration raise
        # IrreversibleError. The data rewrite is a one-way normalization,
        # so no reverse transformation is needed.
        migrations.RunPython(migrate_public_event, migrations.RunPython.noop),
    ]
If your workshop will be", "help_text='Required only if you request a scholarship.', verbose_name='Please explain the", "limit_choices_to={'active': True}, to='workshops.Curriculum', verbose_name='Which Carpentries workshop are you requesting?'), ),", "more about programming, including version control and automation, Software Carpentry", "= [ ('workshops', '0190_auto_20190728_1118'), ('extrequests', '0008_auto_20190809_1004'), ] operations = [", "model, implemented in WorkshopRequest, WorkshopInquiryRequest, and SelfOrganizedSubmission models.\"\"\" WorkshopRequest =", "), migrations.AlterField( model_name='workshoprequest', name='requested_workshop_types', field=models.ManyToManyField(help_text='If your learners are new to", "you requesting?'), ), migrations.AlterField( model_name='workshoprequest', name='scholarship_circumstances', field=models.TextField(blank=True, help_text='Required only if", "[ ('workshops', '0190_auto_20190728_1118'), ('extrequests', '0008_auto_20190809_1004'), ] operations = [ migrations.AlterField(", "Carpentry is the best choice. Please visit the <a href=\"https://software-carpentry.org/lessons/\">Software", "institution please let us know below.', max_length=20, verbose_name='Is this workshop", "in library and information related roles interested in learning data", "best match. If your learners are people working in library", "implemented in WorkshopRequest, WorkshopInquiryRequest, and SelfOrganizedSubmission models.\"\"\" WorkshopRequest = apps.get_model('workshops',", "to programming and primarily interested in working with data, Data", "are you requesting?'), ), migrations.AlterField( model_name='workshoprequest', name='scholarship_circumstances', field=models.TextField(blank=True, help_text='Required only", "know below.', max_length=20, verbose_name='Is this workshop open to the public?')," ]
[ "use_sigmoid: self.conv5 = nn.Sequential( nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2,", "4, ndf * 8, kernel_size=4, stride=2, padding=1), norm_layer(ndf * 8),", "nn class Discriminator(nn.Module): def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False) :", "kernel_size=4, stride=2, padding=1), norm_layer(ndf * 4), nn.LeakyReLU(0.2, True) ) self.conv4", "8), nn.LeakyReLU(0.2, True) ) if use_sigmoid: self.conv5 = nn.Sequential( nn.Conv2d(ndf", "4, kernel_size=4, stride=2, padding=1), norm_layer(ndf * 4), nn.LeakyReLU(0.2, True) )", "use_sigmoid=False) : super(Discriminator, self).__init__() self.conv1 = nn.Sequential( nn.Conv2d(input_nc, ndf, kernel_size=4,", "<gh_stars>0 import torch import torch.nn as nn class Discriminator(nn.Module): def", "kernel_size=4, stride=2, padding=1), nn.Sigmoid() ) else: self.conv5 = nn.Sequential( nn.Conv2d(ndf", "8, 1, kernel_size=4, stride=2, padding=1) ) def forward(self, x): x", "self.conv5 = nn.Sequential( nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1)", "nn.LeakyReLU(0.2, True) ) self.conv4 = nn.Sequential( nn.Conv2d(ndf * 4, ndf", "torch import torch.nn as nn class Discriminator(nn.Module): def __init__(self, input_nc,", "norm_layer(ndf * 8), nn.LeakyReLU(0.2, True) ) if use_sigmoid: self.conv5 =", "else: self.conv5 = nn.Sequential( nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2,", "4), nn.LeakyReLU(0.2, True) ) self.conv4 = nn.Sequential( nn.Conv2d(ndf * 4,", ": super(Discriminator, self).__init__() self.conv1 = nn.Sequential( nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2,", "__init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False) : super(Discriminator, self).__init__() self.conv1 =", "8, kernel_size=4, stride=2, padding=1), norm_layer(ndf * 8), nn.LeakyReLU(0.2, True) )", "input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False) : super(Discriminator, self).__init__() self.conv1 = nn.Sequential(", "x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) 
x", "def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False) : super(Discriminator, self).__init__() self.conv1", "nn.Sequential( nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1), norm_layer(ndf *", "= nn.Sequential( nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2,", "x = self.conv3(x) x = self.conv4(x) x = self.conv5(x) return", "x = self.conv2(x) x = self.conv3(x) x = self.conv4(x) x", "stride=2, padding=1), norm_layer(ndf * 2), nn.LeakyReLU(0.2, True) ) self.conv3 =", "= self.conv3(x) x = self.conv4(x) x = self.conv5(x) return x", "kernel_size=4, stride=2, padding=1), norm_layer(ndf * 2), nn.LeakyReLU(0.2, True) ) self.conv3", "self).__init__() self.conv1 = nn.Sequential( nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=1), nn.LeakyReLU(0.2,", "self.conv1 = nn.Sequential( nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=1), nn.LeakyReLU(0.2, True)", "x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x)", "nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1), norm_layer(ndf", "if use_sigmoid: self.conv5 = nn.Sequential( nn.Conv2d(ndf * 8, 1, kernel_size=4,", "stride=2, padding=1) ) def forward(self, x): x = self.conv1(x) x", "nn.Sequential( nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1), nn.Sigmoid() )", "import torch.nn as nn class Discriminator(nn.Module): def __init__(self, input_nc, ndf=64,", ") def forward(self, x): x = self.conv1(x) x = self.conv2(x)", "2, kernel_size=4, stride=2, padding=1), norm_layer(ndf * 2), nn.LeakyReLU(0.2, True) )", "Discriminator(nn.Module): def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False) : super(Discriminator, self).__init__()", "* 4, ndf * 8, kernel_size=4, stride=2, padding=1), norm_layer(ndf *", "* 8, 1, kernel_size=4, stride=2, padding=1), nn.Sigmoid() ) else: self.conv5", "= self.conv2(x) x = self.conv3(x) x = self.conv4(x) x =", "norm_layer(ndf * 2), nn.LeakyReLU(0.2, True) ) self.conv3 = nn.Sequential( 
nn.Conv2d(ndf", "kernel_size=4, stride=2, padding=1), norm_layer(ndf * 8), nn.LeakyReLU(0.2, True) ) if", "self.conv1(x) x = self.conv2(x) x = self.conv3(x) x = self.conv4(x)", "def forward(self, x): x = self.conv1(x) x = self.conv2(x) x", "nn.LeakyReLU(0.2, True) ) self.conv2 = nn.Sequential( nn.Conv2d(ndf, ndf * 2,", "padding=1), norm_layer(ndf * 2), nn.LeakyReLU(0.2, True) ) self.conv3 = nn.Sequential(", "True) ) self.conv2 = nn.Sequential( nn.Conv2d(ndf, ndf * 2, kernel_size=4,", "nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=1), nn.LeakyReLU(0.2, True) ) self.conv2 =", ") self.conv4 = nn.Sequential( nn.Conv2d(ndf * 4, ndf * 8,", "ndf, kernel_size=4, stride=2, padding=1), nn.LeakyReLU(0.2, True) ) self.conv2 = nn.Sequential(", "1, kernel_size=4, stride=2, padding=1), nn.Sigmoid() ) else: self.conv5 = nn.Sequential(", "* 4), nn.LeakyReLU(0.2, True) ) self.conv4 = nn.Sequential( nn.Conv2d(ndf *", "norm_layer=nn.BatchNorm2d, use_sigmoid=False) : super(Discriminator, self).__init__() self.conv1 = nn.Sequential( nn.Conv2d(input_nc, ndf,", "2), nn.LeakyReLU(0.2, True) ) self.conv3 = nn.Sequential( nn.Conv2d(ndf * 2,", "nn.LeakyReLU(0.2, True) ) self.conv3 = nn.Sequential( nn.Conv2d(ndf * 2, ndf", "stride=2, padding=1), norm_layer(ndf * 8), nn.LeakyReLU(0.2, True) ) if use_sigmoid:", "True) ) self.conv4 = nn.Sequential( nn.Conv2d(ndf * 4, ndf *", "8, 1, kernel_size=4, stride=2, padding=1), nn.Sigmoid() ) else: self.conv5 =", "padding=1), nn.Sigmoid() ) else: self.conv5 = nn.Sequential( nn.Conv2d(ndf * 8,", "* 8, 1, kernel_size=4, stride=2, padding=1) ) def forward(self, x):", "stride=2, padding=1), nn.Sigmoid() ) else: self.conv5 = nn.Sequential( nn.Conv2d(ndf *", "ndf * 8, kernel_size=4, stride=2, padding=1), norm_layer(ndf * 8), nn.LeakyReLU(0.2,", "nn.Sequential( nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1) ) def", "ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False) : super(Discriminator, self).__init__() self.conv1 = nn.Sequential( 
nn.Conv2d(input_nc,", "= nn.Sequential( nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=1), nn.LeakyReLU(0.2, True) )", "kernel_size=4, stride=2, padding=1) ) def forward(self, x): x = self.conv1(x)", "import torch import torch.nn as nn class Discriminator(nn.Module): def __init__(self,", ") if use_sigmoid: self.conv5 = nn.Sequential( nn.Conv2d(ndf * 8, 1,", "= self.conv1(x) x = self.conv2(x) x = self.conv3(x) x =", ") else: self.conv5 = nn.Sequential( nn.Conv2d(ndf * 8, 1, kernel_size=4,", "self.conv3 = nn.Sequential( nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4,", "padding=1), norm_layer(ndf * 8), nn.LeakyReLU(0.2, True) ) if use_sigmoid: self.conv5", "kernel_size=4, stride=2, padding=1), nn.LeakyReLU(0.2, True) ) self.conv2 = nn.Sequential( nn.Conv2d(ndf,", "True) ) self.conv3 = nn.Sequential( nn.Conv2d(ndf * 2, ndf *", "padding=1) ) def forward(self, x): x = self.conv1(x) x =", "nn.Sequential( nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1),", "self.conv2 = nn.Sequential( nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1),", "= nn.Sequential( nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1), nn.Sigmoid()", "class Discriminator(nn.Module): def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False) : super(Discriminator,", "* 2, ndf * 4, kernel_size=4, stride=2, padding=1), norm_layer(ndf *", "self.conv4 = nn.Sequential( nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4,", "* 2), nn.LeakyReLU(0.2, True) ) self.conv3 = nn.Sequential( nn.Conv2d(ndf *", "2, ndf * 4, kernel_size=4, stride=2, padding=1), norm_layer(ndf * 4),", "nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1) ) def forward(self,", "* 8, kernel_size=4, stride=2, padding=1), norm_layer(ndf * 8), nn.LeakyReLU(0.2, True)", "as nn class Discriminator(nn.Module): def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False)", "super(Discriminator, self).__init__() self.conv1 = nn.Sequential( nn.Conv2d(input_nc, ndf, 
kernel_size=4, stride=2, padding=1),", "stride=2, padding=1), nn.LeakyReLU(0.2, True) ) self.conv2 = nn.Sequential( nn.Conv2d(ndf, ndf", "nn.Sigmoid() ) else: self.conv5 = nn.Sequential( nn.Conv2d(ndf * 8, 1,", "self.conv2(x) x = self.conv3(x) x = self.conv4(x) x = self.conv5(x)", "nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1), nn.Sigmoid() ) else:", "torch.nn as nn class Discriminator(nn.Module): def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d,", "padding=1), nn.LeakyReLU(0.2, True) ) self.conv2 = nn.Sequential( nn.Conv2d(ndf, ndf *", "= nn.Sequential( nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1) )", "= nn.Sequential( nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1), norm_layer(ndf", "nn.Sequential( nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1),", "nn.LeakyReLU(0.2, True) ) if use_sigmoid: self.conv5 = nn.Sequential( nn.Conv2d(ndf *", "= nn.Sequential( nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2,", "True) ) if use_sigmoid: self.conv5 = nn.Sequential( nn.Conv2d(ndf * 8,", "nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1), norm_layer(ndf", "padding=1), norm_layer(ndf * 4), nn.LeakyReLU(0.2, True) ) self.conv4 = nn.Sequential(", "* 2, kernel_size=4, stride=2, padding=1), norm_layer(ndf * 2), nn.LeakyReLU(0.2, True)", "* 4, kernel_size=4, stride=2, padding=1), norm_layer(ndf * 4), nn.LeakyReLU(0.2, True)", "norm_layer(ndf * 4), nn.LeakyReLU(0.2, True) ) self.conv4 = nn.Sequential( nn.Conv2d(ndf", "1, kernel_size=4, stride=2, padding=1) ) def forward(self, x): x =", "forward(self, x): x = self.conv1(x) x = self.conv2(x) x =", "nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1), norm_layer(ndf * 2),", ") self.conv3 = nn.Sequential( nn.Conv2d(ndf * 2, ndf * 4,", "ndf * 2, kernel_size=4, stride=2, padding=1), norm_layer(ndf * 2), nn.LeakyReLU(0.2,", "* 8), nn.LeakyReLU(0.2, True) ) if use_sigmoid: self.conv5 = nn.Sequential(", "self.conv5 = nn.Sequential( nn.Conv2d(ndf * 
8, 1, kernel_size=4, stride=2, padding=1),", "stride=2, padding=1), norm_layer(ndf * 4), nn.LeakyReLU(0.2, True) ) self.conv4 =", "ndf * 4, kernel_size=4, stride=2, padding=1), norm_layer(ndf * 4), nn.LeakyReLU(0.2,", "nn.Sequential( nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=1), nn.LeakyReLU(0.2, True) ) self.conv2", ") self.conv2 = nn.Sequential( nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2," ]
[ "def setUp(self): self.testbed = MockTestBedv0() self.testbed.dataset = Spacy()(self.testbed.dataset, columns=[\"text\"]) def", "subpopulation slices, slice_matrix = length(self.testbed.dataset, columns=[\"text\"]) # Check that the", "length = LengthSubpopulation(intervals=[(1, 3), (4, 5)]) # Compute scores scores", "3), (4, 5)]) # Compute scores scores = length.score(self.testbed.dataset[:], columns=[\"text\"])", "MockTestBedv0() self.testbed.dataset = Spacy()(self.testbed.dataset, columns=[\"text\"]) def test_score(self): # Create the", "length(self.testbed.dataset, columns=[\"text\"]) # Check that the slice membership lines up", "Apply the subpopulation slices, slice_matrix = length(self.testbed.dataset, columns=[\"text\"]) # Check", "test_score(self): # Create the length subpopulation length = LengthSubpopulation(intervals=[(1, 3),", "= LengthSubpopulation(intervals=[(1, 3), (4, 5)]) # Compute scores scores =", "numpy as np from robustnessgym.cachedops.spacy import Spacy from robustnessgym.slicebuilders.subpopulations.length import", "columns=[\"text\"]) self.assertTrue(np.allclose(scores, np.array([5, 5, 5, 5, 5, 5]))) print(self.testbed.dataset.column_names) print(Spacy.retrieve(self.testbed.dataset[:],", "robustnessgym.cachedops.spacy import Spacy from robustnessgym.slicebuilders.subpopulations.length import LengthSubpopulation from tests.testbeds import", "from robustnessgym.slicebuilders.subpopulations.length import LengthSubpopulation from tests.testbeds import MockTestBedv0 class TestLengthSubpopulation(TestCase):", "5, 5, 5]))) print(self.testbed.dataset.column_names) print(Spacy.retrieve(self.testbed.dataset[:], [\"text\"])) # Apply the subpopulation", "# Apply the subpopulation slices, slice_matrix = length(self.testbed.dataset, columns=[\"text\"]) #", "self.testbed.dataset = Spacy()(self.testbed.dataset, columns=[\"text\"]) def test_score(self): # Create the length", "the subpopulation slices, slice_matrix = length(self.testbed.dataset, 
columns=[\"text\"]) # Check that", "# Compute scores scores = length.score(self.testbed.dataset[:], columns=[\"text\"]) self.assertTrue(np.allclose(scores, np.array([5, 5,", "TestCase import numpy as np from robustnessgym.cachedops.spacy import Spacy from", "slice_matrix = length(self.testbed.dataset, columns=[\"text\"]) # Check that the slice membership", "LengthSubpopulation from tests.testbeds import MockTestBedv0 class TestLengthSubpopulation(TestCase): def setUp(self): self.testbed", "scores = length.score(self.testbed.dataset[:], columns=[\"text\"]) self.assertTrue(np.allclose(scores, np.array([5, 5, 5, 5, 5,", "MockTestBedv0 class TestLengthSubpopulation(TestCase): def setUp(self): self.testbed = MockTestBedv0() self.testbed.dataset =", "5]))) print(self.testbed.dataset.column_names) print(Spacy.retrieve(self.testbed.dataset[:], [\"text\"])) # Apply the subpopulation slices, slice_matrix", "the slice membership lines up self.assertTrue(np.allclose(slice_matrix, np.array([[0, 1]] * 6)))", "np from robustnessgym.cachedops.spacy import Spacy from robustnessgym.slicebuilders.subpopulations.length import LengthSubpopulation from", "import TestCase import numpy as np from robustnessgym.cachedops.spacy import Spacy", "slices, slice_matrix = length(self.testbed.dataset, columns=[\"text\"]) # Check that the slice", "# Check that the slice membership lines up self.assertTrue(np.allclose(slice_matrix, np.array([[0,", "= Spacy()(self.testbed.dataset, columns=[\"text\"]) def test_score(self): # Create the length subpopulation", "import numpy as np from robustnessgym.cachedops.spacy import Spacy from robustnessgym.slicebuilders.subpopulations.length", "self.testbed = MockTestBedv0() self.testbed.dataset = Spacy()(self.testbed.dataset, columns=[\"text\"]) def test_score(self): #", "LengthSubpopulation(intervals=[(1, 3), (4, 5)]) # Compute scores scores = length.score(self.testbed.dataset[:],", "that the slice membership lines up self.assertTrue(np.allclose(slice_matrix, 
np.array([[0, 1]] *", "5, 5, 5, 5, 5]))) print(self.testbed.dataset.column_names) print(Spacy.retrieve(self.testbed.dataset[:], [\"text\"])) # Apply", "print(Spacy.retrieve(self.testbed.dataset[:], [\"text\"])) # Apply the subpopulation slices, slice_matrix = length(self.testbed.dataset,", "columns=[\"text\"]) def test_score(self): # Create the length subpopulation length =", "import MockTestBedv0 class TestLengthSubpopulation(TestCase): def setUp(self): self.testbed = MockTestBedv0() self.testbed.dataset", "= length.score(self.testbed.dataset[:], columns=[\"text\"]) self.assertTrue(np.allclose(scores, np.array([5, 5, 5, 5, 5, 5])))", "setUp(self): self.testbed = MockTestBedv0() self.testbed.dataset = Spacy()(self.testbed.dataset, columns=[\"text\"]) def test_score(self):", "subpopulation length = LengthSubpopulation(intervals=[(1, 3), (4, 5)]) # Compute scores", "length.score(self.testbed.dataset[:], columns=[\"text\"]) self.assertTrue(np.allclose(scores, np.array([5, 5, 5, 5, 5, 5]))) print(self.testbed.dataset.column_names)", "[\"text\"])) # Apply the subpopulation slices, slice_matrix = length(self.testbed.dataset, columns=[\"text\"])", "columns=[\"text\"]) # Check that the slice membership lines up self.assertTrue(np.allclose(slice_matrix,", "import Spacy from robustnessgym.slicebuilders.subpopulations.length import LengthSubpopulation from tests.testbeds import MockTestBedv0", "from unittest import TestCase import numpy as np from robustnessgym.cachedops.spacy", "from tests.testbeds import MockTestBedv0 class TestLengthSubpopulation(TestCase): def setUp(self): self.testbed =", "print(self.testbed.dataset.column_names) print(Spacy.retrieve(self.testbed.dataset[:], [\"text\"])) # Apply the subpopulation slices, slice_matrix =", "import LengthSubpopulation from tests.testbeds import MockTestBedv0 class TestLengthSubpopulation(TestCase): def setUp(self):", "5, 5]))) print(self.testbed.dataset.column_names) print(Spacy.retrieve(self.testbed.dataset[:], 
[\"text\"])) # Apply the subpopulation slices,", "5, 5, 5, 5]))) print(self.testbed.dataset.column_names) print(Spacy.retrieve(self.testbed.dataset[:], [\"text\"])) # Apply the", "= MockTestBedv0() self.testbed.dataset = Spacy()(self.testbed.dataset, columns=[\"text\"]) def test_score(self): # Create", "TestLengthSubpopulation(TestCase): def setUp(self): self.testbed = MockTestBedv0() self.testbed.dataset = Spacy()(self.testbed.dataset, columns=[\"text\"])", "as np from robustnessgym.cachedops.spacy import Spacy from robustnessgym.slicebuilders.subpopulations.length import LengthSubpopulation", "length subpopulation length = LengthSubpopulation(intervals=[(1, 3), (4, 5)]) # Compute", "unittest import TestCase import numpy as np from robustnessgym.cachedops.spacy import", "def test_score(self): # Create the length subpopulation length = LengthSubpopulation(intervals=[(1,", "(4, 5)]) # Compute scores scores = length.score(self.testbed.dataset[:], columns=[\"text\"]) self.assertTrue(np.allclose(scores,", "Check that the slice membership lines up self.assertTrue(np.allclose(slice_matrix, np.array([[0, 1]]", "tests.testbeds import MockTestBedv0 class TestLengthSubpopulation(TestCase): def setUp(self): self.testbed = MockTestBedv0()", "# Create the length subpopulation length = LengthSubpopulation(intervals=[(1, 3), (4,", "Spacy()(self.testbed.dataset, columns=[\"text\"]) def test_score(self): # Create the length subpopulation length", "robustnessgym.slicebuilders.subpopulations.length import LengthSubpopulation from tests.testbeds import MockTestBedv0 class TestLengthSubpopulation(TestCase): def", "Compute scores scores = length.score(self.testbed.dataset[:], columns=[\"text\"]) self.assertTrue(np.allclose(scores, np.array([5, 5, 5,", "scores scores = length.score(self.testbed.dataset[:], columns=[\"text\"]) self.assertTrue(np.allclose(scores, np.array([5, 5, 5, 5,", "class TestLengthSubpopulation(TestCase): def setUp(self): self.testbed = MockTestBedv0() 
self.testbed.dataset = Spacy()(self.testbed.dataset,", "5)]) # Compute scores scores = length.score(self.testbed.dataset[:], columns=[\"text\"]) self.assertTrue(np.allclose(scores, np.array([5,", "np.array([5, 5, 5, 5, 5, 5]))) print(self.testbed.dataset.column_names) print(Spacy.retrieve(self.testbed.dataset[:], [\"text\"])) #", "= length(self.testbed.dataset, columns=[\"text\"]) # Check that the slice membership lines", "self.assertTrue(np.allclose(scores, np.array([5, 5, 5, 5, 5, 5]))) print(self.testbed.dataset.column_names) print(Spacy.retrieve(self.testbed.dataset[:], [\"text\"]))", "from robustnessgym.cachedops.spacy import Spacy from robustnessgym.slicebuilders.subpopulations.length import LengthSubpopulation from tests.testbeds", "Create the length subpopulation length = LengthSubpopulation(intervals=[(1, 3), (4, 5)])", "Spacy from robustnessgym.slicebuilders.subpopulations.length import LengthSubpopulation from tests.testbeds import MockTestBedv0 class", "the length subpopulation length = LengthSubpopulation(intervals=[(1, 3), (4, 5)]) #" ]
[ "!= \"\": freqs.append(1382) fluxs.append(float(row[3].strip())) flux_errs.append(float(row[4].strip())) if row[5].strip() != \"\" and", "\"\" and row[6].strip() != \"\": freqs.append(3100) fluxs.append(float(row[5].strip())) flux_errs.append(float(row[6].strip())) pulsar_dict[pulsar] =", "= {} for row in lines[3:]: row = row.split(\"|\") print(row)", "import Vizier with open(\"Jankowski_2018_raw.txt\", \"r\") as raw_file: lines = raw_file.readlines()", "no error means it's an upper limit andnow sure how", "fluxs = [] flux_errs = [] # If no error", "row[6].strip() != \"\": freqs.append(3100) fluxs.append(float(row[5].strip())) flux_errs.append(float(row[6].strip())) pulsar_dict[pulsar] = {\"Frequency MHz\":freqs,", "[] flux_errs = [] # If no error means it's", "open(\"Jankowski_2018_raw.txt\", \"r\") as raw_file: lines = raw_file.readlines() print(lines) pulsar_dict =", "!= \"\" and row[4].strip() != \"\": freqs.append(1382) fluxs.append(float(row[3].strip())) flux_errs.append(float(row[4].strip())) if", "!= \"\" and row[2].strip() != \"\": freqs.append(728) fluxs.append(float(row[1].strip())) flux_errs.append(float(row[2].strip())) if", "handle it if row[1].strip() != \"\" and row[2].strip() != \"\":", "raw_file: lines = raw_file.readlines() print(lines) pulsar_dict = {} for row", "and row[4].strip() != \"\": freqs.append(1382) fluxs.append(float(row[3].strip())) flux_errs.append(float(row[4].strip())) if row[5].strip() !=", "in lines[3:]: row = row.split(\"|\") print(row) pulsar = row[0].strip().replace(\"−\", \"-\")", "= [] # If no error means it's an upper", "with open(\"Jankowski_2018_raw.txt\", \"r\") as raw_file: lines = raw_file.readlines() print(lines) pulsar_dict", "json from astroquery.vizier import Vizier with open(\"Jankowski_2018_raw.txt\", \"r\") as raw_file:", "pulsar = row[0].strip().replace(\"−\", \"-\") freqs = [] fluxs = []", "= raw_file.readlines() print(lines) pulsar_dict = {} for row in lines[3:]:", "for row in lines[3:]: row = 
row.split(\"|\") print(row) pulsar =", "row[2].strip() != \"\": freqs.append(728) fluxs.append(float(row[1].strip())) flux_errs.append(float(row[2].strip())) if row[3].strip() != \"\"", "row in lines[3:]: row = row.split(\"|\") print(row) pulsar = row[0].strip().replace(\"−\",", "row[1].strip() != \"\" and row[2].strip() != \"\": freqs.append(728) fluxs.append(float(row[1].strip())) flux_errs.append(float(row[2].strip()))", "row[5].strip() != \"\" and row[6].strip() != \"\": freqs.append(3100) fluxs.append(float(row[5].strip())) flux_errs.append(float(row[6].strip()))", "upper limit andnow sure how to handle it if row[1].strip()", "means it's an upper limit andnow sure how to handle", "print(lines) pulsar_dict = {} for row in lines[3:]: row =", "row = row.split(\"|\") print(row) pulsar = row[0].strip().replace(\"−\", \"-\") freqs =", "if row[5].strip() != \"\" and row[6].strip() != \"\": freqs.append(3100) fluxs.append(float(row[5].strip()))", "= row[0].strip().replace(\"−\", \"-\") freqs = [] fluxs = [] flux_errs", "lines = raw_file.readlines() print(lines) pulsar_dict = {} for row in", "= {\"Frequency MHz\":freqs, \"Flux Density mJy\":fluxs, \"Flux Density error mJy\":flux_errs}", "flux_errs = [] # If no error means it's an", "Vizier with open(\"Jankowski_2018_raw.txt\", \"r\") as raw_file: lines = raw_file.readlines() print(lines)", "sure how to handle it if row[1].strip() != \"\" and", "If no error means it's an upper limit andnow sure", "freqs.append(728) fluxs.append(float(row[1].strip())) flux_errs.append(float(row[2].strip())) if row[3].strip() != \"\" and row[4].strip() !=", "pulsar_dict = {} for row in lines[3:]: row = row.split(\"|\")", "to handle it if row[1].strip() != \"\" and row[2].strip() !=", "flux_errs.append(float(row[2].strip())) if row[3].strip() != \"\" and row[4].strip() != \"\": freqs.append(1382)", "= [] fluxs = [] flux_errs = [] # If", "row[4].strip() != \"\": freqs.append(1382) fluxs.append(float(row[3].strip())) 
flux_errs.append(float(row[4].strip())) if row[5].strip() != \"\"", "[] fluxs = [] flux_errs = [] # If no", "how to handle it if row[1].strip() != \"\" and row[2].strip()", "MHz\":freqs, \"Flux Density mJy\":fluxs, \"Flux Density error mJy\":flux_errs} with open(\"Jankowski_2018.yaml\",", "it if row[1].strip() != \"\" and row[2].strip() != \"\": freqs.append(728)", "{} for row in lines[3:]: row = row.split(\"|\") print(row) pulsar", "Density error mJy\":flux_errs} with open(\"Jankowski_2018.yaml\", \"w\") as cat_file: cat_file.write(json.dumps(pulsar_dict)) print(pulsar_dict)", "!= \"\": freqs.append(728) fluxs.append(float(row[1].strip())) flux_errs.append(float(row[2].strip())) if row[3].strip() != \"\" and", "freqs.append(3100) fluxs.append(float(row[5].strip())) flux_errs.append(float(row[6].strip())) pulsar_dict[pulsar] = {\"Frequency MHz\":freqs, \"Flux Density mJy\":fluxs,", "\"\" and row[4].strip() != \"\": freqs.append(1382) fluxs.append(float(row[3].strip())) flux_errs.append(float(row[4].strip())) if row[5].strip()", "import json from astroquery.vizier import Vizier with open(\"Jankowski_2018_raw.txt\", \"r\") as", "\"\": freqs.append(1382) fluxs.append(float(row[3].strip())) flux_errs.append(float(row[4].strip())) if row[5].strip() != \"\" and row[6].strip()", "row.split(\"|\") print(row) pulsar = row[0].strip().replace(\"−\", \"-\") freqs = [] fluxs", "[] # If no error means it's an upper limit", "limit andnow sure how to handle it if row[1].strip() !=", "and row[6].strip() != \"\": freqs.append(3100) fluxs.append(float(row[5].strip())) flux_errs.append(float(row[6].strip())) pulsar_dict[pulsar] = {\"Frequency", "fluxs.append(float(row[5].strip())) flux_errs.append(float(row[6].strip())) pulsar_dict[pulsar] = {\"Frequency MHz\":freqs, \"Flux Density mJy\":fluxs, \"Flux", "and row[2].strip() != \"\": freqs.append(728) fluxs.append(float(row[1].strip())) flux_errs.append(float(row[2].strip())) if row[3].strip() !=", 
"flux_errs.append(float(row[4].strip())) if row[5].strip() != \"\" and row[6].strip() != \"\": freqs.append(3100)", "raw_file.readlines() print(lines) pulsar_dict = {} for row in lines[3:]: row", "# If no error means it's an upper limit andnow", "\"Flux Density error mJy\":flux_errs} with open(\"Jankowski_2018.yaml\", \"w\") as cat_file: cat_file.write(json.dumps(pulsar_dict))", "astroquery.vizier import Vizier with open(\"Jankowski_2018_raw.txt\", \"r\") as raw_file: lines =", "error means it's an upper limit andnow sure how to", "\"\": freqs.append(728) fluxs.append(float(row[1].strip())) flux_errs.append(float(row[2].strip())) if row[3].strip() != \"\" and row[4].strip()", "fluxs.append(float(row[3].strip())) flux_errs.append(float(row[4].strip())) if row[5].strip() != \"\" and row[6].strip() != \"\":", "\"\": freqs.append(3100) fluxs.append(float(row[5].strip())) flux_errs.append(float(row[6].strip())) pulsar_dict[pulsar] = {\"Frequency MHz\":freqs, \"Flux Density", "andnow sure how to handle it if row[1].strip() != \"\"", "!= \"\": freqs.append(3100) fluxs.append(float(row[5].strip())) flux_errs.append(float(row[6].strip())) pulsar_dict[pulsar] = {\"Frequency MHz\":freqs, \"Flux", "\"-\") freqs = [] fluxs = [] flux_errs = []", "row[0].strip().replace(\"−\", \"-\") freqs = [] fluxs = [] flux_errs =", "flux_errs.append(float(row[6].strip())) pulsar_dict[pulsar] = {\"Frequency MHz\":freqs, \"Flux Density mJy\":fluxs, \"Flux Density", "print(row) pulsar = row[0].strip().replace(\"−\", \"-\") freqs = [] fluxs =", "as raw_file: lines = raw_file.readlines() print(lines) pulsar_dict = {} for", "= [] flux_errs = [] # If no error means", "freqs.append(1382) fluxs.append(float(row[3].strip())) flux_errs.append(float(row[4].strip())) if row[5].strip() != \"\" and row[6].strip() !=", "lines[3:]: row = row.split(\"|\") print(row) pulsar = row[0].strip().replace(\"−\", \"-\") freqs", "an upper limit andnow sure how to handle it if", "if row[1].strip() != \"\" and 
row[2].strip() != \"\": freqs.append(728) fluxs.append(float(row[1].strip()))", "pulsar_dict[pulsar] = {\"Frequency MHz\":freqs, \"Flux Density mJy\":fluxs, \"Flux Density error", "\"r\") as raw_file: lines = raw_file.readlines() print(lines) pulsar_dict = {}", "fluxs.append(float(row[1].strip())) flux_errs.append(float(row[2].strip())) if row[3].strip() != \"\" and row[4].strip() != \"\":", "from astroquery.vizier import Vizier with open(\"Jankowski_2018_raw.txt\", \"r\") as raw_file: lines", "freqs = [] fluxs = [] flux_errs = [] #", "= row.split(\"|\") print(row) pulsar = row[0].strip().replace(\"−\", \"-\") freqs = []", "\"\" and row[2].strip() != \"\": freqs.append(728) fluxs.append(float(row[1].strip())) flux_errs.append(float(row[2].strip())) if row[3].strip()", "mJy\":fluxs, \"Flux Density error mJy\":flux_errs} with open(\"Jankowski_2018.yaml\", \"w\") as cat_file:", "!= \"\" and row[6].strip() != \"\": freqs.append(3100) fluxs.append(float(row[5].strip())) flux_errs.append(float(row[6].strip())) pulsar_dict[pulsar]", "if row[3].strip() != \"\" and row[4].strip() != \"\": freqs.append(1382) fluxs.append(float(row[3].strip()))", "row[3].strip() != \"\" and row[4].strip() != \"\": freqs.append(1382) fluxs.append(float(row[3].strip())) flux_errs.append(float(row[4].strip()))", "Density mJy\":fluxs, \"Flux Density error mJy\":flux_errs} with open(\"Jankowski_2018.yaml\", \"w\") as", "it's an upper limit andnow sure how to handle it", "\"Flux Density mJy\":fluxs, \"Flux Density error mJy\":flux_errs} with open(\"Jankowski_2018.yaml\", \"w\")", "{\"Frequency MHz\":freqs, \"Flux Density mJy\":fluxs, \"Flux Density error mJy\":flux_errs} with" ]
[ "log_file_paths: for file in log_file_paths: absolute_file_path = Path(cm.workspace + \"/\"", "global variable called pom_file_paths in the # configure_product method and", "cm.database_config['url'] + \"/\" + DEFAULT_ORACLE_SID shared_url= cm.database_config['url'] + \"/\" +", "2.0 (the \"License\"); # you may not use this file", "script_name) cm.extract_product(storage_dir_abs_path, storage_zip_abs_path) cm.attach_jolokia_agent(script_path) cm.copy_jar_file(Path(cm.database_config['sql_driver_location']), Path(storage_dist_abs_path / LIB_PATH)) if datasource_path", "Please verify the content of the property file \" \"and", "importing required modules import sys from xml.etree import ElementTree as", "build_source_without_tests(source_path): \"\"\"Build the product-source. \"\"\" logger.info('Building the source skipping tests')", "= ET.parse(file_path) artifarct_root = artifact_tree.getroot() data_sources = artifarct_root.find('d:build', NS) plugins", "\"/\" + INTEGRATION_PATH) logger.info('Building integration module. Build path: '+ str(intg_module_path)", "make samples and required artifacts to be available. 
build_source_without_tests(cm.workspace +", "= Path(WSO2SERVER) script_path = Path(storage_dist_abs_path / script_name) cm.extract_product(storage_dir_abs_path, storage_zip_abs_path) cm.attach_jolokia_agent(script_path)", "cm.database_config['password'] driver_class_name = cm.database_config['driver_class_name'] os.environ[\"SHARED_DATABASE_URL\"] = shared_url os.environ[\"SHARED_DATABASE_USERNAME\"] = user", "all products def save_log_files(): log_storage = Path(cm.workspace + \"/\" +", "DEFAULT_DB_USERNAME, LOG_STORAGE, TEST_OUTPUT_DIR_NAME, DEFAULT_ORACLE_SID, MYSQL_DB_ENGINE, \\ ORACLE_DB_ENGINE, PRODUCT_STORAGE_DIR_NAME, MSSQL_DB_ENGINE database_names", "plugin.find('d:artifactId', NS) if artifact_id is not None and artifact_id.text ==", "+ \\ \"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\" \\ \"&amp;verifyServerCertificate=false\" user = cm.database_config['user'] elif ORACLE_DB_ENGINE", "# and import it to run-intg-test.py. Thereafter assign it to", "cm.product_id + \"/\" + pom) if sys.platform.startswith('win'): file_path = cm.winapi_path(file_path)", "os.environ[\"IDENTITY_DATABASE_URL\"] = identity_url os.environ[\"IDENTITY_DATABASE_USERNAME\"] = user os.environ[\"IDENTITY_DATABASE_PASSWORD\"] = password os.environ[\"IDENTITY_DATABASE_DRIVER\"]", "onerror=cm.on_rm_error) return database_names except FileNotFoundError as e: logger.error(\"Error occurred while", "cm.product_id + \"/\" + INTEGRATION_PATH) logger.info('Building integration module. 
Build path:", "identity_url= cm.database_config['url'] + \"/\" + DEFAULT_ORACLE_SID shared_url= cm.database_config['url'] + \"/\"", "= \"$env{IDENTITY_DATABASE_DRIVER}\" database_names.append(identity_db) if key == 'shared_db': shared_db_config = database_config['shared_db']", "paths in const_<prod>.py as a constant # and import it", "# if cm.use_custom_testng_file == \"TRUE\": # testng_source = Path(cm.workspace +", "== \"SNAPSHOT\": # product name retrieve from product pom files", "License for the specific language governing permissions and # limitations", "e: logger.error(\"Error occurred while accessing files\", exc_info=True) except Exception as", "Reserved. # # Licensed under the Apache License, Version 2.0", "artifact_tree.getroot() data_sources = artifarct_root.find('d:build', NS) plugins = data_sources.find('d:plugins', NS) for", "TEST_OUTPUT_DIR_NAME, DEFAULT_ORACLE_SID, MYSQL_DB_ENGINE, \\ ORACLE_DB_ENGINE, PRODUCT_STORAGE_DIR_NAME, MSSQL_DB_ENGINE database_names = []", "\"/\" + cm.product_id + \"/\" + 'modules/integration/tests-common') logger.info('Building common module.", "Path(storage_dir_abs_path / dist_name) storage_zip_abs_path = Path(storage_dir_abs_path / zip_name) configured_dist_storing_loc =", "\\ \"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\" \\ \"&amp;verifyServerCertificate=false\" user = cm.database_config['user'] elif ORACLE_DB_ENGINE ==", "+ \"/\" + cm.product_id + \"/\" + TESTNG_DIST_XML_PATHS) # testng_server_mgt_source", "artifact_id.text == SURFACE_PLUGIN_ARTIFACT_ID: configuration = plugin.find('d:configuration', NS) system_properties = configuration.find('d:systemProperties',", "\"/\" + cm.product_id + \"/\" + pom) if sys.platform.startswith('win'): file_path", "\"$env{SHARED_DATABASE_DRIVER}\" database_names.append(shared_db) with open(file_path, 'w') as writer: writer.write(toml.dumps(deployment_toml_config)) # Since", "cm.database_config['db_engine'].upper(): identity_url = cm.database_config[ 'url'] + \"/\" + 
identity_db +", "POM_FILE_PATHS from intg_test_constant import NS, ZIP_FILE_EXTENSION, CARBON_NAME, VALUE_TAG, SURFACE_PLUGIN_ARTIFACT_ID, \\", "product configuring\") cm.setup_databases(db_names, db_meta_data) # run integration tests # Buld", "+ \"/\" + \"testng-server-mgt.xml\") # testng_server_mgt_destination = Path(cm.workspace + \"/\"", "'w') as writer: writer.write(toml.dumps(deployment_toml_config)) # Since we have added a", "shared_db = \"WSO2_SHARED_DB\" def get_db_meta_data(argument): switcher = DB_META_DATA return switcher.get(argument,", "testng_source = Path(cm.workspace + \"/\" + \"testng.xml\") # testng_destination =", "cm.modify_distribution_name(data) break artifact_tree.write(file_path) #TODO: Improve the method in generic way", "key in database_config: if key == 'identity_db': identity_db_config = database_config['identity_db']", "can define pom file paths in const_<prod>.py as a constant", "= <PASSWORD> identity_db_driver = None shared_db_url = None shared_db_username =", "# importing required modules import sys from xml.etree import ElementTree", "files dist_name = cm.get_dist_name(pom_path) # build the product without test", "shared_url = cm.database_config[ 'url'] + \"/\" + shared_db + \\", "artifact_tree = ET.parse(file_path) artifarct_root = artifact_tree.getroot() data_sources = artifarct_root.find('d:build', NS)", "import it to run-intg-test.py. 
Thereafter assign it to global variable", "cm.replace_file(testng_source, testng_destination) # # replace testng server mgt source #", "\\n') cm.build_module(module_path) intg_module_path = Path(cm.workspace + \"/\" + cm.product_id +", "ET.parse(file_path) artifarct_root = artifact_tree.getroot() data_sources = artifarct_root.find('d:build', NS) plugins =", "elif ORACLE_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url= cm.database_config['url'] + \"/\" + DEFAULT_ORACLE_SID", "Hence in the current implementation this method is not using.", "without test once to make samples and required artifacts to", "= ARTIFACT_REPORTS_PATHS for key, value in report_file_paths.items(): for file in", "'install', '-B', '-e', '-Dmaven.test.skip=true'], cwd=source_path) logger.info('Module build is completed. Module:", "+ LOG_STORAGE) if not Path.exists(log_storage): Path(log_storage).mkdir(parents=True, exist_ok=True) log_file_paths = ARTIFACT_REPORTS_PATHS", "module_path = Path(cm.workspace + \"/\" + cm.product_id + \"/\" +", "OF ANY KIND, either express or implied. # See the", "See the License for the specific language governing permissions and", "in generic way to support all products def save_test_output(): report_folder", "global datasource_path global target_dir_abs_path global storage_dist_abs_path global pom_file_paths datasource_path =", "git branch and checkout to the latest released tag it", "to in writing, software # distributed under the License is", "is completed. 
Module: ' + str(source_path)) def main(): try: global", "accessing files\", exc_info=True) except Exception as e: logger.error(\"Error occurred while", "TESTNG_SERVER_MGT_DIST) # # replace testng source # cm.replace_file(testng_source, testng_destination) #", "run integration tests # Buld Common module add_environmental_variables() module_path =", "plugin.find('d:configuration', NS) system_properties = configuration.find('d:systemProperties', NS) for neighbor in system_properties.iter('{'", "or agreed to in writing, software # distributed under the", "file paths in const_<prod>.py as a constant # and import", "storage_dist_abs_path global pom_file_paths datasource_path = DATASOURCE_PATHS zip_name = dist_name +", "is not None and artifact_id.text == SURFACE_PLUGIN_ARTIFACT_ID: configuration = plugin.find('d:configuration',", "pom_file_paths in the # configure_product method and call the modify_pom_files", "os from prod_test_constant import DB_META_DATA, DIST_POM_PATH, INTEGRATION_PATH, DISTRIBUTION_PATH, \\ DATASOURCE_PATHS,", "None shared_db_username = None shared_db_password = <PASSWORD> shared_db_driver = None", "os import shutil import pymysql import sqlparse import re from", "the product-source. \"\"\" logger.info('Building the source skipping tests') if sys.platform.startswith('win'):", "NS) for neighbor in system_properties.iter('{' + NS['d'] + '}' +", "else: logger.error(\"File doesn't contain in the given location: \" +", "VALUE_TAG, SURFACE_PLUGIN_ARTIFACT_ID, \\ DEPLOYMENT_PROPERTY_FILE_NAME, LOG_FILE_NAME, PRODUCT_STORAGE_DIR_NAME, \\ DEFAULT_DB_USERNAME, LOG_STORAGE, TEST_OUTPUT_DIR_NAME,", "compliance with the License. # You may obtain a copy", "All Rights Reserved. 
# # Licensed under the Apache License,", "configured_dist_storing_loc = Path(target_dir_abs_path / dist_name) script_name = Path(WSO2SERVER) script_path =", "+ \"testng.xml\") # testng_destination = Path(cm.workspace + \"/\" + cm.product_id", "database_config: if key == 'identity_db': identity_db_config = database_config['identity_db'] identity_db_config ['url']", "replace testng source # cm.replace_file(testng_source, testng_destination) # # replace testng", "= configure_product() if db_names is None or not db_names: raise", "+ str(source_path)) def main(): try: global logger global dist_name logger", "not use this file except in compliance with the License.", "= Path(cm.workspace + \"/\" + \"testng.xml\") # testng_destination = Path(cm.workspace", "return database_names except FileNotFoundError as e: logger.error(\"Error occurred while finding", "if key == 'database': database_config = deployment_toml_config[key] for key in", "\"/\" + \"testng-server-mgt.xml\") # testng_server_mgt_destination = Path(cm.workspace + \"/\" +", "as urllib2 from xml.dom import minidom import intg_test_manager as cm", "cm.modify_distribution_name(neighbor) for prop in system_properties: name = prop.find('d:name', NS) if", "2018, WSO2 Inc. (http://wso2.com) All Rights Reserved. # # Licensed", "database_config['shared_db'] shared_db_config ['url'] = \"$env{SHARED_DATABASE_URL}\" shared_db_config ['username'] = \"$env{SHARED_DATABASE_USERNAME}\" shared_db_config", "you may not use this file except in compliance with", "\"/\" + 'modules/integration/tests-common') logger.info('Building common module. 
Build path: '+ str(module_path)", "add_environmental_variables(): if MYSQL_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url = cm.database_config[ 'url'] +", "# # replace testng server mgt source # cm.replace_file(testng_server_mgt_source, testng_server_mgt_destination)", "\"WSO2_IDENTITY_DB\" shared_db = \"WSO2_SHARED_DB\" def get_db_meta_data(argument): switcher = DB_META_DATA return", "from xml.dom import minidom import intg_test_manager as cm from subprocess", "testng_server_mgt_destination = Path(cm.workspace + \"/\" + cm.product_id + \"/\" +", "= cm.get_dist_name(pom_path) # build the product without test once to", "+ TESTNG_DIST_XML_PATHS) # testng_server_mgt_source = Path(cm.workspace + \"/\" + \"testng-server-mgt.xml\")", "= cm.winapi_path(file_path) logger.info(\"Modifying datasource: \" + str(file_path)) deployment_toml_config = toml.load(file_path)", "'+ str(module_path) + ' \\n') cm.build_module(module_path) intg_module_path = Path(cm.workspace +", "deployment_toml_config: if key == 'database': database_config = deployment_toml_config[key] for key", "\"/\" + TESTNG_DIST_XML_PATHS) # testng_server_mgt_source = Path(cm.workspace + \"/\" +", "subprocess import wget import logging import inspect import os import", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "\"TRUE\": # testng_source = Path(cm.workspace + \"/\" + \"testng.xml\") #", "Path(cm.workspace + \"/\" + cm.product_id + \"/\" + pom) if", "= cm.database_config['url'] + \"/\" + identity_db user = cm.database_config['user'] password", "the database configurations cm.construct_db_config(db_meta_data) # clone the repository cm.clone_repo() if", "once to make samples and required artifacts to be available.", "have mandatory key-value pair. 
Please verify the content of the", "= DATASOURCE_PATHS zip_name = dist_name + ZIP_FILE_EXTENSION storage_dir_abs_path = Path(cm.workspace", "data.text = cm.modify_distribution_name(data) break artifact_tree.write(file_path) #TODO: Improve the method in", "log_file_paths = ARTIFACT_REPORTS_PATHS if log_file_paths: for file in log_file_paths: absolute_file_path", "+ \"/\" + cm.product_id + \"/\" + file) if Path.exists(absolute_file_path):", "database configurations cm.construct_db_config(db_meta_data) # clone the repository cm.clone_repo() if cm.test_mode", "return switcher.get(argument, False) def add_environmental_variables(): if MYSQL_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url", "(c) 2018, WSO2 Inc. (http://wso2.com) All Rights Reserved. # #", "TEST_OUTPUT_DIR_NAME + \"/\" + key) cm.copy_file(absolute_file_path, report_storage) logger.info(\"Report successfully copied\")", "in the given location: \" + str(absolute_file_path)) #TODO: Improve the", "== SURFACE_PLUGIN_ARTIFACT_ID: configuration = plugin.find('d:configuration', NS) system_properties = configuration.find('d:systemProperties', NS)", "# clone the repository cm.clone_repo() if cm.test_mode == \"RELEASE\": cm.checkout_to_tag()", "method to clone a given git branch and checkout to", "def add_environmental_variables(): if MYSQL_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url = cm.database_config[ 'url']", "+ \"/\" + identity_db + \"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\" \\ \"&amp;verifyServerCertificate=false\" shared_url =", "while configuring the product\", exc_info=True) def build_source_without_tests(source_path): \"\"\"Build the product-source.", "logger.info('Building integration module. 
Build path: '+ str(intg_module_path) + ' \\n')", "pom_file_paths datasource_path = DATASOURCE_PATHS zip_name = dist_name + ZIP_FILE_EXTENSION storage_dir_abs_path", "<PASSWORD>{<PASSWORD>}\" shared_db_config ['driver'] = \"$env{SHARED_DATABASE_DRIVER}\" database_names.append(shared_db) with open(file_path, 'w') as", "set_custom_testng(): # if cm.use_custom_testng_file == \"TRUE\": # testng_source = Path(cm.workspace", "+ DISTRIBUTION_PATH) storage_dist_abs_path = Path(storage_dir_abs_path / dist_name) storage_zip_abs_path = Path(storage_dir_abs_path", "key in deployment_toml_config: if key == 'database': database_config = deployment_toml_config[key]", "+ file) if Path.exists(absolute_file_path): cm.copy_file(absolute_file_path, log_storage) else: logger.error(\"File doesn't contain", "from intg_test_constant import NS, ZIP_FILE_EXTENSION, CARBON_NAME, VALUE_TAG, SURFACE_PLUGIN_ARTIFACT_ID, \\ DEPLOYMENT_PROPERTY_FILE_NAME,", "intg_test_constant import NS, ZIP_FILE_EXTENSION, CARBON_NAME, VALUE_TAG, SURFACE_PLUGIN_ARTIFACT_ID, \\ DEPLOYMENT_PROPERTY_FILE_NAME, LOG_FILE_NAME,", "to support all products def save_log_files(): log_storage = Path(cm.workspace +", "\"$env{IDENTITY_DATABASE_URL}\" identity_db_config ['username'] = \"$env{IDENTITY_DATABASE_USERNAME}\" identity_db_config ['password'] = <PASSWORD>}\" identity_db_config", "os.environ[\"SHARED_DATABASE_USERNAME\"] = user os.environ[\"SHARED_DATABASE_PASSWORD\"] = password os.environ[\"SHARED_DATABASE_DRIVER\"] = driver_class_name os.environ[\"IDENTITY_DATABASE_URL\"]", "logger.error(\"Error occurred while configuring the product\", exc_info=True) def build_source_without_tests(source_path): \"\"\"Build", "storage_zip_abs_path) cm.attach_jolokia_agent(script_path) cm.copy_jar_file(Path(cm.database_config['sql_driver_location']), Path(storage_dist_abs_path / LIB_PATH)) if datasource_path is not", "import re from pathlib import Path import urllib.request as urllib2", "shell=True, cwd=source_path) else: 
subprocess.call(['mvn', 'clean', 'install', '-B', '-e', '-Dmaven.test.skip=true'], cwd=source_path)", "import ElementTree as ET import toml import subprocess import wget", "branch and checkout to the latest released tag it is", "is not None and name.text == CARBON_NAME: for data in", "FileNotFoundError as e: logger.error(\"Error occurred while finding files\", exc_info=True) except", "\"To run run-intg-test.py script you must have Python 3.6 or", "cm.database_config['url'] + \";\" + \"databaseName=\" + identity_db shared_url = cm.database_config['url']", "/ zip_name) configured_dist_storing_loc = Path(target_dir_abs_path / dist_name) script_name = Path(WSO2SERVER)", "+ TESTNG_SERVER_MGT_DIST) # # replace testng source # cm.replace_file(testng_source, testng_destination)", "cm.extract_product(storage_dir_abs_path, storage_zip_abs_path) cm.attach_jolokia_agent(script_path) cm.copy_jar_file(Path(cm.database_config['sql_driver_location']), Path(storage_dist_abs_path / LIB_PATH)) if datasource_path is", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "= driver_class_name os.environ[\"IDENTITY_DATABASE_URL\"] = identity_url os.environ[\"IDENTITY_DATABASE_USERNAME\"] = user os.environ[\"IDENTITY_DATABASE_PASSWORD\"] =", "global target_dir_abs_path global storage_dist_abs_path global pom_file_paths datasource_path = DATASOURCE_PATHS zip_name", "def get_db_meta_data(argument): switcher = DB_META_DATA return switcher.get(argument, False) def add_environmental_variables():", "dist_name + ZIP_FILE_EXTENSION storage_dir_abs_path = Path(cm.workspace + \"/\" + PRODUCT_STORAGE_DIR_NAME)", "= cm.modify_distribution_name(neighbor) for prop in system_properties: name = prop.find('d:name', NS)", "add_environmental_variables() module_path = Path(cm.workspace + \"/\" + cm.product_id + \"/\"", "Path.exists(absolute_file_path): report_storage = Path(cm.workspace + \"/\" + TEST_OUTPUT_DIR_NAME + \"/\"", "repository cm.clone_repo() if cm.test_mode == \"RELEASE\": 
cm.checkout_to_tag() # product name", "logger.info('Building the source skipping tests') if sys.platform.startswith('win'): subprocess.call(['mvn', 'clean', 'install',", "except IOError as e: logger.error(\"Error occurred while accessing files\", exc_info=True)", "the source skipping tests') if sys.platform.startswith('win'): subprocess.call(['mvn', 'clean', 'install', '-B',", "file except in compliance with the License. # You may", "configurations cm.construct_db_config(db_meta_data) # clone the repository cm.clone_repo() if cm.test_mode ==", "= cm.function_logger(logging.DEBUG, logging.DEBUG) if sys.version_info < (3, 6): raise Exception(", "Path(cm.workspace + \"/\" + TEST_OUTPUT_DIR_NAME) logger.info(str(report_folder)) if Path.exists(report_folder): shutil.rmtree(report_folder) logger.info(str(ARTIFACT_REPORTS_PATHS))", "not None and artifact_id.text == SURFACE_PLUGIN_ARTIFACT_ID: configuration = plugin.find('d:configuration', NS)", "prop.find('d:name', NS) if name is not None and name.text ==", "sql_driver_location = None identity_db_url = None identity_db_username = None identity_db_password", "BaseException as e: logger.error(\"Error occurred while doing the configuration\", exc_info=True)", "save_test_output() cm.create_output_property_fle() except Exception as e: logger.error(\"Error occurred while running", "you must have Python 3.6 or latest. 
Current version info:", "engine = cm.db_engine.upper() db_meta_data = get_db_meta_data(engine) distribution_path = DISTRIBUTION_PATH #", "system_properties.iter('{' + NS['d'] + '}' + CARBON_NAME): neighbor.text = cm.modify_distribution_name(neighbor)", "if sys.platform.startswith('win'): file_path = cm.winapi_path(file_path) logger.info(\"Modifying datasource: \" + str(file_path))", "logger.info(\"Datasource paths are not defined in the config file\") os.remove(str(storage_zip_abs_path))", "plugins = data_sources.find('d:plugins', NS) for plugin in plugins.findall('d:plugin', NS): artifact_id", "it is not required to # modify pom files. Hence", "import sys from xml.etree import ElementTree as ET import toml", "occurred while accessing files\", exc_info=True) except Exception as e: logger.error(\"Error", "products def save_log_files(): log_storage = Path(cm.workspace + \"/\" + LOG_STORAGE)", "the product without test once to make samples and required", "modules import sys from xml.etree import ElementTree as ET import", "logger.info(deployment_toml_config) for key in deployment_toml_config: if key == 'database': database_config", "= \"$env{IDENTITY_DATABASE_URL}\" identity_db_config ['username'] = \"$env{IDENTITY_DATABASE_USERNAME}\" identity_db_config ['password'] = <PASSWORD>}\"", "Exception( \"Property file doesn't have mandatory key-value pair. Please verify", "for file in value: absolute_file_path = Path(cm.workspace + \"/\" +", "way to support all products def save_log_files(): log_storage = Path(cm.workspace", "checkout to the latest released tag it is not required", "cm.database_config['url'] + \";\" + \"databaseName=\" + shared_db user = cm.database_config['user']", "= Path(storage_dir_abs_path / zip_name) configured_dist_storing_loc = Path(target_dir_abs_path / dist_name) script_name", "KIND, either express or implied. 
# See the License for", "DEFAULT_ORACLE_SID shared_url= cm.database_config['url'] + \"/\" + DEFAULT_ORACLE_SID user = cm.database_config['user']", "constant # and import it to run-intg-test.py. Thereafter assign it", "+ cm.product_id + \"/\" + TESTNG_SERVER_MGT_DIST) # # replace testng", "from xml.etree import ElementTree as ET import toml import subprocess", "== 'shared_db': shared_db_config = database_config['shared_db'] shared_db_config ['url'] = \"$env{SHARED_DATABASE_URL}\" shared_db_config", "Python 3.6 or latest. Current version info: \" + sys.version_info)", "if MYSQL_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url = cm.database_config[ 'url'] + \"/\"", "exc_info=True) except IOError as e: logger.error(\"Error occurred while accessing files\",", "<PASSWORD>}\" identity_db_config ['driver'] = \"$env{IDENTITY_DATABASE_DRIVER}\" database_names.append(identity_db) if key == 'shared_db':", "'-B', '-e','-Dmaven.test.skip=true'], shell=True, cwd=source_path) else: subprocess.call(['mvn', 'clean', 'install', '-B', '-e',", "(the \"License\"); # you may not use this file except", "this method you can define pom file paths in const_<prod>.py", "paths are not defined in the config file\") os.remove(str(storage_zip_abs_path)) cm.compress_distribution(configured_dist_storing_loc,", "pair. 
Please verify the content of the property file \"", "import sqlparse import re from pathlib import Path import urllib.request", "# # Unless required by applicable law or agreed to", "method in generic way to support all products def save_log_files():", "name is not None and name.text == CARBON_NAME: for data", "method in generic way to support all products def save_test_output():", "Path.exists(report_folder): shutil.rmtree(report_folder) logger.info(str(ARTIFACT_REPORTS_PATHS)) logger.info(str(type(ARTIFACT_REPORTS_PATHS))) report_file_paths = ARTIFACT_REPORTS_PATHS for key, value", "\"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\" \\ \"&amp;verifyServerCertificate=false\" user = cm.database_config['user'] elif ORACLE_DB_ENGINE == cm.database_config['db_engine'].upper():", "str(module_path) + ' \\n') cm.build_module(module_path) intg_module_path = Path(cm.workspace + \"/\"", "implied. # See the License for the specific language governing", "\"/\" + DISTRIBUTION_PATH) storage_dist_abs_path = Path(storage_dir_abs_path / dist_name) storage_zip_abs_path =", "user = cm.database_config['user'] elif MSSQL_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url = cm.database_config['url']", "'shared_db': shared_db_config = database_config['shared_db'] shared_db_config ['url'] = \"$env{SHARED_DATABASE_URL}\" shared_db_config ['username']", "Path(cm.workspace + \"/\" + cm.product_id + \"/\" + file) if", "info: \" + sys.version_info) cm.read_property_files() if not cm.validate_property_readings(): raise Exception(", "global pom_file_paths datasource_path = DATASOURCE_PATHS zip_name = dist_name + ZIP_FILE_EXTENSION", "if not cm.validate_property_readings(): raise Exception( \"Property file doesn't have mandatory", "'}' + CARBON_NAME): neighbor.text = cm.modify_distribution_name(neighbor) for prop in system_properties:", "== \"RELEASE\": cm.checkout_to_tag() # product name retrieve from product pom", "cm.test_mode == \"SNAPSHOT\": # product name 
retrieve from product pom", "Path(cm.workspace + \"/\" + cm.product_id + \"/\" + TESTNG_SERVER_MGT_DIST) #", "in plugins.findall('d:plugin', NS): artifact_id = plugin.find('d:artifactId', NS) if artifact_id is", "finding files\", exc_info=True) except IOError as e: logger.error(\"Error occurred while", "<PASSWORD> shared_db_driver = None identity_db = \"WSO2_IDENTITY_DB\" shared_db = \"WSO2_SHARED_DB\"", "cm.db_engine.upper() db_meta_data = get_db_meta_data(engine) distribution_path = DISTRIBUTION_PATH # construct the", "Unless required by applicable law or agreed to in writing,", "the given location: \" + str(absolute_file_path)) #TODO: Improve the method", "+ \"databaseName=\" + identity_db shared_url = cm.database_config['url'] + \";\" +", "the specific language governing permissions and # limitations under the", "LIB_PATH)) if datasource_path is not None: modify_datasources() else: logger.info(\"Datasource paths", "cm.compress_distribution(configured_dist_storing_loc, storage_dir_abs_path) cm.add_distribution_to_m2(storage_dir_abs_path, M2_PATH) shutil.rmtree(configured_dist_storing_loc, onerror=cm.on_rm_error) return database_names except FileNotFoundError", "ZIP_FILE_EXTENSION, CARBON_NAME, VALUE_TAG, SURFACE_PLUGIN_ARTIFACT_ID, \\ DEPLOYMENT_PROPERTY_FILE_NAME, LOG_FILE_NAME, PRODUCT_STORAGE_DIR_NAME, \\ DEFAULT_DB_USERNAME,", "product without test once to make samples and required artifacts", "# cm.replace_file(testng_server_mgt_source, testng_server_mgt_destination) def configure_product(): try: global datasource_path global target_dir_abs_path", "configure_product() if db_names is None or not db_names: raise Exception(\"Failed", "6): raise Exception( \"To run run-intg-test.py script you must have", "(3, 6): raise Exception( \"To run run-intg-test.py script you must", "the property file \" \"and the format\") # get properties", "identity_url = cm.database_config['url'] + \";\" + \"databaseName=\" + identity_db shared_url", "import shutil import pymysql 
import sqlparse import re from pathlib", "if db_names is None or not db_names: raise Exception(\"Failed the", "+ 'modules/integration/tests-common') logger.info('Building common module. Build path: '+ str(module_path) +", "shared_url= cm.database_config['url'] + \"/\" + DEFAULT_ORACLE_SID user = cm.database_config['user'] elif", "Path(cm.workspace + \"/\" + cm.product_id + \"/\" + TESTNG_DIST_XML_PATHS) #", "logger.error(\"File doesn't contain in the given location: \" + str(absolute_file_path))", "= configuration.find('d:systemProperties', NS) for neighbor in system_properties.iter('{' + NS['d'] +", "clone a given git branch and checkout to the latest", "\"/\" + key) cm.copy_file(absolute_file_path, report_storage) logger.info(\"Report successfully copied\") else: logger.error(\"File", "\"/\" + DEFAULT_ORACLE_SID user = cm.database_config['user'] elif MSSQL_DB_ENGINE == cm.database_config['db_engine'].upper():", "to be available. build_source_without_tests(cm.workspace + \"/\" + cm.product_id + \"/\")", "raise Exception( \"Property file doesn't have mandatory key-value pair. Please", "clone the repository cm.clone_repo() if cm.test_mode == \"RELEASE\": cm.checkout_to_tag() #", "mandatory key-value pair. Please verify the content of the property", "= prop.find('d:name', NS) if name is not None and name.text", "+ \"/\" + cm.product_id + \"/\") cm.get_latest_released_dist() elif cm.test_mode ==", "key, value in report_file_paths.items(): for file in value: absolute_file_path =", "# testng_source = Path(cm.workspace + \"/\" + \"testng.xml\") # testng_destination", "shared_db_config = database_config['shared_db'] shared_db_config ['url'] = \"$env{SHARED_DATABASE_URL}\" shared_db_config ['username'] =", "configure_product method and call the modify_pom_files method. 
def modify_pom_files(): for", "def configure_product(): try: global datasource_path global target_dir_abs_path global storage_dist_abs_path global", "while doing the configuration\", exc_info=True) if __name__ == \"__main__\": main()", "global logger global dist_name logger = cm.function_logger(logging.DEBUG, logging.DEBUG) if sys.version_info", "not cm.validate_property_readings(): raise Exception( \"Property file doesn't have mandatory key-value", "cm.add_distribution_to_m2(storage_dir_abs_path, M2_PATH) shutil.rmtree(configured_dist_storing_loc, onerror=cm.on_rm_error) return database_names except FileNotFoundError as e:", "+ \"/\" + TESTNG_DIST_XML_PATHS) # testng_server_mgt_source = Path(cm.workspace + \"/\"", "' \\n') cm.build_module(module_path) intg_module_path = Path(cm.workspace + \"/\" + cm.product_id", "Path(log_storage).mkdir(parents=True, exist_ok=True) log_file_paths = ARTIFACT_REPORTS_PATHS if log_file_paths: for file in", "(http://wso2.com) All Rights Reserved. # # Licensed under the Apache", "= Path(cm.workspace + \"/\" + TEST_OUTPUT_DIR_NAME + \"/\" + key)", "\\ \"&amp;verifyServerCertificate=false\" shared_url = cm.database_config[ 'url'] + \"/\" + shared_db", "for integration test\") def modify_datasources(): file_path = Path(storage_dist_abs_path / datasource_path)", "\"databaseName=\" + identity_db shared_url = cm.database_config['url'] + \";\" + \"databaseName=\"", "+ shared_db + \\ \"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\" \\ \"&amp;verifyServerCertificate=false\" user = cm.database_config['user']", "shared_db_config ['password'] = <PASSWORD>{<PASSWORD>}\" shared_db_config ['driver'] = \"$env{SHARED_DATABASE_DRIVER}\" database_names.append(shared_db) with", "log_storage) else: logger.error(\"File doesn't contain in the given location: \"", "import logging import inspect import os import shutil import pymysql", "# populate databases db_names = configure_product() if db_names is None", "database_names.append(shared_db) 
with open(file_path, 'w') as writer: writer.write(toml.dumps(deployment_toml_config)) # Since we", "= None sql_driver_location = None identity_db_url = None identity_db_username =", "import intg_test_manager as cm from subprocess import Popen, PIPE import", "\"$env{SHARED_DATABASE_URL}\" shared_db_config ['username'] = \"$env{SHARED_DATABASE_USERNAME}\" shared_db_config ['password'] = <PASSWORD>{<PASSWORD>}\" shared_db_config", "datasource_path is not None: modify_datasources() else: logger.info(\"Datasource paths are not", "in const_<prod>.py as a constant # and import it to", "= cm.db_engine.upper() db_meta_data = get_db_meta_data(engine) distribution_path = DISTRIBUTION_PATH # construct", "configuring the product\", exc_info=True) def build_source_without_tests(source_path): \"\"\"Build the product-source. \"\"\"", "def modify_datasources(): file_path = Path(storage_dist_abs_path / datasource_path) if sys.platform.startswith('win'): file_path", "pom file paths in const_<prod>.py as a constant # and", "logger.info(\"loading dep,loyment.toml file\") logger.info(deployment_toml_config) for key in deployment_toml_config: if key", "= \"$env{SHARED_DATABASE_DRIVER}\" database_names.append(shared_db) with open(file_path, 'w') as writer: writer.write(toml.dumps(deployment_toml_config)) #", "You may obtain a copy of the License at #", "= \"WSO2_IDENTITY_DB\" shared_db = \"WSO2_SHARED_DB\" def get_db_meta_data(argument): switcher = DB_META_DATA", "user = cm.database_config['user'] else: shared_url = cm.database_config['url'] + \"/\" +", "os.environ[\"SHARED_DATABASE_PASSWORD\"] = password os.environ[\"SHARED_DATABASE_DRIVER\"] = driver_class_name os.environ[\"IDENTITY_DATABASE_URL\"] = identity_url os.environ[\"IDENTITY_DATABASE_USERNAME\"]", "'-Dmaven.test.skip=true'], cwd=source_path) logger.info('Module build is completed. 
Module: ' + str(source_path))", "MSSQL_DB_ENGINE database_names = [] db_engine = None sql_driver_location = None", "+ str(file_path)) ET.register_namespace('', NS['d']) artifact_tree = ET.parse(file_path) artifarct_root = artifact_tree.getroot()", "import inspect import os import shutil import pymysql import sqlparse", "\"/\" + \"testng.xml\") # testng_destination = Path(cm.workspace + \"/\" +", "= <PASSWORD>{<PASSWORD>}\" shared_db_config ['driver'] = \"$env{SHARED_DATABASE_DRIVER}\" database_names.append(shared_db) with open(file_path, 'w')", "e: logger.error(\"Error occurred while finding files\", exc_info=True) except IOError as", "generic way to support all products # def set_custom_testng(): #", "if Path.exists(report_folder): shutil.rmtree(report_folder) logger.info(str(ARTIFACT_REPORTS_PATHS)) logger.info(str(type(ARTIFACT_REPORTS_PATHS))) report_file_paths = ARTIFACT_REPORTS_PATHS for key,", "database_names.append(identity_db) if key == 'shared_db': shared_db_config = database_config['shared_db'] shared_db_config ['url']", "global storage_dist_abs_path global pom_file_paths datasource_path = DATASOURCE_PATHS zip_name = dist_name", "str(source_path)) def main(): try: global logger global dist_name logger =", "integration test\") def modify_datasources(): file_path = Path(storage_dist_abs_path / datasource_path) if", "ORACLE_DB_ENGINE, PRODUCT_STORAGE_DIR_NAME, MSSQL_DB_ENGINE database_names = [] db_engine = None sql_driver_location", "integration module. 
Build path: '+ str(intg_module_path) + ' \\n') cm.build_module(intg_module_path)", "TESTNG_DIST_XML_PATHS) # testng_server_mgt_source = Path(cm.workspace + \"/\" + \"testng-server-mgt.xml\") #", "artifarct_root = artifact_tree.getroot() data_sources = artifarct_root.find('d:build', NS) plugins = data_sources.find('d:plugins',", "cm.setup_databases(db_names, db_meta_data) # run integration tests # Buld Common module", "and name.text == CARBON_NAME: for data in prop: if data.tag", "urllib2 from xml.dom import minidom import intg_test_manager as cm from", "sys.platform.startswith('win'): file_path = cm.winapi_path(file_path) logger.info(\"Modifying pom file: \" + str(file_path))", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "+ \"/\" + TESTNG_SERVER_MGT_DIST) # # replace testng source #", "db_names is None or not db_names: raise Exception(\"Failed the product", "logging.DEBUG) if sys.version_info < (3, 6): raise Exception( \"To run", "# Buld Common module add_environmental_variables() module_path = Path(cm.workspace + \"/\"", "in the config file\") os.remove(str(storage_zip_abs_path)) cm.compress_distribution(configured_dist_storing_loc, storage_dir_abs_path) cm.add_distribution_to_m2(storage_dir_abs_path, M2_PATH) shutil.rmtree(configured_dist_storing_loc,", "import pymysql import sqlparse import re from pathlib import Path", "occurred while doing the configuration\", exc_info=True) if __name__ == \"__main__\":", "script\", exc_info=True) except BaseException as e: logger.error(\"Error occurred while doing", "for pom in POM_FILE_PATHS: file_path = Path(cm.workspace + \"/\" +", "None identity_db_password = <PASSWORD> identity_db_driver = None shared_db_url = None", "file) if Path.exists(absolute_file_path): report_storage = Path(cm.workspace + \"/\" + TEST_OUTPUT_DIR_NAME", "os.environ[\"SHARED_DATABASE_URL\"] = shared_url os.environ[\"SHARED_DATABASE_USERNAME\"] = user os.environ[\"SHARED_DATABASE_PASSWORD\"] = password 
os.environ[\"SHARED_DATABASE_DRIVER\"]", "+ \"/\" + 'modules/integration/tests-common') logger.info('Building common module. Build path: '+", "Path(cm.workspace + \"/\" + LOG_STORAGE) if not Path.exists(log_storage): Path(log_storage).mkdir(parents=True, exist_ok=True)", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "to run-intg-test.py. Thereafter assign it to global variable called pom_file_paths", "toml import subprocess import wget import logging import inspect import", "= Path(cm.workspace + \"/\" + cm.product_id + \"/\" + TESTNG_DIST_XML_PATHS)", "License. # You may obtain a copy of the License", "version info: \" + sys.version_info) cm.read_property_files() if not cm.validate_property_readings(): raise", "\" + sys.version_info) cm.read_property_files() if not cm.validate_property_readings(): raise Exception( \"Property", "'clean', 'install', '-B', '-e','-Dmaven.test.skip=true'], shell=True, cwd=source_path) else: subprocess.call(['mvn', 'clean', 'install',", "subprocess import Popen, PIPE import os from prod_test_constant import DB_META_DATA,", "'identity_db': identity_db_config = database_config['identity_db'] identity_db_config ['url'] = \"$env{IDENTITY_DATABASE_URL}\" identity_db_config ['username']", "populate databases db_names = configure_product() if db_names is None or", "Common module add_environmental_variables() module_path = Path(cm.workspace + \"/\" + cm.product_id", "retrieve from product pom files dist_name = cm.get_dist_name(pom_path) # build", "db_meta_data) # run integration tests # Buld Common module add_environmental_variables()", "CARBON_NAME, VALUE_TAG, SURFACE_PLUGIN_ARTIFACT_ID, \\ DEPLOYMENT_PROPERTY_FILE_NAME, LOG_FILE_NAME, PRODUCT_STORAGE_DIR_NAME, \\ DEFAULT_DB_USERNAME, LOG_STORAGE,", "required artifacts to be available. 
build_source_without_tests(cm.workspace + \"/\" + cm.product_id", "\"&amp;verifyServerCertificate=false\" shared_url = cm.database_config[ 'url'] + \"/\" + shared_db +", "shared_db_url = None shared_db_username = None shared_db_password = <PASSWORD> shared_db_driver", "/ dist_name) script_name = Path(WSO2SERVER) script_path = Path(storage_dist_abs_path / script_name)", "= user os.environ[\"SHARED_DATABASE_PASSWORD\"] = password os.environ[\"SHARED_DATABASE_DRIVER\"] = driver_class_name os.environ[\"IDENTITY_DATABASE_URL\"] =", "def save_log_files(): log_storage = Path(cm.workspace + \"/\" + LOG_STORAGE) if", "+ key) cm.copy_file(absolute_file_path, report_storage) logger.info(\"Report successfully copied\") else: logger.error(\"File doesn't", "for file in log_file_paths: absolute_file_path = Path(cm.workspace + \"/\" +", "key == 'identity_db': identity_db_config = database_config['identity_db'] identity_db_config ['url'] = \"$env{IDENTITY_DATABASE_URL}\"", "NS) if artifact_id is not None and artifact_id.text == SURFACE_PLUGIN_ARTIFACT_ID:", "file: \" + str(file_path)) ET.register_namespace('', NS['d']) artifact_tree = ET.parse(file_path) artifarct_root", "not None and name.text == CARBON_NAME: for data in prop:", "= plugin.find('d:configuration', NS) system_properties = configuration.find('d:systemProperties', NS) for neighbor in", "+ \"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\" \\ \"&amp;verifyServerCertificate=false\" shared_url = cm.database_config[ 'url'] + \"/\"", "pom_path = DIST_POM_PATH engine = cm.db_engine.upper() db_meta_data = get_db_meta_data(engine) distribution_path", "Exception as e: logger.error(\"Error occurred while running the run-intg-test.py script\",", "+ cm.product_id + \"/\" + INTEGRATION_PATH) logger.info('Building integration module. Build", "'modules/integration/tests-common') logger.info('Building common module. 
Build path: '+ str(module_path) + '", "value in report_file_paths.items(): for file in value: absolute_file_path = Path(cm.workspace", "= None identity_db_url = None identity_db_username = None identity_db_password =", "doesn't have mandatory key-value pair. Please verify the content of", "None identity_db_url = None identity_db_username = None identity_db_password = <PASSWORD>", "# product name retrieve from product pom files dist_name =", "cwd=source_path) else: subprocess.call(['mvn', 'clean', 'install', '-B', '-e', '-Dmaven.test.skip=true'], cwd=source_path) logger.info('Module", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "limitations under the License. # importing required modules import sys", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "required to # modify pom files. Hence in the current", "configuration = plugin.find('d:configuration', NS) system_properties = configuration.find('d:systemProperties', NS) for neighbor", "cm.database_config[ 'url'] + \"/\" + identity_db + \"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\" \\ \"&amp;verifyServerCertificate=false\"", "if cm.use_custom_testng_file == \"TRUE\": # testng_source = Path(cm.workspace + \"/\"", "language governing permissions and # limitations under the License. #", "product\", exc_info=True) def build_source_without_tests(source_path): \"\"\"Build the product-source. 
\"\"\" logger.info('Building the", "if sys.platform.startswith('win'): subprocess.call(['mvn', 'clean', 'install', '-B', '-e','-Dmaven.test.skip=true'], shell=True, cwd=source_path) else:", "required by applicable law or agreed to in writing, software", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "= cm.database_config['driver_class_name'] os.environ[\"SHARED_DATABASE_URL\"] = shared_url os.environ[\"SHARED_DATABASE_USERNAME\"] = user os.environ[\"SHARED_DATABASE_PASSWORD\"] =", "execute this method you can define pom file paths in", "cm.database_config['user'] else: shared_url = cm.database_config['url'] + \"/\" + shared_db identity_url", "IOError as e: logger.error(\"Error occurred while accessing files\", exc_info=True) except", "DEFAULT_ORACLE_SID, MYSQL_DB_ENGINE, \\ ORACLE_DB_ENGINE, PRODUCT_STORAGE_DIR_NAME, MSSQL_DB_ENGINE database_names = [] db_engine", "configuring\") cm.setup_databases(db_names, db_meta_data) # run integration tests # Buld Common", "as e: logger.error(\"Error occurred while running the run-intg-test.py script\", exc_info=True)", "of the property file \" \"and the format\") # get", "while finding files\", exc_info=True) except IOError as e: logger.error(\"Error occurred", "elif cm.test_mode == \"SNAPSHOT\": # product name retrieve from product", "= deployment_toml_config[key] for key in database_config: if key == 'identity_db':", "agreed to in writing, software # distributed under the License", "distributed under the License is distributed on an \"AS IS\"", "def modify_pom_files(): for pom in POM_FILE_PATHS: file_path = Path(cm.workspace +", "source # cm.replace_file(testng_source, testng_destination) # # replace testng server mgt", "neighbor in system_properties.iter('{' + NS['d'] + '}' + CARBON_NAME): neighbor.text", "logger.error(\"Error occurred while running the run-intg-test.py script\", exc_info=True) except BaseException", "verify the content of the property file \" \"and the", "# cm.replace_file(testng_source, 
testng_destination) # # replace testng server mgt source", "+ identity_db shared_url = cm.database_config['url'] + \";\" + \"databaseName=\" +", "== cm.database_config['db_engine'].upper(): identity_url = cm.database_config['url'] + \";\" + \"databaseName=\" +", "to global variable called pom_file_paths in the # configure_product method", "distribution_path = DISTRIBUTION_PATH # construct the database configurations cm.construct_db_config(db_meta_data) #", "+ TEST_OUTPUT_DIR_NAME) logger.info(str(report_folder)) if Path.exists(report_folder): shutil.rmtree(report_folder) logger.info(str(ARTIFACT_REPORTS_PATHS)) logger.info(str(type(ARTIFACT_REPORTS_PATHS))) report_file_paths =", "= Path(cm.workspace + \"/\" + TEST_OUTPUT_DIR_NAME) logger.info(str(report_folder)) if Path.exists(report_folder): shutil.rmtree(report_folder)", "testng_destination = Path(cm.workspace + \"/\" + cm.product_id + \"/\" +", "+ \"/\" + DEFAULT_ORACLE_SID shared_url= cm.database_config['url'] + \"/\" + DEFAULT_ORACLE_SID", "main(): try: global logger global dist_name logger = cm.function_logger(logging.DEBUG, logging.DEBUG)", "test once to make samples and required artifacts to be", "file in value: absolute_file_path = Path(cm.workspace + \"/\" + cm.product_id", "= plugin.find('d:artifactId', NS) if artifact_id is not None and artifact_id.text", "file_path = Path(storage_dist_abs_path / datasource_path) if sys.platform.startswith('win'): file_path = cm.winapi_path(file_path)", "in system_properties.iter('{' + NS['d'] + '}' + CARBON_NAME): neighbor.text =", "you can define pom file paths in const_<prod>.py as a", "artifact_id is not None and artifact_id.text == SURFACE_PLUGIN_ARTIFACT_ID: configuration =", "as e: logger.error(\"Error occurred while doing the configuration\", exc_info=True) if", "+ sys.version_info) cm.read_property_files() if not cm.validate_property_readings(): raise Exception( \"Property file", "import DB_META_DATA, DIST_POM_PATH, INTEGRATION_PATH, DISTRIBUTION_PATH, \\ 
DATASOURCE_PATHS, LIB_PATH, WSO2SERVER, M2_PATH,", "= cm.database_config['user'] elif ORACLE_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url= cm.database_config['url'] + \"/\"", "subprocess.call(['mvn', 'clean', 'install', '-B', '-e','-Dmaven.test.skip=true'], shell=True, cwd=source_path) else: subprocess.call(['mvn', 'clean',", "identity_db + \"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\" \\ \"&amp;verifyServerCertificate=false\" shared_url = cm.database_config[ 'url'] +", "MYSQL_DB_ENGINE, \\ ORACLE_DB_ENGINE, PRODUCT_STORAGE_DIR_NAME, MSSQL_DB_ENGINE database_names = [] db_engine =", "the content of the property file \" \"and the format\")", "import Popen, PIPE import os from prod_test_constant import DB_META_DATA, DIST_POM_PATH,", "called pom_file_paths in the # configure_product method and call the", "DEFAULT_ORACLE_SID user = cm.database_config['user'] elif MSSQL_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url =", "+ \"/\" + file) if Path.exists(absolute_file_path): report_storage = Path(cm.workspace +", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "the License is distributed on an \"AS IS\" BASIS, #", "file in log_file_paths: absolute_file_path = Path(cm.workspace + \"/\" + cm.product_id", "DB_META_DATA, DIST_POM_PATH, INTEGRATION_PATH, DISTRIBUTION_PATH, \\ DATASOURCE_PATHS, LIB_PATH, WSO2SERVER, M2_PATH, ARTIFACT_REPORTS_PATHS,", "zip_name) configured_dist_storing_loc = Path(target_dir_abs_path / dist_name) script_name = Path(WSO2SERVER) script_path", "cm.function_logger(logging.DEBUG, logging.DEBUG) if sys.version_info < (3, 6): raise Exception( \"To", "import Path import urllib.request as urllib2 from xml.dom import minidom", "tests') if sys.platform.startswith('win'): subprocess.call(['mvn', 'clean', 'install', '-B', '-e','-Dmaven.test.skip=true'], shell=True, cwd=source_path)", "Buld Common module add_environmental_variables() module_path = Path(cm.workspace + \"/\" +", "law or agreed to in writing, software # distributed under", "identity_db_driver = None shared_db_url = None shared_db_username = None shared_db_password", "prod_test_constant import DB_META_DATA, DIST_POM_PATH, INTEGRATION_PATH, DISTRIBUTION_PATH, \\ DATASOURCE_PATHS, LIB_PATH, WSO2SERVER,", "\"/\" + cm.product_id + \"/\" + INTEGRATION_PATH) logger.info('Building integration module.", "cm.winapi_path(file_path) logger.info(\"Modifying datasource: \" + str(file_path)) deployment_toml_config = toml.load(file_path) logger.info(\"loading", "DATASOURCE_PATHS, LIB_PATH, WSO2SERVER, M2_PATH, ARTIFACT_REPORTS_PATHS, POM_FILE_PATHS from intg_test_constant import NS,", "= cm.get_dist_name(pom_path) cm.build_snapshot_dist(distribution_path) elif cm.test_mode == \"WUM\": dist_name = cm.get_dist_name_wum()", "\"$env{IDENTITY_DATABASE_DRIVER}\" database_names.append(identity_db) if key == 'shared_db': shared_db_config = database_config['shared_db'] shared_db_config", "may obtain a copy of the License at # #", "the License. 
# importing required modules import sys from xml.etree", "assign it to global variable called pom_file_paths in the #", "testng source # cm.replace_file(testng_source, testng_destination) # # replace testng server", "may not use this file except in compliance with the", "= None shared_db_password = <PASSWORD> shared_db_driver = None identity_db =", "db_engine = None sql_driver_location = None identity_db_url = None identity_db_username", "this file except in compliance with the License. # You", "# limitations under the License. # importing required modules import", "as a constant # and import it to run-intg-test.py. Thereafter", "cm.build_module(module_path) intg_module_path = Path(cm.workspace + \"/\" + cm.product_id + \"/\"", "to local variables pom_path = DIST_POM_PATH engine = cm.db_engine.upper() db_meta_data", "+ cm.product_id + \"/\" + pom) if sys.platform.startswith('win'): file_path =", "the method in generic way to support all products #", "is not required to # modify pom files. 
Hence in", "CARBON_NAME): neighbor.text = cm.modify_distribution_name(neighbor) for prop in system_properties: name =", "# # Licensed under the Apache License, Version 2.0 (the", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "\";\" + \"databaseName=\" + identity_db shared_url = cm.database_config['url'] + \";\"", "products # def set_custom_testng(): # if cm.use_custom_testng_file == \"TRUE\": #", "pom file: \" + str(file_path)) ET.register_namespace('', NS['d']) artifact_tree = ET.parse(file_path)", "dist_name logger = cm.function_logger(logging.DEBUG, logging.DEBUG) if sys.version_info < (3, 6):", "= None shared_db_url = None shared_db_username = None shared_db_password =", "testng_destination) # # replace testng server mgt source # cm.replace_file(testng_server_mgt_source,", "= DISTRIBUTION_PATH # construct the database configurations cm.construct_db_config(db_meta_data) # clone", "\";\" + \"databaseName=\" + shared_db user = cm.database_config['user'] else: shared_url", "generic way to support all products def save_test_output(): report_folder =", "absolute_file_path = Path(cm.workspace + \"/\" + cm.product_id + \"/\" +", "variable called pom_file_paths in the # configure_product method and call", "is None or not db_names: raise Exception(\"Failed the product configuring\")", "'database': database_config = deployment_toml_config[key] for key in database_config: if key", "properties assigned to local variables pom_path = DIST_POM_PATH engine =", "= Path(storage_dist_abs_path / datasource_path) if sys.platform.startswith('win'): file_path = cm.winapi_path(file_path) logger.info(\"Modifying", "order to execute this method you can define pom file", "\"Property file doesn't have mandatory key-value pair. 
Please verify the", "+ identity_db user = cm.database_config['user'] password = cm.database_config['password'] driver_class_name =", "+ PRODUCT_STORAGE_DIR_NAME) target_dir_abs_path = Path(cm.workspace + \"/\" + cm.product_id +", "\\ DEPLOYMENT_PROPERTY_FILE_NAME, LOG_FILE_NAME, PRODUCT_STORAGE_DIR_NAME, \\ DEFAULT_DB_USERNAME, LOG_STORAGE, TEST_OUTPUT_DIR_NAME, DEFAULT_ORACLE_SID, MYSQL_DB_ENGINE,", "environmental variables for integration test\") def modify_datasources(): file_path = Path(storage_dist_abs_path", "or implied. # See the License for the specific language", "datasource_path global target_dir_abs_path global storage_dist_abs_path global pom_file_paths datasource_path = DATASOURCE_PATHS", "= Path(cm.workspace + \"/\" + cm.product_id + \"/\" + TESTNG_SERVER_MGT_DIST)", "cm.database_config[ 'url'] + \"/\" + shared_db + \\ \"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\" \\", "= user os.environ[\"IDENTITY_DATABASE_PASSWORD\"] = password os.environ[\"IDENTITY_DATABASE_DRIVER\"] = driver_class_name logger.info(\"Added environmental", "DIST_POM_PATH engine = cm.db_engine.upper() db_meta_data = get_db_meta_data(engine) distribution_path = DISTRIBUTION_PATH", "source # cm.replace_file(testng_server_mgt_source, testng_server_mgt_destination) def configure_product(): try: global datasource_path global", "cm.database_config['user'] elif MSSQL_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url = cm.database_config['url'] + \";\"", "/ datasource_path) if sys.platform.startswith('win'): file_path = cm.winapi_path(file_path) logger.info(\"Modifying datasource: \"", "in report_file_paths.items(): for file in value: absolute_file_path = Path(cm.workspace +", "as e: logger.error(\"Error occurred while accessing files\", exc_info=True) except Exception", "mgt source # cm.replace_file(testng_server_mgt_source, testng_server_mgt_destination) def configure_product(): try: global datasource_path", "inspect import os import shutil import pymysql 
import sqlparse import", "MYSQL_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url = cm.database_config[ 'url'] + \"/\" +", "a method to clone a given git branch and checkout", "VALUE_TAG: data.text = cm.modify_distribution_name(data) break artifact_tree.write(file_path) #TODO: Improve the method", "driver_class_name os.environ[\"IDENTITY_DATABASE_URL\"] = identity_url os.environ[\"IDENTITY_DATABASE_USERNAME\"] = user os.environ[\"IDENTITY_DATABASE_PASSWORD\"] = password", "WSO2 Inc. (http://wso2.com) All Rights Reserved. # # Licensed under", "plugins.findall('d:plugin', NS): artifact_id = plugin.find('d:artifactId', NS) if artifact_id is not", "re from pathlib import Path import urllib.request as urllib2 from", "save_log_files(): log_storage = Path(cm.workspace + \"/\" + LOG_STORAGE) if not", "cm.copy_file(absolute_file_path, report_storage) logger.info(\"Report successfully copied\") else: logger.error(\"File doesn't contain in", "from pathlib import Path import urllib.request as urllib2 from xml.dom", "user = cm.database_config['user'] elif ORACLE_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url= cm.database_config['url'] +", "= database_config['identity_db'] identity_db_config ['url'] = \"$env{IDENTITY_DATABASE_URL}\" identity_db_config ['username'] = \"$env{IDENTITY_DATABASE_USERNAME}\"", "= cm.database_config[ 'url'] + \"/\" + identity_db + \"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\" \\", "file \" \"and the format\") # get properties assigned to", "tag it is not required to # modify pom files.", "+ DEFAULT_ORACLE_SID user = cm.database_config['user'] elif MSSQL_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url", "driver_class_name logger.info(\"Added environmental variables for integration test\") def modify_datasources(): file_path", "cm.replace_file(testng_server_mgt_source, testng_server_mgt_destination) def configure_product(): try: global datasource_path global target_dir_abs_path 
global", "shared_url = cm.database_config['url'] + \";\" + \"databaseName=\" + shared_db user", "shared_db_config ['username'] = \"$env{SHARED_DATABASE_USERNAME}\" shared_db_config ['password'] = <PASSWORD>{<PASSWORD>}\" shared_db_config ['driver']", "None: modify_datasources() else: logger.info(\"Datasource paths are not defined in the", "None and artifact_id.text == SURFACE_PLUGIN_ARTIFACT_ID: configuration = plugin.find('d:configuration', NS) system_properties", "= None identity_db_password = <PASSWORD> identity_db_driver = None shared_db_url =", "cm.construct_db_config(db_meta_data) # clone the repository cm.clone_repo() if cm.test_mode == \"RELEASE\":", "logger.info('Module build is completed. Module: ' + str(source_path)) def main():", "== CARBON_NAME: for data in prop: if data.tag == VALUE_TAG:", "POM_FILE_PATHS: file_path = Path(cm.workspace + \"/\" + cm.product_id + \"/\"", "not Path.exists(log_storage): Path(log_storage).mkdir(parents=True, exist_ok=True) log_file_paths = ARTIFACT_REPORTS_PATHS if log_file_paths: for", "module. Build path: '+ str(intg_module_path) + ' \\n') cm.build_module(intg_module_path) save_test_output()", "logger.info(\"Added environmental variables for integration test\") def modify_datasources(): file_path =", "switcher = DB_META_DATA return switcher.get(argument, False) def add_environmental_variables(): if MYSQL_DB_ENGINE", "and # limitations under the License. 
# importing required modules", "cm.product_id + \"/\" + file) if Path.exists(absolute_file_path): cm.copy_file(absolute_file_path, log_storage) else:", "ARTIFACT_REPORTS_PATHS if log_file_paths: for file in log_file_paths: absolute_file_path = Path(cm.workspace", "integration tests # Buld Common module add_environmental_variables() module_path = Path(cm.workspace", "log_file_paths: absolute_file_path = Path(cm.workspace + \"/\" + cm.product_id + \"/\"", "+ \"/\" + LOG_STORAGE) if not Path.exists(log_storage): Path(log_storage).mkdir(parents=True, exist_ok=True) log_file_paths", "build the product without test once to make samples and", "in generic way to support all products # def set_custom_testng():", "logger.info(\"Report successfully copied\") else: logger.error(\"File doesn't contain in the given", "logger.info(str(report_folder)) if Path.exists(report_folder): shutil.rmtree(report_folder) logger.info(str(ARTIFACT_REPORTS_PATHS)) logger.info(str(type(ARTIFACT_REPORTS_PATHS))) report_file_paths = ARTIFACT_REPORTS_PATHS for", "elif MSSQL_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url = cm.database_config['url'] + \";\" +", "NS['d'] + '}' + CARBON_NAME): neighbor.text = cm.modify_distribution_name(neighbor) for prop", "/ dist_name) storage_zip_abs_path = Path(storage_dir_abs_path / zip_name) configured_dist_storing_loc = Path(target_dir_abs_path", "+ \";\" + \"databaseName=\" + identity_db shared_url = cm.database_config['url'] +", "identity_url os.environ[\"IDENTITY_DATABASE_USERNAME\"] = user os.environ[\"IDENTITY_DATABASE_PASSWORD\"] = password os.environ[\"IDENTITY_DATABASE_DRIVER\"] = driver_class_name", "cm.clone_repo() if cm.test_mode == \"RELEASE\": cm.checkout_to_tag() # product name retrieve", "in writing, software # distributed under the License is distributed", "= cm.database_config['url'] + \";\" + \"databaseName=\" + shared_db user =", "all products def save_test_output(): report_folder = Path(cm.workspace + \"/\" +", 
"switcher.get(argument, False) def add_environmental_variables(): if MYSQL_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url =", "MSSQL_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url = cm.database_config['url'] + \";\" + \"databaseName=\"", "DB_META_DATA return switcher.get(argument, False) def add_environmental_variables(): if MYSQL_DB_ENGINE == cm.database_config['db_engine'].upper():", "identity_db_url = None identity_db_username = None identity_db_password = <PASSWORD> identity_db_driver", "' \\n') cm.build_module(intg_module_path) save_test_output() cm.create_output_property_fle() except Exception as e: logger.error(\"Error", "= dist_name + ZIP_FILE_EXTENSION storage_dir_abs_path = Path(cm.workspace + \"/\" +", "NS) for plugin in plugins.findall('d:plugin', NS): artifact_id = plugin.find('d:artifactId', NS)", "identity_db_username = None identity_db_password = <PASSWORD> identity_db_driver = None shared_db_url", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "it to run-intg-test.py. Thereafter assign it to global variable called", "License, Version 2.0 (the \"License\"); # you may not use", "pom files. 
Hence in the current implementation this method is", "+ file) if Path.exists(absolute_file_path): report_storage = Path(cm.workspace + \"/\" +", "# build the product without test once to make samples", "test\") def modify_datasources(): file_path = Path(storage_dist_abs_path / datasource_path) if sys.platform.startswith('win'):", "identity_db = \"WSO2_IDENTITY_DB\" shared_db = \"WSO2_SHARED_DB\" def get_db_meta_data(argument): switcher =", "files\", exc_info=True) except IOError as e: logger.error(\"Error occurred while accessing", "open(file_path, 'w') as writer: writer.write(toml.dumps(deployment_toml_config)) # Since we have added", "ARTIFACT_REPORTS_PATHS, POM_FILE_PATHS from intg_test_constant import NS, ZIP_FILE_EXTENSION, CARBON_NAME, VALUE_TAG, SURFACE_PLUGIN_ARTIFACT_ID,", "PRODUCT_STORAGE_DIR_NAME, MSSQL_DB_ENGINE database_names = [] db_engine = None sql_driver_location =", "NS) if name is not None and name.text == CARBON_NAME:", "the License for the specific language governing permissions and #", "user os.environ[\"SHARED_DATABASE_PASSWORD\"] = password os.environ[\"SHARED_DATABASE_DRIVER\"] = driver_class_name os.environ[\"IDENTITY_DATABASE_URL\"] = identity_url", "Improve the method in generic way to support all products", "report_file_paths = ARTIFACT_REPORTS_PATHS for key, value in report_file_paths.items(): for file", "deployment_toml_config[key] for key in database_config: if key == 'identity_db': identity_db_config", "this method is not using. 
# However, in order to", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "location: \" + str(absolute_file_path)) #TODO: Improve the method in generic", "= DB_META_DATA return switcher.get(argument, False) def add_environmental_variables(): if MYSQL_DB_ENGINE ==", "cm.product_id + \"/\" + DISTRIBUTION_PATH) storage_dist_abs_path = Path(storage_dir_abs_path / dist_name)", "+ \"testng-server-mgt.xml\") # testng_server_mgt_destination = Path(cm.workspace + \"/\" + cm.product_id", "sys from xml.etree import ElementTree as ET import toml import", "driver_class_name = cm.database_config['driver_class_name'] os.environ[\"SHARED_DATABASE_URL\"] = shared_url os.environ[\"SHARED_DATABASE_USERNAME\"] = user os.environ[\"SHARED_DATABASE_PASSWORD\"]", "xml.etree import ElementTree as ET import toml import subprocess import", "\"/\" + LOG_STORAGE) if not Path.exists(log_storage): Path(log_storage).mkdir(parents=True, exist_ok=True) log_file_paths =", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "db_meta_data = get_db_meta_data(engine) distribution_path = DISTRIBUTION_PATH # construct the database", "dist_name = cm.get_dist_name(pom_path) cm.build_snapshot_dist(distribution_path) elif cm.test_mode == \"WUM\": dist_name =", "if name is not None and name.text == CARBON_NAME: for", "script_path = Path(storage_dist_abs_path / script_name) cm.extract_product(storage_dir_abs_path, storage_zip_abs_path) cm.attach_jolokia_agent(script_path) cm.copy_jar_file(Path(cm.database_config['sql_driver_location']), Path(storage_dist_abs_path", "if key == 'shared_db': shared_db_config = database_config['shared_db'] shared_db_config ['url'] =", "cm.build_snapshot_dist(distribution_path) elif cm.test_mode == \"WUM\": dist_name = cm.get_dist_name_wum() # populate", "= get_db_meta_data(engine) distribution_path = DISTRIBUTION_PATH # construct the database configurations", "LOG_STORAGE, TEST_OUTPUT_DIR_NAME, DEFAULT_ORACLE_SID, 
MYSQL_DB_ENGINE, \\ ORACLE_DB_ENGINE, PRODUCT_STORAGE_DIR_NAME, MSSQL_DB_ENGINE database_names =", "cm.copy_jar_file(Path(cm.database_config['sql_driver_location']), Path(storage_dist_abs_path / LIB_PATH)) if datasource_path is not None: modify_datasources()", "cm.attach_jolokia_agent(script_path) cm.copy_jar_file(Path(cm.database_config['sql_driver_location']), Path(storage_dist_abs_path / LIB_PATH)) if datasource_path is not None:", "['driver'] = \"$env{IDENTITY_DATABASE_DRIVER}\" database_names.append(identity_db) if key == 'shared_db': shared_db_config =", "None shared_db_url = None shared_db_username = None shared_db_password = <PASSWORD>", "# distributed under the License is distributed on an \"AS", "\"/\" + file) if Path.exists(absolute_file_path): report_storage = Path(cm.workspace + \"/\"", "['username'] = \"$env{IDENTITY_DATABASE_USERNAME}\" identity_db_config ['password'] = <PASSWORD>}\" identity_db_config ['driver'] =", "# Unless required by applicable law or agreed to in", "configure_product(): try: global datasource_path global target_dir_abs_path global storage_dist_abs_path global pom_file_paths", "to clone a given git branch and checkout to the", "construct the database configurations cm.construct_db_config(db_meta_data) # clone the repository cm.clone_repo()", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "name.text == CARBON_NAME: for data in prop: if data.tag ==", "['url'] = \"$env{IDENTITY_DATABASE_URL}\" identity_db_config ['username'] = \"$env{IDENTITY_DATABASE_USERNAME}\" identity_db_config ['password'] =", "= cm.database_config['url'] + \"/\" + shared_db identity_url = cm.database_config['url'] +", "plugin in plugins.findall('d:plugin', NS): artifact_id = plugin.find('d:artifactId', NS) if artifact_id", "identity_url = cm.database_config['url'] + \"/\" + identity_db user = cm.database_config['user']", "from product pom files dist_name = cm.get_dist_name(pom_path) # build the", "the Apache License, Version 2.0 (the 
\"License\"); # you may", "# testng_server_mgt_destination = Path(cm.workspace + \"/\" + cm.product_id + \"/\"", "defined in the config file\") os.remove(str(storage_zip_abs_path)) cm.compress_distribution(configured_dist_storing_loc, storage_dir_abs_path) cm.add_distribution_to_m2(storage_dir_abs_path, M2_PATH)", "shared_db_driver = None identity_db = \"WSO2_IDENTITY_DB\" shared_db = \"WSO2_SHARED_DB\" def", "wget import logging import inspect import os import shutil import", "given git branch and checkout to the latest released tag", "const_<prod>.py as a constant # and import it to run-intg-test.py.", "files. Hence in the current implementation this method is not", "shared_db + \\ \"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\" \\ \"&amp;verifyServerCertificate=false\" user = cm.database_config['user'] elif", "\"\"\" logger.info('Building the source skipping tests') if sys.platform.startswith('win'): subprocess.call(['mvn', 'clean',", "dist_name) script_name = Path(WSO2SERVER) script_path = Path(storage_dist_abs_path / script_name) cm.extract_product(storage_dir_abs_path,", "NS, ZIP_FILE_EXTENSION, CARBON_NAME, VALUE_TAG, SURFACE_PLUGIN_ARTIFACT_ID, \\ DEPLOYMENT_PROPERTY_FILE_NAME, LOG_FILE_NAME, PRODUCT_STORAGE_DIR_NAME, \\", "data_sources.find('d:plugins', NS) for plugin in plugins.findall('d:plugin', NS): artifact_id = plugin.find('d:artifactId',", "shared_db user = cm.database_config['user'] else: shared_url = cm.database_config['url'] + \"/\"", "sys.platform.startswith('win'): file_path = cm.winapi_path(file_path) logger.info(\"Modifying datasource: \" + str(file_path)) deployment_toml_config", "Path(cm.workspace + \"/\" + TEST_OUTPUT_DIR_NAME + \"/\" + key) cm.copy_file(absolute_file_path,", "generic way to support all products def save_log_files(): log_storage =", "storage_dist_abs_path = Path(storage_dir_abs_path / dist_name) storage_zip_abs_path = Path(storage_dir_abs_path / zip_name)", "None identity_db = \"WSO2_IDENTITY_DB\" shared_db = 
\"WSO2_SHARED_DB\" def get_db_meta_data(argument): switcher", "= None identity_db_username = None identity_db_password = <PASSWORD> identity_db_driver =", "\"databaseName=\" + shared_db user = cm.database_config['user'] else: shared_url = cm.database_config['url']", "are not defined in the config file\") os.remove(str(storage_zip_abs_path)) cm.compress_distribution(configured_dist_storing_loc, storage_dir_abs_path)", "+ \"/\" + cm.product_id + \"/\" + INTEGRATION_PATH) logger.info('Building integration", "logger.info(\"Modifying datasource: \" + str(file_path)) deployment_toml_config = toml.load(file_path) logger.info(\"loading dep,loyment.toml", "e: logger.error(\"Error occurred while doing the configuration\", exc_info=True) if __name__", "under the License is distributed on an \"AS IS\" BASIS,", "the repository cm.clone_repo() if cm.test_mode == \"RELEASE\": cm.checkout_to_tag() # product", "\\ DEFAULT_DB_USERNAME, LOG_STORAGE, TEST_OUTPUT_DIR_NAME, DEFAULT_ORACLE_SID, MYSQL_DB_ENGINE, \\ ORACLE_DB_ENGINE, PRODUCT_STORAGE_DIR_NAME, MSSQL_DB_ENGINE", "# Since we have added a method to clone a", "= Path(target_dir_abs_path / dist_name) script_name = Path(WSO2SERVER) script_path = Path(storage_dist_abs_path", "shared_db_password = <PASSWORD> shared_db_driver = None identity_db = \"WSO2_IDENTITY_DB\" shared_db", "deployment_toml_config = toml.load(file_path) logger.info(\"loading dep,loyment.toml file\") logger.info(deployment_toml_config) for key in", "report_file_paths.items(): for file in value: absolute_file_path = Path(cm.workspace + \"/\"", "logger global dist_name logger = cm.function_logger(logging.DEBUG, logging.DEBUG) if sys.version_info <", "for key in deployment_toml_config: if key == 'database': database_config =", "\"/\" + identity_db + \"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\" \\ \"&amp;verifyServerCertificate=false\" shared_url = cm.database_config[", "+ shared_db identity_url = cm.database_config['url'] + \"/\" + identity_db 
user", "cm.database_config['url'] + \"/\" + identity_db user = cm.database_config['user'] password =", "if datasource_path is not None: modify_datasources() else: logger.info(\"Datasource paths are", "method you can define pom file paths in const_<prod>.py as", "# However, in order to execute this method you can", "\"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\" \\ \"&amp;verifyServerCertificate=false\" shared_url = cm.database_config[ 'url'] + \"/\" +", "cm.database_config['user'] elif ORACLE_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url= cm.database_config['url'] + \"/\" +", "permissions and # limitations under the License. # importing required", "name retrieve from product pom files dist_name = cm.get_dist_name(pom_path) #", "+ \"/\" + key) cm.copy_file(absolute_file_path, report_storage) logger.info(\"Report successfully copied\") else:", "if Path.exists(absolute_file_path): cm.copy_file(absolute_file_path, log_storage) else: logger.error(\"File doesn't contain in the", "script you must have Python 3.6 or latest. Current version", "identity_db_config = database_config['identity_db'] identity_db_config ['url'] = \"$env{IDENTITY_DATABASE_URL}\" identity_db_config ['username'] =", "not defined in the config file\") os.remove(str(storage_zip_abs_path)) cm.compress_distribution(configured_dist_storing_loc, storage_dir_abs_path) cm.add_distribution_to_m2(storage_dir_abs_path,", "\"/\" + cm.product_id + \"/\" + TESTNG_DIST_XML_PATHS) # testng_server_mgt_source =", "subprocess.call(['mvn', 'clean', 'install', '-B', '-e', '-Dmaven.test.skip=true'], cwd=source_path) logger.info('Module build is", "cm.get_dist_name(pom_path) # build the product without test once to make", "Exception as e: logger.error(\"Error occurred while configuring the product\", exc_info=True)", "# modify pom files. 
Hence in the current implementation this", "+ ' \\n') cm.build_module(intg_module_path) save_test_output() cm.create_output_property_fle() except Exception as e:", "ANY KIND, either express or implied. # See the License", "the product\", exc_info=True) def build_source_without_tests(source_path): \"\"\"Build the product-source. \"\"\" logger.info('Building", "the License. # You may obtain a copy of the", "M2_PATH, ARTIFACT_REPORTS_PATHS, POM_FILE_PATHS from intg_test_constant import NS, ZIP_FILE_EXTENSION, CARBON_NAME, VALUE_TAG,", "Copyright (c) 2018, WSO2 Inc. (http://wso2.com) All Rights Reserved. #", "# See the License for the specific language governing permissions", "logger.error(\"Error occurred while finding files\", exc_info=True) except IOError as e:", "\"/\" + cm.product_id + \"/\") cm.get_latest_released_dist() elif cm.test_mode == \"SNAPSHOT\":", "e: logger.error(\"Error occurred while configuring the product\", exc_info=True) def build_source_without_tests(source_path):", "pymysql import sqlparse import re from pathlib import Path import", "implementation this method is not using. # However, in order", "cm.winapi_path(file_path) logger.info(\"Modifying pom file: \" + str(file_path)) ET.register_namespace('', NS['d']) artifact_tree", "password = cm.database_config['password'] driver_class_name = cm.database_config['driver_class_name'] os.environ[\"SHARED_DATABASE_URL\"] = shared_url os.environ[\"SHARED_DATABASE_USERNAME\"]", "== cm.database_config['db_engine'].upper(): identity_url = cm.database_config[ 'url'] + \"/\" + identity_db", "call the modify_pom_files method. 
def modify_pom_files(): for pom in POM_FILE_PATHS:", "modify_datasources() else: logger.info(\"Datasource paths are not defined in the config", "file) if Path.exists(absolute_file_path): cm.copy_file(absolute_file_path, log_storage) else: logger.error(\"File doesn't contain in", "key == 'database': database_config = deployment_toml_config[key] for key in database_config:", "identity_db_config ['url'] = \"$env{IDENTITY_DATABASE_URL}\" identity_db_config ['username'] = \"$env{IDENTITY_DATABASE_USERNAME}\" identity_db_config ['password']", "database_names except FileNotFoundError as e: logger.error(\"Error occurred while finding files\",", "dist_name) storage_zip_abs_path = Path(storage_dir_abs_path / zip_name) configured_dist_storing_loc = Path(target_dir_abs_path /", "if Path.exists(absolute_file_path): report_storage = Path(cm.workspace + \"/\" + TEST_OUTPUT_DIR_NAME +", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "module. Build path: '+ str(module_path) + ' \\n') cm.build_module(module_path) intg_module_path", "+ NS['d'] + '}' + CARBON_NAME): neighbor.text = cm.modify_distribution_name(neighbor) for", "while running the run-intg-test.py script\", exc_info=True) except BaseException as e:", "writing, software # distributed under the License is distributed on", "+ \"/\" + file) if Path.exists(absolute_file_path): cm.copy_file(absolute_file_path, log_storage) else: logger.error(\"File", "Path(storage_dist_abs_path / LIB_PATH)) if datasource_path is not None: modify_datasources() else:", "CARBON_NAME: for data in prop: if data.tag == VALUE_TAG: data.text", "system_properties: name = prop.find('d:name', NS) if name is not None", "# testng_destination = Path(cm.workspace + \"/\" + cm.product_id + \"/\"", "local variables pom_path = DIST_POM_PATH engine = cm.db_engine.upper() db_meta_data =", "identity_url = cm.database_config[ 'url'] + \"/\" + identity_db + \"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\"", "import NS, 
ZIP_FILE_EXTENSION, CARBON_NAME, VALUE_TAG, SURFACE_PLUGIN_ARTIFACT_ID, \\ DEPLOYMENT_PROPERTY_FILE_NAME, LOG_FILE_NAME, PRODUCT_STORAGE_DIR_NAME,", "def main(): try: global logger global dist_name logger = cm.function_logger(logging.DEBUG,", "= cm.database_config['password'] driver_class_name = cm.database_config['driver_class_name'] os.environ[\"SHARED_DATABASE_URL\"] = shared_url os.environ[\"SHARED_DATABASE_USERNAME\"] =", "= <PASSWORD>}\" identity_db_config ['driver'] = \"$env{IDENTITY_DATABASE_DRIVER}\" database_names.append(identity_db) if key ==", "for plugin in plugins.findall('d:plugin', NS): artifact_id = plugin.find('d:artifactId', NS) if", "NS): artifact_id = plugin.find('d:artifactId', NS) if artifact_id is not None", "import os import shutil import pymysql import sqlparse import re", "= shared_url os.environ[\"SHARED_DATABASE_USERNAME\"] = user os.environ[\"SHARED_DATABASE_PASSWORD\"] = password os.environ[\"SHARED_DATABASE_DRIVER\"] =", "way to support all products def save_test_output(): report_folder = Path(cm.workspace", "'install', '-B', '-e','-Dmaven.test.skip=true'], shell=True, cwd=source_path) else: subprocess.call(['mvn', 'clean', 'install', '-B',", "LOG_FILE_NAME, PRODUCT_STORAGE_DIR_NAME, \\ DEFAULT_DB_USERNAME, LOG_STORAGE, TEST_OUTPUT_DIR_NAME, DEFAULT_ORACLE_SID, MYSQL_DB_ENGINE, \\ ORACLE_DB_ENGINE,", "Path(target_dir_abs_path / dist_name) script_name = Path(WSO2SERVER) script_path = Path(storage_dist_abs_path /", "as e: logger.error(\"Error occurred while configuring the product\", exc_info=True) def", "+ ZIP_FILE_EXTENSION storage_dir_abs_path = Path(cm.workspace + \"/\" + PRODUCT_STORAGE_DIR_NAME) target_dir_abs_path", "and call the modify_pom_files method. 
def modify_pom_files(): for pom in", "variables for integration test\") def modify_datasources(): file_path = Path(storage_dist_abs_path /", "# # replace testng source # cm.replace_file(testng_source, testng_destination) # #", "= cm.database_config['user'] elif MSSQL_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url = cm.database_config['url'] +", "None or not db_names: raise Exception(\"Failed the product configuring\") cm.setup_databases(db_names,", "3.6 or latest. Current version info: \" + sys.version_info) cm.read_property_files()", "in POM_FILE_PATHS: file_path = Path(cm.workspace + \"/\" + cm.product_id +", "\\n') cm.build_module(intg_module_path) save_test_output() cm.create_output_property_fle() except Exception as e: logger.error(\"Error occurred", "'-e', '-Dmaven.test.skip=true'], cwd=source_path) logger.info('Module build is completed. Module: ' +", "Path(cm.workspace + \"/\" + \"testng-server-mgt.xml\") # testng_server_mgt_destination = Path(cm.workspace +", "if key == 'identity_db': identity_db_config = database_config['identity_db'] identity_db_config ['url'] =", "not required to # modify pom files. 
Hence in the", "= cm.database_config['user'] password = cm.database_config['password'] driver_class_name = cm.database_config['driver_class_name'] os.environ[\"SHARED_DATABASE_URL\"] =", "product pom files dist_name = cm.get_dist_name(pom_path) cm.build_snapshot_dist(distribution_path) elif cm.test_mode ==", "datasource: \" + str(file_path)) deployment_toml_config = toml.load(file_path) logger.info(\"loading dep,loyment.toml file\")", "+ TEST_OUTPUT_DIR_NAME + \"/\" + key) cm.copy_file(absolute_file_path, report_storage) logger.info(\"Report successfully", "import toml import subprocess import wget import logging import inspect", "\"/\" + PRODUCT_STORAGE_DIR_NAME) target_dir_abs_path = Path(cm.workspace + \"/\" + cm.product_id", "pom files dist_name = cm.get_dist_name(pom_path) # build the product without", "['driver'] = \"$env{SHARED_DATABASE_DRIVER}\" database_names.append(shared_db) with open(file_path, 'w') as writer: writer.write(toml.dumps(deployment_toml_config))", "None sql_driver_location = None identity_db_url = None identity_db_username = None", "sys.version_info) cm.read_property_files() if not cm.validate_property_readings(): raise Exception( \"Property file doesn't", "exist_ok=True) log_file_paths = ARTIFACT_REPORTS_PATHS if log_file_paths: for file in log_file_paths:", "sqlparse import re from pathlib import Path import urllib.request as", "datasource_path) if sys.platform.startswith('win'): file_path = cm.winapi_path(file_path) logger.info(\"Modifying datasource: \" +", "Exception( \"To run run-intg-test.py script you must have Python 3.6", "in the # configure_product method and call the modify_pom_files method.", "in system_properties: name = prop.find('d:name', NS) if name is not", "\"/\" + cm.product_id + \"/\" + file) if Path.exists(absolute_file_path): cm.copy_file(absolute_file_path,", "logger.info(\"Modifying pom file: \" + str(file_path)) ET.register_namespace('', NS['d']) artifact_tree =", "= database_config['shared_db'] shared_db_config 
['url'] = \"$env{SHARED_DATABASE_URL}\" shared_db_config ['username'] = \"$env{SHARED_DATABASE_USERNAME}\"", "doesn't contain in the given location: \" + str(absolute_file_path)) #TODO:", "the run-intg-test.py script\", exc_info=True) except BaseException as e: logger.error(\"Error occurred", "in log_file_paths: absolute_file_path = Path(cm.workspace + \"/\" + cm.product_id +", "\"/\") cm.get_latest_released_dist() elif cm.test_mode == \"SNAPSHOT\": # product name retrieve", "must have Python 3.6 or latest. Current version info: \"", "we have added a method to clone a given git", "pom in POM_FILE_PATHS: file_path = Path(cm.workspace + \"/\" + cm.product_id", "cwd=source_path) logger.info('Module build is completed. Module: ' + str(source_path)) def", "using. # However, in order to execute this method you", "sys.platform.startswith('win'): subprocess.call(['mvn', 'clean', 'install', '-B', '-e','-Dmaven.test.skip=true'], shell=True, cwd=source_path) else: subprocess.call(['mvn',", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "key) cm.copy_file(absolute_file_path, report_storage) logger.info(\"Report successfully copied\") else: logger.error(\"File doesn't contain", "+ \"/\" + TEST_OUTPUT_DIR_NAME + \"/\" + key) cm.copy_file(absolute_file_path, report_storage)", "get_db_meta_data(argument): switcher = DB_META_DATA return switcher.get(argument, False) def add_environmental_variables(): if", "+ \"/\" + cm.product_id + \"/\" + DISTRIBUTION_PATH) storage_dist_abs_path =", "== cm.database_config['db_engine'].upper(): identity_url= cm.database_config['url'] + \"/\" + DEFAULT_ORACLE_SID shared_url= cm.database_config['url']", "logger.error(\"Error occurred while doing the configuration\", exc_info=True) if __name__ ==", "else: subprocess.call(['mvn', 'clean', 'install', '-B', '-e', '-Dmaven.test.skip=true'], cwd=source_path) logger.info('Module build", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", 
"\"WSO2_SHARED_DB\" def get_db_meta_data(argument): switcher = DB_META_DATA return switcher.get(argument, False) def", "cm from subprocess import Popen, PIPE import os from prod_test_constant", "M2_PATH) shutil.rmtree(configured_dist_storing_loc, onerror=cm.on_rm_error) return database_names except FileNotFoundError as e: logger.error(\"Error", "key-value pair. Please verify the content of the property file", "ORACLE_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url= cm.database_config['url'] + \"/\" + DEFAULT_ORACLE_SID shared_url=", "if artifact_id is not None and artifact_id.text == SURFACE_PLUGIN_ARTIFACT_ID: configuration", "Module: ' + str(source_path)) def main(): try: global logger global", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "+ identity_db + \"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\" \\ \"&amp;verifyServerCertificate=false\" shared_url = cm.database_config[ 'url']", "= artifarct_root.find('d:build', NS) plugins = data_sources.find('d:plugins', NS) for plugin in", "get properties assigned to local variables pom_path = DIST_POM_PATH engine", "= ARTIFACT_REPORTS_PATHS if log_file_paths: for file in log_file_paths: absolute_file_path =", "modify pom files. Hence in the current implementation this method", "\"/\" + TESTNG_SERVER_MGT_DIST) # # replace testng source # cm.replace_file(testng_source,", "module add_environmental_variables() module_path = Path(cm.workspace + \"/\" + cm.product_id +", "samples and required artifacts to be available. build_source_without_tests(cm.workspace + \"/\"", "/ LIB_PATH)) if datasource_path is not None: modify_datasources() else: logger.info(\"Datasource", "\" \"and the format\") # get properties assigned to local", "PRODUCT_STORAGE_DIR_NAME) target_dir_abs_path = Path(cm.workspace + \"/\" + cm.product_id + \"/\"", "available. 
build_source_without_tests(cm.workspace + \"/\" + cm.product_id + \"/\") cm.get_latest_released_dist() elif", "added a method to clone a given git branch and", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "if sys.platform.startswith('win'): file_path = cm.winapi_path(file_path) logger.info(\"Modifying pom file: \" +", "= [] db_engine = None sql_driver_location = None identity_db_url =", "WSO2SERVER, M2_PATH, ARTIFACT_REPORTS_PATHS, POM_FILE_PATHS from intg_test_constant import NS, ZIP_FILE_EXTENSION, CARBON_NAME,", "DISTRIBUTION_PATH) storage_dist_abs_path = Path(storage_dir_abs_path / dist_name) storage_zip_abs_path = Path(storage_dir_abs_path /", "NS) system_properties = configuration.find('d:systemProperties', NS) for neighbor in system_properties.iter('{' +", "cm.test_mode == \"WUM\": dist_name = cm.get_dist_name_wum() # populate databases db_names", "False) def add_environmental_variables(): if MYSQL_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url = cm.database_config[", "Rights Reserved. # # Licensed under the Apache License, Version", "specific language governing permissions and # limitations under the License.", "the product configuring\") cm.setup_databases(db_names, db_meta_data) # run integration tests #", "= cm.database_config['user'] else: shared_url = cm.database_config['url'] + \"/\" + shared_db", "= password os.environ[\"IDENTITY_DATABASE_DRIVER\"] = driver_class_name logger.info(\"Added environmental variables for integration", "= Path(storage_dir_abs_path / dist_name) storage_zip_abs_path = Path(storage_dir_abs_path / zip_name) configured_dist_storing_loc", "exc_info=True) except Exception as e: logger.error(\"Error occurred while configuring the", "intg_module_path = Path(cm.workspace + \"/\" + cm.product_id + \"/\" +", "License. 
# importing required modules import sys from xml.etree import", "+ \"/\" + cm.product_id + \"/\" + pom) if sys.platform.startswith('win'):", "= identity_url os.environ[\"IDENTITY_DATABASE_USERNAME\"] = user os.environ[\"IDENTITY_DATABASE_PASSWORD\"] = password os.environ[\"IDENTITY_DATABASE_DRIVER\"] =", "dep,loyment.toml file\") logger.info(deployment_toml_config) for key in deployment_toml_config: if key ==", "# you may not use this file except in compliance", "== \"WUM\": dist_name = cm.get_dist_name_wum() # populate databases db_names =", "the # configure_product method and call the modify_pom_files method. def", "str(file_path)) deployment_toml_config = toml.load(file_path) logger.info(\"loading dep,loyment.toml file\") logger.info(deployment_toml_config) for key", "while accessing files\", exc_info=True) except Exception as e: logger.error(\"Error occurred", "support all products def save_test_output(): report_folder = Path(cm.workspace + \"/\"", "pom files dist_name = cm.get_dist_name(pom_path) cm.build_snapshot_dist(distribution_path) elif cm.test_mode == \"WUM\":", "DATASOURCE_PATHS zip_name = dist_name + ZIP_FILE_EXTENSION storage_dir_abs_path = Path(cm.workspace +", "\" + str(file_path)) deployment_toml_config = toml.load(file_path) logger.info(\"loading dep,loyment.toml file\") logger.info(deployment_toml_config)", "os.environ[\"IDENTITY_DATABASE_PASSWORD\"] = password os.environ[\"IDENTITY_DATABASE_DRIVER\"] = driver_class_name logger.info(\"Added environmental variables for", "+ INTEGRATION_PATH) logger.info('Building integration module. 
Build path: '+ str(intg_module_path) +", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "in generic way to support all products def save_log_files(): log_storage", "= None identity_db = \"WSO2_IDENTITY_DB\" shared_db = \"WSO2_SHARED_DB\" def get_db_meta_data(argument):", "\"&amp;verifyServerCertificate=false\" user = cm.database_config['user'] elif ORACLE_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url= cm.database_config['url']", "cm.database_config['user'] password = cm.database_config['password'] driver_class_name = cm.database_config['driver_class_name'] os.environ[\"SHARED_DATABASE_URL\"] = shared_url", "be available. build_source_without_tests(cm.workspace + \"/\" + cm.product_id + \"/\") cm.get_latest_released_dist()", "under the Apache License, Version 2.0 (the \"License\"); # you", "run-intg-test.py script you must have Python 3.6 or latest. Current", "cm.create_output_property_fle() except Exception as e: logger.error(\"Error occurred while running the", "have added a method to clone a given git branch", "'-e','-Dmaven.test.skip=true'], shell=True, cwd=source_path) else: subprocess.call(['mvn', 'clean', 'install', '-B', '-e', '-Dmaven.test.skip=true'],", "and import it to run-intg-test.py. Thereafter assign it to global", "minidom import intg_test_manager as cm from subprocess import Popen, PIPE", "\"SNAPSHOT\": # product name retrieve from product pom files dist_name", "a given git branch and checkout to the latest released", "+ shared_db user = cm.database_config['user'] else: shared_url = cm.database_config['url'] +", "logger.info(str(ARTIFACT_REPORTS_PATHS)) logger.info(str(type(ARTIFACT_REPORTS_PATHS))) report_file_paths = ARTIFACT_REPORTS_PATHS for key, value in report_file_paths.items():", "except FileNotFoundError as e: logger.error(\"Error occurred while finding files\", exc_info=True)", "not using. 
# However, in order to execute this method", "product name retrieve from product pom files dist_name = cm.get_dist_name(pom_path)", "and artifact_id.text == SURFACE_PLUGIN_ARTIFACT_ID: configuration = plugin.find('d:configuration', NS) system_properties =", "exc_info=True) except BaseException as e: logger.error(\"Error occurred while doing the", "# testng_server_mgt_source = Path(cm.workspace + \"/\" + \"testng-server-mgt.xml\") # testng_server_mgt_destination", "a constant # and import it to run-intg-test.py. Thereafter assign", "shared_db identity_url = cm.database_config['url'] + \"/\" + identity_db user =", "password os.environ[\"IDENTITY_DATABASE_DRIVER\"] = driver_class_name logger.info(\"Added environmental variables for integration test\")", "= Path(cm.workspace + \"/\" + cm.product_id + \"/\" + file)", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "occurred while configuring the product\", exc_info=True) def build_source_without_tests(source_path): \"\"\"Build the", "latest released tag it is not required to # modify", "raise Exception( \"To run run-intg-test.py script you must have Python", "logger.error(\"Error occurred while accessing files\", exc_info=True) except Exception as e:", "dist_name = cm.get_dist_name(pom_path) # build the product without test once", "LIB_PATH, WSO2SERVER, M2_PATH, ARTIFACT_REPORTS_PATHS, POM_FILE_PATHS from intg_test_constant import NS, ZIP_FILE_EXTENSION,", "Path(storage_dist_abs_path / script_name) cm.extract_product(storage_dir_abs_path, storage_zip_abs_path) cm.attach_jolokia_agent(script_path) cm.copy_jar_file(Path(cm.database_config['sql_driver_location']), Path(storage_dist_abs_path / LIB_PATH))", "to support all products # def set_custom_testng(): # if cm.use_custom_testng_file", "logger.info('Building common module. 
Build path: '+ str(module_path) + ' \\n')", "+ \";\" + \"databaseName=\" + shared_db user = cm.database_config['user'] else:", "writer.write(toml.dumps(deployment_toml_config)) # Since we have added a method to clone", "system_properties = configuration.find('d:systemProperties', NS) for neighbor in system_properties.iter('{' + NS['d']", "given location: \" + str(absolute_file_path)) #TODO: Improve the method in", "\\ DATASOURCE_PATHS, LIB_PATH, WSO2SERVER, M2_PATH, ARTIFACT_REPORTS_PATHS, POM_FILE_PATHS from intg_test_constant import", "+ \"/\" + TEST_OUTPUT_DIR_NAME) logger.info(str(report_folder)) if Path.exists(report_folder): shutil.rmtree(report_folder) logger.info(str(ARTIFACT_REPORTS_PATHS)) logger.info(str(type(ARTIFACT_REPORTS_PATHS)))", "successfully copied\") else: logger.error(\"File doesn't contain in the given location:", "Path import urllib.request as urllib2 from xml.dom import minidom import", "\\ ORACLE_DB_ENGINE, PRODUCT_STORAGE_DIR_NAME, MSSQL_DB_ENGINE database_names = [] db_engine = None", "variables pom_path = DIST_POM_PATH engine = cm.db_engine.upper() db_meta_data = get_db_meta_data(engine)", "from subprocess import Popen, PIPE import os from prod_test_constant import", "+ \"databaseName=\" + shared_db user = cm.database_config['user'] else: shared_url =", "shared_db_username = None shared_db_password = <PASSWORD> shared_db_driver = None identity_db", "modify_pom_files method. 
def modify_pom_files(): for pom in POM_FILE_PATHS: file_path =", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "Build path: '+ str(module_path) + ' \\n') cm.build_module(module_path) intg_module_path =", "build_source_without_tests(cm.workspace + \"/\" + cm.product_id + \"/\") cm.get_latest_released_dist() elif cm.test_mode", "\"/\" + TEST_OUTPUT_DIR_NAME) logger.info(str(report_folder)) if Path.exists(report_folder): shutil.rmtree(report_folder) logger.info(str(ARTIFACT_REPORTS_PATHS)) logger.info(str(type(ARTIFACT_REPORTS_PATHS))) report_file_paths", "\"/\" + identity_db user = cm.database_config['user'] password = cm.database_config['password'] driver_class_name", "assigned to local variables pom_path = DIST_POM_PATH engine = cm.db_engine.upper()", "os.remove(str(storage_zip_abs_path)) cm.compress_distribution(configured_dist_storing_loc, storage_dir_abs_path) cm.add_distribution_to_m2(storage_dir_abs_path, M2_PATH) shutil.rmtree(configured_dist_storing_loc, onerror=cm.on_rm_error) return database_names except", "Apache License, Version 2.0 (the \"License\"); # you may not", "required modules import sys from xml.etree import ElementTree as ET", "either express or implied. # See the License for the", "storage_zip_abs_path = Path(storage_dir_abs_path / zip_name) configured_dist_storing_loc = Path(target_dir_abs_path / dist_name)", "Path(cm.workspace + \"/\" + cm.product_id + \"/\" + DISTRIBUTION_PATH) storage_dist_abs_path", "and required artifacts to be available. 
build_source_without_tests(cm.workspace + \"/\" +", "= cm.database_config[ 'url'] + \"/\" + shared_db + \\ \"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\"", "+ \"/\" + \"testng.xml\") # testng_destination = Path(cm.workspace + \"/\"", "it to global variable called pom_file_paths in the # configure_product", "content of the property file \" \"and the format\") #", "Exception(\"Failed the product configuring\") cm.setup_databases(db_names, db_meta_data) # run integration tests", "INTEGRATION_PATH, DISTRIBUTION_PATH, \\ DATASOURCE_PATHS, LIB_PATH, WSO2SERVER, M2_PATH, ARTIFACT_REPORTS_PATHS, POM_FILE_PATHS from", "' + str(source_path)) def main(): try: global logger global dist_name", "['url'] = \"$env{SHARED_DATABASE_URL}\" shared_db_config ['username'] = \"$env{SHARED_DATABASE_USERNAME}\" shared_db_config ['password'] =", "cm.product_id + \"/\" + TESTNG_DIST_XML_PATHS) # testng_server_mgt_source = Path(cm.workspace +", "'url'] + \"/\" + shared_db + \\ \"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\" \\ \"&amp;verifyServerCertificate=false\"", "if log_file_paths: for file in log_file_paths: absolute_file_path = Path(cm.workspace +", "\"/\" + shared_db identity_url = cm.database_config['url'] + \"/\" + identity_db", "#TODO: Improve the method in generic way to support all", "testng_server_mgt_destination) def configure_product(): try: global datasource_path global target_dir_abs_path global storage_dist_abs_path", "+ \"/\" + PRODUCT_STORAGE_DIR_NAME) target_dir_abs_path = Path(cm.workspace + \"/\" +", "path: '+ str(intg_module_path) + ' \\n') cm.build_module(intg_module_path) save_test_output() cm.create_output_property_fle() except", "\" + str(absolute_file_path)) #TODO: Improve the method in generic way", "support all products # def set_custom_testng(): # if cm.use_custom_testng_file ==", "Thereafter assign it to global variable called pom_file_paths in the", "logging import inspect import os import shutil import pymysql import", "NS) 
plugins = data_sources.find('d:plugins', NS) for plugin in plugins.findall('d:plugin', NS):", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "method. def modify_pom_files(): for pom in POM_FILE_PATHS: file_path = Path(cm.workspace", "method is not using. # However, in order to execute", "for data in prop: if data.tag == VALUE_TAG: data.text =", "= Path(cm.workspace + \"/\" + \"testng-server-mgt.xml\") # testng_server_mgt_destination = Path(cm.workspace", "None identity_db_username = None identity_db_password = <PASSWORD> identity_db_driver = None", "occurred while running the run-intg-test.py script\", exc_info=True) except BaseException as", "pom) if sys.platform.startswith('win'): file_path = cm.winapi_path(file_path) logger.info(\"Modifying pom file: \"", "is not None: modify_datasources() else: logger.info(\"Datasource paths are not defined", "= Path(cm.workspace + \"/\" + cm.product_id + \"/\" + 'modules/integration/tests-common')", "cm.use_custom_testng_file == \"TRUE\": # testng_source = Path(cm.workspace + \"/\" +", "ElementTree as ET import toml import subprocess import wget import", "\"\"\"Build the product-source. 
\"\"\" logger.info('Building the source skipping tests') if", "However, in order to execute this method you can define", "xml.dom import minidom import intg_test_manager as cm from subprocess import", "= Path(cm.workspace + \"/\" + cm.product_id + \"/\" + pom)", "prop: if data.tag == VALUE_TAG: data.text = cm.modify_distribution_name(data) break artifact_tree.write(file_path)", "Since we have added a method to clone a given", "the config file\") os.remove(str(storage_zip_abs_path)) cm.compress_distribution(configured_dist_storing_loc, storage_dir_abs_path) cm.add_distribution_to_m2(storage_dir_abs_path, M2_PATH) shutil.rmtree(configured_dist_storing_loc, onerror=cm.on_rm_error)", "\\ \"&amp;verifyServerCertificate=false\" user = cm.database_config['user'] elif ORACLE_DB_ENGINE == cm.database_config['db_engine'].upper(): identity_url=", "\"/\" + TEST_OUTPUT_DIR_NAME + \"/\" + key) cm.copy_file(absolute_file_path, report_storage) logger.info(\"Report", "artifact_tree.write(file_path) #TODO: Improve the method in generic way to support", "os.environ[\"IDENTITY_DATABASE_USERNAME\"] = user os.environ[\"IDENTITY_DATABASE_PASSWORD\"] = password os.environ[\"IDENTITY_DATABASE_DRIVER\"] = driver_class_name logger.info(\"Added", "database_config['identity_db'] identity_db_config ['url'] = \"$env{IDENTITY_DATABASE_URL}\" identity_db_config ['username'] = \"$env{IDENTITY_DATABASE_USERNAME}\" identity_db_config", "if data.tag == VALUE_TAG: data.text = cm.modify_distribution_name(data) break artifact_tree.write(file_path) #TODO:", "writer: writer.write(toml.dumps(deployment_toml_config)) # Since we have added a method to", "+ ' \\n') cm.build_module(module_path) intg_module_path = Path(cm.workspace + \"/\" +", "intg_test_manager as cm from subprocess import Popen, PIPE import os", "user = cm.database_config['user'] password = cm.database_config['password'] driver_class_name = cm.database_config['driver_class_name'] os.environ[\"SHARED_DATABASE_URL\"]", "+ \"/\" + shared_db 
identity_url = cm.database_config['url'] + \"/\" +", "way to support all products # def set_custom_testng(): # if", "e: logger.error(\"Error occurred while running the run-intg-test.py script\", exc_info=True) except", "def save_test_output(): report_folder = Path(cm.workspace + \"/\" + TEST_OUTPUT_DIR_NAME) logger.info(str(report_folder))", "+ \"/\" + cm.product_id + \"/\" + 'modules/integration/tests-common') logger.info('Building common", "in order to execute this method you can define pom", "in database_config: if key == 'identity_db': identity_db_config = database_config['identity_db'] identity_db_config", "cm.read_property_files() if not cm.validate_property_readings(): raise Exception( \"Property file doesn't have", "= cm.modify_distribution_name(data) break artifact_tree.write(file_path) #TODO: Improve the method in generic", "ZIP_FILE_EXTENSION storage_dir_abs_path = Path(cm.workspace + \"/\" + PRODUCT_STORAGE_DIR_NAME) target_dir_abs_path =", "report_storage = Path(cm.workspace + \"/\" + TEST_OUTPUT_DIR_NAME + \"/\" +", "use this file except in compliance with the License. #", "+ cm.product_id + \"/\") cm.get_latest_released_dist() elif cm.test_mode == \"SNAPSHOT\": #", "path: '+ str(module_path) + ' \\n') cm.build_module(module_path) intg_module_path = Path(cm.workspace", "exc_info=True) def build_source_without_tests(source_path): \"\"\"Build the product-source. 
\"\"\" logger.info('Building the source", "data in prop: if data.tag == VALUE_TAG: data.text = cm.modify_distribution_name(data)", "products def save_test_output(): report_folder = Path(cm.workspace + \"/\" + TEST_OUTPUT_DIR_NAME)", "+ \"/\" + identity_db user = cm.database_config['user'] password = cm.database_config['password']", "== 'database': database_config = deployment_toml_config[key] for key in database_config: if", "# construct the database configurations cm.construct_db_config(db_meta_data) # clone the repository", "+ pom) if sys.platform.startswith('win'): file_path = cm.winapi_path(file_path) logger.info(\"Modifying pom file:", "if not Path.exists(log_storage): Path(log_storage).mkdir(parents=True, exist_ok=True) log_file_paths = ARTIFACT_REPORTS_PATHS if log_file_paths:", "to the latest released tag it is not required to", "database_names = [] db_engine = None sql_driver_location = None identity_db_url", "method in generic way to support all products # def", "'clean', 'install', '-B', '-e', '-Dmaven.test.skip=true'], cwd=source_path) logger.info('Module build is completed.", "= \"WSO2_SHARED_DB\" def get_db_meta_data(argument): switcher = DB_META_DATA return switcher.get(argument, False)", "target_dir_abs_path global storage_dist_abs_path global pom_file_paths datasource_path = DATASOURCE_PATHS zip_name =", "copied\") else: logger.error(\"File doesn't contain in the given location: \"", "cm.database_config['db_engine'].upper(): identity_url = cm.database_config['url'] + \";\" + \"databaseName=\" + identity_db", "# def set_custom_testng(): # if cm.use_custom_testng_file == \"TRUE\": # testng_source", "replace testng server mgt source # cm.replace_file(testng_server_mgt_source, testng_server_mgt_destination) def configure_product():", "files dist_name = cm.get_dist_name(pom_path) cm.build_snapshot_dist(distribution_path) elif cm.test_mode == \"WUM\": dist_name", "latest. 
Current version info: \" + sys.version_info) cm.read_property_files() if not", "+ str(absolute_file_path)) #TODO: Improve the method in generic way to", "user os.environ[\"IDENTITY_DATABASE_PASSWORD\"] = password os.environ[\"IDENTITY_DATABASE_DRIVER\"] = driver_class_name logger.info(\"Added environmental variables", "in compliance with the License. # You may obtain a", "= cm.database_config['url'] + \";\" + \"databaseName=\" + identity_db shared_url =", "in deployment_toml_config: if key == 'database': database_config = deployment_toml_config[key] for", "pathlib import Path import urllib.request as urllib2 from xml.dom import", "or latest. Current version info: \" + sys.version_info) cm.read_property_files() if", "software # distributed under the License is distributed on an", "<PASSWORD> identity_db_driver = None shared_db_url = None shared_db_username = None", "testng server mgt source # cm.replace_file(testng_server_mgt_source, testng_server_mgt_destination) def configure_product(): try:", "for neighbor in system_properties.iter('{' + NS['d'] + '}' + CARBON_NAME):", "shutil.rmtree(report_folder) logger.info(str(ARTIFACT_REPORTS_PATHS)) logger.info(str(type(ARTIFACT_REPORTS_PATHS))) report_file_paths = ARTIFACT_REPORTS_PATHS for key, value in", "file\") logger.info(deployment_toml_config) for key in deployment_toml_config: if key == 'database':", "cm.product_id + \"/\" + 'modules/integration/tests-common') logger.info('Building common module. Build path:", "global dist_name logger = cm.function_logger(logging.DEBUG, logging.DEBUG) if sys.version_info < (3,", "def build_source_without_tests(source_path): \"\"\"Build the product-source. 
\"\"\" logger.info('Building the source skipping", "TEST_OUTPUT_DIR_NAME) logger.info(str(report_folder)) if Path.exists(report_folder): shutil.rmtree(report_folder) logger.info(str(ARTIFACT_REPORTS_PATHS)) logger.info(str(type(ARTIFACT_REPORTS_PATHS))) report_file_paths = ARTIFACT_REPORTS_PATHS", "artifarct_root.find('d:build', NS) plugins = data_sources.find('d:plugins', NS) for plugin in plugins.findall('d:plugin',", "DISTRIBUTION_PATH, \\ DATASOURCE_PATHS, LIB_PATH, WSO2SERVER, M2_PATH, ARTIFACT_REPORTS_PATHS, POM_FILE_PATHS from intg_test_constant", "storage_dir_abs_path = Path(cm.workspace + \"/\" + PRODUCT_STORAGE_DIR_NAME) target_dir_abs_path = Path(cm.workspace", "build is completed. Module: ' + str(source_path)) def main(): try:", "+ DEFAULT_ORACLE_SID shared_url= cm.database_config['url'] + \"/\" + DEFAULT_ORACLE_SID user =", "else: shared_url = cm.database_config['url'] + \"/\" + shared_db identity_url =", "= toml.load(file_path) logger.info(\"loading dep,loyment.toml file\") logger.info(deployment_toml_config) for key in deployment_toml_config:", "shutil import pymysql import sqlparse import re from pathlib import", "password os.environ[\"SHARED_DATABASE_DRIVER\"] = driver_class_name os.environ[\"IDENTITY_DATABASE_URL\"] = identity_url os.environ[\"IDENTITY_DATABASE_USERNAME\"] = user", "toml.load(file_path) logger.info(\"loading dep,loyment.toml file\") logger.info(deployment_toml_config) for key in deployment_toml_config: if", "product-source. 
\"\"\" logger.info('Building the source skipping tests') if sys.platform.startswith('win'): subprocess.call(['mvn',", "cm.database_config['url'] + \"/\" + shared_db identity_url = cm.database_config['url'] + \"/\"", "str(absolute_file_path)) #TODO: Improve the method in generic way to support", "if sys.version_info < (3, 6): raise Exception( \"To run run-intg-test.py", "product pom files dist_name = cm.get_dist_name(pom_path) # build the product", "except Exception as e: logger.error(\"Error occurred while running the run-intg-test.py", "Popen, PIPE import os from prod_test_constant import DB_META_DATA, DIST_POM_PATH, INTEGRATION_PATH,", "common module. Build path: '+ str(module_path) + ' \\n') cm.build_module(module_path)", "governing permissions and # limitations under the License. # importing", "None and name.text == CARBON_NAME: for data in prop: if", "urllib.request as urllib2 from xml.dom import minidom import intg_test_manager as", "with the License. # You may obtain a copy of", "identity_db_config ['driver'] = \"$env{IDENTITY_DATABASE_DRIVER}\" database_names.append(identity_db) if key == 'shared_db': shared_db_config", "the current implementation this method is not using. 
# However,", "prop in system_properties: name = prop.find('d:name', NS) if name is", "\"/\" + cm.product_id + \"/\" + DISTRIBUTION_PATH) storage_dist_abs_path = Path(storage_dir_abs_path", "= cm.get_dist_name_wum() # populate databases db_names = configure_product() if db_names", "Path(cm.workspace + \"/\" + \"testng.xml\") # testng_destination = Path(cm.workspace +", "= \"$env{SHARED_DATABASE_USERNAME}\" shared_db_config ['password'] = <PASSWORD>{<PASSWORD>}\" shared_db_config ['driver'] = \"$env{SHARED_DATABASE_DRIVER}\"", "+ cm.product_id + \"/\" + TESTNG_DIST_XML_PATHS) # testng_server_mgt_source = Path(cm.workspace", "= \"$env{IDENTITY_DATABASE_USERNAME}\" identity_db_config ['password'] = <PASSWORD>}\" identity_db_config ['driver'] = \"$env{IDENTITY_DATABASE_DRIVER}\"", "file_path = cm.winapi_path(file_path) logger.info(\"Modifying pom file: \" + str(file_path)) ET.register_namespace('',", "import wget import logging import inspect import os import shutil", "value: absolute_file_path = Path(cm.workspace + \"/\" + cm.product_id + \"/\"", "express or implied. # See the License for the specific", "except in compliance with the License. # You may obtain", "LOG_STORAGE) if not Path.exists(log_storage): Path(log_storage).mkdir(parents=True, exist_ok=True) log_file_paths = ARTIFACT_REPORTS_PATHS if", "run-intg-test.py. Thereafter assign it to global variable called pom_file_paths in", "current implementation this method is not using. 
# However, in", "DIST_POM_PATH, INTEGRATION_PATH, DISTRIBUTION_PATH, \\ DATASOURCE_PATHS, LIB_PATH, WSO2SERVER, M2_PATH, ARTIFACT_REPORTS_PATHS, POM_FILE_PATHS", "raise Exception(\"Failed the product configuring\") cm.setup_databases(db_names, db_meta_data) # run integration", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "= None shared_db_username = None shared_db_password = <PASSWORD> shared_db_driver =", "server mgt source # cm.replace_file(testng_server_mgt_source, testng_server_mgt_destination) def configure_product(): try: global", "PIPE import os from prod_test_constant import DB_META_DATA, DIST_POM_PATH, INTEGRATION_PATH, DISTRIBUTION_PATH,", "try: global datasource_path global target_dir_abs_path global storage_dist_abs_path global pom_file_paths datasource_path", "except Exception as e: logger.error(\"Error occurred while configuring the product\",", "CONDITIONS OF ANY KIND, either express or implied. # See", "= DIST_POM_PATH engine = cm.db_engine.upper() db_meta_data = get_db_meta_data(engine) distribution_path =", "identity_db_config ['password'] = <PASSWORD>}\" identity_db_config ['driver'] = \"$env{IDENTITY_DATABASE_DRIVER}\" database_names.append(identity_db) if", "Path(cm.workspace + \"/\" + PRODUCT_STORAGE_DIR_NAME) target_dir_abs_path = Path(cm.workspace + \"/\"", "and checkout to the latest released tag it is not", "= data_sources.find('d:plugins', NS) for plugin in plugins.findall('d:plugin', NS): artifact_id =", "SURFACE_PLUGIN_ARTIFACT_ID: configuration = plugin.find('d:configuration', NS) system_properties = configuration.find('d:systemProperties', NS) for", "str(intg_module_path) + ' \\n') cm.build_module(intg_module_path) save_test_output() cm.create_output_property_fle() except Exception as", "define pom file paths in const_<prod>.py as a constant #", "in prop: if data.tag == VALUE_TAG: data.text = cm.modify_distribution_name(data) break", "as ET import toml import subprocess import wget import logging", 
"DEPLOYMENT_PROPERTY_FILE_NAME, LOG_FILE_NAME, PRODUCT_STORAGE_DIR_NAME, \\ DEFAULT_DB_USERNAME, LOG_STORAGE, TEST_OUTPUT_DIR_NAME, DEFAULT_ORACLE_SID, MYSQL_DB_ENGINE, \\", "'url'] + \"/\" + identity_db + \"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\" \\ \"&amp;verifyServerCertificate=false\" shared_url", "files\", exc_info=True) except Exception as e: logger.error(\"Error occurred while configuring", "database_config = deployment_toml_config[key] for key in database_config: if key ==", "artifact_id = plugin.find('d:artifactId', NS) if artifact_id is not None and", "running the run-intg-test.py script\", exc_info=True) except BaseException as e: logger.error(\"Error", "name = prop.find('d:name', NS) if name is not None and", "identity_db shared_url = cm.database_config['url'] + \";\" + \"databaseName=\" + shared_db", "str(file_path)) ET.register_namespace('', NS['d']) artifact_tree = ET.parse(file_path) artifarct_root = artifact_tree.getroot() data_sources", "as e: logger.error(\"Error occurred while finding files\", exc_info=True) except IOError", "+ \"/\") cm.get_latest_released_dist() elif cm.test_mode == \"SNAPSHOT\": # product name", "\"/\" + cm.product_id + \"/\" + file) if Path.exists(absolute_file_path): report_storage", "all products # def set_custom_testng(): # if cm.use_custom_testng_file == \"TRUE\":", "\"/\" + pom) if sys.platform.startswith('win'): file_path = cm.winapi_path(file_path) logger.info(\"Modifying pom", "'-B', '-e', '-Dmaven.test.skip=true'], cwd=source_path) logger.info('Module build is completed. 
Module: '", "# replace testng server mgt source # cm.replace_file(testng_server_mgt_source, testng_server_mgt_destination) def", "file\") os.remove(str(storage_zip_abs_path)) cm.compress_distribution(configured_dist_storing_loc, storage_dir_abs_path) cm.add_distribution_to_m2(storage_dir_abs_path, M2_PATH) shutil.rmtree(configured_dist_storing_loc, onerror=cm.on_rm_error) return database_names", "the latest released tag it is not required to #", "cm.test_mode == \"RELEASE\": cm.checkout_to_tag() # product name retrieve from product", "+ \"/\" + DEFAULT_ORACLE_SID user = cm.database_config['user'] elif MSSQL_DB_ENGINE ==", "cm.get_dist_name_wum() # populate databases db_names = configure_product() if db_names is", "import minidom import intg_test_manager as cm from subprocess import Popen,", "have Python 3.6 or latest. Current version info: \" +", "skipping tests') if sys.platform.startswith('win'): subprocess.call(['mvn', 'clean', 'install', '-B', '-e','-Dmaven.test.skip=true'], shell=True,", "from prod_test_constant import DB_META_DATA, DIST_POM_PATH, INTEGRATION_PATH, DISTRIBUTION_PATH, \\ DATASOURCE_PATHS, LIB_PATH,", "cm.product_id + \"/\" + TESTNG_SERVER_MGT_DIST) # # replace testng source", "ET import toml import subprocess import wget import logging import", "shared_db_config ['url'] = \"$env{SHARED_DATABASE_URL}\" shared_db_config ['username'] = \"$env{SHARED_DATABASE_USERNAME}\" shared_db_config ['password']", "Path(storage_dist_abs_path / datasource_path) if sys.platform.startswith('win'): file_path = cm.winapi_path(file_path) logger.info(\"Modifying datasource:", "= Path(cm.workspace + \"/\" + LOG_STORAGE) if not Path.exists(log_storage): Path(log_storage).mkdir(parents=True,", "key == 'shared_db': shared_db_config = database_config['shared_db'] shared_db_config ['url'] = \"$env{SHARED_DATABASE_URL}\"", "report_folder = Path(cm.workspace + \"/\" + TEST_OUTPUT_DIR_NAME) logger.info(str(report_folder)) if Path.exists(report_folder):", "run run-intg-test.py 
script you must have Python 3.6 or latest.", "retrieve from product pom files dist_name = cm.get_dist_name(pom_path) cm.build_snapshot_dist(distribution_path) elif", "import subprocess import wget import logging import inspect import os", "name retrieve from product pom files dist_name = cm.get_dist_name(pom_path) cm.build_snapshot_dist(distribution_path)", "report_storage) logger.info(\"Report successfully copied\") else: logger.error(\"File doesn't contain in the", "Inc. (http://wso2.com) All Rights Reserved. # # Licensed under the", "not db_names: raise Exception(\"Failed the product configuring\") cm.setup_databases(db_names, db_meta_data) #", "= driver_class_name logger.info(\"Added environmental variables for integration test\") def modify_datasources():", "logger.info(str(type(ARTIFACT_REPORTS_PATHS))) report_file_paths = ARTIFACT_REPORTS_PATHS for key, value in report_file_paths.items(): for", "cm.validate_property_readings(): raise Exception( \"Property file doesn't have mandatory key-value pair.", "property file \" \"and the format\") # get properties assigned", "= <PASSWORD> shared_db_driver = None identity_db = \"WSO2_IDENTITY_DB\" shared_db =", "None shared_db_password = <PASSWORD> shared_db_driver = None identity_db = \"WSO2_IDENTITY_DB\"", "modify_pom_files(): for pom in POM_FILE_PATHS: file_path = Path(cm.workspace + \"/\"", "modify_datasources(): file_path = Path(storage_dist_abs_path / datasource_path) if sys.platform.startswith('win'): file_path =", "# Copyright (c) 2018, WSO2 Inc. 
(http://wso2.com) All Rights Reserved.", "datasource_path = DATASOURCE_PATHS zip_name = dist_name + ZIP_FILE_EXTENSION storage_dir_abs_path =", "databases db_names = configure_product() if db_names is None or not", "identity_db_password = <PASSWORD> identity_db_driver = None shared_db_url = None shared_db_username", "\" + str(file_path)) ET.register_namespace('', NS['d']) artifact_tree = ET.parse(file_path) artifarct_root =", "cm.get_dist_name(pom_path) cm.build_snapshot_dist(distribution_path) elif cm.test_mode == \"WUM\": dist_name = cm.get_dist_name_wum() #", "else: logger.info(\"Datasource paths are not defined in the config file\")", "\"/\" + cm.product_id + \"/\" + TESTNG_SERVER_MGT_DIST) # # replace", "\"$env{IDENTITY_DATABASE_USERNAME}\" identity_db_config ['password'] = <PASSWORD>}\" identity_db_config ['driver'] = \"$env{IDENTITY_DATABASE_DRIVER}\" database_names.append(identity_db)", "import os from prod_test_constant import DB_META_DATA, DIST_POM_PATH, INTEGRATION_PATH, DISTRIBUTION_PATH, \\", "+ CARBON_NAME): neighbor.text = cm.modify_distribution_name(neighbor) for prop in system_properties: name", "configuration.find('d:systemProperties', NS) for neighbor in system_properties.iter('{' + NS['d'] + '}'", "not None: modify_datasources() else: logger.info(\"Datasource paths are not defined in", "Path(cm.workspace + \"/\" + cm.product_id + \"/\" + 'modules/integration/tests-common') logger.info('Building", "# run integration tests # Buld Common module add_environmental_variables() module_path", "= password os.environ[\"SHARED_DATABASE_DRIVER\"] = driver_class_name os.environ[\"IDENTITY_DATABASE_URL\"] = identity_url os.environ[\"IDENTITY_DATABASE_USERNAME\"] =", "for prop in system_properties: name = prop.find('d:name', NS) if name", "shared_url os.environ[\"SHARED_DATABASE_USERNAME\"] = user os.environ[\"SHARED_DATABASE_PASSWORD\"] = password os.environ[\"SHARED_DATABASE_DRIVER\"] = driver_class_name", "artifacts to be available. 
build_source_without_tests(cm.workspace + \"/\" + cm.product_id +", "script_name = Path(WSO2SERVER) script_path = Path(storage_dist_abs_path / script_name) cm.extract_product(storage_dir_abs_path, storage_zip_abs_path)", "storage_dir_abs_path) cm.add_distribution_to_m2(storage_dir_abs_path, M2_PATH) shutil.rmtree(configured_dist_storing_loc, onerror=cm.on_rm_error) return database_names except FileNotFoundError as", "completed. Module: ' + str(source_path)) def main(): try: global logger", "in value: absolute_file_path = Path(cm.workspace + \"/\" + cm.product_id +", "try: global logger global dist_name logger = cm.function_logger(logging.DEBUG, logging.DEBUG) if", "cm.database_config['url'] + \"/\" + DEFAULT_ORACLE_SID user = cm.database_config['user'] elif MSSQL_DB_ENGINE", "log_storage = Path(cm.workspace + \"/\" + LOG_STORAGE) if not Path.exists(log_storage):", "def set_custom_testng(): # if cm.use_custom_testng_file == \"TRUE\": # testng_source =", "config file\") os.remove(str(storage_zip_abs_path)) cm.compress_distribution(configured_dist_storing_loc, storage_dir_abs_path) cm.add_distribution_to_m2(storage_dir_abs_path, M2_PATH) shutil.rmtree(configured_dist_storing_loc, onerror=cm.on_rm_error) return", "get_db_meta_data(engine) distribution_path = DISTRIBUTION_PATH # construct the database configurations cm.construct_db_config(db_meta_data)", "Current version info: \" + sys.version_info) cm.read_property_files() if not cm.validate_property_readings():", "save_test_output(): report_folder = Path(cm.workspace + \"/\" + TEST_OUTPUT_DIR_NAME) logger.info(str(report_folder)) if", "with open(file_path, 'w') as writer: writer.write(toml.dumps(deployment_toml_config)) # Since we have", "the modify_pom_files method. def modify_pom_files(): for pom in POM_FILE_PATHS: file_path", "= Path(cm.workspace + \"/\" + cm.product_id + \"/\" + INTEGRATION_PATH)", "under the License. # importing required modules import sys from", "method and call the modify_pom_files method. 
def modify_pom_files(): for pom", "db_names = configure_product() if db_names is None or not db_names:", "released tag it is not required to # modify pom", "for key in database_config: if key == 'identity_db': identity_db_config =", "tests # Buld Common module add_environmental_variables() module_path = Path(cm.workspace +", "+ '}' + CARBON_NAME): neighbor.text = cm.modify_distribution_name(neighbor) for prop in", "Build path: '+ str(intg_module_path) + ' \\n') cm.build_module(intg_module_path) save_test_output() cm.create_output_property_fle()", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "os.environ[\"SHARED_DATABASE_DRIVER\"] = driver_class_name os.environ[\"IDENTITY_DATABASE_URL\"] = identity_url os.environ[\"IDENTITY_DATABASE_USERNAME\"] = user os.environ[\"IDENTITY_DATABASE_PASSWORD\"]", "Path(WSO2SERVER) script_path = Path(storage_dist_abs_path / script_name) cm.extract_product(storage_dir_abs_path, storage_zip_abs_path) cm.attach_jolokia_agent(script_path) cm.copy_jar_file(Path(cm.database_config['sql_driver_location']),", "is not using. 
# However, in order to execute this", "cm.get_latest_released_dist() elif cm.test_mode == \"SNAPSHOT\": # product name retrieve from", "\"/\" + DEFAULT_ORACLE_SID shared_url= cm.database_config['url'] + \"/\" + DEFAULT_ORACLE_SID user", "PRODUCT_STORAGE_DIR_NAME, \\ DEFAULT_DB_USERNAME, LOG_STORAGE, TEST_OUTPUT_DIR_NAME, DEFAULT_ORACLE_SID, MYSQL_DB_ENGINE, \\ ORACLE_DB_ENGINE, PRODUCT_STORAGE_DIR_NAME,", "+ \"/\" + DISTRIBUTION_PATH) storage_dist_abs_path = Path(storage_dir_abs_path / dist_name) storage_zip_abs_path", "the format\") # get properties assigned to local variables pom_path", "to execute this method you can define pom file paths", "NS['d']) artifact_tree = ET.parse(file_path) artifarct_root = artifact_tree.getroot() data_sources = artifarct_root.find('d:build',", "['password'] = <PASSWORD>}\" identity_db_config ['driver'] = \"$env{IDENTITY_DATABASE_DRIVER}\" database_names.append(identity_db) if key", "'+ str(intg_module_path) + ' \\n') cm.build_module(intg_module_path) save_test_output() cm.create_output_property_fle() except Exception", "run-intg-test.py script\", exc_info=True) except BaseException as e: logger.error(\"Error occurred while", "= \"$env{SHARED_DATABASE_URL}\" shared_db_config ['username'] = \"$env{SHARED_DATABASE_USERNAME}\" shared_db_config ['password'] = <PASSWORD>{<PASSWORD>}\"", "ARTIFACT_REPORTS_PATHS for key, value in report_file_paths.items(): for file in value:", "contain in the given location: \" + str(absolute_file_path)) #TODO: Improve", "+ \"/\" + INTEGRATION_PATH) logger.info('Building integration module. 
Build path: '+", "data.tag == VALUE_TAG: data.text = cm.modify_distribution_name(data) break artifact_tree.write(file_path) #TODO: Improve", "Version 2.0 (the \"License\"); # you may not use this", "+ \"/\" + shared_db + \\ \"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\" \\ \"&amp;verifyServerCertificate=false\" user", "cm.product_id + \"/\" + file) if Path.exists(absolute_file_path): report_storage = Path(cm.workspace", "== 'identity_db': identity_db_config = database_config['identity_db'] identity_db_config ['url'] = \"$env{IDENTITY_DATABASE_URL}\" identity_db_config", "\"/\" + shared_db + \\ \"?useSSL=false&amp;autoReconnect=true&amp;requireSSL=false\" \\ \"&amp;verifyServerCertificate=false\" user =", "by applicable law or agreed to in writing, software #", "shared_url = cm.database_config['url'] + \"/\" + shared_db identity_url = cm.database_config['url']", "shutil.rmtree(configured_dist_storing_loc, onerror=cm.on_rm_error) return database_names except FileNotFoundError as e: logger.error(\"Error occurred", "SURFACE_PLUGIN_ARTIFACT_ID, \\ DEPLOYMENT_PROPERTY_FILE_NAME, LOG_FILE_NAME, PRODUCT_STORAGE_DIR_NAME, \\ DEFAULT_DB_USERNAME, LOG_STORAGE, TEST_OUTPUT_DIR_NAME, DEFAULT_ORACLE_SID,", "in the current implementation this method is not using. #", "== VALUE_TAG: data.text = cm.modify_distribution_name(data) break artifact_tree.write(file_path) #TODO: Improve the", "file_path = Path(cm.workspace + \"/\" + cm.product_id + \"/\" +", "Path(storage_dir_abs_path / zip_name) configured_dist_storing_loc = Path(target_dir_abs_path / dist_name) script_name =", "== \"TRUE\": # testng_source = Path(cm.workspace + \"/\" + \"testng.xml\")", "to # modify pom files. 
Hence in the current implementation", "= Path(cm.workspace + \"/\" + cm.product_id + \"/\" + DISTRIBUTION_PATH)", "as writer: writer.write(toml.dumps(deployment_toml_config)) # Since we have added a method", "file_path = cm.winapi_path(file_path) logger.info(\"Modifying datasource: \" + str(file_path)) deployment_toml_config =", "\"/\" + file) if Path.exists(absolute_file_path): cm.copy_file(absolute_file_path, log_storage) else: logger.error(\"File doesn't", "elif cm.test_mode == \"WUM\": dist_name = cm.get_dist_name_wum() # populate databases", "\"WUM\": dist_name = cm.get_dist_name_wum() # populate databases db_names = configure_product()", "+ cm.product_id + \"/\" + DISTRIBUTION_PATH) storage_dist_abs_path = Path(storage_dir_abs_path /", "from product pom files dist_name = cm.get_dist_name(pom_path) cm.build_snapshot_dist(distribution_path) elif cm.test_mode", "applicable law or agreed to in writing, software # distributed", "\"testng-server-mgt.xml\") # testng_server_mgt_destination = Path(cm.workspace + \"/\" + cm.product_id +", "['password'] = <PASSWORD>{<PASSWORD>}\" shared_db_config ['driver'] = \"$env{SHARED_DATABASE_DRIVER}\" database_names.append(shared_db) with open(file_path,", "+ cm.product_id + \"/\" + 'modules/integration/tests-common') logger.info('Building common module. 
Build", "# replace testng source # cm.replace_file(testng_source, testng_destination) # # replace", "+ cm.product_id + \"/\" + file) if Path.exists(absolute_file_path): cm.copy_file(absolute_file_path, log_storage)", "= cm.winapi_path(file_path) logger.info(\"Modifying pom file: \" + str(file_path)) ET.register_namespace('', NS['d'])", "Path.exists(log_storage): Path(log_storage).mkdir(parents=True, exist_ok=True) log_file_paths = ARTIFACT_REPORTS_PATHS if log_file_paths: for file", "\"RELEASE\": cm.checkout_to_tag() # product name retrieve from product pom files", "or not db_names: raise Exception(\"Failed the product configuring\") cm.setup_databases(db_names, db_meta_data)", "identity_db_config ['username'] = \"$env{IDENTITY_DATABASE_USERNAME}\" identity_db_config ['password'] = <PASSWORD>}\" identity_db_config ['driver']", "\"$env{SHARED_DATABASE_USERNAME}\" shared_db_config ['password'] = <PASSWORD>{<PASSWORD>}\" shared_db_config ['driver'] = \"$env{SHARED_DATABASE_DRIVER}\" database_names.append(shared_db)", "os.environ[\"IDENTITY_DATABASE_DRIVER\"] = driver_class_name logger.info(\"Added environmental variables for integration test\") def", "to make samples and required artifacts to be available. build_source_without_tests(cm.workspace", "# get properties assigned to local variables pom_path = DIST_POM_PATH", "support all products def save_log_files(): log_storage = Path(cm.workspace + \"/\"", "sys.version_info < (3, 6): raise Exception( \"To run run-intg-test.py script", "testng_server_mgt_source = Path(cm.workspace + \"/\" + \"testng-server-mgt.xml\") # testng_server_mgt_destination =", "# You may obtain a copy of the License at", "+ cm.product_id + \"/\" + file) if Path.exists(absolute_file_path): report_storage =", "file doesn't have mandatory key-value pair. 
Please verify the content", "= artifact_tree.getroot() data_sources = artifarct_root.find('d:build', NS) plugins = data_sources.find('d:plugins', NS)", "['username'] = \"$env{SHARED_DATABASE_USERNAME}\" shared_db_config ['password'] = <PASSWORD>{<PASSWORD>}\" shared_db_config ['driver'] =", "dist_name = cm.get_dist_name_wum() # populate databases db_names = configure_product() if", "shared_db_config ['driver'] = \"$env{SHARED_DATABASE_DRIVER}\" database_names.append(shared_db) with open(file_path, 'w') as writer:", "< (3, 6): raise Exception( \"To run run-intg-test.py script you", "if cm.test_mode == \"RELEASE\": cm.checkout_to_tag() # product name retrieve from", "format\") # get properties assigned to local variables pom_path =", "import urllib.request as urllib2 from xml.dom import minidom import intg_test_manager", "identity_db user = cm.database_config['user'] password = cm.database_config['password'] driver_class_name = cm.database_config['driver_class_name']", "break artifact_tree.write(file_path) #TODO: Improve the method in generic way to", "as cm from subprocess import Popen, PIPE import os from", "occurred while finding files\", exc_info=True) except IOError as e: logger.error(\"Error", "# configure_product method and call the modify_pom_files method. 
def modify_pom_files():", "Path.exists(absolute_file_path): cm.copy_file(absolute_file_path, log_storage) else: logger.error(\"File doesn't contain in the given", "for key, value in report_file_paths.items(): for file in value: absolute_file_path", "zip_name = dist_name + ZIP_FILE_EXTENSION storage_dir_abs_path = Path(cm.workspace + \"/\"", "DISTRIBUTION_PATH # construct the database configurations cm.construct_db_config(db_meta_data) # clone the", "/ script_name) cm.extract_product(storage_dir_abs_path, storage_zip_abs_path) cm.attach_jolokia_agent(script_path) cm.copy_jar_file(Path(cm.database_config['sql_driver_location']), Path(storage_dist_abs_path / LIB_PATH)) if", "neighbor.text = cm.modify_distribution_name(neighbor) for prop in system_properties: name = prop.find('d:name',", "INTEGRATION_PATH) logger.info('Building integration module. Build path: '+ str(intg_module_path) + '", "\"and the format\") # get properties assigned to local variables", "source skipping tests') if sys.platform.startswith('win'): subprocess.call(['mvn', 'clean', 'install', '-B', '-e','-Dmaven.test.skip=true'],", "\"License\"); # you may not use this file except in", "+ \"/\" + cm.product_id + \"/\" + TESTNG_SERVER_MGT_DIST) # #", "[] db_engine = None sql_driver_location = None identity_db_url = None", "cm.build_module(intg_module_path) save_test_output() cm.create_output_property_fle() except Exception as e: logger.error(\"Error occurred while", "to support all products def save_test_output(): report_folder = Path(cm.workspace +", "\"testng.xml\") # testng_destination = Path(cm.workspace + \"/\" + cm.product_id +", "except BaseException as e: logger.error(\"Error occurred while doing the configuration\",", "cm.database_config['db_engine'].upper(): identity_url= cm.database_config['url'] + \"/\" + DEFAULT_ORACLE_SID shared_url= cm.database_config['url'] +", "cm.copy_file(absolute_file_path, log_storage) else: logger.error(\"File doesn't contain in the given location:", "the method in 
generic way to support all products def", "data_sources = artifarct_root.find('d:build', NS) plugins = data_sources.find('d:plugins', NS) for plugin", "logger = cm.function_logger(logging.DEBUG, logging.DEBUG) if sys.version_info < (3, 6): raise", "cm.checkout_to_tag() # product name retrieve from product pom files dist_name", "cm.database_config['driver_class_name'] os.environ[\"SHARED_DATABASE_URL\"] = shared_url os.environ[\"SHARED_DATABASE_USERNAME\"] = user os.environ[\"SHARED_DATABASE_PASSWORD\"] = password", "target_dir_abs_path = Path(cm.workspace + \"/\" + cm.product_id + \"/\" +", "db_names: raise Exception(\"Failed the product configuring\") cm.setup_databases(db_names, db_meta_data) # run", "ET.register_namespace('', NS['d']) artifact_tree = ET.parse(file_path) artifarct_root = artifact_tree.getroot() data_sources =", "cm.product_id + \"/\") cm.get_latest_released_dist() elif cm.test_mode == \"SNAPSHOT\": # product", "Path(cm.workspace + \"/\" + cm.product_id + \"/\" + INTEGRATION_PATH) logger.info('Building", "= Path(cm.workspace + \"/\" + PRODUCT_STORAGE_DIR_NAME) target_dir_abs_path = Path(cm.workspace +", "+ \"/\" + pom) if sys.platform.startswith('win'): file_path = cm.winapi_path(file_path) logger.info(\"Modifying", "+ str(file_path)) deployment_toml_config = toml.load(file_path) logger.info(\"loading dep,loyment.toml file\") logger.info(deployment_toml_config) for", "= Path(storage_dist_abs_path / script_name) cm.extract_product(storage_dir_abs_path, storage_zip_abs_path) cm.attach_jolokia_agent(script_path) cm.copy_jar_file(Path(cm.database_config['sql_driver_location']), Path(storage_dist_abs_path /" ]
[ "== \"linux\": # paplay comes from PulseAudio and should be", "use. :return: No returns. Plays a sound file. \"\"\" #", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A", "be installed by default on # most systems. _play_sound_unix(sound_file.with_suffix(\".oga\"), program=\"paplay\")", "{}\" \"\".format(sys.platform)) def _play_sound_unix(sound_file: Path, program): \"\"\" Play a sound", "paplay comes from PulseAudio and should be installed by default", "possible with the winsound # implementation, but that does not", "Plays a sound file. \"\"\" # Play the sound non", "TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "subprocess import sys from pathlib import Path SOUNDS_DIR = (Path(__file__).parent", "rights # to use, copy, modify, merge, publish, distribute, sublicense,", "to the sound file. :param program: Which program to use.", "permission notice shall be included in # all copies or", "_play_sound_unix(sound_file: Path, program): \"\"\" Play a sound file on unix", "program=\"paplay\") elif sys.platform == \"darwin\": # Afplay comes installed by", "portions of the Software. # # THE SOFTWARE IS PROVIDED", "# Afplay comes installed by default on Macintosh _play_sound_unix(sound_file.with_suffix(\".mp3\"), program=\"afplay\")", "Path): if sys.platform == \"linux\": # paplay comes from PulseAudio", "comes from PulseAudio and should be installed by default on", "# # The above copyright notice and this permission notice", "by default on # most systems. _play_sound_unix(sound_file.with_suffix(\".oga\"), program=\"paplay\") elif sys.platform", "and associated documentation files (the \"Software\"), to deal # in", "Software without restriction, including without limitation the rights # to", "and to permit persons to whom the Software is #", "copies of the Software, and to permit persons to whom", "hereby granted, free of charge, to any person obtaining a", "not play ogg audio. 
raise NotImplementedError( \"Playing sounds not supported", "Center # # Permission is hereby granted, free of charge,", "(c) 2019 Leiden University Medical Center # # Permission is", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "distribute, sublicense, and/or sell # copies of the Software, and", "# all copies or substantial portions of the Software. #", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #", "included in # all copies or substantial portions of the", "Path(\"sounds\")).absolute() DEFAULT_SUCCESS_SOUND = SOUNDS_DIR / Path(\"applause\") DEFAULT_FAIL_SOUND = SOUNDS_DIR /", "systems. _play_sound_unix(sound_file.with_suffix(\".oga\"), program=\"paplay\") elif sys.platform == \"darwin\": # Afplay comes", "OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH", "deal # in the Software without restriction, including without limitation", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell #", "default on # most systems. _play_sound_unix(sound_file.with_suffix(\".oga\"), program=\"paplay\") elif sys.platform ==", "notice and this permission notice shall be included in #", "the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\",", "copy, modify, merge, publish, distribute, sublicense, and/or sell # copies", "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "most systems. _play_sound_unix(sound_file.with_suffix(\".oga\"), program=\"paplay\") elif sys.platform == \"darwin\": # Afplay", ":return: No returns. Plays a sound file. \"\"\" # Play", "all copies or substantial portions of the Software. # #", "program. :param sound_file: Path to the sound file. :param program:", "a sound file. \"\"\" # Play the sound non blocking,", "software and associated documentation files (the \"Software\"), to deal #", "sound_file: Path to the sound file. 
:param program: Which program", "_play_sound_unix(sound_file.with_suffix(\".mp3\"), program=\"afplay\") else: # A windows implementation should be possible", "USE OR OTHER DEALINGS IN THE # SOFTWARE. import subprocess", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR", "play ogg audio. raise NotImplementedError( \"Playing sounds not supported by", "the Software without restriction, including without limitation the rights #", "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "implementation, but that does not play ogg audio. raise NotImplementedError(", "PulseAudio and should be installed by default on # most", "\"Playing sounds not supported by pytest-notification on {}\" \"\".format(sys.platform)) def", "Path SOUNDS_DIR = (Path(__file__).parent / Path(\"sounds\")).absolute() DEFAULT_SUCCESS_SOUND = SOUNDS_DIR /", "# of this software and associated documentation files (the \"Software\"),", "furnished to do so, subject to the following conditions: #", "to do so, subject to the following conditions: # #", "if sys.platform == \"linux\": # paplay comes from PulseAudio and", "# The above copyright notice and this permission notice shall", "SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "a copy # of this software and associated documentation files", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF", "SOUNDS_DIR = (Path(__file__).parent / Path(\"sounds\")).absolute() DEFAULT_SUCCESS_SOUND = SOUNDS_DIR / Path(\"applause\")", "Afplay comes installed by default on Macintosh _play_sound_unix(sound_file.with_suffix(\".mp3\"), program=\"afplay\") else:", "with the program. 
:param sound_file: Path to the sound file.", "notice shall be included in # all copies or substantial", "NotImplementedError( \"Playing sounds not supported by pytest-notification on {}\" \"\".format(sys.platform))", "SOUNDS_DIR / Path(\"buzzer\") def play_sound(sound_file: Path): if sys.platform == \"linux\":", "# Copyright (c) 2019 Leiden University Medical Center # #", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "and this permission notice shall be included in # all", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE", "/ Path(\"buzzer\") def play_sound(sound_file: Path): if sys.platform == \"linux\": #", "following conditions: # # The above copyright notice and this", "to deal # in the Software without restriction, including without", "conditions: # # The above copyright notice and this permission", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "= SOUNDS_DIR / Path(\"applause\") DEFAULT_FAIL_SOUND = SOUNDS_DIR / Path(\"buzzer\") def", "IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS", "SOUNDS_DIR / Path(\"applause\") DEFAULT_FAIL_SOUND = SOUNDS_DIR / Path(\"buzzer\") def play_sound(sound_file:", "FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN", "program=\"afplay\") else: # A windows implementation should be possible with", "returns. Plays a sound file. \"\"\" # Play the sound", "THE # SOFTWARE. 
import subprocess import sys from pathlib import", "and/or sell # copies of the Software, and to permit", "the rights # to use, copy, modify, merge, publish, distribute,", "on {}\" \"\".format(sys.platform)) def _play_sound_unix(sound_file: Path, program): \"\"\" Play a", "be included in # all copies or substantial portions of", "is hereby granted, free of charge, to any person obtaining", "DEFAULT_SUCCESS_SOUND = SOUNDS_DIR / Path(\"applause\") DEFAULT_FAIL_SOUND = SOUNDS_DIR / Path(\"buzzer\")", ":param sound_file: Path to the sound file. :param program: Which", "does not play ogg audio. raise NotImplementedError( \"Playing sounds not", "not supported by pytest-notification on {}\" \"\".format(sys.platform)) def _play_sound_unix(sound_file: Path,", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR", "play_sound(sound_file: Path): if sys.platform == \"linux\": # paplay comes from", "from PulseAudio and should be installed by default on #", "person obtaining a copy # of this software and associated", "# # Permission is hereby granted, free of charge, to", "without restriction, including without limitation the rights # to use,", "subject to the following conditions: # # The above copyright", "raise NotImplementedError( \"Playing sounds not supported by pytest-notification on {}\"", "be possible with the winsound # implementation, but that does", "\"\"\" Play a sound file on unix with the program.", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "IN THE # SOFTWARE. import subprocess import sys from pathlib", "# A windows implementation should be possible with the winsound", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "or substantial portions of the Software. # # THE SOFTWARE", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL", "/ Path(\"sounds\")).absolute() DEFAULT_SUCCESS_SOUND = SOUNDS_DIR / Path(\"applause\") DEFAULT_FAIL_SOUND = SOUNDS_DIR", "should be installed by default on # most systems. _play_sound_unix(sound_file.with_suffix(\".oga\"),", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "/ Path(\"applause\") DEFAULT_FAIL_SOUND = SOUNDS_DIR / Path(\"buzzer\") def play_sound(sound_file: Path):", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION", "THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import", "audio. raise NotImplementedError( \"Playing sounds not supported by pytest-notification on", "# Play the sound non blocking, use Popen. subprocess.Popen([program, str(sound_file)])", "# Permission is hereby granted, free of charge, to any", "of charge, to any person obtaining a copy # of", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #", "merge, publish, distribute, sublicense, and/or sell # copies of the", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT", "import Path SOUNDS_DIR = (Path(__file__).parent / Path(\"sounds\")).absolute() DEFAULT_SUCCESS_SOUND = SOUNDS_DIR", "# SOFTWARE. 
import subprocess import sys from pathlib import Path", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR", "Path(\"applause\") DEFAULT_FAIL_SOUND = SOUNDS_DIR / Path(\"buzzer\") def play_sound(sound_file: Path): if", "sys.platform == \"linux\": # paplay comes from PulseAudio and should", "installed by default on Macintosh _play_sound_unix(sound_file.with_suffix(\".mp3\"), program=\"afplay\") else: # A", "University Medical Center # # Permission is hereby granted, free", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER", "Path(\"buzzer\") def play_sound(sound_file: Path): if sys.platform == \"linux\": # paplay", "so, subject to the following conditions: # # The above", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #", "DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF", "program to use. :return: No returns. Plays a sound file.", "the following conditions: # # The above copyright notice and", "FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE", "Which program to use. :return: No returns. Plays a sound", "sound file. :param program: Which program to use. :return: No", "the program. :param sound_file: Path to the sound file. :param", "OR OTHER DEALINGS IN THE # SOFTWARE. import subprocess import", "the Software, and to permit persons to whom the Software", "winsound # implementation, but that does not play ogg audio.", "ogg audio. raise NotImplementedError( \"Playing sounds not supported by pytest-notification", "sound file. \"\"\" # Play the sound non blocking, use", "OTHER DEALINGS IN THE # SOFTWARE. import subprocess import sys", "2019 Leiden University Medical Center # # Permission is hereby", "in # all copies or substantial portions of the Software.", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "file. 
\"\"\" # Play the sound non blocking, use Popen.", "persons to whom the Software is # furnished to do", "_play_sound_unix(sound_file.with_suffix(\".oga\"), program=\"paplay\") elif sys.platform == \"darwin\": # Afplay comes installed", "program): \"\"\" Play a sound file on unix with the", "associated documentation files (the \"Software\"), to deal # in the", "== \"darwin\": # Afplay comes installed by default on Macintosh", "should be possible with the winsound # implementation, but that", "MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN", "Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "to any person obtaining a copy # of this software", "installed by default on # most systems. _play_sound_unix(sound_file.with_suffix(\".oga\"), program=\"paplay\") elif", "of the Software, and to permit persons to whom the", "this software and associated documentation files (the \"Software\"), to deal", "ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY,", "Software is # furnished to do so, subject to the", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "whom the Software is # furnished to do so, subject", "No returns. Plays a sound file. \"\"\" # Play the", "sublicense, and/or sell # copies of the Software, and to", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "substantial portions of the Software. 
# # THE SOFTWARE IS", "else: # A windows implementation should be possible with the", "do so, subject to the following conditions: # # The", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING", "in the Software without restriction, including without limitation the rights", "# furnished to do so, subject to the following conditions:", "any person obtaining a copy # of this software and", "ARISING FROM, # OUT OF OR IN CONNECTION WITH THE", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "shall be included in # all copies or substantial portions", "KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO", "OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "restriction, including without limitation the rights # to use, copy,", "# paplay comes from PulseAudio and should be installed by", "including without limitation the rights # to use, copy, modify,", "elif sys.platform == \"darwin\": # Afplay comes installed by default", "copyright notice and this permission notice shall be included in", "# most systems. _play_sound_unix(sound_file.with_suffix(\".oga\"), program=\"paplay\") elif sys.platform == \"darwin\": #", "A windows implementation should be possible with the winsound #", "with the winsound # implementation, but that does not play", "ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED", "= (Path(__file__).parent / Path(\"sounds\")).absolute() DEFAULT_SUCCESS_SOUND = SOUNDS_DIR / Path(\"applause\") DEFAULT_FAIL_SOUND", "free of charge, to any person obtaining a copy #", "files (the \"Software\"), to deal # in the Software without", "pathlib import Path SOUNDS_DIR = (Path(__file__).parent / Path(\"sounds\")).absolute() DEFAULT_SUCCESS_SOUND =", "def play_sound(sound_file: Path): if sys.platform == \"linux\": # paplay comes", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "that does not play ogg audio. 
raise NotImplementedError( \"Playing sounds", "of the Software. # # THE SOFTWARE IS PROVIDED \"AS", "unix with the program. :param sound_file: Path to the sound", "(Path(__file__).parent / Path(\"sounds\")).absolute() DEFAULT_SUCCESS_SOUND = SOUNDS_DIR / Path(\"applause\") DEFAULT_FAIL_SOUND =", "default on Macintosh _play_sound_unix(sound_file.with_suffix(\".mp3\"), program=\"afplay\") else: # A windows implementation", "of this software and associated documentation files (the \"Software\"), to", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "implementation should be possible with the winsound # implementation, but", "DEALINGS IN THE # SOFTWARE. import subprocess import sys from", "file. :param program: Which program to use. :return: No returns.", "EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "SOFTWARE. import subprocess import sys from pathlib import Path SOUNDS_DIR", "on Macintosh _play_sound_unix(sound_file.with_suffix(\".mp3\"), program=\"afplay\") else: # A windows implementation should", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS", "on # most systems. _play_sound_unix(sound_file.with_suffix(\".oga\"), program=\"paplay\") elif sys.platform == \"darwin\":", "\"darwin\": # Afplay comes installed by default on Macintosh _play_sound_unix(sound_file.with_suffix(\".mp3\"),", "Play a sound file on unix with the program. :param", "(the \"Software\"), to deal # in the Software without restriction,", "but that does not play ogg audio. 
raise NotImplementedError( \"Playing", "WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND", "charge, to any person obtaining a copy # of this", "permit persons to whom the Software is # furnished to", "THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE", "the Software is # furnished to do so, subject to", "above copyright notice and this permission notice shall be included", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED,", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "limitation the rights # to use, copy, modify, merge, publish,", "this permission notice shall be included in # all copies", "windows implementation should be possible with the winsound # implementation,", "comes installed by default on Macintosh _play_sound_unix(sound_file.with_suffix(\".mp3\"), program=\"afplay\") else: #", "PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #", "DEFAULT_FAIL_SOUND = SOUNDS_DIR / Path(\"buzzer\") def play_sound(sound_file: Path): if sys.platform", "without limitation the rights # to use, copy, modify, merge,", "pytest-notification on {}\" \"\".format(sys.platform)) def _play_sound_unix(sound_file: Path, program): \"\"\" Play", "by default on Macintosh _play_sound_unix(sound_file.with_suffix(\".mp3\"), program=\"afplay\") else: # A windows", "= SOUNDS_DIR / Path(\"buzzer\") def play_sound(sound_file: Path): if sys.platform ==", "sys from pathlib import Path SOUNDS_DIR = (Path(__file__).parent / Path(\"sounds\")).absolute()", "EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "\"\".format(sys.platform)) def _play_sound_unix(sound_file: Path, program): \"\"\" Play a sound file", "# in the Software without restriction, including without limitation the", "documentation files (the \"Software\"), to deal # in the Software", "copies or substantial portions of the Software. 
# # THE", "import subprocess import sys from pathlib import Path SOUNDS_DIR =", "\"\"\" # Play the sound non blocking, use Popen. subprocess.Popen([program,", "supported by pytest-notification on {}\" \"\".format(sys.platform)) def _play_sound_unix(sound_file: Path, program):", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT", "sell # copies of the Software, and to permit persons", ":param program: Which program to use. :return: No returns. Plays", "OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "publish, distribute, sublicense, and/or sell # copies of the Software,", "to the following conditions: # # The above copyright notice", "file on unix with the program. :param sound_file: Path to", "Path to the sound file. :param program: Which program to", "# implementation, but that does not play ogg audio. raise", "to use. :return: No returns. Plays a sound file. \"\"\"", "sounds not supported by pytest-notification on {}\" \"\".format(sys.platform)) def _play_sound_unix(sound_file:", "modify, merge, publish, distribute, sublicense, and/or sell # copies of", "OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION", "from pathlib import Path SOUNDS_DIR = (Path(__file__).parent / Path(\"sounds\")).absolute() DEFAULT_SUCCESS_SOUND", "a sound file on unix with the program. :param sound_file:", "\"linux\": # paplay comes from PulseAudio and should be installed", "sound file on unix with the program. 
:param sound_file: Path", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "Software, and to permit persons to whom the Software is", "# to use, copy, modify, merge, publish, distribute, sublicense, and/or", "OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "\"Software\"), to deal # in the Software without restriction, including", "and should be installed by default on # most systems.", "the winsound # implementation, but that does not play ogg", "import sys from pathlib import Path SOUNDS_DIR = (Path(__file__).parent /", "program: Which program to use. :return: No returns. Plays a", "# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "Path, program): \"\"\" Play a sound file on unix with", "# copies of the Software, and to permit persons to", "def _play_sound_unix(sound_file: Path, program): \"\"\" Play a sound file on", "by pytest-notification on {}\" \"\".format(sys.platform)) def _play_sound_unix(sound_file: Path, program): \"\"\"", "granted, free of charge, to any person obtaining a copy", "obtaining a copy # of this software and associated documentation", "TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN", "is # furnished to do so, subject to the following", "to whom the Software is # furnished to do so,", "Macintosh _play_sound_unix(sound_file.with_suffix(\".mp3\"), program=\"afplay\") else: # A windows implementation should be", "copy # of this software and associated documentation files (the", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE.", "Permission is hereby granted, free of charge, to any person", "on unix with the program. :param sound_file: Path to the", "The above copyright notice and this permission notice shall be", "sys.platform == \"darwin\": # Afplay comes installed by default on", "the sound file. :param program: Which program to use. 
:return:", "WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "Copyright (c) 2019 Leiden University Medical Center # # Permission", "Leiden University Medical Center # # Permission is hereby granted,", "Medical Center # # Permission is hereby granted, free of", "to permit persons to whom the Software is # furnished", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING" ]
[ "sqrt def is_simple(n: int) -> bool: if n % 2", "2 == 0: n += 1 while not is_simple(n): n", "(3, int(sqrt(n)) + 2, 2): if n % i ==", "and n != 2: return False for i in range", "== 0 and n != 2: return False for i", "if n % i == 0 and n != i:", "i in range (3, int(sqrt(n)) + 2, 2): if n", "n % i == 0 and n != i: return", "2: return 2 else: if n % 2 == 0:", "return 2 else: if n % 2 == 0: n", "<= 2: return 2 else: if n % 2 ==", "-> bool: if n % 2 == 0 and n", "else: if n % 2 == 0: n += 1", "bool: if n % 2 == 0 and n !=", "for i in range (3, int(sqrt(n)) + 2, 2): if", "i == 0 and n != i: return False return", "n % 2 == 0 and n != 2: return", "0 and n != i: return False return True def", "False for i in range (3, int(sqrt(n)) + 2, 2):", "def next_prime(n: int) -> int: n += 1 if n", "2: return False for i in range (3, int(sqrt(n)) +", "n != 2: return False for i in range (3,", "2, 2): if n % i == 0 and n", "from math import sqrt def is_simple(n: int) -> bool: if", "n += 1 while not is_simple(n): n += 2 return", "False return True def next_prime(n: int) -> int: n +=", "0 and n != 2: return False for i in", "return False return True def next_prime(n: int) -> int: n", "True def next_prime(n: int) -> int: n += 1 if", "% 2 == 0 and n != 2: return False", "2 == 0 and n != 2: return False for", "in range (3, int(sqrt(n)) + 2, 2): if n %", "+ 2, 2): if n % i == 0 and", "== 0 and n != i: return False return True", "and n != i: return False return True def next_prime(n:", "n != i: return False return True def next_prime(n: int)", "next_prime(n: int) -> int: n += 1 if n <=", "int(sqrt(n)) + 2, 2): if n % i == 0", "math import sqrt def is_simple(n: int) -> bool: if n", "int: n += 1 if n <= 2: return 2", "if n <= 2: return 2 else: if n %", "n <= 2: return 2 else: if n % 2", "!= 2: return False for i in range (3, int(sqrt(n))", "if n % 2 == 0: n += 1 while", "range (3, int(sqrt(n)) + 2, 2): if n % i", "i: return False return True def next_prime(n: int) -> int:", "-> int: n += 1 if n 
<= 2: return", "+= 1 while not is_simple(n): n += 2 return n", "2): if n % i == 0 and n !=", "2 else: if n % 2 == 0: n +=", "== 0: n += 1 while not is_simple(n): n +=", "int) -> bool: if n % 2 == 0 and", "+= 1 if n <= 2: return 2 else: if", "% i == 0 and n != i: return False", "return False for i in range (3, int(sqrt(n)) + 2,", "n % 2 == 0: n += 1 while not", "<reponame>yaznasivasai/python_codewars from math import sqrt def is_simple(n: int) -> bool:", "return True def next_prime(n: int) -> int: n += 1", "% 2 == 0: n += 1 while not is_simple(n):", "is_simple(n: int) -> bool: if n % 2 == 0", "int) -> int: n += 1 if n <= 2:", "import sqrt def is_simple(n: int) -> bool: if n %", "if n % 2 == 0 and n != 2:", "0: n += 1 while not is_simple(n): n += 2", "def is_simple(n: int) -> bool: if n % 2 ==", "n += 1 if n <= 2: return 2 else:", "!= i: return False return True def next_prime(n: int) ->", "1 if n <= 2: return 2 else: if n" ]
[ "1) Provide a a static ip, URL or similar that", "in the terminal (e.g., http://55e57164.ngrok.io). You will use this address", "Run this command to create a secure public URL for", "Under \"Choose a Trigger\", select “Any new email in inbox\".", "face. ''' import asyncio import re import sys try: from", "run the example: 1) Provide a a static ip, URL", "That server. One easy way to do this is with", "task in the background so the HTTP server responds immediately.", "async def read_name(): try: async with robot.perform_off_charger(): '''If necessary, Move", "2.0 (the \"License\"); # you may not use this file", "cozmo.RobotBusy: cozmo.logger.warning(\"Robot was busy so didn't read email address: \"+", "or at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "send a web request to the ifttt_gmail.py script. e) In", "cozmo.robot.Robot.drive_off_charger_on_connect = False # Use our custom robot class with", "email sender's name and show a mailbox image on his", "modify this method to change how Cozmo reacts to the", "# Cozmo to raise and lower his lift. To change", "up an applet on the IFTTT website. When the applet", "If This Then That server. One easy way to do", "out the fields as follows. Remember your publicly accessible URL", "example This example demonstrates how \"If This Then That\" (http://ifttt.com)", "b) Run this command to create a secure public URL", "# Perform Cozmo's task in the background so the HTTP", "with ngrok, which sets up a secure tunnel to localhost", "into https://ifttt.com b) Create an applet: https://ifttt.com/create c) Set up", "common import IFTTTRobot app = web.Application() async def serve_gmail(request): '''Define", "of the email sender. await robot.say_text(\"Email from \" + email_local_part).wait_for_completed()", "applet. 
a) Run this script at the command line: ./ifttt_gmail.py", "in the file LICENSE.txt or at # # http://www.apache.org/licenses/LICENSE-2.0 #", "# remote_control_cozmo.py to see a list of animations. await robot.play_anim(name='ID_pokedB').wait_for_completed()", "import re import sys try: from aiohttp import web except", "name of the email sender. from_email_address = json_object[\"FromAddress\"] # Use", "and click “Allow” to provide permissions to IFTTT for your", "To change the animation, # you may replace \"ID_pokedB\" with", "app object. app['robot'] = app.loop.run_until_complete(sdk_conn.wait_for_robot()) except cozmo.ConnectionError as e: sys.exit(\"A", "have Cozmo display an email image on his face. robot.display_image_file_on_face(\"../face_images/ifttt_gmail.png\")", "your applet. a) Run this script at the command line:", "tells # Cozmo to raise and lower his lift. To", "port 8080: ./ngrok http 8080 c) Note the HTTP forwarding", "to make it easy to see Cozmo's face.''' await robot.get_in_position()", "sender's name and show a mailbox image on his face.", "his lift, announce the email, and then show a mailbox", "your email account. Click \"Done\". 3. Under \"Choose a Trigger\",", "the robot to become available and add it to the", "should roll off the charger, raise and lower his lift,", "the file LICENSE.txt or at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "use this file except in compliance with the License. #", "applet trigger is called (which sends a web request received", "lead you through setting up an applet on the IFTTT", "to set up and run the example: 1) Provide a", "\"Gmail\" as your service. If prompted, click \"Connect\", select your", "Action\" then “Finish\". 3) Test your applet. a) Run this", "up your applet on the \"If This Then That\" website.", "your local web server to the internet. See the ngrok", "email_local_part = match_object.group(1) robot = request.app['robot'] async def read_name(): try:", "animations. 
await robot.play_anim(name='ID_pokedB').wait_for_completed() # Next, have Cozmo speak the name", "for port 8080: ./ngrok http 8080 c) Note the HTTP", "followed by \"/iftttGmail\" as shown below: URL: http://55e57164.ngrok.io/iftttGmail Method: POST", "cozmo.ConnectionError as e: sys.exit(\"A connection error occurred: %s\" % e)", "with robot.perform_off_charger(): '''If necessary, Move Cozmo's Head and Lift to", "to the Maker channel if prompted. 3. Click “Make a", "You may modify this method to change how Cozmo reacts", "This should cause IFTTT to detect that the email was", "License. # You may obtain a copy of the License", "script. e) In response to the ifttt web request, Cozmo", "await robot.play_anim(name='ID_pokedB').wait_for_completed() # Next, have Cozmo speak the name of", "our custom robot class with extra helper methods cozmo.conn.CozmoConnection.robot_factory =", "app['robot'] = app.loop.run_until_complete(sdk_conn.wait_for_robot()) except cozmo.ConnectionError as e: sys.exit(\"A connection error", "Head and Lift to make it easy to see Cozmo's", "may obtain a copy of the License in the file", "under the License is distributed on an \"AS IS\" BASIS,", "your Gmail account, and click “Allow” to provide permissions to", "License for the specific language governing permissions and # limitations", "# Last, have Cozmo display an email image on his", "language governing permissions and # limitations under the License. '''\"If", "\"If This Then That\" (http://ifttt.com) can be used make Cozmo", "animation \"ID_pokedB\", which tells # Cozmo to raise and lower", "Do `pip3 install --user aiohttp` to install\") import cozmo from", "That\" website. a) Sign up and sign into https://ifttt.com b)", "replace \"ID_pokedB\" with another animation. 
Run # remote_control_cozmo.py to see", "ifttt web request, Cozmo should roll off the charger, raise", "robot to become available and add it to the app", "of the License in the file LICENSE.txt or at #", "limitations under the License. '''\"If This Then That\" Gmail example", "a Trigger\", select “Any new email in inbox\". d) Set", "re import sys try: from aiohttp import web except ImportError:", "and show a mailbox image on his face. Please place", "responds immediately. asyncio.ensure_future(read_name()) return web.Response(text=\"OK\") # Attach the function as", "of the email address match_object = re.search(r'([\\w.]+)@([\\w.]+)', from_email_address) email_local_part =", "Type: application/json Body: {\"FromAddress\":\"{{FromAddress}}\"} 5. Click “Create Action\" then “Finish\".", "on. Follow these steps to set up and run the", "This Then That. You may modify this method to change", "robot.display_image_file_on_face(\"../face_images/ifttt_gmail.png\") except cozmo.RobotBusy: cozmo.logger.warning(\"Robot was busy so didn't read email", "= await request.json() # Extract the name of the email", "HTTP handler. app.router.add_post('/iftttGmail', serve_gmail) if __name__ == '__main__': cozmo.setup_basic_logging() cozmo.robot.Robot.drive_off_charger_on_connect", "announce the email, and then show a mailbox image on", "terminal (e.g., http://55e57164.ngrok.io). You will use this address in your", "address shown in the terminal (e.g., http://55e57164.ngrok.io). You will use", "2. Select \"Gmail\" as your service. If prompted, click \"Connect\",", "from aiohttp. Do `pip3 install --user aiohttp` to install\") import", "apart pieces of the email address match_object = re.search(r'([\\w.]+)@([\\w.]+)', from_email_address)", "# Attach the function as an HTTP handler. 
app.router.add_post('/iftttGmail', serve_gmail)", "WARNING: Using ngrok exposes your local web server to the", "await robot.say_text(\"Email from \" + email_local_part).wait_for_completed() # Last, have Cozmo", "easy way to do this is with ngrok, which sets", "(http://ifttt.com) can be used make Cozmo respond when a Gmail", "= IFTTTRobot try: sdk_conn = cozmo.connect_on_loop(app.loop) # Wait for the", "from_email_address) email_local_part = match_object.group(1) robot = request.app['robot'] async def read_name():", "False # Use our custom robot class with extra helper", "try: sdk_conn = cozmo.connect_on_loop(app.loop) # Wait for the robot to", "web request\" and fill out the fields as follows. Remember", "in compliance with the License. # You may obtain a", "that can be reached from the If This Then That", "= match_object.group(1) robot = request.app['robot'] async def read_name(): try: async", "animation, # you may replace \"ID_pokedB\" with another animation. Run", "software # distributed under the License is distributed on an", "example), Cozmo will play an animation, speak the email sender's", "When necessary, he will be rolled off and back on.", "receiving requests from If This Then That. You may modify", "your applet on the \"If This Then That\" website. a)", "Click “Make a web request\" and fill out the fields", "shown below: URL: http://55e57164.ngrok.io/iftttGmail Method: POST Content Type: application/json Body:", "import cozmo from common import IFTTTRobot app = web.Application() async", "the HTTP forwarding address shown in the terminal (e.g., http://55e57164.ngrok.io).", "this script at the command line: ./ifttt_gmail.py b) On ifttt.com,", "didn't read email address: \"+ from_email_address) # Perform Cozmo's task", "the ifttt_gmail.py script. e) In response to the ifttt web", "the example: 1) Provide a a static ip, URL or", "# Wait for the robot to become available and add", "off and back on. 
Follow these steps to set up", "https://ngrok.com/docs 2) Set up your applet on the \"If This", "8080 c) Note the HTTP forwarding address shown in the", "method to change how Cozmo reacts to the email being", "Click “that\". 2. Select “Maker\" to set it as your", "Set up your applet on the \"If This Then That\"", "to create a secure public URL for port 8080: ./ngrok", "local web server to the internet. See the ngrok documentation", "it to the app object. app['robot'] = app.loop.run_until_complete(sdk_conn.wait_for_robot()) except cozmo.ConnectionError", "copy of the License in the file LICENSE.txt or at", "aiohttp. Do `pip3 install --user aiohttp` to install\") import cozmo", "email image on his face. robot.display_image_file_on_face(\"../face_images/ifttt_gmail.png\") except cozmo.RobotBusy: cozmo.logger.warning(\"Robot was", "see a list of animations. await robot.play_anim(name='ID_pokedB').wait_for_completed() # Next, have", "necessary, he will be rolled off and back on. Follow", "the \"If This Then That\" website. a) Sign up and", "if prompted. 3. Click “Make a web request\" and fill", "1. Click “that\". 2. Select “Maker\" to set it as", "may modify this method to change how Cozmo reacts to", "an HTTP handler. app.router.add_post('/iftttGmail', serve_gmail) if __name__ == '__main__': cozmo.setup_basic_logging()", "Cozmo respond when a Gmail account receives an email. Instructions", "Cozmo should roll off the charger, raise and lower his", "fields as follows. Remember your publicly accessible URL from above", "= re.search(r'([\\w.]+)@([\\w.]+)', from_email_address) email_local_part = match_object.group(1) robot = request.app['robot'] async", "request\" and fill out the fields as follows. 
Remember your", "up a secure tunnel to localhost running on your machine.", "If prompted, click \"Connect\", select your Gmail account, and click", "''' json_object = await request.json() # Extract the name of", "robot.say_text(\"Email from \" + email_local_part).wait_for_completed() # Last, have Cozmo display", "(which sends a web request received by the web server", "“Any new email in inbox\". d) Set up your action.", "trigger. 1. Click \"this\". 2. Select \"Gmail\" as your service.", "in your applet, below. WARNING: Using ngrok exposes your local", "OF ANY KIND, either express or implied. # See the", "running on your machine. To set up ngrok: a) Follow", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "documentation for more information: https://ngrok.com/docs 2) Set up your applet", "a web request to the ifttt_gmail.py script. e) In response", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "d) Set up your action. 1. Click “that\". 2. Select", "rolled off and back on. Follow these steps to set", "below will lead you through setting up an applet on", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "the If This Then That server. One easy way to", "Then That\" website. a) Sign up and sign into https://ifttt.com", "from common import IFTTTRobot app = web.Application() async def serve_gmail(request):", "2. Select “Maker\" to set it as your action channel.", "HTTP server responds immediately. asyncio.ensure_future(read_name()) return web.Response(text=\"OK\") # Attach the", "to in writing, software # distributed under the License is", "use this address in your applet, below. WARNING: Using ngrok", "handler for receiving requests from If This Then That. 
You", "URL for port 8080: ./ngrok http 8080 c) Note the", "# See the License for the specific language governing permissions", "Use our custom robot class with extra helper methods cozmo.conn.CozmoConnection.robot_factory", "or agreed to in writing, software # distributed under the", "a) Run this script at the command line: ./ifttt_gmail.py b)", "raise and lower his lift, announce the email, and then", "the applet trigger is called (which sends a web request", "Anki, Inc. # # Licensed under the Apache License, Version", "required by applicable law or agreed to in writing, software", "Perform Cozmo's task in the background so the HTTP server", "IFTTTRobot app = web.Application() async def serve_gmail(request): '''Define an HTTP", "and install: https://ngrok.com/download b) Run this command to create a", "on his face. Please place Cozmo on the charger for", "in the URL field, followed by \"/iftttGmail\" as shown below:", "IFTTTRobot try: sdk_conn = cozmo.connect_on_loop(app.loop) # Wait for the robot", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "https://ngrok.com/download b) Run this command to create a secure public", "server. One easy way to do this is with ngrok,", "with the License. # You may obtain a copy of", "cause IFTTT to detect that the email was received and", "See that IFTTT confirms that the applet was checked. c)", "received. ''' json_object = await request.json() # Extract the name", "Content Type: application/json Body: {\"FromAddress\":\"{{FromAddress}}\"} 5. Click “Create Action\" then", "That. You may modify this method to change how Cozmo", "request.app['robot'] async def read_name(): try: async with robot.perform_off_charger(): '''If necessary,", "if __name__ == '__main__': cozmo.setup_basic_logging() cozmo.robot.Robot.drive_off_charger_on_connect = False # Use", "Create an applet: https://ifttt.com/create c) Set up your trigger. 
1.", "up and sign into https://ifttt.com b) Create an applet: https://ifttt.com/create", "regular expression to break apart pieces of the email address", "--user aiohttp` to install\") import cozmo from common import IFTTTRobot", "set it as your action channel. Connect to the Maker", "background so the HTTP server responds immediately. asyncio.ensure_future(read_name()) return web.Response(text=\"OK\")", "compliance with the License. # You may obtain a copy", "agreed to in writing, software # distributed under the License", "POST handler for receiving requests from If This Then That.", "recipe d) On your IFTTT applet webpage, again click “Check", "email sender. await robot.say_text(\"Email from \" + email_local_part).wait_for_completed() # Last,", "webpage, again click “Check now”. This should cause IFTTT to", "change the animation, # you may replace \"ID_pokedB\" with another", "again click “Check now”. This should cause IFTTT to detect", "email being received. ''' json_object = await request.json() # Extract", "distributed under the License is distributed on an \"AS IS\"", "this address in your applet, below. WARNING: Using ngrok exposes", "your recipe d) On your IFTTT applet webpage, again click", "easy to see Cozmo's face.''' await robot.get_in_position() # First, have", "Cozmo to raise and lower his lift. To change the", "your applet page, click “Check now”. See that IFTTT confirms", "./ifttt_gmail.py b) On ifttt.com, on your applet page, click “Check", "express or implied. # See the License for the specific", "def serve_gmail(request): '''Define an HTTP POST handler for receiving requests", "this example. When necessary, he will be rolled off and", "install --user aiohttp` to install\") import cozmo from common import", "except in compliance with the License. # You may obtain", "to raise and lower his lift. To change the animation,", "an HTTP POST handler for receiving requests from If This", "show a mailbox image on his face. 
Please place Cozmo", "(e.g., http://55e57164.ngrok.io). You will use this address in your applet,", "Cozmo's task in the background so the HTTP server responds", "how Cozmo reacts to the email being received. ''' json_object", "was checked. c) Send an email to the Gmail account", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "do this is with ngrok, which sets up a secure", "handler. app.router.add_post('/iftttGmail', serve_gmail) if __name__ == '__main__': cozmo.setup_basic_logging() cozmo.robot.Robot.drive_off_charger_on_connect =", "not use this file except in compliance with the License.", "receives an email. Instructions below will lead you through setting", "re.search(r'([\\w.]+)@([\\w.]+)', from_email_address) email_local_part = match_object.group(1) robot = request.app['robot'] async def", "channel. Connect to the Maker channel if prompted. 3. Click", "Remember your publicly accessible URL from above (e.g., http://55e57164.ngrok.io) and", "async with robot.perform_off_charger(): '''If necessary, Move Cozmo's Head and Lift", "the fields as follows. Remember your publicly accessible URL from", "have Cozmo speak the name of the email sender. await", "#!/usr/bin/env python3 # Copyright (c) 2016 Anki, Inc. # #", "email address match_object = re.search(r'([\\w.]+)@([\\w.]+)', from_email_address) email_local_part = match_object.group(1) robot", "Follow these steps to set up and run the example:", "writing, software # distributed under the License is distributed on", "c) Send an email to the Gmail account in your", "that IFTTT confirms that the applet was checked. c) Send", "expression to break apart pieces of the email address match_object", "you may not use this file except in compliance with", "web request received by the web server started in this", "sys.exit(\"Cannot import from aiohttp. Do `pip3 install --user aiohttp` to", "checked. 
c) Send an email to the Gmail account in", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "applet, below. WARNING: Using ngrok exposes your local web server", "your trigger. 1. Click \"this\". 2. Select \"Gmail\" as your", "this is with ngrok, which sets up a secure tunnel", "applet webpage, again click “Check now”. This should cause IFTTT", "your machine. To set up ngrok: a) Follow instructions here", "“Finish\". 3) Test your applet. a) Run this script at", "URL or similar that can be reached from the If", "HTTP forwarding address shown in the terminal (e.g., http://55e57164.ngrok.io). You", "so didn't read email address: \"+ from_email_address) # Perform Cozmo's", "click “Check now”. See that IFTTT confirms that the applet", "now”. This should cause IFTTT to detect that the email", "for the robot to become available and add it to", "d) On your IFTTT applet webpage, again click “Check now”.", "address match_object = re.search(r'([\\w.]+)@([\\w.]+)', from_email_address) email_local_part = match_object.group(1) robot =", "CONDITIONS OF ANY KIND, either express or implied. # See", "Then That\" Gmail example This example demonstrates how \"If This", "to the email being received. ''' json_object = await request.json()", "through setting up an applet on the IFTTT website. When", "so the HTTP server responds immediately. asyncio.ensure_future(read_name()) return web.Response(text=\"OK\") #", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "on your applet page, click “Check now”. See that IFTTT", "./ngrok http 8080 c) Note the HTTP forwarding address shown", "it as your action channel. Connect to the Maker channel", "a Gmail account receives an email. Instructions below will lead", "web server started in this example), Cozmo will play an", "being received. ''' json_object = await request.json() # Extract the", "website. When the applet trigger is called (which sends a", "web server to the internet. 
See the ngrok documentation for", "information: https://ngrok.com/docs 2) Set up your applet on the \"If", "'''Define an HTTP POST handler for receiving requests from If", "to become available and add it to the app object.", "lift, announce the email, and then show a mailbox image", "see Cozmo's face.''' await robot.get_in_position() # First, have Cozmo play", "the License in the file LICENSE.txt or at # #", "cozmo from common import IFTTTRobot app = web.Application() async def", "speak the name of the email sender. await robot.say_text(\"Email from", "aiohttp import web except ImportError: sys.exit(\"Cannot import from aiohttp. Do", "Sign up and sign into https://ifttt.com b) Create an applet:", "to localhost running on your machine. To set up ngrok:", "set up and run the example: 1) Provide a a", "shown in the terminal (e.g., http://55e57164.ngrok.io). You will use this", "for your email account. Click \"Done\". 3. Under \"Choose a", "action. 1. Click “that\". 2. Select “Maker\" to set it", "public URL for port 8080: ./ngrok http 8080 c) Note", "async def serve_gmail(request): '''Define an HTTP POST handler for receiving", "try: async with robot.perform_off_charger(): '''If necessary, Move Cozmo's Head and", "Wait for the robot to become available and add it", "the email address match_object = re.search(r'([\\w.]+)@([\\w.]+)', from_email_address) email_local_part = match_object.group(1)", "web request to the ifttt_gmail.py script. e) In response to", "a web request received by the web server started in", "IFTTT to detect that the email was received and send", "up your action. 1. Click “that\". 2. Select “Maker\" to", "to set it as your action channel. Connect to the", "make Cozmo respond when a Gmail account receives an email.", "by the web server started in this example), Cozmo will", "was received and send a web request to the ifttt_gmail.py", "to the internet. 
See the ngrok documentation for more information:", "OR CONDITIONS OF ANY KIND, either express or implied. #", "select “Any new email in inbox\". d) Set up your", "the License is distributed on an \"AS IS\" BASIS, #", "on his face. robot.display_image_file_on_face(\"../face_images/ifttt_gmail.png\") except cozmo.RobotBusy: cozmo.logger.warning(\"Robot was busy so", "tunnel to localhost running on your machine. To set up", "\"this\". 2. Select \"Gmail\" as your service. If prompted, click", "json_object = await request.json() # Extract the name of the", "methods cozmo.conn.CozmoConnection.robot_factory = IFTTTRobot try: sdk_conn = cozmo.connect_on_loop(app.loop) # Wait", "of the email sender. from_email_address = json_object[\"FromAddress\"] # Use a", "the applet was checked. c) Send an email to the", "name and show a mailbox image on his face. Please", "for more information: https://ngrok.com/docs 2) Set up your applet on", "Method: POST Content Type: application/json Body: {\"FromAddress\":\"{{FromAddress}}\"} 5. Click “Create", "def read_name(): try: async with robot.perform_off_charger(): '''If necessary, Move Cozmo's", "face. robot.display_image_file_on_face(\"../face_images/ifttt_gmail.png\") except cozmo.RobotBusy: cozmo.logger.warning(\"Robot was busy so didn't read", "the IFTTT website. When the applet trigger is called (which", "image on his face. Please place Cozmo on the charger", "the function as an HTTP handler. app.router.add_post('/iftttGmail', serve_gmail) if __name__", "then show a mailbox image on his face. ''' import", "Using ngrok exposes your local web server to the internet.", "the email sender. await robot.say_text(\"Email from \" + email_local_part).wait_for_completed() #", "# Next, have Cozmo speak the name of the email", "Run # remote_control_cozmo.py to see a list of animations. await", "to provide permissions to IFTTT for your email account. 
Click", "match_object.group(1) robot = request.app['robot'] async def read_name(): try: async with", "URL from above (e.g., http://55e57164.ngrok.io) and use it in the", "read_name(): try: async with robot.perform_off_charger(): '''If necessary, Move Cozmo's Head", "will be rolled off and back on. Follow these steps", "web request, Cozmo should roll off the charger, raise and", "prompted. 3. Click “Make a web request\" and fill out", "have Cozmo play animation \"ID_pokedB\", which tells # Cozmo to", "Set up your action. 1. Click “that\". 2. Select “Maker\"", "asyncio.ensure_future(read_name()) return web.Response(text=\"OK\") # Attach the function as an HTTP", "To set up ngrok: a) Follow instructions here to download", "law or agreed to in writing, software # distributed under", "(e.g., http://55e57164.ngrok.io) and use it in the URL field, followed", "On your IFTTT applet webpage, again click “Check now”. This", "and then show a mailbox image on his face. '''", "except cozmo.ConnectionError as e: sys.exit(\"A connection error occurred: %s\" %", "you through setting up an applet on the IFTTT website.", "your action channel. Connect to the Maker channel if prompted.", "http 8080 c) Note the HTTP forwarding address shown in", "This example demonstrates how \"If This Then That\" (http://ifttt.com) can", "b) On ifttt.com, on your applet page, click “Check now”.", "to see Cozmo's face.''' await robot.get_in_position() # First, have Cozmo", "First, have Cozmo play animation \"ID_pokedB\", which tells # Cozmo", "# limitations under the License. '''\"If This Then That\" Gmail", "to change how Cozmo reacts to the email being received.", "at the command line: ./ifttt_gmail.py b) On ifttt.com, on your", "internet. See the ngrok documentation for more information: https://ngrok.com/docs 2)", "response to the ifttt web request, Cozmo should roll off", "If This Then That. 
You may modify this method to", "That\" Gmail example This example demonstrates how \"If This Then", "\"Done\". 3. Under \"Choose a Trigger\", select “Any new email", "2) Set up your applet on the \"If This Then", "\" + email_local_part).wait_for_completed() # Last, have Cozmo display an email", "== '__main__': cozmo.setup_basic_logging() cozmo.robot.Robot.drive_off_charger_on_connect = False # Use our custom", "can be reached from the If This Then That server.", "request received by the web server started in this example),", "the License. '''\"If This Then That\" Gmail example This example", "break apart pieces of the email address match_object = re.search(r'([\\w.]+)@([\\w.]+)',", "See the ngrok documentation for more information: https://ngrok.com/docs 2) Set", "ifttt_gmail.py script. e) In response to the ifttt web request,", "Please place Cozmo on the charger for this example. When", "In response to the ifttt web request, Cozmo should roll", "'''If necessary, Move Cozmo's Head and Lift to make it", "speak the email sender's name and show a mailbox image", "click “Check now”. This should cause IFTTT to detect that", "and send a web request to the ifttt_gmail.py script. e)", "\"Connect\", select your Gmail account, and click “Allow” to provide", "field, followed by \"/iftttGmail\" as shown below: URL: http://55e57164.ngrok.io/iftttGmail Method:", "these steps to set up and run the example: 1)", "“Maker\" to set it as your action channel. Connect to", "address: \"+ from_email_address) # Perform Cozmo's task in the background", "cozmo.conn.CozmoConnection.robot_factory = IFTTTRobot try: sdk_conn = cozmo.connect_on_loop(app.loop) # Wait for", "immediately. asyncio.ensure_future(read_name()) return web.Response(text=\"OK\") # Attach the function as an", "channel if prompted. 3. Click “Make a web request\" and", "charger for this example. When necessary, he will be rolled", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "Inc. 
# # Licensed under the Apache License, Version 2.0", "website. a) Sign up and sign into https://ifttt.com b) Create", "account receives an email. Instructions below will lead you through", "\"/iftttGmail\" as shown below: URL: http://55e57164.ngrok.io/iftttGmail Method: POST Content Type:", "install\") import cozmo from common import IFTTTRobot app = web.Application()", "to the ifttt_gmail.py script. e) In response to the ifttt", "necessary, Move Cozmo's Head and Lift to make it easy", "and # limitations under the License. '''\"If This Then That\"", "an email to the Gmail account in your recipe d)", "Set up your trigger. 1. Click \"this\". 2. Select \"Gmail\"", "may not use this file except in compliance with the", "service. If prompted, click \"Connect\", select your Gmail account, and", "\"ID_pokedB\", which tells # Cozmo to raise and lower his", "account. Click \"Done\". 3. Under \"Choose a Trigger\", select “Any", "request, Cozmo should roll off the charger, raise and lower", "the internet. See the ngrok documentation for more information: https://ngrok.com/docs", "robot.get_in_position() # First, have Cozmo play animation \"ID_pokedB\", which tells", "example demonstrates how \"If This Then That\" (http://ifttt.com) can be", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "this file except in compliance with the License. # You", "\"ID_pokedB\" with another animation. Run # remote_control_cozmo.py to see a", "“that\". 2. Select “Maker\" to set it as your action", "the email being received. ''' json_object = await request.json() #", "extra helper methods cozmo.conn.CozmoConnection.robot_factory = IFTTTRobot try: sdk_conn = cozmo.connect_on_loop(app.loop)", "an applet: https://ifttt.com/create c) Set up your trigger. 1. 
Click", "custom robot class with extra helper methods cozmo.conn.CozmoConnection.robot_factory = IFTTTRobot", "be used make Cozmo respond when a Gmail account receives", "ngrok, which sets up a secure tunnel to localhost running", "and sign into https://ifttt.com b) Create an applet: https://ifttt.com/create c)", "click “Allow” to provide permissions to IFTTT for your email", "IFTTT applet webpage, again click “Check now”. This should cause", "the email, and then show a mailbox image on his", "in the background so the HTTP server responds immediately. asyncio.ensure_future(read_name())", "a) Sign up and sign into https://ifttt.com b) Create an", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "page, click “Check now”. See that IFTTT confirms that the", "how \"If This Then That\" (http://ifttt.com) can be used make", "obtain a copy of the License in the file LICENSE.txt", "called (which sends a web request received by the web", "# # Licensed under the Apache License, Version 2.0 (the", "“Check now”. See that IFTTT confirms that the applet was", "account, and click “Allow” to provide permissions to IFTTT for", "applet on the \"If This Then That\" website. a) Sign", "__name__ == '__main__': cozmo.setup_basic_logging() cozmo.robot.Robot.drive_off_charger_on_connect = False # Use our", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "account in your recipe d) On your IFTTT applet webpage,", "example. When necessary, he will be rolled off and back", "import sys try: from aiohttp import web except ImportError: sys.exit(\"Cannot", "to the Gmail account in your recipe d) On your", "sdk_conn = cozmo.connect_on_loop(app.loop) # Wait for the robot to become", "static ip, URL or similar that can be reached from", "when a Gmail account receives an email. 
Instructions below will", "to download and install: https://ngrok.com/download b) Run this command to", "an animation, speak the email sender's name and show a", "3. Click “Make a web request\" and fill out the", "robot = request.app['robot'] async def read_name(): try: async with robot.perform_off_charger():", "aiohttp` to install\") import cozmo from common import IFTTTRobot app", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "“Allow” to provide permissions to IFTTT for your email account.", "Follow instructions here to download and install: https://ngrok.com/download b) Run", "import IFTTTRobot app = web.Application() async def serve_gmail(request): '''Define an", "server responds immediately. asyncio.ensure_future(read_name()) return web.Response(text=\"OK\") # Attach the function", "can be used make Cozmo respond when a Gmail account", "the email was received and send a web request to", "Select “Maker\" to set it as your action channel. Connect", "robot class with extra helper methods cozmo.conn.CozmoConnection.robot_factory = IFTTTRobot try:", "\"If This Then That\" website. a) Sign up and sign", "“Make a web request\" and fill out the fields as", "lower his lift, announce the email, and then show a", "Cozmo's face.''' await robot.get_in_position() # First, have Cozmo play animation", "cozmo.setup_basic_logging() cozmo.robot.Robot.drive_off_charger_on_connect = False # Use our custom robot class", "= app.loop.run_until_complete(sdk_conn.wait_for_robot()) except cozmo.ConnectionError as e: sys.exit(\"A connection error occurred:", "Connect to the Maker channel if prompted. 3. Click “Make", "and Lift to make it easy to see Cozmo's face.'''", "fill out the fields as follows. 
Remember your publicly accessible", "ip, URL or similar that can be reached from the", "Gmail example This example demonstrates how \"If This Then That\"", "the command line: ./ifttt_gmail.py b) On ifttt.com, on your applet", "Run this script at the command line: ./ifttt_gmail.py b) On", "serve_gmail(request): '''Define an HTTP POST handler for receiving requests from", "the web server started in this example), Cozmo will play", "download and install: https://ngrok.com/download b) Run this command to create", "roll off the charger, raise and lower his lift, announce", "Instructions below will lead you through setting up an applet", "install: https://ngrok.com/download b) Run this command to create a secure", "publicly accessible URL from above (e.g., http://55e57164.ngrok.io) and use it", "\"+ from_email_address) # Perform Cozmo's task in the background so", "this example), Cozmo will play an animation, speak the email", "exposes your local web server to the internet. See the", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "''' import asyncio import re import sys try: from aiohttp", "# Extract the name of the email sender. from_email_address =", "add it to the app object. app['robot'] = app.loop.run_until_complete(sdk_conn.wait_for_robot()) except", "(c) 2016 Anki, Inc. # # Licensed under the Apache", "your applet, below. WARNING: Using ngrok exposes your local web", "or implied. # See the License for the specific language", "web.Response(text=\"OK\") # Attach the function as an HTTP handler. app.router.add_post('/iftttGmail',", "sets up a secure tunnel to localhost running on your", "cozmo.connect_on_loop(app.loop) # Wait for the robot to become available and", "name of the email sender. await robot.say_text(\"Email from \" +", "and run the example: 1) Provide a a static ip,", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "use it in the URL field, followed by \"/iftttGmail\" as", "1. Click \"this\". 2. Select \"Gmail\" as your service. If", "“Create Action\" then “Finish\". 3) Test your applet. a) Run", "robot.play_anim(name='ID_pokedB').wait_for_completed() # Next, have Cozmo speak the name of the", "with extra helper methods cozmo.conn.CozmoConnection.robot_factory = IFTTTRobot try: sdk_conn =", "ifttt.com, on your applet page, click “Check now”. See that", "as e: sys.exit(\"A connection error occurred: %s\" % e) web.run_app(app)", "an applet on the IFTTT website. When the applet trigger", "Cozmo on the charger for this example. When necessary, he", "c) Set up your trigger. 1. Click \"this\". 2. Select", "email sender. from_email_address = json_object[\"FromAddress\"] # Use a regular expression", "application/json Body: {\"FromAddress\":\"{{FromAddress}}\"} 5. Click “Create Action\" then “Finish\". 3)", "the terminal (e.g., http://55e57164.ngrok.io). You will use this address in", "Send an email to the Gmail account in your recipe", "be reached from the If This Then That server. One", "await request.json() # Extract the name of the email sender.", "ngrok: a) Follow instructions here to download and install: https://ngrok.com/download", "except cozmo.RobotBusy: cozmo.logger.warning(\"Robot was busy so didn't read email address:", "received and send a web request to the ifttt_gmail.py script.", "email to the Gmail account in your recipe d) On", "remote_control_cozmo.py to see a list of animations. await robot.play_anim(name='ID_pokedB').wait_for_completed() #", "your publicly accessible URL from above (e.g., http://55e57164.ngrok.io) and use", "requests from If This Then That. 
You may modify this", "the animation, # you may replace \"ID_pokedB\" with another animation.", "Provide a a static ip, URL or similar that can", "from \" + email_local_part).wait_for_completed() # Last, have Cozmo display an", "from the If This Then That server. One easy way", "reached from the If This Then That server. One easy", "to do this is with ngrok, which sets up a", "(the \"License\"); # you may not use this file except", "email. Instructions below will lead you through setting up an", "# you may not use this file except in compliance", "python3 # Copyright (c) 2016 Anki, Inc. # # Licensed", "Maker channel if prompted. 3. Click “Make a web request\"", "read email address: \"+ from_email_address) # Perform Cozmo's task in", "and fill out the fields as follows. Remember your publicly", "mailbox image on his face. Please place Cozmo on the", "example: 1) Provide a a static ip, URL or similar", "his face. ''' import asyncio import re import sys try:", "may replace \"ID_pokedB\" with another animation. Run # remote_control_cozmo.py to", "should cause IFTTT to detect that the email was received", "show a mailbox image on his face. ''' import asyncio", "# you may replace \"ID_pokedB\" with another animation. Run #", "and add it to the app object. app['robot'] = app.loop.run_until_complete(sdk_conn.wait_for_robot())", "back on. Follow these steps to set up and run", "try: from aiohttp import web except ImportError: sys.exit(\"Cannot import from", "available and add it to the app object. app['robot'] =", "email account. Click \"Done\". 3. Under \"Choose a Trigger\", select", "Cozmo reacts to the email being received. ''' json_object =", "steps to set up and run the example: 1) Provide", "helper methods cozmo.conn.CozmoConnection.robot_factory = IFTTTRobot try: sdk_conn = cozmo.connect_on_loop(app.loop) #", "# # Unless required by applicable law or agreed to", "the name of the email sender. 
from_email_address = json_object[\"FromAddress\"] #", "applet on the IFTTT website. When the applet trigger is", "lift. To change the animation, # you may replace \"ID_pokedB\"", "# You may obtain a copy of the License in", "machine. To set up ngrok: a) Follow instructions here to", "secure tunnel to localhost running on your machine. To set", "Select \"Gmail\" as your service. If prompted, click \"Connect\", select", "place Cozmo on the charger for this example. When necessary,", "Version 2.0 (the \"License\"); # you may not use this", "a secure public URL for port 8080: ./ngrok http 8080", "on the charger for this example. When necessary, he will", "and lower his lift. To change the animation, # you", "Then That\" (http://ifttt.com) can be used make Cozmo respond when", "will use this address in your applet, below. WARNING: Using", "2016 Anki, Inc. # # Licensed under the Apache License,", "for this example. When necessary, he will be rolled off", "play an animation, speak the email sender's name and show", "then “Finish\". 3) Test your applet. a) Run this script", "on the \"If This Then That\" website. a) Sign up", "Test your applet. a) Run this script at the command", "face. Please place Cozmo on the charger for this example.", "a secure tunnel to localhost running on your machine. To", "an email image on his face. robot.display_image_file_on_face(\"../face_images/ifttt_gmail.png\") except cozmo.RobotBusy: cozmo.logger.warning(\"Robot", "with another animation. Run # remote_control_cozmo.py to see a list", "implied. # See the License for the specific language governing", "and use it in the URL field, followed by \"/iftttGmail\"", "to see a list of animations. await robot.play_anim(name='ID_pokedB').wait_for_completed() # Next,", "permissions and # limitations under the License. '''\"If This Then", "the charger for this example. When necessary, he will be", "image on his face. 
robot.display_image_file_on_face(\"../face_images/ifttt_gmail.png\") except cozmo.RobotBusy: cozmo.logger.warning(\"Robot was busy", "another animation. Run # remote_control_cozmo.py to see a list of", "under the Apache License, Version 2.0 (the \"License\"); # you", "off the charger, raise and lower his lift, announce the", "# Use our custom robot class with extra helper methods", "was busy so didn't read email address: \"+ from_email_address) #", "email, and then show a mailbox image on his face.", "in inbox\". d) Set up your action. 1. Click “that\".", "the Maker channel if prompted. 3. Click “Make a web", "+ email_local_part).wait_for_completed() # Last, have Cozmo display an email image", "the HTTP server responds immediately. asyncio.ensure_future(read_name()) return web.Response(text=\"OK\") # Attach", "be rolled off and back on. Follow these steps to", "on your machine. To set up ngrok: a) Follow instructions", "in your recipe d) On your IFTTT applet webpage, again", "IFTTT website. When the applet trigger is called (which sends", "= False # Use our custom robot class with extra", "by applicable law or agreed to in writing, software #", "up your trigger. 1. Click \"this\". 2. Select \"Gmail\" as", "web.Application() async def serve_gmail(request): '''Define an HTTP POST handler for", "set up ngrok: a) Follow instructions here to download and", "way to do this is with ngrok, which sets up", "command to create a secure public URL for port 8080:", "email_local_part).wait_for_completed() # Last, have Cozmo display an email image on", "That\" (http://ifttt.com) can be used make Cozmo respond when a", "to the ifttt web request, Cozmo should roll off the", "his face. robot.display_image_file_on_face(\"../face_images/ifttt_gmail.png\") except cozmo.RobotBusy: cozmo.logger.warning(\"Robot was busy so didn't", "Copyright (c) 2016 Anki, Inc. # # Licensed under the", "sender. 
from_email_address = json_object[\"FromAddress\"] # Use a regular expression to", "address in your applet, below. WARNING: Using ngrok exposes your", "raise and lower his lift. To change the animation, #", "You may obtain a copy of the License in the", "inbox\". d) Set up your action. 1. Click “that\". 2.", "This Then That\" (http://ifttt.com) can be used make Cozmo respond", "your action. 1. Click “that\". 2. Select “Maker\" to set", "respond when a Gmail account receives an email. Instructions below", "Cozmo display an email image on his face. robot.display_image_file_on_face(\"../face_images/ifttt_gmail.png\") except", "cozmo.logger.warning(\"Robot was busy so didn't read email address: \"+ from_email_address)", "8080: ./ngrok http 8080 c) Note the HTTP forwarding address", "\"Choose a Trigger\", select “Any new email in inbox\". d)", "as shown below: URL: http://55e57164.ngrok.io/iftttGmail Method: POST Content Type: application/json", "robot.perform_off_charger(): '''If necessary, Move Cozmo's Head and Lift to make", "'__main__': cozmo.setup_basic_logging() cozmo.robot.Robot.drive_off_charger_on_connect = False # Use our custom robot", "a mailbox image on his face. ''' import asyncio import", "animation. Run # remote_control_cozmo.py to see a list of animations.", "sys try: from aiohttp import web except ImportError: sys.exit(\"Cannot import", "become available and add it to the app object. app['robot']", "that the applet was checked. c) Send an email to", "Click “Create Action\" then “Finish\". 3) Test your applet. a)", "the ngrok documentation for more information: https://ngrok.com/docs 2) Set up", "create a secure public URL for port 8080: ./ngrok http", "import asyncio import re import sys try: from aiohttp import", "his lift. To change the animation, # you may replace", "localhost running on your machine. 
To set up ngrok: a)", "URL field, followed by \"/iftttGmail\" as shown below: URL: http://55e57164.ngrok.io/iftttGmail", "ImportError: sys.exit(\"Cannot import from aiohttp. Do `pip3 install --user aiohttp`", "applet was checked. c) Send an email to the Gmail", "match_object = re.search(r'([\\w.]+)@([\\w.]+)', from_email_address) email_local_part = match_object.group(1) robot = request.app['robot']", "5. Click “Create Action\" then “Finish\". 3) Test your applet.", "Gmail account, and click “Allow” to provide permissions to IFTTT", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "pieces of the email address match_object = re.search(r'([\\w.]+)@([\\w.]+)', from_email_address) email_local_part", "Unless required by applicable law or agreed to in writing,", "as an HTTP handler. app.router.add_post('/iftttGmail', serve_gmail) if __name__ == '__main__':", "Then That server. One easy way to do this is", "this command to create a secure public URL for port", "https://ifttt.com/create c) Set up your trigger. 1. Click \"this\". 2.", "Then That. You may modify this method to change how", "Use a regular expression to break apart pieces of the", "the app object. app['robot'] = app.loop.run_until_complete(sdk_conn.wait_for_robot()) except cozmo.ConnectionError as e:", "change how Cozmo reacts to the email being received. '''", "the specific language governing permissions and # limitations under the", "= request.app['robot'] async def read_name(): try: async with robot.perform_off_charger(): '''If", "sends a web request received by the web server started", "import web except ImportError: sys.exit(\"Cannot import from aiohttp. 
Do `pip3", "that the email was received and send a web request", "applicable law or agreed to in writing, software # distributed", "server started in this example), Cozmo will play an animation,", "more information: https://ngrok.com/docs 2) Set up your applet on the", "the URL field, followed by \"/iftttGmail\" as shown below: URL:", "POST Content Type: application/json Body: {\"FromAddress\":\"{{FromAddress}}\"} 5. Click “Create Action\"", "from above (e.g., http://55e57164.ngrok.io) and use it in the URL", "display an email image on his face. robot.display_image_file_on_face(\"../face_images/ifttt_gmail.png\") except cozmo.RobotBusy:", "in this example), Cozmo will play an animation, speak the", "and lower his lift, announce the email, and then show", "{\"FromAddress\":\"{{FromAddress}}\"} 5. Click “Create Action\" then “Finish\". 3) Test your", "accessible URL from above (e.g., http://55e57164.ngrok.io) and use it in", "Cozmo will play an animation, speak the email sender's name", "from If This Then That. You may modify this method", "in writing, software # distributed under the License is distributed", "it easy to see Cozmo's face.''' await robot.get_in_position() # First,", "you may replace \"ID_pokedB\" with another animation. Run # remote_control_cozmo.py", "Attach the function as an HTTP handler. app.router.add_post('/iftttGmail', serve_gmail) if", "demonstrates how \"If This Then That\" (http://ifttt.com) can be used", "charger, raise and lower his lift, announce the email, and", "governing permissions and # limitations under the License. '''\"If This", "import from aiohttp. Do `pip3 install --user aiohttp` to install\")", "URL: http://55e57164.ngrok.io/iftttGmail Method: POST Content Type: application/json Body: {\"FromAddress\":\"{{FromAddress}}\"} 5.", "used make Cozmo respond when a Gmail account receives an", "This Then That\" website. 
a) Sign up and sign into", "command line: ./ifttt_gmail.py b) On ifttt.com, on your applet page,", "to IFTTT for your email account. Click \"Done\". 3. Under", "= cozmo.connect_on_loop(app.loop) # Wait for the robot to become available", "= web.Application() async def serve_gmail(request): '''Define an HTTP POST handler", "list of animations. await robot.play_anim(name='ID_pokedB').wait_for_completed() # Next, have Cozmo speak", "= json_object[\"FromAddress\"] # Use a regular expression to break apart", "here to download and install: https://ngrok.com/download b) Run this command", "an email. Instructions below will lead you through setting up", "request to the ifttt_gmail.py script. e) In response to the", "IFTTT for your email account. Click \"Done\". 3. Under \"Choose", "License. '''\"If This Then That\" Gmail example This example demonstrates", "You will use this address in your applet, below. WARNING:", "http://55e57164.ngrok.io) and use it in the URL field, followed by", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "a list of animations. await robot.play_anim(name='ID_pokedB').wait_for_completed() # Next, have Cozmo", "similar that can be reached from the If This Then", "b) Create an applet: https://ifttt.com/create c) Set up your trigger.", "your service. If prompted, click \"Connect\", select your Gmail account,", "License, Version 2.0 (the \"License\"); # you may not use", "trigger is called (which sends a web request received by", "serve_gmail) if __name__ == '__main__': cozmo.setup_basic_logging() cozmo.robot.Robot.drive_off_charger_on_connect = False #", "a mailbox image on his face. Please place Cozmo on", "await robot.get_in_position() # First, have Cozmo play animation \"ID_pokedB\", which", "his face. Please place Cozmo on the charger for this", "new email in inbox\". d) Set up your action. 
1.", "email was received and send a web request to the", "by \"/iftttGmail\" as shown below: URL: http://55e57164.ngrok.io/iftttGmail Method: POST Content", "applet page, click “Check now”. See that IFTTT confirms that", "This Then That\" Gmail example This example demonstrates how \"If", "provide permissions to IFTTT for your email account. Click \"Done\".", "will lead you through setting up an applet on the", "e) In response to the ifttt web request, Cozmo should", "Cozmo play animation \"ID_pokedB\", which tells # Cozmo to raise", "follows. Remember your publicly accessible URL from above (e.g., http://55e57164.ngrok.io)", "is called (which sends a web request received by the", "to break apart pieces of the email address match_object =", "which tells # Cozmo to raise and lower his lift.", "from_email_address) # Perform Cozmo's task in the background so the", "sender. await robot.say_text(\"Email from \" + email_local_part).wait_for_completed() # Last, have", "or similar that can be reached from the If This", "the ifttt web request, Cozmo should roll off the charger,", "the email sender's name and show a mailbox image on", "it in the URL field, followed by \"/iftttGmail\" as shown", "Last, have Cozmo display an email image on his face.", "sign into https://ifttt.com b) Create an applet: https://ifttt.com/create c) Set", "Click \"Done\". 3. Under \"Choose a Trigger\", select “Any new", "as your service. If prompted, click \"Connect\", select your Gmail", "on his face. ''' import asyncio import re import sys", "Body: {\"FromAddress\":\"{{FromAddress}}\"} 5. Click “Create Action\" then “Finish\". 3) Test", "the License for the specific language governing permissions and #", "“Check now”. This should cause IFTTT to detect that the", "above (e.g., http://55e57164.ngrok.io) and use it in the URL field,", "reacts to the email being received. 
''' json_object = await", "Apache License, Version 2.0 (the \"License\"); # you may not", "http://55e57164.ngrok.io/iftttGmail Method: POST Content Type: application/json Body: {\"FromAddress\":\"{{FromAddress}}\"} 5. Click", "either express or implied. # See the License for the", "IFTTT confirms that the applet was checked. c) Send an", "# First, have Cozmo play animation \"ID_pokedB\", which tells #", "will play an animation, speak the email sender's name and", "for receiving requests from If This Then That. You may", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "Move Cozmo's Head and Lift to make it easy to", "face.''' await robot.get_in_position() # First, have Cozmo play animation \"ID_pokedB\",", "up and run the example: 1) Provide a a static", "License in the file LICENSE.txt or at # # http://www.apache.org/licenses/LICENSE-2.0", "One easy way to do this is with ngrok, which", "your IFTTT applet webpage, again click “Check now”. This should", "a copy of the License in the file LICENSE.txt or", "c) Note the HTTP forwarding address shown in the terminal", "select your Gmail account, and click “Allow” to provide permissions", "Next, have Cozmo speak the name of the email sender.", "started in this example), Cozmo will play an animation, speak", "app.loop.run_until_complete(sdk_conn.wait_for_robot()) except cozmo.ConnectionError as e: sys.exit(\"A connection error occurred: %s\"", "object. app['robot'] = app.loop.run_until_complete(sdk_conn.wait_for_robot()) except cozmo.ConnectionError as e: sys.exit(\"A connection", "permissions to IFTTT for your email account. Click \"Done\". 3.", "# Use a regular expression to break apart pieces of", "instructions here to download and install: https://ngrok.com/download b) Run this", "now”. See that IFTTT confirms that the applet was checked.", "a a static ip, URL or similar that can be", "Note the HTTP forwarding address shown in the terminal (e.g.,", "the email sender. 
from_email_address = json_object[\"FromAddress\"] # Use a regular", "is with ngrok, which sets up a secure tunnel to", "https://ifttt.com b) Create an applet: https://ifttt.com/create c) Set up your", "except ImportError: sys.exit(\"Cannot import from aiohttp. Do `pip3 install --user", "ngrok documentation for more information: https://ngrok.com/docs 2) Set up your", "HTTP POST handler for receiving requests from If This Then", "a) Follow instructions here to download and install: https://ngrok.com/download b)", "file LICENSE.txt or at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "3) Test your applet. a) Run this script at the", "animation, speak the email sender's name and show a mailbox", "the Gmail account in your recipe d) On your IFTTT", "this method to change how Cozmo reacts to the email", "app = web.Application() async def serve_gmail(request): '''Define an HTTP POST", "ngrok exposes your local web server to the internet. See", "`pip3 install --user aiohttp` to install\") import cozmo from common", "# Copyright (c) 2016 Anki, Inc. # # Licensed under", "http://55e57164.ngrok.io). You will use this address in your applet, below.", "server to the internet. See the ngrok documentation for more", "under the License. '''\"If This Then That\" Gmail example This", "prompted, click \"Connect\", select your Gmail account, and click “Allow”", "script at the command line: ./ifttt_gmail.py b) On ifttt.com, on", "When the applet trigger is called (which sends a web", "confirms that the applet was checked. c) Send an email", "a web request\" and fill out the fields as follows.", "as follows. Remember your publicly accessible URL from above (e.g.,", "action channel. Connect to the Maker channel if prompted. 3.", "\"License\"); # you may not use this file except in", "below: URL: http://55e57164.ngrok.io/iftttGmail Method: POST Content Type: application/json Body: {\"FromAddress\":\"{{FromAddress}}\"}", "On ifttt.com, on your applet page, click “Check now”. 
See", "the charger, raise and lower his lift, announce the email,", "return web.Response(text=\"OK\") # Attach the function as an HTTP handler.", "of animations. await robot.play_anim(name='ID_pokedB').wait_for_completed() # Next, have Cozmo speak the", "and back on. Follow these steps to set up and", "3. Under \"Choose a Trigger\", select “Any new email in", "busy so didn't read email address: \"+ from_email_address) # Perform", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "mailbox image on his face. ''' import asyncio import re", "asyncio import re import sys try: from aiohttp import web", "as your action channel. Connect to the Maker channel if", "This Then That server. One easy way to do this", "below. WARNING: Using ngrok exposes your local web server to", "# distributed under the License is distributed on an \"AS", "'''\"If This Then That\" Gmail example This example demonstrates how", "image on his face. ''' import asyncio import re import", "Cozmo's Head and Lift to make it easy to see", "function as an HTTP handler. app.router.add_post('/iftttGmail', serve_gmail) if __name__ ==", "a regular expression to break apart pieces of the email", "a static ip, URL or similar that can be reached", "# Unless required by applicable law or agreed to in", "web except ImportError: sys.exit(\"Cannot import from aiohttp. Do `pip3 install", "Gmail account receives an email. Instructions below will lead you", "Click \"this\". 2. Select \"Gmail\" as your service. If prompted,", "email address: \"+ from_email_address) # Perform Cozmo's task in the", "Trigger\", select “Any new email in inbox\". d) Set up", "the background so the HTTP server responds immediately. asyncio.ensure_future(read_name()) return", "he will be rolled off and back on. Follow these", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "Extract the name of the email sender. 
from_email_address = json_object[\"FromAddress\"]", "click \"Connect\", select your Gmail account, and click “Allow” to", "Cozmo speak the name of the email sender. await robot.say_text(\"Email", "line: ./ifttt_gmail.py b) On ifttt.com, on your applet page, click", "lower his lift. To change the animation, # you may", "to detect that the email was received and send a", "on the IFTTT website. When the applet trigger is called", "from aiohttp import web except ImportError: sys.exit(\"Cannot import from aiohttp.", "forwarding address shown in the terminal (e.g., http://55e57164.ngrok.io). You will", "play animation \"ID_pokedB\", which tells # Cozmo to raise and", "setting up an applet on the IFTTT website. When the", "from_email_address = json_object[\"FromAddress\"] # Use a regular expression to break", "class with extra helper methods cozmo.conn.CozmoConnection.robot_factory = IFTTTRobot try: sdk_conn", "make it easy to see Cozmo's face.''' await robot.get_in_position() #", "applet: https://ifttt.com/create c) Set up your trigger. 1. Click \"this\".", "email in inbox\". d) Set up your action. 1. Click", "to the app object. app['robot'] = app.loop.run_until_complete(sdk_conn.wait_for_robot()) except cozmo.ConnectionError as", "Gmail account in your recipe d) On your IFTTT applet", "received by the web server started in this example), Cozmo", "detect that the email was received and send a web", "json_object[\"FromAddress\"] # Use a regular expression to break apart pieces", "to install\") import cozmo from common import IFTTTRobot app =", "the Apache License, Version 2.0 (the \"License\"); # you may", "app.router.add_post('/iftttGmail', serve_gmail) if __name__ == '__main__': cozmo.setup_basic_logging() cozmo.robot.Robot.drive_off_charger_on_connect = False", "request.json() # Extract the name of the email sender. from_email_address", "Lift to make it easy to see Cozmo's face.''' await", "the name of the email sender. 
await robot.say_text(\"Email from \"", "which sets up a secure tunnel to localhost running on", "up ngrok: a) Follow instructions here to download and install:", "secure public URL for port 8080: ./ngrok http 8080 c)", "LICENSE.txt or at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required" ]
[ "fig= txt.get_figure() if figure is None else figure if transformation", "== 0: return x[0], y[0] if xfrac == 1: return", "Use 'import matplotlib as mpl'. screewidth : int Width of", "z.ndim != 2 or z.shape != (y.size, x.size): raise ValueError(\"z.shape", "fig = mlab.figure(bgcolor=(0,0,0), fgcolor=(1,1,1), size=size) # plot lines down to", "dx = np.diff(x) dy = np.diff(y) if not np.allclose(dx, dx[0],", "mlab.figure(bgcolor=(0,0,0), fgcolor=(1,1,1), size=size) # plot lines down to the dec=0", "origin='lower', extent=[x.min(), x.max(), y.min(), y.max()], interpolation='nearest', aspect='auto', **kw) plt.axis('tight') def", "utf-8 -*- \"\"\" Created on Fri May 30 17:15:27 2014", "consider ussing a ::2 slice for fewer dashes = [[],", "= [[], [30, 10], [20, 8], [10, 5], [3, 2],", "with matplotlib sometimes not showing polygon when it extends beyond", "= mplot.pyplot.figure(figsize=figsize, dpi=dpi) mplot.rcParams.update({'font.size': fontsize}) return fig, fontsize def pcolor_reg(x,", "coordinates z = dist * np.sin(dec) h = dist *", "for fewer dashes = [[], [30, 10], [20, 8], [10,", "return fig, fontsize def pcolor_reg(x, y, z, **kw): \"\"\" Similar", "xy = [] def onclick(event): if not event.inaxes: fig.canvas.stop_event_loop() else:", "txt_scale * 0.75 for xx, yy, zz, label in zip(x,", "['top', 'bottom', 'left', 'right']] bigax.tick_params(labelleft=False, labelbottom=False, left='off', bottom='off') bigax.set_zorder(-10) return", "scale factors for converting text sizes in points to another", "if scale == 'log': lx, ly = point_along_line(np.log10(x), np.log10(y), xfrac,", "zoff = [r_label * v for v in [xoff, yoff,", "they are created and maximized for an audience far from", "scale == 'linear': return newlim(oldlim) elif scale == 'log': return", "timeout=600.): if fig is None: fig = plt.gcf() xy =", "bigax.set_zorder(-10) return bigax def log_frac(x, frac): l0, l1 = list(map(np.log10,", "args[0], args[1] = edges, values ax = kwargs.pop('ax', plt.gca()) return", 
"list(map(np.append, (r, T, labels), (1.0, 5780.0, 'Sun'))) # get xyz", "fig, fontsize def pcolor_reg(x, y, z, **kw): \"\"\" Similar to", "ax = fig.gca() elif isinstance(ax_or_fig, plt.Axes): ax = ax_or_fig fig", "y, **kw) if fmt is None else ax.plot(x, y, fmt,", "y = r*np.cos(t), r*np.sin(t) z = np.zeros(n+1) x, y =", "r_factor xoff, yoff, zoff = [r_label * v for v", "[x] if errneg is not None: xn = xl -", "factors for converting text sizes in points to another coordinate.", "= ax.get_figure() else: raise TypeError('ax_or_fig must be a Figure or", "it extends beyond plot range xlim = ax.get_xlim() inrange =", "def log_frac(x, frac): l0, l1 = list(map(np.log10, x)) ld =", "coordinates are requested and the data is plotted on a", "hc * np.sin(az * np.pi / 180.0) zc = -np.cos(el", "5, 3, 5, 10, 5, 3, 5], [15] + [5,", "ax = kwargs.pop('ax', plt.gca()) return ax.plot(*args, **kwargs) def point_along_line(x, y,", "sizes in points to another coordinate. Useful for properly spacing", "= yc / np.sqrt(xc**2 + yc**2) yoff = np.sqrt(1.0 -", "r, r, r, scalars=T, mode='sphere', scale_factor=r_factor, figure=fig, resolution=100) pts.glyph.color_mode =", "def step(*args, **kwargs): edges, values = args[0], args[1] # deal", "3, 5], [15] + [5, 3]*3 + [5], [15] +", "height. For slides the standard is the full height of", "2: ylo = y - yerr[0,:] yhi = y +", "h_ax_norm = ax.get_position().size w_ax_in = w_ax_norm * w_fig_in h_ax_in =", "= plt.gca() def newlim(oldlim): delta = abs(oldlim[1] - oldlim[0]) pad", "3D diagram of stars positions relative to the Sun, with", "yy, zz, label in zip(x, y, z, labels): mlab.text3d(xx +", "= np.asarray(edges) if edges.ndim == 2: if np.any(edges[1:,0] < edges[:-1,1]):", "coordinate == 'axes': return 1.0/w_ax_pts, 1.0/h_ax_pts if coordinate == 'data':", "## add ra=0 line line = mlab.plot3d([0, r], [0, 0],", "a figure of standard size for publishing. 
implemented values for", "n)] mlab.triangular_mesh(x, y, z, triangles, color=(1,1,1), opacity=0.3, figure=fig) ## add", "(for axes labels). Meant to be used with only a", "**kwargs) def point_along_line(x, y, xfrac=None, xlbl=None, scale='linear'): if scale ==", "xlim = ax.get_xlim() inrange = mnp.inranges(x, xlim) if not np.all(inrange):", "30 17:15:27 2014 @author: Parke \"\"\" from __future__ import division,", "newlim(loglim) return (10.0**newloglim[0], 10.0**newloglim[1]) def newlim_either(oldlim,axlim,scale): if axlim[1] < axlim", "-*- \"\"\" Created on Fri May 30 17:15:27 2014 @author:", "in oldlim] newloglim = newlim(loglim) return (10.0**newloglim[0], 10.0**newloglim[1]) def newlim_either(oldlim,axlim,scale):", "the data point pts.glyph.glyph_source.glyph_source.center = [0, 0, 0] # set", "np.abs(errneg)) result.append(xn) if errpos is not None: xp = 10**(x", "mnp dpi = 100 fullwidth = 10.0 halfwidth = 5.0", "xp = np.log10(x + errpos) - xl result.append(xp) return result", "the glyphs on the data point pts.glyph.glyph_source.glyph_source.center = [0, 0,", "view=None, size=(800,800), txt_scale=1.0): \"\"\" Make a 3D diagram of stars", "plot labels size = r_factor * txt_scale * 0.75 for", "= [np.insert(a, 0, 0.0) for a in [x,y]] triangles =", "and distances as desired. Coordinates must be in degrees. Distance", "slice for fewer dashes = [[], [30, 10], [20, 8],", "p = ax.plot(x, y, **kw) if fmt is None else", "fontsize}) return fig, fontsize def pcolor_reg(x, y, z, **kw): \"\"\"", "such when you need to know sizes before the text", "screenwidth)) mpl.rcParams['font.size'] = fontsize def textBoxSize(txt, transformation=None, figure=None): \"\"\"Get the", "for xx, yy, zz, label in zip(x, y, z, labels):", "the figure relative to the \"standard\" height. 
For slides the", "else: return (oldlim[0] + pad, oldlim[1] - pad) def newlim_log(oldlim):", "- yerr, y + yerr if ecolor is None: ecolor", "np.diff(y) if not np.allclose(dx, dx[0], 1e-2) or not np.allclose(dy, dy[0],", "on the data point pts.glyph.glyph_source.glyph_source.center = [0, 0, 0] #", "y scale factors for converting text sizes in points to", "values ax = kwargs.pop('ax', plt.gca()) return ax.plot(*args, **kwargs) def point_along_line(x,", "yp if xlbl is not None: return xlbl, np.interp(xlbl, x,", "xx, yy, zz, label in zip(x, y, z, labels): mlab.text3d(xx", "z1 in list(zip(x, y, z))[:-1]: xx, yy, zz = [x1,", "l1 = list(map(np.log10, x)) ld = l1 - l0 l", "- l0 l = ld*frac + l0 return 10**l def", "if errpos is not None: xp = 10**(x + errpos)", "np.array([v] * n) if np.isscalar(v) else v T, r, labels", "hc = np.sin(el * np.pi / 180.0) xc = hc", "lambda v: np.array([v] * n) if np.isscalar(v) else v T,", "glyphs on the data point pts.glyph.glyph_source.glyph_source.center = [0, 0, 0]", "= hc * np.sin(az * np.pi / 180.0) zc =", "xory == 'both': datalim = ax.dataLim.extents[[0,2]] axlim = ax.get_xlim() scale", "height=1.0): \"\"\"Generate a figure of standard size for publishing. implemented", "xoff**2) zoff = 0.0 # xoff, yoff, zoff = xc,", "= 2 fontsize = round(14 / (800.0 / screenwidth)) mpl.rcParams['font.size']", "be used with only a handful of stars. 
\"\"\" from", "0] # set a temperature colormap cmap = true_temp(T) pts.module_manager.scalar_lut_manager.lut.table", "pts.module_manager.scalar_lut_manager.lut.table = cmap # set the camera view mlab.view(focalpoint=(0.0, 0.0,", "mlab.view(focalpoint=(0.0, 0.0, 0.0), figure=fig) if view is not None: mlab.view(*view,", "r*np.sin(t) z = np.zeros(n+1) x, y = [np.insert(a, 0, 0.0)", "/ 180.0) yc = hc * np.sin(az * np.pi /", "- oldlim[0]) pad = delta*margin if oldlim[1] > oldlim[0]: return", "\"\"\" Set matplotlibrc values so that plots are readable as", "if transformation is None.\"\"\" fig= txt.get_figure() if figure is None", "np.asarray(edges) if edges.ndim == 2: if np.any(edges[1:,0] < edges[:-1,1]): raise", "matplotlib as mplot import matplotlib.pyplot as plt import mypy.my_numpy as", "None: xn = xl - 10**(x - np.abs(errneg)) result.append(xn) if", "the desired coordinates. Defaults to figure coordinates if transformation is", "0.0 # xoff, yoff, zoff = xc, yc, zc #", "if isinstance(ax_or_fig, plt.Figure): fig = ax_or_fig ax = fig.gca() elif", "function? def standard_figure(app, slideAR=1.6, height=1.0): \"\"\"Generate a figure of standard", "triangles = [(0, i, i + 1) for i in", "uniform\") if np.issubdtype(z.dtype, np.complexfloating): zp = np.zeros(z.shape, float) zp[...] 
=", "= ax_or_fig fig = ax.get_figure() else: raise TypeError('ax_or_fig must be", "the grid is uniform, and do plotting with the (much", "= [x] if errneg is not None: xn = xl", "x[0], y[0] if xfrac == 1: return x[-1], y[-1] else:", "a) for a in [x,y]] return xp, yp if xlbl", "= mnp.lace(edges[:-1], edges[1:]) values = mnp.lace(values, values) args = list(args)", "but the sun lines = [] for x1, y1, z1", "w_fig_in, h_fig_in = ax.get_figure().get_size_inches() if coordinate == 'fig': return 1.0/(w_fig_in*72),", "assume that the grid is uniform, and do plotting with", "None.\"\"\" fig= txt.get_figure() if figure is None else figure if", "**kwargs): edges, values = args[0], args[1] # deal with potentially", "[xoff, yoff, zoff]] # plot labels size = r_factor *", "np.nan) edges = mnp.lace(edges[:-1], edges[1:]) values = mnp.lace(values, values) args", "ecolor is None: ecolor = p[0].get_color() # deal with matplotlib", "specifications edges = np.asarray(edges) if edges.ndim == 2: if np.any(edges[1:,0]", "coordinate == 'data': xlim = ax.get_xlim() ylim = ax.get_ylim() if", "text is made (otherwise you can use textBoxSize). 
Coordinate can", "## add labels # unit vec to camera view =", "is None: fig = plt.gcf() ax = fig.gca() else: if", "i + 1) for i in range(1, n)] mlab.triangular_mesh(x, y,", "return ax.plot(*args, **kwargs) def point_along_line(x, y, xfrac=None, xlbl=None, scale='linear'): if", "100 fullwidth = 10.0 halfwidth = 5.0 # use these", "== 'data': xlim = ax.get_xlim() ylim = ax.get_ylim() if ax.get_xscale()", "20 figsize = [fullwidth, fullwidth/slideAR*height] fig = mplot.pyplot.figure(figsize=figsize, dpi=dpi) mplot.rcParams.update({'font.size':", "np.allclose(dy, dy[0], 1e-2): raise ValueError(\"The grid must be uniform\") if", "yy + yoff, zz + zoff, label, figure=fig, color=(1,1,1), scale=size)", "if ax is None: ax = plt.gca() p = ax.plot(x,", "x = h * np.cos(ra) y = h * np.sin(ra)", "lines down to the dec=0 plane for all but the", "zoff, label, figure=fig, color=(1,1,1), scale=size) ## add translucent dec=0 surface", "clicks. Click outside of the axes \" \\ \"when done.\")", "is not None: if xfrac == 0: return x[0], y[0]", "= [0, 0, 0] # set a temperature colormap cmap", "potentially gappy 2-column bin specifications edges = np.asarray(edges) if edges.ndim", "if edges.ndim == 2: if np.any(edges[1:,0] < edges[:-1,1]): raise ValueError('Some", "return newlim(oldlim) elif scale == 'log': return newlim_log(oldlim) elif scale", "[(0, i, i + 1) for i in range(1, n)]", "with the (much faster) `imshow` function. \"\"\" x, y, z", "= np.sum(inrange) yends = np.interp(xlim, x, y) yloends = np.interp(xlim,", "for an audience far from a screen. Parameters ---------- mpl", "# unit vec to camera view = mlab.view() az, el", "ylbl, scale) return 10 ** lx, 10 ** ly if", "< edges[:-1,0]): raise ValueError('Bins must be in increasing order.') gaps", "be in pc (for axes labels). Meant to be used", "= y - yerr, y + yerr if ecolor is", "/ (800.0 / screenwidth)) mpl.rcParams['font.size'] = fontsize def textBoxSize(txt, transformation=None,", "to the \"standard\" height. 
For slides the standard is the", "loglim = [np.log10(l) for l in oldlim] newloglim = newlim(loglim)", "inrange = mnp.inranges(x, xlim) if not np.all(inrange): n = np.sum(inrange)", "`imshow` function. \"\"\" x, y, z = np.asarray(x), np.asarray(y), np.asarray(z)", "n) r = np.max(dist * np.cos(dec)) x, y = r*np.cos(t),", "camera if xc**2 + yc**2 == 0.0: xoff = 1.0", "np.log10(xlim) if ax.get_yscale() == 'log': ylim = np.log10(ylim) w_ax_data =", "stars positions relative to the Sun, with semi-accurate colors and", "mpl.rcParams['font.size'] = fontsize def textBoxSize(txt, transformation=None, figure=None): \"\"\"Get the width", "= bboxConv[1,0] - bboxConv[0,0] h = bboxConv[1,1] - bboxConv[0,1] return", "h = bboxConv[1,1] - bboxConv[0,1] return w, h def stars3d(ra,", "standard size for publishing. implemented values for app (application) are:", "pts.glyph.glyph_source.glyph_source.center = [0, 0, 0] # set a temperature colormap", "101 t = np.linspace(0.0, 2*np.pi, n) r = np.max(dist *", "mnp.lace(edges[:-1], edges[1:]) values = mnp.lace(values, values) args = list(args) args[0],", "n], yhiends) f = ax.fill_between(x,ylo,yhi,color=ecolor,alpha=ealpha) return p[0],f def onscreen_pres(mpl, screenwidth=1200):", "[5, 3]*3 + [5], [15] + [5, 3]*2 + [5],", "+ [5, 3] + [5]] def click_coords(fig=None, timeout=600.): if fig", "# xoff, yoff, zoff = xc, yc, zc # scale", "click_coords(fig=None, timeout=600.): if fig is None: fig = plt.gcf() xy", "ra, dec, dist = list(map(np.append, (ra, dec, dist), (0.0, 0.0,", "is not None: xn = xl - np.log10(x - np.abs(errneg))", "surface n = 101 t = np.linspace(0.0, 2*np.pi, n) r", "= [np.interp(xfrac, f, a) for a in [x,y]] return xp,", "= r_factor * txt_scale * 0.75 for xx, yy, zz,", "ValueError(\"x and y should be 1-dimensional\") if z.ndim != 2", "x, y) yloends = np.interp(xlim, x, ylo) yhiends = np.interp(xlim,", "overlap') if np.any(edges[1:,0] < edges[:-1,0]): raise ValueError('Bins must be in", "plot range xlim = ax.get_xlim() inrange = 
mnp.inranges(x, xlim) if", "== 'log': xlim = np.log10(xlim) if ax.get_yscale() == 'log': ylim", "and maximized for an audience far from a screen. Parameters", "opacity=0.3, figure=fig) ## add ra=0 line line = mlab.plot3d([0, r],", "= 100 fullwidth = 10.0 halfwidth = 5.0 # use", "+ pad) else: return (oldlim[0] + pad, oldlim[1] - pad)", "0.0) for a in [x,y]] triangles = [(0, i, i", "= np.cumsum(np.sqrt(np.diff(x)**2 + np.diff(y)**2)) d = np.insert(d, 0, 0) f", "xory == 'x' or xory == 'both': datalim = ax.dataLim.extents[[0,2]]", "dist = list(map(np.append, (ra, dec, dist), (0.0, 0.0, 0.0))) r,", "cmap # set the camera view mlab.view(focalpoint=(0.0, 0.0, 0.0), figure=fig)", "is the fractional height of the figure relative to the", "# set the camera view mlab.view(focalpoint=(0.0, 0.0, 0.0), figure=fig) if", "plt.gcf() xy = [] def onclick(event): if not event.inaxes: fig.canvas.stop_event_loop()", "ecolor=None, ealpha=0.5, ax=None, **kw): if ax is None: ax =", "dex. \"\"\" if ax_or_fig is None: fig = plt.gcf() ax", "xp, yp if xlbl is not None: return xlbl, np.interp(xlbl,", "to camera if xc**2 + yc**2 == 0.0: xoff =", "add the sun ra, dec, dist = list(map(np.append, (ra, dec,", "r, labels = list(map(makearr, (T, r, labels))) # add the", "temperature colormap cmap = true_temp(T) pts.module_manager.scalar_lut_manager.lut.table = cmap # set", "(0.0, 0.0, 0.0))) r, T, labels = list(map(np.append, (r, T,", "'symlog': raise NotImplementedError('Past Parke to future Parke, you did\\'t write", "as desired. Coordinates must be in degrees. 
Distance is assumed", "= mlab.view() az, el = view[:2] hc = np.sin(el *", "yends = np.interp(xlim, x, y) yloends = np.interp(xlim, x, ylo)", "ly if xfrac is not None: if xfrac == 0:", "np.insert(yhi[inrange], [0, n], yhiends) f = ax.fill_between(x,ylo,yhi,color=ecolor,alpha=ealpha) return p[0],f def", "onclick(event): if not event.inaxes: fig.canvas.stop_event_loop() else: xy.append([event.xdata, event.ydata]) print(\"Gathering coordinates", "in points to another coordinate. Useful for properly spacing text", "= round(14 / (800.0 / screenwidth)) mpl.rcParams['font.size'] = fontsize def", "result def step(*args, **kwargs): edges, values = args[0], args[1] #", "ra=0 line line = mlab.plot3d([0, r], [0, 0], [0, 0],", "sun lines = [] for x1, y1, z1 in list(zip(x,", "be uniform\") if np.issubdtype(z.dtype, np.complexfloating): zp = np.zeros(z.shape, float) zp[...]", "= h * np.sin(ra) # make figure fig = mlab.figure(bgcolor=(0,0,0),", "write an implementation for symlog' 'scaled axes.') if xory ==", "* np.pi / 180.0) yc = hc * np.sin(az *", "(oldlim[0] - pad, oldlim[1] + pad) else: return (oldlim[0] +", "fontsize = round(14 / (800.0 / screenwidth)) mpl.rcParams['font.size'] = fontsize", "= [] for x1, y1, z1 in list(zip(x, y, z))[:-1]:", "dist * np.sin(dec) h = dist * np.cos(dec) x =", "be 'data', 'axes', or 'figure'. If data coordinates are requested", "xoff, yoff, zoff = xc, yc, zc # scale orthogonal", "np.complexfloating): zp = np.zeros(z.shape, float) zp[...] = z[...] z =", "get xyz coordinates z = dist * np.sin(dec) h =", "slide. returns the figure object and default font size \"\"\"", "= xc, yc, zc # scale orthogonal vec by sphere", "yc, zc # scale orthogonal vec by sphere size r_label", "[np.log10(l) for l in oldlim] newloglim = newlim(loglim) return (10.0**newloglim[0],", "for i in range(1, n)] mlab.triangular_mesh(x, y, z, triangles, color=(1,1,1),", "= 0.0 zoff = 0.0 else: xoff = yc /", "with semi-accurate colors and distances as desired. 
Coordinates must be", "@author: Parke \"\"\" from __future__ import division, print_function, absolute_import import", "x, y, z = np.asarray(x), np.asarray(y), np.asarray(z) if x.ndim !=", "Coordinates must be in degrees. Distance is assumed to be", "pad) else: return (oldlim[0] + pad, oldlim[1] - pad) def", "be in degrees. Distance is assumed to be in pc", "the axes \" \\ \"when done.\") cid = fig.canvas.mpl_connect('button_press_event', onclick)", "is not None: xp = 10**(x + errpos) - xl", "newlim(oldlim): delta = abs(oldlim[1] - oldlim[0]) pad = delta*margin if", "= list(map(np.log10, x)) ld = l1 - l0 l =", "= edges[1:,0] > edges[:-1,1] edges = np.unique(edges) if np.any(gaps): values", "view is not None: mlab.view(*view, figure=fig) ## add labels #", "fig = mplot.pyplot.figure(figsize=figsize, dpi=dpi) mplot.rcParams.update({'font.size': fontsize}) return fig, fontsize def", "is None.\"\"\" fig= txt.get_figure() if figure is None else figure", "= newlim(loglim) return (10.0**newloglim[0], 10.0**newloglim[1]) def newlim_either(oldlim,axlim,scale): if axlim[1] <", "fullwidth/slideAR*height] fig = mplot.pyplot.figure(figsize=figsize, dpi=dpi) mplot.rcParams.update({'font.size': fontsize}) return fig, fontsize", "np.allclose(dx, dx[0], 1e-2) or not np.allclose(dy, dy[0], 1e-2): raise ValueError(\"The", "values for app (application) are: 'fullslide' height is the fractional", "text object's bounding box transformed to the desired coordinates. 
Defaults", "colormap cmap = true_temp(T) pts.module_manager.scalar_lut_manager.lut.table = cmap # set the", "y.max()], interpolation='nearest', aspect='auto', **kw) plt.axis('tight') def errorpoly(x, y, yerr, fmt=None,", "import numpy as np import matplotlib as mplot import matplotlib.pyplot", "return 10 ** lx, 10 ** ly if xfrac is", "edges[:-1,1] edges = np.unique(edges) if np.any(gaps): values = np.insert(values, np.nonzero(gaps),", "zip(x, y, z, labels): mlab.text3d(xx + xoff, yy + yoff,", "[fullwidth, fullwidth/slideAR*height] fig = mplot.pyplot.figure(figsize=figsize, dpi=dpi) mplot.rcParams.update({'font.size': fontsize}) return fig,", "as np import matplotlib as mplot import matplotlib.pyplot as plt", "mlab.quiver3d(x, y, z, r, r, r, scalars=T, mode='sphere', scale_factor=r_factor, figure=fig,", "dpi=dpi) mplot.rcParams.update({'font.size': fontsize}) return fig, fontsize def pcolor_reg(x, y, z,", "xc**2 + yc**2 == 0.0: xoff = 1.0 yoff =", "* txt_scale * 0.75 for xx, yy, zz, label in", "if np.isscalar(v) else v T, r, labels = list(map(makearr, (T,", "implementation for symlog' 'scaled axes.') if xory == 'x' or", "errpos=None): xl = np.log10(x) result = [x] if errneg is", "list(zip(x, y, z))[:-1]: xx, yy, zz = [x1, x1], [y1,", "yhiends) f = ax.fill_between(x,ylo,yhi,color=ecolor,alpha=ealpha) return p[0],f def onscreen_pres(mpl, screenwidth=1200): \"\"\"", "'fig': return 1.0/(w_fig_in*72), 1.0/(h_fig_in*72) w_ax_norm, h_ax_norm = ax.get_position().size w_ax_in =", "event.ydata]) print(\"Gathering coordinates of mouse clicks. Click outside of the", "-*- coding: utf-8 -*- \"\"\" Created on Fri May 30", "not None: mlab.view(*view, figure=fig) ## add labels # unit vec", "= 1.0 yoff = 0.0 zoff = 0.0 else: xoff", "else figure if transformation is None: transformation = fig.transFigure coordConvert", "# consider ussing a ::2 slice for fewer dashes =", "\"\"\" if app == 'fullslide': fontsize = 20 figsize =", "screen. 
Parameters ---------- mpl : module Current matplotlib module. Use", "= np.zeros(z.shape, float) zp[...] = z[...] z = zp plt.imshow(z,", "1: raise ValueError(\"x and y should be 1-dimensional\") if z.ndim", "color=(0.7,0.7,0.7), line_width=0.5, figure=fig) lines.append(line) # plot spheres r_factor = np.max(dist)", "zoff = 0.0 else: xoff = yc / np.sqrt(xc**2 +", "xlbl, ylbl, scale) return 10 ** lx, 10 ** ly", "be a Figure or Axes instance, if given.') w_fig_in, h_fig_in", "if given.') w_fig_in, h_fig_in = ax.get_figure().get_size_inches() if coordinate == 'fig':", "np.cos(ra) y = h * np.sin(ra) # make figure fig", "errpos=None): xl = 10**x result = [xl] if errneg is", "= ax.get_figure().get_size_inches() if coordinate == 'fig': return 1.0/(w_fig_in*72), 1.0/(h_fig_in*72) w_ax_norm,", "edges = np.asarray(edges) if edges.ndim == 2: if np.any(edges[1:,0] <", "mpl'. screewidth : int Width of the screen in question", "= np.linspace(0.0, 2*np.pi, n) r = np.max(dist * np.cos(dec)) x,", "increasing order.') gaps = edges[1:,0] > edges[:-1,1] edges = np.unique(edges)", "None: ax = plt.gca() def newlim(oldlim): delta = abs(oldlim[1] -", "xp, yp = [np.interp(xfrac, f, a) for a in [x,y]]", "'data': xlim = ax.get_xlim() ylim = ax.get_ylim() if ax.get_xscale() ==", "matplotlib sometimes not showing polygon when it extends beyond plot", "[0.0, z1] line = mlab.plot3d(xx, yy, zz, color=(0.7,0.7,0.7), line_width=0.5, figure=fig)", "errpos is not None: xp = 10**(x + errpos) -", "xlbl=None, scale='linear'): if scale == 'log': lx, ly = point_along_line(np.log10(x),", "def stars3d(ra, dec, dist, T=5000.0, r=1.0, labels='', view=None, size=(800,800), txt_scale=1.0):", "newlim_log(oldlim): loglim = [np.log10(l) for l in oldlim] newloglim =", "orient_to_camera=False, orientation=orientation) if view is not None: mlab.view(*view, figure=fig) return", "f = ax.fill_between(x,ylo,yhi,color=ecolor,alpha=ealpha) return p[0],f def onscreen_pres(mpl, screenwidth=1200): \"\"\" Set", "def 
onclick(event): if not event.inaxes: fig.canvas.stop_event_loop() else: xy.append([event.xdata, event.ydata]) print(\"Gathering", "Make a 3D diagram of stars positions relative to the", "= oldlim[::-1] if scale == 'linear': return newlim(oldlim) elif scale", "return bigax def log_frac(x, frac): l0, l1 = list(map(np.log10, x))", "1-dimensional\") if z.ndim != 2 or z.shape != (y.size, x.size):", "xlim) y = np.insert(y[inrange], [0, n], yends) ylo = np.insert(ylo[inrange],", "ax = plt.gca() def newlim(oldlim): delta = abs(oldlim[1] - oldlim[0])", "of a slide. returns the figure object and default font", "10**l def log2linear(x, errneg=None, errpos=None): xl = 10**x result =", "and height of a text object's bounding box transformed to", "180.0) xc = hc * np.cos(az * np.pi / 180.0)", "xlbl is not None: return xlbl, np.interp(xlbl, x, y) def", "in pixels. Returns ------- None \"\"\" mpl.rcParams['lines.linewidth'] = 2 fontsize", "Distance is assumed to be in pc (for axes labels).", "mode='sphere', scale_factor=r_factor, figure=fig, resolution=100) pts.glyph.color_mode = 'color_by_scalar' # center the", "edges = np.unique(edges) if np.any(gaps): values = np.insert(values, np.nonzero(gaps), np.nan)", "abs(oldlim[1] - oldlim[0]) pad = delta*margin if oldlim[1] > oldlim[0]:", "oldlim[0]: return (oldlim[0] - pad, oldlim[1] + pad) else: return", "i, i + 1) for i in range(1, n)] mlab.triangular_mesh(x,", "of stars positions relative to the Sun, with semi-accurate colors", "yloends = np.interp(xlim, x, ylo) yhiends = np.interp(xlim, x, yhi)", "pad, oldlim[1] + pad) else: return (oldlim[0] + pad, oldlim[1]", "'data', 'axes', or 'figure'. If data coordinates are requested and", "range(1, n)] mlab.triangular_mesh(x, y, z, triangles, color=(1,1,1), opacity=0.3, figure=fig) ##", "only a handful of stars. 
\"\"\" from mayavi import mlab", "5], [15] + [5, 3]*3 + [5], [15] + [5,", "and the data is plotted on a log scale, then", "in [x,y]] triangles = [(0, i, i + 1) for", "0], color=(1,1,1), line_width=1, figure=fig) rtxt = '{:.1f} pc'.format(r) orientation=np.array([180.0, 180.0,", "margin=0.05): if ax is None: ax = plt.gca() def newlim(oldlim):", "bboxConv[1,1] - bboxConv[0,1] return w, h def stars3d(ra, dec, dist,", "np.cumsum(np.sqrt(np.diff(x)**2 + np.diff(y)**2)) d = np.insert(d, 0, 0) f =", "= np.insert(x[inrange], [0, n], xlim) y = np.insert(y[inrange], [0, n],", "y, yerr, fmt=None, ecolor=None, ealpha=0.5, ax=None, **kw): if ax is", "frac): l0, l1 = list(map(np.log10, x)) ld = l1 -", "standard is the full height of a slide. returns the", "np.sin(el * np.pi / 180.0) xc = hc * np.cos(az", "are: 'fullslide' height is the fractional height of the figure", "yerr, y + yerr if ecolor is None: ecolor =", "(1.0, 5780.0, 'Sun'))) # get xyz coordinates z = dist", "3]*3 + [5], [15] + [5, 3]*2 + [5], [15]", "y1], [0.0, z1] line = mlab.plot3d(xx, yy, zz, color=(0.7,0.7,0.7), line_width=0.5,", "with line.set_dashes and iterate through more linestyles than come with", "properly spacing text labels and such when you need to", "aspect='auto', **kw) plt.axis('tight') def errorpoly(x, y, yerr, fmt=None, ecolor=None, ealpha=0.5,", "np.insert(x[inrange], [0, n], xlim) y = np.insert(y[inrange], [0, n], yends)", "size = r_factor * txt_scale * 0.75 for xx, yy,", "ax.set_ylim(newlim_either(datalim,axlim,scale)) #TODO: discard this function? 
def standard_figure(app, slideAR=1.6, height=1.0): \"\"\"Generate", "pcolor_reg(x, y, z, **kw): \"\"\" Similar to `pcolor`, but assume", "sphere size r_label = 1.0 * r_factor xoff, yoff, zoff", "not None: if xfrac == 0: return x[0], y[0] if", "import mypy.my_numpy as mnp dpi = 100 fullwidth = 10.0", "transformation=None, figure=None): \"\"\"Get the width and height of a text", "np.nonzero(gaps), np.nan) edges = mnp.lace(edges[:-1], edges[1:]) values = mnp.lace(values, values)", "import matplotlib as mplot import matplotlib.pyplot as plt import mypy.my_numpy", "if figure is None else figure if transformation is None:", "bigax def log_frac(x, frac): l0, l1 = list(map(np.log10, x)) ld", "[0, n], yloends) yhi = np.insert(yhi[inrange], [0, n], yhiends) f", "h_ax_data = ylim[1] - ylim[0] return w_ax_data/w_ax_pts, h_ax_data/h_ax_pts def tight_axis_limits(ax=None,", "= plt.gcf() ax = fig.gca() else: if isinstance(ax_or_fig, plt.Figure): fig", "or Axes instance, if given.') w_fig_in, h_fig_in = ax.get_figure().get_size_inches() if", "txt.get_window_extent(fig.canvas.renderer) bboxConv = coordConvert(bboxDisp) w = bboxConv[1,0] - bboxConv[0,0] h", "(oldlim[0] + pad, oldlim[1] - pad) def newlim_log(oldlim): loglim =", "az, el = view[:2] hc = np.sin(el * np.pi /", "def onscreen_pres(mpl, screenwidth=1200): \"\"\" Set matplotlibrc values so that plots", "oldlim = oldlim[::-1] if scale == 'linear': return newlim(oldlim) elif", "polygon when it extends beyond plot range xlim = ax.get_xlim()", "dx[0], 1e-2) or not np.allclose(dy, dy[0], 1e-2): raise ValueError(\"The grid", "np.all(inrange): n = np.sum(inrange) yends = np.interp(xlim, x, y) yloends", "n = 101 t = np.linspace(0.0, 2*np.pi, n) r =", "audience far from a screen. Parameters ---------- mpl : module", "faster) `imshow` function. \"\"\" x, y, z = np.asarray(x), np.asarray(y),", "discard this function? 
def standard_figure(app, slideAR=1.6, height=1.0): \"\"\"Generate a figure", "to camera view = mlab.view() az, el = view[:2] hc", "30.0 pts = mlab.quiver3d(x, y, z, r, r, r, scalars=T,", "n = np.sum(inrange) yends = np.interp(xlim, x, y) yloends =", "np.sin(ra) # make figure fig = mlab.figure(bgcolor=(0,0,0), fgcolor=(1,1,1), size=size) #", "y.ndim != 1: raise ValueError(\"x and y should be 1-dimensional\")", "fig.add_axes(pos) [bigax.spines[s].set_visible(False) for s in ['top', 'bottom', 'left', 'right']] bigax.tick_params(labelleft=False,", "- pad) def newlim_log(oldlim): loglim = [np.log10(l) for l in", "sometimes not showing polygon when it extends beyond plot range", "s in ['top', 'bottom', 'left', 'right']] bigax.tick_params(labelleft=False, labelbottom=False, left='off', bottom='off')", "instance, if given.') w_fig_in, h_fig_in = ax.get_figure().get_size_inches() if coordinate ==", "division, print_function, absolute_import import numpy as np import matplotlib as", "dist, T=5000.0, r=1.0, labels='', view=None, size=(800,800), txt_scale=1.0): \"\"\" Make a", "'both': datalim = ax.dataLim.extents[[0,2]] axlim = ax.get_xlim() scale = ax.get_xscale()", "= 1.0 * r_factor xoff, yoff, zoff = [r_label *", "[r_label * v for v in [xoff, yoff, zoff]] #", "+ xoff, yy + yoff, zz + zoff, label, figure=fig,", "def newlim(oldlim): delta = abs(oldlim[1] - oldlim[0]) pad = delta*margin", "#TODO: discard this function? 
def standard_figure(app, slideAR=1.6, height=1.0): \"\"\"Generate a", "mnp.lace(values, values) args = list(args) args[0], args[1] = edges, values", "if np.any(edges[1:,0] < edges[:-1,1]): raise ValueError('Some bins overlap') if np.any(edges[1:,0]", "- xoff**2) zoff = 0.0 # xoff, yoff, zoff =", "r_factor = np.max(dist) / 30.0 pts = mlab.quiver3d(x, y, z,", "requested and the data is plotted on a log scale,", "[x1, x1], [y1, y1], [0.0, z1] line = mlab.plot3d(xx, yy,", "= true_temp(T) pts.module_manager.scalar_lut_manager.lut.table = cmap # set the camera view", "if xfrac == 1: return x[-1], y[-1] else: d =", "xlim = ax.get_xlim() ylim = ax.get_ylim() if ax.get_xscale() == 'log':", "yends) ylo = np.insert(ylo[inrange], [0, n], yloends) yhi = np.insert(yhi[inrange],", "3]*2 + [5], [15] + [5, 3] + [5]] def", "None: ax = plt.gca() p = ax.plot(x, y, **kw) if", "y1, z1 in list(zip(x, y, z))[:-1]: xx, yy, zz =", "raise NotImplementedError('Past Parke to future Parke, you did\\'t write an", "(much faster) `imshow` function. \"\"\" x, y, z = np.asarray(x),", "= dec*np.pi/180.0, ra*np.pi/180.0 makearr = lambda v: np.array([v] * n)", "zz, label in zip(x, y, z, labels): mlab.text3d(xx + xoff,", "plotting with the (much faster) `imshow` function. 
\"\"\" x, y,", "make figure fig = mlab.figure(bgcolor=(0,0,0), fgcolor=(1,1,1), size=size) # plot lines", "if ax is None: ax = plt.gca() def newlim(oldlim): delta", "is not None: return xlbl, np.interp(xlbl, x, y) def textSize(ax_or_fig=None,", "xl result.append(xp) return result def linear2log(x, errneg=None, errpos=None): xl =", "5, 3, 5], [15] + [5, 3]*3 + [5], [15]", "x, y = r*np.cos(t), r*np.sin(t) z = np.zeros(n+1) x, y", "len(ra) dec, ra = dec*np.pi/180.0, ra*np.pi/180.0 makearr = lambda v:", "[5], [15] + [5, 3] + [5]] def click_coords(fig=None, timeout=600.):", "np.asarray(z) if x.ndim != 1 or y.ndim != 1: raise", "n], yends) ylo = np.insert(ylo[inrange], [0, n], yloends) yhi =", "= np.sqrt(1.0 - xoff**2) zoff = 0.0 # xoff, yoff,", "def linear2log(x, errneg=None, errpos=None): xl = np.log10(x) result = [x]", "return w, h def stars3d(ra, dec, dist, T=5000.0, r=1.0, labels='',", "bboxConv[0,0] h = bboxConv[1,1] - bboxConv[0,1] return w, h def", "np.sin(dec) h = dist * np.cos(dec) x = h *", "use textBoxSize). Coordinate can be 'data', 'axes', or 'figure'. If", "ax.get_xscale() ax.set_xlim(newlim_either(datalim,axlim,scale)) if xory == 'y' or xory == 'both':", "figure of standard size for publishing. implemented values for app", "= cmap # set the camera view mlab.view(focalpoint=(0.0, 0.0, 0.0),", "def pcolor_reg(x, y, z, **kw): \"\"\" Similar to `pcolor`, but", "\"when done.\") cid = fig.canvas.mpl_connect('button_press_event', onclick) fig.canvas.start_event_loop(timeout=timeout) fig.canvas.mpl_disconnect(cid) return np.array(xy)", "xl - 10**(x - np.abs(errneg)) result.append(xn) if errpos is not", "v T, r, labels = list(map(makearr, (T, r, labels))) #", "'linear': return newlim(oldlim) elif scale == 'log': return newlim_log(oldlim) elif", "1: return x[-1], y[-1] else: d = np.cumsum(np.sqrt(np.diff(x)**2 + np.diff(y)**2))", "= ld*frac + l0 return 10**l def log2linear(x, errneg=None, errpos=None):", "used with only a handful of stars. 
\"\"\" from mayavi", "x.size): raise ValueError(\"z.shape should be (y.size, x.size)\") dx = np.diff(x)", "np.asarray(x), np.asarray(y), np.asarray(z) if x.ndim != 1 or y.ndim !=", "np.cos(dec) x = h * np.cos(ra) y = h *", "lx, 10 ** ly if xfrac is not None: if", "2: if np.any(edges[1:,0] < edges[:-1,1]): raise ValueError('Some bins overlap') if", "scale = ax.get_xscale() ax.set_xlim(newlim_either(datalim,axlim,scale)) if xory == 'y' or xory", "mnp.inranges(x, xlim) if not np.all(inrange): n = np.sum(inrange) yends =", "[0, 0, 0] # set a temperature colormap cmap =", "def log2linear(x, errneg=None, errpos=None): xl = 10**x result = [xl]", "a slide. returns the figure object and default font size", "[] for x1, y1, z1 in list(zip(x, y, z))[:-1]: xx,", "+ pad, oldlim[1] - pad) def newlim_log(oldlim): loglim = [np.log10(l)", "xoff, yy + yoff, zz + zoff, label, figure=fig, color=(1,1,1),", "line_width=1, figure=fig) rtxt = '{:.1f} pc'.format(r) orientation=np.array([180.0, 180.0, 0.0]) mlab.text3d(r,", "np.max(dist * np.cos(dec)) x, y = r*np.cos(t), r*np.sin(t) z =", "scalars=T, mode='sphere', scale_factor=r_factor, figure=fig, resolution=100) pts.glyph.color_mode = 'color_by_scalar' # center", "readable as they are created and maximized for an audience", "w_ax_pts, h_ax_pts = w_ax_in*72, h_ax_in*72 if coordinate == 'axes': return", "raise ValueError(\"x and y should be 1-dimensional\") if z.ndim !=", "- xlim[0] h_ax_data = ylim[1] - ylim[0] return w_ax_data/w_ax_pts, h_ax_data/h_ax_pts", "z[...] z = zp plt.imshow(z, origin='lower', extent=[x.min(), x.max(), y.min(), y.max()],", "dec, dist, T=5000.0, r=1.0, labels='', view=None, size=(800,800), txt_scale=1.0): \"\"\" Make", "# deal with potentially gappy 2-column bin specifications edges =", "+ l0 return 10**l def log2linear(x, errneg=None, errpos=None): xl =", "5780.0, 'Sun'))) # get xyz coordinates z = dist *", "fontsize def pcolor_reg(x, y, z, **kw): \"\"\" Similar to `pcolor`,", "relative to the \"standard\" height. 
For slides the standard is", "axlim [0]: oldlim = oldlim[::-1] if scale == 'linear': return", "standard_figure(app, slideAR=1.6, height=1.0): \"\"\"Generate a figure of standard size for", "\"\"\" Created on Fri May 30 17:15:27 2014 @author: Parke", "np.log10(x - np.abs(errneg)) result.append(xn) if errpos is not None: xp", "to be used with only a handful of stars. \"\"\"", "order.') gaps = edges[1:,0] > edges[:-1,1] edges = np.unique(edges) if", "= list(map(np.append, (r, T, labels), (1.0, 5780.0, 'Sun'))) # get", "textBoxSize(txt, transformation=None, figure=None): \"\"\"Get the width and height of a", "n) if np.isscalar(v) else v T, r, labels = list(map(makearr,", "= fontsize def textBoxSize(txt, transformation=None, figure=None): \"\"\"Get the width and", "= np.log10(ylim) w_ax_data = xlim[1] - xlim[0] h_ax_data = ylim[1]", "Sun, with semi-accurate colors and distances as desired. Coordinates must", "TypeError('ax_or_fig must be a Figure or Axes instance, if given.')", "Useful for properly spacing text labels and such when you", "unit vec to camera view = mlab.view() az, el =", "color.maps import true_temp n = len(ra) dec, ra = dec*np.pi/180.0,", "f = d/d[-1] xp, yp = [np.interp(xfrac, f, a) for", "def textBoxSize(txt, transformation=None, figure=None): \"\"\"Get the width and height of", "halfwidth = 5.0 # use these with line.set_dashes and iterate", "** lx, 10 ** ly if xfrac is not None:", "np.log10(y), xfrac, xlbl, ylbl, scale) return 10 ** lx, 10", "the full height of a slide. returns the figure object", "when it extends beyond plot range xlim = ax.get_xlim() inrange", "if z.ndim != 2 or z.shape != (y.size, x.size): raise", "and iterate through more linestyles than come with matplotlib #", "if oldlim[1] > oldlim[0]: return (oldlim[0] - pad, oldlim[1] +", "2*np.pi, n) r = np.max(dist * np.cos(dec)) x, y =", "more linestyles than come with matplotlib # consider ussing a", "height of a slide. 
returns the figure object and default", "args = list(args) args[0], args[1] = edges, values ax =", "coordinates of mouse clicks. Click outside of the axes \"", "gappy 2-column bin specifications edges = np.asarray(edges) if edges.ndim ==", "* np.sin(ra) # make figure fig = mlab.figure(bgcolor=(0,0,0), fgcolor=(1,1,1), size=size)", "0) f = d/d[-1] xp, yp = [np.interp(xfrac, f, a)", "an implementation for symlog' 'scaled axes.') if xory == 'x'", "delta = abs(oldlim[1] - oldlim[0]) pad = delta*margin if oldlim[1]", "if xfrac == 0: return x[0], y[0] if xfrac ==", "ly = point_along_line(np.log10(x), np.log10(y), xfrac, xlbl, ylbl, scale) return 10", "coordinate='data'): \"\"\" Return x & y scale factors for converting", "= list(args) args[0], args[1] = edges, values ax = kwargs.pop('ax',", "0.0))) r, T, labels = list(map(np.append, (r, T, labels), (1.0,", "= np.insert(yhi[inrange], [0, n], yhiends) f = ax.fill_between(x,ylo,yhi,color=ecolor,alpha=ealpha) return p[0],f", "function. \"\"\" x, y, z = np.asarray(x), np.asarray(y), np.asarray(z) if", "log scale, then the factor will be given in dex.", "figsize = [fullwidth, fullwidth/slideAR*height] fig = mplot.pyplot.figure(figsize=figsize, dpi=dpi) mplot.rcParams.update({'font.size': fontsize})", "180.0, 0.0]) mlab.text3d(r, 0, 0, rtxt, figure=fig, scale=size*1.25, orient_to_camera=False, orientation=orientation)", "None: return xlbl, np.interp(xlbl, x, y) def textSize(ax_or_fig=None, coordinate='data'): \"\"\"", "0, 0] # set a temperature colormap cmap = true_temp(T)", "ld*frac + l0 return 10**l def log2linear(x, errneg=None, errpos=None): xl", "log_frac(x, frac): l0, l1 = list(map(np.log10, x)) ld = l1", "return (oldlim[0] - pad, oldlim[1] + pad) else: return (oldlim[0]", "np.asarray(y), np.asarray(z) if x.ndim != 1 or y.ndim != 1:", "fewer dashes = [[], [30, 10], [20, 8], [10, 5],", "= 5.0 # use these with line.set_dashes and iterate through", "= ylim[1] - ylim[0] return w_ax_data/w_ax_pts, h_ax_data/h_ax_pts def 
tight_axis_limits(ax=None, xory='both',", "delta*margin if oldlim[1] > oldlim[0]: return (oldlim[0] - pad, oldlim[1]", "fig.canvas.mpl_disconnect(cid) return np.array(xy) def common_axes(fig, pos=None): if pos is None:", "\" \\ \"when done.\") cid = fig.canvas.mpl_connect('button_press_event', onclick) fig.canvas.start_event_loop(timeout=timeout) fig.canvas.mpl_disconnect(cid)", "x1], [y1, y1], [0.0, z1] line = mlab.plot3d(xx, yy, zz,", "# -*- coding: utf-8 -*- \"\"\" Created on Fri May", "by sphere size r_label = 1.0 * r_factor xoff, yoff,", "+ errpos) - xl result.append(xp) return result def linear2log(x, errneg=None,", "ylo, yhi = y - yerr, y + yerr if", "d = np.cumsum(np.sqrt(np.diff(x)**2 + np.diff(y)**2)) d = np.insert(d, 0, 0)", "= args[0], args[1] # deal with potentially gappy 2-column bin", "args[0], args[1] # deal with potentially gappy 2-column bin specifications", "to another coordinate. Useful for properly spacing text labels and", "scale == 'symlog': raise NotImplementedError('Past Parke to future Parke, you", "symlog' 'scaled axes.') if xory == 'x' or xory ==", "of the axes \" \\ \"when done.\") cid = fig.canvas.mpl_connect('button_press_event',", "stars3d(ra, dec, dist, T=5000.0, r=1.0, labels='', view=None, size=(800,800), txt_scale=1.0): \"\"\"", "must be a Figure or Axes instance, if given.') w_fig_in,", "ax.dataLim.extents[[1,3]] axlim = ax.get_ylim() scale = ax.get_yscale() ax.set_ylim(newlim_either(datalim,axlim,scale)) #TODO: discard", "if xfrac is not None: if xfrac == 0: return", "bottom='off') bigax.set_zorder(-10) return bigax def log_frac(x, frac): l0, l1 =", "a log scale, then the factor will be given in", "if ax.get_yscale() == 'log': ylim = np.log10(ylim) w_ax_data = xlim[1]", "orientation=orientation) if view is not None: mlab.view(*view, figure=fig) return fig", "np.insert(y[inrange], [0, n], yends) ylo = np.insert(ylo[inrange], [0, n], yloends)", "for a in [x,y]] return xp, yp if xlbl is", "!= 1 or y.ndim != 1: raise ValueError(\"x 
and y", "axlim[1] < axlim [0]: oldlim = oldlim[::-1] if scale ==", "x, ylo) yhiends = np.interp(xlim, x, yhi) x = np.insert(x[inrange],", "= np.insert(ylo[inrange], [0, n], yloends) yhi = np.insert(yhi[inrange], [0, n],", "y = [np.insert(a, 0, 0.0) for a in [x,y]] triangles", "labels): mlab.text3d(xx + xoff, yy + yoff, zz + zoff,", "1.0/h_ax_pts if coordinate == 'data': xlim = ax.get_xlim() ylim =", "do plotting with the (much faster) `imshow` function. \"\"\" x,", "+ yc**2 == 0.0: xoff = 1.0 yoff = 0.0", "0, 0, rtxt, figure=fig, scale=size*1.25, orient_to_camera=False, orientation=orientation) if view is", "xn = xl - np.log10(x - np.abs(errneg)) result.append(xn) if errpos", "= np.insert(values, np.nonzero(gaps), np.nan) edges = mnp.lace(edges[:-1], edges[1:]) values =", "[0]: oldlim = oldlim[::-1] if scale == 'linear': return newlim(oldlim)", "None else figure if transformation is None: transformation = fig.transFigure", "= np.interp(xlim, x, yhi) x = np.insert(x[inrange], [0, n], xlim)", "ussing a ::2 slice for fewer dashes = [[], [30,", "oldlim[1] - pad) def newlim_log(oldlim): loglim = [np.log10(l) for l", "= lambda v: np.array([v] * n) if np.isscalar(v) else v", "T=5000.0, r=1.0, labels='', view=None, size=(800,800), txt_scale=1.0): \"\"\" Make a 3D", "0.0, 0.0))) r, T, labels = list(map(np.append, (r, T, labels),", "is plotted on a log scale, then the factor will", "pc (for axes labels). 
Meant to be used with only", "not None: xp = 10**(x + errpos) - xl result.append(xp)", "raise ValueError(\"The grid must be uniform\") if np.issubdtype(z.dtype, np.complexfloating): zp", "= transformation.inverted().transform bboxDisp = txt.get_window_extent(fig.canvas.renderer) bboxConv = coordConvert(bboxDisp) w =", "need to know sizes before the text is made (otherwise", "you need to know sizes before the text is made", "1 or y.ndim != 1: raise ValueError(\"x and y should", "diagram of stars positions relative to the Sun, with semi-accurate", "r, labels))) # add the sun ra, dec, dist =", "if fmt is None else ax.plot(x, y, fmt, **kw) if", "if coordinate == 'axes': return 1.0/w_ax_pts, 1.0/h_ax_pts if coordinate ==", "desired coordinates. Defaults to figure coordinates if transformation is None.\"\"\"", "= ax.get_ylim() scale = ax.get_yscale() ax.set_ylim(newlim_either(datalim,axlim,scale)) #TODO: discard this function?", "a in [x,y]] triangles = [(0, i, i + 1)", "oldlim[0]) pad = delta*margin if oldlim[1] > oldlim[0]: return (oldlim[0]", "add labels # unit vec to camera view = mlab.view()", "in list(zip(x, y, z))[:-1]: xx, yy, zz = [x1, x1],", "can use textBoxSize). Coordinate can be 'data', 'axes', or 'figure'.", "zoff]] # plot labels size = r_factor * txt_scale *", "scale, then the factor will be given in dex. 
\"\"\"", "is not None: xp = np.log10(x + errpos) - xl", "r_factor * txt_scale * 0.75 for xx, yy, zz, label", "= y - yerr[0,:] yhi = y + yerr[1,:] else:", "for l in oldlim] newloglim = newlim(loglim) return (10.0**newloglim[0], 10.0**newloglim[1])", "view = mlab.view() az, el = view[:2] hc = np.sin(el", "result.append(xp) return result def linear2log(x, errneg=None, errpos=None): xl = np.log10(x)", "# use these with line.set_dashes and iterate through more linestyles", "Set matplotlibrc values so that plots are readable as they", "'log': ylim = np.log10(ylim) w_ax_data = xlim[1] - xlim[0] h_ax_data", "z1] line = mlab.plot3d(xx, yy, zz, color=(0.7,0.7,0.7), line_width=0.5, figure=fig) lines.append(line)", "= h_ax_norm * h_fig_in w_ax_pts, h_ax_pts = w_ax_in*72, h_ax_in*72 if", "= [] def onclick(event): if not event.inaxes: fig.canvas.stop_event_loop() else: xy.append([event.xdata,", "xlbl, np.interp(xlbl, x, y) def textSize(ax_or_fig=None, coordinate='data'): \"\"\" Return x", "dy = np.diff(y) if not np.allclose(dx, dx[0], 1e-2) or not", "an audience far from a screen. 
Parameters ---------- mpl :", "plot lines down to the dec=0 plane for all but", "size=size) # plot lines down to the dec=0 plane for", "* np.cos(ra) y = h * np.sin(ra) # make figure", "ylim = np.log10(ylim) w_ax_data = xlim[1] - xlim[0] h_ax_data =", "data is plotted on a log scale, then the factor", "= np.sin(el * np.pi / 180.0) xc = hc *", "else: raise TypeError('ax_or_fig must be a Figure or Axes instance,", "(r, T, labels), (1.0, 5780.0, 'Sun'))) # get xyz coordinates", "If data coordinates are requested and the data is plotted", "to `pcolor`, but assume that the grid is uniform, and", "0, 0.0) for a in [x,y]] triangles = [(0, i,", "axes \" \\ \"when done.\") cid = fig.canvas.mpl_connect('button_press_event', onclick) fig.canvas.start_event_loop(timeout=timeout)", "center the glyphs on the data point pts.glyph.glyph_source.glyph_source.center = [0,", "onscreen_pres(mpl, screenwidth=1200): \"\"\" Set matplotlibrc values so that plots are", "[np.insert(a, 0, 0.0) for a in [x,y]] triangles = [(0,", "default font size \"\"\" if app == 'fullslide': fontsize =", "10.0 halfwidth = 5.0 # use these with line.set_dashes and", "semi-accurate colors and distances as desired. Coordinates must be in", "= point_along_line(np.log10(x), np.log10(y), xfrac, xlbl, ylbl, scale) return 10 **", "z, labels): mlab.text3d(xx + xoff, yy + yoff, zz +", "should be (y.size, x.size)\") dx = np.diff(x) dy = np.diff(y)", "pad) def newlim_log(oldlim): loglim = [np.log10(l) for l in oldlim]", "= ax.dataLim.extents[[1,3]] axlim = ax.get_ylim() scale = ax.get_yscale() ax.set_ylim(newlim_either(datalim,axlim,scale)) #TODO:", "::2 slice for fewer dashes = [[], [30, 10], [20,", "= np.diff(x) dy = np.diff(y) if not np.allclose(dx, dx[0], 1e-2)", "z = np.zeros(n+1) x, y = [np.insert(a, 0, 0.0) for", "result.append(xp) return result def step(*args, **kwargs): edges, values = args[0],", "5.0 # use these with line.set_dashes and iterate through more", "be given in dex. 
\"\"\" if ax_or_fig is None: fig", "x, yhi) x = np.insert(x[inrange], [0, n], xlim) y =", "'log': lx, ly = point_along_line(np.log10(x), np.log10(y), xfrac, xlbl, ylbl, scale)", "return xlbl, np.interp(xlbl, x, y) def textSize(ax_or_fig=None, coordinate='data'): \"\"\" Return", "list(map(makearr, (T, r, labels))) # add the sun ra, dec,", "= plt.gca() p = ax.plot(x, y, **kw) if fmt is", "yy, zz = [x1, x1], [y1, y1], [0.0, z1] line", "in question in pixels. Returns ------- None \"\"\" mpl.rcParams['lines.linewidth'] =", "ax.get_position().size w_ax_in = w_ax_norm * w_fig_in h_ax_in = h_ax_norm *", "the (much faster) `imshow` function. \"\"\" x, y, z =", "w_ax_norm, h_ax_norm = ax.get_position().size w_ax_in = w_ax_norm * w_fig_in h_ax_in", "figure=fig) ## add ra=0 line line = mlab.plot3d([0, r], [0,", "Coordinate can be 'data', 'axes', or 'figure'. If data coordinates", "if xlbl is not None: return xlbl, np.interp(xlbl, x, y)", "errpos is not None: xp = np.log10(x + errpos) -", "Similar to `pcolor`, but assume that the grid is uniform,", "r, scalars=T, mode='sphere', scale_factor=r_factor, figure=fig, resolution=100) pts.glyph.color_mode = 'color_by_scalar' #", "yhi) x = np.insert(x[inrange], [0, n], xlim) y = np.insert(y[inrange],", "xoff = yc / np.sqrt(xc**2 + yc**2) yoff = np.sqrt(1.0", "* r_factor xoff, yoff, zoff = [r_label * v for", "ax.plot(*args, **kwargs) def point_along_line(x, y, xfrac=None, xlbl=None, scale='linear'): if scale", "plt import mypy.my_numpy as mnp dpi = 100 fullwidth =", "r = np.max(dist * np.cos(dec)) x, y = r*np.cos(t), r*np.sin(t)", "ValueError('Some bins overlap') if np.any(edges[1:,0] < edges[:-1,0]): raise ValueError('Bins must", "return result def linear2log(x, errneg=None, errpos=None): xl = np.log10(x) result", "y, z = np.asarray(x), np.asarray(y), np.asarray(z) if x.ndim != 1", "and do plotting with the (much faster) `imshow` function. 
\"\"\"", "bigax.tick_params(labelleft=False, labelbottom=False, left='off', bottom='off') bigax.set_zorder(-10) return bigax def log_frac(x, frac):", "ylim[1] - ylim[0] return w_ax_data/w_ax_pts, h_ax_data/h_ax_pts def tight_axis_limits(ax=None, xory='both', margin=0.05):", "height of the figure relative to the \"standard\" height. For", "= 10**x result = [xl] if errneg is not None:", "from color.maps import true_temp n = len(ra) dec, ra =", "mlab.view(*view, figure=fig) ## add labels # unit vec to camera", "None: xn = xl - np.log10(x - np.abs(errneg)) result.append(xn) if", "dec, ra = dec*np.pi/180.0, ra*np.pi/180.0 makearr = lambda v: np.array([v]", "[5, 3]*2 + [5], [15] + [5, 3] + [5]]", "= y + yerr[1,:] else: ylo, yhi = y -", "l1 - l0 l = ld*frac + l0 return 10**l", "yhi = y - yerr, y + yerr if ecolor", "* np.sin(dec) h = dist * np.cos(dec) x = h", "pc'.format(r) orientation=np.array([180.0, 180.0, 0.0]) mlab.text3d(r, 0, 0, rtxt, figure=fig, scale=size*1.25,", "5, 10, 5, 3, 5], [15] + [5, 3]*3 +", "def newlim_log(oldlim): loglim = [np.log10(l) for l in oldlim] newloglim", "= dist * np.sin(dec) h = dist * np.cos(dec) x", "np.log10(ylim) w_ax_data = xlim[1] - xlim[0] h_ax_data = ylim[1] -", "coordinates. Defaults to figure coordinates if transformation is None.\"\"\" fig=", "= fig.add_subplot(111) else: bigax = fig.add_axes(pos) [bigax.spines[s].set_visible(False) for s in", "return 10**l def log2linear(x, errneg=None, errpos=None): xl = 10**x result", "if np.issubdtype(z.dtype, np.complexfloating): zp = np.zeros(z.shape, float) zp[...] = z[...]", "w = bboxConv[1,0] - bboxConv[0,0] h = bboxConv[1,1] - bboxConv[0,1]", "'import matplotlib as mpl'. 
screewidth : int Width of the", "/ 30.0 pts = mlab.quiver3d(x, y, z, r, r, r,", "plt.gca() p = ax.plot(x, y, **kw) if fmt is None", "np.interp(xlim, x, yhi) x = np.insert(x[inrange], [0, n], xlim) y", "x.size)\") dx = np.diff(x) dy = np.diff(y) if not np.allclose(dx,", "h_ax_pts = w_ax_in*72, h_ax_in*72 if coordinate == 'axes': return 1.0/w_ax_pts,", "pts = mlab.quiver3d(x, y, z, r, r, r, scalars=T, mode='sphere',", "def errorpoly(x, y, yerr, fmt=None, ecolor=None, ealpha=0.5, ax=None, **kw): if", "extent=[x.min(), x.max(), y.min(), y.max()], interpolation='nearest', aspect='auto', **kw) plt.axis('tight') def errorpoly(x,", "set the camera view mlab.view(focalpoint=(0.0, 0.0, 0.0), figure=fig) if view", "== 'fullslide': fontsize = 20 figsize = [fullwidth, fullwidth/slideAR*height] fig", "= 0.0 else: xoff = yc / np.sqrt(xc**2 + yc**2)", "font size \"\"\" if app == 'fullslide': fontsize = 20", "* np.cos(dec)) x, y = r*np.cos(t), r*np.sin(t) z = np.zeros(n+1)", "xl result.append(xp) return result def step(*args, **kwargs): edges, values =", "1.0 yoff = 0.0 zoff = 0.0 else: xoff =", "[30, 10], [20, 8], [10, 5], [3, 2], [30, 5,", "+ [5], [15] + [5, 3]*2 + [5], [15] +", "figure is None else figure if transformation is None: transformation", "be 1-dimensional\") if z.ndim != 2 or z.shape != (y.size,", "+ errpos) - xl result.append(xp) return result def step(*args, **kwargs):", "else: bigax = fig.add_axes(pos) [bigax.spines[s].set_visible(False) for s in ['top', 'bottom',", "= ax.get_xlim() ylim = ax.get_ylim() if ax.get_xscale() == 'log': xlim", "**kw): if ax is None: ax = plt.gca() p =", "xoff = 1.0 yoff = 0.0 zoff = 0.0 else:", "xfrac, xlbl, ylbl, scale) return 10 ** lx, 10 **", "outside of the axes \" \\ \"when done.\") cid =", "Axes instance, if given.') w_fig_in, h_fig_in = ax.get_figure().get_size_inches() if coordinate", "point pts.glyph.glyph_source.glyph_source.center = [0, 0, 0] # set a temperature", "points to another coordinate. 
Useful for properly spacing text labels", "ax.get_yscale() ax.set_ylim(newlim_either(datalim,axlim,scale)) #TODO: discard this function? def standard_figure(app, slideAR=1.6, height=1.0):", "dist * np.cos(dec) x = h * np.cos(ra) y =", "is not None: xn = xl - 10**(x - np.abs(errneg))", "isinstance(ax_or_fig, plt.Figure): fig = ax_or_fig ax = fig.gca() elif isinstance(ax_or_fig,", "the fractional height of the figure relative to the \"standard\"", "* h_fig_in w_ax_pts, h_ax_pts = w_ax_in*72, h_ax_in*72 if coordinate ==", "/ screenwidth)) mpl.rcParams['font.size'] = fontsize def textBoxSize(txt, transformation=None, figure=None): \"\"\"Get", "- pad, oldlim[1] + pad) else: return (oldlim[0] + pad,", "n], xlim) y = np.insert(y[inrange], [0, n], yends) ylo =", "zz, color=(0.7,0.7,0.7), line_width=0.5, figure=fig) lines.append(line) # plot spheres r_factor =", "[5], [15] + [5, 3]*2 + [5], [15] + [5,", "0.0: xoff = 1.0 yoff = 0.0 zoff = 0.0", "0.75 for xx, yy, zz, label in zip(x, y, z,", "[x,y]] return xp, yp if xlbl is not None: return", "np.array(xy) def common_axes(fig, pos=None): if pos is None: bigax =", "r*np.cos(t), r*np.sin(t) z = np.zeros(n+1) x, y = [np.insert(a, 0,", "ax.set_xlim(newlim_either(datalim,axlim,scale)) if xory == 'y' or xory == 'both': datalim", "the Sun, with semi-accurate colors and distances as desired. Coordinates", "made (otherwise you can use textBoxSize). Coordinate can be 'data',", "[15] + [5, 3] + [5]] def click_coords(fig=None, timeout=600.): if", "== 'both': datalim = ax.dataLim.extents[[1,3]] axlim = ax.get_ylim() scale =", "== 'log': lx, ly = point_along_line(np.log10(x), np.log10(y), xfrac, xlbl, ylbl,", "1.0/(h_fig_in*72) w_ax_norm, h_ax_norm = ax.get_position().size w_ax_in = w_ax_norm * w_fig_in", "np.log10(x) result = [x] if errneg is not None: xn", "far from a screen. 
Parameters ---------- mpl : module Current", "xc = hc * np.cos(az * np.pi / 180.0) yc", "* np.sin(az * np.pi / 180.0) zc = -np.cos(el *", "labels))) # add the sun ra, dec, dist = list(map(np.append,", "xl = 10**x result = [xl] if errneg is not", "be in increasing order.') gaps = edges[1:,0] > edges[:-1,1] edges", "full height of a slide. returns the figure object and", "coordinates if transformation is None.\"\"\" fig= txt.get_figure() if figure is", "[5, 3] + [5]] def click_coords(fig=None, timeout=600.): if fig is", "or y.ndim != 1: raise ValueError(\"x and y should be", "z = np.asarray(x), np.asarray(y), np.asarray(z) if x.ndim != 1 or", "d = np.insert(d, 0, 0) f = d/d[-1] xp, yp", "x.max(), y.min(), y.max()], interpolation='nearest', aspect='auto', **kw) plt.axis('tight') def errorpoly(x, y,", "(y.size, x.size): raise ValueError(\"z.shape should be (y.size, x.size)\") dx =", "errpos) - xl result.append(xp) return result def linear2log(x, errneg=None, errpos=None):", "or xory == 'both': datalim = ax.dataLim.extents[[0,2]] axlim = ax.get_xlim()", "for converting text sizes in points to another coordinate. 
Useful", "orthoganal to camera if xc**2 + yc**2 == 0.0: xoff", "= 10**(x + errpos) - xl result.append(xp) return result def", "ra*np.pi/180.0 makearr = lambda v: np.array([v] * n) if np.isscalar(v)", "xp = 10**(x + errpos) - xl result.append(xp) return result", "pad, oldlim[1] - pad) def newlim_log(oldlim): loglim = [np.log10(l) for", "'right']] bigax.tick_params(labelleft=False, labelbottom=False, left='off', bottom='off') bigax.set_zorder(-10) return bigax def log_frac(x,", "\"\"\" from mayavi import mlab from color.maps import true_temp n", "h_fig_in = ax.get_figure().get_size_inches() if coordinate == 'fig': return 1.0/(w_fig_in*72), 1.0/(h_fig_in*72)", "if scale == 'linear': return newlim(oldlim) elif scale == 'log':", "xory == 'both': datalim = ax.dataLim.extents[[1,3]] axlim = ax.get_ylim() scale", "[15] + [5, 3]*3 + [5], [15] + [5, 3]*2", "onclick) fig.canvas.start_event_loop(timeout=timeout) fig.canvas.mpl_disconnect(cid) return np.array(xy) def common_axes(fig, pos=None): if pos", "fmt=None, ecolor=None, ealpha=0.5, ax=None, **kw): if ax is None: ax", "ValueError('Bins must be in increasing order.') gaps = edges[1:,0] >", "bins overlap') if np.any(edges[1:,0] < edges[:-1,0]): raise ValueError('Bins must be", "8], [10, 5], [3, 2], [30, 5, 3, 5, 10,", "h_ax_in = h_ax_norm * h_fig_in w_ax_pts, h_ax_pts = w_ax_in*72, h_ax_in*72", "0.0 else: xoff = yc / np.sqrt(xc**2 + yc**2) yoff", "= view[:2] hc = np.sin(el * np.pi / 180.0) xc", "r=1.0, labels='', view=None, size=(800,800), txt_scale=1.0): \"\"\" Make a 3D diagram", "future Parke, you did\\'t write an implementation for symlog' 'scaled", "yc / np.sqrt(xc**2 + yc**2) yoff = np.sqrt(1.0 - xoff**2)", "raise ValueError('Bins must be in increasing order.') gaps = edges[1:,0]", "yoff, zoff = xc, yc, zc # scale orthogonal vec", "of a text object's bounding box transformed to the desired", "xory='both', margin=0.05): if ax is None: ax = plt.gca() def", "matplotlib # consider ussing a ::2 slice for fewer dashes", 
"2-column bin specifications edges = np.asarray(edges) if edges.ndim == 2:", "plt.Figure): fig = ax_or_fig ax = fig.gca() elif isinstance(ax_or_fig, plt.Axes):", "return (oldlim[0] + pad, oldlim[1] - pad) def newlim_log(oldlim): loglim", "args[1] # deal with potentially gappy 2-column bin specifications edges", "== 2: ylo = y - yerr[0,:] yhi = y", "**kw) if fmt is None else ax.plot(x, y, fmt, **kw)", "# scale orthogonal vec by sphere size r_label = 1.0", "def tight_axis_limits(ax=None, xory='both', margin=0.05): if ax is None: ax =", "np.log10(x + errpos) - xl result.append(xp) return result def step(*args,", "**kw): \"\"\" Similar to `pcolor`, but assume that the grid", "ax.fill_between(x,ylo,yhi,color=ecolor,alpha=ealpha) return p[0],f def onscreen_pres(mpl, screenwidth=1200): \"\"\" Set matplotlibrc values", "'color_by_scalar' # center the glyphs on the data point pts.glyph.glyph_source.glyph_source.center", "= ax.get_xlim() scale = ax.get_xscale() ax.set_xlim(newlim_either(datalim,axlim,scale)) if xory == 'y'", "if ax.get_xscale() == 'log': xlim = np.log10(xlim) if ax.get_yscale() ==", "converting text sizes in points to another coordinate. Useful for", "ax.get_xlim() ylim = ax.get_ylim() if ax.get_xscale() == 'log': xlim =", "= ax.get_yscale() ax.set_ylim(newlim_either(datalim,axlim,scale)) #TODO: discard this function? def standard_figure(app, slideAR=1.6,", "the figure object and default font size \"\"\" if app", "'fullslide': fontsize = 20 figsize = [fullwidth, fullwidth/slideAR*height] fig =", "10 ** ly if xfrac is not None: if xfrac", "from mayavi import mlab from color.maps import true_temp n =", "if xc**2 + yc**2 == 0.0: xoff = 1.0 yoff", "= fig.transFigure coordConvert = transformation.inverted().transform bboxDisp = txt.get_window_extent(fig.canvas.renderer) bboxConv =", "in degrees. 
Distance is assumed to be in pc (for", "if xory == 'x' or xory == 'both': datalim =", "must be uniform\") if np.issubdtype(z.dtype, np.complexfloating): zp = np.zeros(z.shape, float)", "spacing text labels and such when you need to know", "in ['top', 'bottom', 'left', 'right']] bigax.tick_params(labelleft=False, labelbottom=False, left='off', bottom='off') bigax.set_zorder(-10)", "relative to the Sun, with semi-accurate colors and distances as", "else v T, r, labels = list(map(makearr, (T, r, labels)))", "None: mlab.view(*view, figure=fig) ## add labels # unit vec to", "None: xp = np.log10(x + errpos) - xl result.append(xp) return", "with matplotlib # consider ussing a ::2 slice for fewer", "coordinate. Useful for properly spacing text labels and such when", "2014 @author: Parke \"\"\" from __future__ import division, print_function, absolute_import", "labels = list(map(makearr, (T, r, labels))) # add the sun", "zc # scale orthogonal vec by sphere size r_label =", "with only a handful of stars. \"\"\" from mayavi import", "pos=None): if pos is None: bigax = fig.add_subplot(111) else: bigax", "ax.dataLim.extents[[0,2]] axlim = ax.get_xlim() scale = ax.get_xscale() ax.set_xlim(newlim_either(datalim,axlim,scale)) if xory", "raise ValueError(\"z.shape should be (y.size, x.size)\") dx = np.diff(x) dy", "transformation is None.\"\"\" fig= txt.get_figure() if figure is None else", "that the grid is uniform, and do plotting with the", "'bottom', 'left', 'right']] bigax.tick_params(labelleft=False, labelbottom=False, left='off', bottom='off') bigax.set_zorder(-10) return bigax", "oldlim[::-1] if scale == 'linear': return newlim(oldlim) elif scale ==", "edges, values ax = kwargs.pop('ax', plt.gca()) return ax.plot(*args, **kwargs) def", "result.append(xn) if errpos is not None: xp = np.log10(x +", "the text is made (otherwise you can use textBoxSize). 
Coordinate", "list(args) args[0], args[1] = edges, values ax = kwargs.pop('ax', plt.gca())", "numpy as np import matplotlib as mplot import matplotlib.pyplot as", "linestyles than come with matplotlib # consider ussing a ::2", "= h * np.cos(ra) y = h * np.sin(ra) #", "module. Use 'import matplotlib as mpl'. screewidth : int Width", "r, r, scalars=T, mode='sphere', scale_factor=r_factor, figure=fig, resolution=100) pts.glyph.color_mode = 'color_by_scalar'", "of stars. \"\"\" from mayavi import mlab from color.maps import", "yhiends = np.interp(xlim, x, yhi) x = np.insert(x[inrange], [0, n],", "for v in [xoff, yoff, zoff]] # plot labels size", "np.unique(edges) if np.any(gaps): values = np.insert(values, np.nonzero(gaps), np.nan) edges =", "(800.0 / screenwidth)) mpl.rcParams['font.size'] = fontsize def textBoxSize(txt, transformation=None, figure=None):", "txt.get_figure() if figure is None else figure if transformation is", "!= 2 or z.shape != (y.size, x.size): raise ValueError(\"z.shape should", "color=(1,1,1), line_width=1, figure=fig) rtxt = '{:.1f} pc'.format(r) orientation=np.array([180.0, 180.0, 0.0])", "None else ax.plot(x, y, fmt, **kw) if len(yerr.shape) == 2:", "ylo = np.insert(ylo[inrange], [0, n], yloends) yhi = np.insert(yhi[inrange], [0,", "'left', 'right']] bigax.tick_params(labelleft=False, labelbottom=False, left='off', bottom='off') bigax.set_zorder(-10) return bigax def", "17:15:27 2014 @author: Parke \"\"\" from __future__ import division, print_function,", "isinstance(ax_or_fig, plt.Axes): ax = ax_or_fig fig = ax.get_figure() else: raise", "np.sin(az * np.pi / 180.0) zc = -np.cos(el * np.pi", "= [(0, i, i + 1) for i in range(1,", "ax.get_yscale() == 'log': ylim = np.log10(ylim) w_ax_data = xlim[1] -", "'y' or xory == 'both': datalim = ax.dataLim.extents[[1,3]] axlim =", "absolute_import import numpy as np import matplotlib as mplot import", "vec by sphere size r_label = 1.0 * r_factor xoff,", "'scaled axes.') if xory == 'x' or xory == 'both':", 
"np.any(edges[1:,0] < edges[:-1,0]): raise ValueError('Bins must be in increasing order.')", "np.zeros(n+1) x, y = [np.insert(a, 0, 0.0) for a in", "== 'log': return newlim_log(oldlim) elif scale == 'symlog': raise NotImplementedError('Past", "= w_ax_in*72, h_ax_in*72 if coordinate == 'axes': return 1.0/w_ax_pts, 1.0/h_ax_pts", "= mlab.quiver3d(x, y, z, r, r, r, scalars=T, mode='sphere', scale_factor=r_factor,", "triangles, color=(1,1,1), opacity=0.3, figure=fig) ## add ra=0 line line =", "fgcolor=(1,1,1), size=size) # plot lines down to the dec=0 plane", "grid must be uniform\") if np.issubdtype(z.dtype, np.complexfloating): zp = np.zeros(z.shape,", "np.pi / 180.0) xc = hc * np.cos(az * np.pi", "spheres r_factor = np.max(dist) / 30.0 pts = mlab.quiver3d(x, y,", "done.\") cid = fig.canvas.mpl_connect('button_press_event', onclick) fig.canvas.start_event_loop(timeout=timeout) fig.canvas.mpl_disconnect(cid) return np.array(xy) def", "xfrac == 0: return x[0], y[0] if xfrac == 1:", "true_temp n = len(ra) dec, ra = dec*np.pi/180.0, ra*np.pi/180.0 makearr", "'Sun'))) # get xyz coordinates z = dist * np.sin(dec)", "- np.log10(x - np.abs(errneg)) result.append(xn) if errpos is not None:", "= txt.get_window_extent(fig.canvas.renderer) bboxConv = coordConvert(bboxDisp) w = bboxConv[1,0] - bboxConv[0,0]", "if ecolor is None: ecolor = p[0].get_color() # deal with", "z, triangles, color=(1,1,1), opacity=0.3, figure=fig) ## add ra=0 line line", "np.insert(d, 0, 0) f = d/d[-1] xp, yp = [np.interp(xfrac,", "* 0.75 for xx, yy, zz, label in zip(x, y,", "another coordinate. 
Useful for properly spacing text labels and such", "None \"\"\" mpl.rcParams['lines.linewidth'] = 2 fontsize = round(14 / (800.0", "For slides the standard is the full height of a", "vec to camera view = mlab.view() az, el = view[:2]", "= mlab.plot3d([0, r], [0, 0], [0, 0], color=(1,1,1), line_width=1, figure=fig)", "range xlim = ax.get_xlim() inrange = mnp.inranges(x, xlim) if not", "created and maximized for an audience far from a screen.", "errneg is not None: xn = xl - 10**(x -", "vec orthoganal to camera if xc**2 + yc**2 == 0.0:", "= np.log10(x + errpos) - xl result.append(xp) return result def", "p[0],f def onscreen_pres(mpl, screenwidth=1200): \"\"\" Set matplotlibrc values so that", "round(14 / (800.0 / screenwidth)) mpl.rcParams['font.size'] = fontsize def textBoxSize(txt,", "textSize(ax_or_fig=None, coordinate='data'): \"\"\" Return x & y scale factors for", "180.0) zc = -np.cos(el * np.pi / 180.0) # unit", "Width of the screen in question in pixels. Returns -------", "line line = mlab.plot3d([0, r], [0, 0], [0, 0], color=(1,1,1),", "fig.add_subplot(111) else: bigax = fig.add_axes(pos) [bigax.spines[s].set_visible(False) for s in ['top',", "fig = ax_or_fig ax = fig.gca() elif isinstance(ax_or_fig, plt.Axes): ax", "xlim[0] h_ax_data = ylim[1] - ylim[0] return w_ax_data/w_ax_pts, h_ax_data/h_ax_pts def", "ValueError(\"The grid must be uniform\") if np.issubdtype(z.dtype, np.complexfloating): zp =", "values = args[0], args[1] # deal with potentially gappy 2-column", "for a in [x,y]] triangles = [(0, i, i +", "dashes = [[], [30, 10], [20, 8], [10, 5], [3,", "transformation.inverted().transform bboxDisp = txt.get_window_extent(fig.canvas.renderer) bboxConv = coordConvert(bboxDisp) w = bboxConv[1,0]", "2 or z.shape != (y.size, x.size): raise ValueError(\"z.shape should be", "[20, 8], [10, 5], [3, 2], [30, 5, 3, 5,", "None: fig = plt.gcf() xy = [] def onclick(event): if", "* np.cos(dec) x = h * np.cos(ra) y = h", "left='off', bottom='off') bigax.set_zorder(-10) 
return bigax def log_frac(x, frac): l0, l1", "raise ValueError('Some bins overlap') if np.any(edges[1:,0] < edges[:-1,0]): raise ValueError('Bins", "than come with matplotlib # consider ussing a ::2 slice", "np.interp(xlbl, x, y) def textSize(ax_or_fig=None, coordinate='data'): \"\"\" Return x &", "v for v in [xoff, yoff, zoff]] # plot labels", "y, xfrac=None, xlbl=None, scale='linear'): if scale == 'log': lx, ly", "if coordinate == 'data': xlim = ax.get_xlim() ylim = ax.get_ylim()", "for properly spacing text labels and such when you need", "= mnp.inranges(x, xlim) if not np.all(inrange): n = np.sum(inrange) yends", "h def stars3d(ra, dec, dist, T=5000.0, r=1.0, labels='', view=None, size=(800,800),", "el = view[:2] hc = np.sin(el * np.pi / 180.0)", "= mlab.figure(bgcolor=(0,0,0), fgcolor=(1,1,1), size=size) # plot lines down to the", "= l1 - l0 l = ld*frac + l0 return", "or z.shape != (y.size, x.size): raise ValueError(\"z.shape should be (y.size,", "fig = ax.get_figure() else: raise TypeError('ax_or_fig must be a Figure", "**kw) plt.axis('tight') def errorpoly(x, y, yerr, fmt=None, ecolor=None, ealpha=0.5, ax=None,", "newlim(oldlim) elif scale == 'log': return newlim_log(oldlim) elif scale ==", "[0, 0], [0, 0], color=(1,1,1), line_width=1, figure=fig) rtxt = '{:.1f}", "scale='linear'): if scale == 'log': lx, ly = point_along_line(np.log10(x), np.log10(y),", "camera view mlab.view(focalpoint=(0.0, 0.0, 0.0), figure=fig) if view is not", "figure if transformation is None: transformation = fig.transFigure coordConvert =", "np import matplotlib as mplot import matplotlib.pyplot as plt import", "10**x result = [xl] if errneg is not None: xn", "Return x & y scale factors for converting text sizes", "def standard_figure(app, slideAR=1.6, height=1.0): \"\"\"Generate a figure of standard size", "for s in ['top', 'bottom', 'left', 'right']] bigax.tick_params(labelleft=False, labelbottom=False, left='off',", "step(*args, **kwargs): edges, values = args[0], args[1] # deal 
with", "fractional height of the figure relative to the \"standard\" height.", "axlim = ax.get_xlim() scale = ax.get_xscale() ax.set_xlim(newlim_either(datalim,axlim,scale)) if xory ==", "return newlim_log(oldlim) elif scale == 'symlog': raise NotImplementedError('Past Parke to", "scale = ax.get_yscale() ax.set_ylim(newlim_either(datalim,axlim,scale)) #TODO: discard this function? def standard_figure(app,", "axes labels). Meant to be used with only a handful", "yc**2 == 0.0: xoff = 1.0 yoff = 0.0 zoff", "- np.abs(errneg)) result.append(xn) if errpos is not None: xp =", "y - yerr, y + yerr if ecolor is None:", "'{:.1f} pc'.format(r) orientation=np.array([180.0, 180.0, 0.0]) mlab.text3d(r, 0, 0, rtxt, figure=fig,", "[[], [30, 10], [20, 8], [10, 5], [3, 2], [30,", "edges[:-1,1]): raise ValueError('Some bins overlap') if np.any(edges[1:,0] < edges[:-1,0]): raise", "h * np.sin(ra) # make figure fig = mlab.figure(bgcolor=(0,0,0), fgcolor=(1,1,1),", "'axes': return 1.0/w_ax_pts, 1.0/h_ax_pts if coordinate == 'data': xlim =", "y - yerr[0,:] yhi = y + yerr[1,:] else: ylo,", "z))[:-1]: xx, yy, zz = [x1, x1], [y1, y1], [0.0,", "fig is None: fig = plt.gcf() xy = [] def", "coding: utf-8 -*- \"\"\" Created on Fri May 30 17:15:27", "the standard is the full height of a slide. 
returns", "bboxConv[0,1] return w, h def stars3d(ra, dec, dist, T=5000.0, r=1.0,", "Meant to be used with only a handful of stars.", "mayavi import mlab from color.maps import true_temp n = len(ra)", "= fig.add_axes(pos) [bigax.spines[s].set_visible(False) for s in ['top', 'bottom', 'left', 'right']]", "to future Parke, you did\\'t write an implementation for symlog'", "np.pi / 180.0) zc = -np.cos(el * np.pi / 180.0)", "fig.transFigure coordConvert = transformation.inverted().transform bboxDisp = txt.get_window_extent(fig.canvas.renderer) bboxConv = coordConvert(bboxDisp)", "[10, 5], [3, 2], [30, 5, 3, 5, 10, 5,", "(ra, dec, dist), (0.0, 0.0, 0.0))) r, T, labels =", "= plt.gcf() xy = [] def onclick(event): if not event.inaxes:", "# deal with matplotlib sometimes not showing polygon when it", "yerr, fmt=None, ecolor=None, ealpha=0.5, ax=None, **kw): if ax is None:", "event.inaxes: fig.canvas.stop_event_loop() else: xy.append([event.xdata, event.ydata]) print(\"Gathering coordinates of mouse clicks.", "np.sqrt(xc**2 + yc**2) yoff = np.sqrt(1.0 - xoff**2) zoff =", "ecolor = p[0].get_color() # deal with matplotlib sometimes not showing", "2], [30, 5, 3, 5, 10, 5, 3, 5], [15]", "point_along_line(np.log10(x), np.log10(y), xfrac, xlbl, ylbl, scale) return 10 ** lx,", "xfrac is not None: if xfrac == 0: return x[0],", "label in zip(x, y, z, labels): mlab.text3d(xx + xoff, yy", "the sun ra, dec, dist = list(map(np.append, (ra, dec, dist),", "bin specifications edges = np.asarray(edges) if edges.ndim == 2: if", "== 1: return x[-1], y[-1] else: d = np.cumsum(np.sqrt(np.diff(x)**2 +", "return 1.0/w_ax_pts, 1.0/h_ax_pts if coordinate == 'data': xlim = ax.get_xlim()", "Parameters ---------- mpl : module Current matplotlib module. 
Use 'import", "elif scale == 'symlog': raise NotImplementedError('Past Parke to future Parke,", "r], [0, 0], [0, 0], color=(1,1,1), line_width=1, figure=fig) rtxt =", "y) def textSize(ax_or_fig=None, coordinate='data'): \"\"\" Return x & y scale", "point_along_line(x, y, xfrac=None, xlbl=None, scale='linear'): if scale == 'log': lx,", "datalim = ax.dataLim.extents[[1,3]] axlim = ax.get_ylim() scale = ax.get_yscale() ax.set_ylim(newlim_either(datalim,axlim,scale))", "datalim = ax.dataLim.extents[[0,2]] axlim = ax.get_xlim() scale = ax.get_xscale() ax.set_xlim(newlim_either(datalim,axlim,scale))", "return result def step(*args, **kwargs): edges, values = args[0], args[1]", "fig.gca() elif isinstance(ax_or_fig, plt.Axes): ax = ax_or_fig fig = ax.get_figure()", "from __future__ import division, print_function, absolute_import import numpy as np", "'log': return newlim_log(oldlim) elif scale == 'symlog': raise NotImplementedError('Past Parke", "x[-1], y[-1] else: d = np.cumsum(np.sqrt(np.diff(x)**2 + np.diff(y)**2)) d =", "z = zp plt.imshow(z, origin='lower', extent=[x.min(), x.max(), y.min(), y.max()], interpolation='nearest',", "< axlim [0]: oldlim = oldlim[::-1] if scale == 'linear':", "colors and distances as desired. 
Coordinates must be in degrees.", "[y1, y1], [0.0, z1] line = mlab.plot3d(xx, yy, zz, color=(0.7,0.7,0.7),", "plot spheres r_factor = np.max(dist) / 30.0 pts = mlab.quiver3d(x,", "Fri May 30 17:15:27 2014 @author: Parke \"\"\" from __future__", "= fig.gca() else: if isinstance(ax_or_fig, plt.Figure): fig = ax_or_fig ax", "> oldlim[0]: return (oldlim[0] - pad, oldlim[1] + pad) else:", "values so that plots are readable as they are created", "[x,y]] triangles = [(0, i, i + 1) for i", "d/d[-1] xp, yp = [np.interp(xfrac, f, a) for a in", "# add the sun ra, dec, dist = list(map(np.append, (ra,", "= ax.get_xscale() ax.set_xlim(newlim_either(datalim,axlim,scale)) if xory == 'y' or xory ==", "if x.ndim != 1 or y.ndim != 1: raise ValueError(\"x", "args[1] = edges, values ax = kwargs.pop('ax', plt.gca()) return ax.plot(*args,", "return w_ax_data/w_ax_pts, h_ax_data/h_ax_pts def tight_axis_limits(ax=None, xory='both', margin=0.05): if ax is", "dec, dist = list(map(np.append, (ra, dec, dist), (0.0, 0.0, 0.0)))", "makearr = lambda v: np.array([v] * n) if np.isscalar(v) else", "figure relative to the \"standard\" height. For slides the standard", "zc = -np.cos(el * np.pi / 180.0) # unit vec", "errneg=None, errpos=None): xl = 10**x result = [xl] if errneg", "n], yloends) yhi = np.insert(yhi[inrange], [0, n], yhiends) f =", "will be given in dex. \"\"\" if ax_or_fig is None:", "matplotlib as mpl'. 
screewidth : int Width of the screen", "np.sum(inrange) yends = np.interp(xlim, x, y) yloends = np.interp(xlim, x,", "edges = mnp.lace(edges[:-1], edges[1:]) values = mnp.lace(values, values) args =", "Figure or Axes instance, if given.') w_fig_in, h_fig_in = ax.get_figure().get_size_inches()", "cid = fig.canvas.mpl_connect('button_press_event', onclick) fig.canvas.start_event_loop(timeout=timeout) fig.canvas.mpl_disconnect(cid) return np.array(xy) def common_axes(fig,", "matplotlibrc values so that plots are readable as they are", "= np.zeros(n+1) x, y = [np.insert(a, 0, 0.0) for a", "text sizes in points to another coordinate. Useful for properly", "a 3D diagram of stars positions relative to the Sun,", "labels). Meant to be used with only a handful of", "app == 'fullslide': fontsize = 20 figsize = [fullwidth, fullwidth/slideAR*height]", "plt.gca() def newlim(oldlim): delta = abs(oldlim[1] - oldlim[0]) pad =", "iterate through more linestyles than come with matplotlib # consider", "y, z, labels): mlab.text3d(xx + xoff, yy + yoff, zz", "np.issubdtype(z.dtype, np.complexfloating): zp = np.zeros(z.shape, float) zp[...] = z[...] z", "the data is plotted on a log scale, then the", "question in pixels. Returns ------- None \"\"\" mpl.rcParams['lines.linewidth'] = 2", "yc = hc * np.sin(az * np.pi / 180.0) zc", "mpl.rcParams['lines.linewidth'] = 2 fontsize = round(14 / (800.0 / screenwidth))", "mouse clicks. 
Click outside of the axes \" \\ \"when", "l0 l = ld*frac + l0 return 10**l def log2linear(x,", "= [x1, x1], [y1, y1], [0.0, z1] line = mlab.plot3d(xx,", "translucent dec=0 surface n = 101 t = np.linspace(0.0, 2*np.pi,", "10**(x + errpos) - xl result.append(xp) return result def linear2log(x,", "mypy.my_numpy as mnp dpi = 100 fullwidth = 10.0 halfwidth", "None: if xfrac == 0: return x[0], y[0] if xfrac", "to the dec=0 plane for all but the sun lines", "T, r, labels = list(map(makearr, (T, r, labels))) # add", "= dist * np.cos(dec) x = h * np.cos(ra) y", "= [fullwidth, fullwidth/slideAR*height] fig = mplot.pyplot.figure(figsize=figsize, dpi=dpi) mplot.rcParams.update({'font.size': fontsize}) return", "ax.get_ylim() if ax.get_xscale() == 'log': xlim = np.log10(xlim) if ax.get_yscale()", "is None: bigax = fig.add_subplot(111) else: bigax = fig.add_axes(pos) [bigax.spines[s].set_visible(False)", "is None: transformation = fig.transFigure coordConvert = transformation.inverted().transform bboxDisp =", "all but the sun lines = [] for x1, y1,", "ylim = ax.get_ylim() if ax.get_xscale() == 'log': xlim = np.log10(xlim)", "= hc * np.cos(az * np.pi / 180.0) yc =", "xlim[1] - xlim[0] h_ax_data = ylim[1] - ylim[0] return w_ax_data/w_ax_pts,", "== 'fig': return 1.0/(w_fig_in*72), 1.0/(h_fig_in*72) w_ax_norm, h_ax_norm = ax.get_position().size w_ax_in", "= ax_or_fig ax = fig.gca() elif isinstance(ax_or_fig, plt.Axes): ax =", "## add translucent dec=0 surface n = 101 t =", "must be in degrees. 
Distance is assumed to be in", "you did\\'t write an implementation for symlog' 'scaled axes.') if", "3, 5, 10, 5, 3, 5], [15] + [5, 3]*3", "np.any(gaps): values = np.insert(values, np.nonzero(gaps), np.nan) edges = mnp.lace(edges[:-1], edges[1:])", "ax.get_figure() else: raise TypeError('ax_or_fig must be a Figure or Axes", "= -np.cos(el * np.pi / 180.0) # unit vec orthoganal", "and such when you need to know sizes before the", "and default font size \"\"\" if app == 'fullslide': fontsize", "yc**2) yoff = np.sqrt(1.0 - xoff**2) zoff = 0.0 #", "h_fig_in w_ax_pts, h_ax_pts = w_ax_in*72, h_ax_in*72 if coordinate == 'axes':", ": int Width of the screen in question in pixels.", "scale) return 10 ** lx, 10 ** ly if xfrac", "if view is not None: mlab.view(*view, figure=fig) ## add labels", "lines.append(line) # plot spheres r_factor = np.max(dist) / 30.0 pts", "result def linear2log(x, errneg=None, errpos=None): xl = np.log10(x) result =", "= np.log10(x) result = [x] if errneg is not None:", "== 'y' or xory == 'both': datalim = ax.dataLim.extents[[1,3]] axlim", "def click_coords(fig=None, timeout=600.): if fig is None: fig = plt.gcf()", "x.ndim != 1 or y.ndim != 1: raise ValueError(\"x and", "- xl result.append(xp) return result def linear2log(x, errneg=None, errpos=None): xl", "or xory == 'both': datalim = ax.dataLim.extents[[1,3]] axlim = ax.get_ylim()", "matplotlib module. Use 'import matplotlib as mpl'. screewidth : int", "Parke \"\"\" from __future__ import division, print_function, absolute_import import numpy", "zp[...] = z[...] z = zp plt.imshow(z, origin='lower', extent=[x.min(), x.max(),", "height is the fractional height of the figure relative to", "0, 0) f = d/d[-1] xp, yp = [np.interp(xfrac, f,", "+ np.diff(y)**2)) d = np.insert(d, 0, 0) f = d/d[-1]", "float) zp[...] = z[...] 
z = zp plt.imshow(z, origin='lower', extent=[x.min(),", "= d/d[-1] xp, yp = [np.interp(xfrac, f, a) for a", "- bboxConv[0,0] h = bboxConv[1,1] - bboxConv[0,1] return w, h", "transformed to the desired coordinates. Defaults to figure coordinates if", "180.0) yc = hc * np.sin(az * np.pi / 180.0)", "sun ra, dec, dist = list(map(np.append, (ra, dec, dist), (0.0,", "stars. \"\"\" from mayavi import mlab from color.maps import true_temp", "= [np.log10(l) for l in oldlim] newloglim = newlim(loglim) return", "the \"standard\" height. For slides the standard is the full", "not showing polygon when it extends beyond plot range xlim", "= np.log10(xlim) if ax.get_yscale() == 'log': ylim = np.log10(ylim) w_ax_data", "plt.imshow(z, origin='lower', extent=[x.min(), x.max(), y.min(), y.max()], interpolation='nearest', aspect='auto', **kw) plt.axis('tight')", "ylo = y - yerr[0,:] yhi = y + yerr[1,:]", "oldlim[1] + pad) else: return (oldlim[0] + pad, oldlim[1] -", "as plt import mypy.my_numpy as mnp dpi = 100 fullwidth", "& y scale factors for converting text sizes in points", "beyond plot range xlim = ax.get_xlim() inrange = mnp.inranges(x, xlim)", "1.0 * r_factor xoff, yoff, zoff = [r_label * v", "ealpha=0.5, ax=None, **kw): if ax is None: ax = plt.gca()", "fig = plt.gcf() ax = fig.gca() else: if isinstance(ax_or_fig, plt.Figure):", "+ [5, 3]*3 + [5], [15] + [5, 3]*2 +", "ax.get_figure().get_size_inches() if coordinate == 'fig': return 1.0/(w_fig_in*72), 1.0/(h_fig_in*72) w_ax_norm, h_ax_norm", "None: transformation = fig.transFigure coordConvert = transformation.inverted().transform bboxDisp = txt.get_window_extent(fig.canvas.renderer)", "None: fig = plt.gcf() ax = fig.gca() else: if isinstance(ax_or_fig,", "ax.get_ylim() scale = ax.get_yscale() ax.set_ylim(newlim_either(datalim,axlim,scale)) #TODO: discard this function? def", "is None: ax = plt.gca() p = ax.plot(x, y, **kw)", "come with matplotlib # consider ussing a ::2 slice for", "degrees. 
Distance is assumed to be in pc (for axes", "* np.cos(az * np.pi / 180.0) yc = hc *", "pixels. Returns ------- None \"\"\" mpl.rcParams['lines.linewidth'] = 2 fontsize =", "= len(ra) dec, ra = dec*np.pi/180.0, ra*np.pi/180.0 makearr = lambda", "= np.unique(edges) if np.any(gaps): values = np.insert(values, np.nonzero(gaps), np.nan) edges", "dec=0 plane for all but the sun lines = []", "zz + zoff, label, figure=fig, color=(1,1,1), scale=size) ## add translucent", "(application) are: 'fullslide' height is the fractional height of the", "'x' or xory == 'both': datalim = ax.dataLim.extents[[0,2]] axlim =", "screenwidth=1200): \"\"\" Set matplotlibrc values so that plots are readable", "if errpos is not None: xp = np.log10(x + errpos)", "edges, values = args[0], args[1] # deal with potentially gappy", "scale orthogonal vec by sphere size r_label = 1.0 *", "x1, y1, z1 in list(zip(x, y, z))[:-1]: xx, yy, zz", "publishing. implemented values for app (application) are: 'fullslide' height is", "'axes', or 'figure'. If data coordinates are requested and the", "add translucent dec=0 surface n = 101 t = np.linspace(0.0,", "a text object's bounding box transformed to the desired coordinates.", "w_ax_norm * w_fig_in h_ax_in = h_ax_norm * h_fig_in w_ax_pts, h_ax_pts", "l = ld*frac + l0 return 10**l def log2linear(x, errneg=None,", "is None: fig = plt.gcf() xy = [] def onclick(event):", "5], [3, 2], [30, 5, 3, 5, 10, 5, 3,", "mlab.plot3d(xx, yy, zz, color=(0.7,0.7,0.7), line_width=0.5, figure=fig) lines.append(line) # plot spheres", "hc * np.cos(az * np.pi / 180.0) yc = hc", "print_function, absolute_import import numpy as np import matplotlib as mplot", "if len(yerr.shape) == 2: ylo = y - yerr[0,:] yhi", "z.shape != (y.size, x.size): raise ValueError(\"z.shape should be (y.size, x.size)\")", "** ly if xfrac is not None: if xfrac ==", "screen in question in pixels. 
Returns ------- None \"\"\" mpl.rcParams['lines.linewidth']", "and y should be 1-dimensional\") if z.ndim != 2 or", "ax is None: ax = plt.gca() p = ax.plot(x, y,", "+ [5], [15] + [5, 3] + [5]] def click_coords(fig=None,", "camera view = mlab.view() az, el = view[:2] hc =", "yp = [np.interp(xfrac, f, a) for a in [x,y]] return", "[3, 2], [30, 5, 3, 5, 10, 5, 3, 5],", "view mlab.view(focalpoint=(0.0, 0.0, 0.0), figure=fig) if view is not None:", "10 ** lx, 10 ** ly if xfrac is not", "np.max(dist) / 30.0 pts = mlab.quiver3d(x, y, z, r, r,", "r_label = 1.0 * r_factor xoff, yoff, zoff = [r_label", "= [r_label * v for v in [xoff, yoff, zoff]]", "y.min(), y.max()], interpolation='nearest', aspect='auto', **kw) plt.axis('tight') def errorpoly(x, y, yerr,", "must be in increasing order.') gaps = edges[1:,0] > edges[:-1,1]", "is None: ax = plt.gca() def newlim(oldlim): delta = abs(oldlim[1]", "bboxConv = coordConvert(bboxDisp) w = bboxConv[1,0] - bboxConv[0,0] h =", "returns the figure object and default font size \"\"\" if", "yhi = y + yerr[1,:] else: ylo, yhi = y", "uniform, and do plotting with the (much faster) `imshow` function.", "pad = delta*margin if oldlim[1] > oldlim[0]: return (oldlim[0] -", "0: return x[0], y[0] if xfrac == 1: return x[-1],", "dpi = 100 fullwidth = 10.0 halfwidth = 5.0 #", "when you need to know sizes before the text is", "= 20 figsize = [fullwidth, fullwidth/slideAR*height] fig = mplot.pyplot.figure(figsize=figsize, dpi=dpi)", "\"\"\" Similar to `pcolor`, but assume that the grid is", "x, y) def textSize(ax_or_fig=None, coordinate='data'): \"\"\" Return x & y", "distances as desired. Coordinates must be in degrees. Distance is", "if not np.allclose(dx, dx[0], 1e-2) or not np.allclose(dy, dy[0], 1e-2):", "yoff = 0.0 zoff = 0.0 else: xoff = yc", "\"standard\" height. 
For slides the standard is the full height", "= list(map(makearr, (T, r, labels))) # add the sun ra,", "size \"\"\" if app == 'fullslide': fontsize = 20 figsize", "!= 1: raise ValueError(\"x and y should be 1-dimensional\") if", "of the screen in question in pixels. Returns ------- None", "1e-2) or not np.allclose(dy, dy[0], 1e-2): raise ValueError(\"The grid must", "else: xoff = yc / np.sqrt(xc**2 + yc**2) yoff =", "not event.inaxes: fig.canvas.stop_event_loop() else: xy.append([event.xdata, event.ydata]) print(\"Gathering coordinates of mouse", "y, fmt, **kw) if len(yerr.shape) == 2: ylo = y", "np.pi / 180.0) yc = hc * np.sin(az * np.pi", "kwargs.pop('ax', plt.gca()) return ax.plot(*args, **kwargs) def point_along_line(x, y, xfrac=None, xlbl=None,", "(T, r, labels))) # add the sun ra, dec, dist", "line.set_dashes and iterate through more linestyles than come with matplotlib", "np.cos(dec)) x, y = r*np.cos(t), r*np.sin(t) z = np.zeros(n+1) x,", "if axlim[1] < axlim [0]: oldlim = oldlim[::-1] if scale", "scale == 'log': lx, ly = point_along_line(np.log10(x), np.log10(y), xfrac, xlbl,", "mplot.pyplot.figure(figsize=figsize, dpi=dpi) mplot.rcParams.update({'font.size': fontsize}) return fig, fontsize def pcolor_reg(x, y,", "implemented values for app (application) are: 'fullslide' height is the", "if not np.all(inrange): n = np.sum(inrange) yends = np.interp(xlim, x,", "fig = plt.gcf() xy = [] def onclick(event): if not", "is the full height of a slide. 
returns the figure", "0.0), figure=fig) if view is not None: mlab.view(*view, figure=fig) ##", "view[:2] hc = np.sin(el * np.pi / 180.0) xc =", "true_temp(T) pts.module_manager.scalar_lut_manager.lut.table = cmap # set the camera view mlab.view(focalpoint=(0.0,", "* np.pi / 180.0) zc = -np.cos(el * np.pi /", "/ np.sqrt(xc**2 + yc**2) yoff = np.sqrt(1.0 - xoff**2) zoff", "pts.glyph.color_mode = 'color_by_scalar' # center the glyphs on the data", "import division, print_function, absolute_import import numpy as np import matplotlib", "zz = [x1, x1], [y1, y1], [0.0, z1] line =", "can be 'data', 'axes', or 'figure'. If data coordinates are", "1e-2): raise ValueError(\"The grid must be uniform\") if np.issubdtype(z.dtype, np.complexfloating):", "[15] + [5, 3]*2 + [5], [15] + [5, 3]", "module Current matplotlib module. Use 'import matplotlib as mpl'. screewidth", "a temperature colormap cmap = true_temp(T) pts.module_manager.scalar_lut_manager.lut.table = cmap #", "textBoxSize). Coordinate can be 'data', 'axes', or 'figure'. If data", "not np.all(inrange): n = np.sum(inrange) yends = np.interp(xlim, x, y)", "---------- mpl : module Current matplotlib module. Use 'import matplotlib", "3] + [5]] def click_coords(fig=None, timeout=600.): if fig is None:", "l0 return 10**l def log2linear(x, errneg=None, errpos=None): xl = 10**x", "for all but the sun lines = [] for x1,", "ax = plt.gca() p = ax.plot(x, y, **kw) if fmt", "if transformation is None: transformation = fig.transFigure coordConvert = transformation.inverted().transform", "fmt, **kw) if len(yerr.shape) == 2: ylo = y -", "= delta*margin if oldlim[1] > oldlim[0]: return (oldlim[0] - pad,", "in pc (for axes labels). 
Meant to be used with", "transformation = fig.transFigure coordConvert = transformation.inverted().transform bboxDisp = txt.get_window_extent(fig.canvas.renderer) bboxConv", "mlab.plot3d([0, r], [0, 0], [0, 0], color=(1,1,1), line_width=1, figure=fig) rtxt", "h_ax_data/h_ax_pts def tight_axis_limits(ax=None, xory='both', margin=0.05): if ax is None: ax", "to figure coordinates if transformation is None.\"\"\" fig= txt.get_figure() if", "xfrac == 1: return x[-1], y[-1] else: d = np.cumsum(np.sqrt(np.diff(x)**2", "t = np.linspace(0.0, 2*np.pi, n) r = np.max(dist * np.cos(dec))", "# plot spheres r_factor = np.max(dist) / 30.0 pts =", "h_ax_in*72 if coordinate == 'axes': return 1.0/w_ax_pts, 1.0/h_ax_pts if coordinate", "coordinate == 'fig': return 1.0/(w_fig_in*72), 1.0/(h_fig_in*72) w_ax_norm, h_ax_norm = ax.get_position().size", "figure=fig) lines.append(line) # plot spheres r_factor = np.max(dist) / 30.0", "== 'log': ylim = np.log10(ylim) w_ax_data = xlim[1] - xlim[0]", "to the desired coordinates. Defaults to figure coordinates if transformation", "mpl : module Current matplotlib module. Use 'import matplotlib as", "* np.pi / 180.0) # unit vec orthoganal to camera", "y[-1] else: d = np.cumsum(np.sqrt(np.diff(x)**2 + np.diff(y)**2)) d = np.insert(d,", "values) args = list(args) args[0], args[1] = edges, values ax", "axes.') if xory == 'x' or xory == 'both': datalim", "* np.pi / 180.0) xc = hc * np.cos(az *", "r, T, labels = list(map(np.append, (r, T, labels), (1.0, 5780.0,", "then the factor will be given in dex. 
\"\"\" if", "edges.ndim == 2: if np.any(edges[1:,0] < edges[:-1,1]): raise ValueError('Some bins", "w, h def stars3d(ra, dec, dist, T=5000.0, r=1.0, labels='', view=None,", "xyz coordinates z = dist * np.sin(dec) h = dist", "np.abs(errneg)) result.append(xn) if errpos is not None: xp = np.log10(x", "return x[0], y[0] if xfrac == 1: return x[-1], y[-1]", "'log': xlim = np.log10(xlim) if ax.get_yscale() == 'log': ylim =", "x)) ld = l1 - l0 l = ld*frac +", "= np.insert(y[inrange], [0, n], yends) ylo = np.insert(ylo[inrange], [0, n],", "x = np.insert(x[inrange], [0, n], xlim) y = np.insert(y[inrange], [0,", "desired. Coordinates must be in degrees. Distance is assumed to", "figure=fig, scale=size*1.25, orient_to_camera=False, orientation=orientation) if view is not None: mlab.view(*view,", "* n) if np.isscalar(v) else v T, r, labels =", "= xl - 10**(x - np.abs(errneg)) result.append(xn) if errpos is", "is not None: mlab.view(*view, figure=fig) ## add labels # unit", "assumed to be in pc (for axes labels). Meant to", "ax.plot(x, y, **kw) if fmt is None else ax.plot(x, y,", "-np.cos(el * np.pi / 180.0) # unit vec orthoganal to", "i in range(1, n)] mlab.triangular_mesh(x, y, z, triangles, color=(1,1,1), opacity=0.3,", "elif scale == 'log': return newlim_log(oldlim) elif scale == 'symlog':", "+ [5, 3]*2 + [5], [15] + [5, 3] +", "def point_along_line(x, y, xfrac=None, xlbl=None, scale='linear'): if scale == 'log':", "xl - np.log10(x - np.abs(errneg)) result.append(xn) if errpos is not", "labels # unit vec to camera view = mlab.view() az,", "ax is None: ax = plt.gca() def newlim(oldlim): delta =", "* v for v in [xoff, yoff, zoff]] # plot", "= ax.plot(x, y, **kw) if fmt is None else ax.plot(x,", "a handful of stars. 
\"\"\" from mayavi import mlab from", "slides the standard is the full height of a slide.", "mlab.triangular_mesh(x, y, z, triangles, color=(1,1,1), opacity=0.3, figure=fig) ## add ra=0", "newlim_log(oldlim) elif scale == 'symlog': raise NotImplementedError('Past Parke to future", "!= (y.size, x.size): raise ValueError(\"z.shape should be (y.size, x.size)\") dx", "not None: xn = xl - np.log10(x - np.abs(errneg)) result.append(xn)", "set a temperature colormap cmap = true_temp(T) pts.module_manager.scalar_lut_manager.lut.table = cmap", "np.pi / 180.0) # unit vec orthoganal to camera if", "= xl - np.log10(x - np.abs(errneg)) result.append(xn) if errpos is", "= np.interp(xlim, x, y) yloends = np.interp(xlim, x, ylo) yhiends", "np.linspace(0.0, 2*np.pi, n) r = np.max(dist * np.cos(dec)) x, y", "scale=size*1.25, orient_to_camera=False, orientation=orientation) if view is not None: mlab.view(*view, figure=fig)", "ax_or_fig fig = ax.get_figure() else: raise TypeError('ax_or_fig must be a", "yloends) yhi = np.insert(yhi[inrange], [0, n], yhiends) f = ax.fill_between(x,ylo,yhi,color=ecolor,alpha=ealpha)", "txt_scale=1.0): \"\"\" Make a 3D diagram of stars positions relative", "yerr[0,:] yhi = y + yerr[1,:] else: ylo, yhi =", "sizes before the text is made (otherwise you can use", "color=(1,1,1), opacity=0.3, figure=fig) ## add ra=0 line line = mlab.plot3d([0,", "figure=None): \"\"\"Get the width and height of a text object's", "Current matplotlib module. Use 'import matplotlib as mpl'. screewidth :", "slideAR=1.6, height=1.0): \"\"\"Generate a figure of standard size for publishing.", "np.zeros(z.shape, float) zp[...] = z[...] 
z = zp plt.imshow(z, origin='lower',", "if ax_or_fig is None: fig = plt.gcf() ax = fig.gca()", "grid is uniform, and do plotting with the (much faster)", "z, r, r, r, scalars=T, mode='sphere', scale_factor=r_factor, figure=fig, resolution=100) pts.glyph.color_mode", "oldlim[1] > oldlim[0]: return (oldlim[0] - pad, oldlim[1] + pad)", "orthogonal vec by sphere size r_label = 1.0 * r_factor", "/ 180.0) # unit vec orthoganal to camera if xc**2", "= '{:.1f} pc'.format(r) orientation=np.array([180.0, 180.0, 0.0]) mlab.text3d(r, 0, 0, rtxt,", "== 'x' or xory == 'both': datalim = ax.dataLim.extents[[0,2]] axlim", "values = np.insert(values, np.nonzero(gaps), np.nan) edges = mnp.lace(edges[:-1], edges[1:]) values", "zoff = 0.0 # xoff, yoff, zoff = xc, yc,", "# plot lines down to the dec=0 plane for all", "return 1.0/(w_fig_in*72), 1.0/(h_fig_in*72) w_ax_norm, h_ax_norm = ax.get_position().size w_ax_in = w_ax_norm", "as mpl'. screewidth : int Width of the screen in", "yerr[1,:] else: ylo, yhi = y - yerr, y +", "return p[0],f def onscreen_pres(mpl, screenwidth=1200): \"\"\" Set matplotlibrc values so", "= 0.0 # xoff, yoff, zoff = xc, yc, zc", "of mouse clicks. Click outside of the axes \" \\", "dec, dist), (0.0, 0.0, 0.0))) r, T, labels = list(map(np.append,", "fig.gca() else: if isinstance(ax_or_fig, plt.Figure): fig = ax_or_fig ax =", "result = [xl] if errneg is not None: xn =", "on a log scale, then the factor will be given", "to know sizes before the text is made (otherwise you", "bigax = fig.add_subplot(111) else: bigax = fig.add_axes(pos) [bigax.spines[s].set_visible(False) for s", "import matplotlib.pyplot as plt import mypy.my_numpy as mnp dpi =", "10, 5, 3, 5], [15] + [5, 3]*3 + [5],", "# get xyz coordinates z = dist * np.sin(dec) h", "- bboxConv[0,1] return w, h def stars3d(ra, dec, dist, T=5000.0,", "list(map(np.log10, x)) ld = l1 - l0 l = ld*frac", "this function? 
def standard_figure(app, slideAR=1.6, height=1.0): \"\"\"Generate a figure of", "xory == 'y' or xory == 'both': datalim = ax.dataLim.extents[[1,3]]", "is uniform, and do plotting with the (much faster) `imshow`", "figure=fig) ## add labels # unit vec to camera view", "= zp plt.imshow(z, origin='lower', extent=[x.min(), x.max(), y.min(), y.max()], interpolation='nearest', aspect='auto',", "edges[1:]) values = mnp.lace(values, values) args = list(args) args[0], args[1]", "yoff = np.sqrt(1.0 - xoff**2) zoff = 0.0 # xoff,", "of the figure relative to the \"standard\" height. For slides", "label, figure=fig, color=(1,1,1), scale=size) ## add translucent dec=0 surface n", "1) for i in range(1, n)] mlab.triangular_mesh(x, y, z, triangles,", "ax = ax_or_fig fig = ax.get_figure() else: raise TypeError('ax_or_fig must", "fmt is None else ax.plot(x, y, fmt, **kw) if len(yerr.shape)", "is made (otherwise you can use textBoxSize). Coordinate can be", "'figure'. If data coordinates are requested and the data is", "if not event.inaxes: fig.canvas.stop_event_loop() else: xy.append([event.xdata, event.ydata]) print(\"Gathering coordinates of", "matplotlib.pyplot as plt import mypy.my_numpy as mnp dpi = 100", "1.0/w_ax_pts, 1.0/h_ax_pts if coordinate == 'data': xlim = ax.get_xlim() ylim", "v: np.array([v] * n) if np.isscalar(v) else v T, r,", "are created and maximized for an audience far from a", "def textSize(ax_or_fig=None, coordinate='data'): \"\"\" Return x & y scale factors", "text labels and such when you need to know sizes", "if coordinate == 'fig': return 1.0/(w_fig_in*72), 1.0/(h_fig_in*72) w_ax_norm, h_ax_norm =", "values = mnp.lace(values, values) args = list(args) args[0], args[1] =", "y[0] if xfrac == 1: return x[-1], y[-1] else: d", "+ zoff, label, figure=fig, color=(1,1,1), scale=size) ## add translucent dec=0", "w_ax_data = xlim[1] - xlim[0] h_ax_data = ylim[1] - ylim[0]", "h_ax_norm * h_fig_in w_ax_pts, h_ax_pts = w_ax_in*72, h_ax_in*72 if coordinate", 
"mplot.rcParams.update({'font.size': fontsize}) return fig, fontsize def pcolor_reg(x, y, z, **kw):", "0], [0, 0], color=(1,1,1), line_width=1, figure=fig) rtxt = '{:.1f} pc'.format(r)", "dec=0 surface n = 101 t = np.linspace(0.0, 2*np.pi, n)", "so that plots are readable as they are created and", "== 'linear': return newlim(oldlim) elif scale == 'log': return newlim_log(oldlim)", "= fig.canvas.mpl_connect('button_press_event', onclick) fig.canvas.start_event_loop(timeout=timeout) fig.canvas.mpl_disconnect(cid) return np.array(xy) def common_axes(fig, pos=None):", "xy.append([event.xdata, event.ydata]) print(\"Gathering coordinates of mouse clicks. Click outside of", "zoff = xc, yc, zc # scale orthogonal vec by", "for app (application) are: 'fullslide' height is the fractional height", "> edges[:-1,1] edges = np.unique(edges) if np.any(gaps): values = np.insert(values,", "y, z, r, r, r, scalars=T, mode='sphere', scale_factor=r_factor, figure=fig, resolution=100)", "lx, ly = point_along_line(np.log10(x), np.log10(y), xfrac, xlbl, ylbl, scale) return", "= np.asarray(x), np.asarray(y), np.asarray(z) if x.ndim != 1 or y.ndim", "ax_or_fig is None: fig = plt.gcf() ax = fig.gca() else:", "yhi = np.insert(yhi[inrange], [0, n], yhiends) f = ax.fill_between(x,ylo,yhi,color=ecolor,alpha=ealpha) return", "[0, n], yhiends) f = ax.fill_between(x,ylo,yhi,color=ecolor,alpha=ealpha) return p[0],f def onscreen_pres(mpl,", "resolution=100) pts.glyph.color_mode = 'color_by_scalar' # center the glyphs on the", "errneg=None, errpos=None): xl = np.log10(x) result = [x] if errneg", "(10.0**newloglim[0], 10.0**newloglim[1]) def newlim_either(oldlim,axlim,scale): if axlim[1] < axlim [0]: oldlim", "linear2log(x, errneg=None, errpos=None): xl = np.log10(x) result = [x] if", "edges[1:,0] > edges[:-1,1] edges = np.unique(edges) if np.any(gaps): values =", "of standard size for publishing. 
implemented values for app (application)", "p[0].get_color() # deal with matplotlib sometimes not showing polygon when", "the dec=0 plane for all but the sun lines =", "deal with matplotlib sometimes not showing polygon when it extends", "y, z))[:-1]: xx, yy, zz = [x1, x1], [y1, y1],", "= np.diff(y) if not np.allclose(dx, dx[0], 1e-2) or not np.allclose(dy,", "plt.gcf() ax = fig.gca() else: if isinstance(ax_or_fig, plt.Figure): fig =", "in [x,y]] return xp, yp if xlbl is not None:", "ax.get_xlim() scale = ax.get_xscale() ax.set_xlim(newlim_either(datalim,axlim,scale)) if xory == 'y' or", "ylo) yhiends = np.interp(xlim, x, yhi) x = np.insert(x[inrange], [0,", "width and height of a text object's bounding box transformed", ": module Current matplotlib module. Use 'import matplotlib as mpl'.", "errpos) - xl result.append(xp) return result def step(*args, **kwargs): edges,", "data point pts.glyph.glyph_source.glyph_source.center = [0, 0, 0] # set a", "**kw) if len(yerr.shape) == 2: ylo = y - yerr[0,:]", "np.diff(x) dy = np.diff(y) if not np.allclose(dx, dx[0], 1e-2) or", "'both': datalim = ax.dataLim.extents[[1,3]] axlim = ax.get_ylim() scale = ax.get_yscale()", "__future__ import division, print_function, absolute_import import numpy as np import", "== 'both': datalim = ax.dataLim.extents[[0,2]] axlim = ax.get_xlim() scale =", "= mlab.plot3d(xx, yy, zz, color=(0.7,0.7,0.7), line_width=0.5, figure=fig) lines.append(line) # plot", "if errneg is not None: xn = xl - 10**(x", "\"\"\" Return x & y scale factors for converting text", "[0, n], xlim) y = np.insert(y[inrange], [0, n], yends) ylo", "10**(x - np.abs(errneg)) result.append(xn) if errpos is not None: xp", "scale == 'log': return newlim_log(oldlim) elif scale == 'symlog': raise", "NotImplementedError('Past Parke to future Parke, you did\\'t write an implementation", "w_ax_data/w_ax_pts, h_ax_data/h_ax_pts def tight_axis_limits(ax=None, xory='both', margin=0.05): if ax is None:", "figure coordinates if transformation 
is None.\"\"\" fig= txt.get_figure() if figure", "for symlog' 'scaled axes.') if xory == 'x' or xory", "maximized for an audience far from a screen. Parameters ----------", "\"\"\" if ax_or_fig is None: fig = plt.gcf() ax =", "= fig.gca() elif isinstance(ax_or_fig, plt.Axes): ax = ax_or_fig fig =", "gaps = edges[1:,0] > edges[:-1,1] edges = np.unique(edges) if np.any(gaps):", "transformation is None: transformation = fig.transFigure coordConvert = transformation.inverted().transform bboxDisp", "else: ylo, yhi = y - yerr, y + yerr", "ax_or_fig ax = fig.gca() elif isinstance(ax_or_fig, plt.Axes): ax = ax_or_fig", "labels='', view=None, size=(800,800), txt_scale=1.0): \"\"\" Make a 3D diagram of", "= ax.get_ylim() if ax.get_xscale() == 'log': xlim = np.log10(xlim) if", "figure=fig) if view is not None: mlab.view(*view, figure=fig) ## add", "int Width of the screen in question in pixels. Returns", "in range(1, n)] mlab.triangular_mesh(x, y, z, triangles, color=(1,1,1), opacity=0.3, figure=fig)", "fig.canvas.stop_event_loop() else: xy.append([event.xdata, event.ydata]) print(\"Gathering coordinates of mouse clicks. Click", "import true_temp n = len(ra) dec, ra = dec*np.pi/180.0, ra*np.pi/180.0", "size for publishing. implemented values for app (application) are: 'fullslide'", "== 0.0: xoff = 1.0 yoff = 0.0 zoff =", "= abs(oldlim[1] - oldlim[0]) pad = delta*margin if oldlim[1] >", "factor will be given in dex. 
\"\"\" if ax_or_fig is", "elif isinstance(ax_or_fig, plt.Axes): ax = ax_or_fig fig = ax.get_figure() else:", "= edges, values ax = kwargs.pop('ax', plt.gca()) return ax.plot(*args, **kwargs)", "common_axes(fig, pos=None): if pos is None: bigax = fig.add_subplot(111) else:", "\"\"\"Get the width and height of a text object's bounding", "= 10.0 halfwidth = 5.0 # use these with line.set_dashes", "l0, l1 = list(map(np.log10, x)) ld = l1 - l0", "[bigax.spines[s].set_visible(False) for s in ['top', 'bottom', 'left', 'right']] bigax.tick_params(labelleft=False, labelbottom=False,", "[] def onclick(event): if not event.inaxes: fig.canvas.stop_event_loop() else: xy.append([event.xdata, event.ydata])", "not np.allclose(dy, dy[0], 1e-2): raise ValueError(\"The grid must be uniform\")", "= xlim[1] - xlim[0] h_ax_data = ylim[1] - ylim[0] return", "T, labels), (1.0, 5780.0, 'Sun'))) # get xyz coordinates z", "return x[-1], y[-1] else: d = np.cumsum(np.sqrt(np.diff(x)**2 + np.diff(y)**2)) d", "= kwargs.pop('ax', plt.gca()) return ax.plot(*args, **kwargs) def point_along_line(x, y, xfrac=None,", "bboxDisp = txt.get_window_extent(fig.canvas.renderer) bboxConv = coordConvert(bboxDisp) w = bboxConv[1,0] -", "is None else ax.plot(x, y, fmt, **kw) if len(yerr.shape) ==", "10.0**newloglim[1]) def newlim_either(oldlim,axlim,scale): if axlim[1] < axlim [0]: oldlim =", "import mlab from color.maps import true_temp n = len(ra) dec,", "xfrac=None, xlbl=None, scale='linear'): if scale == 'log': lx, ly =", "np.any(edges[1:,0] < edges[:-1,1]): raise ValueError('Some bins overlap') if np.any(edges[1:,0] <", "a Figure or Axes instance, if given.') w_fig_in, h_fig_in =", "to the Sun, with semi-accurate colors and distances as desired.", "result.append(xn) if errpos is not None: xp = 10**(x +", "bigax = fig.add_axes(pos) [bigax.spines[s].set_visible(False) for s in ['top', 'bottom', 'left',", "object and default font size \"\"\" if app == 'fullslide':", "xc, yc, zc # scale orthogonal vec by sphere 
size", "bboxConv[1,0] - bboxConv[0,0] h = bboxConv[1,1] - bboxConv[0,1] return w,", "positions relative to the Sun, with semi-accurate colors and distances", "lines = [] for x1, y1, z1 in list(zip(x, y,", "be (y.size, x.size)\") dx = np.diff(x) dy = np.diff(y) if", "newloglim = newlim(loglim) return (10.0**newloglim[0], 10.0**newloglim[1]) def newlim_either(oldlim,axlim,scale): if axlim[1]", "= ax.fill_between(x,ylo,yhi,color=ecolor,alpha=ealpha) return p[0],f def onscreen_pres(mpl, screenwidth=1200): \"\"\" Set matplotlibrc", "/ 180.0) xc = hc * np.cos(az * np.pi /", "# set a temperature colormap cmap = true_temp(T) pts.module_manager.scalar_lut_manager.lut.table =", "rtxt, figure=fig, scale=size*1.25, orient_to_camera=False, orientation=orientation) if view is not None:", "= ax.get_position().size w_ax_in = w_ax_norm * w_fig_in h_ax_in = h_ax_norm", "= bboxConv[1,1] - bboxConv[0,1] return w, h def stars3d(ra, dec,", "xx, yy, zz = [x1, x1], [y1, y1], [0.0, z1]", "x, y = [np.insert(a, 0, 0.0) for a in [x,y]]", "np.interp(xlim, x, y) yloends = np.interp(xlim, x, ylo) yhiends =", "not None: return xlbl, np.interp(xlbl, x, y) def textSize(ax_or_fig=None, coordinate='data'):", "in increasing order.') gaps = edges[1:,0] > edges[:-1,1] edges =", "handful of stars. 
\"\"\" from mayavi import mlab from color.maps", "/ 180.0) zc = -np.cos(el * np.pi / 180.0) #", "Returns ------- None \"\"\" mpl.rcParams['lines.linewidth'] = 2 fontsize = round(14", "labels and such when you need to know sizes before", "ax.get_xscale() == 'log': xlim = np.log10(xlim) if ax.get_yscale() == 'log':", "0.0, 0.0), figure=fig) if view is not None: mlab.view(*view, figure=fig)", "None: ecolor = p[0].get_color() # deal with matplotlib sometimes not", "showing polygon when it extends beyond plot range xlim =", "def common_axes(fig, pos=None): if pos is None: bigax = fig.add_subplot(111)", "height of a text object's bounding box transformed to the", "not np.allclose(dx, dx[0], 1e-2) or not np.allclose(dy, dy[0], 1e-2): raise", "plt.axis('tight') def errorpoly(x, y, yerr, fmt=None, ecolor=None, ealpha=0.5, ax=None, **kw):", "z = dist * np.sin(dec) h = dist * np.cos(dec)", "(y.size, x.size)\") dx = np.diff(x) dy = np.diff(y) if not", "len(yerr.shape) == 2: ylo = y - yerr[0,:] yhi =", "before the text is made (otherwise you can use textBoxSize).", "in zip(x, y, z, labels): mlab.text3d(xx + xoff, yy +", "= r*np.cos(t), r*np.sin(t) z = np.zeros(n+1) x, y = [np.insert(a,", "[np.interp(xfrac, f, a) for a in [x,y]] return xp, yp", "ValueError(\"z.shape should be (y.size, x.size)\") dx = np.diff(x) dy =", "as mplot import matplotlib.pyplot as plt import mypy.my_numpy as mnp", "Parke, you did\\'t write an implementation for symlog' 'scaled axes.')", "== 2: if np.any(edges[1:,0] < edges[:-1,1]): raise ValueError('Some bins overlap')", "== 'symlog': raise NotImplementedError('Past Parke to future Parke, you did\\'t", "0.0]) mlab.text3d(r, 0, 0, rtxt, figure=fig, scale=size*1.25, orient_to_camera=False, orientation=orientation) if", "these with line.set_dashes and iterate through more linestyles than come", "pos is None: bigax = fig.add_subplot(111) else: bigax = fig.add_axes(pos)", "else ax.plot(x, y, fmt, **kw) if len(yerr.shape) == 2: ylo", "y, z, **kw): \"\"\" 
Similar to `pcolor`, but assume that", "ax=None, **kw): if ax is None: ax = plt.gca() p", "unit vec orthoganal to camera if xc**2 + yc**2 ==", "[xl] if errneg is not None: xn = xl -", "if np.any(edges[1:,0] < edges[:-1,0]): raise ValueError('Bins must be in increasing", "cmap = true_temp(T) pts.module_manager.scalar_lut_manager.lut.table = cmap # set the camera", "as mnp dpi = 100 fullwidth = 10.0 halfwidth =", "fontsize def textBoxSize(txt, transformation=None, figure=None): \"\"\"Get the width and height", "know sizes before the text is made (otherwise you can", "with potentially gappy 2-column bin specifications edges = np.asarray(edges) if", "10], [20, 8], [10, 5], [3, 2], [30, 5, 3,", "# unit vec orthoganal to camera if xc**2 + yc**2", "box transformed to the desired coordinates. Defaults to figure coordinates", "dy[0], 1e-2): raise ValueError(\"The grid must be uniform\") if np.issubdtype(z.dtype,", "for x1, y1, z1 in list(zip(x, y, z))[:-1]: xx, yy,", "fontsize = 20 figsize = [fullwidth, fullwidth/slideAR*height] fig = mplot.pyplot.figure(figsize=figsize,", "mlab.view() az, el = view[:2] hc = np.sin(el * np.pi", "oldlim] newloglim = newlim(loglim) return (10.0**newloglim[0], 10.0**newloglim[1]) def newlim_either(oldlim,axlim,scale): if", "yy, zz, color=(0.7,0.7,0.7), line_width=0.5, figure=fig) lines.append(line) # plot spheres r_factor", "- ylim[0] return w_ax_data/w_ax_pts, h_ax_data/h_ax_pts def tight_axis_limits(ax=None, xory='both', margin=0.05): if", "zp plt.imshow(z, origin='lower', extent=[x.min(), x.max(), y.min(), y.max()], interpolation='nearest', aspect='auto', **kw)", "= np.max(dist) / 30.0 pts = mlab.quiver3d(x, y, z, r,", "yoff, zoff]] # plot labels size = r_factor * txt_scale", "z, **kw): \"\"\" Similar to `pcolor`, but assume that the", "+ yerr if ecolor is None: ecolor = p[0].get_color() #", "line_width=0.5, figure=fig) lines.append(line) # plot spheres r_factor = np.max(dist) /", "v in [xoff, yoff, zoff]] # plot labels size =", 
"size=(800,800), txt_scale=1.0): \"\"\" Make a 3D diagram of stars positions", "in [xoff, yoff, zoff]] # plot labels size = r_factor", "ax.get_xlim() inrange = mnp.inranges(x, xlim) if not np.all(inrange): n =", "plt.Axes): ax = ax_or_fig fig = ax.get_figure() else: raise TypeError('ax_or_fig", "= mnp.lace(values, values) args = list(args) args[0], args[1] = edges,", "a in [x,y]] return xp, yp if xlbl is not", "w_ax_in*72, h_ax_in*72 if coordinate == 'axes': return 1.0/w_ax_pts, 1.0/h_ax_pts if", "line = mlab.plot3d([0, r], [0, 0], [0, 0], color=(1,1,1), line_width=1,", "Created on Fri May 30 17:15:27 2014 @author: Parke \"\"\"", "if np.any(gaps): values = np.insert(values, np.nonzero(gaps), np.nan) edges = mnp.lace(edges[:-1],", "+ yerr[1,:] else: ylo, yhi = y - yerr, y", "= np.insert(d, 0, 0) f = d/d[-1] xp, yp =", "= coordConvert(bboxDisp) w = bboxConv[1,0] - bboxConv[0,0] h = bboxConv[1,1]", "np.isscalar(v) else v T, r, labels = list(map(makearr, (T, r,", "n = len(ra) dec, ra = dec*np.pi/180.0, ra*np.pi/180.0 makearr =", "orientation=np.array([180.0, 180.0, 0.0]) mlab.text3d(r, 0, 0, rtxt, figure=fig, scale=size*1.25, orient_to_camera=False,", "return np.array(xy) def common_axes(fig, pos=None): if pos is None: bigax", "not None: xp = np.log10(x + errpos) - xl result.append(xp)", "given.') w_fig_in, h_fig_in = ax.get_figure().get_size_inches() if coordinate == 'fig': return", "# make figure fig = mlab.figure(bgcolor=(0,0,0), fgcolor=(1,1,1), size=size) # plot", "= np.interp(xlim, x, ylo) yhiends = np.interp(xlim, x, yhi) x", "return (10.0**newloglim[0], 10.0**newloglim[1]) def newlim_either(oldlim,axlim,scale): if axlim[1] < axlim [0]:", "+ 1) for i in range(1, n)] mlab.triangular_mesh(x, y, z,", "= np.max(dist * np.cos(dec)) x, y = r*np.cos(t), r*np.sin(t) z", "figure=fig, color=(1,1,1), scale=size) ## add translucent dec=0 surface n =", "Parke to future Parke, you did\\'t write an implementation for", "0.0 zoff = 0.0 else: xoff = yc / np.sqrt(xc**2", "add 
ra=0 line line = mlab.plot3d([0, r], [0, 0], [0,", "the factor will be given in dex. \"\"\" if ax_or_fig", "result = [x] if errneg is not None: xn =", "axlim = ax.get_ylim() scale = ax.get_yscale() ax.set_ylim(newlim_either(datalim,axlim,scale)) #TODO: discard this", "[30, 5, 3, 5, 10, 5, 3, 5], [15] +", "plt.gca()) return ax.plot(*args, **kwargs) def point_along_line(x, y, xfrac=None, xlbl=None, scale='linear'):", "app (application) are: 'fullslide' height is the fractional height of", "[0, n], yends) ylo = np.insert(ylo[inrange], [0, n], yloends) yhi", "1.0/(w_fig_in*72), 1.0/(h_fig_in*72) w_ax_norm, h_ax_norm = ax.get_position().size w_ax_in = w_ax_norm *", "but assume that the grid is uniform, and do plotting", "2 fontsize = round(14 / (800.0 / screenwidth)) mpl.rcParams['font.size'] =", "\\ \"when done.\") cid = fig.canvas.mpl_connect('button_press_event', onclick) fig.canvas.start_event_loop(timeout=timeout) fig.canvas.mpl_disconnect(cid) return", "return xp, yp if xlbl is not None: return xlbl,", "deal with potentially gappy 2-column bin specifications edges = np.asarray(edges)", "ax.plot(x, y, fmt, **kw) if len(yerr.shape) == 2: ylo =", "newlim_either(oldlim,axlim,scale): if axlim[1] < axlim [0]: oldlim = oldlim[::-1] if", "`pcolor`, but assume that the grid is uniform, and do", "as they are created and maximized for an audience far", "yerr if ecolor is None: ecolor = p[0].get_color() # deal", "figure=fig, resolution=100) pts.glyph.color_mode = 'color_by_scalar' # center the glyphs on", "line = mlab.plot3d(xx, yy, zz, color=(0.7,0.7,0.7), line_width=0.5, figure=fig) lines.append(line) #", "labels), (1.0, 5780.0, 'Sun'))) # get xyz coordinates z =", "use these with line.set_dashes and iterate through more linestyles than", "w_ax_in = w_ax_norm * w_fig_in h_ax_in = h_ax_norm * h_fig_in", "is None else figure if transformation is None: transformation =", "dist), (0.0, 0.0, 0.0))) r, T, labels = list(map(np.append, (r,", "a ::2 slice for fewer dashes = [[], 
[30, 10],", "edges[:-1,0]): raise ValueError('Bins must be in increasing order.') gaps =", "ylim[0] return w_ax_data/w_ax_pts, h_ax_data/h_ax_pts def tight_axis_limits(ax=None, xory='both', margin=0.05): if ax", "bounding box transformed to the desired coordinates. Defaults to figure", "log2linear(x, errneg=None, errpos=None): xl = 10**x result = [xl] if", "< edges[:-1,1]): raise ValueError('Some bins overlap') if np.any(edges[1:,0] < edges[:-1,0]):", "l in oldlim] newloglim = newlim(loglim) return (10.0**newloglim[0], 10.0**newloglim[1]) def", "labelbottom=False, left='off', bottom='off') bigax.set_zorder(-10) return bigax def log_frac(x, frac): l0,", "x & y scale factors for converting text sizes in", "given in dex. \"\"\" if ax_or_fig is None: fig =", "for publishing. implemented values for app (application) are: 'fullslide' height", "else: if isinstance(ax_or_fig, plt.Figure): fig = ax_or_fig ax = fig.gca()", "plane for all but the sun lines = [] for", "\"\"\" Make a 3D diagram of stars positions relative to", "= 'color_by_scalar' # center the glyphs on the data point", "data coordinates are requested and the data is plotted on", "y = np.insert(y[inrange], [0, n], yends) ylo = np.insert(ylo[inrange], [0,", "that plots are readable as they are created and maximized", "y + yerr if ecolor is None: ecolor = p[0].get_color()", "to be in pc (for axes labels). Meant to be", "'fullslide' height is the fractional height of the figure relative", "if app == 'fullslide': fontsize = 20 figsize = [fullwidth,", "np.cos(az * np.pi / 180.0) yc = hc * np.sin(az", "Click outside of the axes \" \\ \"when done.\") cid", "coordConvert(bboxDisp) w = bboxConv[1,0] - bboxConv[0,0] h = bboxConv[1,1] -", "color=(1,1,1), scale=size) ## add translucent dec=0 surface n = 101", "the screen in question in pixels. Returns ------- None \"\"\"", "else: d = np.cumsum(np.sqrt(np.diff(x)**2 + np.diff(y)**2)) d = np.insert(d, 0,", "zp = np.zeros(z.shape, float) zp[...] = z[...] 
z = zp", "or not np.allclose(dy, dy[0], 1e-2): raise ValueError(\"The grid must be", "if xory == 'y' or xory == 'both': datalim =", "print(\"Gathering coordinates of mouse clicks. Click outside of the axes", "from a screen. Parameters ---------- mpl : module Current matplotlib", "are requested and the data is plotted on a log", "a screen. Parameters ---------- mpl : module Current matplotlib module.", "mlab.text3d(r, 0, 0, rtxt, figure=fig, scale=size*1.25, orient_to_camera=False, orientation=orientation) if view", "figure object and default font size \"\"\" if app ==", "np.insert(values, np.nonzero(gaps), np.nan) edges = mnp.lace(edges[:-1], edges[1:]) values = mnp.lace(values,", "= p[0].get_color() # deal with matplotlib sometimes not showing polygon", "if errneg is not None: xn = xl - np.log10(x", "screewidth : int Width of the screen in question in", "Defaults to figure coordinates if transformation is None.\"\"\" fig= txt.get_figure()", "= w_ax_norm * w_fig_in h_ax_in = h_ax_norm * h_fig_in w_ax_pts,", "the width and height of a text object's bounding box", "y should be 1-dimensional\") if z.ndim != 2 or z.shape", "h * np.cos(ra) y = h * np.sin(ra) # make", "[0, 0], color=(1,1,1), line_width=1, figure=fig) rtxt = '{:.1f} pc'.format(r) orientation=np.array([180.0,", "w_fig_in h_ax_in = h_ax_norm * h_fig_in w_ax_pts, h_ax_pts = w_ax_in*72,", "plots are readable as they are created and maximized for", "xlim) if not np.all(inrange): n = np.sum(inrange) yends = np.interp(xlim,", "you can use textBoxSize). Coordinate can be 'data', 'axes', or", "labels size = r_factor * txt_scale * 0.75 for xx,", "np.sqrt(1.0 - xoff**2) zoff = 0.0 # xoff, yoff, zoff", "= [xl] if errneg is not None: xn = xl", "h = dist * np.cos(dec) x = h * np.cos(ra)", "xoff, yoff, zoff = [r_label * v for v in", "0, rtxt, figure=fig, scale=size*1.25, orient_to_camera=False, orientation=orientation) if view is not", "is assumed to be in pc (for axes labels). 
Meant", "xl = np.log10(x) result = [x] if errneg is not", "180.0) # unit vec orthoganal to camera if xc**2 +", "- xl result.append(xp) return result def step(*args, **kwargs): edges, values", "\"\"\"Generate a figure of standard size for publishing. implemented values", "- yerr[0,:] yhi = y + yerr[1,:] else: ylo, yhi", "= list(map(np.append, (ra, dec, dist), (0.0, 0.0, 0.0))) r, T,", "y, z, triangles, color=(1,1,1), opacity=0.3, figure=fig) ## add ra=0 line", "= 101 t = np.linspace(0.0, 2*np.pi, n) r = np.max(dist", "* w_fig_in h_ax_in = h_ax_norm * h_fig_in w_ax_pts, h_ax_pts =", "not None: xn = xl - 10**(x - np.abs(errneg)) result.append(xn)", "y + yerr[1,:] else: ylo, yhi = y - yerr,", "should be 1-dimensional\") if z.ndim != 2 or z.shape !=", "# center the glyphs on the data point pts.glyph.glyph_source.glyph_source.center =", "yoff, zoff = [r_label * v for v in [xoff,", "mlab.text3d(xx + xoff, yy + yoff, zz + zoff, label,", "in dex. \"\"\" if ax_or_fig is None: fig = plt.gcf()", "np.diff(y)**2)) d = np.insert(d, 0, 0) f = d/d[-1] xp,", "+ [5]] def click_coords(fig=None, timeout=600.): if fig is None: fig", "if pos is None: bigax = fig.add_subplot(111) else: bigax =", "y) yloends = np.interp(xlim, x, ylo) yhiends = np.interp(xlim, x,", "on Fri May 30 17:15:27 2014 @author: Parke \"\"\" from", "rtxt = '{:.1f} pc'.format(r) orientation=np.array([180.0, 180.0, 0.0]) mlab.text3d(r, 0, 0,", "= z[...] z = zp plt.imshow(z, origin='lower', extent=[x.min(), x.max(), y.min(),", "extends beyond plot range xlim = ax.get_xlim() inrange = mnp.inranges(x,", "are readable as they are created and maximized for an", "figure fig = mlab.figure(bgcolor=(0,0,0), fgcolor=(1,1,1), size=size) # plot lines down", "- 10**(x - np.abs(errneg)) result.append(xn) if errpos is not None:", "object's bounding box transformed to the desired coordinates. 
Defaults to", "def newlim_either(oldlim,axlim,scale): if axlim[1] < axlim [0]: oldlim = oldlim[::-1]", "------- None \"\"\" mpl.rcParams['lines.linewidth'] = 2 fontsize = round(14 /", "did\\'t write an implementation for symlog' 'scaled axes.') if xory", "is None: ecolor = p[0].get_color() # deal with matplotlib sometimes", "scale_factor=r_factor, figure=fig, resolution=100) pts.glyph.color_mode = 'color_by_scalar' # center the glyphs", "np.interp(xlim, x, ylo) yhiends = np.interp(xlim, x, yhi) x =", "fig.canvas.mpl_connect('button_press_event', onclick) fig.canvas.start_event_loop(timeout=timeout) fig.canvas.mpl_disconnect(cid) return np.array(xy) def common_axes(fig, pos=None): if", "(otherwise you can use textBoxSize). Coordinate can be 'data', 'axes',", "raise TypeError('ax_or_fig must be a Figure or Axes instance, if", "mplot import matplotlib.pyplot as plt import mypy.my_numpy as mnp dpi", "xn = xl - 10**(x - np.abs(errneg)) result.append(xn) if errpos", "size r_label = 1.0 * r_factor xoff, yoff, zoff =", "None: xp = 10**(x + errpos) - xl result.append(xp) return", "np.insert(ylo[inrange], [0, n], yloends) yhi = np.insert(yhi[inrange], [0, n], yhiends)", "+ yoff, zz + zoff, label, figure=fig, color=(1,1,1), scale=size) ##", "dec*np.pi/180.0, ra*np.pi/180.0 makearr = lambda v: np.array([v] * n) if", "ax = fig.gca() else: if isinstance(ax_or_fig, plt.Figure): fig = ax_or_fig", "\"\"\" mpl.rcParams['lines.linewidth'] = 2 fontsize = round(14 / (800.0 /", "f, a) for a in [x,y]] return xp, yp if", "\"\"\" x, y, z = np.asarray(x), np.asarray(y), np.asarray(z) if x.ndim", "fig.canvas.start_event_loop(timeout=timeout) fig.canvas.mpl_disconnect(cid) return np.array(xy) def common_axes(fig, pos=None): if pos is", "None: bigax = fig.add_subplot(111) else: bigax = fig.add_axes(pos) [bigax.spines[s].set_visible(False) for", "if fig is None: fig = plt.gcf() xy = []", "ld = l1 - l0 l = ld*frac + l0", "down to the dec=0 plane for all but the sun", "or 'figure'. 
If data coordinates are requested and the data", "else: xy.append([event.xdata, event.ydata]) print(\"Gathering coordinates of mouse clicks. Click outside", "coordConvert = transformation.inverted().transform bboxDisp = txt.get_window_extent(fig.canvas.renderer) bboxConv = coordConvert(bboxDisp) w", "labels = list(map(np.append, (r, T, labels), (1.0, 5780.0, 'Sun'))) #", "xlim = np.log10(xlim) if ax.get_yscale() == 'log': ylim = np.log10(ylim)", "through more linestyles than come with matplotlib # consider ussing", "the sun lines = [] for x1, y1, z1 in", "= ax.dataLim.extents[[0,2]] axlim = ax.get_xlim() scale = ax.get_xscale() ax.set_xlim(newlim_either(datalim,axlim,scale)) if", "interpolation='nearest', aspect='auto', **kw) plt.axis('tight') def errorpoly(x, y, yerr, fmt=None, ecolor=None,", "mlab from color.maps import true_temp n = len(ra) dec, ra", "scale=size) ## add translucent dec=0 surface n = 101 t", "fullwidth = 10.0 halfwidth = 5.0 # use these with", "\"\"\" from __future__ import division, print_function, absolute_import import numpy as", "errorpoly(x, y, yerr, fmt=None, ecolor=None, ealpha=0.5, ax=None, **kw): if ax", "[5]] def click_coords(fig=None, timeout=600.): if fig is None: fig =", "figure=fig) rtxt = '{:.1f} pc'.format(r) orientation=np.array([180.0, 180.0, 0.0]) mlab.text3d(r, 0,", "== 'axes': return 1.0/w_ax_pts, 1.0/h_ax_pts if coordinate == 'data': xlim", "T, labels = list(map(np.append, (r, T, labels), (1.0, 5780.0, 'Sun')))", "the camera view mlab.view(focalpoint=(0.0, 0.0, 0.0), figure=fig) if view is", "y = h * np.sin(ra) # make figure fig =", "list(map(np.append, (ra, dec, dist), (0.0, 0.0, 0.0))) r, T, labels", "May 30 17:15:27 2014 @author: Parke \"\"\" from __future__ import", "# plot labels size = r_factor * txt_scale * 0.75", "tight_axis_limits(ax=None, xory='both', margin=0.05): if ax is None: ax = plt.gca()", "ra = dec*np.pi/180.0, ra*np.pi/180.0 makearr = lambda v: np.array([v] *", "= ax.get_xlim() inrange = mnp.inranges(x, 
xlim) if not np.all(inrange): n", "+ yc**2) yoff = np.sqrt(1.0 - xoff**2) zoff = 0.0", "yoff, zz + zoff, label, figure=fig, color=(1,1,1), scale=size) ## add", "errneg is not None: xn = xl - np.log10(x -", "plotted on a log scale, then the factor will be" ]
[ "dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def delete(self, request, *args,", "# site.updated_by = self.request.user # site.date_updated = timezone.now() power.save() #", "HttpResponseRedirect(comic.get_absolute_url()) @method_decorator(login_required, name='dispatch') class CharacterDeleteView(generic.DeleteView): model =Character success_message = \"Character", "\\ .delete() CharacterComic.objects \\ .filter(character_id=self.object.character_id) \\ .delete() self.object.delete() return HttpResponseRedirect(self.get_success_url())", "= 'marvel_world/comic_new.html' # fields = '__all__' <-- superseded by form_class", "if form.is_valid(): character = form.save(commit=False) character.save() for power in form.cleaned_data['super_power']:", "**kwargs) def post(self, request): form = ComicForm(request.POST) if form.is_valid(): comic", "context_object_name= 'comic' template_name = 'marvel_world/comic_information.html' @method_decorator(login_required, name='dispatch') class PowerListView(generic.ListView): model", "*args, **kwargs): return super().dispatch(*args, **kwargs) def form_valid(self, form): power =", "@method_decorator(login_required, name='dispatch') class ComicUpdateView(generic.UpdateView): model = Comic form_class = ComicForm", ".models import Character,Comic,Power,CharacterPower,CharacterComic from django_filters.views import FilterView from .filters import", "Character form_class = CharacterForm success_message = \"Character created successfully\" template_name", "continue else: CharacterComic.objects \\ .create(character=character, comic=comic) # Delete old unmatched", "*args, **kwargs): return super().dispatch(*args, **kwargs) def form_valid(self, form): comic =", "= timezone.now() comic.save() # Current country_area_id values linked to site", "from django.urls import reverse,reverse_lazy def index(request): return HttpResponse(\"Hello, world. 
You're", "# fields = '__all__' <-- superseded by form_class context_object_name =", "character.character_id new_ids.append(new_id) if new_id in old_ids: continue else: CharacterComic.objects \\", "success_message = \"Super power created successfully\" template_name = 'marvel_world/power_new.html' #", "PowerDeleteView(generic.DeleteView): model =Power success_message = \"Super power deleted successfully\" success_url", "= Characters context_object_name= 'character'template_name='marvel_world/character_information.html' @method_decorator(login_required, name='dispatch') class CharacterUpdateView(generic.UpdateView): model =", "for old_id1 in old_ids1: if old_id1 in new_ids1: continue else:", "successfully\" template_name = 'marvel_world/character_update.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args,", "context_object_name= 'character' template_name = 'marvel_world/character_information.html' @method_decorator(login_required, name='dispatch') class ComicListView(generic.ListView): model", "self.object = self.get_object() # Delete HeritageSiteJurisdiction entries CharacterComic.objects \\ .filter(comic_id=self.object.comic_id)", "class CharacterFilterView(FilterView): filterset_class = Marvel_worldFilter template_name = 'marvel_world/character_filter.html' @method_decorator(login_required, name='dispatch')", "= CharacterForm() return render(request, 'marvel_world/character_new.html', {'form': form}) @method_decorator(login_required, name='dispatch') class", "ids new_ids = [] # Insert new unmatched country entries", "50 def get_queryset(self): return Character.objects.all().select_related('alignment','eye_color','skin_color','hair_color','race','gender','publisher').order_by('character_name') @method_decorator(login_required, name='dispatch') class CharacterDetailView(generic.DetailView): model", "entries for power in new_powers: new_id = power.power_id new_ids.append(new_id) if", "form.save(commit=False) # site.updated_by = self.request.user 
# site.date_updated = timezone.now() character.save()", "if new_id in old_ids: continue else: CharacterPower.objects \\ .create(character=character, power=power)", "form_class context_object_name = 'power' # pk_url_kwarg = 'site_pk' success_message =", "django.contrib.auth.decorators import login_required from django.utils.decorators import method_decorator from .models import", "HttpResponseRedirect(self.get_success_url()) @method_decorator(login_required, name='dispatch') class PowerDeleteView(generic.DeleteView): model =Power success_message = \"Super", "@method_decorator(login_required, name='dispatch') class CharacterFilterView(FilterView): filterset_class = Marvel_worldFilter template_name = 'marvel_world/character_filter.html'", "from django.utils.decorators import method_decorator from .models import Character,Comic,Power,CharacterPower,CharacterComic from django_filters.views", "power_id=power.power_id) \\ .delete() return HttpResponseRedirect(power.get_absolute_url()) # return redirect('heritagesites/site_detail', pk=site.pk) @method_decorator(login_required,", "return HttpResponseRedirect(power.get_absolute_url()) # return redirect('heritagesites/site_detail', pk=site.pk) @method_decorator(login_required, name='dispatch') class ComicUpdateView(generic.UpdateView):", "power in form.cleaned_data['super_power']: CharacterPower.objects.create(character=character, power=power) for comic in form.cleaned_data['comics']: CharacterComic.objects.create(character=character,", "pk_url_kwarg = 'site_pk' success_message = \"Super power updated successfully\" template_name", "= \"Super power created successfully\" template_name = 'marvel_world/power_new.html' # fields", "timezone.now() power.save() # Current country_area_id values linked to site old_ids", "= character.character_id new_ids.append(new_id) if new_id in old_ids: continue else: CharacterPower.objects", "class PowerListView(generic.ListView): model = Power context_object_name = 'powers' template_name =", 
"values linked to site old_ids = CharacterComic.objects\\ .values_list('character_id', flat=True)\\ .filter(comic_id=comic.comic_id)", "Power.objects.all().order_by('power_name') @method_decorator(login_required, name='dispatch') class PowerDetailView(generic.DetailView): model = Power context_object_name= 'power'", "= Comic form_class = ComicForm # fields = '__all__' <--", "from django.shortcuts import render,redirect from django.http import HttpResponse,HttpResponseRedirect from django.views", "**kwargs): return super().dispatch(*args, **kwargs) def post(self, request): form = PowerForm(request.POST)", "class PowerCreateView(generic.View): model = Power form_class = PowerForm success_message =", "class CharacterDeleteView(generic.DeleteView): model =Character success_message = \"Character deleted successfully\" success_url", "{'form': form}) def get(self, request): form = CharacterForm() return render(request,", "'__all__' <-- superseded by form_class context_object_name = 'character' # pk_url_kwarg", "CharacterPower.objects \\ .filter(power_id=self.object.power_id) \\ .delete() self.object.delete() return HttpResponseRedirect(self.get_success_url()) @method_decorator(login_required, name='dispatch')", "@method_decorator(login_required, name='dispatch') class ComicCreateView(generic.View): model = Comic form_class = ComicForm", "comic in new_comics: new_id1 = comic.comic_id new_ids1.append(new_id1) if new_id1 in", "= ComicForm() return render(request, 'marvel_world/comic_new.html', {'form': form}) #class CharacterDetailView(generic.DetailView):model =", "**kwargs): self.object = self.get_object() # Delete HeritageSiteJurisdiction entries CharacterPower.objects \\", "template_name = 'marvel_world/comic_delete.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs)", "success_message = \"Comic deleted successfully\" success_url = reverse_lazy('comics') context_object_name =", "form = ComicForm() return render(request, 
'marvel_world/comic_new.html', {'form': form}) #class CharacterDetailView(generic.DetailView):model", "site.updated_by = self.request.user # site.date_updated = timezone.now() character.save() # Current", "old_ids = CharacterPower.objects\\ .values_list('character_id', flat=True)\\ .filter(power_id=power.power_id) # New countries list", "form_class context_object_name = 'comic' # pk_url_kwarg = 'site_pk' success_message =", "entries CharacterPower.objects \\ .filter(character_id=self.object.character_id) \\ .delete() CharacterComic.objects \\ .filter(character_id=self.object.character_id) \\", "'comic' template_name = 'marvel_world/comic_information.html' @method_decorator(login_required, name='dispatch') class PowerListView(generic.ListView): model =", "New ids new_ids1 = [] # Insert new unmatched country", "new_id in old_ids: continue else: CharacterPower.objects \\ .create(character=character, power=power) #", "'power' # pk_url_kwarg = 'site_pk' success_message = \"Super power updated", "success_message = \"Super power deleted successfully\" success_url = reverse_lazy('super_power') context_object_name", "request): form = CharacterForm(request.POST) if form.is_valid(): character = form.save(commit=False) character.save()", "'marvel_world/power_new.html', {'form': form}) def get(self, request): form = PowerForm() return", "self.get_object() # Delete HeritageSiteJurisdiction entries CharacterPower.objects \\ .filter(power_id=self.object.power_id) \\ .delete()", "self.object.delete() return HttpResponseRedirect(self.get_success_url()) @method_decorator(login_required, name='dispatch') class PowerDeleteView(generic.DeleteView): model =Power success_message", "for comic in form.cleaned_data['comics']: CharacterComic.objects.create(character=character, comic=comic) return redirect(character) # shortcut", "name='dispatch') class PowerCreateView(generic.View): model = Power form_class = PowerForm success_message", "= 'powers' template_name = 
'marvel_world/super_power.html' paginate_by = 50 def get_queryset(self):", "new_ids = [] # Insert new unmatched country entries for", "class HomePageView(generic.TemplateView): template_name = 'marvel_world/home.html' @method_decorator(login_required, name='dispatch') class CharacterListView(generic.ListView): model", "in form.cleaned_data['super_power']: CharacterPower.objects.create(character=character, power=power) for comic in form.cleaned_data['comics']: CharacterComic.objects.create(character=character, comic=comic)", "\\ .filter(character_id=character.character_id, comic_id=old_id1) \\ .delete() return HttpResponseRedirect(character.get_absolute_url()) @method_decorator(login_required, name='dispatch') class", "country_area_id values linked to site old_ids = CharacterPower.objects\\ .values_list('character_id', flat=True)\\", "@method_decorator(login_required, name='dispatch') class CharacterDeleteView(generic.DeleteView): model =Character success_message = \"Character deleted", "= CharacterForm # fields = '__all__' <-- superseded by form_class", "'comic' # pk_url_kwarg = 'site_pk' success_message = \"Comic updated successfully\"", "def get_queryset(self): return Comic.objects.all().order_by('comic_name') @method_decorator(login_required, name='dispatch') class ComicDetailView(generic.DetailView): model =", "comic.save() # Current country_area_id values linked to site old_ids =", "character = form.save(commit=False) character.save() for power in form.cleaned_data['super_power']: CharacterPower.objects.create(character=character, power=power)", "continue else: CharacterPower.objects \\ .create(character=character, power=power) # Delete old unmatched", "CharacterPower.objects \\ .filter(character_id=self.object.character_id) \\ .delete() CharacterComic.objects \\ .filter(character_id=self.object.character_id) \\ .delete()", "HeritageSiteJurisdiction entries CharacterComic.objects \\ .filter(comic_id=self.object.comic_id) \\ .delete() self.object.delete() 
return HttpResponseRedirect(self.get_success_url())", "# shortcut to object's get_absolute_url() # return HttpResponseRedirect(site.get_absolute_url()) return render(request,", "= 'power' template_name = 'marvel_world/power_delete.html' def dispatch(self, *args, **kwargs): return", "CharacterPower.objects.create(character=character, power=power) return redirect(power) # shortcut to object's get_absolute_url() #", "model = Character context_object_name= 'character' template_name = 'marvel_world/character_information.html' @method_decorator(login_required, name='dispatch')", "paginate_by = 50 def get_queryset(self): return Power.objects.all().order_by('power_name') @method_decorator(login_required, name='dispatch') class", "HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/comic_new.html', {'form': form}) def get(self, request): form", "return HttpResponseRedirect(self.get_success_url()) @method_decorator(login_required, name='dispatch') class PowerDeleteView(generic.DeleteView): model =Power success_message =", "@method_decorator(login_required, name='dispatch') class PowerCreateView(generic.View): model = Power form_class = PowerForm", "def form_valid(self, form): character = form.save(commit=False) # site.updated_by = self.request.user", "= Power form_class = PowerForm # fields = '__all__' <--", "return HttpResponseRedirect(character.get_absolute_url()) @method_decorator(login_required, name='dispatch') class PowerUpdateView(generic.UpdateView): model = Power form_class", "'comic' template_name = 'marvel_world/comic_delete.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args,", "character in form.cleaned_data['character']: CharacterPower.objects.create(character=character, power=power) return redirect(power) # shortcut to", ".create(character=character, comic=comic) # Delete old unmatched country entries for old_id1", "site.date_updated = timezone.now() comic.save() # Current country_area_id values linked to", 
"world super hero\") class AboutPageView(generic.TemplateView): template_name = 'marvel_world/about.html' class HomePageView(generic.TemplateView):", "updated successfully\" template_name = 'marvel_world/power_update.html' def dispatch(self, *args, **kwargs): return", "def form_valid(self, form): comic = form.save(commit=False) # site.updated_by = self.request.user", "can these loops be refactored? # New ids new_ids =", "get_absolute_url() # return HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/power_new.html', {'form': form}) def", "form.save(commit=False) comic.save() for character in form.cleaned_data['character']: CharacterComic.objects.create(character=character, comic=comic) return redirect(comic)", "success_message = \"Character updated successfully\" template_name = 'marvel_world/character_update.html' def dispatch(self,", "values linked to site old_ids = CharacterPower.objects\\ .values_list('power_id', flat=True)\\ .filter(character_id=character.character_id)", "comic in form.cleaned_data['comics']: CharacterComic.objects.create(character=character, comic=comic) return redirect(character) # shortcut to", "return Character.objects.all().select_related('alignment','eye_color','skin_color','hair_color','race','gender','publisher').order_by('character_name') @method_decorator(login_required, name='dispatch') class CharacterDetailView(generic.DetailView): model = Character context_object_name=", "redirect(character) # shortcut to object's get_absolute_url() # return HttpResponseRedirect(site.get_absolute_url()) return", "'marvel_world/character_update.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def form_valid(self,", "CharacterPower.objects \\ .filter(character_id=old_id, power_id=power.power_id) \\ .delete() return HttpResponseRedirect(power.get_absolute_url()) # return", "render,redirect from django.http import HttpResponse,HttpResponseRedirect from django.views import generic from", 
"power.save() for character in form.cleaned_data['character']: CharacterPower.objects.create(character=character, power=power) return redirect(power) #", "*args, **kwargs): return super().dispatch(*args, **kwargs) def form_valid(self, form): character =", "= self.get_object() # Delete HeritageSiteJurisdiction entries CharacterPower.objects \\ .filter(power_id=self.object.power_id) \\", "comic=comic) # Delete old unmatched country entries for old_id in", "form.is_valid(): power = form.save(commit=False) power.save() for character in form.cleaned_data['character']: CharacterPower.objects.create(character=character,", "= 'marvel_world/about.html' class HomePageView(generic.TemplateView): template_name = 'marvel_world/home.html' @method_decorator(login_required, name='dispatch') class", "Insert new unmatched country entries for comic in new_comics: new_id1", "= \"Character updated successfully\" template_name = 'marvel_world/character_update.html' def dispatch(self, *args,", "CharacterDeleteView(generic.DeleteView): model =Character success_message = \"Character deleted successfully\" success_url =", "def form_valid(self, form): power = form.save(commit=False) # site.updated_by = self.request.user", "'site_pk' success_message = \"Comic updated successfully\" template_name = 'marvel_world/comic_update.html' def", "@method_decorator(login_required, name='dispatch') class ComicFilterView(FilterView): filterset_class = Marvel_comicFilter template_name = 'marvel_world/comic_filter.html'", "= 50 def get_queryset(self): return Character.objects.all().select_related('alignment','eye_color','skin_color','hair_color','race','gender','publisher').order_by('character_name') @method_decorator(login_required, name='dispatch') class CharacterDetailView(generic.DetailView):", "return super().dispatch(*args, **kwargs) def form_valid(self, form): character = form.save(commit=False) #", "[] # Insert new unmatched country entries for power in", "form.cleaned_data['character']: 
CharacterPower.objects.create(character=character, power=power) return redirect(power) # shortcut to object's get_absolute_url()", "= 50 def get_queryset(self): return Power.objects.all().order_by('power_name') @method_decorator(login_required, name='dispatch') class PowerDetailView(generic.DetailView):", "for comic in new_comics: new_id1 = comic.comic_id new_ids1.append(new_id1) if new_id1", "= comic.comic_id new_ids1.append(new_id1) if new_id1 in old_ids1: continue else: CharacterComic.objects", "template_name = 'marvel_world/comic_information.html' @method_decorator(login_required, name='dispatch') class PowerListView(generic.ListView): model = Power", "\"Super power deleted successfully\" success_url = reverse_lazy('super_power') context_object_name = 'power'", "= reverse_lazy('characters') context_object_name = 'character' template_name = 'marvel_world/character_delete.html' def dispatch(self,", "site.updated_by = self.request.user # site.date_updated = timezone.now() power.save() # Current", "render(request, 'marvel_world/power_new.html', {'form': form}) @method_decorator(login_required, name='dispatch') class ComicCreateView(generic.View): model =", "form_valid(self, form): power = form.save(commit=False) # site.updated_by = self.request.user #", "pk=site.pk) @method_decorator(login_required, name='dispatch') class ComicUpdateView(generic.UpdateView): model = Comic form_class =", "return HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/power_new.html', {'form': form}) def get(self, request):", "context_object_name = 'comic' template_name = 'marvel_world/comic_delete.html' def dispatch(self, *args, **kwargs):", "form}) @method_decorator(login_required, name='dispatch') class ComicCreateView(generic.View): model = Comic form_class =", "name='dispatch') class ComicDetailView(generic.DetailView): model = Comic context_object_name= 'comic' template_name =", "return HttpResponseRedirect(self.get_success_url()) 
@method_decorator(login_required, name='dispatch') class ComicDeleteView(generic.DeleteView): model =Comic success_message =", "deleted successfully\" success_url = reverse_lazy('comics') context_object_name = 'comic' template_name =", "= Character form_class = CharacterForm success_message = \"Character created successfully\"", "**kwargs): return super().dispatch(*args, **kwargs) def post(self, request): form = ComicForm(request.POST)", "Power context_object_name = 'powers' template_name = 'marvel_world/super_power.html' paginate_by = 50", "to object's get_absolute_url() # return HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/power_new.html', {'form':", "request): form = PowerForm() return render(request, 'marvel_world/power_new.html', {'form': form}) @method_decorator(login_required,", "class ComicDetailView(generic.DetailView): model = Comic context_object_name= 'comic' template_name = 'marvel_world/comic_information.html'", "\\ .filter(character_id=character.character_id, power_id=old_id) \\ .delete() old_ids1 = CharacterComic.objects\\ .values_list('comic_id', flat=True)\\", "in new_ids: continue else: CharacterPower.objects \\ .filter(character_id=old_id, power_id=power.power_id) \\ .delete()", "= form.cleaned_data['character'] # TODO can these loops be refactored? 
#", "name='dispatch') class CharacterFilterView(FilterView): filterset_class = Marvel_worldFilter template_name = 'marvel_world/character_filter.html' @method_decorator(login_required,", "get(self, request): form = PowerForm() return render(request, 'marvel_world/power_new.html', {'form': form})", "old_id1 in old_ids1: if old_id1 in new_ids1: continue else: CharacterComic.objects", "reverse_lazy('heritagesites/site_list') def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def post(self,", "old_ids = CharacterComic.objects\\ .values_list('character_id', flat=True)\\ .filter(comic_id=comic.comic_id) # New countries list", "= \"Comic deleted successfully\" success_url = reverse_lazy('comics') context_object_name = 'comic'", "**kwargs): return super().dispatch(*args, **kwargs) def form_valid(self, form): comic = form.save(commit=False)", "form_class = CharacterForm success_message = \"Character created successfully\" template_name =", "=Character success_message = \"Character deleted successfully\" success_url = reverse_lazy('characters') context_object_name", "from .models import Character,Comic,Power,CharacterPower,CharacterComic from django_filters.views import FilterView from .filters", "CharacterComic.objects\\ .values_list('character_id', flat=True)\\ .filter(comic_id=comic.comic_id) # New countries list new_chs =", "created successfully\" template_name = 'marvel_world/power_new.html' # fields = '__all__' <--", "template_name = 'marvel_world/character_delete.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs)", "context_object_name = 'power' # pk_url_kwarg = 'site_pk' success_message = \"Super", "New ids new_ids = [] # Insert new unmatched country", "name='dispatch') class PowerDeleteView(generic.DeleteView): model =Power success_message = \"Super power deleted", "if new_id1 in old_ids1: continue else: CharacterComic.objects \\ .create(character=character, comic=comic)", "unmatched country entries for 
old_id in old_ids: if old_id in", "template_name = 'marvel_world/character_information.html' @method_decorator(login_required, name='dispatch') class ComicListView(generic.ListView): model = Comic", "Comic form_class = ComicForm # fields = '__all__' <-- superseded", "CharacterComic.objects.create(character=character, comic=comic) return redirect(comic) # shortcut to object's get_absolute_url() #", "form): comic = form.save(commit=False) # site.updated_by = self.request.user # site.date_updated", "\\ .create(character=character, power=power) # Delete old unmatched country entries for", "= 'characters' template_name = 'marvel_world/characters.html' paginate_by = 50 def get_queryset(self):", "@method_decorator(login_required, name='dispatch') class PowerListView(generic.ListView): model = Power context_object_name = 'powers'", "fields = '__all__' <-- superseded by form_class context_object_name = 'character'", "= 'marvel_world/character_update.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def", "form.save(commit=False) # site.updated_by = self.request.user # site.date_updated = timezone.now() power.save()", "You're at the marvel world super hero\") class AboutPageView(generic.TemplateView): template_name", "return HttpResponseRedirect(comic.get_absolute_url()) @method_decorator(login_required, name='dispatch') class CharacterDeleteView(generic.DeleteView): model =Character success_message =", "return redirect(comic) # shortcut to object's get_absolute_url() # return HttpResponseRedirect(site.get_absolute_url())", "else: CharacterPower.objects \\ .filter(character_id=character.character_id, power_id=old_id) \\ .delete() old_ids1 = CharacterComic.objects\\", "new_ids1 = [] # Insert new unmatched country entries for", ".delete() self.object.delete() return HttpResponseRedirect(self.get_success_url()) @method_decorator(login_required, name='dispatch') class PowerDeleteView(generic.DeleteView): model =Power", "name='dispatch') class 
CharacterDeleteView(generic.DeleteView): model =Character success_message = \"Character deleted successfully\"", "new_ids1: continue else: CharacterComic.objects \\ .filter(character_id=character.character_id, comic_id=old_id1) \\ .delete() return", ".filter(character_id=self.object.character_id) \\ .delete() self.object.delete() return HttpResponseRedirect(self.get_success_url()) @method_decorator(login_required, name='dispatch') class PowerDeleteView(generic.DeleteView):", "for character in form.cleaned_data['character']: CharacterComic.objects.create(character=character, comic=comic) return redirect(comic) # shortcut", "'comics' template_name = 'marvel_world/comics.html' paginate_by = 600 def get_queryset(self): return", "model = Power context_object_name = 'powers' template_name = 'marvel_world/super_power.html' paginate_by", "new_ids: continue else: CharacterComic.objects \\ .filter(character_id=old_id, comic_id=comic.comic_id) \\ .delete() return", "= reverse_lazy('super_power') context_object_name = 'power' template_name = 'marvel_world/power_delete.html' def dispatch(self,", "old unmatched country entries for old_id in old_ids: if old_id", "HttpResponseRedirect(character.get_absolute_url()) @method_decorator(login_required, name='dispatch') class PowerUpdateView(generic.UpdateView): model = Power form_class =", "class PowerUpdateView(generic.UpdateView): model = Power form_class = PowerForm # fields", "reverse_lazy('characters') context_object_name = 'character' template_name = 'marvel_world/character_delete.html' def dispatch(self, *args,", "= form.cleaned_data['comics'] # TODO can these loops be refactored? 
#", "class ComicListView(generic.ListView): model = Comic context_object_name = 'comics' template_name =", "name='dispatch') class CharacterCreateView(generic.View): model = Character form_class = CharacterForm success_message", "template_name = 'marvel_world/character_filter.html' @method_decorator(login_required, name='dispatch') class ComicFilterView(FilterView): filterset_class = Marvel_comicFilter", "object's get_absolute_url() # return HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/power_new.html', {'form': form})", "=Comic success_message = \"Comic deleted successfully\" success_url = reverse_lazy('comics') context_object_name", "PowerCreateView(generic.View): model = Power form_class = PowerForm success_message = \"Super", "= form.save(commit=False) character.save() for power in form.cleaned_data['super_power']: CharacterPower.objects.create(character=character, power=power) for", "power deleted successfully\" success_url = reverse_lazy('super_power') context_object_name = 'power' template_name", "successfully\" template_name = 'marvel_world/comic_new.html' # fields = '__all__' <-- superseded", "\\ .filter(character_id=old_id, power_id=power.power_id) \\ .delete() return HttpResponseRedirect(power.get_absolute_url()) # return redirect('heritagesites/site_detail',", "def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def post(self, request):", "continue else: CharacterComic.objects \\ .filter(character_id=old_id, comic_id=comic.comic_id) \\ .delete() return HttpResponseRedirect(comic.get_absolute_url())", "new_ids: continue else: CharacterPower.objects \\ .filter(character_id=character.character_id, power_id=old_id) \\ .delete() old_ids1", "# Insert new unmatched country entries for comic in new_comics:", "form = CharacterForm() return render(request, 'marvel_world/character_new.html', {'form': form}) @method_decorator(login_required, name='dispatch')", "CharacterPower.objects\\ 
.values_list('power_id', flat=True)\\ .filter(character_id=character.character_id) # New countries list new_powers =", "updated successfully\" template_name = 'marvel_world/comic_update.html' def dispatch(self, *args, **kwargs): return", "new_ids1.append(new_id1) if new_id1 in old_ids1: continue else: CharacterComic.objects \\ .create(character=character,", "= 'comics' template_name = 'marvel_world/comics.html' paginate_by = 600 def get_queryset(self):", "= PowerForm # fields = '__all__' <-- superseded by form_class", "new_ids.append(new_id) if new_id in old_ids: continue else: CharacterComic.objects \\ .create(character=character,", "'marvel_world/comic_delete.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def delete(self,", "'marvel_world/comic_filter.html' @method_decorator(login_required, name='dispatch') class CharacterCreateView(generic.View): model = Character form_class =", "**kwargs) def form_valid(self, form): comic = form.save(commit=False) # site.updated_by =", "# return HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/comic_new.html', {'form': form}) def get(self,", ".filter(character_id=character.character_id) # New countries list new_powers = form.cleaned_data['super_power'] # TODO", "new_chs: new_id = character.character_id new_ids.append(new_id) if new_id in old_ids: continue", "else: CharacterComic.objects \\ .filter(character_id=old_id, comic_id=comic.comic_id) \\ .delete() return HttpResponseRedirect(comic.get_absolute_url()) @method_decorator(login_required,", "success_message = \"Comic created successfully\" template_name = 'marvel_world/comic_new.html' # fields", "'marvel_world/comic_information.html' @method_decorator(login_required, name='dispatch') class PowerListView(generic.ListView): model = Power context_object_name =", "Marvel_comicFilter template_name = 'marvel_world/comic_filter.html' @method_decorator(login_required, name='dispatch') class 
CharacterCreateView(generic.View): model =", "old_ids1: continue else: CharacterComic.objects \\ .create(character=character, comic=comic) # Delete old", "country_area_id values linked to site old_ids = CharacterComic.objects\\ .values_list('character_id', flat=True)\\", "ComicDetailView(generic.DetailView): model = Comic context_object_name= 'comic' template_name = 'marvel_world/comic_information.html' @method_decorator(login_required,", "to site old_ids = CharacterPower.objects\\ .values_list('power_id', flat=True)\\ .filter(character_id=character.character_id) # New", "in new_chs: new_id = character.character_id new_ids.append(new_id) if new_id in old_ids:", "old_ids1 = CharacterComic.objects\\ .values_list('comic_id', flat=True)\\ .filter(character_id=character.character_id) # New countries list", "template_name = 'marvel_world/character_new.html' # fields = '__all__' <-- superseded by", "= \"Character created successfully\" template_name = 'marvel_world/character_new.html' # fields =", "new unmatched country entries for character in new_chs: new_id =", "form}) def get(self, request): form = CharacterForm() return render(request, 'marvel_world/character_new.html',", "comic=comic) # Delete old unmatched country entries for old_id1 in", "old_ids: continue else: CharacterPower.objects \\ .create(character=character, power=power) # Delete old", "# site.date_updated = timezone.now() comic.save() # Current country_area_id values linked", "template_name = 'marvel_world/characters.html' paginate_by = 50 def get_queryset(self): return Character.objects.all().select_related('alignment','eye_color','skin_color','hair_color','race','gender','publisher').order_by('character_name')", "form = CharacterForm(request.POST) if form.is_valid(): character = form.save(commit=False) character.save() for", "old_id in old_ids: if old_id in new_ids: continue else: CharacterPower.objects", "return redirect('heritagesites/site_detail', pk=site.pk) @method_decorator(login_required, 
name='dispatch') class ComicUpdateView(generic.UpdateView): model = Comic", "linked to site old_ids = CharacterComic.objects\\ .values_list('character_id', flat=True)\\ .filter(comic_id=comic.comic_id) #", "CharacterForm,PowerForm,ComicForm from django.urls import reverse,reverse_lazy def index(request): return HttpResponse(\"Hello, world.", "delete(self, request, *args, **kwargs): self.object = self.get_object() # Delete HeritageSiteJurisdiction", "def index(request): return HttpResponse(\"Hello, world. You're at the marvel world", "form.cleaned_data['super_power']: CharacterPower.objects.create(character=character, power=power) for comic in form.cleaned_data['comics']: CharacterComic.objects.create(character=character, comic=comic) return", "\\ .delete() return HttpResponseRedirect(character.get_absolute_url()) @method_decorator(login_required, name='dispatch') class PowerUpdateView(generic.UpdateView): model =", "values linked to site old_ids = CharacterPower.objects\\ .values_list('character_id', flat=True)\\ .filter(power_id=power.power_id)", "{'form': form}) @method_decorator(login_required, name='dispatch') class PowerCreateView(generic.View): model = Power form_class", "template_name = 'marvel_world/super_power_information.html' @method_decorator(login_required, name='dispatch') class CharacterFilterView(FilterView): filterset_class = Marvel_worldFilter", "from django.http import HttpResponse,HttpResponseRedirect from django.views import generic from django.contrib.auth.decorators", "# return HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/power_new.html', {'form': form}) def get(self,", "render(request, 'marvel_world/comic_new.html', {'form': form}) def get(self, request): form = ComicForm()", "timezone.now() character.save() # Current country_area_id values linked to site old_ids", ".values_list('comic_id', flat=True)\\ .filter(character_id=character.character_id) # New countries list new_comics = 
form.cleaned_data['comics']", "updated successfully\" template_name = 'marvel_world/character_update.html' def dispatch(self, *args, **kwargs): return", "class ComicDeleteView(generic.DeleteView): model =Comic success_message = \"Comic deleted successfully\" success_url", "reverse_lazy('comics') context_object_name = 'comic' template_name = 'marvel_world/comic_delete.html' def dispatch(self, *args,", "\"Comic created successfully\" template_name = 'marvel_world/comic_new.html' # fields = '__all__'", "ids new_ids1 = [] # Insert new unmatched country entries", "'powers' template_name = 'marvel_world/super_power.html' paginate_by = 50 def get_queryset(self): return", "form.cleaned_data['comics']: CharacterComic.objects.create(character=character, comic=comic) return redirect(character) # shortcut to object's get_absolute_url()", "country entries for old_id1 in old_ids1: if old_id1 in new_ids1:", "CharacterComic.objects \\ .filter(character_id=old_id, comic_id=comic.comic_id) \\ .delete() return HttpResponseRedirect(comic.get_absolute_url()) @method_decorator(login_required, name='dispatch')", "return render(request, 'marvel_world/character_new.html', {'form': form}) def get(self, request): form =", "flat=True)\\ .filter(power_id=power.power_id) # New countries list new_chs = form.cleaned_data['character'] #", "\\ .create(character=character, comic=comic) # Delete old unmatched country entries for", "model = Power context_object_name= 'power' template_name = 'marvel_world/super_power_information.html' @method_decorator(login_required, name='dispatch')", "power updated successfully\" template_name = 'marvel_world/power_update.html' def dispatch(self, *args, **kwargs):", "'marvel_world/comic_update.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def form_valid(self,", "self.object = self.get_object() # Delete HeritageSiteJurisdiction entries CharacterPower.objects \\ .filter(character_id=self.object.character_id)", "= PowerForm() 
return render(request, 'marvel_world/power_new.html', {'form': form}) @method_decorator(login_required, name='dispatch') class", "class ComicCreateView(generic.View): model = Comic form_class = ComicForm success_message =", "success_message = \"Comic updated successfully\" template_name = 'marvel_world/comic_update.html' def dispatch(self,", ".filter(power_id=power.power_id) # New countries list new_chs = form.cleaned_data['character'] # TODO", "new_comics = form.cleaned_data['comics'] # TODO can these loops be refactored?", "\\ .delete() self.object.delete() return HttpResponseRedirect(self.get_success_url()) @method_decorator(login_required, name='dispatch') class ComicDeleteView(generic.DeleteView): model", "self.object.delete() return HttpResponseRedirect(self.get_success_url()) @method_decorator(login_required, name='dispatch') class ComicDeleteView(generic.DeleteView): model =Comic success_message", "= power.power_id new_ids.append(new_id) if new_id in old_ids: continue else: CharacterPower.objects", "PowerForm # fields = '__all__' <-- superseded by form_class context_object_name", "success_url = reverse_lazy('super_power') context_object_name = 'power' template_name = 'marvel_world/power_delete.html' def", "in new_ids1: continue else: CharacterComic.objects \\ .filter(character_id=character.character_id, comic_id=old_id1) \\ .delete()", "import Marvel_worldFilter,Marvel_comicFilter from .forms import CharacterForm,PowerForm,ComicForm from django.urls import reverse,reverse_lazy", "ComicForm success_message = \"Comic created successfully\" template_name = 'marvel_world/comic_new.html' #", "HomePageView(generic.TemplateView): template_name = 'marvel_world/home.html' @method_decorator(login_required, name='dispatch') class CharacterListView(generic.ListView): model =", "'character' # pk_url_kwarg = 'site_pk' success_message = \"Character updated successfully\"", "successfully\" success_url = reverse_lazy('characters') context_object_name = 'character' 
template_name = 'marvel_world/character_delete.html'", "Marvel_worldFilter template_name = 'marvel_world/character_filter.html' @method_decorator(login_required, name='dispatch') class ComicFilterView(FilterView): filterset_class =", "Current country_area_id values linked to site old_ids = CharacterPower.objects\\ .values_list('character_id',", "= 'marvel_world/power_new.html' # fields = '__all__' <-- superseded by form_class", "super().dispatch(*args, **kwargs) def form_valid(self, form): character = form.save(commit=False) # site.updated_by", "superseded by form_class context_object_name = 'comic' # pk_url_kwarg = 'site_pk'", "= 'marvel_world/characters.html' paginate_by = 50 def get_queryset(self): return Character.objects.all().select_related('alignment','eye_color','skin_color','hair_color','race','gender','publisher').order_by('character_name') @method_decorator(login_required,", "# Insert new unmatched country entries for character in new_chs:", "= form.save(commit=False) # site.updated_by = self.request.user # site.date_updated = timezone.now()", "post(self, request): form = CharacterForm(request.POST) if form.is_valid(): character = form.save(commit=False)", "= Comic context_object_name = 'comics' template_name = 'marvel_world/comics.html' paginate_by =", "character in new_chs: new_id = character.character_id new_ids.append(new_id) if new_id in", "if old_id in new_ids: continue else: CharacterComic.objects \\ .filter(character_id=old_id, comic_id=comic.comic_id)", "CharacterComic.objects \\ .filter(character_id=self.object.character_id) \\ .delete() self.object.delete() return HttpResponseRedirect(self.get_success_url()) @method_decorator(login_required, name='dispatch')", "success_message = \"Character deleted successfully\" success_url = reverse_lazy('characters') context_object_name =", "get_queryset(self): return Comic.objects.all().order_by('comic_name') @method_decorator(login_required, name='dispatch') class ComicDetailView(generic.DetailView): 
model = Comic", "power = form.save(commit=False) power.save() for character in form.cleaned_data['character']: CharacterPower.objects.create(character=character, power=power)", "class ComicUpdateView(generic.UpdateView): model = Comic form_class = ComicForm # fields", "form.save(commit=False) character.save() for power in form.cleaned_data['super_power']: CharacterPower.objects.create(character=character, power=power) for comic", "[] # Insert new unmatched country entries for character in", "**kwargs): self.object = self.get_object() # Delete HeritageSiteJurisdiction entries CharacterComic.objects \\", "name='dispatch') class ComicUpdateView(generic.UpdateView): model = Comic form_class = ComicForm #", "template_name = 'marvel_world/comic_update.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs)", "country_area_id values linked to site old_ids = CharacterPower.objects\\ .values_list('power_id', flat=True)\\", "'marvel_world/home.html' @method_decorator(login_required, name='dispatch') class CharacterListView(generic.ListView): model = Character context_object_name =", "'characters' template_name = 'marvel_world/characters.html' paginate_by = 50 def get_queryset(self): return", "by form_class context_object_name = 'power' # pk_url_kwarg = 'site_pk' success_message", "class AboutPageView(generic.TemplateView): template_name = 'marvel_world/about.html' class HomePageView(generic.TemplateView): template_name = 'marvel_world/home.html'", "return super().dispatch(*args, **kwargs) def post(self, request): form = PowerForm(request.POST) if", "form_class context_object_name = 'character' # pk_url_kwarg = 'site_pk' success_message =", "context_object_name = 'characters' template_name = 'marvel_world/characters.html' paginate_by = 50 def", "import HttpResponse,HttpResponseRedirect from django.views import generic from django.contrib.auth.decorators import login_required", "Current country_area_id values linked to site old_ids = 
CharacterPower.objects\\ .values_list('power_id',", "to site old_ids = CharacterComic.objects\\ .values_list('character_id', flat=True)\\ .filter(comic_id=comic.comic_id) # New", "get(self, request): form = ComicForm() return render(request, 'marvel_world/comic_new.html', {'form': form})", "Delete HeritageSiteJurisdiction entries CharacterPower.objects \\ .filter(character_id=self.object.character_id) \\ .delete() CharacterComic.objects \\", "\\ .delete() old_ids1 = CharacterComic.objects\\ .values_list('comic_id', flat=True)\\ .filter(character_id=character.character_id) # New", "in old_ids: continue else: CharacterComic.objects \\ .create(character=character, comic=comic) # Delete", "template_name = 'marvel_world/power_new.html' # fields = '__all__' <-- superseded by", "HttpResponseRedirect(power.get_absolute_url()) # return redirect('heritagesites/site_detail', pk=site.pk) @method_decorator(login_required, name='dispatch') class ComicUpdateView(generic.UpdateView): model", "CharacterForm(request.POST) if form.is_valid(): character = form.save(commit=False) character.save() for power in", "@method_decorator(login_required, name='dispatch') class CharacterCreateView(generic.View): model = Character form_class = CharacterForm", "'marvel_world/power_new.html', {'form': form}) @method_decorator(login_required, name='dispatch') class ComicCreateView(generic.View): model = Comic", "countries list new_comics = form.cleaned_data['comics'] # TODO can these loops", "form}) @method_decorator(login_required, name='dispatch') class PowerCreateView(generic.View): model = Power form_class =", "CharacterForm # fields = '__all__' <-- superseded by form_class context_object_name", "new_powers = form.cleaned_data['super_power'] # TODO can these loops be refactored?", "power=power) for comic in form.cleaned_data['comics']: CharacterComic.objects.create(character=character, comic=comic) return redirect(character) #", "new_powers: new_id = power.power_id new_ids.append(new_id) if 
new_id in old_ids: continue", ".delete() return HttpResponseRedirect(power.get_absolute_url()) # return redirect('heritagesites/site_detail', pk=site.pk) @method_decorator(login_required, name='dispatch') class", "name='dispatch') class CharacterDetailView(generic.DetailView): model = Character context_object_name= 'character' template_name =", "comic.comic_id new_ids1.append(new_id1) if new_id1 in old_ids1: continue else: CharacterComic.objects \\", "class CharacterCreateView(generic.View): model = Character form_class = CharacterForm success_message =", "request): form = ComicForm(request.POST) if form.is_valid(): comic = form.save(commit=False) comic.save()", "CharacterCreateView(generic.View): model = Character form_class = CharacterForm success_message = \"Character", "created successfully\" template_name = 'marvel_world/character_new.html' # fields = '__all__' <--", "= PowerForm success_message = \"Super power created successfully\" template_name =", "= 'marvel_world/super_power.html' paginate_by = 50 def get_queryset(self): return Power.objects.all().order_by('power_name') @method_decorator(login_required,", "'__all__' <-- superseded by form_class context_object_name = 'comic' # pk_url_kwarg", "get_queryset(self): return Character.objects.all().select_related('alignment','eye_color','skin_color','hair_color','race','gender','publisher').order_by('character_name') @method_decorator(login_required, name='dispatch') class CharacterDetailView(generic.DetailView): model = Character", "comic_id=comic.comic_id) \\ .delete() return HttpResponseRedirect(comic.get_absolute_url()) @method_decorator(login_required, name='dispatch') class CharacterDeleteView(generic.DeleteView): model", "old_ids: if old_id in new_ids: continue else: CharacterComic.objects \\ .filter(character_id=old_id,", "character.save() # Current country_area_id values linked to site old_ids =", "successfully\" success_url = reverse_lazy('comics') context_object_name = 'comic' template_name = 
'marvel_world/comic_delete.html'", "= 'site_pk' success_message = \"Super power updated successfully\" template_name =", "class CharacterUpdateView(generic.UpdateView): model = Character form_class = CharacterForm # fields", "dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def form_valid(self, form): character", "= \"Character deleted successfully\" success_url = reverse_lazy('characters') context_object_name = 'character'", "= reverse_lazy('heritagesites/site_list') def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def", "model = Comic form_class = ComicForm success_message = \"Comic created", "template_name = 'marvel_world/about.html' class HomePageView(generic.TemplateView): template_name = 'marvel_world/home.html' @method_decorator(login_required, name='dispatch')", "timezone.now() comic.save() # Current country_area_id values linked to site old_ids", "\"Super power updated successfully\" template_name = 'marvel_world/power_update.html' def dispatch(self, *args,", "# Delete HeritageSiteJurisdiction entries CharacterPower.objects \\ .filter(power_id=self.object.power_id) \\ .delete() self.object.delete()", "{'form': form}) def get(self, request): form = PowerForm() return render(request,", "= 'marvel_world/power_delete.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def", "form_class = PowerForm # fields = '__all__' <-- superseded by", "model =Comic success_message = \"Comic deleted successfully\" success_url = reverse_lazy('comics')", "'marvel_world/characters.html' paginate_by = 50 def get_queryset(self): return Character.objects.all().select_related('alignment','eye_color','skin_color','hair_color','race','gender','publisher').order_by('character_name') @method_decorator(login_required, name='dispatch')", "TODO can these loops be refactored? 
# New ids new_ids1", "=Power success_message = \"Super power deleted successfully\" success_url = reverse_lazy('super_power')", "form): character = form.save(commit=False) # site.updated_by = self.request.user # site.date_updated", "\\ .filter(power_id=self.object.power_id) \\ .delete() self.object.delete() return HttpResponseRedirect(self.get_success_url()) @method_decorator(login_required, name='dispatch') class", "= self.get_object() # Delete HeritageSiteJurisdiction entries CharacterComic.objects \\ .filter(comic_id=self.object.comic_id) \\", "new_ids.append(new_id) if new_id in old_ids: continue else: CharacterPower.objects \\ .create(character=character,", "FilterView from .filters import Marvel_worldFilter,Marvel_comicFilter from .forms import CharacterForm,PowerForm,ComicForm from", "these loops be refactored? # New ids new_ids1 = []", ".filters import Marvel_worldFilter,Marvel_comicFilter from .forms import CharacterForm,PowerForm,ComicForm from django.urls import", ".delete() return HttpResponseRedirect(character.get_absolute_url()) @method_decorator(login_required, name='dispatch') class PowerUpdateView(generic.UpdateView): model = Power", "render(request, 'marvel_world/character_new.html', {'form': form}) @method_decorator(login_required, name='dispatch') class PowerCreateView(generic.View): model =", "template_name = 'marvel_world/comics.html' paginate_by = 600 def get_queryset(self): return Comic.objects.all().order_by('comic_name')", "site.date_updated = timezone.now() power.save() # Current country_area_id values linked to", "@method_decorator(login_required, name='dispatch') class ComicDetailView(generic.DetailView): model = Comic context_object_name= 'comic' template_name", "ComicForm(request.POST) if form.is_valid(): comic = form.save(commit=False) comic.save() for character in", "\\ .filter(character_id=old_id, comic_id=comic.comic_id) \\ .delete() return HttpResponseRedirect(comic.get_absolute_url()) @method_decorator(login_required, 
name='dispatch') class", "Characters context_object_name= 'character'template_name='marvel_world/character_information.html' @method_decorator(login_required, name='dispatch') class CharacterUpdateView(generic.UpdateView): model = Character", "get_absolute_url() # return HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/character_new.html', {'form': form}) def", "site.date_updated = timezone.now() character.save() # Current country_area_id values linked to", "refactored? # New ids new_ids1 = [] # Insert new", "context_object_name = 'character' # pk_url_kwarg = 'site_pk' success_message = \"Character", "= Character context_object_name= 'character' template_name = 'marvel_world/character_information.html' @method_decorator(login_required, name='dispatch') class", "= self.request.user # site.date_updated = timezone.now() power.save() # Current country_area_id", "new_ids: continue else: CharacterPower.objects \\ .filter(character_id=old_id, power_id=power.power_id) \\ .delete() return", "template_name = 'marvel_world/comic_new.html' # fields = '__all__' <-- superseded by", "= 'marvel_world/comic_update.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def", "'marvel_world/character_new.html' # fields = '__all__' <-- superseded by form_class #", "unmatched country entries for character in new_chs: new_id = character.character_id", "'site_pk' success_message = \"Super power updated successfully\" template_name = 'marvel_world/power_update.html'", "\\ .delete() return HttpResponseRedirect(comic.get_absolute_url()) @method_decorator(login_required, name='dispatch') class CharacterDeleteView(generic.DeleteView): model =Character", "= 'marvel_world/comic_information.html' @method_decorator(login_required, name='dispatch') class PowerListView(generic.ListView): model = Power context_object_name", "countries list new_chs = form.cleaned_data['character'] # TODO can these loops", "def post(self, request): form = 
PowerForm(request.POST) if form.is_valid(): power =", "return render(request, 'marvel_world/character_new.html', {'form': form}) @method_decorator(login_required, name='dispatch') class PowerCreateView(generic.View): model", "deleted successfully\" success_url = reverse_lazy('characters') context_object_name = 'character' template_name =", "= reverse_lazy('comics') context_object_name = 'comic' template_name = 'marvel_world/comic_delete.html' def dispatch(self,", ".create(character=character, comic=comic) # Delete old unmatched country entries for old_id", "entries for character in new_chs: new_id = character.character_id new_ids.append(new_id) if", "Character context_object_name = 'characters' template_name = 'marvel_world/characters.html' paginate_by = 50", "# Delete HeritageSiteJurisdiction entries CharacterComic.objects \\ .filter(comic_id=self.object.comic_id) \\ .delete() self.object.delete()", "old_ids = CharacterPower.objects\\ .values_list('power_id', flat=True)\\ .filter(character_id=character.character_id) # New countries list", "def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def delete(self, request,", "superseded by form_class # success_url = reverse_lazy('heritagesites/site_list') def dispatch(self, *args,", "model = Comic context_object_name= 'comic' template_name = 'marvel_world/comic_information.html' @method_decorator(login_required, name='dispatch')", "Insert new unmatched country entries for power in new_powers: new_id", "super hero\") class AboutPageView(generic.TemplateView): template_name = 'marvel_world/about.html' class HomePageView(generic.TemplateView): template_name", "'character'template_name='marvel_world/character_information.html' @method_decorator(login_required, name='dispatch') class CharacterUpdateView(generic.UpdateView): model = Character form_class =", "dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def post(self, request): form", "successfully\" template_name = 
'marvel_world/power_new.html' # fields = '__all__' <-- superseded", "= [] # Insert new unmatched country entries for power", ".values_list('character_id', flat=True)\\ .filter(power_id=power.power_id) # New countries list new_chs = form.cleaned_data['character']", "Marvel_worldFilter,Marvel_comicFilter from .forms import CharacterForm,PowerForm,ComicForm from django.urls import reverse,reverse_lazy def", "= CharacterForm(request.POST) if form.is_valid(): character = form.save(commit=False) character.save() for power", ".values_list('character_id', flat=True)\\ .filter(comic_id=comic.comic_id) # New countries list new_chs = form.cleaned_data['character']", "superseded by form_class context_object_name = 'power' # pk_url_kwarg = 'site_pk'", ".delete() self.object.delete() return HttpResponseRedirect(self.get_success_url()) @method_decorator(login_required, name='dispatch') class ComicDeleteView(generic.DeleteView): model =Comic", "django.utils.decorators import method_decorator from .models import Character,Comic,Power,CharacterPower,CharacterComic from django_filters.views import", "context_object_name= 'character'template_name='marvel_world/character_information.html' @method_decorator(login_required, name='dispatch') class CharacterUpdateView(generic.UpdateView): model = Character form_class", "old_ids: if old_id in new_ids: continue else: CharacterPower.objects \\ .filter(character_id=character.character_id,", "post(self, request): form = ComicForm(request.POST) if form.is_valid(): comic = form.save(commit=False)", "comic_id=old_id1) \\ .delete() return HttpResponseRedirect(character.get_absolute_url()) @method_decorator(login_required, name='dispatch') class PowerUpdateView(generic.UpdateView): model", "entries for old_id1 in old_ids1: if old_id1 in new_ids1: continue", "from django_filters.views import FilterView from .filters import Marvel_worldFilter,Marvel_comicFilter from .forms", "import generic from django.contrib.auth.decorators import login_required 
from django.utils.decorators import method_decorator", "old_id in old_ids: if old_id in new_ids: continue else: CharacterComic.objects", "@method_decorator(login_required, name='dispatch') class CharacterUpdateView(generic.UpdateView): model = Character form_class = CharacterForm", "{'form': form}) #class CharacterDetailView(generic.DetailView):model = Characters context_object_name= 'character'template_name='marvel_world/character_information.html' @method_decorator(login_required, name='dispatch')", "# Delete HeritageSiteJurisdiction entries CharacterPower.objects \\ .filter(character_id=self.object.character_id) \\ .delete() CharacterComic.objects", "# site.date_updated = timezone.now() power.save() # Current country_area_id values linked", "continue else: CharacterPower.objects \\ .filter(character_id=old_id, power_id=power.power_id) \\ .delete() return HttpResponseRedirect(power.get_absolute_url())", "form_valid(self, form): comic = form.save(commit=False) # site.updated_by = self.request.user #", "pk_url_kwarg = 'site_pk' success_message = \"Character updated successfully\" template_name =", "'marvel_world/super_power.html' paginate_by = 50 def get_queryset(self): return Power.objects.all().order_by('power_name') @method_decorator(login_required, name='dispatch')", "template_name = 'marvel_world/power_delete.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs)", "'marvel_world/super_power_information.html' @method_decorator(login_required, name='dispatch') class CharacterFilterView(FilterView): filterset_class = Marvel_worldFilter template_name =", "CharacterListView(generic.ListView): model = Character context_object_name = 'characters' template_name = 'marvel_world/characters.html'", "success_url = reverse_lazy('comics') context_object_name = 'comic' template_name = 'marvel_world/comic_delete.html' def", "self.request.user # site.date_updated = timezone.now() character.save() # Current country_area_id values", "return 
redirect(power) # shortcut to object's get_absolute_url() # return HttpResponseRedirect(site.get_absolute_url())", "name='dispatch') class PowerListView(generic.ListView): model = Power context_object_name = 'powers' template_name", "form}) def get(self, request): form = PowerForm() return render(request, 'marvel_world/power_new.html',", "Delete HeritageSiteJurisdiction entries CharacterPower.objects \\ .filter(power_id=self.object.power_id) \\ .delete() self.object.delete() return", "= Character form_class = CharacterForm # fields = '__all__' <--", "# pk_url_kwarg = 'site_pk' success_message = \"Super power updated successfully\"", "comic=comic) return redirect(comic) # shortcut to object's get_absolute_url() # return", "{'form': form}) @method_decorator(login_required, name='dispatch') class ComicCreateView(generic.View): model = Comic form_class", "= \"Super power deleted successfully\" success_url = reverse_lazy('super_power') context_object_name =", "old_id in new_ids: continue else: CharacterComic.objects \\ .filter(character_id=old_id, comic_id=comic.comic_id) \\", "for power in new_powers: new_id = power.power_id new_ids.append(new_id) if new_id", "= self.request.user # site.date_updated = timezone.now() comic.save() # Current country_area_id", "Comic.objects.all().order_by('comic_name') @method_decorator(login_required, name='dispatch') class ComicDetailView(generic.DetailView): model = Comic context_object_name= 'comic'", "'marvel_world/comic_new.html', {'form': form}) #class CharacterDetailView(generic.DetailView):model = Characters context_object_name= 'character'template_name='marvel_world/character_information.html' @method_decorator(login_required,", "linked to site old_ids = CharacterPower.objects\\ .values_list('power_id', flat=True)\\ .filter(character_id=character.character_id) #", "def delete(self, request, *args, **kwargs): self.object = self.get_object() # Delete", "*args, **kwargs): return super().dispatch(*args, **kwargs) def delete(self, 
request, *args, **kwargs):", "character.character_id new_ids.append(new_id) if new_id in old_ids: continue else: CharacterPower.objects \\", "template_name = 'marvel_world/home.html' @method_decorator(login_required, name='dispatch') class CharacterListView(generic.ListView): model = Character", "= CharacterForm success_message = \"Character created successfully\" template_name = 'marvel_world/character_new.html'", ".filter(character_id=character.character_id) # New countries list new_comics = form.cleaned_data['comics'] # TODO", "= CharacterPower.objects\\ .values_list('character_id', flat=True)\\ .filter(power_id=power.power_id) # New countries list new_chs", "'marvel_world/comic_new.html', {'form': form}) def get(self, request): form = ComicForm() return", "class CharacterListView(generic.ListView): model = Character context_object_name = 'characters' template_name =", "= Power form_class = PowerForm success_message = \"Super power created", "unmatched country entries for comic in new_comics: new_id1 = comic.comic_id", "name='dispatch') class PowerUpdateView(generic.UpdateView): model = Power form_class = PowerForm #", "AboutPageView(generic.TemplateView): template_name = 'marvel_world/about.html' class HomePageView(generic.TemplateView): template_name = 'marvel_world/home.html' @method_decorator(login_required,", "= character.character_id new_ids.append(new_id) if new_id in old_ids: continue else: CharacterComic.objects", "reverse,reverse_lazy def index(request): return HttpResponse(\"Hello, world. 
You're at the marvel", "template_name = 'marvel_world/character_update.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs)", "power = form.save(commit=False) # site.updated_by = self.request.user # site.date_updated =", "in old_ids: if old_id in new_ids: continue else: CharacterComic.objects \\", "if old_id in new_ids: continue else: CharacterPower.objects \\ .filter(character_id=character.character_id, power_id=old_id)", "def get_queryset(self): return Character.objects.all().select_related('alignment','eye_color','skin_color','hair_color','race','gender','publisher').order_by('character_name') @method_decorator(login_required, name='dispatch') class CharacterDetailView(generic.DetailView): model =", ".delete() return HttpResponseRedirect(comic.get_absolute_url()) @method_decorator(login_required, name='dispatch') class CharacterDeleteView(generic.DeleteView): model =Character success_message", "def get_queryset(self): return Power.objects.all().order_by('power_name') @method_decorator(login_required, name='dispatch') class PowerDetailView(generic.DetailView): model =", "'marvel_world/character_delete.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def delete(self,", "= 'comic' template_name = 'marvel_world/comic_delete.html' def dispatch(self, *args, **kwargs): return", "PowerDetailView(generic.DetailView): model = Power context_object_name= 'power' template_name = 'marvel_world/super_power_information.html' @method_decorator(login_required,", "in old_ids: continue else: CharacterPower.objects \\ .create(character=character, power=power) # Delete", "get_absolute_url() # return HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/comic_new.html', {'form': form}) def", "render(request, 'marvel_world/character_new.html', {'form': form}) def get(self, request): form = CharacterForm()", "new_id1 in old_ids1: continue else: CharacterComic.objects \\ 
.create(character=character, comic=comic) #", "self.object = self.get_object() # Delete HeritageSiteJurisdiction entries CharacterPower.objects \\ .filter(power_id=self.object.power_id)", "form.cleaned_data['character'] # TODO can these loops be refactored? # New", "shortcut to object's get_absolute_url() # return HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/comic_new.html',", "CharacterPower.objects \\ .create(character=character, power=power) # Delete old unmatched country entries", "\"Character deleted successfully\" success_url = reverse_lazy('characters') context_object_name = 'character' template_name", "old_id in new_ids: continue else: CharacterPower.objects \\ .filter(character_id=character.character_id, power_id=old_id) \\", "\"Super power created successfully\" template_name = 'marvel_world/power_new.html' # fields =", "success_message = \"Super power updated successfully\" template_name = 'marvel_world/power_update.html' def", "Comic form_class = ComicForm success_message = \"Comic created successfully\" template_name", "CharacterFilterView(FilterView): filterset_class = Marvel_worldFilter template_name = 'marvel_world/character_filter.html' @method_decorator(login_required, name='dispatch') class", "form_class # success_url = reverse_lazy('heritagesites/site_list') def dispatch(self, *args, **kwargs): return", "power=power) # Delete old unmatched country entries for old_id in", "TODO can these loops be refactored? 
# New ids new_ids", "entries for comic in new_comics: new_id1 = comic.comic_id new_ids1.append(new_id1) if", "*args, **kwargs): return super().dispatch(*args, **kwargs) def post(self, request): form =", ".filter(character_id=character.character_id, comic_id=old_id1) \\ .delete() return HttpResponseRedirect(character.get_absolute_url()) @method_decorator(login_required, name='dispatch') class PowerUpdateView(generic.UpdateView):", "CharacterPower.objects\\ .values_list('character_id', flat=True)\\ .filter(power_id=power.power_id) # New countries list new_chs =", "return super().dispatch(*args, **kwargs) def delete(self, request, *args, **kwargs): self.object =", "in new_comics: new_id1 = comic.comic_id new_ids1.append(new_id1) if new_id1 in old_ids1:", "name='dispatch') class ComicDeleteView(generic.DeleteView): model =Comic success_message = \"Comic deleted successfully\"", "# Delete old unmatched country entries for old_id1 in old_ids1:", "dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def form_valid(self, form): power", "else: CharacterComic.objects \\ .filter(character_id=character.character_id, comic_id=old_id1) \\ .delete() return HttpResponseRedirect(character.get_absolute_url()) @method_decorator(login_required,", "import CharacterForm,PowerForm,ComicForm from django.urls import reverse,reverse_lazy def index(request): return HttpResponse(\"Hello,", "template_name = 'marvel_world/power_update.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs)", "successfully\" success_url = reverse_lazy('super_power') context_object_name = 'power' template_name = 'marvel_world/power_delete.html'", "name='dispatch') class CharacterUpdateView(generic.UpdateView): model = Character form_class = CharacterForm #", "in new_ids: continue else: CharacterPower.objects \\ .filter(character_id=character.character_id, power_id=old_id) \\ .delete()", "= Comic context_object_name= 'comic' template_name = 
'marvel_world/comic_information.html' @method_decorator(login_required, name='dispatch') class", "comic = form.save(commit=False) # site.updated_by = self.request.user # site.date_updated =", "post(self, request): form = PowerForm(request.POST) if form.is_valid(): power = form.save(commit=False)", "redirect(power) # shortcut to object's get_absolute_url() # return HttpResponseRedirect(site.get_absolute_url()) return", "success_message = \"Character created successfully\" template_name = 'marvel_world/character_new.html' # fields", "form.is_valid(): character = form.save(commit=False) character.save() for power in form.cleaned_data['super_power']: CharacterPower.objects.create(character=character,", "= CharacterComic.objects\\ .values_list('comic_id', flat=True)\\ .filter(character_id=character.character_id) # New countries list new_comics", "model = Character form_class = CharacterForm success_message = \"Character created", "power=power) return redirect(power) # shortcut to object's get_absolute_url() # return", "'marvel_world/power_delete.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def delete(self,", "@method_decorator(login_required, name='dispatch') class PowerUpdateView(generic.UpdateView): model = Power form_class = PowerForm", "@method_decorator(login_required, name='dispatch') class PowerDeleteView(generic.DeleteView): model =Power success_message = \"Super power", "model = Power form_class = PowerForm success_message = \"Super power", "= 'character' # pk_url_kwarg = 'site_pk' success_message = \"Character updated", "\"Comic deleted successfully\" success_url = reverse_lazy('comics') context_object_name = 'comic' template_name", "= Power context_object_name = 'powers' template_name = 'marvel_world/super_power.html' paginate_by =", "= '__all__' <-- superseded by form_class context_object_name = 'comic' #", "# site.date_updated = timezone.now() character.save() # Current country_area_id values linked", "successfully\" 
template_name = 'marvel_world/comic_update.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args,", "self.get_object() # Delete HeritageSiteJurisdiction entries CharacterPower.objects \\ .filter(character_id=self.object.character_id) \\ .delete()", "import method_decorator from .models import Character,Comic,Power,CharacterPower,CharacterComic from django_filters.views import FilterView", "Insert new unmatched country entries for character in new_chs: new_id", "success_url = reverse_lazy('characters') context_object_name = 'character' template_name = 'marvel_world/character_delete.html' def", "'marvel_world/comics.html' paginate_by = 600 def get_queryset(self): return Comic.objects.all().order_by('comic_name') @method_decorator(login_required, name='dispatch')", "to object's get_absolute_url() # return HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/character_new.html', {'form':", "Delete old unmatched country entries for old_id1 in old_ids1: if", "PowerListView(generic.ListView): model = Power context_object_name = 'powers' template_name = 'marvel_world/super_power.html'", ".forms import CharacterForm,PowerForm,ComicForm from django.urls import reverse,reverse_lazy def index(request): return", "# New countries list new_powers = form.cleaned_data['super_power'] # TODO can", "by form_class context_object_name = 'character' # pk_url_kwarg = 'site_pk' success_message", "form_class = PowerForm success_message = \"Super power created successfully\" template_name", "world. 
You're at the marvel world super hero\") class AboutPageView(generic.TemplateView):", "<-- superseded by form_class context_object_name = 'comic' # pk_url_kwarg =", "model =Power success_message = \"Super power deleted successfully\" success_url =", "return super().dispatch(*args, **kwargs) def form_valid(self, form): power = form.save(commit=False) #", "# pk_url_kwarg = 'site_pk' success_message = \"Character updated successfully\" template_name", "if old_id1 in new_ids1: continue else: CharacterComic.objects \\ .filter(character_id=character.character_id, comic_id=old_id1)", "import FilterView from .filters import Marvel_worldFilter,Marvel_comicFilter from .forms import CharacterForm,PowerForm,ComicForm", "character in form.cleaned_data['character']: CharacterComic.objects.create(character=character, comic=comic) return redirect(comic) # shortcut to", "for old_id in old_ids: if old_id in new_ids: continue else:", "country entries for comic in new_comics: new_id1 = comic.comic_id new_ids1.append(new_id1)", "refactored? 
# New ids new_ids = [] # Insert new", "return Comic.objects.all().order_by('comic_name') @method_decorator(login_required, name='dispatch') class ComicDetailView(generic.DetailView): model = Comic context_object_name=", "Power context_object_name= 'power' template_name = 'marvel_world/super_power_information.html' @method_decorator(login_required, name='dispatch') class CharacterFilterView(FilterView):", "'__all__' <-- superseded by form_class # success_url = reverse_lazy('heritagesites/site_list') def", "CharacterForm() return render(request, 'marvel_world/character_new.html', {'form': form}) @method_decorator(login_required, name='dispatch') class PowerCreateView(generic.View):", "form}) def get(self, request): form = ComicForm() return render(request, 'marvel_world/comic_new.html',", "= 'marvel_world/comic_delete.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def", "context_object_name = 'power' template_name = 'marvel_world/power_delete.html' def dispatch(self, *args, **kwargs):", "form.cleaned_data['super_power'] # TODO can these loops be refactored? 
# New", "Character.objects.all().select_related('alignment','eye_color','skin_color','hair_color','race','gender','publisher').order_by('character_name') @method_decorator(login_required, name='dispatch') class CharacterDetailView(generic.DetailView): model = Character context_object_name= 'character'", "CharacterPower.objects.create(character=character, power=power) for comic in form.cleaned_data['comics']: CharacterComic.objects.create(character=character, comic=comic) return redirect(character)", "return super().dispatch(*args, **kwargs) def form_valid(self, form): comic = form.save(commit=False) #", "django_filters.views import FilterView from .filters import Marvel_worldFilter,Marvel_comicFilter from .forms import", "template_name = 'marvel_world/comic_filter.html' @method_decorator(login_required, name='dispatch') class CharacterCreateView(generic.View): model = Character", "# fields = '__all__' <-- superseded by form_class # success_url", "'marvel_world/character_information.html' @method_decorator(login_required, name='dispatch') class ComicListView(generic.ListView): model = Comic context_object_name =", "Power form_class = PowerForm # fields = '__all__' <-- superseded", "**kwargs): return super().dispatch(*args, **kwargs) def form_valid(self, form): character = form.save(commit=False)", "def get(self, request): form = ComicForm() return render(request, 'marvel_world/comic_new.html', {'form':", "from django.views import generic from django.contrib.auth.decorators import login_required from django.utils.decorators", "in form.cleaned_data['character']: CharacterPower.objects.create(character=character, power=power) return redirect(power) # shortcut to object's", "context_object_name = 'comic' # pk_url_kwarg = 'site_pk' success_message = \"Comic", "import Character,Comic,Power,CharacterPower,CharacterComic from django_filters.views import FilterView from .filters import Marvel_worldFilter,Marvel_comicFilter", "PowerForm(request.POST) if form.is_valid(): power 
= form.save(commit=False) power.save() for character in", "HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/character_new.html', {'form': form}) def get(self, request): form", "country entries for old_id in old_ids: if old_id in new_ids:", "# Current country_area_id values linked to site old_ids = CharacterComic.objects\\", "{'form': form}) def get(self, request): form = ComicForm() return render(request,", "if old_id in new_ids: continue else: CharacterPower.objects \\ .filter(character_id=old_id, power_id=power.power_id)", "= 'marvel_world/home.html' @method_decorator(login_required, name='dispatch') class CharacterListView(generic.ListView): model = Character context_object_name", "form.is_valid(): comic = form.save(commit=False) comic.save() for character in form.cleaned_data['character']: CharacterComic.objects.create(character=character,", "return Power.objects.all().order_by('power_name') @method_decorator(login_required, name='dispatch') class PowerDetailView(generic.DetailView): model = Power context_object_name=", "CharacterComic.objects \\ .filter(character_id=character.character_id, comic_id=old_id1) \\ .delete() return HttpResponseRedirect(character.get_absolute_url()) @method_decorator(login_required, name='dispatch')", "= Comic form_class = ComicForm success_message = \"Comic created successfully\"", "'marvel_world/character_filter.html' @method_decorator(login_required, name='dispatch') class ComicFilterView(FilterView): filterset_class = Marvel_comicFilter template_name =", "'marvel_world/character_new.html', {'form': form}) @method_decorator(login_required, name='dispatch') class PowerCreateView(generic.View): model = Power", "# New countries list new_comics = form.cleaned_data['comics'] # TODO can", "list new_chs = form.cleaned_data['character'] # TODO can these loops be", "dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def form_valid(self, form): comic", "flat=True)\\ 
.filter(character_id=character.character_id) # New countries list new_comics = form.cleaned_data['comics'] #", "unmatched country entries for old_id1 in old_ids1: if old_id1 in", "\\ .delete() return HttpResponseRedirect(power.get_absolute_url()) # return redirect('heritagesites/site_detail', pk=site.pk) @method_decorator(login_required, name='dispatch')", "= self.get_object() # Delete HeritageSiteJurisdiction entries CharacterPower.objects \\ .filter(character_id=self.object.character_id) \\", "= CharacterPower.objects\\ .values_list('power_id', flat=True)\\ .filter(character_id=character.character_id) # New countries list new_powers", "HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/power_new.html', {'form': form}) def get(self, request): form", "model = Power form_class = PowerForm # fields = '__all__'", "name='dispatch') class ComicFilterView(FilterView): filterset_class = Marvel_comicFilter template_name = 'marvel_world/comic_filter.html' @method_decorator(login_required,", "django.views import generic from django.contrib.auth.decorators import login_required from django.utils.decorators import", "context_object_name = 'character' template_name = 'marvel_world/character_delete.html' def dispatch(self, *args, **kwargs):", "countries list new_powers = form.cleaned_data['super_power'] # TODO can these loops", "= CharacterComic.objects\\ .values_list('character_id', flat=True)\\ .filter(comic_id=comic.comic_id) # New countries list new_chs", "from .forms import CharacterForm,PowerForm,ComicForm from django.urls import reverse,reverse_lazy def index(request):", "class ComicFilterView(FilterView): filterset_class = Marvel_comicFilter template_name = 'marvel_world/comic_filter.html' @method_decorator(login_required, name='dispatch')", "paginate_by = 50 def get_queryset(self): return Character.objects.all().select_related('alignment','eye_color','skin_color','hair_color','race','gender','publisher').order_by('character_name') 
@method_decorator(login_required, name='dispatch') class", "= 'site_pk' success_message = \"Character updated successfully\" template_name = 'marvel_world/character_update.html'", "@method_decorator(login_required, name='dispatch') class ComicDeleteView(generic.DeleteView): model =Comic success_message = \"Comic deleted", "HeritageSiteJurisdiction entries CharacterPower.objects \\ .filter(power_id=self.object.power_id) \\ .delete() self.object.delete() return HttpResponseRedirect(self.get_success_url())", "'marvel_world/power_update.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def form_valid(self,", "return render(request, 'marvel_world/power_new.html', {'form': form}) @method_decorator(login_required, name='dispatch') class ComicCreateView(generic.View): model", "new_id = power.power_id new_ids.append(new_id) if new_id in old_ids: continue else:", "form_valid(self, form): character = form.save(commit=False) # site.updated_by = self.request.user #", "= 'marvel_world/comics.html' paginate_by = 600 def get_queryset(self): return Comic.objects.all().order_by('comic_name') @method_decorator(login_required,", "from .filters import Marvel_worldFilter,Marvel_comicFilter from .forms import CharacterForm,PowerForm,ComicForm from django.urls", "= 'marvel_world/super_power_information.html' @method_decorator(login_required, name='dispatch') class CharacterFilterView(FilterView): filterset_class = Marvel_worldFilter template_name", "PowerForm success_message = \"Super power created successfully\" template_name = 'marvel_world/power_new.html'", "successfully\" template_name = 'marvel_world/power_update.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args,", "= 'marvel_world/power_update.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def", "created successfully\" template_name = 'marvel_world/comic_new.html' # fields = '__all__' <--", "HttpResponse(\"Hello, world. 
You're at the marvel world super hero\") class", "context_object_name= 'power' template_name = 'marvel_world/super_power_information.html' @method_decorator(login_required, name='dispatch') class CharacterFilterView(FilterView): filterset_class", "form_class = ComicForm success_message = \"Comic created successfully\" template_name =", "fields = '__all__' <-- superseded by form_class context_object_name = 'power'", "CharacterDetailView(generic.DetailView):model = Characters context_object_name= 'character'template_name='marvel_world/character_information.html' @method_decorator(login_required, name='dispatch') class CharacterUpdateView(generic.UpdateView): model", "filterset_class = Marvel_worldFilter template_name = 'marvel_world/character_filter.html' @method_decorator(login_required, name='dispatch') class ComicFilterView(FilterView):", "# success_url = reverse_lazy('heritagesites/site_list') def dispatch(self, *args, **kwargs): return super().dispatch(*args,", "# Insert new unmatched country entries for power in new_powers:", "= Power context_object_name= 'power' template_name = 'marvel_world/super_power_information.html' @method_decorator(login_required, name='dispatch') class", "= form.save(commit=False) comic.save() for character in form.cleaned_data['character']: CharacterComic.objects.create(character=character, comic=comic) return", "by form_class # success_url = reverse_lazy('heritagesites/site_list') def dispatch(self, *args, **kwargs):", "# site.updated_by = self.request.user # site.date_updated = timezone.now() character.save() #", "New countries list new_chs = form.cleaned_data['character'] # TODO can these", "'power' template_name = 'marvel_world/super_power_information.html' @method_decorator(login_required, name='dispatch') class CharacterFilterView(FilterView): filterset_class =", "power created successfully\" template_name = 'marvel_world/power_new.html' # fields = '__all__'", "in old_ids1: continue else: CharacterComic.objects \\ 
.create(character=character, comic=comic) # Delete", "old_id in new_ids: continue else: CharacterPower.objects \\ .filter(character_id=old_id, power_id=power.power_id) \\", ".filter(character_id=old_id, comic_id=comic.comic_id) \\ .delete() return HttpResponseRedirect(comic.get_absolute_url()) @method_decorator(login_required, name='dispatch') class CharacterDeleteView(generic.DeleteView):", "super().dispatch(*args, **kwargs) def delete(self, request, *args, **kwargs): self.object = self.get_object()", "return HttpResponse(\"Hello, world. You're at the marvel world super hero\")", "def post(self, request): form = ComicForm(request.POST) if form.is_valid(): comic =", "paginate_by = 600 def get_queryset(self): return Comic.objects.all().order_by('comic_name') @method_decorator(login_required, name='dispatch') class", "\\ .delete() self.object.delete() return HttpResponseRedirect(self.get_success_url()) @method_decorator(login_required, name='dispatch') class PowerDeleteView(generic.DeleteView): model", "form_class = ComicForm # fields = '__all__' <-- superseded by", "def post(self, request): form = CharacterForm(request.POST) if form.is_valid(): character =", "def get(self, request): form = CharacterForm() return render(request, 'marvel_world/character_new.html', {'form':", "model = Comic context_object_name = 'comics' template_name = 'marvel_world/comics.html' paginate_by", "in new_powers: new_id = power.power_id new_ids.append(new_id) if new_id in old_ids:", "request): form = ComicForm() return render(request, 'marvel_world/comic_new.html', {'form': form}) #class", "<-- superseded by form_class # success_url = reverse_lazy('heritagesites/site_list') def dispatch(self,", "form): power = form.save(commit=False) # site.updated_by = self.request.user # site.date_updated", "new_id in old_ids: continue else: CharacterComic.objects \\ .create(character=character, comic=comic) #", "class PowerDetailView(generic.DetailView): model = Power context_object_name= 'power' 
template_name = 'marvel_world/super_power_information.html'", "**kwargs) def delete(self, request, *args, **kwargs): self.object = self.get_object() #", "name='dispatch') class ComicListView(generic.ListView): model = Comic context_object_name = 'comics' template_name", "ComicForm() return render(request, 'marvel_world/comic_new.html', {'form': form}) #class CharacterDetailView(generic.DetailView):model = Characters", "by form_class context_object_name = 'comic' # pk_url_kwarg = 'site_pk' success_message", "\\ .filter(character_id=self.object.character_id) \\ .delete() self.object.delete() return HttpResponseRedirect(self.get_success_url()) @method_decorator(login_required, name='dispatch') class", "CharacterUpdateView(generic.UpdateView): model = Character form_class = CharacterForm # fields =", "= 'comic' # pk_url_kwarg = 'site_pk' success_message = \"Comic updated", "CharacterDetailView(generic.DetailView): model = Character context_object_name= 'character' template_name = 'marvel_world/character_information.html' @method_decorator(login_required,", "super().dispatch(*args, **kwargs) def form_valid(self, form): power = form.save(commit=False) # site.updated_by", "in form.cleaned_data['character']: CharacterComic.objects.create(character=character, comic=comic) return redirect(comic) # shortcut to object's", "fields = '__all__' <-- superseded by form_class # success_url =", "Character form_class = CharacterForm # fields = '__all__' <-- superseded", "= \"Comic created successfully\" template_name = 'marvel_world/comic_new.html' # fields =", "else: CharacterComic.objects \\ .create(character=character, comic=comic) # Delete old unmatched country", "ComicForm # fields = '__all__' <-- superseded by form_class context_object_name", "= '__all__' <-- superseded by form_class # success_url = reverse_lazy('heritagesites/site_list')", "'character' template_name = 'marvel_world/character_delete.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args,", "def 
get(self, request): form = PowerForm() return render(request, 'marvel_world/power_new.html', {'form':", "= 'marvel_world/character_delete.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def", "object's get_absolute_url() # return HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/character_new.html', {'form': form})", "comic.save() for character in form.cleaned_data['character']: CharacterComic.objects.create(character=character, comic=comic) return redirect(comic) #", "if form.is_valid(): power = form.save(commit=False) power.save() for character in form.cleaned_data['character']:", "hero\") class AboutPageView(generic.TemplateView): template_name = 'marvel_world/about.html' class HomePageView(generic.TemplateView): template_name =", "PowerForm() return render(request, 'marvel_world/power_new.html', {'form': form}) @method_decorator(login_required, name='dispatch') class ComicCreateView(generic.View):", "= 'site_pk' success_message = \"Comic updated successfully\" template_name = 'marvel_world/comic_update.html'", "class CharacterDetailView(generic.DetailView): model = Character context_object_name= 'character' template_name = 'marvel_world/character_information.html'", "new_chs = form.cleaned_data['character'] # TODO can these loops be refactored?", "'marvel_world/character_new.html', {'form': form}) def get(self, request): form = CharacterForm() return", "= \"Super power updated successfully\" template_name = 'marvel_world/power_update.html' def dispatch(self,", "filterset_class = Marvel_comicFilter template_name = 'marvel_world/comic_filter.html' @method_decorator(login_required, name='dispatch') class CharacterCreateView(generic.View):", "unmatched country entries for power in new_powers: new_id = power.power_id", "power.power_id new_ids.append(new_id) if new_id in old_ids: continue else: CharacterPower.objects \\", "= Character context_object_name = 'characters' template_name = 
'marvel_world/characters.html' paginate_by =", "super().dispatch(*args, **kwargs) def post(self, request): form = CharacterForm(request.POST) if form.is_valid():", ".filter(power_id=self.object.power_id) \\ .delete() self.object.delete() return HttpResponseRedirect(self.get_success_url()) @method_decorator(login_required, name='dispatch') class ComicDeleteView(generic.DeleteView):", "Comic context_object_name = 'comics' template_name = 'marvel_world/comics.html' paginate_by = 600", "@method_decorator(login_required, name='dispatch') class PowerDetailView(generic.DetailView): model = Power context_object_name= 'power' template_name", "= Marvel_worldFilter template_name = 'marvel_world/character_filter.html' @method_decorator(login_required, name='dispatch') class ComicFilterView(FilterView): filterset_class", "comic=comic) return redirect(character) # shortcut to object's get_absolute_url() # return", "'marvel_world/power_new.html' # fields = '__all__' <-- superseded by form_class #", "if form.is_valid(): comic = form.save(commit=False) comic.save() for character in form.cleaned_data['character']:", "return render(request, 'marvel_world/comic_new.html', {'form': form}) def get(self, request): form =", "site.updated_by = self.request.user # site.date_updated = timezone.now() comic.save() # Current", "in new_ids: continue else: CharacterComic.objects \\ .filter(character_id=old_id, comic_id=comic.comic_id) \\ .delete()", "= 'marvel_world/character_filter.html' @method_decorator(login_required, name='dispatch') class ComicFilterView(FilterView): filterset_class = Marvel_comicFilter template_name", "pk_url_kwarg = 'site_pk' success_message = \"Comic updated successfully\" template_name =", "get_queryset(self): return Power.objects.all().order_by('power_name') @method_decorator(login_required, name='dispatch') class PowerDetailView(generic.DetailView): model = Power", "import login_required from django.utils.decorators import method_decorator from .models import 
Character,Comic,Power,CharacterPower,CharacterComic", "form_class = CharacterForm # fields = '__all__' <-- superseded by", "= form.save(commit=False) power.save() for character in form.cleaned_data['character']: CharacterPower.objects.create(character=character, power=power) return", "# New ids new_ids1 = [] # Insert new unmatched", "new_id1 = comic.comic_id new_ids1.append(new_id1) if new_id1 in old_ids1: continue else:", "form.save(commit=False) # site.updated_by = self.request.user # site.date_updated = timezone.now() comic.save()", "'power' template_name = 'marvel_world/power_delete.html' def dispatch(self, *args, **kwargs): return super().dispatch(*args,", "Power form_class = PowerForm success_message = \"Super power created successfully\"", "= form.cleaned_data['super_power'] # TODO can these loops be refactored? #", "site old_ids = CharacterPower.objects\\ .values_list('power_id', flat=True)\\ .filter(character_id=character.character_id) # New countries", "\"Character updated successfully\" template_name = 'marvel_world/character_update.html' def dispatch(self, *args, **kwargs):", "new unmatched country entries for comic in new_comics: new_id1 =", "index(request): return HttpResponse(\"Hello, world. 
You're at the marvel world super", "@method_decorator(login_required, name='dispatch') class CharacterListView(generic.ListView): model = Character context_object_name = 'characters'", "context_object_name = 'powers' template_name = 'marvel_world/super_power.html' paginate_by = 50 def", "return render(request, 'marvel_world/power_new.html', {'form': form}) def get(self, request): form =", "= timezone.now() character.save() # Current country_area_id values linked to site", "power in new_powers: new_id = power.power_id new_ids.append(new_id) if new_id in", "'__all__' <-- superseded by form_class context_object_name = 'power' # pk_url_kwarg", "new_comics: new_id1 = comic.comic_id new_ids1.append(new_id1) if new_id1 in old_ids1: continue", "[] # Insert new unmatched country entries for comic in", ".filter(character_id=character.character_id, power_id=old_id) \\ .delete() old_ids1 = CharacterComic.objects\\ .values_list('comic_id', flat=True)\\ .filter(character_id=character.character_id)", "country entries for character in new_chs: new_id = character.character_id new_ids.append(new_id)", "for character in form.cleaned_data['character']: CharacterPower.objects.create(character=character, power=power) return redirect(power) # shortcut", "form = ComicForm(request.POST) if form.is_valid(): comic = form.save(commit=False) comic.save() for", "@method_decorator(login_required, name='dispatch') class CharacterDetailView(generic.DetailView): model = Character context_object_name= 'character' template_name", "= ComicForm # fields = '__all__' <-- superseded by form_class", "old_ids: continue else: CharacterComic.objects \\ .create(character=character, comic=comic) # Delete old", "site old_ids = CharacterComic.objects\\ .values_list('character_id', flat=True)\\ .filter(comic_id=comic.comic_id) # New countries", "at the marvel world super hero\") class AboutPageView(generic.TemplateView): template_name =", "redirect(comic) # shortcut to object's get_absolute_url() # return 
HttpResponseRedirect(site.get_absolute_url()) return", "CharacterComic.objects\\ .values_list('comic_id', flat=True)\\ .filter(character_id=character.character_id) # New countries list new_comics =", "class PowerDeleteView(generic.DeleteView): model =Power success_message = \"Super power deleted successfully\"", "continue else: CharacterPower.objects \\ .filter(character_id=character.character_id, power_id=old_id) \\ .delete() old_ids1 =", "for character in new_chs: new_id = character.character_id new_ids.append(new_id) if new_id", "power.save() # Current country_area_id values linked to site old_ids =", "redirect('heritagesites/site_detail', pk=site.pk) @method_decorator(login_required, name='dispatch') class ComicUpdateView(generic.UpdateView): model = Comic form_class", "super().dispatch(*args, **kwargs) def post(self, request): form = ComicForm(request.POST) if form.is_valid():", "form = PowerForm() return render(request, 'marvel_world/power_new.html', {'form': form}) @method_decorator(login_required, name='dispatch')", "old_id1 in new_ids1: continue else: CharacterComic.objects \\ .filter(character_id=character.character_id, comic_id=old_id1) \\", "model = Character context_object_name = 'characters' template_name = 'marvel_world/characters.html' paginate_by", "linked to site old_ids = CharacterPower.objects\\ .values_list('character_id', flat=True)\\ .filter(power_id=power.power_id) #", "django.shortcuts import render,redirect from django.http import HttpResponse,HttpResponseRedirect from django.views import", "Current country_area_id values linked to site old_ids = CharacterComic.objects\\ .values_list('character_id',", "new unmatched country entries for power in new_powers: new_id =", "= Marvel_comicFilter template_name = 'marvel_world/comic_filter.html' @method_decorator(login_required, name='dispatch') class CharacterCreateView(generic.View): model", "form = PowerForm(request.POST) if form.is_valid(): power = form.save(commit=False) power.save() for", 
"import reverse,reverse_lazy def index(request): return HttpResponse(\"Hello, world. You're at the", "'marvel_world/about.html' class HomePageView(generic.TemplateView): template_name = 'marvel_world/home.html' @method_decorator(login_required, name='dispatch') class CharacterListView(generic.ListView):", "# return redirect('heritagesites/site_detail', pk=site.pk) @method_decorator(login_required, name='dispatch') class ComicUpdateView(generic.UpdateView): model =", "the marvel world super hero\") class AboutPageView(generic.TemplateView): template_name = 'marvel_world/about.html'", "django.http import HttpResponse,HttpResponseRedirect from django.views import generic from django.contrib.auth.decorators import", "model = Comic form_class = ComicForm # fields = '__all__'", "= self.request.user # site.date_updated = timezone.now() character.save() # Current country_area_id", "Delete HeritageSiteJurisdiction entries CharacterComic.objects \\ .filter(comic_id=self.object.comic_id) \\ .delete() self.object.delete() return", "template_name = 'marvel_world/super_power.html' paginate_by = 50 def get_queryset(self): return Power.objects.all().order_by('power_name')", "CharacterForm success_message = \"Character created successfully\" template_name = 'marvel_world/character_new.html' #", "ComicDeleteView(generic.DeleteView): model =Comic success_message = \"Comic deleted successfully\" success_url =", "return HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/character_new.html', {'form': form}) def get(self, request):", "= 'marvel_world/comic_filter.html' @method_decorator(login_required, name='dispatch') class CharacterCreateView(generic.View): model = Character form_class", "= 'marvel_world/character_information.html' @method_decorator(login_required, name='dispatch') class ComicListView(generic.ListView): model = Comic context_object_name", "character = form.save(commit=False) # site.updated_by = self.request.user # site.date_updated =", 
"**kwargs) def form_valid(self, form): power = form.save(commit=False) # site.updated_by =", "in old_ids1: if old_id1 in new_ids1: continue else: CharacterComic.objects \\", "character.save() for power in form.cleaned_data['super_power']: CharacterPower.objects.create(character=character, power=power) for comic in", ".delete() old_ids1 = CharacterComic.objects\\ .values_list('comic_id', flat=True)\\ .filter(character_id=character.character_id) # New countries", "generic from django.contrib.auth.decorators import login_required from django.utils.decorators import method_decorator from", "600 def get_queryset(self): return Comic.objects.all().order_by('comic_name') @method_decorator(login_required, name='dispatch') class ComicDetailView(generic.DetailView): model", "return super().dispatch(*args, **kwargs) def post(self, request): form = CharacterForm(request.POST) if", "for power in form.cleaned_data['super_power']: CharacterPower.objects.create(character=character, power=power) for comic in form.cleaned_data['comics']:", "entries for old_id in old_ids: if old_id in new_ids: continue", "= ComicForm success_message = \"Comic created successfully\" template_name = 'marvel_world/comic_new.html'", "context_object_name = 'comics' template_name = 'marvel_world/comics.html' paginate_by = 600 def", "= timezone.now() power.save() # Current country_area_id values linked to site", ".filter(comic_id=comic.comic_id) # New countries list new_chs = form.cleaned_data['character'] # TODO", "= 'marvel_world/character_new.html' # fields = '__all__' <-- superseded by form_class", "name='dispatch') class CharacterListView(generic.ListView): model = Character context_object_name = 'characters' template_name", "# pk_url_kwarg = 'site_pk' success_message = \"Comic updated successfully\" template_name", "request): form = CharacterForm() return render(request, 'marvel_world/character_new.html', {'form': form}) @method_decorator(login_required,", "return super().dispatch(*args, **kwargs) def 
post(self, request): form = ComicForm(request.POST) if", "model =Character success_message = \"Character deleted successfully\" success_url = reverse_lazy('characters')", "super().dispatch(*args, **kwargs) def post(self, request): form = PowerForm(request.POST) if form.is_valid():", "Character,Comic,Power,CharacterPower,CharacterComic from django_filters.views import FilterView from .filters import Marvel_worldFilter,Marvel_comicFilter from", "loops be refactored? # New ids new_ids1 = [] #", "super().dispatch(*args, **kwargs) def form_valid(self, form): comic = form.save(commit=False) # site.updated_by", "form.cleaned_data['comics'] # TODO can these loops be refactored? # New", "# TODO can these loops be refactored? # New ids", "Comic context_object_name= 'comic' template_name = 'marvel_world/comic_information.html' @method_decorator(login_required, name='dispatch') class PowerListView(generic.ListView):", "render(request, 'marvel_world/comic_new.html', {'form': form}) #class CharacterDetailView(generic.DetailView):model = Characters context_object_name= 'character'template_name='marvel_world/character_information.html'", "Character context_object_name= 'character' template_name = 'marvel_world/character_information.html' @method_decorator(login_required, name='dispatch') class ComicListView(generic.ListView):", "to object's get_absolute_url() # return HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/comic_new.html', {'form':", "CharacterPower.objects \\ .filter(character_id=character.character_id, power_id=old_id) \\ .delete() old_ids1 = CharacterComic.objects\\ .values_list('comic_id',", "CharacterComic.objects.create(character=character, comic=comic) return redirect(character) # shortcut to object's get_absolute_url() #", "login_required from django.utils.decorators import method_decorator from .models import Character,Comic,Power,CharacterPower,CharacterComic from", "<-- superseded by form_class context_object_name = 'power' # 
pk_url_kwarg =", "ComicUpdateView(generic.UpdateView): model = Comic form_class = ComicForm # fields =", "django.urls import reverse,reverse_lazy def index(request): return HttpResponse(\"Hello, world. You're at", "shortcut to object's get_absolute_url() # return HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/power_new.html',", "New countries list new_powers = form.cleaned_data['super_power'] # TODO can these", "# Delete old unmatched country entries for old_id in old_ids:", "object's get_absolute_url() # return HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/comic_new.html', {'form': form})", "flat=True)\\ .filter(comic_id=comic.comic_id) # New countries list new_chs = form.cleaned_data['character'] #", "*args, **kwargs): self.object = self.get_object() # Delete HeritageSiteJurisdiction entries CharacterComic.objects", "= 'power' # pk_url_kwarg = 'site_pk' success_message = \"Super power", "# site.updated_by = self.request.user # site.date_updated = timezone.now() comic.save() #", "shortcut to object's get_absolute_url() # return HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/character_new.html',", "form}) #class CharacterDetailView(generic.DetailView):model = Characters context_object_name= 'character'template_name='marvel_world/character_information.html' @method_decorator(login_required, name='dispatch') class", "<-- superseded by form_class context_object_name = 'character' # pk_url_kwarg =", "= '__all__' <-- superseded by form_class context_object_name = 'power' #", ".filter(character_id=self.object.character_id) \\ .delete() CharacterComic.objects \\ .filter(character_id=self.object.character_id) \\ .delete() self.object.delete() return", "list new_powers = form.cleaned_data['super_power'] # TODO can these loops be", "**kwargs): return super().dispatch(*args, **kwargs) def post(self, request): form = CharacterForm(request.POST)", 
"reverse_lazy('super_power') context_object_name = 'power' template_name = 'marvel_world/power_delete.html' def dispatch(self, *args,", "in form.cleaned_data['comics']: CharacterComic.objects.create(character=character, comic=comic) return redirect(character) # shortcut to object's", "# Current country_area_id values linked to site old_ids = CharacterPower.objects\\", "list new_comics = form.cleaned_data['comics'] # TODO can these loops be", "def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs) def form_valid(self, form):", "get(self, request): form = CharacterForm() return render(request, 'marvel_world/character_new.html', {'form': form})", "in old_ids: if old_id in new_ids: continue else: CharacterPower.objects \\", "fields = '__all__' <-- superseded by form_class context_object_name = 'comic'", "= \"Comic updated successfully\" template_name = 'marvel_world/comic_update.html' def dispatch(self, *args,", "\"Character created successfully\" template_name = 'marvel_world/character_new.html' # fields = '__all__'", "ComicFilterView(FilterView): filterset_class = Marvel_comicFilter template_name = 'marvel_world/comic_filter.html' @method_decorator(login_required, name='dispatch') class", "power_id=old_id) \\ .delete() old_ids1 = CharacterComic.objects\\ .values_list('comic_id', flat=True)\\ .filter(character_id=character.character_id) #", "deleted successfully\" success_url = reverse_lazy('super_power') context_object_name = 'power' template_name =", ".create(character=character, power=power) # Delete old unmatched country entries for old_id", "**kwargs) def form_valid(self, form): character = form.save(commit=False) # site.updated_by =", "New countries list new_comics = form.cleaned_data['comics'] # TODO can these", "'marvel_world/comic_new.html' # fields = '__all__' <-- superseded by form_class #", "continue else: CharacterComic.objects \\ .filter(character_id=character.character_id, comic_id=old_id1) \\ .delete() return 
HttpResponseRedirect(character.get_absolute_url())", "render(request, 'marvel_world/power_new.html', {'form': form}) def get(self, request): form = PowerForm()", "be refactored? # New ids new_ids1 = [] # Insert", "return redirect(character) # shortcut to object's get_absolute_url() # return HttpResponseRedirect(site.get_absolute_url())", "return HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/comic_new.html', {'form': form}) def get(self, request):", "'site_pk' success_message = \"Character updated successfully\" template_name = 'marvel_world/character_update.html' def", "these loops be refactored? # New ids new_ids = []", "= 600 def get_queryset(self): return Comic.objects.all().order_by('comic_name') @method_decorator(login_required, name='dispatch') class ComicDetailView(generic.DetailView):", "HttpResponse,HttpResponseRedirect from django.views import generic from django.contrib.auth.decorators import login_required from", "# New ids new_ids = [] # Insert new unmatched", "CharacterComic.objects \\ .create(character=character, comic=comic) # Delete old unmatched country entries", "form.save(commit=False) power.save() for character in form.cleaned_data['character']: CharacterPower.objects.create(character=character, power=power) return redirect(power)", "= ComicForm(request.POST) if form.is_valid(): comic = form.save(commit=False) comic.save() for character", "# New countries list new_chs = form.cleaned_data['character'] # TODO can", "self.get_object() # Delete HeritageSiteJurisdiction entries CharacterComic.objects \\ .filter(comic_id=self.object.comic_id) \\ .delete()", "= [] # Insert new unmatched country entries for comic", "superseded by form_class context_object_name = 'character' # pk_url_kwarg = 'site_pk'", "else: CharacterPower.objects \\ .create(character=character, power=power) # Delete old unmatched country", "from django.contrib.auth.decorators import login_required from django.utils.decorators import 
method_decorator from .models", "name='dispatch') class ComicCreateView(generic.View): model = Comic form_class = ComicForm success_message", "form.cleaned_data['character']: CharacterComic.objects.create(character=character, comic=comic) return redirect(comic) # shortcut to object's get_absolute_url()", "@method_decorator(login_required, name='dispatch') class ComicListView(generic.ListView): model = Comic context_object_name = 'comics'", "# return HttpResponseRedirect(site.get_absolute_url()) return render(request, 'marvel_world/character_new.html', {'form': form}) def get(self,", "HttpResponseRedirect(self.get_success_url()) @method_decorator(login_required, name='dispatch') class ComicDeleteView(generic.DeleteView): model =Comic success_message = \"Comic", "marvel world super hero\") class AboutPageView(generic.TemplateView): template_name = 'marvel_world/about.html' class", "request, *args, **kwargs): self.object = self.get_object() # Delete HeritageSiteJurisdiction entries", "= PowerForm(request.POST) if form.is_valid(): power = form.save(commit=False) power.save() for character", "site old_ids = CharacterPower.objects\\ .values_list('character_id', flat=True)\\ .filter(power_id=power.power_id) # New countries", "if new_id in old_ids: continue else: CharacterComic.objects \\ .create(character=character, comic=comic)", ".filter(character_id=old_id, power_id=power.power_id) \\ .delete() return HttpResponseRedirect(power.get_absolute_url()) # return redirect('heritagesites/site_detail', pk=site.pk)", ".delete() CharacterComic.objects \\ .filter(character_id=self.object.character_id) \\ .delete() self.object.delete() return HttpResponseRedirect(self.get_success_url()) @method_decorator(login_required,", "HeritageSiteJurisdiction entries CharacterPower.objects \\ .filter(character_id=self.object.character_id) \\ .delete() CharacterComic.objects \\ .filter(character_id=self.object.character_id)", "= 'character' template_name = 'marvel_world/character_delete.html' 
def dispatch(self, *args, **kwargs): return", "name='dispatch') class PowerDetailView(generic.DetailView): model = Power context_object_name= 'power' template_name =", "new_id = character.character_id new_ids.append(new_id) if new_id in old_ids: continue else:", "**kwargs) def post(self, request): form = CharacterForm(request.POST) if form.is_valid(): character", "method_decorator from .models import Character,Comic,Power,CharacterPower,CharacterComic from django_filters.views import FilterView from", "be refactored? # New ids new_ids = [] # Insert", "**kwargs) def post(self, request): form = PowerForm(request.POST) if form.is_valid(): power", "Delete old unmatched country entries for old_id in old_ids: if", "PowerUpdateView(generic.UpdateView): model = Power form_class = PowerForm # fields =", "can these loops be refactored? # New ids new_ids1 =", "self.request.user # site.date_updated = timezone.now() comic.save() # Current country_area_id values", "\\ .filter(character_id=self.object.character_id) \\ .delete() CharacterComic.objects \\ .filter(character_id=self.object.character_id) \\ .delete() self.object.delete()", "50 def get_queryset(self): return Power.objects.all().order_by('power_name') @method_decorator(login_required, name='dispatch') class PowerDetailView(generic.DetailView): model", "ComicCreateView(generic.View): model = Comic form_class = ComicForm success_message = \"Comic", "model = Character form_class = CharacterForm # fields = '__all__'", "successfully\" template_name = 'marvel_world/character_new.html' # fields = '__all__' <-- superseded", "#class CharacterDetailView(generic.DetailView):model = Characters context_object_name= 'character'template_name='marvel_world/character_information.html' @method_decorator(login_required, name='dispatch') class CharacterUpdateView(generic.UpdateView):", "flat=True)\\ .filter(character_id=character.character_id) # New countries list new_powers = form.cleaned_data['super_power'] #", "to site old_ids = 
CharacterPower.objects\\ .values_list('character_id', flat=True)\\ .filter(power_id=power.power_id) # New", "= [] # Insert new unmatched country entries for character", "loops be refactored? # New ids new_ids = [] #", "country entries for power in new_powers: new_id = power.power_id new_ids.append(new_id)", "ComicListView(generic.ListView): model = Comic context_object_name = 'comics' template_name = 'marvel_world/comics.html'", "**kwargs): return super().dispatch(*args, **kwargs) def form_valid(self, form): power = form.save(commit=False)", "old_ids: if old_id in new_ids: continue else: CharacterPower.objects \\ .filter(character_id=old_id,", "*args, **kwargs): self.object = self.get_object() # Delete HeritageSiteJurisdiction entries CharacterPower.objects", "request): form = PowerForm(request.POST) if form.is_valid(): power = form.save(commit=False) power.save()", "old_ids1: if old_id1 in new_ids1: continue else: CharacterComic.objects \\ .filter(character_id=character.character_id,", "'character' template_name = 'marvel_world/character_information.html' @method_decorator(login_required, name='dispatch') class ComicListView(generic.ListView): model =", "success_url = reverse_lazy('heritagesites/site_list') def dispatch(self, *args, **kwargs): return super().dispatch(*args, **kwargs)", "import render,redirect from django.http import HttpResponse,HttpResponseRedirect from django.views import generic", "**kwargs): return super().dispatch(*args, **kwargs) def delete(self, request, *args, **kwargs): self.object", "\"Comic updated successfully\" template_name = 'marvel_world/comic_update.html' def dispatch(self, *args, **kwargs):", "old unmatched country entries for old_id1 in old_ids1: if old_id1", "else: CharacterPower.objects \\ .filter(character_id=old_id, power_id=power.power_id) \\ .delete() return HttpResponseRedirect(power.get_absolute_url()) #", "self.request.user # site.date_updated = timezone.now() power.save() # Current country_area_id values", "= '__all__' 
<-- superseded by form_class context_object_name = 'character' #", ".values_list('power_id', flat=True)\\ .filter(character_id=character.character_id) # New countries list new_powers = form.cleaned_data['super_power']", "entries CharacterPower.objects \\ .filter(power_id=self.object.power_id) \\ .delete() self.object.delete() return HttpResponseRedirect(self.get_success_url()) @method_decorator(login_required,", "return render(request, 'marvel_world/comic_new.html', {'form': form}) #class CharacterDetailView(generic.DetailView):model = Characters context_object_name=", "comic = form.save(commit=False) comic.save() for character in form.cleaned_data['character']: CharacterComic.objects.create(character=character, comic=comic)" ]
[ "GPIO import time GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False) #Right motor input A GPIO.setup(18,GPIO.OUT)", "import RPi.GPIO as GPIO import time GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False) #Right motor", "time GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False) #Right motor input A GPIO.setup(18,GPIO.OUT) #Right motor", "main loop\"\"\" import RPi.GPIO as GPIO import time GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False)", "as GPIO import time GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False) #Right motor input A", "input A GPIO.setup(18,GPIO.OUT) #Right motor input B GPIO.setup(23,GPIO.OUT) GPIO.output(18,GPIO.HIGH) GPIO.output(23,GPIO.LOW)", "import time GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False) #Right motor input A GPIO.setup(18,GPIO.OUT) #Right", "execute the main loop\"\"\" import RPi.GPIO as GPIO import time", "motor input A GPIO.setup(18,GPIO.OUT) #Right motor input B GPIO.setup(23,GPIO.OUT) GPIO.output(18,GPIO.HIGH)", "the main loop\"\"\" import RPi.GPIO as GPIO import time GPIO.setmode(GPIO.BCM)", "and execute the main loop\"\"\" import RPi.GPIO as GPIO import", "RPi.GPIO as GPIO import time GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False) #Right motor input", "\"\"\"Set-up and execute the main loop\"\"\" import RPi.GPIO as GPIO", "loop\"\"\" import RPi.GPIO as GPIO import time GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False) #Right", "#Right motor input A GPIO.setup(18,GPIO.OUT) #Right motor input B GPIO.setup(23,GPIO.OUT)", "GPIO.setwarnings(False) #Right motor input A GPIO.setup(18,GPIO.OUT) #Right motor input B", "GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False) #Right motor input A GPIO.setup(18,GPIO.OUT) #Right motor input" ]
[ "获取查询结果 result = cur.fetchall() return result finally: cur.close() conn.close() def", "# 获取查询结果 result = cur.fetchall() return result finally: cur.close() conn.close()", "cur.fetchall() return result finally: cur.close() conn.close() def get_result_db(unitag): # 创建连接", "%s' cur.execute(sql,unitag) # 获取查询结果 result = cur.fetchall() return result finally:", "get_result_db(unitag): # 创建连接 conn = pymysql.connect(**config) cur = conn.cursor() #", "'', 'db': 'classdata', 'charset': 'utf8', 'cursorclass': pymysql.cursors.DictCursor, } def get_summary_db(unitag):", "= cur.fetchall() return result finally: cur.close() conn.close() def get_result_db(unitag): #", "def get_result_db(unitag): # 创建连接 conn = pymysql.connect(**config) cur = conn.cursor()", "{ 'host': '127.0.0.1', 'port': 3306, 'user': 'root', 'password': '', 'db':", "result finally: cur.close() conn.close() def get_result_db(unitag): # 创建连接 conn =", "= 'SELECT * FROM result where unitag= %s' cur.execute(sql,unitag) #", "try: # 执行sql语句,进行查询 sql = 'SELECT * FROM summary where", "= { 'host': '127.0.0.1', 'port': 3306, 'user': 'root', 'password': '',", "pymysql # 连接配置信息 config = { 'host': '127.0.0.1', 'port': 3306,", "sql = 'SELECT * FROM summary where unitag= %s' cur.execute(sql,unitag)", "return result finally: cur.close() conn.close() def get_result_db(unitag): # 创建连接 conn", "conn.close() def get_result_db(unitag): # 创建连接 conn = pymysql.connect(**config) cur =", "执行sql语句,进行查询 sql = 'SELECT * FROM result where unitag= %s'", "pymysql.connect(**config) cur = conn.cursor() # 执行sql语句 try: # 执行sql语句,进行查询 sql", "config = { 'host': '127.0.0.1', 'port': 3306, 'user': 'root', 'password':", "conn = pymysql.connect(**config) cur = conn.cursor() # 执行sql语句 try: #", "where unitag= %s' cur.execute(sql,unitag) # 获取查询结果 result = cur.fetchall() return", "连接配置信息 config = { 'host': '127.0.0.1', 'port': 3306, 'user': 'root',", "'classdata', 'charset': 'utf8', 'cursorclass': pymysql.cursors.DictCursor, } def get_summary_db(unitag): # 
创建连接", "# 执行sql语句 try: # 执行sql语句,进行查询 sql = 'SELECT * FROM", "unitag= %s' cur.execute(sql,unitag) # 获取查询结果 result = cur.fetchall() return result", "'user': 'root', 'password': '', 'db': 'classdata', 'charset': 'utf8', 'cursorclass': pymysql.cursors.DictCursor,", "finally: cur.close() conn.close() def get_result_db(unitag): # 创建连接 conn = pymysql.connect(**config)", "= 'SELECT * FROM summary where unitag= %s' cur.execute(sql,unitag) #", "cur.execute(sql,unitag) # 获取查询结果 result = cur.fetchall() return result finally: cur.close()", "FROM result where unitag= %s' cur.execute(sql,unitag) # 获取查询结果 result =", "'charset': 'utf8', 'cursorclass': pymysql.cursors.DictCursor, } def get_summary_db(unitag): # 创建连接 conn", "= conn.cursor() # 执行sql语句 try: # 执行sql语句,进行查询 sql = 'SELECT", "def get_summary_db(unitag): # 创建连接 conn = pymysql.connect(**config) cur = conn.cursor()", "创建连接 conn = pymysql.connect(**config) cur = conn.cursor() # 执行sql语句 try:", "result where unitag= %s' cur.execute(sql,unitag) # 获取查询结果 result = cur.fetchall()", "'host': '127.0.0.1', 'port': 3306, 'user': 'root', 'password': '', 'db': 'classdata',", "} def get_summary_db(unitag): # 创建连接 conn = pymysql.connect(**config) cur =", "* FROM summary where unitag= %s' cur.execute(sql,unitag) # 获取查询结果 result", "# 连接配置信息 config = { 'host': '127.0.0.1', 'port': 3306, 'user':", "* FROM result where unitag= %s' cur.execute(sql,unitag) # 获取查询结果 result", "cur.close() conn.close() def get_result_db(unitag): # 创建连接 conn = pymysql.connect(**config) cur", "'SELECT * FROM result where unitag= %s' cur.execute(sql,unitag) # 获取查询结果", "# 执行sql语句,进行查询 sql = 'SELECT * FROM summary where unitag=", "get_summary_db(unitag): # 创建连接 conn = pymysql.connect(**config) cur = conn.cursor() #", "'127.0.0.1', 'port': 3306, 'user': 'root', 'password': '', 'db': 'classdata', 'charset':", "执行sql语句 try: # 执行sql语句,进行查询 sql = 'SELECT * FROM result", "# 执行sql语句,进行查询 sql = 'SELECT * FROM result where unitag=", "3306, 'user': 'root', 'password': '', 'db': 
'classdata', 'charset': 'utf8', 'cursorclass':", "cur = conn.cursor() # 执行sql语句 try: # 执行sql语句,进行查询 sql =", "conn.cursor() # 执行sql语句 try: # 执行sql语句,进行查询 sql = 'SELECT *", "执行sql语句 try: # 执行sql语句,进行查询 sql = 'SELECT * FROM summary", "执行sql语句,进行查询 sql = 'SELECT * FROM summary where unitag= %s'", "try: # 执行sql语句,进行查询 sql = 'SELECT * FROM result where", "pymysql.cursors.DictCursor, } def get_summary_db(unitag): # 创建连接 conn = pymysql.connect(**config) cur", "'db': 'classdata', 'charset': 'utf8', 'cursorclass': pymysql.cursors.DictCursor, } def get_summary_db(unitag): #", "= pymysql.connect(**config) cur = conn.cursor() # 执行sql语句 try: # 执行sql语句,进行查询", "import pymysql # 连接配置信息 config = { 'host': '127.0.0.1', 'port':", "FROM summary where unitag= %s' cur.execute(sql,unitag) # 获取查询结果 result =", "summary where unitag= %s' cur.execute(sql,unitag) # 获取查询结果 result = cur.fetchall()", "# 创建连接 conn = pymysql.connect(**config) cur = conn.cursor() # 执行sql语句", "result = cur.fetchall() return result finally: cur.close() conn.close() def get_result_db(unitag):", "'port': 3306, 'user': 'root', 'password': '', 'db': 'classdata', 'charset': 'utf8',", "'SELECT * FROM summary where unitag= %s' cur.execute(sql,unitag) # 获取查询结果", "sql = 'SELECT * FROM result where unitag= %s' cur.execute(sql,unitag)", "'utf8', 'cursorclass': pymysql.cursors.DictCursor, } def get_summary_db(unitag): # 创建连接 conn =", "'password': '', 'db': 'classdata', 'charset': 'utf8', 'cursorclass': pymysql.cursors.DictCursor, } def", "'cursorclass': pymysql.cursors.DictCursor, } def get_summary_db(unitag): # 创建连接 conn = pymysql.connect(**config)", "'root', 'password': '', 'db': 'classdata', 'charset': 'utf8', 'cursorclass': pymysql.cursors.DictCursor, }" ]
[ "serializers from registerapp import models class RegisterViewSet(viewsets.ModelViewSet): serializer_class = serializers.RegisterSerializer", "rest_framework.views import APIView from registerapp import serializers from registerapp import", "import viewsets from rest_framework.views import APIView from registerapp import serializers", "import APIView from registerapp import serializers from registerapp import models", "rest_framework import viewsets from rest_framework.views import APIView from registerapp import", "import serializers from registerapp import models class RegisterViewSet(viewsets.ModelViewSet): serializer_class =", "import models class RegisterViewSet(viewsets.ModelViewSet): serializer_class = serializers.RegisterSerializer queryset = models.RegisterPage.objects.all()", "<gh_stars>0 from rest_framework import viewsets from rest_framework.views import APIView from", "registerapp import serializers from registerapp import models class RegisterViewSet(viewsets.ModelViewSet): serializer_class", "from rest_framework.views import APIView from registerapp import serializers from registerapp", "from registerapp import serializers from registerapp import models class RegisterViewSet(viewsets.ModelViewSet):", "from registerapp import models class RegisterViewSet(viewsets.ModelViewSet): serializer_class = serializers.RegisterSerializer queryset", "registerapp import models class RegisterViewSet(viewsets.ModelViewSet): serializer_class = serializers.RegisterSerializer queryset =", "APIView from registerapp import serializers from registerapp import models class", "from rest_framework import viewsets from rest_framework.views import APIView from registerapp", "viewsets from rest_framework.views import APIView from registerapp import serializers from" ]
[ "luutp Contact: <EMAIL> Created on: 2021/02/27 \"\"\" # Utilities #", "self.left_motor = DCMotor(32, 36, 38, alpha=1.0) self.right_motor = DCMotor(33, 35,", "35, 37, alpha=1.0) self.left_motor.set_speed(50) self.right_motor.set_speed(50) def set_speeds(self, left_speed, right_speed): self.left_motor.set_speed(left_speed)", "# -*- coding: utf-8 -*- \"\"\" jduck.py Description: Author: luutp", "def move_backward(self): self.left_motor.rotate_backward() self.right_motor.rotate_backward() def turn_left(self): self.left_motor.rotate_backward() self.right_motor.rotate_forward() def turn_right(self):", "================================================================================ class JDuck(SingletonConfigurable): def __init__(self, *args, **kwargs): self.left_motor = DCMotor(32,", "Utilities from traitlets.config.configurable import SingletonConfigurable # Custom Packages from jduck.DCMotor", "import SingletonConfigurable # Custom Packages from jduck.DCMotor import DCMotor #", "# ================================IMPORT PACKAGES==================================== # Utilities from traitlets.config.configurable import SingletonConfigurable #", "DCMotor(32, 36, 38, alpha=1.0) self.right_motor = DCMotor(33, 35, 37, alpha=1.0)", "JDuck(SingletonConfigurable): def __init__(self, *args, **kwargs): self.left_motor = DCMotor(32, 36, 38,", "-*- \"\"\" jduck.py Description: Author: luutp Contact: <EMAIL> Created on:", "utf-8 -*- \"\"\" jduck.py Description: Author: luutp Contact: <EMAIL> Created", "alpha=1.0) self.right_motor = DCMotor(33, 35, 37, alpha=1.0) self.left_motor.set_speed(50) self.right_motor.set_speed(50) def", "__init__(self, *args, **kwargs): self.left_motor = DCMotor(32, 36, 38, alpha=1.0) self.right_motor", "*args, **kwargs): self.left_motor = DCMotor(32, 36, 38, alpha=1.0) self.right_motor =", "Description: Author: luutp Contact: <EMAIL> Created on: 2021/02/27 \"\"\" #", "def __init__(self, *args, **kwargs): self.left_motor = DCMotor(32, 36, 38, alpha=1.0)", 
"= DCMotor(33, 35, 37, alpha=1.0) self.left_motor.set_speed(50) self.right_motor.set_speed(50) def set_speeds(self, left_speed,", "36, 38, alpha=1.0) self.right_motor = DCMotor(33, 35, 37, alpha=1.0) self.left_motor.set_speed(50)", "Packages from jduck.DCMotor import DCMotor # ================================================================================ class JDuck(SingletonConfigurable): def", "-*- coding: utf-8 -*- \"\"\" jduck.py Description: Author: luutp Contact:", "<EMAIL> Created on: 2021/02/27 \"\"\" # Utilities # %% #", "self.left_motor.rotate_backward() self.right_motor.rotate_backward() def turn_left(self): self.left_motor.rotate_backward() self.right_motor.rotate_forward() def turn_right(self): self.left_motor.rotate_forward() self.right_motor.rotate_backward()", "# Utilities # %% # ================================IMPORT PACKAGES==================================== # Utilities from", "move_forward(self): self.left_motor.rotate_forward() self.right_motor.rotate_forward() def move_backward(self): self.left_motor.rotate_backward() self.right_motor.rotate_backward() def turn_left(self): self.left_motor.rotate_backward()", "turn_left(self): self.left_motor.rotate_backward() self.right_motor.rotate_forward() def turn_right(self): self.left_motor.rotate_forward() self.right_motor.rotate_backward() def stop(self): self.left_motor.stop()", "self.right_motor.rotate_backward() def turn_left(self): self.left_motor.rotate_backward() self.right_motor.rotate_forward() def turn_right(self): self.left_motor.rotate_forward() self.right_motor.rotate_backward() def", "<reponame>luutp/jduck #!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\" jduck.py", "alpha=1.0) self.left_motor.set_speed(50) self.right_motor.set_speed(50) def set_speeds(self, left_speed, right_speed): self.left_motor.set_speed(left_speed) self.right_motor.set_speed(right_speed) def", "set_speeds(self, left_speed, right_speed): self.left_motor.set_speed(left_speed) 
self.right_motor.set_speed(right_speed) def move_forward(self): self.left_motor.rotate_forward() self.right_motor.rotate_forward() def", "def turn_left(self): self.left_motor.rotate_backward() self.right_motor.rotate_forward() def turn_right(self): self.left_motor.rotate_forward() self.right_motor.rotate_backward() def stop(self):", "self.right_motor.set_speed(right_speed) def move_forward(self): self.left_motor.rotate_forward() self.right_motor.rotate_forward() def move_backward(self): self.left_motor.rotate_backward() self.right_motor.rotate_backward() def", "jduck.DCMotor import DCMotor # ================================================================================ class JDuck(SingletonConfigurable): def __init__(self, *args,", "**kwargs): self.left_motor = DCMotor(32, 36, 38, alpha=1.0) self.right_motor = DCMotor(33,", "= DCMotor(32, 36, 38, alpha=1.0) self.right_motor = DCMotor(33, 35, 37,", "# %% # ================================IMPORT PACKAGES==================================== # Utilities from traitlets.config.configurable import", "38, alpha=1.0) self.right_motor = DCMotor(33, 35, 37, alpha=1.0) self.left_motor.set_speed(50) self.right_motor.set_speed(50)", "2021/02/27 \"\"\" # Utilities # %% # ================================IMPORT PACKAGES==================================== #", "class JDuck(SingletonConfigurable): def __init__(self, *args, **kwargs): self.left_motor = DCMotor(32, 36,", "# Utilities from traitlets.config.configurable import SingletonConfigurable # Custom Packages from", "Contact: <EMAIL> Created on: 2021/02/27 \"\"\" # Utilities # %%", "right_speed): self.left_motor.set_speed(left_speed) self.right_motor.set_speed(right_speed) def move_forward(self): self.left_motor.rotate_forward() self.right_motor.rotate_forward() def move_backward(self): self.left_motor.rotate_backward()", "SingletonConfigurable # Custom Packages from jduck.DCMotor import DCMotor # ================================================================================", 
"import DCMotor # ================================================================================ class JDuck(SingletonConfigurable): def __init__(self, *args, **kwargs):", "coding: utf-8 -*- \"\"\" jduck.py Description: Author: luutp Contact: <EMAIL>", "self.right_motor.set_speed(50) def set_speeds(self, left_speed, right_speed): self.left_motor.set_speed(left_speed) self.right_motor.set_speed(right_speed) def move_forward(self): self.left_motor.rotate_forward()", "Utilities # %% # ================================IMPORT PACKAGES==================================== # Utilities from traitlets.config.configurable", "def set_speeds(self, left_speed, right_speed): self.left_motor.set_speed(left_speed) self.right_motor.set_speed(right_speed) def move_forward(self): self.left_motor.rotate_forward() self.right_motor.rotate_forward()", "traitlets.config.configurable import SingletonConfigurable # Custom Packages from jduck.DCMotor import DCMotor", "#!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\" jduck.py Description:", "# Custom Packages from jduck.DCMotor import DCMotor # ================================================================================ class", "def move_forward(self): self.left_motor.rotate_forward() self.right_motor.rotate_forward() def move_backward(self): self.left_motor.rotate_backward() self.right_motor.rotate_backward() def turn_left(self):", "\"\"\" # Utilities # %% # ================================IMPORT PACKAGES==================================== # Utilities", "self.right_motor.rotate_forward() def move_backward(self): self.left_motor.rotate_backward() self.right_motor.rotate_backward() def turn_left(self): self.left_motor.rotate_backward() self.right_motor.rotate_forward() def", "37, alpha=1.0) self.left_motor.set_speed(50) self.right_motor.set_speed(50) def set_speeds(self, left_speed, right_speed): self.left_motor.set_speed(left_speed) self.right_motor.set_speed(right_speed)", "left_speed, right_speed): 
self.left_motor.set_speed(left_speed) self.right_motor.set_speed(right_speed) def move_forward(self): self.left_motor.rotate_forward() self.right_motor.rotate_forward() def move_backward(self):", "================================IMPORT PACKAGES==================================== # Utilities from traitlets.config.configurable import SingletonConfigurable # Custom", "Author: luutp Contact: <EMAIL> Created on: 2021/02/27 \"\"\" # Utilities", "\"\"\" jduck.py Description: Author: luutp Contact: <EMAIL> Created on: 2021/02/27", "on: 2021/02/27 \"\"\" # Utilities # %% # ================================IMPORT PACKAGES====================================", "self.left_motor.rotate_backward() self.right_motor.rotate_forward() def turn_right(self): self.left_motor.rotate_forward() self.right_motor.rotate_backward() def stop(self): self.left_motor.stop() self.right_motor.stop()", "from traitlets.config.configurable import SingletonConfigurable # Custom Packages from jduck.DCMotor import", "DCMotor(33, 35, 37, alpha=1.0) self.left_motor.set_speed(50) self.right_motor.set_speed(50) def set_speeds(self, left_speed, right_speed):", "self.left_motor.rotate_forward() self.right_motor.rotate_forward() def move_backward(self): self.left_motor.rotate_backward() self.right_motor.rotate_backward() def turn_left(self): self.left_motor.rotate_backward() self.right_motor.rotate_forward()", "python # -*- coding: utf-8 -*- \"\"\" jduck.py Description: Author:", "from jduck.DCMotor import DCMotor # ================================================================================ class JDuck(SingletonConfigurable): def __init__(self,", "jduck.py Description: Author: luutp Contact: <EMAIL> Created on: 2021/02/27 \"\"\"", "Created on: 2021/02/27 \"\"\" # Utilities # %% # ================================IMPORT", "PACKAGES==================================== # Utilities from traitlets.config.configurable import SingletonConfigurable # Custom Packages", "self.right_motor = DCMotor(33, 35, 37, 
alpha=1.0) self.left_motor.set_speed(50) self.right_motor.set_speed(50) def set_speeds(self,", "Custom Packages from jduck.DCMotor import DCMotor # ================================================================================ class JDuck(SingletonConfigurable):", "DCMotor # ================================================================================ class JDuck(SingletonConfigurable): def __init__(self, *args, **kwargs): self.left_motor", "# ================================================================================ class JDuck(SingletonConfigurable): def __init__(self, *args, **kwargs): self.left_motor =", "self.left_motor.set_speed(left_speed) self.right_motor.set_speed(right_speed) def move_forward(self): self.left_motor.rotate_forward() self.right_motor.rotate_forward() def move_backward(self): self.left_motor.rotate_backward() self.right_motor.rotate_backward()", "self.left_motor.set_speed(50) self.right_motor.set_speed(50) def set_speeds(self, left_speed, right_speed): self.left_motor.set_speed(left_speed) self.right_motor.set_speed(right_speed) def move_forward(self):", "move_backward(self): self.left_motor.rotate_backward() self.right_motor.rotate_backward() def turn_left(self): self.left_motor.rotate_backward() self.right_motor.rotate_forward() def turn_right(self): self.left_motor.rotate_forward()", "%% # ================================IMPORT PACKAGES==================================== # Utilities from traitlets.config.configurable import SingletonConfigurable" ]
[ "GraphQLNonNull, GraphQLObjectType, GraphQLResolveInfo, GraphQLScalarType, GraphQLSchema, GraphQLString, GraphQLType, Thunk, ) from", "sqla_model=None, ) -> None: super().__init__( name=name, fields=fields, description=description, out_type=out_type, extensions=extensions,", "Int = GraphQLInt InputField = GraphQLInputField ResolveInfo = GraphQLResolveInfo EnumType", "MutationPayloadType(ObjectType): pass class CreatePayloadType(MutationPayloadType): pass class UpdatePayloadType(MutationPayloadType): pass class DeletePayloadType(MutationPayloadType):", "out_type: typing.Optional[GraphQLInputFieldOutType] = None, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ast_node:", "str, fields: Thunk[GraphQLInputFieldMap], description: typing.Optional[str] = None, out_type: typing.Optional[GraphQLInputFieldOutType] =", "import GraphQLInputObjectField as GraphQLInputField except ImportError: from graphql.type import GraphQLInputField", "None, extension_ast_nodes: typing.Optional[typing.Collection[InputObjectTypeExtensionNode]] = None, sqla_model=None, ) -> None: super().__init__(", "GraphQLInputFieldMap, GraphQLInputObjectType, GraphQLInt, GraphQLInterfaceType, GraphQLIsTypeOfFn, GraphQLList, GraphQLNonNull, GraphQLObjectType, GraphQLResolveInfo, GraphQLScalarType,", "graphql.type.definition import GraphQLInputFieldOutType from nebulo.sql.composite import CompositeType as SQLACompositeType #", "import CompositeType as SQLACompositeType # Handle name changes from graphql-core", "class DeleteInputType(InputObjectType): pass class FunctionInputType(GraphQLInputObjectType): def __init__( self, name: str,", "super().__init__( name=name, fields=fields, description=description, out_type=out_type, extensions=extensions, ast_node=ast_node, extension_ast_nodes=extension_ast_nodes, ) self.sql_function", "disable= too-few-public-methods sql_function = None class HasSQLAComposite: # pylint: disable=", "= None class HasSQLFunction: # pylint: disable= 
too-few-public-methods sql_function =", "UpdateInputType(InputObjectType): pass class DeleteInputType(InputObjectType): pass class FunctionInputType(GraphQLInputObjectType): def __init__( self,", "CreatePayloadType(MutationPayloadType): pass class UpdatePayloadType(MutationPayloadType): pass class DeletePayloadType(MutationPayloadType): pass class FunctionPayloadType(MutationPayloadType,", "= GraphQLID InterfaceType = GraphQLInterfaceType Int = GraphQLInt InputField =", "GraphQLType List = GraphQLList NonNull = GraphQLNonNull Argument = GraphQLArgument", "= GraphQLString ScalarType = GraphQLScalarType ID = GraphQLID InterfaceType =", "import GraphQLInputFieldOutType from nebulo.sql.composite import CompositeType as SQLACompositeType # Handle", "self, name: str, fields: Thunk[GraphQLInputFieldMap], description: typing.Optional[str] = None, out_type:", "__init__( self, name: str, fields: Thunk[GraphQLFieldMap], interfaces: typing.Optional[Thunk[typing.Collection[\"GraphQLInterfaceType\"]]] = None,", "class ObjectType(GraphQLObjectType, HasSQLAModel): def __init__( self, name: str, fields: Thunk[GraphQLFieldMap],", "= None, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, description: typing.Optional[str] =", "graphql.language import ( InputObjectTypeDefinitionNode, InputObjectTypeExtensionNode, ObjectTypeDefinitionNode, ObjectTypeExtensionNode, ) from graphql.type", "Thunk, ) from graphql.type.definition import GraphQLInputFieldOutType from nebulo.sql.composite import CompositeType", "= GraphQLInt InputField = GraphQLInputField ResolveInfo = GraphQLResolveInfo EnumType =", "too-few-public-methods sqla_composite: SQLACompositeType class ObjectType(GraphQLObjectType, HasSQLAModel): def __init__( self, name:", "GraphQLSchema, GraphQLString, GraphQLType, Thunk, ) from graphql.type.definition import GraphQLInputFieldOutType from", "name changes from graphql-core and graphql-core-next try: from graphql.type import", 
"typing.Optional[Thunk[typing.Collection[\"GraphQLInterfaceType\"]]] = None, is_type_of: typing.Optional[GraphQLIsTypeOfFn] = None, extensions: typing.Optional[typing.Dict[str, typing.Any]]", "pass class CompositeType(ObjectType, HasSQLAComposite): pass class MutationPayloadType(ObjectType): pass class CreatePayloadType(MutationPayloadType):", "fields=fields, description=description, out_type=out_type, extensions=extensions, ast_node=ast_node, extension_ast_nodes=extension_ast_nodes, ) self.sql_function = sql_function", "typing.Any]] = None, description: typing.Optional[str] = None, ast_node: typing.Optional[ObjectTypeDefinitionNode] =", "None: super().__init__( name=name, fields=fields, interfaces=interfaces, is_type_of=is_type_of, extensions=extensions, description=description, ast_node=ast_node, extension_ast_nodes=extension_ast_nodes,", "class MutationPayloadType(ObjectType): pass class CreatePayloadType(MutationPayloadType): pass class UpdatePayloadType(MutationPayloadType): pass class", "GraphQLIsTypeOfFn, GraphQLList, GraphQLNonNull, GraphQLObjectType, GraphQLResolveInfo, GraphQLScalarType, GraphQLSchema, GraphQLString, GraphQLType, Thunk,", "SQLACompositeType class ObjectType(GraphQLObjectType, HasSQLAModel): def __init__( self, name: str, fields:", "extensions=extensions, ast_node=ast_node, extension_ast_nodes=extension_ast_nodes, ) self.sqla_model = sqla_model class CreateInputType(InputObjectType): pass", "GraphQLSchema Field = GraphQLField Float = GraphQLFloat EnumType = GraphQLEnumType", "from nebulo.sql.composite import CompositeType as SQLACompositeType # Handle name changes", "import ( GraphQLArgument, GraphQLBoolean, GraphQLEnumType, GraphQLEnumValue, GraphQLField, GraphQLFieldMap, GraphQLFloat, GraphQLID,", "typing.Optional[typing.Collection[ObjectTypeExtensionNode]] = None, sqla_model=None, ) -> None: super().__init__( name=name, fields=fields,", "pylint: disable=missing-class-docstring,invalid-name import typing from graphql.language import ( 
InputObjectTypeDefinitionNode, InputObjectTypeExtensionNode,", "class UpdateInputType(InputObjectType): pass class DeleteInputType(InputObjectType): pass class FunctionInputType(GraphQLInputObjectType): def __init__(", "extension_ast_nodes=extension_ast_nodes, ) self.sqla_model = sqla_model class ConnectionType(ObjectType): pass class EdgeType(ObjectType):", "name: str, fields: Thunk[GraphQLInputFieldMap], description: typing.Optional[str] = None, out_type: typing.Optional[GraphQLInputFieldOutType]", "ast_node: typing.Optional[InputObjectTypeDefinitionNode] = None, extension_ast_nodes: typing.Optional[typing.Collection[InputObjectTypeExtensionNode]] = None, sql_function=None, )", "super().__init__( name=name, fields=fields, description=description, out_type=out_type, extensions=extensions, ast_node=ast_node, extension_ast_nodes=extension_ast_nodes, ) self.sqla_model", "EnumType = GraphQLEnumType EnumValue = GraphQLEnumValue Schema = GraphQLSchema Field", "import GraphQLInputField Type = GraphQLType List = GraphQLList NonNull =", "changes from graphql-core and graphql-core-next try: from graphql.type import GraphQLInputObjectField", "NonNull = GraphQLNonNull Argument = GraphQLArgument Boolean = GraphQLBoolean String", "= sqla_model class CreateInputType(InputObjectType): pass class TableInputType(InputObjectType): pass class UpdateInputType(InputObjectType):", "GraphQLFloat, GraphQLID, GraphQLInputFieldMap, GraphQLInputObjectType, GraphQLInt, GraphQLInterfaceType, GraphQLIsTypeOfFn, GraphQLList, GraphQLNonNull, GraphQLObjectType,", "= GraphQLResolveInfo EnumType = GraphQLEnumType EnumValue = GraphQLEnumValue Schema =", "HasSQLFunction): pass class InputObjectType(GraphQLInputObjectType, HasSQLAModel): def __init__( self, name: str,", "try: from graphql.type import GraphQLInputObjectField as GraphQLInputField except ImportError: from", "= GraphQLEnumValue Schema = GraphQLSchema Field = GraphQLField Float =", "as GraphQLInputField except ImportError: from graphql.type 
import GraphQLInputField Type =", "GraphQLInt, GraphQLInterfaceType, GraphQLIsTypeOfFn, GraphQLList, GraphQLNonNull, GraphQLObjectType, GraphQLResolveInfo, GraphQLScalarType, GraphQLSchema, GraphQLString,", "nebulo.sql.composite import CompositeType as SQLACompositeType # Handle name changes from", "too-few-public-methods sqla_table = None class HasSQLFunction: # pylint: disable= too-few-public-methods", "Type = GraphQLType List = GraphQLList NonNull = GraphQLNonNull Argument", "class UpdatePayloadType(MutationPayloadType): pass class DeletePayloadType(MutationPayloadType): pass class FunctionPayloadType(MutationPayloadType, HasSQLFunction): pass", "None, ast_node: typing.Optional[InputObjectTypeDefinitionNode] = None, extension_ast_nodes: typing.Optional[typing.Collection[InputObjectTypeExtensionNode]] = None, sqla_model=None,", "pass class UpdatePayloadType(MutationPayloadType): pass class DeletePayloadType(MutationPayloadType): pass class FunctionPayloadType(MutationPayloadType, HasSQLFunction):", "= None, sql_function=None, ) -> None: super().__init__( name=name, fields=fields, description=description,", "GraphQLID, GraphQLInputFieldMap, GraphQLInputObjectType, GraphQLInt, GraphQLInterfaceType, GraphQLIsTypeOfFn, GraphQLList, GraphQLNonNull, GraphQLObjectType, GraphQLResolveInfo,", "Argument = GraphQLArgument Boolean = GraphQLBoolean String = GraphQLString ScalarType", "GraphQLArgument Boolean = GraphQLBoolean String = GraphQLString ScalarType = GraphQLScalarType", "None class HasSQLAComposite: # pylint: disable= too-few-public-methods sqla_composite: SQLACompositeType class", "= None, ast_node: typing.Optional[ObjectTypeDefinitionNode] = None, extension_ast_nodes: typing.Optional[typing.Collection[ObjectTypeExtensionNode]] = None,", "EnumType = GraphQLEnumType class HasSQLAModel: # pylint: disable= too-few-public-methods sqla_table", "class HasSQLFunction: # pylint: disable= too-few-public-methods sql_function = None class", "GraphQLInputField Type = 
GraphQLType List = GraphQLList NonNull = GraphQLNonNull", "fields: Thunk[GraphQLFieldMap], interfaces: typing.Optional[Thunk[typing.Collection[\"GraphQLInterfaceType\"]]] = None, is_type_of: typing.Optional[GraphQLIsTypeOfFn] = None,", "InputObjectType(GraphQLInputObjectType, HasSQLAModel): def __init__( self, name: str, fields: Thunk[GraphQLInputFieldMap], description:", "sql_function = None class HasSQLAComposite: # pylint: disable= too-few-public-methods sqla_composite:", "SQLACompositeType # Handle name changes from graphql-core and graphql-core-next try:", "GraphQLFloat EnumType = GraphQLEnumType class HasSQLAModel: # pylint: disable= too-few-public-methods", "GraphQLBoolean, GraphQLEnumType, GraphQLEnumValue, GraphQLField, GraphQLFieldMap, GraphQLFloat, GraphQLID, GraphQLInputFieldMap, GraphQLInputObjectType, GraphQLInt,", "UpdatePayloadType(MutationPayloadType): pass class DeletePayloadType(MutationPayloadType): pass class FunctionPayloadType(MutationPayloadType, HasSQLFunction): pass class", "GraphQLString, GraphQLType, Thunk, ) from graphql.type.definition import GraphQLInputFieldOutType from nebulo.sql.composite", "class TableType(ObjectType): pass class CompositeType(ObjectType, HasSQLAComposite): pass class MutationPayloadType(ObjectType): pass", "InterfaceType = GraphQLInterfaceType Int = GraphQLInt InputField = GraphQLInputField ResolveInfo", "= None, extension_ast_nodes: typing.Optional[typing.Collection[ObjectTypeExtensionNode]] = None, sqla_model=None, ) -> None:", "typing.Optional[typing.Dict[str, typing.Any]] = None, ast_node: typing.Optional[InputObjectTypeDefinitionNode] = None, extension_ast_nodes: typing.Optional[typing.Collection[InputObjectTypeExtensionNode]]", "name=name, fields=fields, description=description, out_type=out_type, extensions=extensions, ast_node=ast_node, extension_ast_nodes=extension_ast_nodes, ) self.sqla_model =", "GraphQLInterfaceType, GraphQLIsTypeOfFn, GraphQLList, GraphQLNonNull, GraphQLObjectType, 
GraphQLResolveInfo, GraphQLScalarType, GraphQLSchema, GraphQLString, GraphQLType,", "= GraphQLEnumType class HasSQLAModel: # pylint: disable= too-few-public-methods sqla_table =", "GraphQLScalarType, GraphQLSchema, GraphQLString, GraphQLType, Thunk, ) from graphql.type.definition import GraphQLInputFieldOutType", "GraphQLField Float = GraphQLFloat EnumType = GraphQLEnumType class HasSQLAModel: #", "HasSQLAComposite: # pylint: disable= too-few-public-methods sqla_composite: SQLACompositeType class ObjectType(GraphQLObjectType, HasSQLAModel):", "pass class FunctionInputType(GraphQLInputObjectType): def __init__( self, name: str, fields: Thunk[GraphQLInputFieldMap],", "GraphQLResolveInfo, GraphQLScalarType, GraphQLSchema, GraphQLString, GraphQLType, Thunk, ) from graphql.type.definition import", "None, out_type: typing.Optional[GraphQLInputFieldOutType] = None, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None,", "extensions=extensions, description=description, ast_node=ast_node, extension_ast_nodes=extension_ast_nodes, ) self.sqla_model = sqla_model class ConnectionType(ObjectType):", "fields=fields, description=description, out_type=out_type, extensions=extensions, ast_node=ast_node, extension_ast_nodes=extension_ast_nodes, ) self.sqla_model = sqla_model", "sql_function=None, ) -> None: super().__init__( name=name, fields=fields, description=description, out_type=out_type, extensions=extensions,", "typing.Optional[typing.Collection[InputObjectTypeExtensionNode]] = None, sqla_model=None, ) -> None: super().__init__( name=name, fields=fields,", "# pylint: disable= too-few-public-methods sqla_table = None class HasSQLFunction: #", "ObjectTypeExtensionNode, ) from graphql.type import ( GraphQLArgument, GraphQLBoolean, GraphQLEnumType, GraphQLEnumValue,", "HasSQLAModel): def __init__( self, name: str, fields: Thunk[GraphQLFieldMap], interfaces: typing.Optional[Thunk[typing.Collection[\"GraphQLInterfaceType\"]]]", "FunctionPayloadType(MutationPayloadType, 
HasSQLFunction): pass class InputObjectType(GraphQLInputObjectType, HasSQLAModel): def __init__( self, name:", "typing.Optional[typing.Dict[str, typing.Any]] = None, description: typing.Optional[str] = None, ast_node: typing.Optional[ObjectTypeDefinitionNode]", "= GraphQLSchema Field = GraphQLField Float = GraphQLFloat EnumType =", "None, ast_node: typing.Optional[InputObjectTypeDefinitionNode] = None, extension_ast_nodes: typing.Optional[typing.Collection[InputObjectTypeExtensionNode]] = None, sql_function=None,", "GraphQLObjectType, GraphQLResolveInfo, GraphQLScalarType, GraphQLSchema, GraphQLString, GraphQLType, Thunk, ) from graphql.type.definition", "pylint: disable= too-few-public-methods sqla_table = None class HasSQLFunction: # pylint:", "class HasSQLAComposite: # pylint: disable= too-few-public-methods sqla_composite: SQLACompositeType class ObjectType(GraphQLObjectType,", "description=description, ast_node=ast_node, extension_ast_nodes=extension_ast_nodes, ) self.sqla_model = sqla_model class ConnectionType(ObjectType): pass", "FunctionInputType(GraphQLInputObjectType): def __init__( self, name: str, fields: Thunk[GraphQLInputFieldMap], description: typing.Optional[str]", "def __init__( self, name: str, fields: Thunk[GraphQLInputFieldMap], description: typing.Optional[str] =", "except ImportError: from graphql.type import GraphQLInputField Type = GraphQLType List", "pass class EdgeType(ObjectType): pass class TableType(ObjectType): pass class CompositeType(ObjectType, HasSQLAComposite):", "pylint: disable= too-few-public-methods sqla_composite: SQLACompositeType class ObjectType(GraphQLObjectType, HasSQLAModel): def __init__(", "HasSQLAModel: # pylint: disable= too-few-public-methods sqla_table = None class HasSQLFunction:", "= None, extension_ast_nodes: typing.Optional[typing.Collection[InputObjectTypeExtensionNode]] = None, sqla_model=None, ) -> None:", "Schema = GraphQLSchema Field = GraphQLField Float = GraphQLFloat EnumType", 
"InputObjectTypeExtensionNode, ObjectTypeDefinitionNode, ObjectTypeExtensionNode, ) from graphql.type import ( GraphQLArgument, GraphQLBoolean,", "InputObjectTypeDefinitionNode, InputObjectTypeExtensionNode, ObjectTypeDefinitionNode, ObjectTypeExtensionNode, ) from graphql.type import ( GraphQLArgument,", "GraphQLID InterfaceType = GraphQLInterfaceType Int = GraphQLInt InputField = GraphQLInputField", "typing.Any]] = None, ast_node: typing.Optional[InputObjectTypeDefinitionNode] = None, extension_ast_nodes: typing.Optional[typing.Collection[InputObjectTypeExtensionNode]] =", "sqla_model=None, ) -> None: super().__init__( name=name, fields=fields, interfaces=interfaces, is_type_of=is_type_of, extensions=extensions,", "( GraphQLArgument, GraphQLBoolean, GraphQLEnumType, GraphQLEnumValue, GraphQLField, GraphQLFieldMap, GraphQLFloat, GraphQLID, GraphQLInputFieldMap,", "GraphQLInt InputField = GraphQLInputField ResolveInfo = GraphQLResolveInfo EnumType = GraphQLEnumType", "GraphQLInputField ResolveInfo = GraphQLResolveInfo EnumType = GraphQLEnumType EnumValue = GraphQLEnumValue", "super().__init__( name=name, fields=fields, interfaces=interfaces, is_type_of=is_type_of, extensions=extensions, description=description, ast_node=ast_node, extension_ast_nodes=extension_ast_nodes, )", "extension_ast_nodes: typing.Optional[typing.Collection[ObjectTypeExtensionNode]] = None, sqla_model=None, ) -> None: super().__init__( name=name,", "interfaces=interfaces, is_type_of=is_type_of, extensions=extensions, description=description, ast_node=ast_node, extension_ast_nodes=extension_ast_nodes, ) self.sqla_model = sqla_model", "= None class HasSQLAComposite: # pylint: disable= too-few-public-methods sqla_composite: SQLACompositeType", "self.sqla_model = sqla_model class ConnectionType(ObjectType): pass class EdgeType(ObjectType): pass class", "ast_node=ast_node, extension_ast_nodes=extension_ast_nodes, ) self.sqla_model = sqla_model class CreateInputType(InputObjectType): pass class", 
"is_type_of: typing.Optional[GraphQLIsTypeOfFn] = None, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, description:", "= None, is_type_of: typing.Optional[GraphQLIsTypeOfFn] = None, extensions: typing.Optional[typing.Dict[str, typing.Any]] =", "typing.Optional[GraphQLInputFieldOutType] = None, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ast_node: typing.Optional[InputObjectTypeDefinitionNode]", "= sqla_model class ConnectionType(ObjectType): pass class EdgeType(ObjectType): pass class TableType(ObjectType):", ") self.sqla_model = sqla_model class CreateInputType(InputObjectType): pass class TableInputType(InputObjectType): pass", "ImportError: from graphql.type import GraphQLInputField Type = GraphQLType List =", "HasSQLFunction: # pylint: disable= too-few-public-methods sql_function = None class HasSQLAComposite:", "self, name: str, fields: Thunk[GraphQLFieldMap], interfaces: typing.Optional[Thunk[typing.Collection[\"GraphQLInterfaceType\"]]] = None, is_type_of:", "__init__( self, name: str, fields: Thunk[GraphQLInputFieldMap], description: typing.Optional[str] = None,", "extension_ast_nodes: typing.Optional[typing.Collection[InputObjectTypeExtensionNode]] = None, sql_function=None, ) -> None: super().__init__( name=name,", "# pylint: disable= too-few-public-methods sqla_composite: SQLACompositeType class ObjectType(GraphQLObjectType, HasSQLAModel): def", "class CompositeType(ObjectType, HasSQLAComposite): pass class MutationPayloadType(ObjectType): pass class CreatePayloadType(MutationPayloadType): pass", "= GraphQLInterfaceType Int = GraphQLInt InputField = GraphQLInputField ResolveInfo =", "GraphQLInterfaceType Int = GraphQLInt InputField = GraphQLInputField ResolveInfo = GraphQLResolveInfo", "typing.Optional[ObjectTypeDefinitionNode] = None, extension_ast_nodes: typing.Optional[typing.Collection[ObjectTypeExtensionNode]] = None, sqla_model=None, ) ->", "None, ast_node: typing.Optional[ObjectTypeDefinitionNode] = None, 
extension_ast_nodes: typing.Optional[typing.Collection[ObjectTypeExtensionNode]] = None, sqla_model=None,", "class InputObjectType(GraphQLInputObjectType, HasSQLAModel): def __init__( self, name: str, fields: Thunk[GraphQLInputFieldMap],", "extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, description: typing.Optional[str] = None, ast_node:", "DeletePayloadType(MutationPayloadType): pass class FunctionPayloadType(MutationPayloadType, HasSQLFunction): pass class InputObjectType(GraphQLInputObjectType, HasSQLAModel): def", "graphql.type import GraphQLInputObjectField as GraphQLInputField except ImportError: from graphql.type import", "from graphql.type import GraphQLInputField Type = GraphQLType List = GraphQLList", "pass class DeleteInputType(InputObjectType): pass class FunctionInputType(GraphQLInputObjectType): def __init__( self, name:", "def __init__( self, name: str, fields: Thunk[GraphQLFieldMap], interfaces: typing.Optional[Thunk[typing.Collection[\"GraphQLInterfaceType\"]]] =", "TableInputType(InputObjectType): pass class UpdateInputType(InputObjectType): pass class DeleteInputType(InputObjectType): pass class FunctionInputType(GraphQLInputObjectType):", "import ( InputObjectTypeDefinitionNode, InputObjectTypeExtensionNode, ObjectTypeDefinitionNode, ObjectTypeExtensionNode, ) from graphql.type import", "sqla_model class ConnectionType(ObjectType): pass class EdgeType(ObjectType): pass class TableType(ObjectType): pass", "EdgeType(ObjectType): pass class TableType(ObjectType): pass class CompositeType(ObjectType, HasSQLAComposite): pass class", "None, sqla_model=None, ) -> None: super().__init__( name=name, fields=fields, interfaces=interfaces, is_type_of=is_type_of,", "= None, ast_node: typing.Optional[InputObjectTypeDefinitionNode] = None, extension_ast_nodes: typing.Optional[typing.Collection[InputObjectTypeExtensionNode]] = None,", "import typing from graphql.language import ( InputObjectTypeDefinitionNode, InputObjectTypeExtensionNode, 
ObjectTypeDefinitionNode, ObjectTypeExtensionNode,", "= GraphQLEnumType EnumValue = GraphQLEnumValue Schema = GraphQLSchema Field =", "ObjectTypeDefinitionNode, ObjectTypeExtensionNode, ) from graphql.type import ( GraphQLArgument, GraphQLBoolean, GraphQLEnumType,", "DeleteInputType(InputObjectType): pass class FunctionInputType(GraphQLInputObjectType): def __init__( self, name: str, fields:", "class CreatePayloadType(MutationPayloadType): pass class UpdatePayloadType(MutationPayloadType): pass class DeletePayloadType(MutationPayloadType): pass class", "typing.Optional[InputObjectTypeDefinitionNode] = None, extension_ast_nodes: typing.Optional[typing.Collection[InputObjectTypeExtensionNode]] = None, sqla_model=None, ) ->", "ast_node: typing.Optional[InputObjectTypeDefinitionNode] = None, extension_ast_nodes: typing.Optional[typing.Collection[InputObjectTypeExtensionNode]] = None, sqla_model=None, )", "None, sqla_model=None, ) -> None: super().__init__( name=name, fields=fields, description=description, out_type=out_type,", "and graphql-core-next try: from graphql.type import GraphQLInputObjectField as GraphQLInputField except", "is_type_of=is_type_of, extensions=extensions, description=description, ast_node=ast_node, extension_ast_nodes=extension_ast_nodes, ) self.sqla_model = sqla_model class", "disable= too-few-public-methods sqla_table = None class HasSQLFunction: # pylint: disable=", "typing from graphql.language import ( InputObjectTypeDefinitionNode, InputObjectTypeExtensionNode, ObjectTypeDefinitionNode, ObjectTypeExtensionNode, )", "= GraphQLArgument Boolean = GraphQLBoolean String = GraphQLString ScalarType =", "= GraphQLField Float = GraphQLFloat EnumType = GraphQLEnumType class HasSQLAModel:", "self.sqla_model = sqla_model class CreateInputType(InputObjectType): pass class TableInputType(InputObjectType): pass class", "HasSQLAModel): def __init__( self, name: str, fields: Thunk[GraphQLInputFieldMap], description: typing.Optional[str]", 
"extension_ast_nodes=extension_ast_nodes, ) self.sqla_model = sqla_model class CreateInputType(InputObjectType): pass class TableInputType(InputObjectType):", "None, extension_ast_nodes: typing.Optional[typing.Collection[InputObjectTypeExtensionNode]] = None, sql_function=None, ) -> None: super().__init__(", "= GraphQLFloat EnumType = GraphQLEnumType class HasSQLAModel: # pylint: disable=", "from graphql.type.definition import GraphQLInputFieldOutType from nebulo.sql.composite import CompositeType as SQLACompositeType", ") self.sqla_model = sqla_model class ConnectionType(ObjectType): pass class EdgeType(ObjectType): pass", "Thunk[GraphQLInputFieldMap], description: typing.Optional[str] = None, out_type: typing.Optional[GraphQLInputFieldOutType] = None, extensions:", "GraphQLArgument, GraphQLBoolean, GraphQLEnumType, GraphQLEnumValue, GraphQLField, GraphQLFieldMap, GraphQLFloat, GraphQLID, GraphQLInputFieldMap, GraphQLInputObjectType,", "extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ast_node: typing.Optional[InputObjectTypeDefinitionNode] = None, extension_ast_nodes:", "# pylint: disable=missing-class-docstring,invalid-name import typing from graphql.language import ( InputObjectTypeDefinitionNode,", "Handle name changes from graphql-core and graphql-core-next try: from graphql.type", "pass class TableType(ObjectType): pass class CompositeType(ObjectType, HasSQLAComposite): pass class MutationPayloadType(ObjectType):", "None, extension_ast_nodes: typing.Optional[typing.Collection[ObjectTypeExtensionNode]] = None, sqla_model=None, ) -> None: super().__init__(", "ScalarType = GraphQLScalarType ID = GraphQLID InterfaceType = GraphQLInterfaceType Int", "from graphql.type import GraphQLInputObjectField as GraphQLInputField except ImportError: from graphql.type", "= GraphQLInputField ResolveInfo = GraphQLResolveInfo EnumType = GraphQLEnumType EnumValue =", "from graphql.language import ( InputObjectTypeDefinitionNode, InputObjectTypeExtensionNode, 
ObjectTypeDefinitionNode, ObjectTypeExtensionNode, ) from", "from graphql-core and graphql-core-next try: from graphql.type import GraphQLInputObjectField as", "None, is_type_of: typing.Optional[GraphQLIsTypeOfFn] = None, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None,", "ast_node=ast_node, extension_ast_nodes=extension_ast_nodes, ) self.sqla_model = sqla_model class ConnectionType(ObjectType): pass class", "too-few-public-methods sql_function = None class HasSQLAComposite: # pylint: disable= too-few-public-methods", "class ConnectionType(ObjectType): pass class EdgeType(ObjectType): pass class TableType(ObjectType): pass class", "Field = GraphQLField Float = GraphQLFloat EnumType = GraphQLEnumType class", "pass class TableInputType(InputObjectType): pass class UpdateInputType(InputObjectType): pass class DeleteInputType(InputObjectType): pass", "sqla_composite: SQLACompositeType class ObjectType(GraphQLObjectType, HasSQLAModel): def __init__( self, name: str,", "name=name, fields=fields, description=description, out_type=out_type, extensions=extensions, ast_node=ast_node, extension_ast_nodes=extension_ast_nodes, ) self.sql_function =", "ConnectionType(ObjectType): pass class EdgeType(ObjectType): pass class TableType(ObjectType): pass class CompositeType(ObjectType,", "GraphQLEnumType, GraphQLEnumValue, GraphQLField, GraphQLFieldMap, GraphQLFloat, GraphQLID, GraphQLInputFieldMap, GraphQLInputObjectType, GraphQLInt, GraphQLInterfaceType,", "Boolean = GraphQLBoolean String = GraphQLString ScalarType = GraphQLScalarType ID", "graphql.type import GraphQLInputField Type = GraphQLType List = GraphQLList NonNull", "List = GraphQLList NonNull = GraphQLNonNull Argument = GraphQLArgument Boolean", "None class HasSQLFunction: # pylint: disable= too-few-public-methods sql_function = None", "= None, out_type: typing.Optional[GraphQLInputFieldOutType] = None, extensions: typing.Optional[typing.Dict[str, typing.Any]] =", "GraphQLInputField except ImportError: 
from graphql.type import GraphQLInputField Type = GraphQLType", "# Handle name changes from graphql-core and graphql-core-next try: from", "graphql-core-next try: from graphql.type import GraphQLInputObjectField as GraphQLInputField except ImportError:", "( InputObjectTypeDefinitionNode, InputObjectTypeExtensionNode, ObjectTypeDefinitionNode, ObjectTypeExtensionNode, ) from graphql.type import (", ") from graphql.type.definition import GraphQLInputFieldOutType from nebulo.sql.composite import CompositeType as", "GraphQLInputObjectType, GraphQLInt, GraphQLInterfaceType, GraphQLIsTypeOfFn, GraphQLList, GraphQLNonNull, GraphQLObjectType, GraphQLResolveInfo, GraphQLScalarType, GraphQLSchema,", "GraphQLNonNull Argument = GraphQLArgument Boolean = GraphQLBoolean String = GraphQLString", "= GraphQLBoolean String = GraphQLString ScalarType = GraphQLScalarType ID =", "HasSQLAComposite): pass class MutationPayloadType(ObjectType): pass class CreatePayloadType(MutationPayloadType): pass class UpdatePayloadType(MutationPayloadType):", "None: super().__init__( name=name, fields=fields, description=description, out_type=out_type, extensions=extensions, ast_node=ast_node, extension_ast_nodes=extension_ast_nodes, )", "# pylint: disable= too-few-public-methods sql_function = None class HasSQLAComposite: #", "pass class MutationPayloadType(ObjectType): pass class CreatePayloadType(MutationPayloadType): pass class UpdatePayloadType(MutationPayloadType): pass", "GraphQLEnumValue, GraphQLField, GraphQLFieldMap, GraphQLFloat, GraphQLID, GraphQLInputFieldMap, GraphQLInputObjectType, GraphQLInt, GraphQLInterfaceType, GraphQLIsTypeOfFn,", "GraphQLEnumType EnumValue = GraphQLEnumValue Schema = GraphQLSchema Field = GraphQLField", "CreateInputType(InputObjectType): pass class TableInputType(InputObjectType): pass class UpdateInputType(InputObjectType): pass class DeleteInputType(InputObjectType):", ") -> None: super().__init__( name=name, fields=fields, description=description, 
out_type=out_type, extensions=extensions, ast_node=ast_node,", "= None, sqla_model=None, ) -> None: super().__init__( name=name, fields=fields, interfaces=interfaces,", "= GraphQLScalarType ID = GraphQLID InterfaceType = GraphQLInterfaceType Int =", "description: typing.Optional[str] = None, ast_node: typing.Optional[ObjectTypeDefinitionNode] = None, extension_ast_nodes: typing.Optional[typing.Collection[ObjectTypeExtensionNode]]", "typing.Optional[str] = None, out_type: typing.Optional[GraphQLInputFieldOutType] = None, extensions: typing.Optional[typing.Dict[str, typing.Any]]", "None, description: typing.Optional[str] = None, ast_node: typing.Optional[ObjectTypeDefinitionNode] = None, extension_ast_nodes:", "class FunctionPayloadType(MutationPayloadType, HasSQLFunction): pass class InputObjectType(GraphQLInputObjectType, HasSQLAModel): def __init__( self,", "GraphQLInputObjectField as GraphQLInputField except ImportError: from graphql.type import GraphQLInputField Type", "fields: Thunk[GraphQLInputFieldMap], description: typing.Optional[str] = None, out_type: typing.Optional[GraphQLInputFieldOutType] = None,", "name=name, fields=fields, interfaces=interfaces, is_type_of=is_type_of, extensions=extensions, description=description, ast_node=ast_node, extension_ast_nodes=extension_ast_nodes, ) self.sqla_model", "class FunctionInputType(GraphQLInputObjectType): def __init__( self, name: str, fields: Thunk[GraphQLInputFieldMap], description:", "GraphQLList, GraphQLNonNull, GraphQLObjectType, GraphQLResolveInfo, GraphQLScalarType, GraphQLSchema, GraphQLString, GraphQLType, Thunk, )", "GraphQLBoolean String = GraphQLString ScalarType = GraphQLScalarType ID = GraphQLID", "from graphql.type import ( GraphQLArgument, GraphQLBoolean, GraphQLEnumType, GraphQLEnumValue, GraphQLField, GraphQLFieldMap,", "typing.Optional[GraphQLIsTypeOfFn] = None, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, description: typing.Optional[str]", "-> None: super().__init__( 
name=name, fields=fields, interfaces=interfaces, is_type_of=is_type_of, extensions=extensions, description=description, ast_node=ast_node,", "GraphQLString ScalarType = GraphQLScalarType ID = GraphQLID InterfaceType = GraphQLInterfaceType", "disable=missing-class-docstring,invalid-name import typing from graphql.language import ( InputObjectTypeDefinitionNode, InputObjectTypeExtensionNode, ObjectTypeDefinitionNode,", "= None, sqla_model=None, ) -> None: super().__init__( name=name, fields=fields, description=description,", "typing.Optional[InputObjectTypeDefinitionNode] = None, extension_ast_nodes: typing.Optional[typing.Collection[InputObjectTypeExtensionNode]] = None, sql_function=None, ) ->", "CompositeType(ObjectType, HasSQLAComposite): pass class MutationPayloadType(ObjectType): pass class CreatePayloadType(MutationPayloadType): pass class", "pass class FunctionPayloadType(MutationPayloadType, HasSQLFunction): pass class InputObjectType(GraphQLInputObjectType, HasSQLAModel): def __init__(", "EnumValue = GraphQLEnumValue Schema = GraphQLSchema Field = GraphQLField Float", "out_type=out_type, extensions=extensions, ast_node=ast_node, extension_ast_nodes=extension_ast_nodes, ) self.sqla_model = sqla_model class CreateInputType(InputObjectType):", "sqla_model class CreateInputType(InputObjectType): pass class TableInputType(InputObjectType): pass class UpdateInputType(InputObjectType): pass", "typing.Optional[str] = None, ast_node: typing.Optional[ObjectTypeDefinitionNode] = None, extension_ast_nodes: typing.Optional[typing.Collection[ObjectTypeExtensionNode]] =", "String = GraphQLString ScalarType = GraphQLScalarType ID = GraphQLID InterfaceType", "CompositeType as SQLACompositeType # Handle name changes from graphql-core and", "GraphQLField, GraphQLFieldMap, GraphQLFloat, GraphQLID, GraphQLInputFieldMap, GraphQLInputObjectType, GraphQLInt, GraphQLInterfaceType, GraphQLIsTypeOfFn, GraphQLList,", "GraphQLScalarType ID = GraphQLID InterfaceType = 
GraphQLInterfaceType Int = GraphQLInt", "GraphQLEnumValue Schema = GraphQLSchema Field = GraphQLField Float = GraphQLFloat", "pass class InputObjectType(GraphQLInputObjectType, HasSQLAModel): def __init__( self, name: str, fields:", "InputField = GraphQLInputField ResolveInfo = GraphQLResolveInfo EnumType = GraphQLEnumType EnumValue", "= None, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ast_node: typing.Optional[InputObjectTypeDefinitionNode] =", "extension_ast_nodes: typing.Optional[typing.Collection[InputObjectTypeExtensionNode]] = None, sqla_model=None, ) -> None: super().__init__( name=name,", "pass class UpdateInputType(InputObjectType): pass class DeleteInputType(InputObjectType): pass class FunctionInputType(GraphQLInputObjectType): def", "None, sql_function=None, ) -> None: super().__init__( name=name, fields=fields, description=description, out_type=out_type,", "class DeletePayloadType(MutationPayloadType): pass class FunctionPayloadType(MutationPayloadType, HasSQLFunction): pass class InputObjectType(GraphQLInputObjectType, HasSQLAModel):", "class CreateInputType(InputObjectType): pass class TableInputType(InputObjectType): pass class UpdateInputType(InputObjectType): pass class", "description: typing.Optional[str] = None, out_type: typing.Optional[GraphQLInputFieldOutType] = None, extensions: typing.Optional[typing.Dict[str,", "= None, description: typing.Optional[str] = None, ast_node: typing.Optional[ObjectTypeDefinitionNode] = None,", "Thunk[GraphQLFieldMap], interfaces: typing.Optional[Thunk[typing.Collection[\"GraphQLInterfaceType\"]]] = None, is_type_of: typing.Optional[GraphQLIsTypeOfFn] = None, extensions:", "GraphQLFieldMap, GraphQLFloat, GraphQLID, GraphQLInputFieldMap, GraphQLInputObjectType, GraphQLInt, GraphQLInterfaceType, GraphQLIsTypeOfFn, GraphQLList, GraphQLNonNull,", "ID = GraphQLID InterfaceType = GraphQLInterfaceType Int = GraphQLInt InputField", "ResolveInfo = GraphQLResolveInfo EnumType = GraphQLEnumType 
EnumValue = GraphQLEnumValue Schema", "graphql.type import ( GraphQLArgument, GraphQLBoolean, GraphQLEnumType, GraphQLEnumValue, GraphQLField, GraphQLFieldMap, GraphQLFloat,", "Float = GraphQLFloat EnumType = GraphQLEnumType class HasSQLAModel: # pylint:", "interfaces: typing.Optional[Thunk[typing.Collection[\"GraphQLInterfaceType\"]]] = None, is_type_of: typing.Optional[GraphQLIsTypeOfFn] = None, extensions: typing.Optional[typing.Dict[str,", "GraphQLType, Thunk, ) from graphql.type.definition import GraphQLInputFieldOutType from nebulo.sql.composite import", "= GraphQLType List = GraphQLList NonNull = GraphQLNonNull Argument =", "typing.Optional[typing.Collection[InputObjectTypeExtensionNode]] = None, sql_function=None, ) -> None: super().__init__( name=name, fields=fields,", "class EdgeType(ObjectType): pass class TableType(ObjectType): pass class CompositeType(ObjectType, HasSQLAComposite): pass", "fields=fields, interfaces=interfaces, is_type_of=is_type_of, extensions=extensions, description=description, ast_node=ast_node, extension_ast_nodes=extension_ast_nodes, ) self.sqla_model =", "= None, extension_ast_nodes: typing.Optional[typing.Collection[InputObjectTypeExtensionNode]] = None, sql_function=None, ) -> None:", "class TableInputType(InputObjectType): pass class UpdateInputType(InputObjectType): pass class DeleteInputType(InputObjectType): pass class", "GraphQLEnumType class HasSQLAModel: # pylint: disable= too-few-public-methods sqla_table = None", "graphql-core and graphql-core-next try: from graphql.type import GraphQLInputObjectField as GraphQLInputField", "disable= too-few-public-methods sqla_composite: SQLACompositeType class ObjectType(GraphQLObjectType, HasSQLAModel): def __init__( self,", "= GraphQLList NonNull = GraphQLNonNull Argument = GraphQLArgument Boolean =", ") -> None: super().__init__( name=name, fields=fields, interfaces=interfaces, is_type_of=is_type_of, extensions=extensions, description=description,", "GraphQLInputFieldOutType from 
nebulo.sql.composite import CompositeType as SQLACompositeType # Handle name", "ast_node: typing.Optional[ObjectTypeDefinitionNode] = None, extension_ast_nodes: typing.Optional[typing.Collection[ObjectTypeExtensionNode]] = None, sqla_model=None, )", "ObjectType(GraphQLObjectType, HasSQLAModel): def __init__( self, name: str, fields: Thunk[GraphQLFieldMap], interfaces:", "str, fields: Thunk[GraphQLFieldMap], interfaces: typing.Optional[Thunk[typing.Collection[\"GraphQLInterfaceType\"]]] = None, is_type_of: typing.Optional[GraphQLIsTypeOfFn] =", "name: str, fields: Thunk[GraphQLFieldMap], interfaces: typing.Optional[Thunk[typing.Collection[\"GraphQLInterfaceType\"]]] = None, is_type_of: typing.Optional[GraphQLIsTypeOfFn]", "pass class DeletePayloadType(MutationPayloadType): pass class FunctionPayloadType(MutationPayloadType, HasSQLFunction): pass class InputObjectType(GraphQLInputObjectType,", ") from graphql.type import ( GraphQLArgument, GraphQLBoolean, GraphQLEnumType, GraphQLEnumValue, GraphQLField,", "sqla_table = None class HasSQLFunction: # pylint: disable= too-few-public-methods sql_function", "pylint: disable= too-few-public-methods sql_function = None class HasSQLAComposite: # pylint:", "class HasSQLAModel: # pylint: disable= too-few-public-methods sqla_table = None class", "GraphQLList NonNull = GraphQLNonNull Argument = GraphQLArgument Boolean = GraphQLBoolean", "None, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, ast_node: typing.Optional[InputObjectTypeDefinitionNode] = None,", "description=description, out_type=out_type, extensions=extensions, ast_node=ast_node, extension_ast_nodes=extension_ast_nodes, ) self.sqla_model = sqla_model class", "= GraphQLNonNull Argument = GraphQLArgument Boolean = GraphQLBoolean String =", "as SQLACompositeType # Handle name changes from graphql-core and graphql-core-next", "GraphQLResolveInfo EnumType = GraphQLEnumType EnumValue = GraphQLEnumValue Schema = GraphQLSchema", "pass class 
CreatePayloadType(MutationPayloadType): pass class UpdatePayloadType(MutationPayloadType): pass class DeletePayloadType(MutationPayloadType): pass", "-> None: super().__init__( name=name, fields=fields, description=description, out_type=out_type, extensions=extensions, ast_node=ast_node, extension_ast_nodes=extension_ast_nodes,", "None, extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, description: typing.Optional[str] = None,", "TableType(ObjectType): pass class CompositeType(ObjectType, HasSQLAComposite): pass class MutationPayloadType(ObjectType): pass class" ]
[ "want the tensorflow from the environment). tf = importlib.import_module(\"tensorflow\") #", "obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0", "LLC # # Licensed under the Apache License, Version 2.0", "grossly functions. This is largely here to verify that everything", "b): return tf.tanh(a + b) class RuntimeTest(tf.test.TestCase): def testLoadSavedModelToXlaPipeline(self): \"\"\"Tests", "# Dynamically import tensorflow. try: # Use a dynamic import", "print(\"XLA ASM:\", xla_asm) self.assertRegex(xla_asm, \"mhlo.tanh\") if __name__ == \"__main__\": tf.test.main()", "model to XLA workflow grossly functions. This is largely here", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "2.0 (the \"License\"); # you may not use this file", "agreed to in writing, software # distributed under the License", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Unless required by applicable law or agreed to in writing,", "as to avoid hermetic dependency analysis # (i.e. we only", "analysis # (i.e. we only want the tensorflow from the", "tests because tensorflow is not available\") sys.exit(0) class StatelessModule(tf.Module): def", "try: # Use a dynamic import so as to avoid", "division from __future__ import print_function import importlib import os import", "my_module = StatelessModule() options = tf.saved_model.SaveOptions(save_debug_info=True) tf.saved_model.save(my_module, sm_dir, options=options) #", "the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "return tf.tanh(a + b) class RuntimeTest(tf.test.TestCase): def testLoadSavedModelToXlaPipeline(self): \"\"\"Tests that", "distributed under the License is distributed on an \"AS IS\"", "we only want the tensorflow from the environment). tf =", "functions. 
This is largely here to verify that everything is", "the specific language governing permissions and # limitations under the", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "tf.saved_model.save(my_module, sm_dir, options=options) # Load it up. input_module = compiler.tf_load_saved_model(sm_dir)", "# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "hermetic dependency analysis # (i.e. we only want the tensorflow", "a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 #", "except ImportError: print(\"Not running tests because tensorflow is not available\")", "to verify that everything is linked in that needs to", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "b) class RuntimeTest(tf.test.TestCase): def testLoadSavedModelToXlaPipeline(self): \"\"\"Tests that a basic saved", "here to verify that everything is linked in that needs", "except in compliance with the License. # You may obtain", "= importlib.import_module(\"tensorflow\") # Just in case if linked against a", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "case if linked against a pre-V2 defaulted version. 
if hasattr(tf,", "# Use a dynamic import so as to avoid hermetic", "not use this file except in compliance with the License.", "everything is linked in that needs to be and that", "class StatelessModule(tf.Module): def __init__(self): pass @tf.function(input_signature=[ tf.TensorSpec([4], tf.float32), tf.TensorSpec([4], tf.float32)", "copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # #", "Google LLC # # Licensed under the Apache License, Version", "tensorflow is not available\") sys.exit(0) class StatelessModule(tf.Module): def __init__(self): pass", "def __init__(self): pass @tf.function(input_signature=[ tf.TensorSpec([4], tf.float32), tf.TensorSpec([4], tf.float32) ]) def", "in that needs to be and that there are not", "is linked in that needs to be and that there", "be and that there are not no-ops, etc. \"\"\" with", "writing, software # distributed under the License is distributed on", "temp_dir: sm_dir = os.path.join(temp_dir, \"simple.sm\") print(\"Saving to:\", sm_dir) my_module =", "in writing, software # distributed under the License is distributed", "input_module.to_asm() print(\"XLA ASM:\", xla_asm) self.assertRegex(xla_asm, \"mhlo.tanh\") if __name__ == \"__main__\":", "you may not use this file except in compliance with", "import so as to avoid hermetic dependency analysis # (i.e.", "Just in case if linked against a pre-V2 defaulted version.", "workflow grossly functions. This is largely here to verify that", "largely here to verify that everything is linked in that", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "the environment). tf = importlib.import_module(\"tensorflow\") # Just in case if", "language governing permissions and # limitations under the License. from", "the License. from __future__ import absolute_import from __future__ import division", "]) def add(self, a, b): return tf.tanh(a + b) class", "basic saved model to XLA workflow grossly functions. 
This is", "not available\") sys.exit(0) class StatelessModule(tf.Module): def __init__(self): pass @tf.function(input_signature=[ tf.TensorSpec([4],", "dynamic import so as to avoid hermetic dependency analysis #", "tf.saved_model.SaveOptions(save_debug_info=True) tf.saved_model.save(my_module, sm_dir, options=options) # Load it up. input_module =", "use this file except in compliance with the License. #", "to avoid hermetic dependency analysis # (i.e. we only want", "os.path.join(temp_dir, \"simple.sm\") print(\"Saving to:\", sm_dir) my_module = StatelessModule() options =", "limitations under the License. from __future__ import absolute_import from __future__", "CONDITIONS OF ANY KIND, either express or implied. # See", "tf = tf.compat.v2 except ImportError: print(\"Not running tests because tensorflow", "there are not no-ops, etc. \"\"\" with tempfile.TemporaryDirectory() as temp_dir:", "sys.exit(0) class StatelessModule(tf.Module): def __init__(self): pass @tf.function(input_signature=[ tf.TensorSpec([4], tf.float32), tf.TensorSpec([4],", "StatelessModule() options = tf.saved_model.SaveOptions(save_debug_info=True) tf.saved_model.save(my_module, sm_dir, options=options) # Load it", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "governing permissions and # limitations under the License. from __future__", "pre-V2 defaulted version. if hasattr(tf, \"enable_v2_behavior\"): tf.enable_v2_behavior() tf = tf.compat.v2", "License. # You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License, Version 2.0 (the \"License\"); # you may not use", "(i.e. we only want the tensorflow from the environment). tf", "a, b): return tf.tanh(a + b) class RuntimeTest(tf.test.TestCase): def testLoadSavedModelToXlaPipeline(self):", "of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless", "Dynamically import tensorflow. 
try: # Use a dynamic import so", "@tf.function(input_signature=[ tf.TensorSpec([4], tf.float32), tf.TensorSpec([4], tf.float32) ]) def add(self, a, b):", "# You may obtain a copy of the License at", "are not no-ops, etc. \"\"\" with tempfile.TemporaryDirectory() as temp_dir: sm_dir", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "tf.float32) ]) def add(self, a, b): return tf.tanh(a + b)", "only want the tensorflow from the environment). tf = importlib.import_module(\"tensorflow\")", "Use a dynamic import so as to avoid hermetic dependency", "under the License is distributed on an \"AS IS\" BASIS,", "against a pre-V2 defaulted version. if hasattr(tf, \"enable_v2_behavior\"): tf.enable_v2_behavior() tf", "License for the specific language governing permissions and # limitations", "a basic saved model to XLA workflow grossly functions. This", "dependency analysis # (i.e. we only want the tensorflow from", "it up. input_module = compiler.tf_load_saved_model(sm_dir) xla_asm = input_module.to_asm() print(\"XLA ASM:\",", "import tensorflow. try: # Use a dynamic import so as", "print(\"Not running tests because tensorflow is not available\") sys.exit(0) class", "input_module = compiler.tf_load_saved_model(sm_dir) xla_asm = input_module.to_asm() print(\"XLA ASM:\", xla_asm) self.assertRegex(xla_asm,", "if hasattr(tf, \"enable_v2_behavior\"): tf.enable_v2_behavior() tf = tf.compat.v2 except ImportError: print(\"Not", "available\") sys.exit(0) class StatelessModule(tf.Module): def __init__(self): pass @tf.function(input_signature=[ tf.TensorSpec([4], tf.float32),", "# limitations under the License. from __future__ import absolute_import from", "under the License. from __future__ import absolute_import from __future__ import", "environment). 
tf = importlib.import_module(\"tensorflow\") # Just in case if linked", "linked in that needs to be and that there are", "__future__ import print_function import importlib import os import sys import", "in case if linked against a pre-V2 defaulted version. if", "2019 Google LLC # # Licensed under the Apache License,", "# Load it up. input_module = compiler.tf_load_saved_model(sm_dir) xla_asm = input_module.to_asm()", "= input_module.to_asm() print(\"XLA ASM:\", xla_asm) self.assertRegex(xla_asm, \"mhlo.tanh\") if __name__ ==", "the License for the specific language governing permissions and #", "xla_asm = input_module.to_asm() print(\"XLA ASM:\", xla_asm) self.assertRegex(xla_asm, \"mhlo.tanh\") if __name__", "options=options) # Load it up. input_module = compiler.tf_load_saved_model(sm_dir) xla_asm =", "(the \"License\"); # you may not use this file except", "Apache License, Version 2.0 (the \"License\"); # you may not", "so as to avoid hermetic dependency analysis # (i.e. we", "importlib.import_module(\"tensorflow\") # Just in case if linked against a pre-V2", "# you may not use this file except in compliance", "testLoadSavedModelToXlaPipeline(self): \"\"\"Tests that a basic saved model to XLA workflow", "= tf.saved_model.SaveOptions(save_debug_info=True) tf.saved_model.save(my_module, sm_dir, options=options) # Load it up. input_module", "either express or implied. # See the License for the", "tempfile.TemporaryDirectory() as temp_dir: sm_dir = os.path.join(temp_dir, \"simple.sm\") print(\"Saving to:\", sm_dir)", "License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "OR CONDITIONS OF ANY KIND, either express or implied. #", "that needs to be and that there are not no-ops,", "that everything is linked in that needs to be and", "the License is distributed on an \"AS IS\" BASIS, #", "from __future__ import absolute_import from __future__ import division from __future__", "defaulted version. 
if hasattr(tf, \"enable_v2_behavior\"): tf.enable_v2_behavior() tf = tf.compat.v2 except", "is largely here to verify that everything is linked in", "to be and that there are not no-ops, etc. \"\"\"", "import compiler # Dynamically import tensorflow. try: # Use a", "in compliance with the License. # You may obtain a", "permissions and # limitations under the License. from __future__ import", "software # distributed under the License is distributed on an", "tf.TensorSpec([4], tf.float32), tf.TensorSpec([4], tf.float32) ]) def add(self, a, b): return", "= compiler.tf_load_saved_model(sm_dir) xla_asm = input_module.to_asm() print(\"XLA ASM:\", xla_asm) self.assertRegex(xla_asm, \"mhlo.tanh\")", "import tempfile from pyiree.tf import compiler # Dynamically import tensorflow.", "no-ops, etc. \"\"\" with tempfile.TemporaryDirectory() as temp_dir: sm_dir = os.path.join(temp_dir,", "= tf.compat.v2 except ImportError: print(\"Not running tests because tensorflow is", "and that there are not no-ops, etc. \"\"\" with tempfile.TemporaryDirectory()", "# # Unless required by applicable law or agreed to", "from pyiree.tf import compiler # Dynamically import tensorflow. try: #", "because tensorflow is not available\") sys.exit(0) class StatelessModule(tf.Module): def __init__(self):", "running tests because tensorflow is not available\") sys.exit(0) class StatelessModule(tf.Module):", "to:\", sm_dir) my_module = StatelessModule() options = tf.saved_model.SaveOptions(save_debug_info=True) tf.saved_model.save(my_module, sm_dir,", "sm_dir, options=options) # Load it up. input_module = compiler.tf_load_saved_model(sm_dir) xla_asm", "not no-ops, etc. \"\"\" with tempfile.TemporaryDirectory() as temp_dir: sm_dir =", "class RuntimeTest(tf.test.TestCase): def testLoadSavedModelToXlaPipeline(self): \"\"\"Tests that a basic saved model", "saved model to XLA workflow grossly functions. 
This is largely", "Version 2.0 (the \"License\"); # you may not use this", "as temp_dir: sm_dir = os.path.join(temp_dir, \"simple.sm\") print(\"Saving to:\", sm_dir) my_module", "add(self, a, b): return tf.tanh(a + b) class RuntimeTest(tf.test.TestCase): def", "law or agreed to in writing, software # distributed under", "compiler.tf_load_saved_model(sm_dir) xla_asm = input_module.to_asm() print(\"XLA ASM:\", xla_asm) self.assertRegex(xla_asm, \"mhlo.tanh\") if", "to XLA workflow grossly functions. This is largely here to", "ImportError: print(\"Not running tests because tensorflow is not available\") sys.exit(0)", "import division from __future__ import print_function import importlib import os", "tf.enable_v2_behavior() tf = tf.compat.v2 except ImportError: print(\"Not running tests because", "\"\"\" with tempfile.TemporaryDirectory() as temp_dir: sm_dir = os.path.join(temp_dir, \"simple.sm\") print(\"Saving", "XLA workflow grossly functions. This is largely here to verify", "absolute_import from __future__ import division from __future__ import print_function import", "sm_dir) my_module = StatelessModule() options = tf.saved_model.SaveOptions(save_debug_info=True) tf.saved_model.save(my_module, sm_dir, options=options)", "This is largely here to verify that everything is linked", "__future__ import absolute_import from __future__ import division from __future__ import", "__future__ import division from __future__ import print_function import importlib import", "+ b) class RuntimeTest(tf.test.TestCase): def testLoadSavedModelToXlaPipeline(self): \"\"\"Tests that a basic", "that a basic saved model to XLA workflow grossly functions.", "tensorflow from the environment). tf = importlib.import_module(\"tensorflow\") # Just in", "implied. 
# See the License for the specific language governing", "= os.path.join(temp_dir, \"simple.sm\") print(\"Saving to:\", sm_dir) my_module = StatelessModule() options", "verify that everything is linked in that needs to be", "under the Apache License, Version 2.0 (the \"License\"); # you", "a dynamic import so as to avoid hermetic dependency analysis", "__init__(self): pass @tf.function(input_signature=[ tf.TensorSpec([4], tf.float32), tf.TensorSpec([4], tf.float32) ]) def add(self,", "\"License\"); # you may not use this file except in", "a pre-V2 defaulted version. if hasattr(tf, \"enable_v2_behavior\"): tf.enable_v2_behavior() tf =", "pyiree.tf import compiler # Dynamically import tensorflow. try: # Use", "version. if hasattr(tf, \"enable_v2_behavior\"): tf.enable_v2_behavior() tf = tf.compat.v2 except ImportError:", "from the environment). tf = importlib.import_module(\"tensorflow\") # Just in case", "needs to be and that there are not no-ops, etc.", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "from __future__ import print_function import importlib import os import sys", "avoid hermetic dependency analysis # (i.e. we only want the", "tf.tanh(a + b) class RuntimeTest(tf.test.TestCase): def testLoadSavedModelToXlaPipeline(self): \"\"\"Tests that a", "# Copyright 2019 Google LLC # # Licensed under the", "import print_function import importlib import os import sys import tempfile", "# # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "License. from __future__ import absolute_import from __future__ import division from", "import os import sys import tempfile from pyiree.tf import compiler", "tensorflow. 
try: # Use a dynamic import so as to", "tf.compat.v2 except ImportError: print(\"Not running tests because tensorflow is not", "tf.TensorSpec([4], tf.float32) ]) def add(self, a, b): return tf.tanh(a +", "RuntimeTest(tf.test.TestCase): def testLoadSavedModelToXlaPipeline(self): \"\"\"Tests that a basic saved model to", "sys import tempfile from pyiree.tf import compiler # Dynamically import", "the tensorflow from the environment). tf = importlib.import_module(\"tensorflow\") # Just", "if linked against a pre-V2 defaulted version. if hasattr(tf, \"enable_v2_behavior\"):", "sm_dir = os.path.join(temp_dir, \"simple.sm\") print(\"Saving to:\", sm_dir) my_module = StatelessModule()", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "StatelessModule(tf.Module): def __init__(self): pass @tf.function(input_signature=[ tf.TensorSpec([4], tf.float32), tf.TensorSpec([4], tf.float32) ])", "print(\"Saving to:\", sm_dir) my_module = StatelessModule() options = tf.saved_model.SaveOptions(save_debug_info=True) tf.saved_model.save(my_module,", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "def add(self, a, b): return tf.tanh(a + b) class RuntimeTest(tf.test.TestCase):", "\"\"\"Tests that a basic saved model to XLA workflow grossly", "# (i.e. we only want the tensorflow from the environment).", "compiler # Dynamically import tensorflow. try: # Use a dynamic", "Load it up. input_module = compiler.tf_load_saved_model(sm_dir) xla_asm = input_module.to_asm() print(\"XLA", "the License. 
# You may obtain a copy of the", "for the specific language governing permissions and # limitations under", "and # limitations under the License. from __future__ import absolute_import", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "hasattr(tf, \"enable_v2_behavior\"): tf.enable_v2_behavior() tf = tf.compat.v2 except ImportError: print(\"Not running", "to in writing, software # distributed under the License is", "pass @tf.function(input_signature=[ tf.TensorSpec([4], tf.float32), tf.TensorSpec([4], tf.float32) ]) def add(self, a,", "that there are not no-ops, etc. \"\"\" with tempfile.TemporaryDirectory() as", "with tempfile.TemporaryDirectory() as temp_dir: sm_dir = os.path.join(temp_dir, \"simple.sm\") print(\"Saving to:\",", "at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "importlib import os import sys import tempfile from pyiree.tf import", "options = tf.saved_model.SaveOptions(save_debug_info=True) tf.saved_model.save(my_module, sm_dir, options=options) # Load it up.", "You may obtain a copy of the License at #", "\"simple.sm\") print(\"Saving to:\", sm_dir) my_module = StatelessModule() options = tf.saved_model.SaveOptions(save_debug_info=True)", "os import sys import tempfile from pyiree.tf import compiler #", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "= StatelessModule() options = tf.saved_model.SaveOptions(save_debug_info=True) tf.saved_model.save(my_module, sm_dir, options=options) # Load", "required by applicable law or agreed to in writing, software", "# Just in case if linked against a pre-V2 defaulted", "tf = importlib.import_module(\"tensorflow\") # Just in case if linked against", "Copyright 2019 Google LLC # # Licensed under the Apache", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "print_function import importlib import os import sys import tempfile from", "import sys import tempfile from pyiree.tf import compiler # Dynamically", "with the License. # You may obtain a copy of", "tf.float32), tf.TensorSpec([4], tf.float32) ]) def add(self, a, b): return tf.tanh(a", "this file except in compliance with the License. # You", "is not available\") sys.exit(0) class StatelessModule(tf.Module): def __init__(self): pass @tf.function(input_signature=[", "the Apache License, Version 2.0 (the \"License\"); # you may", "up. input_module = compiler.tf_load_saved_model(sm_dir) xla_asm = input_module.to_asm() print(\"XLA ASM:\", xla_asm)", "import absolute_import from __future__ import division from __future__ import print_function", "def testLoadSavedModelToXlaPipeline(self): \"\"\"Tests that a basic saved model to XLA", "etc. \"\"\" with tempfile.TemporaryDirectory() as temp_dir: sm_dir = os.path.join(temp_dir, \"simple.sm\")", "from __future__ import division from __future__ import print_function import importlib", "import importlib import os import sys import tempfile from pyiree.tf", "\"enable_v2_behavior\"): tf.enable_v2_behavior() tf = tf.compat.v2 except ImportError: print(\"Not running tests", "https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "tempfile from pyiree.tf import compiler # Dynamically import tensorflow. try:", "linked against a pre-V2 defaulted version. if hasattr(tf, \"enable_v2_behavior\"): tf.enable_v2_behavior()" ]
[ "Geo #from .grandchild_objects import Http #from .grandchild_objects import Meta from", "properties.cookies = Cookies.from_dictionary(properties.cookies) properties.dns = Dns.from_dictionary(properties.dns) properties.dom = Dom.from_dictionary(properties.dom) properties.geo", "\"\" self.meta = \"\" self.ssl = \"\" self.whois = \"\"", ".grandchild_objects import WhoIs class Properties(object): FIELD_MAP = { \"cookies\": \"cookies\",", "__init__(self): self.cookies = \"\" self.dns = \"\" self.dom = \"\"", "#properties.meta = Meta.from_dictionary(properties.meta) properties.ssl = Ssl.from_dictionary(properties.ssl) #properties.whois = WhoIs.from_dictionary(properties.whois) return", "import Ssl #from .grandchild_objects import WhoIs class Properties(object): FIELD_MAP =", "\"\" self.http = \"\" self.meta = \"\" self.ssl = \"\"", "properties.dom = Dom.from_dictionary(properties.dom) properties.geo = Geo.from_dictionary(properties.geo) #properties.http = Http.from_dictionary(properties.http) #properties.meta", "self.dns = \"\" self.dom = \"\" self.geo = \"\" self.http", "key_name in properties_dict: setattr(properties, field_map[key_name], properties_dict[key_name]) properties.cookies = Cookies.from_dictionary(properties.cookies) properties.dns", "setattr(properties, field_map[key_name], properties_dict[key_name]) properties.cookies = Cookies.from_dictionary(properties.cookies) properties.dns = Dns.from_dictionary(properties.dns) properties.dom", "\"dom\": \"dom\", \"geo\": \"geo\", \"http\": \"http\", \"meta\": \"meta\", \"ssl\": \"ssl\",", "Dom from .grandchild_objects import Geo #from .grandchild_objects import Http #from", "= Cookies.from_dictionary(properties.cookies) properties.dns = Dns.from_dictionary(properties.dns) properties.dom = Dom.from_dictionary(properties.dom) properties.geo =", "in properties_dict: setattr(properties, field_map[key_name], properties_dict[key_name]) properties.cookies = Cookies.from_dictionary(properties.cookies) properties.dns =", 
"\"cookies\": \"cookies\", \"dns\": \"dns\", \"dom\": \"dom\", \"geo\": \"geo\", \"http\": \"http\",", "import Meta from .grandchild_objects import Ssl #from .grandchild_objects import WhoIs", "\"http\", \"meta\": \"meta\", \"ssl\": \"ssl\", \"whois\": \"whois\" } def __init__(self):", "<reponame>taco-chainalysis/pypulsedive from .grandchild_objects import Cookies from .grandchild_objects import Dns from", "= \"\" self.dom = \"\" self.geo = \"\" self.http =", "properties.geo = Geo.from_dictionary(properties.geo) #properties.http = Http.from_dictionary(properties.http) #properties.meta = Meta.from_dictionary(properties.meta) properties.ssl", "\"dns\": \"dns\", \"dom\": \"dom\", \"geo\": \"geo\", \"http\": \"http\", \"meta\": \"meta\",", "from_dictionary(properties_dict: dict): properties = Properties() field_map = getattr(properties.__class__, \"FIELD_MAP\") for", "FIELD_MAP = { \"cookies\": \"cookies\", \"dns\": \"dns\", \"dom\": \"dom\", \"geo\":", "{ \"cookies\": \"cookies\", \"dns\": \"dns\", \"dom\": \"dom\", \"geo\": \"geo\", \"http\":", "#properties.http = Http.from_dictionary(properties.http) #properties.meta = Meta.from_dictionary(properties.meta) properties.ssl = Ssl.from_dictionary(properties.ssl) #properties.whois", "= Meta.from_dictionary(properties.meta) properties.ssl = Ssl.from_dictionary(properties.ssl) #properties.whois = WhoIs.from_dictionary(properties.whois) return properties", ".grandchild_objects import Meta from .grandchild_objects import Ssl #from .grandchild_objects import", "= Geo.from_dictionary(properties.geo) #properties.http = Http.from_dictionary(properties.http) #properties.meta = Meta.from_dictionary(properties.meta) properties.ssl =", "Http #from .grandchild_objects import Meta from .grandchild_objects import Ssl #from", "import Cookies from .grandchild_objects import Dns from .grandchild_objects import Dom", "\"ssl\", \"whois\": \"whois\" } def __init__(self): self.cookies = \"\" self.dns", "Dns from .grandchild_objects import Dom 
from .grandchild_objects import Geo #from", "\"\" self.geo = \"\" self.http = \"\" self.meta = \"\"", "field_map[key_name], properties_dict[key_name]) properties.cookies = Cookies.from_dictionary(properties.cookies) properties.dns = Dns.from_dictionary(properties.dns) properties.dom =", "\"cookies\", \"dns\": \"dns\", \"dom\": \"dom\", \"geo\": \"geo\", \"http\": \"http\", \"meta\":", "import Http #from .grandchild_objects import Meta from .grandchild_objects import Ssl", ".grandchild_objects import Dom from .grandchild_objects import Geo #from .grandchild_objects import", "field_map = getattr(properties.__class__, \"FIELD_MAP\") for key_name in field_map: if key_name", "\"\" @staticmethod def from_dictionary(properties_dict: dict): properties = Properties() field_map =", "Cookies from .grandchild_objects import Dns from .grandchild_objects import Dom from", "def __init__(self): self.cookies = \"\" self.dns = \"\" self.dom =", "properties.dns = Dns.from_dictionary(properties.dns) properties.dom = Dom.from_dictionary(properties.dom) properties.geo = Geo.from_dictionary(properties.geo) #properties.http", "\"whois\" } def __init__(self): self.cookies = \"\" self.dns = \"\"", ".grandchild_objects import Ssl #from .grandchild_objects import WhoIs class Properties(object): FIELD_MAP", "dict): properties = Properties() field_map = getattr(properties.__class__, \"FIELD_MAP\") for key_name", "\"meta\": \"meta\", \"ssl\": \"ssl\", \"whois\": \"whois\" } def __init__(self): self.cookies", "\"\" self.whois = \"\" @staticmethod def from_dictionary(properties_dict: dict): properties =", "= \"\" self.dns = \"\" self.dom = \"\" self.geo =", "\"geo\", \"http\": \"http\", \"meta\": \"meta\", \"ssl\": \"ssl\", \"whois\": \"whois\" }", "Properties(object): FIELD_MAP = { \"cookies\": \"cookies\", \"dns\": \"dns\", \"dom\": \"dom\",", "from .grandchild_objects import Geo #from .grandchild_objects import Http #from .grandchild_objects", "= \"\" self.ssl = \"\" self.whois = \"\" 
@staticmethod def", "Meta from .grandchild_objects import Ssl #from .grandchild_objects import WhoIs class", "#from .grandchild_objects import Meta from .grandchild_objects import Ssl #from .grandchild_objects", "\"ssl\": \"ssl\", \"whois\": \"whois\" } def __init__(self): self.cookies = \"\"", "from .grandchild_objects import Dom from .grandchild_objects import Geo #from .grandchild_objects", "import WhoIs class Properties(object): FIELD_MAP = { \"cookies\": \"cookies\", \"dns\":", "\"FIELD_MAP\") for key_name in field_map: if key_name in properties_dict: setattr(properties,", "self.ssl = \"\" self.whois = \"\" @staticmethod def from_dictionary(properties_dict: dict):", "from .grandchild_objects import Dns from .grandchild_objects import Dom from .grandchild_objects", "in field_map: if key_name in properties_dict: setattr(properties, field_map[key_name], properties_dict[key_name]) properties.cookies", "def from_dictionary(properties_dict: dict): properties = Properties() field_map = getattr(properties.__class__, \"FIELD_MAP\")", "from .grandchild_objects import Ssl #from .grandchild_objects import WhoIs class Properties(object):", "= \"\" self.geo = \"\" self.http = \"\" self.meta =", "\"dom\", \"geo\": \"geo\", \"http\": \"http\", \"meta\": \"meta\", \"ssl\": \"ssl\", \"whois\":", "Cookies.from_dictionary(properties.cookies) properties.dns = Dns.from_dictionary(properties.dns) properties.dom = Dom.from_dictionary(properties.dom) properties.geo = Geo.from_dictionary(properties.geo)", "= Properties() field_map = getattr(properties.__class__, \"FIELD_MAP\") for key_name in field_map:", "Dom.from_dictionary(properties.dom) properties.geo = Geo.from_dictionary(properties.geo) #properties.http = Http.from_dictionary(properties.http) #properties.meta = Meta.from_dictionary(properties.meta)", "self.meta = \"\" self.ssl = \"\" self.whois = \"\" @staticmethod", "class Properties(object): FIELD_MAP = { \"cookies\": \"cookies\", \"dns\": \"dns\", \"dom\":", "\"\" self.ssl = 
\"\" self.whois = \"\" @staticmethod def from_dictionary(properties_dict:", "for key_name in field_map: if key_name in properties_dict: setattr(properties, field_map[key_name],", "Dns.from_dictionary(properties.dns) properties.dom = Dom.from_dictionary(properties.dom) properties.geo = Geo.from_dictionary(properties.geo) #properties.http = Http.from_dictionary(properties.http)", "getattr(properties.__class__, \"FIELD_MAP\") for key_name in field_map: if key_name in properties_dict:", "= \"\" @staticmethod def from_dictionary(properties_dict: dict): properties = Properties() field_map", "WhoIs class Properties(object): FIELD_MAP = { \"cookies\": \"cookies\", \"dns\": \"dns\",", "\"geo\": \"geo\", \"http\": \"http\", \"meta\": \"meta\", \"ssl\": \"ssl\", \"whois\": \"whois\"", "import Geo #from .grandchild_objects import Http #from .grandchild_objects import Meta", ".grandchild_objects import Http #from .grandchild_objects import Meta from .grandchild_objects import", "key_name in field_map: if key_name in properties_dict: setattr(properties, field_map[key_name], properties_dict[key_name])", "properties_dict[key_name]) properties.cookies = Cookies.from_dictionary(properties.cookies) properties.dns = Dns.from_dictionary(properties.dns) properties.dom = Dom.from_dictionary(properties.dom)", ".grandchild_objects import Cookies from .grandchild_objects import Dns from .grandchild_objects import", "\"whois\": \"whois\" } def __init__(self): self.cookies = \"\" self.dns =", "} def __init__(self): self.cookies = \"\" self.dns = \"\" self.dom", "self.http = \"\" self.meta = \"\" self.ssl = \"\" self.whois", "= \"\" self.http = \"\" self.meta = \"\" self.ssl =", "from .grandchild_objects import Cookies from .grandchild_objects import Dns from .grandchild_objects", "#from .grandchild_objects import WhoIs class Properties(object): FIELD_MAP = { \"cookies\":", "\"dns\", \"dom\": \"dom\", \"geo\": \"geo\", \"http\": \"http\", \"meta\": \"meta\", \"ssl\":", "properties = 
Properties() field_map = getattr(properties.__class__, \"FIELD_MAP\") for key_name in", "Ssl #from .grandchild_objects import WhoIs class Properties(object): FIELD_MAP = {", "\"\" self.dom = \"\" self.geo = \"\" self.http = \"\"", "\"meta\", \"ssl\": \"ssl\", \"whois\": \"whois\" } def __init__(self): self.cookies =", "field_map: if key_name in properties_dict: setattr(properties, field_map[key_name], properties_dict[key_name]) properties.cookies =", "if key_name in properties_dict: setattr(properties, field_map[key_name], properties_dict[key_name]) properties.cookies = Cookies.from_dictionary(properties.cookies)", "\"\" self.dns = \"\" self.dom = \"\" self.geo = \"\"", "self.dom = \"\" self.geo = \"\" self.http = \"\" self.meta", "Geo.from_dictionary(properties.geo) #properties.http = Http.from_dictionary(properties.http) #properties.meta = Meta.from_dictionary(properties.meta) properties.ssl = Ssl.from_dictionary(properties.ssl)", "= Dns.from_dictionary(properties.dns) properties.dom = Dom.from_dictionary(properties.dom) properties.geo = Geo.from_dictionary(properties.geo) #properties.http =", "\"http\": \"http\", \"meta\": \"meta\", \"ssl\": \"ssl\", \"whois\": \"whois\" } def", "self.cookies = \"\" self.dns = \"\" self.dom = \"\" self.geo", "Properties() field_map = getattr(properties.__class__, \"FIELD_MAP\") for key_name in field_map: if", "self.whois = \"\" @staticmethod def from_dictionary(properties_dict: dict): properties = Properties()", "@staticmethod def from_dictionary(properties_dict: dict): properties = Properties() field_map = getattr(properties.__class__,", "= Dom.from_dictionary(properties.dom) properties.geo = Geo.from_dictionary(properties.geo) #properties.http = Http.from_dictionary(properties.http) #properties.meta =", "properties_dict: setattr(properties, field_map[key_name], properties_dict[key_name]) properties.cookies = Cookies.from_dictionary(properties.cookies) properties.dns = Dns.from_dictionary(properties.dns)", "= 
Http.from_dictionary(properties.http) #properties.meta = Meta.from_dictionary(properties.meta) properties.ssl = Ssl.from_dictionary(properties.ssl) #properties.whois =", "import Dom from .grandchild_objects import Geo #from .grandchild_objects import Http", "Http.from_dictionary(properties.http) #properties.meta = Meta.from_dictionary(properties.meta) properties.ssl = Ssl.from_dictionary(properties.ssl) #properties.whois = WhoIs.from_dictionary(properties.whois)", "= \"\" self.meta = \"\" self.ssl = \"\" self.whois =", "import Dns from .grandchild_objects import Dom from .grandchild_objects import Geo", "= \"\" self.whois = \"\" @staticmethod def from_dictionary(properties_dict: dict): properties", ".grandchild_objects import Geo #from .grandchild_objects import Http #from .grandchild_objects import", "self.geo = \"\" self.http = \"\" self.meta = \"\" self.ssl", "= getattr(properties.__class__, \"FIELD_MAP\") for key_name in field_map: if key_name in", "= { \"cookies\": \"cookies\", \"dns\": \"dns\", \"dom\": \"dom\", \"geo\": \"geo\",", ".grandchild_objects import Dns from .grandchild_objects import Dom from .grandchild_objects import", "#from .grandchild_objects import Http #from .grandchild_objects import Meta from .grandchild_objects" ]
[ "parser.add_argument(\\ '-l', default = False, type = int, \\ help", "in c_skew_max] ori, ter = check_peaks([c_skew_min, c_skew_max], length) return ori,", "too close or too far apart and have greatest y", "False: ori, ter = 'n/a', 'n/a' else: ori, ter =", "x window)') parser.add_argument(\\ '-w', default = 1000, type = int,", "['GC Skew', 'Cumulative GC Skew', 'Position on Genome (bp)'] #", "genome in fastas: sequence = [] for seq in parse_fasta(genome):", "parser.add_argument(\\ '-w', default = 1000, type = int, \\ help", "'n/a', 'n/a' else: ori, ter = '{:,}'.format(ori), '{:,}'.format(ter) print('%s ->", "[abs(i) for i in gmc] # G + C #", "peaks are too close or too far apart, they are", "added this to make sure gets origin and ter right", "if N != 0: skew = [skew[0][0::N], skew[1][0::N]] if ori", "'*', action = 'store', required = True, \\ help =", "\"\"\" select pair of min and max that are not", "(a <= farthest and a >= closest) or (b <=farthest", "and terminus of replication based on # cumulative gc skew", "gc skew and cumulative sum of gc skew over sequence", "sorted(pairs, reverse = True)[0] return [tr[0], pk[0]] def find_ori_ter(c_skew, length):", "base in seq: try: gmc.append(replacements[base]) except: gmc.append(0) # convert to", "- C) and (G + C) weights = np.ones(window)/window gmc", "\"\"\" plot with differnt y axes title = title for", "weights = np.ones(window)/window gmc = [[i, c] for i, c", "calculate gc skew and cumulative sum of gc skew over", "'g'] a_colors = cycle(colors) b_colors = cycle(colors[::-1]) a_label = cycle(legend[0])", "A] + [min(i[0]) for i in B]) xmax = max([max(i[0])", "= max([max(i[0]) for i in A] + [max(i[0]) for i", "0.0\\ ) # save pdf = PdfPages('%s.pdf' % title.replace(' ',", "legend, vert = False): \"\"\" plot with differnt y axes", "far apart and have greatest y distance between one another", "{'G':1, 'C':-1, 'A':0, 'T':0, 'N':0} gmc = [] # G", "sequences into single genome') parser.add_argument(\\ '--no-plot', action 
= 'store_false', \\", "sequence windows gc skew = ((G - C) / (G", "use as mplUse mplUse('Agg') import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf", "len(c_skew_min) == 0: return [False, False] else: c_skew_min = [[c_skew[0][i],", "type = int, \\ help = 'minimum contig length (default", "parsing fastas if single is True, combine sequences in multifasta", "8) ax2.plot(x, y, c = next(b_colors), linewidth = 2, label", "y for gc skew cummulative sums cs = 0 #", "from ctbBio.fasta import iterate_fasta as parse_fasta def plot_two(title, subtitle, A,", "<= farthest and a >= closest) or (b <=farthest and", "ter, skew, c_skew def parse_genomes(fastas, single): \"\"\" generator for parsing", "[] for seq in parse_fasta(genome): sequence.extend(list(seq[1].upper())) yield (genome.name.rsplit('.', 1)[0], len(sequence),", "m in gmc[0::slide]: p = gpc[i][1] if p == 0:", "= {'G':1, 'C':-1, 'A':0, 'T':0, 'N':0} gmc = [] #", "colors = ['0.75', 'b', 'r', 'c', 'y', 'm', 'k', 'g']", "= int(len(skew[0])/1000) if N != 0: skew = [skew[0][0::N], skew[1][0::N]]", "cs = 0 # cummulative sum # select windows to", "if (a <= farthest and a >= closest) or (b", "length, seq, window, slide, plot_skew): \"\"\" calculate gc skew and", "ax2.plot(x, y, c = next(b_colors), linewidth = 2, label =", "* genome length \"\"\" # convert to G - C", "(default = 1000)') parser.add_argument(\\ '-s', default = 10, type =", "label = next(b_label)) xmin = min([min(i[1]) for i in A]", "closest) or (b <=farthest and b >= closest): pairs.append([pt, tr,", "iterate_fasta as parse_fasta def plot_two(title, subtitle, A, B, labels, legend,", "= 4, label = next(a_label)) # add vertical lines if", "i in A] + [max(i[0]) for i in B]) ax2.set_xlim(xmin,", "argparse import numpy as np from scipy import signal from", "= 8) ax2.plot(x, y, c = next(b_colors), linewidth = 2,", "seq, window, slide, plot_skew): \"\"\" calculate gc skew and cumulative", "replacements = {'G':1, 'C':-1, 'A':0, 'T':0, 'N':0} gmc = []", 
"calculate sliding windows for (G - C) and (G +", "C) and (G + C) weights = np.ones(window)/window gmc =", "[labels[1]]]) else: plot_two(title, subtitle, [skew], [c_skew], labels, \\ [[labels[0], 'Ori:%s'", "Ori and Ter of replication') parser.add_argument(\\ '-f', nargs = '*',", "plot_two(title, subtitle, A, B, labels, legend, vert = False): \"\"\"", "\\ framealpha = 0.0\\ ) # save pdf = PdfPages('%s.pdf'", "labels, \\ [[labels[0]], [labels[1]]]) else: plot_two(title, subtitle, [skew], [c_skew], labels,", "== '__main__': parser = argparse.ArgumentParser(description = \\ '# calculate gc", "label, x label] legend = [[left legend], [right legend]] \"\"\"", "axis [[x], [y]] B = data for right axis lables", "b_label = cycle(legend[1]) # plot left axis and x -", "cumulative GC Skew \"\"\" # find origin and terminus of", "parser.add_argument(\\ '--no-plot', action = 'store_false', \\ help = 'do not", "import iterate_fasta as parse_fasta def plot_two(title, subtitle, A, B, labels,", "subtitle, A, B, labels, legend, vert = False): \"\"\" plot", "from matplotlib import use as mplUse mplUse('Agg') import matplotlib.pyplot as", "title for chart A = data for left axis [[x],", "slide for i, m in gmc[0::slide]: p = gpc[i][1] if", "if len(c_skew_min) == 0 or len(c_skew_min) == 0: return [False,", "min_len = args['l'] if min_len is False: min_len = 10", "c_skew[1].append(cs) ori, ter = find_ori_ter(c_skew, length) # plot data if", "mplUse('Agg') import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages plt.rcParams['pdf.fonttype']", "import use as mplUse mplUse('Agg') import matplotlib.pyplot as plt from", "plotting modules from matplotlib import use as mplUse mplUse('Agg') import", "in parse_genomes(fastas, single): if length < min_len: print('%s: Too Short'", "pair of min and max that are not too close", "1)[0].tolist() c_skew_max = signal.argrelextrema(np.asarray(c_skew[1]), np.greater, order = 1)[0].tolist() # return", "c_skew_max = 
signal.argrelextrema(np.asarray(c_skew[1]), np.greater, order = 1)[0].tolist() # return False", "min([min(i[1]) for i in A] + [min(i[0]) for i in", "C gpc = [abs(i) for i in gmc] # G", "required = True, \\ help = 'fasta(s)') parser.add_argument(\\ '-l', default", "help = 'window length (default = 1000)') parser.add_argument(\\ '-s', default", "- axis for a in A: x, y = a", "distance between one another \"\"\" # if ori/ter peaks are", "close or too far apart and have greatest y distance", "(tr[0] - pk[0]) % length b = (pk[0] - tr[0])", "for i in c_skew_min] c_skew_max = [[c_skew[0][i], c_skew[1][i]] for i", "vertical lines if vert is not False: for i in", "labelpad = 8) ax2.plot(x, y, c = next(b_colors), linewidth =", "in list, use stdin if first item in list is", "for seq in parse_fasta(genome): sequence.extend(list(seq[1].upper())) yield (genome.name.rsplit('.', 1)[0], len(sequence), sequence)", "apart and have greatest y distance between one another \"\"\"", "seq[0].split('>', 1)[1].split()[0] yield (ID, len(seq[1]), list(seq[1].upper())) def open_files(files): \"\"\" open", "fastas = open_files(args['f']) single, plot_skew = args['single'], args['no_plot'] window, slide", "that are not too close or too far apart and", "\"\"\" fig, ax1 = plt.subplots() colors = ['0.75', 'b', 'r',", "+ [min(i[0]) for i in B]) xmax = max([max(i[0]) for", ">= closest) or (b <=farthest and b >= closest): pairs.append([pt,", "%s, slide = %s)' % (window, slide) labels = ['GC", "and (G + C) weights = np.ones(window)/window gmc = [[i,", "< min_len: print('%s: Too Short' % (name), file=sys.stderr) continue ori,", "too far apart and have greatest y distance between one", "= 2) # plot right axis ax2 = ax1.twinx() for", "+ C # calculate sliding windows for (G - C)", "= (pk[0] - tr[0]) % length pt = abs(tr[1] -", "in list is '-' \"\"\" if files is None: return", "for i in A] + [min(i[0]) for i in B])", "return ori, ter, skew, c_skew def parse_genomes(fastas, single): \"\"\" generator", 
"fontsize = 10) # legend ax1.legend(loc = 'upper left', \\", "= PdfPages('%s.pdf' % title.replace(' ', '_')) pdf.savefig(bbox_inches = 'tight') plt.close()", "* window for name, length, seq in parse_genomes(fastas, single): if", "enumerate(skew[0]): out = [name, pos, skew[1][i], c_skew[1][i]] print('\\t'.join([str(i) for i", "cycle(colors) b_colors = cycle(colors[::-1]) a_label = cycle(legend[0]) b_label = cycle(legend[1])", "Skew'])) for i, pos in enumerate(skew[0]): out = [name, pos,", "to stdout') args = vars(parser.parse_args()) fastas = open_files(args['f']) single, plot_skew", "plot right axis ax2 = ax1.twinx() for b in B:", "= 'combine multi-fasta sequences into single genome') parser.add_argument(\\ '--no-plot', action", "and b >= closest): pairs.append([pt, tr, pk]) if len(pairs) ==", "pt = abs(tr[1] - pk[1]) # distance between values if", "help = 'slide length (default = 10)') parser.add_argument(\\ '--single', action", "axis lables = [left label, right label, x label] legend", "ter), file=sys.stderr) if plot_skew is False: print('\\t'.join(['# Name', 'Position', 'GC", "default = False, type = int, \\ help = 'minimum", "left axis [[x], [y]] B = data for right axis", "sequence) else: for genome in fastas: for seq in parse_fasta(genome):", "probably wrong closest, farthest = int(length * float(0.45)), int(length *", "differnt y axes title = title for chart A =", "[skew[0][0::N], skew[1][0::N]] if ori is False: plot_two(title, subtitle, [skew], [c_skew],", "gmc[0::slide]: p = gpc[i][1] if p == 0: gcs =", "sequence = [] for seq in parse_fasta(genome): sequence.extend(list(seq[1].upper())) yield (genome.name.rsplit('.',", "0 # cummulative sum # select windows to use based", "and ter right tr, pk = sorted(list(pair), key = lambda", "= 0.0 ) plt.legend(loc = 'upper right', \\ bbox_to_anchor=(0.45, -0.125),", "# plotting modules from matplotlib import use as mplUse mplUse('Agg')", "'o', ms = 4, label = next(a_label)) # add vertical", "= 16) plt.title(subtitle, 
fontsize = 10) # legend ax1.legend(loc =", "between one another \"\"\" # if ori/ter peaks are too", "[[c_skew[0][i], c_skew[1][i]] for i in c_skew_max] ori, ter = check_peaks([c_skew_min,", "in vert: x, c = i ax1.axvline(x = x, c", "= '%s GC Skew' % (name) subtitle = '(window =", "is True, combine sequences in multifasta file \"\"\" if single", "genome in fastas: for seq in parse_fasta(genome): ID = seq[0].split('>',", "'{:,}'.format(ter) print('%s -> Origin: %s Terminus: %s' \\ % (name,", "c] for i, c in enumerate(signal.fftconvolve(gmc, weights, 'same').tolist())] gpc =", "or too far apart, they are probably wrong closest, farthest", "window, slide, plot_skew): \"\"\" calculate gc skew and cumulative sum", "# legend ax1.legend(loc = 'upper left', \\ bbox_to_anchor=(0.55, -0.125), \\", "pk[0]] def find_ori_ter(c_skew, length): \"\"\" find origin and terminus of", "terminus of replication based on cumulative GC Skew \"\"\" #", "p == 0: gcs = 0 else: gcs = m/p", "xmax) # title plt.suptitle(title, fontsize = 16) plt.title(subtitle, fontsize =", "= [left label, right label, x label] legend = [[left", "A] + [max(i[0]) for i in B]) ax2.set_xlim(xmin, xmax) #", "[tr[0], pk[0]] def find_ori_ter(c_skew, length): \"\"\" find origin and terminus", "= gc_skew(name, length, seq, window, slide, plot_skew) if ori ==", "= ['GC Skew', 'Cumulative GC Skew', 'Position on Genome (bp)']", "parser = argparse.ArgumentParser(description = \\ '# calculate gc skew and", "scipy import signal from itertools import cycle, product # plotting", "sum # select windows to use based on slide for", "a ax1.set_ylabel(labels[0], labelpad = 3) ax1.set_xlabel(labels[-1]) ax1.plot(x, y, c =", "another \"\"\" # if ori/ter peaks are too close or", "+ C)) * window size * genome length \"\"\" #", "['0.75', 'b', 'r', 'c', 'y', 'm', 'k', 'g'] a_colors =", "= \\ '# calculate gc skew and find Ori and", "10, type = int, \\ help = 'slide length (default", "save pdf = PdfPages('%s.pdf' % title.replace(' ', '_')) 
pdf.savefig(bbox_inches =", "\\ prop = {'size':8}, \\ framealpha = 0.0 ) plt.legend(loc", "\"\"\" # if ori/ter peaks are too close or too", "np from scipy import signal from itertools import cycle, product", "\\ prop = {'size':8}, \\ framealpha = 0.0\\ ) #", "terminus of replication based on # cumulative gc skew min", "for left axis [[x], [y]] B = data for right", "[False, False] else: c_skew_min = [[c_skew[0][i], c_skew[1][i]] for i in", "pairs = [] for pair in list(product(*peaks)): ### added this", "# cumulative gc skew min and max peaks c_skew_min =", "action = 'store_true', \\ help = 'combine multi-fasta sequences into", ") plt.legend(loc = 'upper right', \\ bbox_to_anchor=(0.45, -0.125), \\ prop", "\"\"\" # find origin and terminus of replication based on", "False): \"\"\" plot with differnt y axes title = title", "'A':0, 'T':0, 'N':0} gmc = [] # G - C", "args['single'], args['no_plot'] window, slide = args['w'], args['s'] min_len = args['l']", "i, m in gmc[0::slide]: p = gpc[i][1] if p ==", "pk[1]) # distance between values if (a <= farthest and", "yield (ID, len(seq[1]), list(seq[1].upper())) def open_files(files): \"\"\" open files in", "in B: x, y = b ax2.set_ylabel(labels[1], labelpad = 8)", "c = i ax1.axvline(x = x, c = c, label", "PdfPages plt.rcParams['pdf.fonttype'] = 42 from matplotlib import rc rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) #", "= args['w'], args['s'] min_len = args['l'] if min_len is False:", "<=farthest and b >= closest): pairs.append([pt, tr, pk]) if len(pairs)", "origin and terminus of replication based on cumulative GC Skew", "if first item in list is '-' \"\"\" if files", "return False if no peaks were detected if len(c_skew_min) ==", "[c_skew], labels, \\ [[labels[0], 'Ori:%s' % ('{:,}'.format(ori)), \\ 'Ter:%s' %", "and have greatest y distance between one another \"\"\" #", "= args['l'] if min_len is False: min_len = 10 *", "genome') parser.add_argument(\\ '--no-plot', action = 'store_false', \\ help = 
'do", "= b ax2.set_ylabel(labels[1], labelpad = 8) ax2.plot(x, y, c =", "plot_skew = args['single'], args['no_plot'] window, slide = args['w'], args['s'] min_len", "None: return files if files[0] == '-': return (sys.stdin) return", "data for left axis [[x], [y]] B = data for", "gcs skew[0].append(i) c_skew[0].append(i) skew[1].append(gcs) c_skew[1].append(cs) ori, ter = find_ori_ter(c_skew, length)", "= ((G - C) / (G + C)) * window", "False] pt, tr, pk = sorted(pairs, reverse = True)[0] return", "left', \\ bbox_to_anchor=(0.55, -0.125), \\ prop = {'size':8}, \\ framealpha", "import PdfPages plt.rcParams['pdf.fonttype'] = 42 from matplotlib import rc rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})", "ax1.legend(loc = 'upper left', \\ bbox_to_anchor=(0.55, -0.125), \\ prop =", "C) weights = np.ones(window)/window gmc = [[i, c] for i,", "gc skew <NAME> <EMAIL> \"\"\" # python modules import os", "x, y = b ax2.set_ylabel(labels[1], labelpad = 8) ax2.plot(x, y,", "return [tr[0], pk[0]] def find_ori_ter(c_skew, length): \"\"\" find origin and", "in gmc] # G + C # calculate sliding windows", "for parsing fastas if single is True, combine sequences in", "c = next(b_colors), linewidth = 2, label = next(b_label)) xmin", "c_skew def parse_genomes(fastas, single): \"\"\" generator for parsing fastas if", "= 'store_true', \\ help = 'combine multi-fasta sequences into single", "print('%s: Too Short' % (name), file=sys.stderr) continue ori, ter, skew,", "labelpad = 3) ax1.set_xlabel(labels[-1]) ax1.plot(x, y, c = next(a_colors), marker", "window size * genome length \"\"\" # convert to G", "# plot left axis and x - axis for a", "windows for (G - C) and (G + C) weights", "pdf.close() def check_peaks(peaks, length): \"\"\" select pair of min and", "origin and ter right tr, pk = sorted(list(pair), key =", "and cummulative gc skew sum skew = [[], []] #", "Name', 'Position', 'GC Skew', 'Cumulative GC Skew'])) for i, pos", "= args['single'], args['no_plot'] window, 
slide = args['w'], args['s'] min_len =", "vert = [(ori, 'r'), (ter, 'b')]) return ori, ter, skew,", "B, labels, legend, vert = False): \"\"\" plot with differnt", "as parse_fasta def plot_two(title, subtitle, A, B, labels, legend, vert", "parser.add_argument(\\ '-s', default = 10, type = int, \\ help", "C)) * window size * genome length \"\"\" # convert", "False: min_len = 10 * window for name, length, seq", "return (sys.stdin) return (open(i) for i in files) if __name__", "# if ori/ter peaks are too close or too far", "and a >= closest) or (b <=farthest and b >=", "plot data if plot_skew is True: title = '%s GC", "to make sure gets origin and ter right tr, pk", "# plot data if plot_skew is True: title = '%s", "'Cumulative GC Skew'])) for i, pos in enumerate(skew[0]): out =", ">= closest): pairs.append([pt, tr, pk]) if len(pairs) == 0: return", "script for calculating gc skew <NAME> <EMAIL> \"\"\" # python", "= int(length * float(0.45)), int(length * float(0.55)) pairs = []", "parse_genomes(fastas, single): if length < min_len: print('%s: Too Short' %", "42 from matplotlib import rc rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) # ctb from ctbBio.fasta", "ctbBio.fasta import iterate_fasta as parse_fasta def plot_two(title, subtitle, A, B,", "x, c = c, label = next(a_label), linewidth = 2)", "y, c = next(b_colors), linewidth = 2, label = next(b_label))", "Skew \"\"\" # find origin and terminus of replication based", "length) return ori, ter def gc_skew(name, length, seq, window, slide,", "sum skew = [[], []] # x and y for", "window for name, length, seq in parse_genomes(fastas, single): if length", "+ [max(i[0]) for i in B]) ax2.set_xlim(xmin, xmax) # title", "i in gmc] # G + C # calculate sliding", "skew[1].append(gcs) c_skew[1].append(cs) ori, ter = find_ori_ter(c_skew, length) # plot data", "calculate gc skew and cummulative gc skew sum skew =", "- C for base in seq: try: gmc.append(replacements[base]) except: gmc.append(0)", "in 
list(product(*peaks)): ### added this to make sure gets origin", "add vertical lines if vert is not False: for i", "# return False if no peaks were detected if len(c_skew_min)", "and peak a = (tr[0] - pk[0]) % length b", "%s)' % (window, slide) labels = ['GC Skew', 'Cumulative GC", "= 'minimum contig length (default = 10 x window)') parser.add_argument(\\", "(G - C) and (G + C) weights = np.ones(window)/window", "'%s GC Skew' % (name) subtitle = '(window = %s,", "are too close or too far apart, they are probably", "= 1000)') parser.add_argument(\\ '-s', default = 10, type = int,", "next(a_label), linewidth = 2) # plot right axis ax2 =", "x, c = i ax1.axvline(x = x, c = c,", "Terminus: %s' \\ % (name, ori, ter), file=sys.stderr) if plot_skew", "x label] legend = [[left legend], [right legend]] \"\"\" fig,", "'Ori:%s' % ('{:,}'.format(ori)), \\ 'Ter:%s' % ('{:,}'.format(ter))], [labels[1]]], \\ vert", "is False: plot_two(title, subtitle, [skew], [c_skew], labels, \\ [[labels[0]], [labels[1]]])", "-0.125), \\ prop = {'size':8}, \\ framealpha = 0.0 )", "= False, type = int, \\ help = 'minimum contig", "False if no peaks were detected if len(c_skew_min) == 0", "[[i, c] for i, c in enumerate(signal.fftconvolve(gpc, weights, 'same').tolist())] #", "= argparse.ArgumentParser(description = \\ '# calculate gc skew and find", "Ter of replication') parser.add_argument(\\ '-f', nargs = '*', action =", "= 42 from matplotlib import rc rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) # ctb from", "A = data for left axis [[x], [y]] B =", "into single genome') parser.add_argument(\\ '--no-plot', action = 'store_false', \\ help", "B = data for right axis lables = [left label,", "parse_fasta(genome): ID = seq[0].split('>', 1)[1].split()[0] yield (ID, len(seq[1]), list(seq[1].upper())) def", "[min(i[0]) for i in B]) xmax = max([max(i[0]) for i", "make sure gets origin and ter right tr, pk =", "for chart A = data for left axis [[x], [y]]", "* float(0.45)), int(length * 
float(0.55)) pairs = [] for pair", "= ax1.twinx() for b in B: x, y = b", "pk = sorted(pairs, reverse = True)[0] return [tr[0], pk[0]] def", "GC Skew', 'Position on Genome (bp)'] # remove some points", "gmc = [] # G - C for base in", "'# calculate gc skew and find Ori and Ter of", "b ax2.set_ylabel(labels[1], labelpad = 8) ax2.plot(x, y, c = next(b_colors),", "slide) labels = ['GC Skew', 'Cumulative GC Skew', 'Position on", "True, \\ help = 'fasta(s)') parser.add_argument(\\ '-l', default = False,", "plot_skew) if ori == False: ori, ter = 'n/a', 'n/a'", "data for right axis lables = [left label, right label,", "% length pt = abs(tr[1] - pk[1]) # distance between", "on Genome (bp)'] # remove some points for plotting (approx.", "plot_skew is True: title = '%s GC Skew' % (name)", "for i, c in enumerate(signal.fftconvolve(gpc, weights, 'same').tolist())] # calculate gc", "of replication based on cumulative GC Skew \"\"\" # find", "= False) # trough and peak a = (tr[0] -", "\\ bbox_to_anchor=(0.55, -0.125), \\ prop = {'size':8}, \\ framealpha =", "# remove some points for plotting (approx. 
1,000 datapoints) N", "in parse_fasta(genome): ID = seq[0].split('>', 1)[1].split()[0] yield (ID, len(seq[1]), list(seq[1].upper()))", "for b in B: x, y = b ax2.set_ylabel(labels[1], labelpad", "open files in list, use stdin if first item in", "[[i, c] for i, c in enumerate(signal.fftconvolve(gmc, weights, 'same').tolist())] gpc", "based on cumulative GC Skew \"\"\" # find origin and", "detected if len(c_skew_min) == 0 or len(c_skew_min) == 0: return", "rc rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) # ctb from ctbBio.fasta import iterate_fasta as parse_fasta", "for gc skew cummulative sums cs = 0 # cummulative", "= 'tight') plt.close() pdf.close() def check_peaks(peaks, length): \"\"\" select pair", "and max that are not too close or too far", "label = next(a_label)) # add vertical lines if vert is", "('{:,}'.format(ori)), \\ 'Ter:%s' % ('{:,}'.format(ter))], [labels[1]]], \\ vert = [(ori,", "\"\"\" script for calculating gc skew <NAME> <EMAIL> \"\"\" #", "file=sys.stderr) continue ori, ter, skew, c_skew = gc_skew(name, length, seq,", "min_len = 10 * window for name, length, seq in", "action = 'store_false', \\ help = 'do not generate plots,", "contig length (default = 10 x window)') parser.add_argument(\\ '-w', default", "skew[1][0::N]] if ori is False: plot_two(title, subtitle, [skew], [c_skew], labels,", "= signal.argrelextrema(np.asarray(c_skew[1]), np.greater, order = 1)[0].tolist() # return False if", "of replication') parser.add_argument(\\ '-f', nargs = '*', action = 'store',", "'-w', default = 1000, type = int, \\ help =", "multi-fasta sequences into single genome') parser.add_argument(\\ '--no-plot', action = 'store_false',", "and x - axis for a in A: x, y", "G + C # calculate sliding windows for (G -", "find_ori_ter(c_skew, length): \"\"\" find origin and terminus of replication based", "= next(a_colors), marker = 'o', ms = 4, label =", "marker = 'o', ms = 4, label = next(a_label)) #", "c_skew = [[], []] # x and y for gc", "= 10 x 
window)') parser.add_argument(\\ '-w', default = 1000, type", "\\ help = 'do not generate plots, print GC Skew", "def find_ori_ter(c_skew, length): \"\"\" find origin and terminus of replication", "are probably wrong closest, farthest = int(length * float(0.45)), int(length", "skew = ((G - C) / (G + C)) *", "(default = 10)') parser.add_argument(\\ '--single', action = 'store_true', \\ help", "ax1 = plt.subplots() colors = ['0.75', 'b', 'r', 'c', 'y',", "a_label = cycle(legend[0]) b_label = cycle(legend[1]) # plot left axis", "y = b ax2.set_ylabel(labels[1], labelpad = 8) ax2.plot(x, y, c", "of gc skew over sequence windows gc skew = ((G", "Origin: %s Terminus: %s' \\ % (name, ori, ter), file=sys.stderr)", "axis and x - axis for a in A: x,", "x[1], reverse = False) # trough and peak a =", "y = a ax1.set_ylabel(labels[0], labelpad = 3) ax1.set_xlabel(labels[-1]) ax1.plot(x, y,", "in B]) xmax = max([max(i[0]) for i in A] +", "= sorted(pairs, reverse = True)[0] return [tr[0], pk[0]] def find_ori_ter(c_skew,", "C for base in seq: try: gmc.append(replacements[base]) except: gmc.append(0) #", "'b')]) return ori, ter, skew, c_skew def parse_genomes(fastas, single): \"\"\"", "np.less, order = 1)[0].tolist() c_skew_max = signal.argrelextrema(np.asarray(c_skew[1]), np.greater, order =", "distance between values if (a <= farthest and a >=", "\"\"\" if single is True: for genome in fastas: sequence", "if files[0] == '-': return (sys.stdin) return (open(i) for i", "= %s, slide = %s)' % (window, slide) labels =", "'store_true', \\ help = 'combine multi-fasta sequences into single genome')", "[[], []] # x and y for gc skew cummulative", "title = title for chart A = data for left", "'same').tolist())] gpc = [[i, c] for i, c in enumerate(signal.fftconvolve(gpc,", "as plt from matplotlib.backends.backend_pdf import PdfPages plt.rcParams['pdf.fonttype'] = 42 from", "% (name) subtitle = '(window = %s, slide = %s)'", "[[labels[0]], [labels[1]]]) else: plot_two(title, subtitle, [skew], 
[c_skew], labels, \\ [[labels[0],", "import sys import argparse import numpy as np from scipy", "length) # plot data if plot_skew is True: title =", "framealpha = 0.0\\ ) # save pdf = PdfPages('%s.pdf' %", "python3 \"\"\" script for calculating gc skew <NAME> <EMAIL> \"\"\"", "if no peaks were detected if len(c_skew_min) == 0 or", "yield (genome.name.rsplit('.', 1)[0], len(sequence), sequence) else: for genome in fastas:", "0: return [False, False] else: c_skew_min = [[c_skew[0][i], c_skew[1][i]] for", "x, y = a ax1.set_ylabel(labels[0], labelpad = 3) ax1.set_xlabel(labels[-1]) ax1.plot(x,", "if single is True: for genome in fastas: sequence =", "length, seq in parse_genomes(fastas, single): if length < min_len: print('%s:", "reverse = False) # trough and peak a = (tr[0]", "closest): pairs.append([pt, tr, pk]) if len(pairs) == 0: return [False,", "for i in c_skew_max] ori, ter = check_peaks([c_skew_min, c_skew_max], length)", "if vert is not False: for i in vert: x,", "+ C) weights = np.ones(window)/window gmc = [[i, c] for", "(window, slide) labels = ['GC Skew', 'Cumulative GC Skew', 'Position", "bbox_to_anchor=(0.45, -0.125), \\ prop = {'size':8}, \\ framealpha = 0.0\\", "product # plotting modules from matplotlib import use as mplUse", "sys import argparse import numpy as np from scipy import", "= True)[0] return [tr[0], pk[0]] def find_ori_ter(c_skew, length): \"\"\" find", "type = int, \\ help = 'slide length (default =", "pos in enumerate(skew[0]): out = [name, pos, skew[1][i], c_skew[1][i]] print('\\t'.join([str(i)", "i, pos in enumerate(skew[0]): out = [name, pos, skew[1][i], c_skew[1][i]]", "sure gets origin and ter right tr, pk = sorted(list(pair),", "vert is not False: for i in vert: x, c", "= vars(parser.parse_args()) fastas = open_files(args['f']) single, plot_skew = args['single'], args['no_plot']", "Short' % (name), file=sys.stderr) continue ori, ter, skew, c_skew =", "ter = '{:,}'.format(ori), '{:,}'.format(ter) print('%s -> Origin: %s Terminus: 
%s'", "length (default = 1000)') parser.add_argument(\\ '-s', default = 10, type", "= (tr[0] - pk[0]) % length b = (pk[0] -", "files) if __name__ == '__main__': parser = argparse.ArgumentParser(description = \\", "for gc skew c_skew = [[], []] # x and", "right', \\ bbox_to_anchor=(0.45, -0.125), \\ prop = {'size':8}, \\ framealpha", "= 'slide length (default = 10)') parser.add_argument(\\ '--single', action =", "'Position', 'GC Skew', 'Cumulative GC Skew'])) for i, pos in", "convert to G + C gpc = [abs(i) for i", "on cumulative GC Skew \"\"\" # find origin and terminus", "1)[1].split()[0] yield (ID, len(seq[1]), list(seq[1].upper())) def open_files(files): \"\"\" open files", "args = vars(parser.parse_args()) fastas = open_files(args['f']) single, plot_skew = args['single'],", "gc_skew(name, length, seq, window, slide, plot_skew): \"\"\" calculate gc skew", "peak a = (tr[0] - pk[0]) % length b =", "if plot_skew is False: print('\\t'.join(['# Name', 'Position', 'GC Skew', 'Cumulative", "fastas: sequence = [] for seq in parse_fasta(genome): sequence.extend(list(seq[1].upper())) yield", "# cummulative sum # select windows to use based on", "= sorted(list(pair), key = lambda x: x[1], reverse = False)", "# convert to G - C replacements = {'G':1, 'C':-1,", "is True: title = '%s GC Skew' % (name) subtitle", "argparse.ArgumentParser(description = \\ '# calculate gc skew and find Ori", "subtitle = '(window = %s, slide = %s)' % (window,", "== 0: return [False, False] pt, tr, pk = sorted(pairs,", "\"\"\" # python modules import os import sys import argparse", "enumerate(signal.fftconvolve(gmc, weights, 'same').tolist())] gpc = [[i, c] for i, c", "of replication based on # cumulative gc skew min and", "True: for genome in fastas: sequence = [] for seq", "or (b <=farthest and b >= closest): pairs.append([pt, tr, pk])", "ori, ter, skew, c_skew def parse_genomes(fastas, single): \"\"\" generator for", "right tr, pk = sorted(list(pair), key = lambda x: x[1],", "skew, c_skew def 
parse_genomes(fastas, single): \"\"\" generator for parsing fastas", "length < min_len: print('%s: Too Short' % (name), file=sys.stderr) continue", "(name, ori, ter), file=sys.stderr) if plot_skew is False: print('\\t'.join(['# Name',", "gc skew over sequence windows gc skew = ((G -", "(G + C) weights = np.ones(window)/window gmc = [[i, c]", "plt.close() pdf.close() def check_peaks(peaks, length): \"\"\" select pair of min", "(pk[0] - tr[0]) % length pt = abs(tr[1] - pk[1])", "[] # G - C for base in seq: try:", "gc skew sum skew = [[], []] # x and", "slide = %s)' % (window, slide) labels = ['GC Skew',", "python modules import os import sys import argparse import numpy", "help = 'combine multi-fasta sequences into single genome') parser.add_argument(\\ '--no-plot',", "seq in parse_fasta(genome): ID = seq[0].split('>', 1)[1].split()[0] yield (ID, len(seq[1]),", "right axis ax2 = ax1.twinx() for b in B: x,", "False: print('\\t'.join(['# Name', 'Position', 'GC Skew', 'Cumulative GC Skew'])) for", "args['w'], args['s'] min_len = args['l'] if min_len is False: min_len", "= 10) # legend ax1.legend(loc = 'upper left', \\ bbox_to_anchor=(0.55,", "'__main__': parser = argparse.ArgumentParser(description = \\ '# calculate gc skew", "and cumulative sum of gc skew over sequence windows gc", "in multifasta file \"\"\" if single is True: for genome", "chart A = data for left axis [[x], [y]] B", "'k', 'g'] a_colors = cycle(colors) b_colors = cycle(colors[::-1]) a_label =", "\\ bbox_to_anchor=(0.45, -0.125), \\ prop = {'size':8}, \\ framealpha =", "find_ori_ter(c_skew, length) # plot data if plot_skew is True: title", "files[0] == '-': return (sys.stdin) return (open(i) for i in", "i ax1.axvline(x = x, c = c, label = next(a_label),", "if ori/ter peaks are too close or too far apart,", "ter = check_peaks([c_skew_min, c_skew_max], length) return ori, ter def gc_skew(name,", "= [[], []] # x and y for gc skew", "if length < min_len: print('%s: Too Short' % (name), file=sys.stderr)", "= 
x, c = c, label = next(a_label), linewidth =", "c_skew_min = signal.argrelextrema(np.asarray(c_skew[1]), np.less, order = 1)[0].tolist() c_skew_max = signal.argrelextrema(np.asarray(c_skew[1]),", "skew over sequence windows gc skew = ((G - C)", "= plt.subplots() colors = ['0.75', 'b', 'r', 'c', 'y', 'm',", "in A] + [max(i[0]) for i in B]) ax2.set_xlim(xmin, xmax)", "[[c_skew[0][i], c_skew[1][i]] for i in c_skew_min] c_skew_max = [[c_skew[0][i], c_skew[1][i]]", "= np.ones(window)/window gmc = [[i, c] for i, c in", "over sequence windows gc skew = ((G - C) /", "for i, c in enumerate(signal.fftconvolve(gmc, weights, 'same').tolist())] gpc = [[i,", "parser.add_argument(\\ '-f', nargs = '*', action = 'store', required =", "'store', required = True, \\ help = 'fasta(s)') parser.add_argument(\\ '-l',", "right axis lables = [left label, right label, x label]", "'_')) pdf.savefig(bbox_inches = 'tight') plt.close() pdf.close() def check_peaks(peaks, length): \"\"\"", "plot_skew is False: print('\\t'.join(['# Name', 'Position', 'GC Skew', 'Cumulative GC", "= cycle(legend[0]) b_label = cycle(legend[1]) # plot left axis and", "xmin = min([min(i[1]) for i in A] + [min(i[0]) for", "{'size':8}, \\ framealpha = 0.0\\ ) # save pdf =", "single, plot_skew = args['single'], args['no_plot'] window, slide = args['w'], args['s']", "'-' \"\"\" if files is None: return files if files[0]", "ter def gc_skew(name, length, seq, window, slide, plot_skew): \"\"\" calculate", "= 'o', ms = 4, label = next(a_label)) # add", "'r'), (ter, 'b')]) return ori, ter, skew, c_skew def parse_genomes(fastas,", "pdf.savefig(bbox_inches = 'tight') plt.close() pdf.close() def check_peaks(peaks, length): \"\"\" select", "p = gpc[i][1] if p == 0: gcs = 0", "True: title = '%s GC Skew' % (name) subtitle =", "a in A: x, y = a ax1.set_ylabel(labels[0], labelpad =", "os import sys import argparse import numpy as np from", "replication based on # cumulative gc skew min and max", "files in list, use stdin if first item 
in list", "type = int, \\ help = 'window length (default =", "for i in B]) ax2.set_xlim(xmin, xmax) # title plt.suptitle(title, fontsize", "min and max peaks c_skew_min = signal.argrelextrema(np.asarray(c_skew[1]), np.less, order =", "min_len: print('%s: Too Short' % (name), file=sys.stderr) continue ori, ter,", "tr, pk = sorted(list(pair), key = lambda x: x[1], reverse", "abs(tr[1] - pk[1]) # distance between values if (a <=", "in seq: try: gmc.append(replacements[base]) except: gmc.append(0) # convert to G", "min and max that are not too close or too", "True)[0] return [tr[0], pk[0]] def find_ori_ter(c_skew, length): \"\"\" find origin", "[[labels[0], 'Ori:%s' % ('{:,}'.format(ori)), \\ 'Ter:%s' % ('{:,}'.format(ter))], [labels[1]]], \\", "# ctb from ctbBio.fasta import iterate_fasta as parse_fasta def plot_two(title,", "skew min and max peaks c_skew_min = signal.argrelextrema(np.asarray(c_skew[1]), np.less, order", "generate plots, print GC Skew to stdout') args = vars(parser.parse_args())", "ori, ter = 'n/a', 'n/a' else: ori, ter = '{:,}'.format(ori),", "False] else: c_skew_min = [[c_skew[0][i], c_skew[1][i]] for i in c_skew_min]", "== 0: gcs = 0 else: gcs = m/p cs", "i in B]) ax2.set_xlim(xmin, xmax) # title plt.suptitle(title, fontsize =", "cumulative gc skew min and max peaks c_skew_min = signal.argrelextrema(np.asarray(c_skew[1]),", "10 * window for name, length, seq in parse_genomes(fastas, single):", "ax1.plot(x, y, c = next(a_colors), marker = 'o', ms =", "matplotlib.backends.backend_pdf import PdfPages plt.rcParams['pdf.fonttype'] = 42 from matplotlib import rc", "legend]] \"\"\" fig, ax1 = plt.subplots() colors = ['0.75', 'b',", "'store_false', \\ help = 'do not generate plots, print GC", "plt.rcParams['pdf.fonttype'] = 42 from matplotlib import rc rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) # ctb", "= 'fasta(s)') parser.add_argument(\\ '-l', default = False, type = int,", "for i, m in gmc[0::slide]: p = gpc[i][1] if p", "m/p cs += 
gcs skew[0].append(i) c_skew[0].append(i) skew[1].append(gcs) c_skew[1].append(cs) ori, ter", "GC Skew'])) for i, pos in enumerate(skew[0]): out = [name,", "lables = [left label, right label, x label] legend =", "values if (a <= farthest and a >= closest) or", "if p == 0: gcs = 0 else: gcs =", "ter = find_ori_ter(c_skew, length) # plot data if plot_skew is", "\\ help = 'fasta(s)') parser.add_argument(\\ '-l', default = False, type", "stdout') args = vars(parser.parse_args()) fastas = open_files(args['f']) single, plot_skew =", "\"\"\" find origin and terminus of replication based on cumulative", "default = 10, type = int, \\ help = 'slide", "find origin and terminus of replication based on cumulative GC", "= '(window = %s, slide = %s)' % (window, slide)", "plt.legend(loc = 'upper right', \\ bbox_to_anchor=(0.45, -0.125), \\ prop =", "int, \\ help = 'minimum contig length (default = 10", "= 10 * window for name, length, seq in parse_genomes(fastas,", "'minimum contig length (default = 10 x window)') parser.add_argument(\\ '-w',", "is True: for genome in fastas: sequence = [] for", "pk]) if len(pairs) == 0: return [False, False] pt, tr,", "name, length, seq in parse_genomes(fastas, single): if length < min_len:", "is not False: for i in vert: x, c =", "# add vertical lines if vert is not False: for", "else: c_skew_min = [[c_skew[0][i], c_skew[1][i]] for i in c_skew_min] c_skew_max", "%s' \\ % (name, ori, ter), file=sys.stderr) if plot_skew is", "np.greater, order = 1)[0].tolist() # return False if no peaks", "lines if vert is not False: for i in vert:", "cummulative sum # select windows to use based on slide", "= min([min(i[1]) for i in A] + [min(i[0]) for i", "c_skew = gc_skew(name, length, seq, window, slide, plot_skew) if ori", "bbox_to_anchor=(0.55, -0.125), \\ prop = {'size':8}, \\ framealpha = 0.0", "axis ax2 = ax1.twinx() for b in B: x, y", "gcs = m/p cs += gcs skew[0].append(i) c_skew[0].append(i) skew[1].append(gcs) c_skew[1].append(cs)", "if ori is False: 
plot_two(title, subtitle, [skew], [c_skew], labels, \\", "% (name), file=sys.stderr) continue ori, ter, skew, c_skew = gc_skew(name,", "import cycle, product # plotting modules from matplotlib import use", "<EMAIL> \"\"\" # python modules import os import sys import", "key = lambda x: x[1], reverse = False) # trough", "gmc] # G + C # calculate sliding windows for", "single): if length < min_len: print('%s: Too Short' % (name),", "too far apart, they are probably wrong closest, farthest =", "= 'window length (default = 1000)') parser.add_argument(\\ '-s', default =", "import rc rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) # ctb from ctbBio.fasta import iterate_fasta as", "[max(i[0]) for i in B]) ax2.set_xlim(xmin, xmax) # title plt.suptitle(title,", "# find origin and terminus of replication based on #", "# calculate sliding windows for (G - C) and (G", "'r', 'c', 'y', 'm', 'k', 'g'] a_colors = cycle(colors) b_colors", "'(window = %s, slide = %s)' % (window, slide) labels", "rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) # ctb from ctbBio.fasta import iterate_fasta as parse_fasta def", "'slide length (default = 10)') parser.add_argument(\\ '--single', action = 'store_true',", "ori, ter = '{:,}'.format(ori), '{:,}'.format(ter) print('%s -> Origin: %s Terminus:", "Skew' % (name) subtitle = '(window = %s, slide =", "i in c_skew_max] ori, ter = check_peaks([c_skew_min, c_skew_max], length) return", "[]] # x and y for gc skew cummulative sums", "'Position on Genome (bp)'] # remove some points for plotting", "else: for genome in fastas: for seq in parse_fasta(genome): ID", "'-s', default = 10, type = int, \\ help =", "(ID, len(seq[1]), list(seq[1].upper())) def open_files(files): \"\"\" open files in list,", "('{:,}'.format(ter))], [labels[1]]], \\ vert = [(ori, 'r'), (ter, 'b')]) return", "# plot right axis ax2 = ax1.twinx() for b in", "multifasta file \"\"\" if single is True: for genome in", "gc_skew(name, length, seq, window, slide, 
plot_skew) if ori == False:", "length): \"\"\" select pair of min and max that are", "ax1.axvline(x = x, c = c, label = next(a_label), linewidth", "ori/ter peaks are too close or too far apart, they", "'y', 'm', 'k', 'g'] a_colors = cycle(colors) b_colors = cycle(colors[::-1])", "0: skew = [skew[0][0::N], skew[1][0::N]] if ori is False: plot_two(title,", "item in list is '-' \"\"\" if files is None:", "= next(b_label)) xmin = min([min(i[1]) for i in A] +", "help = 'fasta(s)') parser.add_argument(\\ '-l', default = False, type =", "a_colors = cycle(colors) b_colors = cycle(colors[::-1]) a_label = cycle(legend[0]) b_label", "skew and find Ori and Ter of replication') parser.add_argument(\\ '-f',", "int, \\ help = 'window length (default = 1000)') parser.add_argument(\\", "if min_len is False: min_len = 10 * window for", "[right legend]] \"\"\" fig, ax1 = plt.subplots() colors = ['0.75',", "[c_skew], labels, \\ [[labels[0]], [labels[1]]]) else: plot_two(title, subtitle, [skew], [c_skew],", "c_skew_max = [[c_skew[0][i], c_skew[1][i]] for i in c_skew_max] ori, ter", "if plot_skew is True: title = '%s GC Skew' %", "max([max(i[0]) for i in A] + [max(i[0]) for i in", "seq, window, slide, plot_skew) if ori == False: ori, ter", "from matplotlib.backends.backend_pdf import PdfPages plt.rcParams['pdf.fonttype'] = 42 from matplotlib import", "linewidth = 2, label = next(b_label)) xmin = min([min(i[1]) for", "or too far apart and have greatest y distance between", "label = next(a_label), linewidth = 2) # plot right axis", "import argparse import numpy as np from scipy import signal", "c, label = next(a_label), linewidth = 2) # plot right", "for i in gmc] # G + C # calculate", "length): \"\"\" find origin and terminus of replication based on", "replication') parser.add_argument(\\ '-f', nargs = '*', action = 'store', required", "# x and y for gc skew c_skew = [[],", "C # calculate sliding windows for (G - C) and", "\"\"\" if files is None: return files if files[0] ==", "\\ 
framealpha = 0.0 ) plt.legend(loc = 'upper right', \\", "(b <=farthest and b >= closest): pairs.append([pt, tr, pk]) if", "= False): \"\"\" plot with differnt y axes title =", "select pair of min and max that are not too", "'GC Skew', 'Cumulative GC Skew'])) for i, pos in enumerate(skew[0]):", "c_skew_min = [[c_skew[0][i], c_skew[1][i]] for i in c_skew_min] c_skew_max =", "\"\"\" generator for parsing fastas if single is True, combine", "gpc = [abs(i) for i in gmc] # G +", "c_skew_min] c_skew_max = [[c_skew[0][i], c_skew[1][i]] for i in c_skew_max] ori,", "continue ori, ter, skew, c_skew = gc_skew(name, length, seq, window,", "### added this to make sure gets origin and ter", "i in c_skew_min] c_skew_max = [[c_skew[0][i], c_skew[1][i]] for i in", "import signal from itertools import cycle, product # plotting modules", "i, c in enumerate(signal.fftconvolve(gmc, weights, 'same').tolist())] gpc = [[i, c]", "Skew', 'Cumulative GC Skew'])) for i, pos in enumerate(skew[0]): out", "return (open(i) for i in files) if __name__ == '__main__':", "'upper right', \\ bbox_to_anchor=(0.45, -0.125), \\ prop = {'size':8}, \\", "greatest y distance between one another \"\"\" # if ori/ter", "remove some points for plotting (approx. 1,000 datapoints) N =", "= [skew[0][0::N], skew[1][0::N]] if ori is False: plot_two(title, subtitle, [skew],", "title = '%s GC Skew' % (name) subtitle = '(window", "ori, ter, skew, c_skew = gc_skew(name, length, seq, window, slide,", "__name__ == '__main__': parser = argparse.ArgumentParser(description = \\ '# calculate", "= data for left axis [[x], [y]] B = data", "label, right label, x label] legend = [[left legend], [right", "(bp)'] # remove some points for plotting (approx. 
1,000 datapoints)", "return files if files[0] == '-': return (sys.stdin) return (open(i)", "= a ax1.set_ylabel(labels[0], labelpad = 3) ax1.set_xlabel(labels[-1]) ax1.plot(x, y, c", "plt.subplots() colors = ['0.75', 'b', 'r', 'c', 'y', 'm', 'k',", "too close or too far apart, they are probably wrong", "args['s'] min_len = args['l'] if min_len is False: min_len =", "- pk[0]) % length b = (pk[0] - tr[0]) %", "labels = ['GC Skew', 'Cumulative GC Skew', 'Position on Genome", "plotting (approx. 1,000 datapoints) N = int(len(skew[0])/1000) if N !=", "gmc.append(0) # convert to G + C gpc = [abs(i)", "- pk[1]) # distance between values if (a <= farthest", "10 x window)') parser.add_argument(\\ '-w', default = 1000, type =", "fig, ax1 = plt.subplots() colors = ['0.75', 'b', 'r', 'c',", "vert = False): \"\"\" plot with differnt y axes title", "check_peaks(peaks, length): \"\"\" select pair of min and max that", "are not too close or too far apart and have", "'do not generate plots, print GC Skew to stdout') args", "from itertools import cycle, product # plotting modules from matplotlib", "ter right tr, pk = sorted(list(pair), key = lambda x:", "1,000 datapoints) N = int(len(skew[0])/1000) if N != 0: skew", "pk[0]) % length b = (pk[0] - tr[0]) % length", "from scipy import signal from itertools import cycle, product #", "order = 1)[0].tolist() # return False if no peaks were", "in A: x, y = a ax1.set_ylabel(labels[0], labelpad = 3)", "== 0: return [False, False] else: c_skew_min = [[c_skew[0][i], c_skew[1][i]]", "weights, 'same').tolist())] # calculate gc skew and cummulative gc skew", "ori, ter), file=sys.stderr) if plot_skew is False: print('\\t'.join(['# Name', 'Position',", "\\ [[labels[0]], [labels[1]]]) else: plot_two(title, subtitle, [skew], [c_skew], labels, \\", "plot_two(title, subtitle, [skew], [c_skew], labels, \\ [[labels[0], 'Ori:%s' % ('{:,}'.format(ori)),", "origin and terminus of replication based on # cumulative gc", "and Ter of replication') 
parser.add_argument(\\ '-f', nargs = '*', action", "generator for parsing fastas if single is True, combine sequences", "N != 0: skew = [skew[0][0::N], skew[1][0::N]] if ori is", "pdf = PdfPages('%s.pdf' % title.replace(' ', '_')) pdf.savefig(bbox_inches = 'tight')", "def open_files(files): \"\"\" open files in list, use stdin if", "closest, farthest = int(length * float(0.45)), int(length * float(0.55)) pairs", "(sys.stdin) return (open(i) for i in files) if __name__ ==", "= 'store_false', \\ help = 'do not generate plots, print", "signal.argrelextrema(np.asarray(c_skew[1]), np.greater, order = 1)[0].tolist() # return False if no", "and y for gc skew cummulative sums cs = 0", "parse_fasta def plot_two(title, subtitle, A, B, labels, legend, vert =", "matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages plt.rcParams['pdf.fonttype'] = 42", "ax2.set_xlim(xmin, xmax) # title plt.suptitle(title, fontsize = 16) plt.title(subtitle, fontsize", "\"\"\" # convert to G - C replacements = {'G':1,", "(approx. 
1,000 datapoints) N = int(len(skew[0])/1000) if N != 0:", "# distance between values if (a <= farthest and a", "# python modules import os import sys import argparse import", "open_files(files): \"\"\" open files in list, use stdin if first", "convert to G - C replacements = {'G':1, 'C':-1, 'A':0,", "= [] # G - C for base in seq:", "prop = {'size':8}, \\ framealpha = 0.0\\ ) # save", "next(a_label)) # add vertical lines if vert is not False:", "\\ 'Ter:%s' % ('{:,}'.format(ter))], [labels[1]]], \\ vert = [(ori, 'r'),", "Too Short' % (name), file=sys.stderr) continue ori, ter, skew, c_skew", "x and y for gc skew c_skew = [[], []]", "def parse_genomes(fastas, single): \"\"\" generator for parsing fastas if single", "min_len is False: min_len = 10 * window for name,", "pairs.append([pt, tr, pk]) if len(pairs) == 0: return [False, False]", "* window size * genome length \"\"\" # convert to", "1)[0].tolist() # return False if no peaks were detected if", "in A] + [min(i[0]) for i in B]) xmax =", "= cycle(legend[1]) # plot left axis and x - axis", "matplotlib import use as mplUse mplUse('Agg') import matplotlib.pyplot as plt", "= c, label = next(a_label), linewidth = 2) # plot", "vars(parser.parse_args()) fastas = open_files(args['f']) single, plot_skew = args['single'], args['no_plot'] window,", "= check_peaks([c_skew_min, c_skew_max], length) return ori, ter def gc_skew(name, length,", "= lambda x: x[1], reverse = False) # trough and", "- tr[0]) % length pt = abs(tr[1] - pk[1]) #", "else: gcs = m/p cs += gcs skew[0].append(i) c_skew[0].append(i) skew[1].append(gcs)", "{'size':8}, \\ framealpha = 0.0 ) plt.legend(loc = 'upper right',", "matplotlib import rc rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) # ctb from ctbBio.fasta import iterate_fasta", "= int, \\ help = 'slide length (default = 10)')", "c in enumerate(signal.fftconvolve(gpc, weights, 'same').tolist())] # calculate gc skew and", "= [[c_skew[0][i], c_skew[1][i]] for i in c_skew_max] ori, ter 
=", "if files is None: return files if files[0] == '-':", "c_skew_max] ori, ter = check_peaks([c_skew_min, c_skew_max], length) return ori, ter", "select windows to use based on slide for i, m", "in c_skew_min] c_skew_max = [[c_skew[0][i], c_skew[1][i]] for i in c_skew_max]", "'--no-plot', action = 'store_false', \\ help = 'do not generate", "B: x, y = b ax2.set_ylabel(labels[1], labelpad = 8) ax2.plot(x,", "they are probably wrong closest, farthest = int(length * float(0.45)),", "10) # legend ax1.legend(loc = 'upper left', \\ bbox_to_anchor=(0.55, -0.125),", "skew sum skew = [[], []] # x and y", "== False: ori, ter = 'n/a', 'n/a' else: ori, ter", "signal from itertools import cycle, product # plotting modules from", "is '-' \"\"\" if files is None: return files if", "(name), file=sys.stderr) continue ori, ter, skew, c_skew = gc_skew(name, length,", "y, c = next(a_colors), marker = 'o', ms = 4,", "datapoints) N = int(len(skew[0])/1000) if N != 0: skew =", "False, type = int, \\ help = 'minimum contig length", "2) # plot right axis ax2 = ax1.twinx() for b", "0: return [False, False] pt, tr, pk = sorted(pairs, reverse", "calculate gc skew and find Ori and Ter of replication')", "ter, skew, c_skew = gc_skew(name, length, seq, window, slide, plot_skew)", "gcs = 0 else: gcs = m/p cs += gcs", "False) # trough and peak a = (tr[0] - pk[0])", "or len(c_skew_min) == 0: return [False, False] else: c_skew_min =", "and find Ori and Ter of replication') parser.add_argument(\\ '-f', nargs", "length b = (pk[0] - tr[0]) % length pt =", "= 10)') parser.add_argument(\\ '--single', action = 'store_true', \\ help =", "skew[0].append(i) c_skew[0].append(i) skew[1].append(gcs) c_skew[1].append(cs) ori, ter = find_ori_ter(c_skew, length) #", "to use based on slide for i, m in gmc[0::slide]:", "!= 0: skew = [skew[0][0::N], skew[1][0::N]] if ori is False:", "subtitle, [skew], [c_skew], labels, \\ [[labels[0]], [labels[1]]]) else: plot_two(title, subtitle,", "in enumerate(skew[0]): out = 
[name, pos, skew[1][i], c_skew[1][i]] print('\\t'.join([str(i) for", "float(0.45)), int(length * float(0.55)) pairs = [] for pair in", "plt from matplotlib.backends.backend_pdf import PdfPages plt.rcParams['pdf.fonttype'] = 42 from matplotlib", "plt.suptitle(title, fontsize = 16) plt.title(subtitle, fontsize = 10) # legend", "slide, plot_skew): \"\"\" calculate gc skew and cumulative sum of", "gmc.append(replacements[base]) except: gmc.append(0) # convert to G + C gpc", "based on slide for i, m in gmc[0::slide]: p =", "return [False, False] pt, tr, pk = sorted(pairs, reverse =", "ax2 = ax1.twinx() for b in B: x, y =", "PdfPages('%s.pdf' % title.replace(' ', '_')) pdf.savefig(bbox_inches = 'tight') plt.close() pdf.close()", "if __name__ == '__main__': parser = argparse.ArgumentParser(description = \\ '#", "\\ % (name, ori, ter), file=sys.stderr) if plot_skew is False:", "= i ax1.axvline(x = x, c = c, label =", "\\ vert = [(ori, 'r'), (ter, 'b')]) return ori, ter,", "A, B, labels, legend, vert = False): \"\"\" plot with", "cycle(colors[::-1]) a_label = cycle(legend[0]) b_label = cycle(legend[1]) # plot left", "(ter, 'b')]) return ori, ter, skew, c_skew def parse_genomes(fastas, single):", "= seq[0].split('>', 1)[1].split()[0] yield (ID, len(seq[1]), list(seq[1].upper())) def open_files(files): \"\"\"", "0: gcs = 0 else: gcs = m/p cs +=", "for genome in fastas: sequence = [] for seq in", "= [[c_skew[0][i], c_skew[1][i]] for i in c_skew_min] c_skew_max = [[c_skew[0][i],", "for i in vert: x, c = i ax1.axvline(x =", "try: gmc.append(replacements[base]) except: gmc.append(0) # convert to G + C", "= open_files(args['f']) single, plot_skew = args['single'], args['no_plot'] window, slide =", "16) plt.title(subtitle, fontsize = 10) # legend ax1.legend(loc = 'upper", "legend ax1.legend(loc = 'upper left', \\ bbox_to_anchor=(0.55, -0.125), \\ prop", "ori, ter = check_peaks([c_skew_min, c_skew_max], length) return ori, ter def", "= gpc[i][1] if p == 0: gcs = 0 else:", "trough 
and peak a = (tr[0] - pk[0]) % length", "Genome (bp)'] # remove some points for plotting (approx. 1,000", "ms = 4, label = next(a_label)) # add vertical lines", "* float(0.55)) pairs = [] for pair in list(product(*peaks)): ###", "= [[i, c] for i, c in enumerate(signal.fftconvolve(gpc, weights, 'same').tolist())]", "help = 'do not generate plots, print GC Skew to", "= data for right axis lables = [left label, right", "= next(a_label), linewidth = 2) # plot right axis ax2", "float(0.55)) pairs = [] for pair in list(product(*peaks)): ### added", "args['no_plot'] window, slide = args['w'], args['s'] min_len = args['l'] if", "length \"\"\" # convert to G - C replacements =", "in parse_fasta(genome): sequence.extend(list(seq[1].upper())) yield (genome.name.rsplit('.', 1)[0], len(sequence), sequence) else: for", "slide, plot_skew) if ori == False: ori, ter = 'n/a',", "y distance between one another \"\"\" # if ori/ter peaks", "with differnt y axes title = title for chart A", "this to make sure gets origin and ter right tr,", "subtitle, [skew], [c_skew], labels, \\ [[labels[0], 'Ori:%s' % ('{:,}'.format(ori)), \\", "'{:,}'.format(ori), '{:,}'.format(ter) print('%s -> Origin: %s Terminus: %s' \\ %", "= 1)[0].tolist() c_skew_max = signal.argrelextrema(np.asarray(c_skew[1]), np.greater, order = 1)[0].tolist() #", "len(c_skew_min) == 0 or len(c_skew_min) == 0: return [False, False]", "ax1.set_xlabel(labels[-1]) ax1.plot(x, y, c = next(a_colors), marker = 'o', ms", "based on # cumulative gc skew min and max peaks", "if single is True, combine sequences in multifasta file \"\"\"", "= [[i, c] for i, c in enumerate(signal.fftconvolve(gmc, weights, 'same').tolist())]", "not too close or too far apart and have greatest", "GC Skew to stdout') args = vars(parser.parse_args()) fastas = open_files(args['f'])", "slide = args['w'], args['s'] min_len = args['l'] if min_len is", "plot left axis and x - axis for a in", "order = 1)[0].tolist() c_skew_max = 
signal.argrelextrema(np.asarray(c_skew[1]), np.greater, order = 1)[0].tolist()", "0 or len(c_skew_min) == 0: return [False, False] else: c_skew_min", "for a in A: x, y = a ax1.set_ylabel(labels[0], labelpad", "[] for pair in list(product(*peaks)): ### added this to make", "for base in seq: try: gmc.append(replacements[base]) except: gmc.append(0) # convert", "seq in parse_fasta(genome): sequence.extend(list(seq[1].upper())) yield (genome.name.rsplit('.', 1)[0], len(sequence), sequence) else:", "gc skew and find Ori and Ter of replication') parser.add_argument(\\", "args['l'] if min_len is False: min_len = 10 * window", "skew and cumulative sum of gc skew over sequence windows", "GC Skew' % (name) subtitle = '(window = %s, slide", "b = (pk[0] - tr[0]) % length pt = abs(tr[1]", "= 'upper left', \\ bbox_to_anchor=(0.55, -0.125), \\ prop = {'size':8},", "signal.argrelextrema(np.asarray(c_skew[1]), np.less, order = 1)[0].tolist() c_skew_max = signal.argrelextrema(np.asarray(c_skew[1]), np.greater, order", "size * genome length \"\"\" # convert to G -", "= [(ori, 'r'), (ter, 'b')]) return ori, ter, skew, c_skew", "cummulative gc skew sum skew = [[], []] # x", "\\ help = 'combine multi-fasta sequences into single genome') parser.add_argument(\\", "-> Origin: %s Terminus: %s' \\ % (name, ori, ter),", "of min and max that are not too close or", "gc skew cummulative sums cs = 0 # cummulative sum", "\\ help = 'minimum contig length (default = 10 x", "return [False, False] else: c_skew_min = [[c_skew[0][i], c_skew[1][i]] for i", "G + C gpc = [abs(i) for i in gmc]", "on slide for i, m in gmc[0::slide]: p = gpc[i][1]", "= 0 else: gcs = m/p cs += gcs skew[0].append(i)", "# select windows to use based on slide for i,", "axes title = title for chart A = data for", "for i, pos in enumerate(skew[0]): out = [name, pos, skew[1][i],", "for calculating gc skew <NAME> <EMAIL> \"\"\" # python modules", "C) / (G + C)) * window size * genome", "in fastas: sequence = [] for seq in 
parse_fasta(genome): sequence.extend(list(seq[1].upper()))", "# save pdf = PdfPages('%s.pdf' % title.replace(' ', '_')) pdf.savefig(bbox_inches", "for i in B]) xmax = max([max(i[0]) for i in", "(default = 10 x window)') parser.add_argument(\\ '-w', default = 1000,", "= ['0.75', 'b', 'r', 'c', 'y', 'm', 'k', 'g'] a_colors", "as mplUse mplUse('Agg') import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import", "gc skew = ((G - C) / (G + C))", "[]] # x and y for gc skew c_skew =", "points for plotting (approx. 1,000 datapoints) N = int(len(skew[0])/1000) if", "sequence.extend(list(seq[1].upper())) yield (genome.name.rsplit('.', 1)[0], len(sequence), sequence) else: for genome in", "x - axis for a in A: x, y =", "import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages plt.rcParams['pdf.fonttype'] =", "a = (tr[0] - pk[0]) % length b = (pk[0]", "plot_two(title, subtitle, [skew], [c_skew], labels, \\ [[labels[0]], [labels[1]]]) else: plot_two(title,", "wrong closest, farthest = int(length * float(0.45)), int(length * float(0.55))", "single is True, combine sequences in multifasta file \"\"\" if", "legend], [right legend]] \"\"\" fig, ax1 = plt.subplots() colors =", "'window length (default = 1000)') parser.add_argument(\\ '-s', default = 10,", "and y for gc skew c_skew = [[], []] #", "cumulative sum of gc skew over sequence windows gc skew", "+ C gpc = [abs(i) for i in gmc] #", "cycle, product # plotting modules from matplotlib import use as", "print GC Skew to stdout') args = vars(parser.parse_args()) fastas =", "= m/p cs += gcs skew[0].append(i) c_skew[0].append(i) skew[1].append(gcs) c_skew[1].append(cs) ori,", "(open(i) for i in files) if __name__ == '__main__': parser", "= 'upper right', \\ bbox_to_anchor=(0.45, -0.125), \\ prop = {'size':8},", "# G + C # calculate sliding windows for (G", "\\ [[labels[0], 'Ori:%s' % ('{:,}'.format(ori)), \\ 'Ter:%s' % ('{:,}'.format(ter))], [labels[1]]],", "4, label = next(a_label)) # add 
vertical lines if vert", "not False: for i in vert: x, c = i", "in enumerate(signal.fftconvolve(gpc, weights, 'same').tolist())] # calculate gc skew and cummulative", "N = int(len(skew[0])/1000) if N != 0: skew = [skew[0][0::N],", "== '-': return (sys.stdin) return (open(i) for i in files)", "% (name, ori, ter), file=sys.stderr) if plot_skew is False: print('\\t'.join(['#", "plot_skew): \"\"\" calculate gc skew and cumulative sum of gc", "except: gmc.append(0) # convert to G + C gpc =", "for seq in parse_fasta(genome): ID = seq[0].split('>', 1)[1].split()[0] yield (ID,", "check_peaks([c_skew_min, c_skew_max], length) return ori, ter def gc_skew(name, length, seq,", "'-f', nargs = '*', action = 'store', required = True,", "length (default = 10 x window)') parser.add_argument(\\ '-w', default =", "= find_ori_ter(c_skew, length) # plot data if plot_skew is True:", "list is '-' \"\"\" if files is None: return files", "title plt.suptitle(title, fontsize = 16) plt.title(subtitle, fontsize = 10) #", "gpc[i][1] if p == 0: gcs = 0 else: gcs", "= signal.argrelextrema(np.asarray(c_skew[1]), np.less, order = 1)[0].tolist() c_skew_max = signal.argrelextrema(np.asarray(c_skew[1]), np.greater,", "# trough and peak a = (tr[0] - pk[0]) %", "itertools import cycle, product # plotting modules from matplotlib import", "between values if (a <= farthest and a >= closest)", "ori is False: plot_two(title, subtitle, [skew], [c_skew], labels, \\ [[labels[0]],", "in files) if __name__ == '__main__': parser = argparse.ArgumentParser(description =", "to G - C replacements = {'G':1, 'C':-1, 'A':0, 'T':0,", "gpc = [[i, c] for i, c in enumerate(signal.fftconvolve(gpc, weights,", "= 1000, type = int, \\ help = 'window length", "== 0 or len(c_skew_min) == 0: return [False, False] else:", "False: for i in vert: x, c = i ax1.axvline(x", "skew <NAME> <EMAIL> \"\"\" # python modules import os import", "A: x, y = a ax1.set_ylabel(labels[0], labelpad = 3) ax1.set_xlabel(labels[-1])", "i in B]) xmax = 
max([max(i[0]) for i in A]", "one another \"\"\" # if ori/ter peaks are too close", "= %s)' % (window, slide) labels = ['GC Skew', 'Cumulative", "'Ter:%s' % ('{:,}'.format(ter))], [labels[1]]], \\ vert = [(ori, 'r'), (ter,", "single genome') parser.add_argument(\\ '--no-plot', action = 'store_false', \\ help =", "(genome.name.rsplit('.', 1)[0], len(sequence), sequence) else: for genome in fastas: for", "for (G - C) and (G + C) weights =", "gc skew and cummulative gc skew sum skew = [[],", "G - C for base in seq: try: gmc.append(replacements[base]) except:", "'-': return (sys.stdin) return (open(i) for i in files) if", "# convert to G + C gpc = [abs(i) for", "2, label = next(b_label)) xmin = min([min(i[1]) for i in", "label] legend = [[left legend], [right legend]] \"\"\" fig, ax1", "y for gc skew c_skew = [[], []] # x", "= [[left legend], [right legend]] \"\"\" fig, ax1 = plt.subplots()", "cs += gcs skew[0].append(i) c_skew[0].append(i) skew[1].append(gcs) c_skew[1].append(cs) ori, ter =", "open_files(args['f']) single, plot_skew = args['single'], args['no_plot'] window, slide = args['w'],", "len(pairs) == 0: return [False, False] pt, tr, pk =", "[skew], [c_skew], labels, \\ [[labels[0]], [labels[1]]]) else: plot_two(title, subtitle, [skew],", "i in A] + [min(i[0]) for i in B]) xmax", "-0.125), \\ prop = {'size':8}, \\ framealpha = 0.0\\ )", "in enumerate(signal.fftconvolve(gmc, weights, 'same').tolist())] gpc = [[i, c] for i,", "list, use stdin if first item in list is '-'", "c = c, label = next(a_label), linewidth = 2) #", "'Cumulative GC Skew', 'Position on Genome (bp)'] # remove some", "[[x], [y]] B = data for right axis lables =", "gets origin and ter right tr, pk = sorted(list(pair), key", "sum of gc skew over sequence windows gc skew =", "= 1)[0].tolist() # return False if no peaks were detected", "'C':-1, 'A':0, 'T':0, 'N':0} gmc = [] # G -", "+= gcs skew[0].append(i) c_skew[0].append(i) skew[1].append(gcs) c_skew[1].append(cs) ori, ter = 
find_ori_ter(c_skew,", "lambda x: x[1], reverse = False) # trough and peak", "fontsize = 16) plt.title(subtitle, fontsize = 10) # legend ax1.legend(loc", "no peaks were detected if len(c_skew_min) == 0 or len(c_skew_min)", "for name, length, seq in parse_genomes(fastas, single): if length <", "not generate plots, print GC Skew to stdout') args =", "out = [name, pos, skew[1][i], c_skew[1][i]] print('\\t'.join([str(i) for i in", "for i in files) if __name__ == '__main__': parser =", "stdin if first item in list is '-' \"\"\" if", "- C) / (G + C)) * window size *", "and max peaks c_skew_min = signal.argrelextrema(np.asarray(c_skew[1]), np.less, order = 1)[0].tolist()", "plot with differnt y axes title = title for chart", "# calculate gc skew and cummulative gc skew sum skew", "far apart, they are probably wrong closest, farthest = int(length", "B]) xmax = max([max(i[0]) for i in A] + [max(i[0])", "farthest = int(length * float(0.45)), int(length * float(0.55)) pairs =", "parser.add_argument(\\ '--single', action = 'store_true', \\ help = 'combine multi-fasta", "sums cs = 0 # cummulative sum # select windows", "= cycle(colors[::-1]) a_label = cycle(legend[0]) b_label = cycle(legend[1]) # plot", "i in vert: x, c = i ax1.axvline(x = x,", "= next(b_colors), linewidth = 2, label = next(b_label)) xmin =", "= [] for pair in list(product(*peaks)): ### added this to", "numpy as np from scipy import signal from itertools import", "0 else: gcs = m/p cs += gcs skew[0].append(i) c_skew[0].append(i)", "skew c_skew = [[], []] # x and y for", "Skew', 'Position on Genome (bp)'] # remove some points for", "import os import sys import argparse import numpy as np", "parse_fasta(genome): sequence.extend(list(seq[1].upper())) yield (genome.name.rsplit('.', 1)[0], len(sequence), sequence) else: for genome", "ID = seq[0].split('>', 1)[1].split()[0] yield (ID, len(seq[1]), list(seq[1].upper())) def open_files(files):", "'tight') plt.close() pdf.close() def check_peaks(peaks, length): \"\"\" 
select pair of", "windows to use based on slide for i, m in", "'n/a' else: ori, ter = '{:,}'.format(ori), '{:,}'.format(ter) print('%s -> Origin:", "in fastas: for seq in parse_fasta(genome): ID = seq[0].split('>', 1)[1].split()[0]", "'fasta(s)') parser.add_argument(\\ '-l', default = False, type = int, \\", "window, slide, plot_skew) if ori == False: ori, ter =", "= '{:,}'.format(ori), '{:,}'.format(ter) print('%s -> Origin: %s Terminus: %s' \\", "framealpha = 0.0 ) plt.legend(loc = 'upper right', \\ bbox_to_anchor=(0.45,", "i, c in enumerate(signal.fftconvolve(gpc, weights, 'same').tolist())] # calculate gc skew", "\"\"\" calculate gc skew and cumulative sum of gc skew", "title.replace(' ', '_')) pdf.savefig(bbox_inches = 'tight') plt.close() pdf.close() def check_peaks(peaks,", "False: plot_two(title, subtitle, [skew], [c_skew], labels, \\ [[labels[0]], [labels[1]]]) else:", "'-l', default = False, type = int, \\ help =", "a >= closest) or (b <=farthest and b >= closest):", "x: x[1], reverse = False) # trough and peak a", "Skew to stdout') args = vars(parser.parse_args()) fastas = open_files(args['f']) single,", "ori, ter def gc_skew(name, length, seq, window, slide, plot_skew): \"\"\"", "- C replacements = {'G':1, 'C':-1, 'A':0, 'T':0, 'N':0} gmc", "ori, ter = find_ori_ter(c_skew, length) # plot data if plot_skew", "gc skew c_skew = [[], []] # x and y", "c in enumerate(signal.fftconvolve(gmc, weights, 'same').tolist())] gpc = [[i, c] for", "help = 'minimum contig length (default = 10 x window)')", "c = next(a_colors), marker = 'o', ms = 4, label", "np.ones(window)/window gmc = [[i, c] for i, c in enumerate(signal.fftconvolve(gmc,", "for pair in list(product(*peaks)): ### added this to make sure", ") # save pdf = PdfPages('%s.pdf' % title.replace(' ', '_'))", "ori == False: ori, ter = 'n/a', 'n/a' else: ori,", "i in files) if __name__ == '__main__': parser = argparse.ArgumentParser(description", "labels, legend, vert = False): \"\"\" plot with differnt y", 
"plots, print GC Skew to stdout') args = vars(parser.parse_args()) fastas", "pair in list(product(*peaks)): ### added this to make sure gets", "peaks c_skew_min = signal.argrelextrema(np.asarray(c_skew[1]), np.less, order = 1)[0].tolist() c_skew_max =", "cycle(legend[1]) # plot left axis and x - axis for", "'combine multi-fasta sequences into single genome') parser.add_argument(\\ '--no-plot', action =", "close or too far apart, they are probably wrong closest,", "GC Skew \"\"\" # find origin and terminus of replication", "b >= closest): pairs.append([pt, tr, pk]) if len(pairs) == 0:", "prop = {'size':8}, \\ framealpha = 0.0 ) plt.legend(loc =", "= abs(tr[1] - pk[1]) # distance between values if (a", "[(ori, 'r'), (ter, 'b')]) return ori, ter, skew, c_skew def", "action = 'store', required = True, \\ help = 'fasta(s)')", "sequences in multifasta file \"\"\" if single is True: for", "farthest and a >= closest) or (b <=farthest and b", "from matplotlib import rc rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']}) # ctb from ctbBio.fasta import", "'c', 'y', 'm', 'k', 'g'] a_colors = cycle(colors) b_colors =", "peaks were detected if len(c_skew_min) == 0 or len(c_skew_min) ==", "skew cummulative sums cs = 0 # cummulative sum #", "replication based on cumulative GC Skew \"\"\" # find origin", "%s Terminus: %s' \\ % (name, ori, ter), file=sys.stderr) if", "c] for i, c in enumerate(signal.fftconvolve(gpc, weights, 'same').tolist())] # calculate", "labels, \\ [[labels[0], 'Ori:%s' % ('{:,}'.format(ori)), \\ 'Ter:%s' % ('{:,}'.format(ter))],", "# x and y for gc skew cummulative sums cs", "modules import os import sys import argparse import numpy as", "int(len(skew[0])/1000) if N != 0: skew = [skew[0][0::N], skew[1][0::N]] if", "= cycle(colors) b_colors = cycle(colors[::-1]) a_label = cycle(legend[0]) b_label =", "as np from scipy import signal from itertools import cycle,", "linewidth = 2) # plot right axis ax2 = ax1.twinx()", "xmax = max([max(i[0]) for i in A] 
+ [max(i[0]) for", "B]) ax2.set_xlim(xmin, xmax) # title plt.suptitle(title, fontsize = 16) plt.title(subtitle,", "x and y for gc skew cummulative sums cs =", "next(b_colors), linewidth = 2, label = next(b_label)) xmin = min([min(i[1])", "in B]) ax2.set_xlim(xmin, xmax) # title plt.suptitle(title, fontsize = 16)", "(G + C)) * window size * genome length \"\"\"", "length, seq, window, slide, plot_skew) if ori == False: ori,", "3) ax1.set_xlabel(labels[-1]) ax1.plot(x, y, c = next(a_colors), marker = 'o',", "[left label, right label, x label] legend = [[left legend],", "else: plot_two(title, subtitle, [skew], [c_skew], labels, \\ [[labels[0], 'Ori:%s' %", "b_colors = cycle(colors[::-1]) a_label = cycle(legend[0]) b_label = cycle(legend[1]) #", "# G - C for base in seq: try: gmc.append(replacements[base])", "ax2.set_ylabel(labels[1], labelpad = 8) ax2.plot(x, y, c = next(b_colors), linewidth", "some points for plotting (approx. 1,000 datapoints) N = int(len(skew[0])/1000)", "= 2, label = next(b_label)) xmin = min([min(i[1]) for i", "tr, pk]) if len(pairs) == 0: return [False, False] pt,", "window, slide = args['w'], args['s'] min_len = args['l'] if min_len", "= 0 # cummulative sum # select windows to use", "mplUse mplUse('Agg') import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages", "is False: min_len = 10 * window for name, length,", "<NAME> <EMAIL> \"\"\" # python modules import os import sys", "fastas if single is True, combine sequences in multifasta file", "genome length \"\"\" # convert to G - C replacements", "weights, 'same').tolist())] gpc = [[i, c] for i, c in", "print('%s -> Origin: %s Terminus: %s' \\ % (name, ori,", "def plot_two(title, subtitle, A, B, labels, legend, vert = False):", "single): \"\"\" generator for parsing fastas if single is True,", "single is True: for genome in fastas: sequence = []", "files if files[0] == '-': return (sys.stdin) return (open(i) for", "ax1.twinx() for b in B: x, y = b 
ax2.set_ylabel(labels[1],", "for i in A] + [max(i[0]) for i in B])", "\\ help = 'slide length (default = 10)') parser.add_argument(\\ '--single',", "'upper left', \\ bbox_to_anchor=(0.55, -0.125), \\ prop = {'size':8}, \\", "right label, x label] legend = [[left legend], [right legend]]", "legend = [[left legend], [right legend]] \"\"\" fig, ax1 =", "= 0.0\\ ) # save pdf = PdfPages('%s.pdf' % title.replace('", "modules from matplotlib import use as mplUse mplUse('Agg') import matplotlib.pyplot", "max that are not too close or too far apart", "[False, False] pt, tr, pk = sorted(pairs, reverse = True)[0]", "calculating gc skew <NAME> <EMAIL> \"\"\" # python modules import", "in gmc[0::slide]: p = gpc[i][1] if p == 0: gcs", "parse_genomes(fastas, single): \"\"\" generator for parsing fastas if single is", "\\ '# calculate gc skew and find Ori and Ter", "% ('{:,}'.format(ori)), \\ 'Ter:%s' % ('{:,}'.format(ter))], [labels[1]]], \\ vert =", "= [name, pos, skew[1][i], c_skew[1][i]] print('\\t'.join([str(i) for i in out]))", "def gc_skew(name, length, seq, window, slide, plot_skew): \"\"\" calculate gc", "[skew], [c_skew], labels, \\ [[labels[0], 'Ori:%s' % ('{:,}'.format(ori)), \\ 'Ter:%s'", "\\ help = 'window length (default = 1000)') parser.add_argument(\\ '-s',", "length (default = 10)') parser.add_argument(\\ '--single', action = 'store_true', \\", "is False: print('\\t'.join(['# Name', 'Position', 'GC Skew', 'Cumulative GC Skew']))", "find Ori and Ter of replication') parser.add_argument(\\ '-f', nargs =", "((G - C) / (G + C)) * window size", "= {'size':8}, \\ framealpha = 0.0\\ ) # save pdf", "True, combine sequences in multifasta file \"\"\" if single is", "b in B: x, y = b ax2.set_ylabel(labels[1], labelpad =", "tr, pk = sorted(pairs, reverse = True)[0] return [tr[0], pk[0]]", "% ('{:,}'.format(ter))], [labels[1]]], \\ vert = [(ori, 'r'), (ter, 'b')])", "return ori, ter def gc_skew(name, length, seq, window, slide, plot_skew):", "= 'store', required = True, \\ 
help = 'fasta(s)') parser.add_argument(\\", "[y]] B = data for right axis lables = [left", "axis for a in A: x, y = a ax1.set_ylabel(labels[0],", "% (window, slide) labels = ['GC Skew', 'Cumulative GC Skew',", "= 10, type = int, \\ help = 'slide length", "import numpy as np from scipy import signal from itertools", "\"\"\" open files in list, use stdin if first item", "c_skew_max], length) return ori, ter def gc_skew(name, length, seq, window,", "skew, c_skew = gc_skew(name, length, seq, window, slide, plot_skew) if", "int(length * float(0.55)) pairs = [] for pair in list(product(*peaks)):", "max peaks c_skew_min = signal.argrelextrema(np.asarray(c_skew[1]), np.less, order = 1)[0].tolist() c_skew_max", "y axes title = title for chart A = data", "(name) subtitle = '(window = %s, slide = %s)' %", "cycle(legend[0]) b_label = cycle(legend[1]) # plot left axis and x", "def check_peaks(peaks, length): \"\"\" select pair of min and max", "if ori == False: ori, ter = 'n/a', 'n/a' else:", "if len(pairs) == 0: return [False, False] pt, tr, pk", "= 3) ax1.set_xlabel(labels[-1]) ax1.plot(x, y, c = next(a_colors), marker =", "C replacements = {'G':1, 'C':-1, 'A':0, 'T':0, 'N':0} gmc =", "cummulative sums cs = 0 # cummulative sum # select", "first item in list is '-' \"\"\" if files is", "enumerate(signal.fftconvolve(gpc, weights, 'same').tolist())] # calculate gc skew and cummulative gc", "[labels[1]]], \\ vert = [(ori, 'r'), (ter, 'b')]) return ori,", "sorted(list(pair), key = lambda x: x[1], reverse = False) #", "windows gc skew = ((G - C) / (G +", "seq in parse_genomes(fastas, single): if length < min_len: print('%s: Too", "'m', 'k', 'g'] a_colors = cycle(colors) b_colors = cycle(colors[::-1]) a_label", "pt, tr, pk = sorted(pairs, reverse = True)[0] return [tr[0],", "gmc = [[i, c] for i, c in enumerate(signal.fftconvolve(gmc, weights,", "data if plot_skew is True: title = '%s GC Skew'", "= int, \\ help = 'window length (default = 1000)')", "sliding windows for (G - C) and (G 
+ C)", "vert: x, c = i ax1.axvline(x = x, c =", "G - C replacements = {'G':1, 'C':-1, 'A':0, 'T':0, 'N':0}", "combine sequences in multifasta file \"\"\" if single is True:", "apart, they are probably wrong closest, farthest = int(length *", "= 'do not generate plots, print GC Skew to stdout')", "% length b = (pk[0] - tr[0]) % length pt", "skew = [skew[0][0::N], skew[1][0::N]] if ori is False: plot_two(title, subtitle,", "len(seq[1]), list(seq[1].upper())) def open_files(files): \"\"\" open files in list, use", "int(length * float(0.45)), int(length * float(0.55)) pairs = [] for", "default = 1000, type = int, \\ help = 'window", "0.0 ) plt.legend(loc = 'upper right', \\ bbox_to_anchor=(0.45, -0.125), \\", "seq: try: gmc.append(replacements[base]) except: gmc.append(0) # convert to G +", "% title.replace(' ', '_')) pdf.savefig(bbox_inches = 'tight') plt.close() pdf.close() def", "Skew', 'Cumulative GC Skew', 'Position on Genome (bp)'] # remove", "use stdin if first item in list is '-' \"\"\"", "1000)') parser.add_argument(\\ '-s', default = 10, type = int, \\", "'--single', action = 'store_true', \\ help = 'combine multi-fasta sequences", "c_skew[0].append(i) skew[1].append(gcs) c_skew[1].append(cs) ori, ter = find_ori_ter(c_skew, length) # plot", "pk = sorted(list(pair), key = lambda x: x[1], reverse =", "tr[0]) % length pt = abs(tr[1] - pk[1]) # distance", "have greatest y distance between one another \"\"\" # if", "= int, \\ help = 'minimum contig length (default =", "= title for chart A = data for left axis", "left axis and x - axis for a in A:", "'same').tolist())] # calculate gc skew and cummulative gc skew sum", "length pt = abs(tr[1] - pk[1]) # distance between values", "int, \\ help = 'slide length (default = 10)') parser.add_argument(\\", "'b', 'r', 'c', 'y', 'm', 'k', 'g'] a_colors = cycle(colors)", "to G + C gpc = [abs(i) for i in", "and terminus of replication based on cumulative GC Skew \"\"\"", "window)') parser.add_argument(\\ '-w', default = 1000, 
type = int, \\", "/ (G + C)) * window size * genome length", "1000, type = int, \\ help = 'window length (default", "file=sys.stderr) if plot_skew is False: print('\\t'.join(['# Name', 'Position', 'GC Skew',", "reverse = True)[0] return [tr[0], pk[0]] def find_ori_ter(c_skew, length): \"\"\"", "next(a_colors), marker = 'o', ms = 4, label = next(a_label))", "on # cumulative gc skew min and max peaks c_skew_min", "= True, \\ help = 'fasta(s)') parser.add_argument(\\ '-l', default =", "is None: return files if files[0] == '-': return (sys.stdin)", "ctb from ctbBio.fasta import iterate_fasta as parse_fasta def plot_two(title, subtitle,", "find origin and terminus of replication based on # cumulative", "= [abs(i) for i in gmc] # G + C", "fastas: for seq in parse_fasta(genome): ID = seq[0].split('>', 1)[1].split()[0] yield", "list(product(*peaks)): ### added this to make sure gets origin and", "files is None: return files if files[0] == '-': return", "for genome in fastas: for seq in parse_fasta(genome): ID =", "for right axis lables = [left label, right label, x", "nargs = '*', action = 'store', required = True, \\", "skew and cummulative gc skew sum skew = [[], []]", "# title plt.suptitle(title, fontsize = 16) plt.title(subtitle, fontsize = 10)", "list(seq[1].upper())) def open_files(files): \"\"\" open files in list, use stdin", "else: ori, ter = '{:,}'.format(ori), '{:,}'.format(ter) print('%s -> Origin: %s", "= next(a_label)) # add vertical lines if vert is not", "gc skew min and max peaks c_skew_min = signal.argrelextrema(np.asarray(c_skew[1]), np.less,", "plt.title(subtitle, fontsize = 10) # legend ax1.legend(loc = 'upper left',", "'N':0} gmc = [] # G - C for base", "ax1.set_ylabel(labels[0], labelpad = 3) ax1.set_xlabel(labels[-1]) ax1.plot(x, y, c = next(a_colors),", "'T':0, 'N':0} gmc = [] # G - C for", "next(b_label)) xmin = min([min(i[1]) for i in A] + [min(i[0])", "skew = [[], []] # x and y for gc", "for plotting (approx. 
1,000 datapoints) N = int(len(skew[0])/1000) if N", "len(sequence), sequence) else: for genome in fastas: for seq in", "ter = 'n/a', 'n/a' else: ori, ter = '{:,}'.format(ori), '{:,}'.format(ter)", "= 'n/a', 'n/a' else: ori, ter = '{:,}'.format(ori), '{:,}'.format(ter) print('%s", "= {'size':8}, \\ framealpha = 0.0 ) plt.legend(loc = 'upper", "[[], []] # x and y for gc skew c_skew", "#!/usr/bin/env python3 \"\"\" script for calculating gc skew <NAME> <EMAIL>", "file \"\"\" if single is True: for genome in fastas:", "were detected if len(c_skew_min) == 0 or len(c_skew_min) == 0:", "= '*', action = 'store', required = True, \\ help", "10)') parser.add_argument(\\ '--single', action = 'store_true', \\ help = 'combine", "print('\\t'.join(['# Name', 'Position', 'GC Skew', 'Cumulative GC Skew'])) for i,", "c_skew[1][i]] for i in c_skew_min] c_skew_max = [[c_skew[0][i], c_skew[1][i]] for", "c_skew[1][i]] for i in c_skew_max] ori, ter = check_peaks([c_skew_min, c_skew_max],", "= [] for seq in parse_fasta(genome): sequence.extend(list(seq[1].upper())) yield (genome.name.rsplit('.', 1)[0],", "use based on slide for i, m in gmc[0::slide]: p", "1)[0], len(sequence), sequence) else: for genome in fastas: for seq", "', '_')) pdf.savefig(bbox_inches = 'tight') plt.close() pdf.close() def check_peaks(peaks, length):", "[[left legend], [right legend]] \"\"\" fig, ax1 = plt.subplots() colors" ]
[ "import print_market_state, print_user_state ### run setup.py before proceeding. make sure", "print(\"Processing send_governance_vote_transaction transaction for vault address \" + vault_address) print(\"~\"*100)", "storage_mnemonic. # Hardcoding account keys is not a great practice.", "mnemonic + storage_mnemonic. # Hardcoding account keys is not a", "to confirm successful vote in voting session # print final", "is not intended for production use. # This example does", "only. # See the README & Docs for alternative signing", "note=vote_note, address=address) txn.sign_with_private_key(sender, key) txn.submit(client.algod, wait=True) # After sending, check", "from dotenv import dotenv_values from algosdk import mnemonic, account from", "check your vote at # https://governance.algorand.foundation/api/periods/<governance-period-slug>/governors/<vault_address> # to confirm successful", "demonstration purposes only. # See the README & Docs for", "# under \"sign_up_address\" for the relevant governance period # Specify", "from example_utils import print_market_state, print_user_state ### run setup.py before proceeding.", "is set with mnemonic + storage_mnemonic. # Hardcoding account keys", "address=address) txn.sign_with_private_key(sender, key) txn.submit(client.algod, wait=True) # After sending, check your", "final state print(\"~\"*100) print(\"Final State\") print(\"Sent governance transaction with note:", "print(\"Final State\") print(\"Sent governance transaction with note: \" + str(vote_note))", "with mnemonic + storage_mnemonic. 
# Hardcoding account keys is not", "get_ordered_symbols, prepare_payment_transaction, get_new_account from example_utils import print_market_state, print_user_state ### run", "os from dotenv import dotenv_values from algosdk import mnemonic, account", "AlgofiTestnetClient, AlgofiMainnetClient from algofi.utils import get_ordered_symbols, prepare_payment_transaction, get_new_account from example_utils", "mnemonic.to_private_key(user['mnemonic']) # IS_MAINNET IS_MAINNET = False client = AlgofiMainnetClient(user_address=sender) if", "# This example does not constitute trading advice. import os", "import AlgofiTestnetClient, AlgofiMainnetClient from algofi.utils import get_ordered_symbols, prepare_payment_transaction, get_new_account from", "vault_address) print(\"~\"*100) txn = client.prepare_send_governance_vote_transactions(governance_address, note=vote_note, address=address) txn.sign_with_private_key(sender, key) txn.submit(client.algod,", "# NOTE: Get the live governance address at https://governance.algorand.foundation/api/periods/ #", "IS_MAINNET else AlgofiTestnetClient(user_address=sender) # NOTE: Get the live governance address", "from algofi.v1.client import AlgofiTestnetClient, AlgofiMainnetClient from algofi.utils import get_ordered_symbols, prepare_payment_transaction,", "to be used in live voting necessarily vault_address = client.manager.get_storage_address(address)", "key) txn.submit(client.algod, wait=True) # After sending, check your vote at", "ENV_PATH = os.path.join(my_path, \".env\") # load user passphrase user =", "permissible in the Algorand Foundation Spec # https://github.com/algorandfoundation/governance/blob/main/af-gov1-spec.md # Get", "= False client = AlgofiMainnetClient(user_address=sender) if IS_MAINNET else AlgofiTestnetClient(user_address=sender) #", "proceeding. make sure the .env file is set with mnemonic", "See the README & Docs for alternative signing methods. 
my_path", "= \"\" vote_note = b'af/gov1:j[6,\"a\",\"c\"]' # NOTE: an example, not", "voting necessarily vault_address = client.manager.get_storage_address(address) print(\"~\"*100) print(\"Processing send_governance_vote_transaction transaction for", "= mnemonic.to_public_key(user['mnemonic']) key = mnemonic.to_private_key(user['mnemonic']) # IS_MAINNET IS_MAINNET = False", "client.prepare_send_governance_vote_transactions(governance_address, note=vote_note, address=address) txn.sign_with_private_key(sender, key) txn.submit(client.algod, wait=True) # After sending,", "alternative signing methods. my_path = os.path.abspath(os.path.dirname(__file__)) ENV_PATH = os.path.join(my_path, \".env\")", "sure the .env file is set with mnemonic + storage_mnemonic.", "b'af/gov1:j[6,\"a\",\"c\"]' # NOTE: an example, not to be used in", "example, not to be used in live voting necessarily vault_address", "It is not intended for production use. # This example", "+ vault_address) print(\"~\"*100) txn = client.prepare_send_governance_vote_transactions(governance_address, note=vote_note, address=address) txn.sign_with_private_key(sender, key)", "live voting necessarily vault_address = client.manager.get_storage_address(address) print(\"~\"*100) print(\"Processing send_governance_vote_transaction transaction", "account from algofi.v1.asset import Asset from algofi.v1.client import AlgofiTestnetClient, AlgofiMainnetClient", "print_user_state ### run setup.py before proceeding. make sure the .env", "great practice. This is for demonstration purposes only. # See", "purposes only. 
# It is not intended for production use.", "txn.sign_with_private_key(sender, key) txn.submit(client.algod, wait=True) # After sending, check your vote", "vote choices based on the relevant voting session from https://governance.algorand.foundation/api/periods/", "sender governance_address = \"\" vote_note = b'af/gov1:j[6,\"a\",\"c\"]' # NOTE: an", "client = AlgofiMainnetClient(user_address=sender) if IS_MAINNET else AlgofiTestnetClient(user_address=sender) # NOTE: Get", "a great practice. This is for demonstration purposes only. #", "use. # This example does not constitute trading advice. import", "= dotenv_values(ENV_PATH) sender = mnemonic.to_public_key(user['mnemonic']) key = mnemonic.to_private_key(user['mnemonic']) # IS_MAINNET", "IS_MAINNET = False client = AlgofiMainnetClient(user_address=sender) if IS_MAINNET else AlgofiTestnetClient(user_address=sender)", "vote according to the formats that are permissible in the", "vote_note = b'af/gov1:j[6,\"a\",\"c\"]' # NOTE: an example, not to be", "dotenv import dotenv_values from algosdk import mnemonic, account from algofi.v1.asset", "purposes only. 
# See the README & Docs for alternative", "NOTE: an example, not to be used in live voting", "successful vote in voting session # print final state print(\"~\"*100)", "vault address \" + vault_address) print(\"~\"*100) txn = client.prepare_send_governance_vote_transactions(governance_address, note=vote_note,", "vault_address = client.manager.get_storage_address(address) print(\"~\"*100) print(\"Processing send_governance_vote_transaction transaction for vault address", "dotenv_values from algosdk import mnemonic, account from algofi.v1.asset import Asset", "After sending, check your vote at # https://governance.algorand.foundation/api/periods/<governance-period-slug>/governors/<vault_address> # to", "# Specify your vote according to the formats that are", "to the formats that are permissible in the Algorand Foundation", "https://governance.algorand.foundation/api/periods/<governance-period-slug>/governors/<vault_address> # to confirm successful vote in voting session #", "the live governance address at https://governance.algorand.foundation/api/periods/ # under \"sign_up_address\" for", "<reponame>Algofiorg/algofi-py-sdk # This sample is provided for demonstration purposes only.", "address at https://governance.algorand.foundation/api/periods/ # under \"sign_up_address\" for the relevant governance", "formats that are permissible in the Algorand Foundation Spec #", "Spec # https://github.com/algorandfoundation/governance/blob/main/af-gov1-spec.md # Get the idx, vote choices based", "address \" + vault_address) print(\"~\"*100) txn = client.prepare_send_governance_vote_transactions(governance_address, note=vote_note, address=address)", "for demonstration purposes only. 
# It is not intended for", "governance address at https://governance.algorand.foundation/api/periods/ # under \"sign_up_address\" for the relevant", "\"sign_up_address\" for the relevant governance period # Specify your vote", "period # Specify your vote according to the formats that", "sample is provided for demonstration purposes only. # It is", "keys is not a great practice. This is for demonstration", "voting session from https://governance.algorand.foundation/api/periods/ address = sender governance_address = \"\"", "for vault address \" + vault_address) print(\"~\"*100) txn = client.prepare_send_governance_vote_transactions(governance_address,", "https://governance.algorand.foundation/api/periods/ # under \"sign_up_address\" for the relevant governance period #", "the relevant governance period # Specify your vote according to", "constitute trading advice. import os from dotenv import dotenv_values from", "be used in live voting necessarily vault_address = client.manager.get_storage_address(address) print(\"~\"*100)", "print_market_state, print_user_state ### run setup.py before proceeding. make sure the", "algofi.utils import get_ordered_symbols, prepare_payment_transaction, get_new_account from example_utils import print_market_state, print_user_state", "before proceeding. make sure the .env file is set with", "\" + vault_address) print(\"~\"*100) txn = client.prepare_send_governance_vote_transactions(governance_address, note=vote_note, address=address) txn.sign_with_private_key(sender,", "import dotenv_values from algosdk import mnemonic, account from algofi.v1.asset import", "Hardcoding account keys is not a great practice. 
This is", "mnemonic, account from algofi.v1.asset import Asset from algofi.v1.client import AlgofiTestnetClient,", "relevant voting session from https://governance.algorand.foundation/api/periods/ address = sender governance_address =", "idx, vote choices based on the relevant voting session from", "file is set with mnemonic + storage_mnemonic. # Hardcoding account", "confirm successful vote in voting session # print final state", "does not constitute trading advice. import os from dotenv import", "= os.path.abspath(os.path.dirname(__file__)) ENV_PATH = os.path.join(my_path, \".env\") # load user passphrase", "are permissible in the Algorand Foundation Spec # https://github.com/algorandfoundation/governance/blob/main/af-gov1-spec.md #", "live governance address at https://governance.algorand.foundation/api/periods/ # under \"sign_up_address\" for the", "Foundation Spec # https://github.com/algorandfoundation/governance/blob/main/af-gov1-spec.md # Get the idx, vote choices", "# Get the idx, vote choices based on the relevant", "# https://governance.algorand.foundation/api/periods/<governance-period-slug>/governors/<vault_address> # to confirm successful vote in voting session", "State\") print(\"Sent governance transaction with note: \" + str(vote_note)) print(\"~\"*100)", "if IS_MAINNET else AlgofiTestnetClient(user_address=sender) # NOTE: Get the live governance", "AlgofiMainnetClient from algofi.utils import get_ordered_symbols, prepare_payment_transaction, get_new_account from example_utils import", "for production use. # This example does not constitute trading", "# load user passphrase user = dotenv_values(ENV_PATH) sender = mnemonic.to_public_key(user['mnemonic'])", "is not a great practice. This is for demonstration purposes", "practice. This is for demonstration purposes only. 
# See the", "from algofi.utils import get_ordered_symbols, prepare_payment_transaction, get_new_account from example_utils import print_market_state,", "sender = mnemonic.to_public_key(user['mnemonic']) key = mnemonic.to_private_key(user['mnemonic']) # IS_MAINNET IS_MAINNET =", "= b'af/gov1:j[6,\"a\",\"c\"]' # NOTE: an example, not to be used", "# See the README & Docs for alternative signing methods.", "vote in voting session # print final state print(\"~\"*100) print(\"Final", "This sample is provided for demonstration purposes only. # It", "mnemonic.to_public_key(user['mnemonic']) key = mnemonic.to_private_key(user['mnemonic']) # IS_MAINNET IS_MAINNET = False client", "txn = client.prepare_send_governance_vote_transactions(governance_address, note=vote_note, address=address) txn.sign_with_private_key(sender, key) txn.submit(client.algod, wait=True) #", "+ storage_mnemonic. # Hardcoding account keys is not a great", "This is for demonstration purposes only. # See the README", "send_governance_vote_transaction transaction for vault address \" + vault_address) print(\"~\"*100) txn", "not a great practice. This is for demonstration purposes only.", "make sure the .env file is set with mnemonic +", "# to confirm successful vote in voting session # print", "client.manager.get_storage_address(address) print(\"~\"*100) print(\"Processing send_governance_vote_transaction transaction for vault address \" +", "os.path.join(my_path, \".env\") # load user passphrase user = dotenv_values(ENV_PATH) sender", "Get the live governance address at https://governance.algorand.foundation/api/periods/ # under \"sign_up_address\"", "# After sending, check your vote at # https://governance.algorand.foundation/api/periods/<governance-period-slug>/governors/<vault_address> #", "This example does not constitute trading advice. 
import os from", "= client.prepare_send_governance_vote_transactions(governance_address, note=vote_note, address=address) txn.sign_with_private_key(sender, key) txn.submit(client.algod, wait=True) # After", "# This sample is provided for demonstration purposes only. #", "Specify your vote according to the formats that are permissible", "governance period # Specify your vote according to the formats", "import Asset from algofi.v1.client import AlgofiTestnetClient, AlgofiMainnetClient from algofi.utils import", "Get the idx, vote choices based on the relevant voting", "# Hardcoding account keys is not a great practice. This", "used in live voting necessarily vault_address = client.manager.get_storage_address(address) print(\"~\"*100) print(\"Processing", "signing methods. my_path = os.path.abspath(os.path.dirname(__file__)) ENV_PATH = os.path.join(my_path, \".env\") #", "from algosdk import mnemonic, account from algofi.v1.asset import Asset from", "based on the relevant voting session from https://governance.algorand.foundation/api/periods/ address =", "wait=True) # After sending, check your vote at # https://governance.algorand.foundation/api/periods/<governance-period-slug>/governors/<vault_address>", "print final state print(\"~\"*100) print(\"Final State\") print(\"Sent governance transaction with", "algofi.v1.client import AlgofiTestnetClient, AlgofiMainnetClient from algofi.utils import get_ordered_symbols, prepare_payment_transaction, get_new_account", "voting session # print final state print(\"~\"*100) print(\"Final State\") print(\"Sent", "not to be used in live voting necessarily vault_address =", "= client.manager.get_storage_address(address) print(\"~\"*100) print(\"Processing send_governance_vote_transaction transaction for vault address \"", "at # https://governance.algorand.foundation/api/periods/<governance-period-slug>/governors/<vault_address> # to confirm successful vote in voting", "that are permissible in the Algorand Foundation Spec # 
https://github.com/algorandfoundation/governance/blob/main/af-gov1-spec.md", "# IS_MAINNET IS_MAINNET = False client = AlgofiMainnetClient(user_address=sender) if IS_MAINNET", "load user passphrase user = dotenv_values(ENV_PATH) sender = mnemonic.to_public_key(user['mnemonic']) key", "example_utils import print_market_state, print_user_state ### run setup.py before proceeding. make", "= AlgofiMainnetClient(user_address=sender) if IS_MAINNET else AlgofiTestnetClient(user_address=sender) # NOTE: Get the", "only. # It is not intended for production use. #", "production use. # This example does not constitute trading advice.", "not intended for production use. # This example does not", "for the relevant governance period # Specify your vote according", "advice. import os from dotenv import dotenv_values from algosdk import", "= mnemonic.to_private_key(user['mnemonic']) # IS_MAINNET IS_MAINNET = False client = AlgofiMainnetClient(user_address=sender)", "# https://github.com/algorandfoundation/governance/blob/main/af-gov1-spec.md # Get the idx, vote choices based on", "IS_MAINNET IS_MAINNET = False client = AlgofiMainnetClient(user_address=sender) if IS_MAINNET else", "the idx, vote choices based on the relevant voting session", "provided for demonstration purposes only. # It is not intended", "from algofi.v1.asset import Asset from algofi.v1.client import AlgofiTestnetClient, AlgofiMainnetClient from", "an example, not to be used in live voting necessarily", ".env file is set with mnemonic + storage_mnemonic. 
# Hardcoding", "algosdk import mnemonic, account from algofi.v1.asset import Asset from algofi.v1.client", "in live voting necessarily vault_address = client.manager.get_storage_address(address) print(\"~\"*100) print(\"Processing send_governance_vote_transaction", "necessarily vault_address = client.manager.get_storage_address(address) print(\"~\"*100) print(\"Processing send_governance_vote_transaction transaction for vault", "\"\" vote_note = b'af/gov1:j[6,\"a\",\"c\"]' # NOTE: an example, not to", "dotenv_values(ENV_PATH) sender = mnemonic.to_public_key(user['mnemonic']) key = mnemonic.to_private_key(user['mnemonic']) # IS_MAINNET IS_MAINNET", "os.path.abspath(os.path.dirname(__file__)) ENV_PATH = os.path.join(my_path, \".env\") # load user passphrase user", "the formats that are permissible in the Algorand Foundation Spec", "session # print final state print(\"~\"*100) print(\"Final State\") print(\"Sent governance", "my_path = os.path.abspath(os.path.dirname(__file__)) ENV_PATH = os.path.join(my_path, \".env\") # load user", "the README & Docs for alternative signing methods. my_path =", "is provided for demonstration purposes only. # It is not", "# print final state print(\"~\"*100) print(\"Final State\") print(\"Sent governance transaction", "session from https://governance.algorand.foundation/api/periods/ address = sender governance_address = \"\" vote_note", "account keys is not a great practice. This is for", "for demonstration purposes only. 
# See the README & Docs", "at https://governance.algorand.foundation/api/periods/ # under \"sign_up_address\" for the relevant governance period", "AlgofiMainnetClient(user_address=sender) if IS_MAINNET else AlgofiTestnetClient(user_address=sender) # NOTE: Get the live", "address = sender governance_address = \"\" vote_note = b'af/gov1:j[6,\"a\",\"c\"]' #", "from https://governance.algorand.foundation/api/periods/ address = sender governance_address = \"\" vote_note =", "txn.submit(client.algod, wait=True) # After sending, check your vote at #", "state print(\"~\"*100) print(\"Final State\") print(\"Sent governance transaction with note: \"", "AlgofiTestnetClient(user_address=sender) # NOTE: Get the live governance address at https://governance.algorand.foundation/api/periods/", "NOTE: Get the live governance address at https://governance.algorand.foundation/api/periods/ # under", "else AlgofiTestnetClient(user_address=sender) # NOTE: Get the live governance address at", "= os.path.join(my_path, \".env\") # load user passphrase user = dotenv_values(ENV_PATH)", "user passphrase user = dotenv_values(ENV_PATH) sender = mnemonic.to_public_key(user['mnemonic']) key =", "governance_address = \"\" vote_note = b'af/gov1:j[6,\"a\",\"c\"]' # NOTE: an example,", "# NOTE: an example, not to be used in live", "your vote according to the formats that are permissible in", "choices based on the relevant voting session from https://governance.algorand.foundation/api/periods/ address", "in voting session # print final state print(\"~\"*100) print(\"Final State\")", "sending, check your vote at # https://governance.algorand.foundation/api/periods/<governance-period-slug>/governors/<vault_address> # to confirm", "prepare_payment_transaction, get_new_account from example_utils import print_market_state, print_user_state ### run setup.py", "set with mnemonic + storage_mnemonic. # Hardcoding account keys is", "is for demonstration purposes only. 
# See the README &", "the Algorand Foundation Spec # https://github.com/algorandfoundation/governance/blob/main/af-gov1-spec.md # Get the idx,", "Asset from algofi.v1.client import AlgofiTestnetClient, AlgofiMainnetClient from algofi.utils import get_ordered_symbols,", "import os from dotenv import dotenv_values from algosdk import mnemonic,", "print(\"~\"*100) print(\"Processing send_governance_vote_transaction transaction for vault address \" + vault_address)", "False client = AlgofiMainnetClient(user_address=sender) if IS_MAINNET else AlgofiTestnetClient(user_address=sender) # NOTE:", "your vote at # https://governance.algorand.foundation/api/periods/<governance-period-slug>/governors/<vault_address> # to confirm successful vote", "get_new_account from example_utils import print_market_state, print_user_state ### run setup.py before", "under \"sign_up_address\" for the relevant governance period # Specify your", "example does not constitute trading advice. import os from dotenv", "import mnemonic, account from algofi.v1.asset import Asset from algofi.v1.client import", "vote at # https://governance.algorand.foundation/api/periods/<governance-period-slug>/governors/<vault_address> # to confirm successful vote in", "Algorand Foundation Spec # https://github.com/algorandfoundation/governance/blob/main/af-gov1-spec.md # Get the idx, vote", "user = dotenv_values(ENV_PATH) sender = mnemonic.to_public_key(user['mnemonic']) key = mnemonic.to_private_key(user['mnemonic']) #", "https://governance.algorand.foundation/api/periods/ address = sender governance_address = \"\" vote_note = b'af/gov1:j[6,\"a\",\"c\"]'", "import get_ordered_symbols, prepare_payment_transaction, get_new_account from example_utils import print_market_state, print_user_state ###", "run setup.py before proceeding. make sure the .env file is", "methods. 
my_path = os.path.abspath(os.path.dirname(__file__)) ENV_PATH = os.path.join(my_path, \".env\") # load", "print(\"~\"*100) txn = client.prepare_send_governance_vote_transactions(governance_address, note=vote_note, address=address) txn.sign_with_private_key(sender, key) txn.submit(client.algod, wait=True)", "in the Algorand Foundation Spec # https://github.com/algorandfoundation/governance/blob/main/af-gov1-spec.md # Get the", "not constitute trading advice. import os from dotenv import dotenv_values", "setup.py before proceeding. make sure the .env file is set", "key = mnemonic.to_private_key(user['mnemonic']) # IS_MAINNET IS_MAINNET = False client =", "https://github.com/algorandfoundation/governance/blob/main/af-gov1-spec.md # Get the idx, vote choices based on the", "passphrase user = dotenv_values(ENV_PATH) sender = mnemonic.to_public_key(user['mnemonic']) key = mnemonic.to_private_key(user['mnemonic'])", "intended for production use. # This example does not constitute", "algofi.v1.asset import Asset from algofi.v1.client import AlgofiTestnetClient, AlgofiMainnetClient from algofi.utils", "transaction for vault address \" + vault_address) print(\"~\"*100) txn =", "demonstration purposes only. # It is not intended for production", "the relevant voting session from https://governance.algorand.foundation/api/periods/ address = sender governance_address", "on the relevant voting session from https://governance.algorand.foundation/api/periods/ address = sender", "print(\"~\"*100) print(\"Final State\") print(\"Sent governance transaction with note: \" +", "according to the formats that are permissible in the Algorand", "for alternative signing methods. my_path = os.path.abspath(os.path.dirname(__file__)) ENV_PATH = os.path.join(my_path,", "\".env\") # load user passphrase user = dotenv_values(ENV_PATH) sender =", "# It is not intended for production use. # This", "### run setup.py before proceeding. make sure the .env file", "trading advice. 
import os from dotenv import dotenv_values from algosdk", "& Docs for alternative signing methods. my_path = os.path.abspath(os.path.dirname(__file__)) ENV_PATH", "README & Docs for alternative signing methods. my_path = os.path.abspath(os.path.dirname(__file__))", "the .env file is set with mnemonic + storage_mnemonic. #", "Docs for alternative signing methods. my_path = os.path.abspath(os.path.dirname(__file__)) ENV_PATH =", "= sender governance_address = \"\" vote_note = b'af/gov1:j[6,\"a\",\"c\"]' # NOTE:", "relevant governance period # Specify your vote according to the" ]
[ "{\"name\":\"widg1\",\"quantity\":200,\"price\":SOAP.decimalType(45.99), \"_typename\":\"LineItem\"}) itemd2 = SOAP.structType( {\"name\":\"widg2\",\"quantity\":400,\"price\":SOAP.decimalType(33.45), \"_typename\":\"LineItem\"}) items_d = SOAP.arrayType(", "'--servers'): servers = arg else: raise AttributeError(\"Recognized but unimplemented option", "Street\", \"city\":\"New York\", \"state\":\"NY\", \"zipCode\":\"10000\"} shipTo_d = {\"name\":\"<NAME> \", \"address\":\"1", "option also. -?, --help display this usage -d, --debug turn", "'f' invert = 0 succeed = 0 printtrace = 0", "num in (methodnums): if num > len(DEFAULT_METHODS): break total +=", "mandatory, it's mandatory for the equivalent short option also. -?,", "sys.exc_info()[1]) failok += 1 else: if 'f' in output: print(title,", "print a stack trace on each unexpected failure -T, --always-stacktrace", "serv._sa (sa % {'methodname':'RequestForQuote'}) return serv.RequestForQuote(Quantity=3, ProductName = \"thing\") #", "> len(DEFAULT_METHODS): break total += 1 name = DEFAULT_METHODS[num -", "!= '': t += ', ' + s['nonfunctional'][name] print(title, \"failed", "a stack trace on each unexpected failure -T, --always-stacktrace print", "Failed as expected: %d (%3.2f%%)\" % \\ (failok, 100.0 *", "%s [options] [server ...] If a long option shows an", "*not* in the list of servers given -m, --method=METHOD#[,METHOD#...] 
call", "continue cur = {'nonfunctional': {}} tag = None servers.append (cur)", "notimp / total)) return fail + notimp if __name__ ==", "end=' ') if i + half < len (DEFAULT_METHODS): print(\"%4d.", "print(title, \"succeeded despite marked nonfunctional\") elif 's' in output: print(title,", "notimp += 1 continue try: res = fn (serv, s['soapaction'],", "total)) if stats > 0 or notimp > 0: print(\"", "/ total)) if stats > 0 or fail > 0:", "turn on output, TYPE is one or more of s(uccess),", "= \"this is my address\") #JHawk, Phalanx require this order", "(ATL), GLUE, Aumsoft, # HP, EasySoap, and Jake (Frontier). [Actzero", "in list(shipTo_d.items()): shipTo_d[k] = v[:-1] itemd1 = SOAP.structType( {\"name\":\"widg1\",\"quantity\":200,\"price\":SOAP.decimalType(45.99), \"_typename\":\"LineItem\"})", "cur = {'nonfunctional': {}} tag = None servers.append (cur) while", "return l def SimpleBuy(serv, sa, epname): serv = serv._sa (sa", "(succeed, 100.0 * succeed / total)) if stats > 0", "DEFAULT_METHODS = ('SimpleBuy', 'RequestForQuote','Buy','Ping') def usage (error = None): sys.stdout", "name in s['nonfunctional']: if 'F' in output: t = 'as", "{'methodname':'RequestForQuote'}) return serv.RequestForQuote(Quantity=3, ProductName = \"thing\") # for Phalanx, JHawk", "#JHawk, Phalanx require this order of params def RequestForQuote(serv, sa,", "fail > 0: print(\"Failed unexpectedly: %d (%3.2f%%)\" % \\ (fail,", "= 0 fail = 0 failok = 0 notimp =", "else: value = cur[tag] value += ' ' + line.strip", "i.split ('-') for i in range (int (i[0]),int (i[1]) +", "fail += 1 if stats: print(\" Tests ended at:\", time.ctime", "readServers (file): servers = [] f = open (file, 'r')", "be \" \\ \"specified using a\\ncomma-separated list of numbers or", "output = 'fFns' servers = readServers(servers) if methodnums == None:", "> 0: print(\" Total tests: %d\" % total) print(\" Successes:", "a METHOD# of ? 
for the list of method numbers", "num) try: fn = globals ()[name] except KeyboardInterrupt: raise except:", "print(title, \"failed -\", str (sys.exc_info()[1])) fail += 1 if stats:", "from SOAPpy import SOAP import traceback DEFAULT_SERVERS_FILE = './inventory.servers' DEFAULT_METHODS", "\"} for k,v in list(shipTo_d.items()): shipTo_d[k] = v[:-1] itemd1 =", "(%3.2f%%)\" % \\ (succeed, 100.0 * succeed / total)) if", "total)) if stats > 0 or fail > 0: print(\"Failed", "stats > 0 or fail > 0: print(\"Failed unexpectedly: %d", "end=' ') print() sys.exit (0) def readServers (file): servers =", "line == '' or line[0] == '\\n': break return servers", "output = 'f' invert = 0 succeed = 0 printtrace", "library -i, --invert test servers *not* in the list of", "York \", \"state\":\"NY \", \"zipCode\":\"10000 \"} for k,v in list(shipTo_d.items()):", "(file, 'r') while 1: line = f.readline () if line", "Idoox WASP, Paul (SOAP::Lite), PranishK (ATL), GLUE, Aumsoft, # HP,", "= SOAP.structType( {\"name\":\"widg1\",\"quantity\":200,\"price\":SOAP.decimalType(45.99), \"_typename\":\"LineItem\"}) itemd2 = SOAP.structType( {\"name\":\"widg2\",\"quantity\":400,\"price\":SOAP.decimalType(33.45), \"_typename\":\"LineItem\"}) items_d", "s['nonfunctional'][name] print(title, \"failed (%s) -\" %t, sys.exc_info()[1]) failok += 1", "\"failed (%s) -\" %t, sys.exc_info()[1]) failok += 1 else: if", "+ line.strip () else: tag, value = line.split (':', 1)", "elif opt in ('-i', '--invert'): invert = 1 elif opt", "'./inventory.servers' DEFAULT_METHODS = ('SimpleBuy', 'RequestForQuote','Buy','Ping') def usage (error = None):", "arg elif opt in ('-s', '--servers'): servers = arg else:", "= 1 elif opt in ('-i', '--invert'): invert = 1", "{'nonfunctional': {}} tag = None servers.append (cur) while 1: if", "DEFAULT_METHODS[i]), end=' ') if i + half < len (DEFAULT_METHODS):", "value[1:-1] if tag == 'nonfunctional': value = value.split (' ',", "str2list (s): l = {} for i in s.split (','):", "', 1) + [''] 
method = value[0] cur[tag][method] = value[1]", "('-d', '--debug'): SOAP.Config.debug = 1 elif opt in ('-i', '--invert'):", "print(\" Successes: %d (%3.2f%%)\" % \\ (succeed, 100.0 * succeed", "\\ (failok, 100.0 * failok / total)) if stats >", "6, and 8.\\n\") print(\"The available methods are:\\n\") half = (len", "{'methodname':'Ping'}) return serv.Ping() def main(): servers = DEFAULT_SERVERS_FILE methodnums =", "SOAP Toolkit), JHawk (.NET Remoting), # Idoox WASP, Paul (SOAP::Lite),", "epname): serv = serv._sa (sa % {'methodname':'Ping'}) return serv.Ping() def", "numbers or ranges. \" \\ \"For example 1,4-6,8 specifies\\nmethods 1,", "serv.Buy(PO=po_d) except: # called PurchaseOrder by KeithBa return serv.Buy(PurchaseOrder=po_d) def", "s.split (','): if i.find ('-') != -1: i = i.split", "-\", str (sys.exc_info()[1])) fail += 1 if stats: print(\" Tests", "numbers -o, --output=TYPE turn on output, TYPE is one or", "(DEFAULT_METHODS) + 1)) limitre = re.compile ('|'.join (args), re.IGNORECASE) for", "1: line = f.readline () if line == '': break", "raise AttributeError(\"Recognized but unimplemented option `%s'\" % opt) except SystemExit:", "return serv.SimpleBuy(ProductName=\"widget\", Quantity = 50, Address = \"this is my", "If a long option shows an argument is mandatory, it's", "value[0] cur[tag][method] = value[1] else: cur[tag] = value line =", "any failure \"\"\" % (sys.argv[0], DEFAULT_SERVERS_FILE), end=' ') sys.exit (0)", "serv._sa (sa % {'methodname':'Buy'}) billTo_d = {\"name\":\"<NAME>\", \"address\":\"1 1st Street\",", "more of s(uccess), f(ailure), n(ot implemented), F(ailed (as expected)), a(ll)", "SOAP.SOAPProxy(s['endpoint'], namespace = s['namespace']) for num in (methodnums): if num", "title = '%s: %s (#%d)' % (s['name'], name, num) try:", "value = value[1:-1] if tag == 'nonfunctional': value = value.split", "SOAP.structType( data = {\"poID\":\"myord\",\"createDate\":SOAP.dateTimeType(),\"shipTo\":shipTo_d, \"billTo\":billTo_d, \"items\":items_d}) 
try: # it's called", "0 or fail > 0: print(\"Failed unexpectedly: %d (%3.2f%%)\" %", "= None): sys.stdout = sys.stderr if error != None: print(error)", "servers: if (not not limitre.match (s['name'])) == invert: continue serv", "servers = [] f = open (file, 'r') while 1:", "/ total)) if stats > 0 or notimp > 0:", "it's mandatory for the equivalent short option also. -?, --help", "is one or more of s(uccess), f(ailure), n(ot implemented), F(ailed", "(.NET Remoting), # Idoox WASP, Paul (SOAP::Lite), PranishK (ATL), GLUE,", "output: t = 'as expected' if s['nonfunctional'][name] != '': t", "example 1,4-6,8 specifies\\nmethods 1, 4, 5, 6, and 8.\\n\") print(\"The", "servers to test [%s] -t, --stacktrace print a stack trace", "the equivalent short option also. -?, --help display this usage", "== '\\n': break return servers def str2list (s): l =", "50, Address = \"this is my address\") #JHawk, Phalanx require", "%d (%3.2f%%)\" % \\ (succeed, 100.0 * succeed / total))", "if stats > 0 or fail > 0: print(\"Failed unexpectedly:", "(serv, s['soapaction'], s['name']) if name in s['nonfunctional']: print(title, \"succeeded despite", "in ('-o', '--output'): output = arg elif opt in ('-s',", "1, DEFAULT_METHODS[i]), end=' ') if i + half < len", "on debugging in the SOAP library -i, --invert test servers", "sa, epname): serv = serv._sa (sa % {'methodname':'RequestForQuote'}) return serv.RequestForQuote(Quantity=3,", "[%s] -t, --stacktrace print a stack trace on each unexpected", "i.find ('-') != -1: i = i.split ('-') for i", "' + line.strip () else: tag, value = line.split (':',", "`%s'\" % opt) except SystemExit: raise except: usage (sys.exc_info ()[1])", "servers.append (cur) while 1: if line[0] in string.whitespace: if tag", "= fn (serv, s['soapaction'], s['name']) if name in s['nonfunctional']: print(title,", "on each unexpected failure -T, --always-stacktrace print a stack trace", "= 0 stats = 1 total = 0 fail =", "print(\"fail\") raise except: if name in s['nonfunctional']: 
if 'F' in", "ProductName = \"thing\") # for Phalanx, JHawk def Buy(serv, sa,", "arg in opts: if opt in ('-?', '--help'): usage ()", "KeyboardInterrupt: raise except: if 'n' in output: print(title, \"test not", "except: if name in s['nonfunctional']: if 'F' in output: t", "accepts either] return serv.Buy(PO=po_d) except: # called PurchaseOrder by KeithBa", "if s['nonfunctional'][name] != '': t += ', ' + s['nonfunctional'][name]", "total)) if stats > 0: print(\" Failed as expected: %d", "print(\"%4d. %-25s\" % (i + 1, DEFAULT_METHODS[i]), end=' ') if", "method + ' ' + cur[tag][method] else: value = cur[tag]", "and Jake (Frontier). [Actzero accepts either] return serv.Buy(PO=po_d) except: #", "name in s['nonfunctional']: print(title, \"succeeded despite marked nonfunctional\") elif 's'", "s(uccess), f(ailure), n(ot implemented), F(ailed (as expected)), a(ll) [f] -s,", "(half): print(\"%4d. %-25s\" % (i + 1, DEFAULT_METHODS[i]), end=' ')", "debugging in the SOAP library -i, --invert test servers *not*", "string.whitespace: continue cur = {'nonfunctional': {}} tag = None servers.append", "of servers to test [%s] -t, --stacktrace print a stack", "globals ()[name] except KeyboardInterrupt: raise except: if 'n' in output:", "('-?', '--help'): usage () elif opt in ('-d', '--debug'): SOAP.Config.debug", "(sys.argv[0], DEFAULT_SERVERS_FILE), end=' ') sys.exit (0) def methodUsage (): sys.stdout", "only the given methods, specify a METHOD# of ? 
for", "return serv.Buy(PO=po_d) except: # called PurchaseOrder by KeithBa return serv.Buy(PurchaseOrder=po_d)", "try: # it's called PO by MST (MS SOAP Toolkit),", "value[-1] == '\"': value = value[1:-1] if tag == 'nonfunctional':", "(as expected)), a(ll) [f] -s, --servers=FILE use FILE as list", "arg == '?': methodUsage () methodnums = str2list (arg) elif", "fn (serv, s['soapaction'], s['name']) if name in s['nonfunctional']: print(title, \"succeeded", "break total += 1 name = DEFAULT_METHODS[num - 1] title", "if opt in ('-?', '--help'): usage () elif opt in", "'n' in output: print(title, \"test not yet implemented\") notimp +=", "shipTo_d = {\"name\":\"<NAME> \", \"address\":\"1 1st Street \", \"city\":\"New York", "also. -?, --help display this usage -d, --debug turn on", "value.strip () if value[0] == '\"' and value[-1] == '\"':", "def usage (error = None): sys.stdout = sys.stderr if error", "servers = DEFAULT_SERVERS_FILE methodnums = None output = 'f' invert", "(args), re.IGNORECASE) for s in servers: if (not not limitre.match", "[''] method = value[0] cur[tag][method] = value[1] else: cur[tag] =", "/ total)) return fail + notimp if __name__ == \"__main__\":", "and 8.\\n\") print(\"The available methods are:\\n\") half = (len (DEFAULT_METHODS)", "line[0] == '\\n': break return servers def str2list (s): l", "(i[1]) + 1): l[i] = 1 else: l[int (i)] =", "i in range (half): print(\"%4d. 
%-25s\" % (i + 1,", "i in range (int (i[0]),int (i[1]) + 1): l[i] =", "methodnums == None: methodnums = list(range(1, len (DEFAULT_METHODS) + 1))", "-1: i = i.split ('-') for i in range (int", "1st Street \", \"city\":\"New York \", \"state\":\"NY \", \"zipCode\":\"10000 \"}", "elif opt in ('-o', '--output'): output = arg elif opt", "s['soapaction'], s['name']) if name in s['nonfunctional']: print(title, \"succeeded despite marked", "= f.readline () if line == '' or line[0] ==", "def RequestForQuote(serv, sa, epname): serv = serv._sa (sa % {'methodname':'RequestForQuote'})", "(sa % {'methodname':'RequestForQuote'}) return serv.RequestForQuote(Quantity=3, ProductName = \"thing\") # for", "= line.split (':', 1) tag = tag.strip ().lower () value", "= arg else: raise AttributeError(\"Recognized but unimplemented option `%s'\" %", "if methodnums == None: methodnums = list(range(1, len (DEFAULT_METHODS) +", "method numbers -o, --output=TYPE turn on output, TYPE is one", "for s in servers: if (not not limitre.match (s['name'])) ==", "Jake (Frontier). [Actzero accepts either] return serv.Buy(PO=po_d) except: # called", "def methodUsage (): sys.stdout = sys.stderr print(\"Methods are specified by", "-\" %t, sys.exc_info()[1]) failok += 1 else: if 'f' in", "() if line == '': break if line[0] in ('#',", "in output: t = 'as expected' if s['nonfunctional'][name] != '':", "+ 1 + half, DEFAULT_METHODS[i + half]), end=' ') print()", "<gh_stars>1-10 #!/usr/bin/env python import getopt import sys import string import", "+= ' ' + line.strip () else: tag, value =", "specify a METHOD# of ? 
for the list of method", "* fail / total)) if stats > 0: print(\" Failed", "\", \"zipCode\":\"10000 \"} for k,v in list(shipTo_d.items()): shipTo_d[k] = v[:-1]", "len(DEFAULT_METHODS): break total += 1 name = DEFAULT_METHODS[num - 1]", "- 1] title = '%s: %s (#%d)' % (s['name'], name,", "serv.Buy(PurchaseOrder=po_d) def Ping(serv, sa, epname): serv = serv._sa (sa %", "== '?': methodUsage () methodnums = str2list (arg) elif opt", "'\\n': break return servers def str2list (s): l = {}", "failok += 1 else: if 'f' in output: print(title, \"failed", "= (len (DEFAULT_METHODS) + 1) / 2 for i in", "opt, arg in opts: if opt in ('-?', '--help'): usage", "in s.split (','): if i.find ('-') != -1: i =", "Successes: %d (%3.2f%%)\" % \\ (succeed, 100.0 * succeed /", "use FILE as list of servers to test [%s] -t,", "line.strip () else: tag, value = line.split (':', 1) tag", "PO by MST (MS SOAP Toolkit), JHawk (.NET Remoting), #", "for the equivalent short option also. -?, --help display this", "{\"name\":\"widg2\",\"quantity\":400,\"price\":SOAP.decimalType(33.45), \"_typename\":\"LineItem\"}) items_d = SOAP.arrayType( [itemd1, itemd2] ) items_d._ns =", "= value.strip () if value[0] == '\"' and value[-1] ==", "+ 1, DEFAULT_METHODS[i]), end=' ') if i + half <", "s['nonfunctional'][name] != '': t += ', ' + s['nonfunctional'][name] print(title,", "if line == '' or line[0] == '\\n': break return", "if name in s['nonfunctional']: print(title, \"succeeded despite marked nonfunctional\") elif", "break if line[0] in ('#', '\\n') or line[0] in string.whitespace:", "GLUE, Aumsoft, # HP, EasySoap, and Jake (Frontier). [Actzero accepts", "FILE as list of servers to test [%s] -t, --stacktrace", "() return l def SimpleBuy(serv, sa, epname): serv = serv._sa", "continue serv = SOAP.SOAPProxy(s['endpoint'], namespace = s['namespace']) for num in", "Total tests: %d\" % total) print(\" Successes: %d (%3.2f%%)\" %", "methods, specify a METHOD# of ? 
for the list of", "(#%d)' % (s['name'], name, num) try: fn = globals ()[name]", "option shows an argument is mandatory, it's mandatory for the", "> 0: print(\"Failed unexpectedly: %d (%3.2f%%)\" % \\ (fail, 100.0", "= SOAP.SOAPProxy(s['endpoint'], namespace = s['namespace']) for num in (methodnums): if", "except SystemExit: raise except: usage (sys.exc_info ()[1]) if 'a' in", "'?dm:io:s:t', ['help', 'method', 'debug', 'invert', 'output', 'servers=']) for opt, arg", "itemd2 = SOAP.structType( {\"name\":\"widg2\",\"quantity\":400,\"price\":SOAP.decimalType(33.45), \"_typename\":\"LineItem\"}) items_d = SOAP.arrayType( [itemd1, itemd2]", "if line[0] in ('#', '\\n') or line[0] in string.whitespace: continue", "specifies\\nmethods 1, 4, 5, 6, and 8.\\n\") print(\"The available methods", "= 1 elif opt in ('-m', '--method'): if arg ==", "SOAP import traceback DEFAULT_SERVERS_FILE = './inventory.servers' DEFAULT_METHODS = ('SimpleBuy', 'RequestForQuote','Buy','Ping')", "in ('-i', '--invert'): invert = 1 elif opt in ('-m',", "= value line = f.readline () if line == ''", "100.0 * succeed / total)) if stats > 0 or", "methods can be \" \\ \"specified using a\\ncomma-separated list of", "usage (error = None): sys.stdout = sys.stderr if error !=", "in output: print(title, \"test not yet implemented\") notimp += 1", "by number. Multiple methods can be \" \\ \"specified using", "range (half): print(\"%4d. %-25s\" % (i + 1, DEFAULT_METHODS[i]), end='", "in ('#', '\\n') or line[0] in string.whitespace: continue cur =", "-d, --debug turn on debugging in the SOAP library -i,", "or line[0] in string.whitespace: continue cur = {'nonfunctional': {}} tag", "Toolkit), JHawk (.NET Remoting), # Idoox WASP, Paul (SOAP::Lite), PranishK", "notimp = 0 try: opts,args = getopt.getopt (sys.argv[1:], '?dm:io:s:t', ['help',", "if stats > 0: print(\" Failed as expected: %d (%3.2f%%)\"", "of numbers or ranges. 
\" \\ \"For example 1,4-6,8 specifies\\nmethods", "or fail > 0: print(\"Failed unexpectedly: %d (%3.2f%%)\" % \\", "list(l.keys ()) l.sort () return l def SimpleBuy(serv, sa, epname):", "Paul (SOAP::Lite), PranishK (ATL), GLUE, Aumsoft, # HP, EasySoap, and", "Aumsoft, # HP, EasySoap, and Jake (Frontier). [Actzero accepts either]", "'--output'): output = arg elif opt in ('-s', '--servers'): servers", "serv = serv._sa (sa % {'methodname':'Buy'}) billTo_d = {\"name\":\"<NAME>\", \"address\":\"1", "expected' if s['nonfunctional'][name] != '': t += ', ' +", "'debug', 'invert', 'output', 'servers=']) for opt, arg in opts: if", "(%s) -\" %t, sys.exc_info()[1]) failok += 1 else: if 'f'", "> 0 or notimp > 0: print(\" Not implemented: %d", "try: res = fn (serv, s['soapaction'], s['name']) if name in", "epname): serv = serv._sa (sa % {'methodname':'RequestForQuote'}) return serv.RequestForQuote(Quantity=3, ProductName", "time sys.path.insert(1,\"..\") from SOAPpy import SOAP import traceback DEFAULT_SERVERS_FILE =", "def SimpleBuy(serv, sa, epname): serv = serv._sa (sa % {'methodname':'SimpleBuy'})", "for Phalanx, JHawk def Buy(serv, sa, epname): import copy serv", "\", \"city\":\"New York \", \"state\":\"NY \", \"zipCode\":\"10000 \"} for k,v", "1 elif opt in ('-m', '--method'): if arg == '?':", "') print() sys.exit (0) def readServers (file): servers = []", "value += ' ' + line.strip () else: tag, value", "'\"' and value[-1] == '\"': value = value[1:-1] if tag", "line = f.readline () if line == '' or line[0]", "shipTo_d[k] = v[:-1] itemd1 = SOAP.structType( {\"name\":\"widg1\",\"quantity\":200,\"price\":SOAP.decimalType(45.99), \"_typename\":\"LineItem\"}) itemd2 =", "import sys import string import re import time sys.path.insert(1,\"..\") from", "print(title, \"succeeded \") succeed += 1 except KeyboardInterrupt: print(\"fail\") raise", "raise except: if name in s['nonfunctional']: if 'F' in output:", "total) print(\" Successes: %d (%3.2f%%)\" % \\ (succeed, 100.0 *", 
"(i)] = 1 l = list(l.keys ()) l.sort () return", "-?, --help display this usage -d, --debug turn on debugging", "\"\"\" % (sys.argv[0], DEFAULT_SERVERS_FILE), end=' ') sys.exit (0) def methodUsage", "= 0 failok = 0 notimp = 0 try: opts,args", "sa, epname): serv = serv._sa (sa % {'methodname':'Ping'}) return serv.Ping()", "(cur) while 1: if line[0] in string.whitespace: if tag ==", "(s['name'], name, num) try: fn = globals ()[name] except KeyboardInterrupt:", "0: print(\" Failed as expected: %d (%3.2f%%)\" % \\ (failok,", "'as expected' if s['nonfunctional'][name] != '': t += ', '", "a\\ncomma-separated list of numbers or ranges. \" \\ \"For example", "METHOD# of ? for the list of method numbers -o,", "succeed += 1 except KeyboardInterrupt: print(\"fail\") raise except: if name", "at:\", time.ctime (time.time())) if stats > 0: print(\" Total tests:", "% {'methodname':'RequestForQuote'}) return serv.RequestForQuote(Quantity=3, ProductName = \"thing\") # for Phalanx,", "while 1: if line[0] in string.whitespace: if tag == 'nonfunctional':", "= 1 total = 0 fail = 0 failok =", "return serv.RequestForQuote(Quantity=3, ProductName = \"thing\") # for Phalanx, JHawk def", "\", \"address\":\"1 1st Street \", \"city\":\"New York \", \"state\":\"NY \",", "(error = None): sys.stdout = sys.stderr if error != None:", "po_d = SOAP.structType( data = {\"poID\":\"myord\",\"createDate\":SOAP.dateTimeType(),\"shipTo\":shipTo_d, \"billTo\":billTo_d, \"items\":items_d}) try: #", "serv.Ping() def main(): servers = DEFAULT_SERVERS_FILE methodnums = None output", "0 try: opts,args = getopt.getopt (sys.argv[1:], '?dm:io:s:t', ['help', 'method', 'debug',", "'%s: %s (#%d)' % (s['name'], name, num) try: fn =", "if stats: print(\" Tests ended at:\", time.ctime (time.time())) if stats", "succeed = 0 printtrace = 0 stats = 1 total", "usage -d, --debug turn on debugging in the SOAP library", "\"zipCode\":\"10000 \"} for k,v in list(shipTo_d.items()): shipTo_d[k] = v[:-1] itemd1", "stack trace on 
each unexpected failure -T, --always-stacktrace print a", "() elif opt in ('-d', '--debug'): SOAP.Config.debug = 1 elif", "a stack trace on any failure \"\"\" % (sys.argv[0], DEFAULT_SERVERS_FILE),", "% opt) except SystemExit: raise except: usage (sys.exc_info ()[1]) if", "if i + half < len (DEFAULT_METHODS): print(\"%4d. %-25s\" %", "tag, value = line.split (':', 1) tag = tag.strip ().lower", "servers given -m, --method=METHOD#[,METHOD#...] call only the given methods, specify", "for num in (methodnums): if num > len(DEFAULT_METHODS): break total", "as expected: %d (%3.2f%%)\" % \\ (failok, 100.0 * failok", "line[0] in string.whitespace: if tag == 'nonfunctional': value = method", "\"items\":items_d}) try: # it's called PO by MST (MS SOAP", "+= 1 continue try: res = fn (serv, s['soapaction'], s['name'])", "the SOAP library -i, --invert test servers *not* in the", "test [%s] -t, --stacktrace print a stack trace on each", "for k,v in list(shipTo_d.items()): shipTo_d[k] = v[:-1] itemd1 = SOAP.structType(", "AttributeError(\"Recognized but unimplemented option `%s'\" % opt) except SystemExit: raise", "l = {} for i in s.split (','): if i.find", "\"_typename\":\"LineItem\"}) items_d = SOAP.arrayType( [itemd1, itemd2] ) items_d._ns = \"http://www.soapinterop.org/Bid\"", "servers = readServers(servers) if methodnums == None: methodnums = list(range(1,", "line == '': break if line[0] in ('#', '\\n') or", "i in s.split (','): if i.find ('-') != -1: i", "DEFAULT_METHODS[num - 1] title = '%s: %s (#%d)' % (s['name'],", "[itemd1, itemd2] ) items_d._ns = \"http://www.soapinterop.org/Bid\" po_d = SOAP.structType( data", "0 or notimp > 0: print(\" Not implemented: %d (%3.2f%%)\"", "1): l[i] = 1 else: l[int (i)] = 1 l", "('-m', '--method'): if arg == '?': methodUsage () methodnums =", "in range (half): print(\"%4d. 
%-25s\" % (i + 1, DEFAULT_METHODS[i]),", "= method + ' ' + cur[tag][method] else: value =", "tag = tag.strip ().lower () value = value.strip () if", "data = {\"poID\":\"myord\",\"createDate\":SOAP.dateTimeType(),\"shipTo\":shipTo_d, \"billTo\":billTo_d, \"items\":items_d}) try: # it's called PO", "succeed / total)) if stats > 0 or fail >", "= {\"poID\":\"myord\",\"createDate\":SOAP.dateTimeType(),\"shipTo\":shipTo_d, \"billTo\":billTo_d, \"items\":items_d}) try: # it's called PO by", "(not not limitre.match (s['name'])) == invert: continue serv = SOAP.SOAPProxy(s['endpoint'],", "+ half]), end=' ') print() sys.exit (0) def readServers (file):", "params def RequestForQuote(serv, sa, epname): serv = serv._sa (sa %", "== 'nonfunctional': value = value.split (' ', 1) + ['']", "= 0 succeed = 0 printtrace = 0 stats =", "on any failure \"\"\" % (sys.argv[0], DEFAULT_SERVERS_FILE), end=' ') sys.exit", "SOAP library -i, --invert test servers *not* in the list", "sa, epname): serv = serv._sa (sa % {'methodname':'SimpleBuy'}) return serv.SimpleBuy(ProductName=\"widget\",", "!= None: print(error) print(\"\"\"usage: %s [options] [server ...] 
If a", "sys.stdout = sys.stderr if error != None: print(error) print(\"\"\"usage: %s", "() methodnums = str2list (arg) elif opt in ('-o', '--output'):", "num > len(DEFAULT_METHODS): break total += 1 name = DEFAULT_METHODS[num", "re import time sys.path.insert(1,\"..\") from SOAPpy import SOAP import traceback", "* failok / total)) if stats > 0 or notimp", "York\", \"state\":\"NY\", \"zipCode\":\"10000\"} shipTo_d = {\"name\":\"<NAME> \", \"address\":\"1 1st Street", "open (file, 'r') while 1: line = f.readline () if", "require this order of params def RequestForQuote(serv, sa, epname): serv", "serv.RequestForQuote(Quantity=3, ProductName = \"thing\") # for Phalanx, JHawk def Buy(serv,", "unimplemented option `%s'\" % opt) except SystemExit: raise except: usage", "1 l = list(l.keys ()) l.sort () return l def", "HP, EasySoap, and Jake (Frontier). [Actzero accepts either] return serv.Buy(PO=po_d)", "SOAP.arrayType( [itemd1, itemd2] ) items_d._ns = \"http://www.soapinterop.org/Bid\" po_d = SOAP.structType(", "'--invert'): invert = 1 elif opt in ('-m', '--method'): if", "and value[-1] == '\"': value = value[1:-1] if tag ==", "items_d._ns = \"http://www.soapinterop.org/Bid\" po_d = SOAP.structType( data = {\"poID\":\"myord\",\"createDate\":SOAP.dateTimeType(),\"shipTo\":shipTo_d, \"billTo\":billTo_d,", "sys.exit (0) def readServers (file): servers = [] f =", "('-o', '--output'): output = arg elif opt in ('-s', '--servers'):", "-T, --always-stacktrace print a stack trace on any failure \"\"\"", "total)) return fail + notimp if __name__ == \"__main__\": main()", "len (DEFAULT_METHODS): print(\"%4d. 
%-25s\" % (i + 1 + half,", "'fFns' servers = readServers(servers) if methodnums == None: methodnums =", "= re.compile ('|'.join (args), re.IGNORECASE) for s in servers: if", "= f.readline () if line == '': break if line[0]", "for the list of method numbers -o, --output=TYPE turn on", "{\"name\":\"<NAME>\", \"address\":\"1 1st Street\", \"city\":\"New York\", \"state\":\"NY\", \"zipCode\":\"10000\"} shipTo_d =", "v[:-1] itemd1 = SOAP.structType( {\"name\":\"widg1\",\"quantity\":200,\"price\":SOAP.decimalType(45.99), \"_typename\":\"LineItem\"}) itemd2 = SOAP.structType( {\"name\":\"widg2\",\"quantity\":400,\"price\":SOAP.decimalType(33.45),", "1, 4, 5, 6, and 8.\\n\") print(\"The available methods are:\\n\")", "8.\\n\") print(\"The available methods are:\\n\") half = (len (DEFAULT_METHODS) +", "argument is mandatory, it's mandatory for the equivalent short option", "(':', 1) tag = tag.strip ().lower () value = value.strip", "\\ (fail, 100.0 * fail / total)) if stats >", ") items_d._ns = \"http://www.soapinterop.org/Bid\" po_d = SOAP.structType( data = {\"poID\":\"myord\",\"createDate\":SOAP.dateTimeType(),\"shipTo\":shipTo_d,", "def readServers (file): servers = [] f = open (file,", "= list(range(1, len (DEFAULT_METHODS) + 1)) limitre = re.compile ('|'.join", "if arg == '?': methodUsage () methodnums = str2list (arg)", "= serv._sa (sa % {'methodname':'RequestForQuote'}) return serv.RequestForQuote(Quantity=3, ProductName = \"thing\")", "'--method'): if arg == '?': methodUsage () methodnums = str2list", "(0) def methodUsage (): sys.stdout = sys.stderr print(\"Methods are specified", "turn on debugging in the SOAP library -i, --invert test", "= SOAP.structType( {\"name\":\"widg2\",\"quantity\":400,\"price\":SOAP.decimalType(33.45), \"_typename\":\"LineItem\"}) items_d = SOAP.arrayType( [itemd1, itemd2] )", "cur[tag] value += ' ' + line.strip () else: tag,", "'r') while 1: line = f.readline () if line ==", "while 1: line = f.readline () if line == '':", "output = arg 
elif opt in ('-s', '--servers'): servers =", "\\ \"For example 1,4-6,8 specifies\\nmethods 1, 4, 5, 6, and", "list of method numbers -o, --output=TYPE turn on output, TYPE", "the list of servers given -m, --method=METHOD#[,METHOD#...] call only the", "= 'f' invert = 0 succeed = 0 printtrace =", "\") succeed += 1 except KeyboardInterrupt: print(\"fail\") raise except: if", "import string import re import time sys.path.insert(1,\"..\") from SOAPpy import", "1st Street\", \"city\":\"New York\", \"state\":\"NY\", \"zipCode\":\"10000\"} shipTo_d = {\"name\":\"<NAME> \",", "servers = arg else: raise AttributeError(\"Recognized but unimplemented option `%s'\"", "= readServers(servers) if methodnums == None: methodnums = list(range(1, len", "# for Phalanx, JHawk def Buy(serv, sa, epname): import copy", "['help', 'method', 'debug', 'invert', 'output', 'servers=']) for opt, arg in", "except KeyboardInterrupt: raise except: if 'n' in output: print(title, \"test", "output: print(title, \"test not yet implemented\") notimp += 1 continue", "k,v in list(shipTo_d.items()): shipTo_d[k] = v[:-1] itemd1 = SOAP.structType( {\"name\":\"widg1\",\"quantity\":200,\"price\":SOAP.decimalType(45.99),", "opts: if opt in ('-?', '--help'): usage () elif opt", "output, TYPE is one or more of s(uccess), f(ailure), n(ot", "invert: continue serv = SOAP.SOAPProxy(s['endpoint'], namespace = s['namespace']) for num", "5, 6, and 8.\\n\") print(\"The available methods are:\\n\") half =", "'nonfunctional': value = value.split (' ', 1) + [''] method", "in range (int (i[0]),int (i[1]) + 1): l[i] = 1", "called PurchaseOrder by KeithBa return serv.Buy(PurchaseOrder=po_d) def Ping(serv, sa, epname):", "!= -1: i = i.split ('-') for i in range", "\", \"state\":\"NY \", \"zipCode\":\"10000 \"} for k,v in list(shipTo_d.items()): shipTo_d[k]", "'': break if line[0] in ('#', '\\n') or line[0] in", "'': t += ', ' + s['nonfunctional'][name] print(title, \"failed (%s)", "test servers *not* in the list of servers given 
-m,", "'s' in output: print(title, \"succeeded \") succeed += 1 except", "if line == '': break if line[0] in ('#', '\\n')", "range (int (i[0]),int (i[1]) + 1): l[i] = 1 else:", "# Idoox WASP, Paul (SOAP::Lite), PranishK (ATL), GLUE, Aumsoft, #", "0 notimp = 0 try: opts,args = getopt.getopt (sys.argv[1:], '?dm:io:s:t',", "fn = globals ()[name] except KeyboardInterrupt: raise except: if 'n'", "print a stack trace on any failure \"\"\" % (sys.argv[0],", "'nonfunctional': value = method + ' ' + cur[tag][method] else:", "2 for i in range (half): print(\"%4d. %-25s\" % (i", "else: l[int (i)] = 1 l = list(l.keys ()) l.sort", "(sys.argv[1:], '?dm:io:s:t', ['help', 'method', 'debug', 'invert', 'output', 'servers=']) for opt,", "SOAP.structType( {\"name\":\"widg1\",\"quantity\":200,\"price\":SOAP.decimalType(45.99), \"_typename\":\"LineItem\"}) itemd2 = SOAP.structType( {\"name\":\"widg2\",\"quantity\":400,\"price\":SOAP.decimalType(33.45), \"_typename\":\"LineItem\"}) items_d =", "\"this is my address\") #JHawk, Phalanx require this order of", "print(\"%4d. %-25s\" % (i + 1 + half, DEFAULT_METHODS[i +", "except: # called PurchaseOrder by KeithBa return serv.Buy(PurchaseOrder=po_d) def Ping(serv,", "shows an argument is mandatory, it's mandatory for the equivalent", "or ranges. 
\" \\ \"For example 1,4-6,8 specifies\\nmethods 1, 4,", "0 fail = 0 failok = 0 notimp = 0", "% {'methodname':'Buy'}) billTo_d = {\"name\":\"<NAME>\", \"address\":\"1 1st Street\", \"city\":\"New York\",", "+ 1) / 2 for i in range (half): print(\"%4d.", "Address = \"this is my address\") #JHawk, Phalanx require this", "PurchaseOrder by KeithBa return serv.Buy(PurchaseOrder=po_d) def Ping(serv, sa, epname): serv", "KeithBa return serv.Buy(PurchaseOrder=po_d) def Ping(serv, sa, epname): serv = serv._sa", "= s['namespace']) for num in (methodnums): if num > len(DEFAULT_METHODS):", "value[0] == '\"' and value[-1] == '\"': value = value[1:-1]", "serv = serv._sa (sa % {'methodname':'RequestForQuote'}) return serv.RequestForQuote(Quantity=3, ProductName =", "()[1]) if 'a' in output: output = 'fFns' servers =", "sys import string import re import time sys.path.insert(1,\"..\") from SOAPpy", "[f] -s, --servers=FILE use FILE as list of servers to", "specified by number. Multiple methods can be \" \\ \"specified", "' + cur[tag][method] else: value = cur[tag] value += '", "== '' or line[0] == '\\n': break return servers def", "half < len (DEFAULT_METHODS): print(\"%4d. 
%-25s\" % (i + 1", "not limitre.match (s['name'])) == invert: continue serv = SOAP.SOAPProxy(s['endpoint'], namespace", "(sa % {'methodname':'Ping'}) return serv.Ping() def main(): servers = DEFAULT_SERVERS_FILE", "can be \" \\ \"specified using a\\ncomma-separated list of numbers", "= v[:-1] itemd1 = SOAP.structType( {\"name\":\"widg1\",\"quantity\":200,\"price\":SOAP.decimalType(45.99), \"_typename\":\"LineItem\"}) itemd2 = SOAP.structType(", "1 if stats: print(\" Tests ended at:\", time.ctime (time.time())) if", "of s(uccess), f(ailure), n(ot implemented), F(ailed (as expected)), a(ll) [f]", "else: cur[tag] = value line = f.readline () if line", "expected)), a(ll) [f] -s, --servers=FILE use FILE as list of", "+ 1)) limitre = re.compile ('|'.join (args), re.IGNORECASE) for s", "stats > 0: print(\" Total tests: %d\" % total) print(\"", "print(error) print(\"\"\"usage: %s [options] [server ...] If a long option", "value = value.strip () if value[0] == '\"' and value[-1]", "(i[0]),int (i[1]) + 1): l[i] = 1 else: l[int (i)]", "'invert', 'output', 'servers=']) for opt, arg in opts: if opt", "sys.stdout = sys.stderr print(\"Methods are specified by number. 
Multiple methods", "methodnums = None output = 'f' invert = 0 succeed", "= str2list (arg) elif opt in ('-o', '--output'): output =", "= ('SimpleBuy', 'RequestForQuote','Buy','Ping') def usage (error = None): sys.stdout =", "().lower () value = value.strip () if value[0] == '\"'", "t += ', ' + s['nonfunctional'][name] print(title, \"failed (%s) -\"", "half, DEFAULT_METHODS[i + half]), end=' ') print() sys.exit (0) def", "> 0 or fail > 0: print(\"Failed unexpectedly: %d (%3.2f%%)\"", "> 0: print(\" Not implemented: %d (%3.2f%%)\" % \\ (notimp,", "servers def str2list (s): l = {} for i in", "if name in s['nonfunctional']: if 'F' in output: t =", "= 0 notimp = 0 try: opts,args = getopt.getopt (sys.argv[1:],", "string.whitespace: if tag == 'nonfunctional': value = method + '", "% (i + 1 + half, DEFAULT_METHODS[i + half]), end='", "limitre.match (s['name'])) == invert: continue serv = SOAP.SOAPProxy(s['endpoint'], namespace =", "%-25s\" % (i + 1, DEFAULT_METHODS[i]), end=' ') if i", "= 1 else: l[int (i)] = 1 l = list(l.keys", "+= 1 else: if 'f' in output: print(title, \"failed -\",", "{} for i in s.split (','): if i.find ('-') !=", "Multiple methods can be \" \\ \"specified using a\\ncomma-separated list", "failok / total)) if stats > 0 or notimp >", "None output = 'f' invert = 0 succeed = 0", "methodUsage () methodnums = str2list (arg) elif opt in ('-o',", "l.sort () return l def SimpleBuy(serv, sa, epname): serv =", "(DEFAULT_METHODS): print(\"%4d. %-25s\" % (i + 1 + half, DEFAULT_METHODS[i", "value line = f.readline () if line == '' or", "l def SimpleBuy(serv, sa, epname): serv = serv._sa (sa %", "is my address\") #JHawk, Phalanx require this order of params", "serv._sa (sa % {'methodname':'Ping'}) return serv.Ping() def main(): servers =", "or line[0] == '\\n': break return servers def str2list (s):", "print(\"Methods are specified by number. 
Multiple methods can be \"", "= SOAP.arrayType( [itemd1, itemd2] ) items_d._ns = \"http://www.soapinterop.org/Bid\" po_d =", "'F' in output: t = 'as expected' if s['nonfunctional'][name] !=", "= sys.stderr print(\"Methods are specified by number. Multiple methods can", "s['namespace']) for num in (methodnums): if num > len(DEFAULT_METHODS): break", "stats: print(\" Tests ended at:\", time.ctime (time.time())) if stats >", "\"thing\") # for Phalanx, JHawk def Buy(serv, sa, epname): import", "= None output = 'f' invert = 0 succeed =", "servers *not* in the list of servers given -m, --method=METHOD#[,METHOD#...]", "(Frontier). [Actzero accepts either] return serv.Buy(PO=po_d) except: # called PurchaseOrder", "len (DEFAULT_METHODS) + 1)) limitre = re.compile ('|'.join (args), re.IGNORECASE)", "'?': methodUsage () methodnums = str2list (arg) elif opt in", "for i in range (int (i[0]),int (i[1]) + 1): l[i]", "== '\"' and value[-1] == '\"': value = value[1:-1] if", "JHawk (.NET Remoting), # Idoox WASP, Paul (SOAP::Lite), PranishK (ATL),", "= tag.strip ().lower () value = value.strip () if value[0]", "elif 's' in output: print(title, \"succeeded \") succeed += 1", "in s['nonfunctional']: print(title, \"succeeded despite marked nonfunctional\") elif 's' in", "str2list (arg) elif opt in ('-o', '--output'): output = arg", "1 name = DEFAULT_METHODS[num - 1] title = '%s: %s", "name, num) try: fn = globals ()[name] except KeyboardInterrupt: raise", "= sys.stderr if error != None: print(error) print(\"\"\"usage: %s [options]", "') if i + half < len (DEFAULT_METHODS): print(\"%4d. %-25s\"", "implemented\") notimp += 1 continue try: res = fn (serv,", "? 
for the list of method numbers -o, --output=TYPE turn", "('-i', '--invert'): invert = 1 elif opt in ('-m', '--method'):", "print() sys.exit (0) def readServers (file): servers = [] f", "def str2list (s): l = {} for i in s.split", "n(ot implemented), F(ailed (as expected)), a(ll) [f] -s, --servers=FILE use", "0 stats = 1 total = 0 fail = 0", "usage (sys.exc_info ()[1]) if 'a' in output: output = 'fFns'", "% (s['name'], name, num) try: fn = globals ()[name] except", "% \\ (fail, 100.0 * fail / total)) if stats", "+ [''] method = value[0] cur[tag][method] = value[1] else: cur[tag]", "import SOAP import traceback DEFAULT_SERVERS_FILE = './inventory.servers' DEFAULT_METHODS = ('SimpleBuy',", "f(ailure), n(ot implemented), F(ailed (as expected)), a(ll) [f] -s, --servers=FILE", "the given methods, specify a METHOD# of ? for the", "% total) print(\" Successes: %d (%3.2f%%)\" % \\ (succeed, 100.0", "one or more of s(uccess), f(ailure), n(ot implemented), F(ailed (as", "('SimpleBuy', 'RequestForQuote','Buy','Ping') def usage (error = None): sys.stdout = sys.stderr", "1 else: l[int (i)] = 1 l = list(l.keys ())", "of ? for the list of method numbers -o, --output=TYPE", "= {\"name\":\"<NAME>\", \"address\":\"1 1st Street\", \"city\":\"New York\", \"state\":\"NY\", \"zipCode\":\"10000\"} shipTo_d", "elif opt in ('-d', '--debug'): SOAP.Config.debug = 1 elif opt", "# HP, EasySoap, and Jake (Frontier). [Actzero accepts either] return", "print(\" Not implemented: %d (%3.2f%%)\" % \\ (notimp, 100.0 *", "Not implemented: %d (%3.2f%%)\" % \\ (notimp, 100.0 * notimp", "if 'f' in output: print(title, \"failed -\", str (sys.exc_info()[1])) fail", "short option also. 
-?, --help display this usage -d, --debug", "--invert test servers *not* in the list of servers given", "\"address\":\"1 1st Street\", \"city\":\"New York\", \"state\":\"NY\", \"zipCode\":\"10000\"} shipTo_d = {\"name\":\"<NAME>", "Tests ended at:\", time.ctime (time.time())) if stats > 0: print(\"", "() else: tag, value = line.split (':', 1) tag =", "0: print(\"Failed unexpectedly: %d (%3.2f%%)\" % \\ (fail, 100.0 *", "...] If a long option shows an argument is mandatory,", "\"http://www.soapinterop.org/Bid\" po_d = SOAP.structType( data = {\"poID\":\"myord\",\"createDate\":SOAP.dateTimeType(),\"shipTo\":shipTo_d, \"billTo\":billTo_d, \"items\":items_d}) try:", "0 succeed = 0 printtrace = 0 stats = 1", "trace on each unexpected failure -T, --always-stacktrace print a stack", "0: print(\" Not implemented: %d (%3.2f%%)\" % \\ (notimp, 100.0", "of params def RequestForQuote(serv, sa, epname): serv = serv._sa (sa", "methodnums = str2list (arg) elif opt in ('-o', '--output'): output", "i = i.split ('-') for i in range (int (i[0]),int", "return serv.Ping() def main(): servers = DEFAULT_SERVERS_FILE methodnums = None", "' + s['nonfunctional'][name] print(title, \"failed (%s) -\" %t, sys.exc_info()[1]) failok", "if value[0] == '\"' and value[-1] == '\"': value =", "value = line.split (':', 1) tag = tag.strip ().lower ()", "= value[0] cur[tag][method] = value[1] else: cur[tag] = value line", "value = cur[tag] value += ' ' + line.strip ()", "serv = serv._sa (sa % {'methodname':'SimpleBuy'}) return serv.SimpleBuy(ProductName=\"widget\", Quantity =", "display this usage -d, --debug turn on debugging in the", "expected: %d (%3.2f%%)\" % \\ (failok, 100.0 * failok /", "the list of method numbers -o, --output=TYPE turn on output,", "if line[0] in string.whitespace: if tag == 'nonfunctional': value =", "to test [%s] -t, --stacktrace print a stack trace on", "Buy(serv, sa, epname): import copy serv = serv._sa (sa %", "Remoting), # Idoox WASP, Paul (SOAP::Lite), PranishK (ATL), 
GLUE, Aumsoft,", "if num > len(DEFAULT_METHODS): break total += 1 name =", "in (methodnums): if num > len(DEFAULT_METHODS): break total += 1", "(notimp, 100.0 * notimp / total)) return fail + notimp", "ranges. \" \\ \"For example 1,4-6,8 specifies\\nmethods 1, 4, 5,", "continue try: res = fn (serv, s['soapaction'], s['name']) if name", "by KeithBa return serv.Buy(PurchaseOrder=po_d) def Ping(serv, sa, epname): serv =", "in output: print(title, \"failed -\", str (sys.exc_info()[1])) fail += 1", "== '': break if line[0] in ('#', '\\n') or line[0]", "except: usage (sys.exc_info ()[1]) if 'a' in output: output =", "= globals ()[name] except KeyboardInterrupt: raise except: if 'n' in", "implemented), F(ailed (as expected)), a(ll) [f] -s, --servers=FILE use FILE", "()) l.sort () return l def SimpleBuy(serv, sa, epname): serv", "\"specified using a\\ncomma-separated list of numbers or ranges. \" \\", "RequestForQuote(serv, sa, epname): serv = serv._sa (sa % {'methodname':'RequestForQuote'}) return", "line[0] in ('#', '\\n') or line[0] in string.whitespace: continue cur", "print(\"Failed unexpectedly: %d (%3.2f%%)\" % \\ (fail, 100.0 * fail", "if stats > 0 or notimp > 0: print(\" Not", "% \\ (succeed, 100.0 * succeed / total)) if stats", "0: print(\" Total tests: %d\" % total) print(\" Successes: %d", "{}} tag = None servers.append (cur) while 1: if line[0]", "sys.path.insert(1,\"..\") from SOAPpy import SOAP import traceback DEFAULT_SERVERS_FILE = './inventory.servers'", "either] return serv.Buy(PO=po_d) except: # called PurchaseOrder by KeithBa return", "arg else: raise AttributeError(\"Recognized but unimplemented option `%s'\" % opt)", "are:\\n\") half = (len (DEFAULT_METHODS) + 1) / 2 for", "call only the given methods, specify a METHOD# of ?", "= [] f = open (file, 'r') while 1: line", "except KeyboardInterrupt: print(\"fail\") raise except: if name in s['nonfunctional']: if", "--output=TYPE turn on output, TYPE is one or more of", "opt in ('-?', '--help'): usage () 
elif opt in ('-d',", "F(ailed (as expected)), a(ll) [f] -s, --servers=FILE use FILE as", "= getopt.getopt (sys.argv[1:], '?dm:io:s:t', ['help', 'method', 'debug', 'invert', 'output', 'servers='])", "in s['nonfunctional']: if 'F' in output: t = 'as expected'", "import time sys.path.insert(1,\"..\") from SOAPpy import SOAP import traceback DEFAULT_SERVERS_FILE", "print(title, \"failed (%s) -\" %t, sys.exc_info()[1]) failok += 1 else:", "[server ...] If a long option shows an argument is", "l[i] = 1 else: l[int (i)] = 1 l =", "\\ (notimp, 100.0 * notimp / total)) return fail +", "epname): serv = serv._sa (sa % {'methodname':'SimpleBuy'}) return serv.SimpleBuy(ProductName=\"widget\", Quantity", "def Ping(serv, sa, epname): serv = serv._sa (sa % {'methodname':'Ping'})", "', ' + s['nonfunctional'][name] print(title, \"failed (%s) -\" %t, sys.exc_info()[1])", "= SOAP.structType( data = {\"poID\":\"myord\",\"createDate\":SOAP.dateTimeType(),\"shipTo\":shipTo_d, \"billTo\":billTo_d, \"items\":items_d}) try: # it's", "opt) except SystemExit: raise except: usage (sys.exc_info ()[1]) if 'a'", "methodnums = list(range(1, len (DEFAULT_METHODS) + 1)) limitre = re.compile", "\" \\ \"For example 1,4-6,8 specifies\\nmethods 1, 4, 5, 6,", "'\\n') or line[0] in string.whitespace: continue cur = {'nonfunctional': {}}", "%d (%3.2f%%)\" % \\ (failok, 100.0 * failok / total))", "Phalanx, JHawk def Buy(serv, sa, epname): import copy serv =", "()[name] except KeyboardInterrupt: raise except: if 'n' in output: print(title,", "None: print(error) print(\"\"\"usage: %s [options] [server ...] 
If a long", "res = fn (serv, s['soapaction'], s['name']) if name in s['nonfunctional']:", "' ' + line.strip () else: tag, value = line.split", "MST (MS SOAP Toolkit), JHawk (.NET Remoting), # Idoox WASP,", "on output, TYPE is one or more of s(uccess), f(ailure),", "opt in ('-o', '--output'): output = arg elif opt in", "list of servers to test [%s] -t, --stacktrace print a", "\"city\":\"New York\", \"state\":\"NY\", \"zipCode\":\"10000\"} shipTo_d = {\"name\":\"<NAME> \", \"address\":\"1 1st", "return serv.Buy(PurchaseOrder=po_d) def Ping(serv, sa, epname): serv = serv._sa (sa", "1) tag = tag.strip ().lower () value = value.strip ()", "> 0: print(\" Failed as expected: %d (%3.2f%%)\" % \\", "a(ll) [f] -s, --servers=FILE use FILE as list of servers", "(s['name'])) == invert: continue serv = SOAP.SOAPProxy(s['endpoint'], namespace = s['namespace'])", "end=' ') sys.exit (0) def methodUsage (): sys.stdout = sys.stderr", "print(\" Tests ended at:\", time.ctime (time.time())) if stats > 0:", "value[1] else: cur[tag] = value line = f.readline () if", "%-25s\" % (i + 1 + half, DEFAULT_METHODS[i + half]),", "+ half, DEFAULT_METHODS[i + half]), end=' ') print() sys.exit (0)", "{\"name\":\"<NAME> \", \"address\":\"1 1st Street \", \"city\":\"New York \", \"state\":\"NY", "-s, --servers=FILE use FILE as list of servers to test", "1) + [''] method = value[0] cur[tag][method] = value[1] else:", "= serv._sa (sa % {'methodname':'SimpleBuy'}) return serv.SimpleBuy(ProductName=\"widget\", Quantity = 50,", "-i, --invert test servers *not* in the list of servers", "option `%s'\" % opt) except SystemExit: raise except: usage (sys.exc_info", "else: tag, value = line.split (':', 1) tag = tag.strip", "= value[1] else: cur[tag] = value line = f.readline ()", "re.compile ('|'.join (args), re.IGNORECASE) for s in servers: if (not", "\"zipCode\":\"10000\"} shipTo_d = {\"name\":\"<NAME> \", \"address\":\"1 1st Street \", \"city\":\"New", "sys.stderr if error != None: print(error) 
print(\"\"\"usage: %s [options] [server", "+= 1 name = DEFAULT_METHODS[num - 1] title = '%s:", "else: if 'f' in output: print(title, \"failed -\", str (sys.exc_info()[1]))", "tests: %d\" % total) print(\" Successes: %d (%3.2f%%)\" % \\", "as list of servers to test [%s] -t, --stacktrace print", "SOAPpy import SOAP import traceback DEFAULT_SERVERS_FILE = './inventory.servers' DEFAULT_METHODS =", "print(\"The available methods are:\\n\") half = (len (DEFAULT_METHODS) + 1)", "list(range(1, len (DEFAULT_METHODS) + 1)) limitre = re.compile ('|'.join (args),", "TYPE is one or more of s(uccess), f(ailure), n(ot implemented),", "cur[tag][method] else: value = cur[tag] value += ' ' +", "called PO by MST (MS SOAP Toolkit), JHawk (.NET Remoting),", "= {'nonfunctional': {}} tag = None servers.append (cur) while 1:", "value = value.split (' ', 1) + [''] method =", "in output: print(title, \"succeeded \") succeed += 1 except KeyboardInterrupt:", "total = 0 fail = 0 failok = 0 notimp", "\\ (succeed, 100.0 * succeed / total)) if stats >", "import traceback DEFAULT_SERVERS_FILE = './inventory.servers' DEFAULT_METHODS = ('SimpleBuy', 'RequestForQuote','Buy','Ping') def", "s['nonfunctional']: print(title, \"succeeded despite marked nonfunctional\") elif 's' in output:", "f.readline () if line == '': break if line[0] in", "< len (DEFAULT_METHODS): print(\"%4d. 
%-25s\" % (i + 1 +", "in ('-m', '--method'): if arg == '?': methodUsage () methodnums", "return servers def str2list (s): l = {} for i", "* succeed / total)) if stats > 0 or fail", "an argument is mandatory, it's mandatory for the equivalent short", "= value.split (' ', 1) + [''] method = value[0]", "cur[tag][method] = value[1] else: cur[tag] = value line = f.readline", "s in servers: if (not not limitre.match (s['name'])) == invert:", "nonfunctional\") elif 's' in output: print(title, \"succeeded \") succeed +=", "sa, epname): import copy serv = serv._sa (sa % {'methodname':'Buy'})", "100.0 * notimp / total)) return fail + notimp if", "Phalanx require this order of params def RequestForQuote(serv, sa, epname):", "== 'nonfunctional': value = method + ' ' + cur[tag][method]", "= cur[tag] value += ' ' + line.strip () else:", "in ('-d', '--debug'): SOAP.Config.debug = 1 elif opt in ('-i',", "%d (%3.2f%%)\" % \\ (fail, 100.0 * fail / total))", "main(): servers = DEFAULT_SERVERS_FILE methodnums = None output = 'f'", "[] f = open (file, 'r') while 1: line =", "fail = 0 failok = 0 notimp = 0 try:", "this order of params def RequestForQuote(serv, sa, epname): serv =", "of method numbers -o, --output=TYPE turn on output, TYPE is", "# called PurchaseOrder by KeithBa return serv.Buy(PurchaseOrder=po_d) def Ping(serv, sa,", "\" \\ \"specified using a\\ncomma-separated list of numbers or ranges.", "opt in ('-d', '--debug'): SOAP.Config.debug = 1 elif opt in", "1] title = '%s: %s (#%d)' % (s['name'], name, num)", "+ 1): l[i] = 1 else: l[int (i)] = 1", "= 50, Address = \"this is my address\") #JHawk, Phalanx", "import re import time sys.path.insert(1,\"..\") from SOAPpy import SOAP import", "\"For example 1,4-6,8 specifies\\nmethods 1, 4, 5, 6, and 8.\\n\")", "line[0] in string.whitespace: continue cur = {'nonfunctional': {}} tag =", "\"city\":\"New York \", \"state\":\"NY \", \"zipCode\":\"10000 \"} for k,v in", "(sa % {'methodname':'Buy'}) billTo_d = {\"name\":\"<NAME>\", 
\"address\":\"1 1st Street\", \"city\":\"New", "given -m, --method=METHOD#[,METHOD#...] call only the given methods, specify a", "usage () elif opt in ('-d', '--debug'): SOAP.Config.debug = 1", "tag == 'nonfunctional': value = value.split (' ', 1) +", "marked nonfunctional\") elif 's' in output: print(title, \"succeeded \") succeed", "') sys.exit (0) def methodUsage (): sys.stdout = sys.stderr print(\"Methods", "% {'methodname':'Ping'}) return serv.Ping() def main(): servers = DEFAULT_SERVERS_FILE methodnums", "('|'.join (args), re.IGNORECASE) for s in servers: if (not not", "notimp > 0: print(\" Not implemented: %d (%3.2f%%)\" % \\", "== '\"': value = value[1:-1] if tag == 'nonfunctional': value", "line = f.readline () if line == '': break if", "(sys.exc_info()[1])) fail += 1 if stats: print(\" Tests ended at:\",", "raise except: usage (sys.exc_info ()[1]) if 'a' in output: output", "failure \"\"\" % (sys.argv[0], DEFAULT_SERVERS_FILE), end=' ') sys.exit (0) def", "if 'a' in output: output = 'fFns' servers = readServers(servers)", "f.readline () if line == '' or line[0] == '\\n':", "address\") #JHawk, Phalanx require this order of params def RequestForQuote(serv,", "by MST (MS SOAP Toolkit), JHawk (.NET Remoting), # Idoox", "1 except KeyboardInterrupt: print(\"fail\") raise except: if name in s['nonfunctional']:", "+ half < len (DEFAULT_METHODS): print(\"%4d. %-25s\" % (i +", "for i in range (half): print(\"%4d. %-25s\" % (i +", "WASP, Paul (SOAP::Lite), PranishK (ATL), GLUE, Aumsoft, # HP, EasySoap,", "in the list of servers given -m, --method=METHOD#[,METHOD#...] 
call only", "total += 1 name = DEFAULT_METHODS[num - 1] title =", "SystemExit: raise except: usage (sys.exc_info ()[1]) if 'a' in output:", "in string.whitespace: if tag == 'nonfunctional': value = method +", "--stacktrace print a stack trace on each unexpected failure -T,", "None): sys.stdout = sys.stderr if error != None: print(error) print(\"\"\"usage:", "= arg elif opt in ('-s', '--servers'): servers = arg", "stats > 0 or notimp > 0: print(\" Not implemented:", "1 continue try: res = fn (serv, s['soapaction'], s['name']) if", "1,4-6,8 specifies\\nmethods 1, 4, 5, 6, and 8.\\n\") print(\"The available", "despite marked nonfunctional\") elif 's' in output: print(title, \"succeeded \")", "+ s['nonfunctional'][name] print(title, \"failed (%s) -\" %t, sys.exc_info()[1]) failok +=", "(int (i[0]),int (i[1]) + 1): l[i] = 1 else: l[int", "+ ' ' + cur[tag][method] else: value = cur[tag] value", "getopt.getopt (sys.argv[1:], '?dm:io:s:t', ['help', 'method', 'debug', 'invert', 'output', 'servers=']) for", "-m, --method=METHOD#[,METHOD#...] call only the given methods, specify a METHOD#", "time.ctime (time.time())) if stats > 0: print(\" Total tests: %d\"", "or notimp > 0: print(\" Not implemented: %d (%3.2f%%)\" %", "PranishK (ATL), GLUE, Aumsoft, # HP, EasySoap, and Jake (Frontier).", "serv.SimpleBuy(ProductName=\"widget\", Quantity = 50, Address = \"this is my address\")", "[Actzero accepts either] return serv.Buy(PO=po_d) except: # called PurchaseOrder by", "raise except: if 'n' in output: print(title, \"test not yet", "print(title, \"test not yet implemented\") notimp += 1 continue try:", "+= ', ' + s['nonfunctional'][name] print(title, \"failed (%s) -\" %t,", "unexpectedly: %d (%3.2f%%)\" % \\ (fail, 100.0 * fail /", "getopt import sys import string import re import time sys.path.insert(1,\"..\")", "0 printtrace = 0 stats = 1 total = 0", "(time.time())) if stats > 0: print(\" Total tests: %d\" %", "list of servers given -m, --method=METHOD#[,METHOD#...] 
call only the given", "error != None: print(error) print(\"\"\"usage: %s [options] [server ...] If", "= list(l.keys ()) l.sort () return l def SimpleBuy(serv, sa,", "my address\") #JHawk, Phalanx require this order of params def", "() value = value.strip () if value[0] == '\"' and", "1 else: if 'f' in output: print(title, \"failed -\", str", "1 + half, DEFAULT_METHODS[i + half]), end=' ') print() sys.exit", "'servers=']) for opt, arg in opts: if opt in ('-?',", "{\"poID\":\"myord\",\"createDate\":SOAP.dateTimeType(),\"shipTo\":shipTo_d, \"billTo\":billTo_d, \"items\":items_d}) try: # it's called PO by MST", "= open (file, 'r') while 1: line = f.readline ()", "= 'as expected' if s['nonfunctional'][name] != '': t += ',", "== invert: continue serv = SOAP.SOAPProxy(s['endpoint'], namespace = s['namespace']) for", "tag.strip ().lower () value = value.strip () if value[0] ==", "not yet implemented\") notimp += 1 continue try: res =", "in string.whitespace: continue cur = {'nonfunctional': {}} tag = None", "%d\" % total) print(\" Successes: %d (%3.2f%%)\" % \\ (succeed,", "'a' in output: output = 'fFns' servers = readServers(servers) if", "Street \", \"city\":\"New York \", \"state\":\"NY \", \"zipCode\":\"10000 \"} for", "--servers=FILE use FILE as list of servers to test [%s]", "except: if 'n' in output: print(title, \"test not yet implemented\")", "= \"thing\") # for Phalanx, JHawk def Buy(serv, sa, epname):", "(sa % {'methodname':'SimpleBuy'}) return serv.SimpleBuy(ProductName=\"widget\", Quantity = 50, Address =", "s['nonfunctional']: if 'F' in output: t = 'as expected' if", "1: if line[0] in string.whitespace: if tag == 'nonfunctional': value", "stats = 1 total = 0 fail = 0 failok", "= 'fFns' servers = readServers(servers) if methodnums == None: methodnums", "(%3.2f%%)\" % \\ (fail, 100.0 * fail / total)) if", "f = open (file, 'r') while 1: line = f.readline", "'output', 'servers=']) for opt, arg in opts: if opt in", "in servers: if (not not limitre.match 
(s['name'])) == invert: continue", "DEFAULT_SERVERS_FILE methodnums = None output = 'f' invert = 0", "= {} for i in s.split (','): if i.find ('-')", "('#', '\\n') or line[0] in string.whitespace: continue cur = {'nonfunctional':", "elif opt in ('-s', '--servers'): servers = arg else: raise", "= value[1:-1] if tag == 'nonfunctional': value = value.split ('", "'' or line[0] == '\\n': break return servers def str2list", "(DEFAULT_METHODS) + 1) / 2 for i in range (half):", "0 failok = 0 notimp = 0 try: opts,args =", "(%3.2f%%)\" % \\ (failok, 100.0 * failok / total)) if", "for i in s.split (','): if i.find ('-') != -1:", "= 1 l = list(l.keys ()) l.sort () return l", "s['name']) if name in s['nonfunctional']: print(title, \"succeeded despite marked nonfunctional\")", "opt in ('-m', '--method'): if arg == '?': methodUsage ()", "in ('-s', '--servers'): servers = arg else: raise AttributeError(\"Recognized but", "'--debug'): SOAP.Config.debug = 1 elif opt in ('-i', '--invert'): invert", "if 'F' in output: t = 'as expected' if s['nonfunctional'][name]", "serv = serv._sa (sa % {'methodname':'Ping'}) return serv.Ping() def main():", "this usage -d, --debug turn on debugging in the SOAP", "\\ \"specified using a\\ncomma-separated list of numbers or ranges. \"", "% {'methodname':'SimpleBuy'}) return serv.SimpleBuy(ProductName=\"widget\", Quantity = 50, Address = \"this", "sys.stderr print(\"Methods are specified by number. 
Multiple methods can be", "(i + 1 + half, DEFAULT_METHODS[i + half]), end=' ')", "line.split (':', 1) tag = tag.strip ().lower () value =", "% (sys.argv[0], DEFAULT_SERVERS_FILE), end=' ') sys.exit (0) def methodUsage ():", "(s): l = {} for i in s.split (','): if", "%s (#%d)' % (s['name'], name, num) try: fn = globals", "\"_typename\":\"LineItem\"}) itemd2 = SOAP.structType( {\"name\":\"widg2\",\"quantity\":400,\"price\":SOAP.decimalType(33.45), \"_typename\":\"LineItem\"}) items_d = SOAP.arrayType( [itemd1,", "if tag == 'nonfunctional': value = method + ' '", "if 'n' in output: print(title, \"test not yet implemented\") notimp", "%t, sys.exc_info()[1]) failok += 1 else: if 'f' in output:", "% \\ (failok, 100.0 * failok / total)) if stats", "in opts: if opt in ('-?', '--help'): usage () elif", "4, 5, 6, and 8.\\n\") print(\"The available methods are:\\n\") half", "items_d = SOAP.arrayType( [itemd1, itemd2] ) items_d._ns = \"http://www.soapinterop.org/Bid\" po_d", "opts,args = getopt.getopt (sys.argv[1:], '?dm:io:s:t', ['help', 'method', 'debug', 'invert', 'output',", "billTo_d = {\"name\":\"<NAME>\", \"address\":\"1 1st Street\", \"city\":\"New York\", \"state\":\"NY\", \"zipCode\":\"10000\"}", "opt in ('-i', '--invert'): invert = 1 elif opt in", "= serv._sa (sa % {'methodname':'Ping'}) return serv.Ping() def main(): servers", "1 total = 0 fail = 0 failok = 0", "each unexpected failure -T, --always-stacktrace print a stack trace on", "are specified by number. Multiple methods can be \" \\", "+ cur[tag][method] else: value = cur[tag] value += ' '", "+= 1 if stats: print(\" Tests ended at:\", time.ctime (time.time()))", "given methods, specify a METHOD# of ? for the list", "str (sys.exc_info()[1])) fail += 1 if stats: print(\" Tests ended", "EasySoap, and Jake (Frontier). 
[Actzero accepts either] return serv.Buy(PO=po_d) except:", "' ' + cur[tag][method] else: value = cur[tag] value +=", "() if line == '' or line[0] == '\\n': break", "def main(): servers = DEFAULT_SERVERS_FILE methodnums = None output =", "= 0 printtrace = 0 stats = 1 total =", "'f' in output: print(title, \"failed -\", str (sys.exc_info()[1])) fail +=", "DEFAULT_METHODS[i + half]), end=' ') print() sys.exit (0) def readServers", "1) / 2 for i in range (half): print(\"%4d. %-25s\"", "list of numbers or ranges. \" \\ \"For example 1,4-6,8", "\"failed -\", str (sys.exc_info()[1])) fail += 1 if stats: print(\"", "limitre = re.compile ('|'.join (args), re.IGNORECASE) for s in servers:", "cur[tag] = value line = f.readline () if line ==", "python import getopt import sys import string import re import", "tag == 'nonfunctional': value = method + ' ' +", "if stats > 0: print(\" Total tests: %d\" % total)", "ended at:\", time.ctime (time.time())) if stats > 0: print(\" Total", "opt in ('-s', '--servers'): servers = arg else: raise AttributeError(\"Recognized", "KeyboardInterrupt: print(\"fail\") raise except: if name in s['nonfunctional']: if 'F'", "= 0 try: opts,args = getopt.getopt (sys.argv[1:], '?dm:io:s:t', ['help', 'method',", "[options] [server ...] 
If a long option shows an argument", "{'methodname':'SimpleBuy'}) return serv.SimpleBuy(ProductName=\"widget\", Quantity = 50, Address = \"this is", "trace on any failure \"\"\" % (sys.argv[0], DEFAULT_SERVERS_FILE), end=' ')", "invert = 1 elif opt in ('-m', '--method'): if arg", "--debug turn on debugging in the SOAP library -i, --invert", "DEFAULT_SERVERS_FILE = './inventory.servers' DEFAULT_METHODS = ('SimpleBuy', 'RequestForQuote','Buy','Ping') def usage (error", "else: raise AttributeError(\"Recognized but unimplemented option `%s'\" % opt) except", "is mandatory, it's mandatory for the equivalent short option also.", "-t, --stacktrace print a stack trace on each unexpected failure", "= \"http://www.soapinterop.org/Bid\" po_d = SOAP.structType( data = {\"poID\":\"myord\",\"createDate\":SOAP.dateTimeType(),\"shipTo\":shipTo_d, \"billTo\":billTo_d, \"items\":items_d})", "SOAP.structType( {\"name\":\"widg2\",\"quantity\":400,\"price\":SOAP.decimalType(33.45), \"_typename\":\"LineItem\"}) items_d = SOAP.arrayType( [itemd1, itemd2] ) items_d._ns", "or more of s(uccess), f(ailure), n(ot implemented), F(ailed (as expected)),", "% \\ (notimp, 100.0 * notimp / total)) return fail", "output: print(title, \"failed -\", str (sys.exc_info()[1])) fail += 1 if", "number. Multiple methods can be \" \\ \"specified using a\\ncomma-separated", "% (i + 1, DEFAULT_METHODS[i]), end=' ') if i +", "l = list(l.keys ()) l.sort () return l def SimpleBuy(serv,", "yet implemented\") notimp += 1 continue try: res = fn", "i + half < len (DEFAULT_METHODS): print(\"%4d. %-25s\" % (i", "equivalent short option also. 
-?, --help display this usage -d,", "long option shows an argument is mandatory, it's mandatory for", "value = method + ' ' + cur[tag][method] else: value", "+= 1 except KeyboardInterrupt: print(\"fail\") raise except: if name in", "\"succeeded \") succeed += 1 except KeyboardInterrupt: print(\"fail\") raise except:", "t = 'as expected' if s['nonfunctional'][name] != '': t +=", "(len (DEFAULT_METHODS) + 1) / 2 for i in range", "'--help'): usage () elif opt in ('-d', '--debug'): SOAP.Config.debug =", "methodUsage (): sys.stdout = sys.stderr print(\"Methods are specified by number.", "elif opt in ('-m', '--method'): if arg == '?': methodUsage", "stats > 0: print(\" Failed as expected: %d (%3.2f%%)\" %", "('-') for i in range (int (i[0]),int (i[1]) + 1):", "epname): import copy serv = serv._sa (sa % {'methodname':'Buy'}) billTo_d", "(MS SOAP Toolkit), JHawk (.NET Remoting), # Idoox WASP, Paul", "\"state\":\"NY \", \"zipCode\":\"10000 \"} for k,v in list(shipTo_d.items()): shipTo_d[k] =", "failok = 0 notimp = 0 try: opts,args = getopt.getopt", "output: output = 'fFns' servers = readServers(servers) if methodnums ==", "100.0 * failok / total)) if stats > 0 or", "(0) def readServers (file): servers = [] f = open", "methods are:\\n\") half = (len (DEFAULT_METHODS) + 1) / 2", "def Buy(serv, sa, epname): import copy serv = serv._sa (sa", "SimpleBuy(serv, sa, epname): serv = serv._sa (sa % {'methodname':'SimpleBuy'}) return", "(sys.exc_info ()[1]) if 'a' in output: output = 'fFns' servers", "('-s', '--servers'): servers = arg else: raise AttributeError(\"Recognized but unimplemented", "DEFAULT_SERVERS_FILE), end=' ') sys.exit (0) def methodUsage (): sys.stdout =", "= serv._sa (sa % {'methodname':'Buy'}) billTo_d = {\"name\":\"<NAME>\", \"address\":\"1 1st", "(file): servers = [] f = open (file, 'r') while", "('-') != -1: i = i.split ('-') for i in", "'RequestForQuote','Buy','Ping') def usage (error = None): sys.stdout = sys.stderr if", "it's called PO by MST (MS SOAP 
Toolkit), JHawk (.NET", "= DEFAULT_METHODS[num - 1] title = '%s: %s (#%d)' %", "print(\"\"\"usage: %s [options] [server ...] If a long option shows", "--always-stacktrace print a stack trace on any failure \"\"\" %", "= i.split ('-') for i in range (int (i[0]),int (i[1])", "invert = 0 succeed = 0 printtrace = 0 stats", "list(shipTo_d.items()): shipTo_d[k] = v[:-1] itemd1 = SOAP.structType( {\"name\":\"widg1\",\"quantity\":200,\"price\":SOAP.decimalType(45.99), \"_typename\":\"LineItem\"}) itemd2", "= None servers.append (cur) while 1: if line[0] in string.whitespace:", "printtrace = 0 stats = 1 total = 0 fail", "%d (%3.2f%%)\" % \\ (notimp, 100.0 * notimp / total))", "(i + 1, DEFAULT_METHODS[i]), end=' ') if i + half", "sys.exit (0) def methodUsage (): sys.stdout = sys.stderr print(\"Methods are", "half]), end=' ') print() sys.exit (0) def readServers (file): servers", "None: methodnums = list(range(1, len (DEFAULT_METHODS) + 1)) limitre =", "re.IGNORECASE) for s in servers: if (not not limitre.match (s['name']))", "= './inventory.servers' DEFAULT_METHODS = ('SimpleBuy', 'RequestForQuote','Buy','Ping') def usage (error =", "itemd1 = SOAP.structType( {\"name\":\"widg1\",\"quantity\":200,\"price\":SOAP.decimalType(45.99), \"_typename\":\"LineItem\"}) itemd2 = SOAP.structType( {\"name\":\"widg2\",\"quantity\":400,\"price\":SOAP.decimalType(33.45), \"_typename\":\"LineItem\"})", "try: opts,args = getopt.getopt (sys.argv[1:], '?dm:io:s:t', ['help', 'method', 'debug', 'invert',", "namespace = s['namespace']) for num in (methodnums): if num >", "#!/usr/bin/env python import getopt import sys import string import re", "order of params def RequestForQuote(serv, sa, epname): serv = serv._sa", "serv._sa (sa % {'methodname':'SimpleBuy'}) return serv.SimpleBuy(ProductName=\"widget\", Quantity = 50, Address", "unexpected failure -T, --always-stacktrace print a stack trace on any", "name = DEFAULT_METHODS[num - 1] title = '%s: %s (#%d)'", "100.0 * fail / total)) if stats > 0: 
print(\"", "but unimplemented option `%s'\" % opt) except SystemExit: raise except:", "(' ', 1) + [''] method = value[0] cur[tag][method] =", "if i.find ('-') != -1: i = i.split ('-') for", "failure -T, --always-stacktrace print a stack trace on any failure", "print(\" Total tests: %d\" % total) print(\" Successes: %d (%3.2f%%)\"", "import copy serv = serv._sa (sa % {'methodname':'Buy'}) billTo_d =", "(fail, 100.0 * fail / total)) if stats > 0:", "(%3.2f%%)\" % \\ (notimp, 100.0 * notimp / total)) return", "string import re import time sys.path.insert(1,\"..\") from SOAPpy import SOAP", "# it's called PO by MST (MS SOAP Toolkit), JHawk", "value.split (' ', 1) + [''] method = value[0] cur[tag][method]", "\"state\":\"NY\", \"zipCode\":\"10000\"} shipTo_d = {\"name\":\"<NAME> \", \"address\":\"1 1st Street \",", "using a\\ncomma-separated list of numbers or ranges. \" \\ \"For", "None servers.append (cur) while 1: if line[0] in string.whitespace: if", "copy serv = serv._sa (sa % {'methodname':'Buy'}) billTo_d = {\"name\":\"<NAME>\",", "(methodnums): if num > len(DEFAULT_METHODS): break total += 1 name", "\"test not yet implemented\") notimp += 1 continue try: res", "Quantity = 50, Address = \"this is my address\") #JHawk,", "* notimp / total)) return fail + notimp if __name__", "Ping(serv, sa, epname): serv = serv._sa (sa % {'methodname':'Ping'}) return", "tag = None servers.append (cur) while 1: if line[0] in", "--help display this usage -d, --debug turn on debugging in", "= '%s: %s (#%d)' % (s['name'], name, num) try: fn", "() if value[0] == '\"' and value[-1] == '\"': value", "method = value[0] cur[tag][method] = value[1] else: cur[tag] = value", "(','): if i.find ('-') != -1: i = i.split ('-')", "\"billTo\":billTo_d, \"items\":items_d}) try: # it's called PO by MST (MS", "== None: methodnums = list(range(1, len (DEFAULT_METHODS) + 1)) limitre", "1 elif opt in ('-i', '--invert'): invert = 1 elif", "l[int (i)] = 1 l = list(l.keys ()) l.sort ()", "/ 2 for i in range 
(half): print(\"%4d. %-25s\" %", "'\"': value = value[1:-1] if tag == 'nonfunctional': value =", "fail / total)) if stats > 0: print(\" Failed as", "in output: output = 'fFns' servers = readServers(servers) if methodnums", "(): sys.stdout = sys.stderr print(\"Methods are specified by number. Multiple", "(SOAP::Lite), PranishK (ATL), GLUE, Aumsoft, # HP, EasySoap, and Jake", "1)) limitre = re.compile ('|'.join (args), re.IGNORECASE) for s in", "/ total)) if stats > 0: print(\" Failed as expected:", "of servers given -m, --method=METHOD#[,METHOD#...] call only the given methods,", "readServers(servers) if methodnums == None: methodnums = list(range(1, len (DEFAULT_METHODS)", "\"succeeded despite marked nonfunctional\") elif 's' in output: print(title, \"succeeded", "(arg) elif opt in ('-o', '--output'): output = arg elif", "if error != None: print(error) print(\"\"\"usage: %s [options] [server ...]", "import getopt import sys import string import re import time", "SOAP.Config.debug = 1 elif opt in ('-i', '--invert'): invert =", "available methods are:\\n\") half = (len (DEFAULT_METHODS) + 1) /", "print(\" Failed as expected: %d (%3.2f%%)\" % \\ (failok, 100.0", "mandatory for the equivalent short option also. 
-?, --help display", "if tag == 'nonfunctional': value = value.split (' ', 1)", "traceback DEFAULT_SERVERS_FILE = './inventory.servers' DEFAULT_METHODS = ('SimpleBuy', 'RequestForQuote','Buy','Ping') def usage", "in the SOAP library -i, --invert test servers *not* in", "JHawk def Buy(serv, sa, epname): import copy serv = serv._sa", "'method', 'debug', 'invert', 'output', 'servers=']) for opt, arg in opts:", "implemented: %d (%3.2f%%)\" % \\ (notimp, 100.0 * notimp /", "= DEFAULT_SERVERS_FILE methodnums = None output = 'f' invert =", "= {\"name\":\"<NAME> \", \"address\":\"1 1st Street \", \"city\":\"New York \",", "\"address\":\"1 1st Street \", \"city\":\"New York \", \"state\":\"NY \", \"zipCode\":\"10000", "stack trace on any failure \"\"\" % (sys.argv[0], DEFAULT_SERVERS_FILE), end='", "--method=METHOD#[,METHOD#...] call only the given methods, specify a METHOD# of", "break return servers def str2list (s): l = {} for", "if (not not limitre.match (s['name'])) == invert: continue serv =", "serv = SOAP.SOAPProxy(s['endpoint'], namespace = s['namespace']) for num in (methodnums):", "for opt, arg in opts: if opt in ('-?', '--help'):", "-o, --output=TYPE turn on output, TYPE is one or more", "{'methodname':'Buy'}) billTo_d = {\"name\":\"<NAME>\", \"address\":\"1 1st Street\", \"city\":\"New York\", \"state\":\"NY\",", "itemd2] ) items_d._ns = \"http://www.soapinterop.org/Bid\" po_d = SOAP.structType( data =", "half = (len (DEFAULT_METHODS) + 1) / 2 for i", "output: print(title, \"succeeded \") succeed += 1 except KeyboardInterrupt: print(\"fail\")", "in ('-?', '--help'): usage () elif opt in ('-d', '--debug'):", "(failok, 100.0 * failok / total)) if stats > 0", "try: fn = globals ()[name] except KeyboardInterrupt: raise except: if", "a long option shows an argument is mandatory, it's mandatory" ]
[ "if file not in [\"compile.py\", \"utils\", \"requirements.txt\", \"build\", \"dist\", \"main.py\",", "entry: entry = json.loads(entry.read()) startcmd = entry['plugin_start_cmd'].split(\"%TP_PLUGIN_FOLDER%\")[1].split(\"\\\\\") filedirectory = startcmd[0]", "= startcmd[0] fileName = startcmd[1] if os.path.exists(filedirectory): os.remove(os.path.join(os.getcwd(), \"WinTools\")) else:", "\"main.spec\", \"__pycache__\", \"temp\"]: print(\"copying\", file) shutil.copy(os.path.join(os.getcwd(), file), os.path.join(\"temp\", filedirectory)) os.rename(\"dist\\Main.exe\",", "\"requirements.txt\", \"build\", \"dist\", \"main.py\", \"main.spec\", \"__pycache__\", \"temp\"]: print(\"copying\", file) shutil.copy(os.path.join(os.getcwd(),", "\"temp\"]: print(\"copying\", file) shutil.copy(os.path.join(os.getcwd(), file), os.path.join(\"temp\", filedirectory)) os.rename(\"dist\\Main.exe\", \"dist\\WinTools.exe\") shutil.copy(os.path.join(os.getcwd(),", "not in [\"compile.py\", \"utils\", \"requirements.txt\", \"build\", \"dist\", \"main.py\", \"main.spec\", \"__pycache__\",", "file) shutil.copy(os.path.join(os.getcwd(), file), os.path.join(\"temp\", filedirectory)) os.rename(\"dist\\Main.exe\", \"dist\\WinTools.exe\") shutil.copy(os.path.join(os.getcwd(), r\"dist\\WinTools.exe\"), \"temp/\"+filedirectory)", "shutil.copy(os.path.join(os.getcwd(), file), os.path.join(\"temp\", filedirectory)) os.rename(\"dist\\Main.exe\", \"dist\\WinTools.exe\") shutil.copy(os.path.join(os.getcwd(), r\"dist\\WinTools.exe\"), \"temp/\"+filedirectory) shutil.make_archive(base_name=\"WinTools\",", "json.loads(entry.read()) startcmd = entry['plugin_start_cmd'].split(\"%TP_PLUGIN_FOLDER%\")[1].split(\"\\\\\") filedirectory = startcmd[0] fileName = startcmd[1]", "with open(\"entry.tp\") as entry: entry = json.loads(entry.read()) startcmd = entry['plugin_start_cmd'].split(\"%TP_PLUGIN_FOLDER%\")[1].split(\"\\\\\")", "os.rename(\"dist\\Main.exe\", \"dist\\WinTools.exe\") 
shutil.copy(os.path.join(os.getcwd(), r\"dist\\WinTools.exe\"), \"temp/\"+filedirectory) shutil.make_archive(base_name=\"WinTools\", format='zip', root_dir=\"temp\", base_dir=\"WinTools\") os.rename(\"WinTools.zip\",", "for file in os.listdir(\".\"): if file not in [\"compile.py\", \"utils\",", "in [\"compile.py\", \"utils\", \"requirements.txt\", \"build\", \"dist\", \"main.py\", \"main.spec\", \"__pycache__\", \"temp\"]:", "print(\"copying\", file) shutil.copy(os.path.join(os.getcwd(), file), os.path.join(\"temp\", filedirectory)) os.rename(\"dist\\Main.exe\", \"dist\\WinTools.exe\") shutil.copy(os.path.join(os.getcwd(), r\"dist\\WinTools.exe\"),", "os.path.exists(filedirectory): os.remove(os.path.join(os.getcwd(), \"WinTools\")) else: os.makedirs(\"temp/\"+filedirectory) for file in os.listdir(\".\"): if", "\"WinTools\")) else: os.makedirs(\"temp/\"+filedirectory) for file in os.listdir(\".\"): if file not", "as entry: entry = json.loads(entry.read()) startcmd = entry['plugin_start_cmd'].split(\"%TP_PLUGIN_FOLDER%\")[1].split(\"\\\\\") filedirectory =", "\"build\", \"dist\", \"main.py\", \"main.spec\", \"__pycache__\", \"temp\"]: print(\"copying\", file) shutil.copy(os.path.join(os.getcwd(), file),", "\"main.py\", \"main.spec\", \"__pycache__\", \"temp\"]: print(\"copying\", file) shutil.copy(os.path.join(os.getcwd(), file), os.path.join(\"temp\", filedirectory))", "\"__pycache__\", \"temp\"]: print(\"copying\", file) shutil.copy(os.path.join(os.getcwd(), file), os.path.join(\"temp\", filedirectory)) os.rename(\"dist\\Main.exe\", \"dist\\WinTools.exe\")", "filedirectory = startcmd[0] fileName = startcmd[1] if os.path.exists(filedirectory): os.remove(os.path.join(os.getcwd(), \"WinTools\"))", "os import json import shutil with open(\"entry.tp\") as entry: entry", "import shutil with open(\"entry.tp\") as entry: entry = json.loads(entry.read()) startcmd", "= json.loads(entry.read()) startcmd = 
entry['plugin_start_cmd'].split(\"%TP_PLUGIN_FOLDER%\")[1].split(\"\\\\\") filedirectory = startcmd[0] fileName =", "file not in [\"compile.py\", \"utils\", \"requirements.txt\", \"build\", \"dist\", \"main.py\", \"main.spec\",", "[\"compile.py\", \"utils\", \"requirements.txt\", \"build\", \"dist\", \"main.py\", \"main.spec\", \"__pycache__\", \"temp\"]: print(\"copying\",", "import json import shutil with open(\"entry.tp\") as entry: entry =", "os.makedirs(\"temp/\"+filedirectory) for file in os.listdir(\".\"): if file not in [\"compile.py\",", "file), os.path.join(\"temp\", filedirectory)) os.rename(\"dist\\Main.exe\", \"dist\\WinTools.exe\") shutil.copy(os.path.join(os.getcwd(), r\"dist\\WinTools.exe\"), \"temp/\"+filedirectory) shutil.make_archive(base_name=\"WinTools\", format='zip',", "if os.path.exists(filedirectory): os.remove(os.path.join(os.getcwd(), \"WinTools\")) else: os.makedirs(\"temp/\"+filedirectory) for file in os.listdir(\".\"):", "fileName = startcmd[1] if os.path.exists(filedirectory): os.remove(os.path.join(os.getcwd(), \"WinTools\")) else: os.makedirs(\"temp/\"+filedirectory) for", "\"dist\", \"main.py\", \"main.spec\", \"__pycache__\", \"temp\"]: print(\"copying\", file) shutil.copy(os.path.join(os.getcwd(), file), os.path.join(\"temp\",", "shutil with open(\"entry.tp\") as entry: entry = json.loads(entry.read()) startcmd =", "open(\"entry.tp\") as entry: entry = json.loads(entry.read()) startcmd = entry['plugin_start_cmd'].split(\"%TP_PLUGIN_FOLDER%\")[1].split(\"\\\\\") filedirectory", "json import shutil with open(\"entry.tp\") as entry: entry = json.loads(entry.read())", "file in os.listdir(\".\"): if file not in [\"compile.py\", \"utils\", \"requirements.txt\",", "\"utils\", \"requirements.txt\", \"build\", \"dist\", \"main.py\", \"main.spec\", \"__pycache__\", \"temp\"]: print(\"copying\", file)", "= entry['plugin_start_cmd'].split(\"%TP_PLUGIN_FOLDER%\")[1].split(\"\\\\\") filedirectory = startcmd[0] fileName = startcmd[1] if 
os.path.exists(filedirectory):", "entry['plugin_start_cmd'].split(\"%TP_PLUGIN_FOLDER%\")[1].split(\"\\\\\") filedirectory = startcmd[0] fileName = startcmd[1] if os.path.exists(filedirectory): os.remove(os.path.join(os.getcwd(),", "\"dist\\WinTools.exe\") shutil.copy(os.path.join(os.getcwd(), r\"dist\\WinTools.exe\"), \"temp/\"+filedirectory) shutil.make_archive(base_name=\"WinTools\", format='zip', root_dir=\"temp\", base_dir=\"WinTools\") os.rename(\"WinTools.zip\", \"WinTools.tpp\")", "entry = json.loads(entry.read()) startcmd = entry['plugin_start_cmd'].split(\"%TP_PLUGIN_FOLDER%\")[1].split(\"\\\\\") filedirectory = startcmd[0] fileName", "startcmd = entry['plugin_start_cmd'].split(\"%TP_PLUGIN_FOLDER%\")[1].split(\"\\\\\") filedirectory = startcmd[0] fileName = startcmd[1] if", "else: os.makedirs(\"temp/\"+filedirectory) for file in os.listdir(\".\"): if file not in", "startcmd[0] fileName = startcmd[1] if os.path.exists(filedirectory): os.remove(os.path.join(os.getcwd(), \"WinTools\")) else: os.makedirs(\"temp/\"+filedirectory)", "os.remove(os.path.join(os.getcwd(), \"WinTools\")) else: os.makedirs(\"temp/\"+filedirectory) for file in os.listdir(\".\"): if file", "in os.listdir(\".\"): if file not in [\"compile.py\", \"utils\", \"requirements.txt\", \"build\",", "os.listdir(\".\"): if file not in [\"compile.py\", \"utils\", \"requirements.txt\", \"build\", \"dist\",", "startcmd[1] if os.path.exists(filedirectory): os.remove(os.path.join(os.getcwd(), \"WinTools\")) else: os.makedirs(\"temp/\"+filedirectory) for file in", "= startcmd[1] if os.path.exists(filedirectory): os.remove(os.path.join(os.getcwd(), \"WinTools\")) else: os.makedirs(\"temp/\"+filedirectory) for file", "filedirectory)) os.rename(\"dist\\Main.exe\", \"dist\\WinTools.exe\") shutil.copy(os.path.join(os.getcwd(), r\"dist\\WinTools.exe\"), \"temp/\"+filedirectory) shutil.make_archive(base_name=\"WinTools\", format='zip', root_dir=\"temp\", base_dir=\"WinTools\")", "import os import json import 
shutil with open(\"entry.tp\") as entry:", "os.path.join(\"temp\", filedirectory)) os.rename(\"dist\\Main.exe\", \"dist\\WinTools.exe\") shutil.copy(os.path.join(os.getcwd(), r\"dist\\WinTools.exe\"), \"temp/\"+filedirectory) shutil.make_archive(base_name=\"WinTools\", format='zip', root_dir=\"temp\"," ]
[ "+ i + 41 for j in range(2, n): if", "<reponame>tusikalanse/acm-icpc for _ in range(int(input())): x, y = list(map(int, input().split()))", "if n % j == 0: flag = 0 break", "y = list(map(int, input().split())) flag = 1 for i in", "list(map(int, input().split())) flag = 1 for i in range(x, y", "flag = 1 for i in range(x, y + 1):", "+ 1): n = i * i + i +", "i * i + i + 41 for j in", "for j in range(2, n): if j * j >", "+ 41 for j in range(2, n): if j *", "j * j > n: break if n % j", "1): n = i * i + i + 41", "in range(x, y + 1): n = i * i", "range(2, n): if j * j > n: break if", "* j > n: break if n % j ==", "break if n % j == 0: flag = 0", "1 for i in range(x, y + 1): n =", "0: flag = 0 break if flag == 0: break", "j > n: break if n % j == 0:", "n = i * i + i + 41 for", "range(int(input())): x, y = list(map(int, input().split())) flag = 1 for", "i in range(x, y + 1): n = i *", "j == 0: flag = 0 break if flag ==", "= list(map(int, input().split())) flag = 1 for i in range(x,", "break if flag == 0: break if flag: print(\"OK\") else:", "y + 1): n = i * i + i", "for _ in range(int(input())): x, y = list(map(int, input().split())) flag", "* i + i + 41 for j in range(2,", "0 break if flag == 0: break if flag: print(\"OK\")", "in range(2, n): if j * j > n: break", "flag = 0 break if flag == 0: break if", "_ in range(int(input())): x, y = list(map(int, input().split())) flag =", "> n: break if n % j == 0: flag", "41 for j in range(2, n): if j * j", "== 0: flag = 0 break if flag == 0:", "n: break if n % j == 0: flag =", "j in range(2, n): if j * j > n:", "range(x, y + 1): n = i * i +", "x, y = list(map(int, input().split())) flag = 1 for i", "= i * i + i + 41 for j", "for i in range(x, y + 1): n = i", "% j == 0: flag = 0 break if flag", "= 0 break if flag == 0: break if flag:", "= 1 for i in range(x, y + 1): n", "in range(int(input())): x, y = list(map(int, input().split())) flag = 1", "if flag == 0: break if flag: print(\"OK\") else: print(\"Sorry\")", "n % j == 0: flag = 
0 break if", "i + i + 41 for j in range(2, n):", "input().split())) flag = 1 for i in range(x, y +", "if j * j > n: break if n %", "i + 41 for j in range(2, n): if j", "n): if j * j > n: break if n" ]
[ "'sources': [ 'src/hiredis.cc' , 'src/reader.cc' ], 'include_dirs': [\"<!(node -e \\\"require('nan')\\\")\"],", "], 'defines': [ '_GNU_SOURCE' ], 'cflags': [ '-Wall', '-O3' ]", "'deps/hiredis.gyp:hiredis-c' ], 'defines': [ '_GNU_SOURCE' ], 'cflags': [ '-Wall', '-O3'", "[ 'deps/hiredis.gyp:hiredis-c' ], 'defines': [ '_GNU_SOURCE' ], 'cflags': [ '-Wall',", "\\\"require('nan')\\\")\"], 'dependencies': [ 'deps/hiredis.gyp:hiredis-c' ], 'defines': [ '_GNU_SOURCE' ], 'cflags':", "'dependencies': [ 'deps/hiredis.gyp:hiredis-c' ], 'defines': [ '_GNU_SOURCE' ], 'cflags': [", "[ { 'target_name': 'hiredis', 'sources': [ 'src/hiredis.cc' , 'src/reader.cc' ],", "'include_dirs': [\"<!(node -e \\\"require('nan')\\\")\"], 'dependencies': [ 'deps/hiredis.gyp:hiredis-c' ], 'defines': [", "'_GNU_SOURCE' ], 'cflags': [ '-Wall', '-O3' ] } ] }", "'targets': [ { 'target_name': 'hiredis', 'sources': [ 'src/hiredis.cc' , 'src/reader.cc'", "{ 'target_name': 'hiredis', 'sources': [ 'src/hiredis.cc' , 'src/reader.cc' ], 'include_dirs':", "'hiredis', 'sources': [ 'src/hiredis.cc' , 'src/reader.cc' ], 'include_dirs': [\"<!(node -e", "'src/hiredis.cc' , 'src/reader.cc' ], 'include_dirs': [\"<!(node -e \\\"require('nan')\\\")\"], 'dependencies': [", ", 'src/reader.cc' ], 'include_dirs': [\"<!(node -e \\\"require('nan')\\\")\"], 'dependencies': [ 'deps/hiredis.gyp:hiredis-c'", "'target_name': 'hiredis', 'sources': [ 'src/hiredis.cc' , 'src/reader.cc' ], 'include_dirs': [\"<!(node", "-e \\\"require('nan')\\\")\"], 'dependencies': [ 'deps/hiredis.gyp:hiredis-c' ], 'defines': [ '_GNU_SOURCE' ],", "[ 'src/hiredis.cc' , 'src/reader.cc' ], 'include_dirs': [\"<!(node -e \\\"require('nan')\\\")\"], 'dependencies':", "'defines': [ '_GNU_SOURCE' ], 'cflags': [ '-Wall', '-O3' ] }", "{ 'targets': [ { 'target_name': 'hiredis', 'sources': [ 'src/hiredis.cc' ,", "'src/reader.cc' ], 'include_dirs': [\"<!(node -e \\\"require('nan')\\\")\"], 'dependencies': [ 'deps/hiredis.gyp:hiredis-c' ],", "], 'include_dirs': 
[\"<!(node -e \\\"require('nan')\\\")\"], 'dependencies': [ 'deps/hiredis.gyp:hiredis-c' ], 'defines':", "[ '_GNU_SOURCE' ], 'cflags': [ '-Wall', '-O3' ] } ]", "[\"<!(node -e \\\"require('nan')\\\")\"], 'dependencies': [ 'deps/hiredis.gyp:hiredis-c' ], 'defines': [ '_GNU_SOURCE'" ]
[ "__init__(self, a, b): b.drive(invert, a) width = 4 a =", "return True class Inverter: def __init__(self, a, b): b.drive(invert, a)", "= Signal(width, io=\"in\") b = Signal(width, io=\"out\") Inverter(a, b) build()", "* def invert(signal): if signal: return False else: return True", "else: return True class Inverter: def __init__(self, a, b): b.drive(invert,", "<reponame>Verkhovskaya/PyDL from pywire import * def invert(signal): if signal: return", "4 a = Signal(width, io=\"in\") b = Signal(width, io=\"out\") Inverter(a,", "b): b.drive(invert, a) width = 4 a = Signal(width, io=\"in\")", "return False else: return True class Inverter: def __init__(self, a,", "True class Inverter: def __init__(self, a, b): b.drive(invert, a) width", "a = Signal(width, io=\"in\") b = Signal(width, io=\"out\") Inverter(a, b)", "signal: return False else: return True class Inverter: def __init__(self,", "pywire import * def invert(signal): if signal: return False else:", "b.drive(invert, a) width = 4 a = Signal(width, io=\"in\") b", "from pywire import * def invert(signal): if signal: return False", "a, b): b.drive(invert, a) width = 4 a = Signal(width,", "class Inverter: def __init__(self, a, b): b.drive(invert, a) width =", "invert(signal): if signal: return False else: return True class Inverter:", "False else: return True class Inverter: def __init__(self, a, b):", "width = 4 a = Signal(width, io=\"in\") b = Signal(width,", "def invert(signal): if signal: return False else: return True class", "def __init__(self, a, b): b.drive(invert, a) width = 4 a", "import * def invert(signal): if signal: return False else: return", "a) width = 4 a = Signal(width, io=\"in\") b =", "Inverter: def __init__(self, a, b): b.drive(invert, a) width = 4", "if signal: return False else: return True class Inverter: def", "= 4 a = Signal(width, io=\"in\") b = Signal(width, io=\"out\")" ]
[ "# Find the best mapping from labels to bases. all_dists", "np.expand_dims(label_counts, axis=-1) row_ind, col_ind = linear_sum_assignment(mean_dists) # dists_info: (point_cloud_index, label,", "out_dir, postfix=None): # dists_info: (point_cloud_index, label, basis_index, distance) dists =", "pred_KP): n_data = P.shape[0] n_points = P.shape[1] n_labels = KP.shape[1]", "j, all_dists[k,i,j])) dists_info = np.array(dists_info) return dists_info def save_results(dists_info, out_dir,", "for i in range(x_list.size): counts[i] = np.sum(dists <= x_list[i]) y_list", "is not None: out_file = os.path.join(out_dir, 'distances_{}.npy'.format(postfix)) else: out_file =", "1) counts = np.zeros(x_list.size, dtype=int) for i in range(x_list.size): counts[i]", "dists_info = np.array(dists_info) return dists_info def evaluate_PCK_after_label_basis_matching(P, KP, pred_KP): n_data", "out_file = os.path.join(out_dir, 'pck_{}.png'.format(postfix)) else: out_file = os.path.join(out_dir, 'pck.png') plt.savefig(out_file)", "n_points = P.shape[1] n_labels = KP.shape[1] K = pred_KP.shape[1] #", "all_dists[k,i,j] += np.linalg.norm(p_i - p_j) mean_dists = np.sum(all_dists, axis=0) /", "None: out_file = os.path.join(out_dir, 'pck_{}.png'.format(postfix)) else: out_file = os.path.join(out_dir, 'pck.png')", "data, out_dir): if not os.path.exists(out_dir): os.makedirs(out_dir) P, KP, pred_KP =", "# Skip if the keypoint does not exist. if KP[k,i]", "# Find the closest prediction (w/o matching). 
for i, label", "sess, net) assert(A.shape[0] == data.n_data) assert(A.shape[1] == data.n_points) assert(A.shape[2] ==", "K)) label_counts = np.zeros(n_labels) for k in range(n_data): for i", "pred_KP.shape[1] # Find the best mapping from labels to bases.", "np.zeros(K) idx_i = KP[k,label] assert(idx_i < n_points) p_i = P[k,idx_i]", "- p_j) j = np.argmin(all_dists) dists_info.append((k, i, j, all_dists[j])) dists_info", "= os.path.normpath( os.path.join(os.path.dirname(os.path.abspath(__file__)))) sys.path.append(os.path.join(BASE_DIR, '..')) from datasets import * from", "np.linspace(0.0, 0.1, 20 + 1) counts = np.zeros(x_list.size, dtype=int) for", "< 0: continue dists_info.append((k, i, j, all_dists[k,i,j])) dists_info = np.array(dists_info)", "postfix is not None: out_file = os.path.join(out_dir, 'pck_{}.png'.format(postfix)) else: out_file", "def evaluate(sess, net, data, out_dir): if not os.path.exists(out_dir): os.makedirs(out_dir) P,", "distance) dists_info = [] for k in range(n_data): # NOTE:", "assert(A.shape[2] == net.K) pred_KP = np.argmax(A, axis=1) return P, KP,", "continue dists_info.append((k, i, j, all_dists[k,i,j])) dists_info = np.array(dists_info) return dists_info", "# Draw plot. n_matches = dists.size x_list = np.linspace(0.0, 0.1,", "keypoint does not exist. if KP[k,i] < 0: continue idx_i", "np.zeros(n_labels) for k in range(n_data): for i in range(n_labels): #", "= np.linalg.norm(p_i - p_j) j = np.argmin(all_dists) dists_info.append((k, i, j,", "<= x_list[i]) y_list = counts.astype(x_list.dtype) / float(n_matches) plt.clf() plt.plot(x_list, y_list)", "range(n_data): for i in range(n_labels): # NOTE: # Skip if", "pred_KP.shape[1] # dists_info: (point_cloud_index, label, basis_index, distance) dists_info = []", "i, j, all_dists[k,i,j])) dists_info = np.array(dists_info) return dists_info def save_results(dists_info,", "= dists.size x_list = np.linspace(0.0, 0.1, 20 + 1) counts", "label_counts[i] += 1. 
for j in range(K): idx_j = pred_KP[k,j]", "dists_info def save_results(dists_info, out_dir, postfix=None): # dists_info: (point_cloud_index, label, basis_index,", "+ 1)) if postfix is not None: out_file = os.path.join(out_dir,", "for k in range(n_data): # NOTE: # Skip if the", "= data.keypoints assert(KP.shape[0] == data.n_data) assert(KP.shape[1] == data.n_labels) A =", "pred_KP[k,j] assert(idx_j < n_points) p_j = P[k,idx_j] all_dists[j] = np.linalg.norm(p_i", "- p_j) mean_dists = np.sum(all_dists, axis=0) / \\ np.expand_dims(label_counts, axis=-1)", "= np.zeros(x_list.size, dtype=int) for i in range(x_list.size): counts[i] = np.sum(dists", "if postfix is not None: out_file = os.path.join(out_dir, 'distances_{}.npy'.format(postfix)) else:", "Find the best mapping from labels to bases. all_dists =", "import * from scipy.optimize import linear_sum_assignment #import matplotlib.pyplot as plt", "out_file = os.path.join(out_dir, 'pck.png') plt.savefig(out_file) print(\"Saved '{}'.\".format(out_file)) ''' def evaluate(sess,", "dists_info = [] for k in range(n_data): # NOTE: #", "= P[k,idx_j] all_dists[j] = np.linalg.norm(p_i - p_j) j = np.argmin(all_dists)", "range(K): idx_j = pred_KP[k,j] assert(idx_j < n_points) p_j = P[k,idx_j]", "(i, j) in zip(row_ind, col_ind): if KP[k,i] < 0: continue", "= [] for k in range(n_data): # NOTE: # Skip", "plt.plot(x_list, y_list) plt.ylim(0., 1.) plt.yticks(np.linspace(0., 1., 10 + 1)) if", "np.zeros(x_list.size, dtype=int) for i in range(x_list.size): counts[i] = np.sum(dists <=", "from labels to bases. all_dists = np.zeros((n_data, n_labels, K)) label_counts", "if KP[k,i] < 0: continue idx_i = KP[k,i] assert(idx_i <", "print(\"Saved '{}'.\".format(out_file)) ''' # Draw plot. n_matches = dists.size x_list", "''' # Draw plot. 
n_matches = dists.size x_list = np.linspace(0.0,", "all_dists[j])) dists_info = np.array(dists_info) return dists_info def evaluate_PCK_after_label_basis_matching(P, KP, pred_KP):", "KP, pred_KP) save_results(dists, out_dir) dists_after_matching = evaluate_PCK_after_label_basis_matching( P, KP, pred_KP)", "data.n_data) assert(A.shape[1] == data.n_points) assert(A.shape[2] == net.K) pred_KP = np.argmax(A,", "1.) plt.yticks(np.linspace(0., 1., 10 + 1)) if postfix is not", "KP[k,label] assert(idx_i < n_points) p_i = P[k,idx_i] for j in", "distance) dists = dists_info[:,3] if postfix is not None: out_file", "save_results(dists_info, out_dir, postfix=None): # dists_info: (point_cloud_index, label, basis_index, distance) dists", "j) in zip(row_ind, col_ind): if KP[k,i] < 0: continue dists_info.append((k,", "< 0: continue idx_i = KP[k,i] assert(idx_i < n_points) p_i", "counts.astype(x_list.dtype) / float(n_matches) plt.clf() plt.plot(x_list, y_list) plt.ylim(0., 1.) plt.yticks(np.linspace(0., 1.,", "from datasets import * from generate_outputs import * from scipy.optimize", "os.path.normpath( os.path.join(os.path.dirname(os.path.abspath(__file__)))) sys.path.append(os.path.join(BASE_DIR, '..')) from datasets import * from generate_outputs", "KP[k,i] < 0: continue idx_i = KP[k,i] assert(idx_i < n_points)", "# April 2018 import os, sys BASE_DIR = os.path.normpath( os.path.join(os.path.dirname(os.path.abspath(__file__))))", "assert(A.shape[0] == data.n_data) assert(A.shape[1] == data.n_points) assert(A.shape[2] == net.K) pred_KP", "= np.linspace(0.0, 0.1, 20 + 1) counts = np.zeros(x_list.size, dtype=int)", "not exist. 
labels = [i for i in range(n_labels) if", "evaluate_PCK(P, KP, pred_KP) save_results(dists, out_dir) dists_after_matching = evaluate_PCK_after_label_basis_matching( P, KP,", "KP[k,i] < 0: continue dists_info.append((k, i, j, all_dists[k,i,j])) dists_info =", "* from scipy.optimize import linear_sum_assignment #import matplotlib.pyplot as plt import", "the closest prediction (w/o matching). for i, label in enumerate(labels):", "closest prediction (w/o matching). for i, label in enumerate(labels): all_dists", "KP[k,i] >= 0] # Find the closest prediction (w/o matching).", "NOTE: # Skip if the keypoint does not exist. labels", "os.makedirs(out_dir) P, KP, pred_KP = compute_all_keypoints(sess, net, data) dists =", "A = predict_A(P, sess, net) assert(A.shape[0] == data.n_data) assert(A.shape[1] ==", "net.K) pred_KP = np.argmax(A, axis=1) return P, KP, pred_KP def", "counts[i] = np.sum(dists <= x_list[i]) y_list = counts.astype(x_list.dtype) / float(n_matches)", "labels to bases. all_dists = np.zeros((n_data, n_labels, K)) label_counts =", "basis_index, distance) dists_info = [] for k in range(n_data): #", "< n_points) p_i = P[k,idx_i] for j in range(K): idx_j", "= np.zeros((n_data, n_labels, K)) label_counts = np.zeros(n_labels) for k in", "idx_j = pred_KP[k,j] assert(idx_j < n_points) p_j = P[k,idx_j] all_dists[k,i,j]", "= P[k,idx_j] all_dists[k,i,j] += np.linalg.norm(p_i - p_j) mean_dists = np.sum(all_dists,", "os.path.join(out_dir, 'distances.npy') np.save(out_file, dists) print(\"Saved '{}'.\".format(out_file)) ''' # Draw plot.", "dists.size x_list = np.linspace(0.0, 0.1, 20 + 1) counts =", "col_ind): if KP[k,i] < 0: continue dists_info.append((k, i, j, all_dists[k,i,j]))", "= np.array(dists_info) return dists_info def evaluate_PCK_after_label_basis_matching(P, KP, pred_KP): n_data =", "dists_info.append((k, i, j, all_dists[j])) dists_info = np.array(dists_info) return dists_info def", "= KP[k,i] assert(idx_i < n_points) p_i = P[k,idx_i] label_counts[i] +=", 
"np.array(dists_info) return dists_info def save_results(dists_info, out_dir, postfix=None): # dists_info: (point_cloud_index,", "axis=1) return P, KP, pred_KP def evaluate_PCK(P, KP, pred_KP): n_data", "plt import numpy as np def compute_all_keypoints(sess, net, data): P", "# <NAME> (<EMAIL>) # April 2018 import os, sys BASE_DIR", "save_results(dists, out_dir) dists_after_matching = evaluate_PCK_after_label_basis_matching( P, KP, pred_KP) save_results(dists_after_matching, out_dir,", "for k in range(n_data): for i in range(n_labels): # NOTE:", "0: continue idx_i = KP[k,i] assert(idx_i < n_points) p_i =", "# dists_info: (point_cloud_index, label, basis_index, distance) dists_info = [] for", "data.n_points) KP = data.keypoints assert(KP.shape[0] == data.n_data) assert(KP.shape[1] == data.n_labels)", "not exist. if KP[k,i] < 0: continue idx_i = KP[k,i]", "< n_points) p_j = P[k,idx_j] all_dists[k,i,j] += np.linalg.norm(p_i - p_j)", "else: out_file = os.path.join(out_dir, 'distances.npy') np.save(out_file, dists) print(\"Saved '{}'.\".format(out_file)) '''", "out_dir) dists_after_matching = evaluate_PCK_after_label_basis_matching( P, KP, pred_KP) save_results(dists_after_matching, out_dir, postfix='after_matching')", "idx_i = KP[k,label] assert(idx_i < n_points) p_i = P[k,idx_i] for", "= np.sum(dists <= x_list[i]) y_list = counts.astype(x_list.dtype) / float(n_matches) plt.clf()", "os.path.join(os.path.dirname(os.path.abspath(__file__)))) sys.path.append(os.path.join(BASE_DIR, '..')) from datasets import * from generate_outputs import", "in range(n_data): for (i, j) in zip(row_ind, col_ind): if KP[k,i]", "for i, label in enumerate(labels): all_dists = np.zeros(K) idx_i =", "dists) print(\"Saved '{}'.\".format(out_file)) ''' # Draw plot. 
n_matches = dists.size", "data.keypoints assert(KP.shape[0] == data.n_data) assert(KP.shape[1] == data.n_labels) A = predict_A(P,", "import os, sys BASE_DIR = os.path.normpath( os.path.join(os.path.dirname(os.path.abspath(__file__)))) sys.path.append(os.path.join(BASE_DIR, '..')) from", "== data.n_data) assert(A.shape[1] == data.n_points) assert(A.shape[2] == net.K) pred_KP =", "mapping from labels to bases. all_dists = np.zeros((n_data, n_labels, K))", "for (i, j) in zip(row_ind, col_ind): if KP[k,i] < 0:", "n_points) p_j = P[k,idx_j] all_dists[k,i,j] += np.linalg.norm(p_i - p_j) mean_dists", "= P.shape[1] n_labels = KP.shape[1] K = pred_KP.shape[1] # Find", "the keypoint does not exist. labels = [i for i", "= pred_KP.shape[1] # Find the best mapping from labels to", "idx_j = pred_KP[k,j] assert(idx_j < n_points) p_j = P[k,idx_j] all_dists[j]", "out_file = os.path.join(out_dir, 'distances_{}.npy'.format(postfix)) else: out_file = os.path.join(out_dir, 'distances.npy') np.save(out_file,", "os.path.exists(out_dir): os.makedirs(out_dir) P, KP, pred_KP = compute_all_keypoints(sess, net, data) dists", "for i in range(n_labels) if KP[k,i] >= 0] # Find", "= pred_KP[k,j] assert(idx_j < n_points) p_j = P[k,idx_j] all_dists[k,i,j] +=", "# dists_info: (point_cloud_index, label, basis_index, distance) dists = dists_info[:,3] if", "datasets import * from generate_outputs import * from scipy.optimize import", "not os.path.exists(out_dir): os.makedirs(out_dir) P, KP, pred_KP = compute_all_keypoints(sess, net, data)", "np def compute_all_keypoints(sess, net, data): P = data.point_clouds assert(P.shape[0] ==", "in range(n_labels): # NOTE: # Skip if the keypoint does", "for j in range(K): idx_j = pred_KP[k,j] assert(idx_j < n_points)", "pred_KP = compute_all_keypoints(sess, net, data) dists = evaluate_PCK(P, KP, pred_KP)", "20 + 1) counts = np.zeros(x_list.size, dtype=int) for i in", "(w/o matching). 
for i, label in enumerate(labels): all_dists = np.zeros(K)", "p_i = P[k,idx_i] label_counts[i] += 1. for j in range(K):", "i, j, all_dists[j])) dists_info = np.array(dists_info) return dists_info def evaluate_PCK_after_label_basis_matching(P,", "assert(P.shape[1] == data.n_points) KP = data.keypoints assert(KP.shape[0] == data.n_data) assert(KP.shape[1]", "= os.path.join(out_dir, 'pck.png') plt.savefig(out_file) print(\"Saved '{}'.\".format(out_file)) ''' def evaluate(sess, net,", "label, basis_index, distance) dists_info = [] for k in range(n_data):", "plt.savefig(out_file) print(\"Saved '{}'.\".format(out_file)) ''' def evaluate(sess, net, data, out_dir): if", "#import matplotlib.pyplot as plt import numpy as np def compute_all_keypoints(sess,", "NOTE: # Skip if the keypoint does not exist. if", "def evaluate_PCK(P, KP, pred_KP): n_data = P.shape[0] n_points = P.shape[1]", "= [i for i in range(n_labels) if KP[k,i] >= 0]", "K = pred_KP.shape[1] # Find the best mapping from labels", "os.path.join(out_dir, 'pck_{}.png'.format(postfix)) else: out_file = os.path.join(out_dir, 'pck.png') plt.savefig(out_file) print(\"Saved '{}'.\".format(out_file))", "i in range(x_list.size): counts[i] = np.sum(dists <= x_list[i]) y_list =", "None: out_file = os.path.join(out_dir, 'distances_{}.npy'.format(postfix)) else: out_file = os.path.join(out_dir, 'distances.npy')", "dists = evaluate_PCK(P, KP, pred_KP) save_results(dists, out_dir) dists_after_matching = evaluate_PCK_after_label_basis_matching(", "2018 import os, sys BASE_DIR = os.path.normpath( os.path.join(os.path.dirname(os.path.abspath(__file__)))) sys.path.append(os.path.join(BASE_DIR, '..'))", "in zip(row_ind, col_ind): if KP[k,i] < 0: continue dists_info.append((k, i,", "exist. labels = [i for i in range(n_labels) if KP[k,i]", "does not exist. if KP[k,i] < 0: continue idx_i =", "range(n_data): # NOTE: # Skip if the keypoint does not", "Skip if the keypoint does not exist. 
labels = [i", "< n_points) p_j = P[k,idx_j] all_dists[j] = np.linalg.norm(p_i - p_j)", "net, data, out_dir): if not os.path.exists(out_dir): os.makedirs(out_dir) P, KP, pred_KP", "dists_info: (point_cloud_index, label, basis_index, distance) dists = dists_info[:,3] if postfix", "n_labels = KP.shape[1] K = pred_KP.shape[1] # dists_info: (point_cloud_index, label,", "j = np.argmin(all_dists) dists_info.append((k, i, j, all_dists[j])) dists_info = np.array(dists_info)", "n_labels = KP.shape[1] K = pred_KP.shape[1] # Find the best", "KP[k,i] assert(idx_i < n_points) p_i = P[k,idx_i] label_counts[i] += 1.", "+ 1) counts = np.zeros(x_list.size, dtype=int) for i in range(x_list.size):", "plt.clf() plt.plot(x_list, y_list) plt.ylim(0., 1.) plt.yticks(np.linspace(0., 1., 10 + 1))", "n_matches = dists.size x_list = np.linspace(0.0, 0.1, 20 + 1)", "assert(idx_i < n_points) p_i = P[k,idx_i] label_counts[i] += 1. for", "data.n_points) assert(A.shape[2] == net.K) pred_KP = np.argmax(A, axis=1) return P,", "= np.sum(all_dists, axis=0) / \\ np.expand_dims(label_counts, axis=-1) row_ind, col_ind =", "assert(A.shape[1] == data.n_points) assert(A.shape[2] == net.K) pred_KP = np.argmax(A, axis=1)", "matplotlib.pyplot as plt import numpy as np def compute_all_keypoints(sess, net,", "as np def compute_all_keypoints(sess, net, data): P = data.point_clouds assert(P.shape[0]", "= np.argmin(all_dists) dists_info.append((k, i, j, all_dists[j])) dists_info = np.array(dists_info) return", "= KP[k,label] assert(idx_i < n_points) p_i = P[k,idx_i] for j", "Find the closest prediction (w/o matching). for i, label in", "Draw plot. 
n_matches = dists.size x_list = np.linspace(0.0, 0.1, 20", "P = data.point_clouds assert(P.shape[0] == data.n_data) assert(P.shape[1] == data.n_points) KP", "os.path.join(out_dir, 'distances_{}.npy'.format(postfix)) else: out_file = os.path.join(out_dir, 'distances.npy') np.save(out_file, dists) print(\"Saved", "range(x_list.size): counts[i] = np.sum(dists <= x_list[i]) y_list = counts.astype(x_list.dtype) /", "dists_info = [] for k in range(n_data): for (i, j)", "KP, pred_KP = compute_all_keypoints(sess, net, data) dists = evaluate_PCK(P, KP,", "== data.n_data) assert(P.shape[1] == data.n_points) KP = data.keypoints assert(KP.shape[0] ==", "i, label in enumerate(labels): all_dists = np.zeros(K) idx_i = KP[k,label]", "the keypoint does not exist. if KP[k,i] < 0: continue", ">= 0] # Find the closest prediction (w/o matching). for", "j in range(K): idx_j = pred_KP[k,j] assert(idx_j < n_points) p_j", "k in range(n_data): for (i, j) in zip(row_ind, col_ind): if", "all_dists = np.zeros(K) idx_i = KP[k,label] assert(idx_i < n_points) p_i", "bases. all_dists = np.zeros((n_data, n_labels, K)) label_counts = np.zeros(n_labels) for", "evaluate_PCK_after_label_basis_matching(P, KP, pred_KP): n_data = P.shape[0] n_points = P.shape[1] n_labels", "K = pred_KP.shape[1] # dists_info: (point_cloud_index, label, basis_index, distance) dists_info", "in enumerate(labels): all_dists = np.zeros(K) idx_i = KP[k,label] assert(idx_i <", "''' def evaluate(sess, net, data, out_dir): if not os.path.exists(out_dir): os.makedirs(out_dir)", "col_ind = linear_sum_assignment(mean_dists) # dists_info: (point_cloud_index, label, basis_index, distance) dists_info", "linear_sum_assignment #import matplotlib.pyplot as plt import numpy as np def", "P.shape[1] n_labels = KP.shape[1] K = pred_KP.shape[1] # dists_info: (point_cloud_index,", "if postfix is not None: out_file = os.path.join(out_dir, 'pck_{}.png'.format(postfix)) else:", "# Skip if the keypoint does not exist. 
labels =", "= np.zeros(K) idx_i = KP[k,label] assert(idx_i < n_points) p_i =", "y_list) plt.ylim(0., 1.) plt.yticks(np.linspace(0., 1., 10 + 1)) if postfix", "April 2018 import os, sys BASE_DIR = os.path.normpath( os.path.join(os.path.dirname(os.path.abspath(__file__)))) sys.path.append(os.path.join(BASE_DIR,", "KP = data.keypoints assert(KP.shape[0] == data.n_data) assert(KP.shape[1] == data.n_labels) A", "= KP.shape[1] K = pred_KP.shape[1] # Find the best mapping", "n_points) p_i = P[k,idx_i] label_counts[i] += 1. for j in", "= np.argmax(A, axis=1) return P, KP, pred_KP def evaluate_PCK(P, KP,", "dists_info.append((k, i, j, all_dists[k,i,j])) dists_info = np.array(dists_info) return dists_info def", "= P[k,idx_i] label_counts[i] += 1. for j in range(K): idx_j", "n_points) p_i = P[k,idx_i] for j in range(K): idx_j =", "1)) if postfix is not None: out_file = os.path.join(out_dir, 'pck_{}.png'.format(postfix))", "data.n_data) assert(P.shape[1] == data.n_points) KP = data.keypoints assert(KP.shape[0] == data.n_data)", "<NAME> (<EMAIL>) # April 2018 import os, sys BASE_DIR =", "pred_KP[k,j] assert(idx_j < n_points) p_j = P[k,idx_j] all_dists[k,i,j] += np.linalg.norm(p_i", "in range(n_data): # NOTE: # Skip if the keypoint does", "* from generate_outputs import * from scipy.optimize import linear_sum_assignment #import", "= compute_all_keypoints(sess, net, data) dists = evaluate_PCK(P, KP, pred_KP) save_results(dists,", "return dists_info def save_results(dists_info, out_dir, postfix=None): # dists_info: (point_cloud_index, label,", "0] # Find the closest prediction (w/o matching). 
for i,", "linear_sum_assignment(mean_dists) # dists_info: (point_cloud_index, label, basis_index, distance) dists_info = []", "'pck_{}.png'.format(postfix)) else: out_file = os.path.join(out_dir, 'pck.png') plt.savefig(out_file) print(\"Saved '{}'.\".format(out_file)) '''", "labels = [i for i in range(n_labels) if KP[k,i] >=", "row_ind, col_ind = linear_sum_assignment(mean_dists) # dists_info: (point_cloud_index, label, basis_index, distance)", "net) assert(A.shape[0] == data.n_data) assert(A.shape[1] == data.n_points) assert(A.shape[2] == net.K)", "= [] for k in range(n_data): for (i, j) in", "0.1, 20 + 1) counts = np.zeros(x_list.size, dtype=int) for i", "= os.path.join(out_dir, 'distances_{}.npy'.format(postfix)) else: out_file = os.path.join(out_dir, 'distances.npy') np.save(out_file, dists)", "range(n_labels): # NOTE: # Skip if the keypoint does not", "dists_info def evaluate_PCK_after_label_basis_matching(P, KP, pred_KP): n_data = P.shape[0] n_points =", "(<EMAIL>) # April 2018 import os, sys BASE_DIR = os.path.normpath(", "k in range(n_data): for i in range(n_labels): # NOTE: #", "basis_index, distance) dists_info = [] for k in range(n_data): for", "if not os.path.exists(out_dir): os.makedirs(out_dir) P, KP, pred_KP = compute_all_keypoints(sess, net,", "== data.n_points) KP = data.keypoints assert(KP.shape[0] == data.n_data) assert(KP.shape[1] ==", "postfix=None): # dists_info: (point_cloud_index, label, basis_index, distance) dists = dists_info[:,3]", "does not exist. 
labels = [i for i in range(n_labels)", "# NOTE: # Skip if the keypoint does not exist.", "'{}'.\".format(out_file)) ''' def evaluate(sess, net, data, out_dir): if not os.path.exists(out_dir):", "P.shape[0] n_points = P.shape[1] n_labels = KP.shape[1] K = pred_KP.shape[1]", "all_dists = np.zeros((n_data, n_labels, K)) label_counts = np.zeros(n_labels) for k", "n_labels, K)) label_counts = np.zeros(n_labels) for k in range(n_data): for", "np.linalg.norm(p_i - p_j) mean_dists = np.sum(all_dists, axis=0) / \\ np.expand_dims(label_counts,", "all_dists[k,i,j])) dists_info = np.array(dists_info) return dists_info def save_results(dists_info, out_dir, postfix=None):", "counts = np.zeros(x_list.size, dtype=int) for i in range(x_list.size): counts[i] =", "plot. n_matches = dists.size x_list = np.linspace(0.0, 0.1, 20 +", "== data.n_labels) A = predict_A(P, sess, net) assert(A.shape[0] == data.n_data)", "assert(idx_j < n_points) p_j = P[k,idx_j] all_dists[k,i,j] += np.linalg.norm(p_i -", "= np.array(dists_info) return dists_info def save_results(dists_info, out_dir, postfix=None): # dists_info:", "= np.zeros(n_labels) for k in range(n_data): for i in range(n_labels):", "j, all_dists[j])) dists_info = np.array(dists_info) return dists_info def evaluate_PCK_after_label_basis_matching(P, KP,", "sys.path.append(os.path.join(BASE_DIR, '..')) from datasets import * from generate_outputs import *", "x_list[i]) y_list = counts.astype(x_list.dtype) / float(n_matches) plt.clf() plt.plot(x_list, y_list) plt.ylim(0.,", "return P, KP, pred_KP def evaluate_PCK(P, KP, pred_KP): n_data =", "P, KP, pred_KP = compute_all_keypoints(sess, net, data) dists = evaluate_PCK(P,", "is not None: out_file = os.path.join(out_dir, 'pck_{}.png'.format(postfix)) else: out_file =", "return dists_info def evaluate_PCK_after_label_basis_matching(P, KP, pred_KP): n_data = P.shape[0] n_points", "Skip if the keypoint does not exist. 
if KP[k,i] <", "i in range(n_labels) if KP[k,i] >= 0] # Find the", "p_i = P[k,idx_i] for j in range(K): idx_j = pred_KP[k,j]", "P[k,idx_j] all_dists[k,i,j] += np.linalg.norm(p_i - p_j) mean_dists = np.sum(all_dists, axis=0)", "predict_A(P, sess, net) assert(A.shape[0] == data.n_data) assert(A.shape[1] == data.n_points) assert(A.shape[2]", "p_j = P[k,idx_j] all_dists[k,i,j] += np.linalg.norm(p_i - p_j) mean_dists =", "'pck.png') plt.savefig(out_file) print(\"Saved '{}'.\".format(out_file)) ''' def evaluate(sess, net, data, out_dir):", "p_j) j = np.argmin(all_dists) dists_info.append((k, i, j, all_dists[j])) dists_info =", "P.shape[1] n_labels = KP.shape[1] K = pred_KP.shape[1] # Find the", "np.argmax(A, axis=1) return P, KP, pred_KP def evaluate_PCK(P, KP, pred_KP):", "for k in range(n_data): for (i, j) in zip(row_ind, col_ind):", "pred_KP def evaluate_PCK(P, KP, pred_KP): n_data = P.shape[0] n_points =", "n_points) p_j = P[k,idx_j] all_dists[j] = np.linalg.norm(p_i - p_j) j", "label in enumerate(labels): all_dists = np.zeros(K) idx_i = KP[k,label] assert(idx_i", "if KP[k,i] < 0: continue dists_info.append((k, i, j, all_dists[k,i,j])) dists_info", "/ float(n_matches) plt.clf() plt.plot(x_list, y_list) plt.ylim(0., 1.) plt.yticks(np.linspace(0., 1., 10", "KP, pred_KP): n_data = P.shape[0] n_points = P.shape[1] n_labels =", "data.n_labels) A = predict_A(P, sess, net) assert(A.shape[0] == data.n_data) assert(A.shape[1]", "sys BASE_DIR = os.path.normpath( os.path.join(os.path.dirname(os.path.abspath(__file__)))) sys.path.append(os.path.join(BASE_DIR, '..')) from datasets import", "if the keypoint does not exist. if KP[k,i] < 0:", "< n_points) p_i = P[k,idx_i] label_counts[i] += 1. 
for j", "net, data): P = data.point_clouds assert(P.shape[0] == data.n_data) assert(P.shape[1] ==", "assert(KP.shape[0] == data.n_data) assert(KP.shape[1] == data.n_labels) A = predict_A(P, sess,", "np.array(dists_info) return dists_info def evaluate_PCK_after_label_basis_matching(P, KP, pred_KP): n_data = P.shape[0]", "plt.ylim(0., 1.) plt.yticks(np.linspace(0., 1., 10 + 1)) if postfix is", "= pred_KP.shape[1] # dists_info: (point_cloud_index, label, basis_index, distance) dists_info =", "dists_info[:,3] if postfix is not None: out_file = os.path.join(out_dir, 'distances_{}.npy'.format(postfix))", "numpy as np def compute_all_keypoints(sess, net, data): P = data.point_clouds", "KP.shape[1] K = pred_KP.shape[1] # Find the best mapping from", "if KP[k,i] >= 0] # Find the closest prediction (w/o", "p_j) mean_dists = np.sum(all_dists, axis=0) / \\ np.expand_dims(label_counts, axis=-1) row_ind,", "os, sys BASE_DIR = os.path.normpath( os.path.join(os.path.dirname(os.path.abspath(__file__)))) sys.path.append(os.path.join(BASE_DIR, '..')) from datasets", "np.argmin(all_dists) dists_info.append((k, i, j, all_dists[j])) dists_info = np.array(dists_info) return dists_info", "def evaluate_PCK_after_label_basis_matching(P, KP, pred_KP): n_data = P.shape[0] n_points = P.shape[1]", "= predict_A(P, sess, net) assert(A.shape[0] == data.n_data) assert(A.shape[1] == data.n_points)", "10 + 1)) if postfix is not None: out_file =", "P, KP, pred_KP def evaluate_PCK(P, KP, pred_KP): n_data = P.shape[0]", "x_list = np.linspace(0.0, 0.1, 20 + 1) counts = np.zeros(x_list.size,", "/ \\ np.expand_dims(label_counts, axis=-1) row_ind, col_ind = linear_sum_assignment(mean_dists) # dists_info:", "0: continue dists_info.append((k, i, j, all_dists[k,i,j])) dists_info = np.array(dists_info) return", "BASE_DIR = os.path.normpath( os.path.join(os.path.dirname(os.path.abspath(__file__)))) sys.path.append(os.path.join(BASE_DIR, '..')) from datasets import *", "KP, pred_KP def evaluate_PCK(P, KP, 
pred_KP): n_data = P.shape[0] n_points", "KP.shape[1] K = pred_KP.shape[1] # dists_info: (point_cloud_index, label, basis_index, distance)", "= os.path.join(out_dir, 'pck_{}.png'.format(postfix)) else: out_file = os.path.join(out_dir, 'pck.png') plt.savefig(out_file) print(\"Saved", "from scipy.optimize import linear_sum_assignment #import matplotlib.pyplot as plt import numpy", "'distances.npy') np.save(out_file, dists) print(\"Saved '{}'.\".format(out_file)) ''' # Draw plot. n_matches", "pred_KP) save_results(dists, out_dir) dists_after_matching = evaluate_PCK_after_label_basis_matching( P, KP, pred_KP) save_results(dists_after_matching,", "not None: out_file = os.path.join(out_dir, 'pck_{}.png'.format(postfix)) else: out_file = os.path.join(out_dir,", "else: out_file = os.path.join(out_dir, 'pck.png') plt.savefig(out_file) print(\"Saved '{}'.\".format(out_file)) ''' def", "= linear_sum_assignment(mean_dists) # dists_info: (point_cloud_index, label, basis_index, distance) dists_info =", "y_list = counts.astype(x_list.dtype) / float(n_matches) plt.clf() plt.plot(x_list, y_list) plt.ylim(0., 1.)", "n_data = P.shape[0] n_points = P.shape[1] n_labels = KP.shape[1] K", "[] for k in range(n_data): # NOTE: # Skip if", "np.linalg.norm(p_i - p_j) j = np.argmin(all_dists) dists_info.append((k, i, j, all_dists[j]))", "dists_info = np.array(dists_info) return dists_info def save_results(dists_info, out_dir, postfix=None): #", "assert(idx_i < n_points) p_i = P[k,idx_i] for j in range(K):", "= P.shape[0] n_points = P.shape[1] n_labels = KP.shape[1] K =", "os.path.join(out_dir, 'pck.png') plt.savefig(out_file) print(\"Saved '{}'.\".format(out_file)) ''' def evaluate(sess, net, data,", "== data.n_points) assert(A.shape[2] == net.K) pred_KP = np.argmax(A, axis=1) return", "'{}'.\".format(out_file)) ''' # Draw plot. n_matches = dists.size x_list =", "keypoint does not exist. 
labels = [i for i in", "basis_index, distance) dists = dists_info[:,3] if postfix is not None:", "range(n_labels) if KP[k,i] >= 0] # Find the closest prediction", "generate_outputs import * from scipy.optimize import linear_sum_assignment #import matplotlib.pyplot as", "\\ np.expand_dims(label_counts, axis=-1) row_ind, col_ind = linear_sum_assignment(mean_dists) # dists_info: (point_cloud_index,", "P[k,idx_i] for j in range(K): idx_j = pred_KP[k,j] assert(idx_j <", "i in range(n_labels): # NOTE: # Skip if the keypoint", "(point_cloud_index, label, basis_index, distance) dists_info = [] for k in", "enumerate(labels): all_dists = np.zeros(K) idx_i = KP[k,label] assert(idx_i < n_points)", "evaluate_PCK(P, KP, pred_KP): n_data = P.shape[0] n_points = P.shape[1] n_labels", "= P[k,idx_i] for j in range(K): idx_j = pred_KP[k,j] assert(idx_j", "label, basis_index, distance) dists = dists_info[:,3] if postfix is not", "the best mapping from labels to bases. all_dists = np.zeros((n_data,", "net, data) dists = evaluate_PCK(P, KP, pred_KP) save_results(dists, out_dir) dists_after_matching", "prediction (w/o matching). for i, label in enumerate(labels): all_dists =", "evaluate(sess, net, data, out_dir): if not os.path.exists(out_dir): os.makedirs(out_dir) P, KP,", "continue idx_i = KP[k,i] assert(idx_i < n_points) p_i = P[k,idx_i]", "exist. if KP[k,i] < 0: continue idx_i = KP[k,i] assert(idx_i", "def compute_all_keypoints(sess, net, data): P = data.point_clouds assert(P.shape[0] == data.n_data)", "matching). for i, label in enumerate(labels): all_dists = np.zeros(K) idx_i", "zip(row_ind, col_ind): if KP[k,i] < 0: continue dists_info.append((k, i, j,", "= counts.astype(x_list.dtype) / float(n_matches) plt.clf() plt.plot(x_list, y_list) plt.ylim(0., 1.) 
plt.yticks(np.linspace(0.,", "mean_dists = np.sum(all_dists, axis=0) / \\ np.expand_dims(label_counts, axis=-1) row_ind, col_ind", "as plt import numpy as np def compute_all_keypoints(sess, net, data):", "print(\"Saved '{}'.\".format(out_file)) ''' def evaluate(sess, net, data, out_dir): if not", "= KP.shape[1] K = pred_KP.shape[1] # dists_info: (point_cloud_index, label, basis_index,", "+= 1. for j in range(K): idx_j = pred_KP[k,j] assert(idx_j", "range(n_data): for (i, j) in zip(row_ind, col_ind): if KP[k,i] <", "'distances_{}.npy'.format(postfix)) else: out_file = os.path.join(out_dir, 'distances.npy') np.save(out_file, dists) print(\"Saved '{}'.\".format(out_file))", "axis=0) / \\ np.expand_dims(label_counts, axis=-1) row_ind, col_ind = linear_sum_assignment(mean_dists) #", "in range(n_labels) if KP[k,i] >= 0] # Find the closest", "[i for i in range(n_labels) if KP[k,i] >= 0] #", "'..')) from datasets import * from generate_outputs import * from", "dists_info: (point_cloud_index, label, basis_index, distance) dists_info = [] for k", "= data.point_clouds assert(P.shape[0] == data.n_data) assert(P.shape[1] == data.n_points) KP =", "float(n_matches) plt.clf() plt.plot(x_list, y_list) plt.ylim(0., 1.) plt.yticks(np.linspace(0., 1., 10 +", "if the keypoint does not exist. labels = [i for", "1., 10 + 1)) if postfix is not None: out_file", "compute_all_keypoints(sess, net, data) dists = evaluate_PCK(P, KP, pred_KP) save_results(dists, out_dir)", "best mapping from labels to bases. 
all_dists = np.zeros((n_data, n_labels,", "not None: out_file = os.path.join(out_dir, 'distances_{}.npy'.format(postfix)) else: out_file = os.path.join(out_dir,", "all_dists[j] = np.linalg.norm(p_i - p_j) j = np.argmin(all_dists) dists_info.append((k, i,", "(point_cloud_index, label, basis_index, distance) dists = dists_info[:,3] if postfix is", "+= np.linalg.norm(p_i - p_j) mean_dists = np.sum(all_dists, axis=0) / \\", "= dists_info[:,3] if postfix is not None: out_file = os.path.join(out_dir,", "for i in range(n_labels): # NOTE: # Skip if the", "dtype=int) for i in range(x_list.size): counts[i] = np.sum(dists <= x_list[i])", "= evaluate_PCK(P, KP, pred_KP) save_results(dists, out_dir) dists_after_matching = evaluate_PCK_after_label_basis_matching( P,", "postfix is not None: out_file = os.path.join(out_dir, 'distances_{}.npy'.format(postfix)) else: out_file", "import * from generate_outputs import * from scipy.optimize import linear_sum_assignment", "import linear_sum_assignment #import matplotlib.pyplot as plt import numpy as np", "[] for k in range(n_data): for (i, j) in zip(row_ind,", "pred_KP = np.argmax(A, axis=1) return P, KP, pred_KP def evaluate_PCK(P,", "= os.path.join(out_dir, 'distances.npy') np.save(out_file, dists) print(\"Saved '{}'.\".format(out_file)) ''' # Draw", "P[k,idx_i] label_counts[i] += 1. for j in range(K): idx_j =", "def save_results(dists_info, out_dir, postfix=None): # dists_info: (point_cloud_index, label, basis_index, distance)", "in range(n_data): for i in range(n_labels): # NOTE: # Skip", "= pred_KP[k,j] assert(idx_j < n_points) p_j = P[k,idx_j] all_dists[j] =", "in range(x_list.size): counts[i] = np.sum(dists <= x_list[i]) y_list = counts.astype(x_list.dtype)", "to bases. 
all_dists = np.zeros((n_data, n_labels, K)) label_counts = np.zeros(n_labels)", "label_counts = np.zeros(n_labels) for k in range(n_data): for i in", "scipy.optimize import linear_sum_assignment #import matplotlib.pyplot as plt import numpy as", "data) dists = evaluate_PCK(P, KP, pred_KP) save_results(dists, out_dir) dists_after_matching =", "np.save(out_file, dists) print(\"Saved '{}'.\".format(out_file)) ''' # Draw plot. n_matches =", "out_dir): if not os.path.exists(out_dir): os.makedirs(out_dir) P, KP, pred_KP = compute_all_keypoints(sess,", "plt.yticks(np.linspace(0., 1., 10 + 1)) if postfix is not None:", "in range(K): idx_j = pred_KP[k,j] assert(idx_j < n_points) p_j =", "k in range(n_data): # NOTE: # Skip if the keypoint", "P[k,idx_j] all_dists[j] = np.linalg.norm(p_i - p_j) j = np.argmin(all_dists) dists_info.append((k,", "assert(P.shape[0] == data.n_data) assert(P.shape[1] == data.n_points) KP = data.keypoints assert(KP.shape[0]", "data): P = data.point_clouds assert(P.shape[0] == data.n_data) assert(P.shape[1] == data.n_points)", "idx_i = KP[k,i] assert(idx_i < n_points) p_i = P[k,idx_i] label_counts[i]", "data.point_clouds assert(P.shape[0] == data.n_data) assert(P.shape[1] == data.n_points) KP = data.keypoints", "axis=-1) row_ind, col_ind = linear_sum_assignment(mean_dists) # dists_info: (point_cloud_index, label, basis_index,", "distance) dists_info = [] for k in range(n_data): for (i,", "out_file = os.path.join(out_dir, 'distances.npy') np.save(out_file, dists) print(\"Saved '{}'.\".format(out_file)) ''' #", "from generate_outputs import * from scipy.optimize import linear_sum_assignment #import matplotlib.pyplot", "assert(KP.shape[1] == data.n_labels) A = predict_A(P, sess, net) assert(A.shape[0] ==", "p_j = P[k,idx_j] all_dists[j] = np.linalg.norm(p_i - p_j) j =", "== net.K) pred_KP = np.argmax(A, axis=1) return P, KP, pred_KP", "dists = dists_info[:,3] if postfix is not None: out_file =", "np.sum(dists <= x_list[i]) y_list = 
counts.astype(x_list.dtype) / float(n_matches) plt.clf() plt.plot(x_list,", "import numpy as np def compute_all_keypoints(sess, net, data): P =", "data.n_data) assert(KP.shape[1] == data.n_labels) A = predict_A(P, sess, net) assert(A.shape[0]", "compute_all_keypoints(sess, net, data): P = data.point_clouds assert(P.shape[0] == data.n_data) assert(P.shape[1]", "== data.n_data) assert(KP.shape[1] == data.n_labels) A = predict_A(P, sess, net)", "assert(idx_j < n_points) p_j = P[k,idx_j] all_dists[j] = np.linalg.norm(p_i -", "np.zeros((n_data, n_labels, K)) label_counts = np.zeros(n_labels) for k in range(n_data):", "np.sum(all_dists, axis=0) / \\ np.expand_dims(label_counts, axis=-1) row_ind, col_ind = linear_sum_assignment(mean_dists)", "= P.shape[1] n_labels = KP.shape[1] K = pred_KP.shape[1] # dists_info:", "1. for j in range(K): idx_j = pred_KP[k,j] assert(idx_j <" ]
[ "recipe lacks information about the {} compiler support.\".format( self.name, self.settings.compiler))", "self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version)) def requirements(self): if self.options.unicode: self.requires(\"icu/64.2\") def source(self):", "class CxxOptsConan(ConanFile): name = \"cxxopts\" homepage = \"https://github.com/jarro2783/cxxopts\" url =", "homepage = \"https://github.com/jarro2783/cxxopts\" url = \"https://github.com/conan-io/conan-center-index\" description = \"Lightweight C++", "default_options = { \"unicode\": False } no_copy_source = True @property", "GNU style syntax for options.\" license = \"MIT\" topics =", "= True @property def _source_subfolder(self): return \"source_subfolder\" @property def _minimum_cpp_standard(self):", "description = \"Lightweight C++ option parser library, supporting the standard", "no_copy_source = True @property def _source_subfolder(self): return \"source_subfolder\" @property def", "= \"Lightweight C++ option parser library, supporting the standard GNU", "self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder) self.copy(\"{}.hpp\".format(self.name), dst=\"include\", src=os.path.join(self._source_subfolder, \"include\")) def package_id(self): self.info.header_only()", "source(self): tools.get(**self.conan_data[\"sources\"][self.version]) os.rename(\"{}-{}\".format(self.name, self.version), self._source_subfolder) def package(self): self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)", "from conans import ConanFile, tools from conans.errors import ConanInvalidConfiguration class", "compiler {} {} does not support it.\".format( self.name, self._minimum_cpp_standard, self.settings.compiler,", "\"clang\": \"3.9\", \"apple-clang\": \"8\", } def configure(self): if self.settings.compiler.get_safe(\"cppstd\"): tools.check_min_cppstd(self,", "requirements(self): if self.options.unicode: self.requires(\"icu/64.2\") def source(self): 
tools.get(**self.conan_data[\"sources\"][self.version]) os.rename(\"{}-{}\".format(self.name, self.version), self._source_subfolder)", "package(self): self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder) self.copy(\"{}.hpp\".format(self.name), dst=\"include\", src=os.path.join(self._source_subfolder, \"include\")) def package_id(self):", "\", \"header-only\") settings = \"compiler\" options = { \"unicode\": [True,", "support. The current compiler {} {} does not support it.\".format(", "[True, False] } default_options = { \"unicode\": False } no_copy_source", "self.settings.compiler.get_safe(\"cppstd\"): tools.check_min_cppstd(self, self._minimum_cpp_standard) min_version = self._minimum_compilers_version.get(str(self.settings.compiler)) if not min_version: self.output.warn(\"{}", "the {} compiler support.\".format( self.name, self.settings.compiler)) else: if tools.Version(self.settings.compiler.version) <", "def _minimum_compilers_version(self): return { \"Visual Studio\": \"14\", \"gcc\": \"5\", \"clang\":", "C++ option parser library, supporting the standard GNU style syntax", "\"5\", \"clang\": \"3.9\", \"apple-clang\": \"8\", } def configure(self): if self.settings.compiler.get_safe(\"cppstd\"):", "min_version: raise ConanInvalidConfiguration(\"{} requires C++{} support. 
The current compiler {}", "= \"cxxopts\" homepage = \"https://github.com/jarro2783/cxxopts\" url = \"https://github.com/conan-io/conan-center-index\" description =", "\"option-parser\", \"positional-arguments \", \"header-only\") settings = \"compiler\" options = {", "current compiler {} {} does not support it.\".format( self.name, self._minimum_cpp_standard,", "@property def _minimum_cpp_standard(self): return 11 @property def _minimum_compilers_version(self): return {", "tools from conans.errors import ConanInvalidConfiguration class CxxOptsConan(ConanFile): name = \"cxxopts\"", "\"14\", \"gcc\": \"5\", \"clang\": \"3.9\", \"apple-clang\": \"8\", } def configure(self):", "topics = (\"conan\", \"option-parser\", \"positional-arguments \", \"header-only\") settings = \"compiler\"", "{} compiler support.\".format( self.name, self.settings.compiler)) else: if tools.Version(self.settings.compiler.version) < min_version:", "} default_options = { \"unicode\": False } no_copy_source = True", "\"include\")) def package_id(self): self.info.header_only() def package_info(self): if self.options.unicode: self.cpp_info.defines =", "self._minimum_compilers_version.get(str(self.settings.compiler)) if not min_version: self.output.warn(\"{} recipe lacks information about the", "return { \"Visual Studio\": \"14\", \"gcc\": \"5\", \"clang\": \"3.9\", \"apple-clang\":", "= \"https://github.com/jarro2783/cxxopts\" url = \"https://github.com/conan-io/conan-center-index\" description = \"Lightweight C++ option", "it.\".format( self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version)) def requirements(self): if self.options.unicode: self.requires(\"icu/64.2\")", "def package(self): self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder) self.copy(\"{}.hpp\".format(self.name), dst=\"include\", src=os.path.join(self._source_subfolder, \"include\")) def", "= \"https://github.com/conan-io/conan-center-index\" description = \"Lightweight C++ 
option parser library, supporting", "} no_copy_source = True @property def _source_subfolder(self): return \"source_subfolder\" @property", "settings = \"compiler\" options = { \"unicode\": [True, False] }", "\"source_subfolder\" @property def _minimum_cpp_standard(self): return 11 @property def _minimum_compilers_version(self): return", "False } no_copy_source = True @property def _source_subfolder(self): return \"source_subfolder\"", "src=os.path.join(self._source_subfolder, \"include\")) def package_id(self): self.info.header_only() def package_info(self): if self.options.unicode: self.cpp_info.defines", "import os from conans import ConanFile, tools from conans.errors import", "def configure(self): if self.settings.compiler.get_safe(\"cppstd\"): tools.check_min_cppstd(self, self._minimum_cpp_standard) min_version = self._minimum_compilers_version.get(str(self.settings.compiler)) if", "= { \"unicode\": [True, False] } default_options = { \"unicode\":", "\"header-only\") settings = \"compiler\" options = { \"unicode\": [True, False]", "self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version)) def requirements(self): if self.options.unicode: self.requires(\"icu/64.2\") def", "about the {} compiler support.\".format( self.name, self.settings.compiler)) else: if tools.Version(self.settings.compiler.version)", "self._minimum_cpp_standard) min_version = self._minimum_compilers_version.get(str(self.settings.compiler)) if not min_version: self.output.warn(\"{} recipe lacks", "{} does not support it.\".format( self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version)) def", "option parser library, supporting the standard GNU style syntax for", "src=self._source_subfolder) self.copy(\"{}.hpp\".format(self.name), dst=\"include\", src=os.path.join(self._source_subfolder, \"include\")) def package_id(self): self.info.header_only() def package_info(self):", "os.rename(\"{}-{}\".format(self.name, 
self.version), self._source_subfolder) def package(self): self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder) self.copy(\"{}.hpp\".format(self.name), dst=\"include\",", "self.options.unicode: self.requires(\"icu/64.2\") def source(self): tools.get(**self.conan_data[\"sources\"][self.version]) os.rename(\"{}-{}\".format(self.name, self.version), self._source_subfolder) def package(self):", "raise ConanInvalidConfiguration(\"{} requires C++{} support. The current compiler {} {}", "@property def _minimum_compilers_version(self): return { \"Visual Studio\": \"14\", \"gcc\": \"5\",", "compiler support.\".format( self.name, self.settings.compiler)) else: if tools.Version(self.settings.compiler.version) < min_version: raise", "conans import ConanFile, tools from conans.errors import ConanInvalidConfiguration class CxxOptsConan(ConanFile):", "\"Lightweight C++ option parser library, supporting the standard GNU style", "_source_subfolder(self): return \"source_subfolder\" @property def _minimum_cpp_standard(self): return 11 @property def", "support.\".format( self.name, self.settings.compiler)) else: if tools.Version(self.settings.compiler.version) < min_version: raise ConanInvalidConfiguration(\"{}", "self.settings.compiler)) else: if tools.Version(self.settings.compiler.version) < min_version: raise ConanInvalidConfiguration(\"{} requires C++{}", "The current compiler {} {} does not support it.\".format( self.name,", "Studio\": \"14\", \"gcc\": \"5\", \"clang\": \"3.9\", \"apple-clang\": \"8\", } def", "{ \"unicode\": [True, False] } default_options = { \"unicode\": False", "the standard GNU style syntax for options.\" license = \"MIT\"", "= (\"conan\", \"option-parser\", \"positional-arguments \", \"header-only\") settings = \"compiler\" options", "def requirements(self): if self.options.unicode: self.requires(\"icu/64.2\") def source(self): tools.get(**self.conan_data[\"sources\"][self.version]) os.rename(\"{}-{}\".format(self.name, self.version),", 
"library, supporting the standard GNU style syntax for options.\" license", "CxxOptsConan(ConanFile): name = \"cxxopts\" homepage = \"https://github.com/jarro2783/cxxopts\" url = \"https://github.com/conan-io/conan-center-index\"", "\"Visual Studio\": \"14\", \"gcc\": \"5\", \"clang\": \"3.9\", \"apple-clang\": \"8\", }", "dst=\"include\", src=os.path.join(self._source_subfolder, \"include\")) def package_id(self): self.info.header_only() def package_info(self): if self.options.unicode:", "self.copy(\"{}.hpp\".format(self.name), dst=\"include\", src=os.path.join(self._source_subfolder, \"include\")) def package_id(self): self.info.header_only() def package_info(self): if", "11 @property def _minimum_compilers_version(self): return { \"Visual Studio\": \"14\", \"gcc\":", "def _source_subfolder(self): return \"source_subfolder\" @property def _minimum_cpp_standard(self): return 11 @property", "return \"source_subfolder\" @property def _minimum_cpp_standard(self): return 11 @property def _minimum_compilers_version(self):", "= \"compiler\" options = { \"unicode\": [True, False] } default_options", "support it.\".format( self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version)) def requirements(self): if self.options.unicode:", "if not min_version: self.output.warn(\"{} recipe lacks information about the {}", "min_version: self.output.warn(\"{} recipe lacks information about the {} compiler support.\".format(", "self.output.warn(\"{} recipe lacks information about the {} compiler support.\".format( self.name,", "self.name, self.settings.compiler)) else: if tools.Version(self.settings.compiler.version) < min_version: raise ConanInvalidConfiguration(\"{} requires", "\"3.9\", \"apple-clang\": \"8\", } def configure(self): if self.settings.compiler.get_safe(\"cppstd\"): tools.check_min_cppstd(self, self._minimum_cpp_standard)", "tools.check_min_cppstd(self, self._minimum_cpp_standard) min_version = 
self._minimum_compilers_version.get(str(self.settings.compiler)) if not min_version: self.output.warn(\"{} recipe", "url = \"https://github.com/conan-io/conan-center-index\" description = \"Lightweight C++ option parser library,", "standard GNU style syntax for options.\" license = \"MIT\" topics", "license = \"MIT\" topics = (\"conan\", \"option-parser\", \"positional-arguments \", \"header-only\")", "import ConanFile, tools from conans.errors import ConanInvalidConfiguration class CxxOptsConan(ConanFile): name", "lacks information about the {} compiler support.\".format( self.name, self.settings.compiler)) else:", "{ \"Visual Studio\": \"14\", \"gcc\": \"5\", \"clang\": \"3.9\", \"apple-clang\": \"8\",", "tools.Version(self.settings.compiler.version) < min_version: raise ConanInvalidConfiguration(\"{} requires C++{} support. The current", "(\"conan\", \"option-parser\", \"positional-arguments \", \"header-only\") settings = \"compiler\" options =", "options = { \"unicode\": [True, False] } default_options = {", "def package_id(self): self.info.header_only() def package_info(self): if self.options.unicode: self.cpp_info.defines = [\"CXXOPTS_USE_UNICODE\"]", "ConanInvalidConfiguration(\"{} requires C++{} support. 
The current compiler {} {} does", "\"compiler\" options = { \"unicode\": [True, False] } default_options =", "name = \"cxxopts\" homepage = \"https://github.com/jarro2783/cxxopts\" url = \"https://github.com/conan-io/conan-center-index\" description", "def _minimum_cpp_standard(self): return 11 @property def _minimum_compilers_version(self): return { \"Visual", "\"8\", } def configure(self): if self.settings.compiler.get_safe(\"cppstd\"): tools.check_min_cppstd(self, self._minimum_cpp_standard) min_version =", "if self.settings.compiler.get_safe(\"cppstd\"): tools.check_min_cppstd(self, self._minimum_cpp_standard) min_version = self._minimum_compilers_version.get(str(self.settings.compiler)) if not min_version:", "supporting the standard GNU style syntax for options.\" license =", "self.version), self._source_subfolder) def package(self): self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder) self.copy(\"{}.hpp\".format(self.name), dst=\"include\", src=os.path.join(self._source_subfolder,", "C++{} support. 
The current compiler {} {} does not support", "_minimum_compilers_version(self): return { \"Visual Studio\": \"14\", \"gcc\": \"5\", \"clang\": \"3.9\",", "self.settings.compiler, self.settings.compiler.version)) def requirements(self): if self.options.unicode: self.requires(\"icu/64.2\") def source(self): tools.get(**self.conan_data[\"sources\"][self.version])", "\"https://github.com/jarro2783/cxxopts\" url = \"https://github.com/conan-io/conan-center-index\" description = \"Lightweight C++ option parser", "tools.get(**self.conan_data[\"sources\"][self.version]) os.rename(\"{}-{}\".format(self.name, self.version), self._source_subfolder) def package(self): self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder) self.copy(\"{}.hpp\".format(self.name),", "syntax for options.\" license = \"MIT\" topics = (\"conan\", \"option-parser\",", "conans.errors import ConanInvalidConfiguration class CxxOptsConan(ConanFile): name = \"cxxopts\" homepage =", "ConanInvalidConfiguration class CxxOptsConan(ConanFile): name = \"cxxopts\" homepage = \"https://github.com/jarro2783/cxxopts\" url", "style syntax for options.\" license = \"MIT\" topics = (\"conan\",", "\"unicode\": [True, False] } default_options = { \"unicode\": False }", "options.\" license = \"MIT\" topics = (\"conan\", \"option-parser\", \"positional-arguments \",", "self.requires(\"icu/64.2\") def source(self): tools.get(**self.conan_data[\"sources\"][self.version]) os.rename(\"{}-{}\".format(self.name, self.version), self._source_subfolder) def package(self): self.copy(\"LICENSE\",", "dst=\"licenses\", src=self._source_subfolder) self.copy(\"{}.hpp\".format(self.name), dst=\"include\", src=os.path.join(self._source_subfolder, \"include\")) def package_id(self): self.info.header_only() def", "\"https://github.com/conan-io/conan-center-index\" description = \"Lightweight C++ option parser library, supporting the", "\"unicode\": False } no_copy_source = True @property def _source_subfolder(self): return", 
"does not support it.\".format( self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version)) def requirements(self):", "{} {} does not support it.\".format( self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version))", "{ \"unicode\": False } no_copy_source = True @property def _source_subfolder(self):", "os from conans import ConanFile, tools from conans.errors import ConanInvalidConfiguration", "return 11 @property def _minimum_compilers_version(self): return { \"Visual Studio\": \"14\",", "requires C++{} support. The current compiler {} {} does not", "\"cxxopts\" homepage = \"https://github.com/jarro2783/cxxopts\" url = \"https://github.com/conan-io/conan-center-index\" description = \"Lightweight", "= \"MIT\" topics = (\"conan\", \"option-parser\", \"positional-arguments \", \"header-only\") settings", "for options.\" license = \"MIT\" topics = (\"conan\", \"option-parser\", \"positional-arguments", "@property def _source_subfolder(self): return \"source_subfolder\" @property def _minimum_cpp_standard(self): return 11", "= self._minimum_compilers_version.get(str(self.settings.compiler)) if not min_version: self.output.warn(\"{} recipe lacks information about", "not min_version: self.output.warn(\"{} recipe lacks information about the {} compiler", "False] } default_options = { \"unicode\": False } no_copy_source =", "\"apple-clang\": \"8\", } def configure(self): if self.settings.compiler.get_safe(\"cppstd\"): tools.check_min_cppstd(self, self._minimum_cpp_standard) min_version", "self._source_subfolder) def package(self): self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder) self.copy(\"{}.hpp\".format(self.name), dst=\"include\", src=os.path.join(self._source_subfolder, \"include\"))", "ConanFile, tools from conans.errors import ConanInvalidConfiguration class CxxOptsConan(ConanFile): name =", "\"MIT\" topics = (\"conan\", \"option-parser\", \"positional-arguments \", 
\"header-only\") settings =", "\"positional-arguments \", \"header-only\") settings = \"compiler\" options = { \"unicode\":", "\"gcc\": \"5\", \"clang\": \"3.9\", \"apple-clang\": \"8\", } def configure(self): if", "= { \"unicode\": False } no_copy_source = True @property def", "} def configure(self): if self.settings.compiler.get_safe(\"cppstd\"): tools.check_min_cppstd(self, self._minimum_cpp_standard) min_version = self._minimum_compilers_version.get(str(self.settings.compiler))", "if self.options.unicode: self.requires(\"icu/64.2\") def source(self): tools.get(**self.conan_data[\"sources\"][self.version]) os.rename(\"{}-{}\".format(self.name, self.version), self._source_subfolder) def", "not support it.\".format( self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version)) def requirements(self): if", "min_version = self._minimum_compilers_version.get(str(self.settings.compiler)) if not min_version: self.output.warn(\"{} recipe lacks information", "from conans.errors import ConanInvalidConfiguration class CxxOptsConan(ConanFile): name = \"cxxopts\" homepage", "parser library, supporting the standard GNU style syntax for options.\"", "< min_version: raise ConanInvalidConfiguration(\"{} requires C++{} support. 
The current compiler", "configure(self): if self.settings.compiler.get_safe(\"cppstd\"): tools.check_min_cppstd(self, self._minimum_cpp_standard) min_version = self._minimum_compilers_version.get(str(self.settings.compiler)) if not", "_minimum_cpp_standard(self): return 11 @property def _minimum_compilers_version(self): return { \"Visual Studio\":", "import ConanInvalidConfiguration class CxxOptsConan(ConanFile): name = \"cxxopts\" homepage = \"https://github.com/jarro2783/cxxopts\"", "self.settings.compiler.version)) def requirements(self): if self.options.unicode: self.requires(\"icu/64.2\") def source(self): tools.get(**self.conan_data[\"sources\"][self.version]) os.rename(\"{}-{}\".format(self.name,", "def source(self): tools.get(**self.conan_data[\"sources\"][self.version]) os.rename(\"{}-{}\".format(self.name, self.version), self._source_subfolder) def package(self): self.copy(\"LICENSE\", dst=\"licenses\",", "if tools.Version(self.settings.compiler.version) < min_version: raise ConanInvalidConfiguration(\"{} requires C++{} support. The", "else: if tools.Version(self.settings.compiler.version) < min_version: raise ConanInvalidConfiguration(\"{} requires C++{} support.", "True @property def _source_subfolder(self): return \"source_subfolder\" @property def _minimum_cpp_standard(self): return", "information about the {} compiler support.\".format( self.name, self.settings.compiler)) else: if" ]
[ "{2,2}] # etc way_sets = [None] for i in range(1,", "coins: remainder = i - coin if remainder == 0:", "hash(\" \".join([str(i) for i in self])) def main(): \"\"\" Entry", "coin alone - but no larger coins coin_count = [0", "Building up from 1 means we can re-use earlier results", "way_sets.append(way_set_i) print(f\"Number of ways of making £2: {len(way_sets[200])}\") return if", "{2,1}] # 4p: [{1,1,1,1}, {2,1,1}, {2,2}] # etc way_sets =", "def __hash__(self): \"\"\" Hash this as a string \"\"\" return", "main(): \"\"\" Entry point \"\"\" # Important: sorted smallest to", "count, 5p count, ... , 200p count] \"\"\" def __hash__(self):", "count, ... , 200p count] \"\"\" def __hash__(self): \"\"\" Hash", "this as a string \"\"\" return hash(\" \".join([str(i) for i", "200] coin_index = {coin: index for index, coin in enumerate(coins)}", "any bigger coins break way_sets.append(way_set_i) print(f\"Number of ways of making", "\".join([str(i) for i in self])) def main(): \"\"\" Entry point", "Can't use any bigger coins break way_sets.append(way_set_i) print(f\"Number of ways", "of the remainder, if > 0 for coin in coins:", "- coin if remainder == 0: # We can make", "\"\"\" return hash(\" \".join([str(i) for i in self])) def main():", "# 3p: [{1,1,1}, {2,1}] # 4p: [{1,1,1,1}, {2,1,1}, {2,2}] #", "the remainder, if > 0 for coin in coins: remainder", "a string \"\"\" return hash(\" \".join([str(i) for i in self]))", "\"\"\" def __hash__(self): \"\"\" Hash this as a string \"\"\"", "{2}] # 3p: [{1,1,1}, {2,1}] # 4p: [{1,1,1,1}, {2,1,1}, {2,2}]", "but no larger coins coin_count = [0 for i in", "up from 1 means we can re-use earlier results #", "2p: [{1,1}, {2}] # 3p: [{1,1,1}, {2,1}] # 4p: [{1,1,1,1},", "coin_count = [0 for i in coins] coin_count[coin_index[coin]] = 1", "remainder == 0: # We can make this with exactly", "How many ways are there of making each number from", "string \"\"\" return hash(\" \".join([str(i) for i in self])) def", "and then all the ways of the 
remainder, if >", "8 entries are [1p count, 2p count, 5p count, ...", "coins] coin_count[coin_index[coin]] = 1 way_set_i.add(CoinArray(coin_count)) break elif remainder > 0:", "in self])) def main(): \"\"\" Entry point \"\"\" # Important:", "way_sets[remainder]: new_coin_count = [c for c in rem_list] new_coin_count[coin_index[coin]] +=", "storage in sets The 8 entries are [1p count, 2p", "smaller value are for rem_list in way_sets[remainder]: new_coin_count = [c", "index, coin in enumerate(coins)} # How many ways are there", "\"\"\" # Important: sorted smallest to largest coins = [1,", "way_set_i.add(CoinArray(coin_count)) break elif remainder > 0: # We can use", "[1p count, 2p count, 5p count, ... , 200p count]", "200 from these values? # Building up from 1 means", "c in rem_list] new_coin_count[coin_index[coin]] += 1 way_set_i.add(CoinArray(new_coin_count)) else: # Can't", "# etc way_sets = [None] for i in range(1, 201):", "... , 200p count] \"\"\" def __hash__(self): \"\"\" Hash this", "for index, coin in enumerate(coins)} # How many ways are", "5, 10, 20, 50, 100, 200] coin_index = {coin: index", "# We can use this coin and whatever the options", "whatever the options for the smaller value are for rem_list", "coins coin_count = [0 for i in coins] coin_count[coin_index[coin]] =", "to 200 from these values? 
# Building up from 1", "this with exactly this coin alone - but no larger", "sets The 8 entries are [1p count, 2p count, 5p", "etc way_sets = [None] for i in range(1, 201): way_set_i", "options for the smaller value are for rem_list in way_sets[remainder]:", "1 way_set_i.add(CoinArray(new_coin_count)) else: # Can't use any bigger coins break", "ways are there of making each number from 1 to", "in coins] coin_count[coin_index[coin]] = 1 way_set_i.add(CoinArray(coin_count)) break elif remainder >", "# Can't use any bigger coins break way_sets.append(way_set_i) print(f\"Number of", "[None] for i in range(1, 201): way_set_i = set() #", "use this coin and whatever the options for the smaller", "if > 0 for coin in coins: remainder = i", "in enumerate(coins)} # How many ways are there of making", "= [0 for i in coins] coin_count[coin_index[coin]] = 1 way_set_i.add(CoinArray(coin_count))", "in rem_list] new_coin_count[coin_index[coin]] += 1 way_set_i.add(CoinArray(new_coin_count)) else: # Can't use", "alone - but no larger coins coin_count = [0 for", "for c in rem_list] new_coin_count[coin_index[coin]] += 1 way_set_i.add(CoinArray(new_coin_count)) else: #", "= i - coin if remainder == 0: # We", "from 1 to 200 from these values? 
# Building up", "remainder = i - coin if remainder == 0: #", "for the smaller value are for rem_list in way_sets[remainder]: new_coin_count", "else: # Can't use any bigger coins break way_sets.append(way_set_i) print(f\"Number", "self])) def main(): \"\"\" Entry point \"\"\" # Important: sorted", "bigger coins break way_sets.append(way_set_i) print(f\"Number of ways of making £2:", "4p: [{1,1,1,1}, {2,1,1}, {2,2}] # etc way_sets = [None] for", "{coin: index for index, coin in enumerate(coins)} # How many", "range(1, 201): way_set_i = set() # Try using 1 of", "def main(): \"\"\" Entry point \"\"\" # Important: sorted smallest", "rem_list in way_sets[remainder]: new_coin_count = [c for c in rem_list]", "__hash__(self): \"\"\" Hash this as a string \"\"\" return hash(\"", "all the ways of the remainder, if > 0 for", "Try using 1 of each coin and then all the", "of each coin and then all the ways of the", "> 0 for coin in coins: remainder = i -", "the options for the smaller value are for rem_list in", "\"\"\" Entry point \"\"\" # Important: sorted smallest to largest", "return hash(\" \".join([str(i) for i in self])) def main(): \"\"\"", "this coin and whatever the options for the smaller value", "is hashable for storage in sets The 8 entries are", "200p count] \"\"\" def __hash__(self): \"\"\" Hash this as a", "50, 100, 200] coin_index = {coin: index for index, coin", "# 2p: [{1,1}, {2}] # 3p: [{1,1,1}, {2,1}] # 4p:", "[1, 2, 5, 10, 20, 50, 100, 200] coin_index =", "# Important: sorted smallest to largest coins = [1, 2,", "e.g.: # 1p: [{1}] # 2p: [{1,1}, {2}] # 3p:", "[{1}] # 2p: [{1,1}, {2}] # 3p: [{1,1,1}, {2,1}] #", "can re-use earlier results # e.g.: # 1p: [{1}] #", "there of making each number from 1 to 200 from", "i - coin if remainder == 0: # We can", "list that is hashable for storage in sets The 8", "as a string \"\"\" return hash(\" \".join([str(i) for i in", "to largest coins = [1, 2, 5, 10, 20, 50,", "\"\"\" Hash this as a string \"\"\" return hash(\" 
\".join([str(i)", "new_coin_count[coin_index[coin]] += 1 way_set_i.add(CoinArray(new_coin_count)) else: # Can't use any bigger", "make this with exactly this coin alone - but no", "We can use this coin and whatever the options for", "are [1p count, 2p count, 5p count, ... , 200p", "The 8 entries are [1p count, 2p count, 5p count,", "for coin in coins: remainder = i - coin if", "1 of each coin and then all the ways of", "with exactly this coin alone - but no larger coins", "class CoinArray(list): \"\"\" Coin list that is hashable for storage", "each number from 1 to 200 from these values? #", "- but no larger coins coin_count = [0 for i", "can use this coin and whatever the options for the", "from these values? # Building up from 1 means we", "largest coins = [1, 2, 5, 10, 20, 50, 100,", "= set() # Try using 1 of each coin and", "that is hashable for storage in sets The 8 entries", "[{1,1}, {2}] # 3p: [{1,1,1}, {2,1}] # 4p: [{1,1,1,1}, {2,1,1},", "i in range(1, 201): way_set_i = set() # Try using", "then all the ways of the remainder, if > 0", "way_set_i.add(CoinArray(new_coin_count)) else: # Can't use any bigger coins break way_sets.append(way_set_i)", "for i in self])) def main(): \"\"\" Entry point \"\"\"", "coin and then all the ways of the remainder, if", "in coins: remainder = i - coin if remainder ==", "5p count, ... 
, 200p count] \"\"\" def __hash__(self): \"\"\"", "# How many ways are there of making each number", "1p: [{1}] # 2p: [{1,1}, {2}] # 3p: [{1,1,1}, {2,1}]", "[0 for i in coins] coin_count[coin_index[coin]] = 1 way_set_i.add(CoinArray(coin_count)) break", "no larger coins coin_count = [0 for i in coins]", "# e.g.: # 1p: [{1}] # 2p: [{1,1}, {2}] #", "ways of the remainder, if > 0 for coin in", "coin if remainder == 0: # We can make this", "coins = [1, 2, 5, 10, 20, 50, 100, 200]", "for i in coins] coin_count[coin_index[coin]] = 1 way_set_i.add(CoinArray(coin_count)) break elif", "way_sets = [None] for i in range(1, 201): way_set_i =", "1 way_set_i.add(CoinArray(coin_count)) break elif remainder > 0: # We can", "Hash this as a string \"\"\" return hash(\" \".join([str(i) for", "the ways of the remainder, if > 0 for coin", "> 0: # We can use this coin and whatever", "each coin and then all the ways of the remainder,", "3p: [{1,1,1}, {2,1}] # 4p: [{1,1,1,1}, {2,1,1}, {2,2}] # etc", "20, 50, 100, 200] coin_index = {coin: index for index,", "these values? # Building up from 1 means we can", "values? 
# Building up from 1 means we can re-use", "elif remainder > 0: # We can use this coin", "of ways of making £2: {len(way_sets[200])}\") return if __name__ ==", "{2,1,1}, {2,2}] # etc way_sets = [None] for i in", "ways of making £2: {len(way_sets[200])}\") return if __name__ == \"__main__\":", "the smaller value are for rem_list in way_sets[remainder]: new_coin_count =", "making each number from 1 to 200 from these values?", "[c for c in rem_list] new_coin_count[coin_index[coin]] += 1 way_set_i.add(CoinArray(new_coin_count)) else:", "of making each number from 1 to 200 from these", "We can make this with exactly this coin alone -", "coin_index = {coin: index for index, coin in enumerate(coins)} #", "re-use earlier results # e.g.: # 1p: [{1}] # 2p:", "# 1p: [{1}] # 2p: [{1,1}, {2}] # 3p: [{1,1,1},", "in way_sets[remainder]: new_coin_count = [c for c in rem_list] new_coin_count[coin_index[coin]]", "many ways are there of making each number from 1", "entries are [1p count, 2p count, 5p count, ... 
,", "sorted smallest to largest coins = [1, 2, 5, 10,", "way_set_i = set() # Try using 1 of each coin", "if remainder == 0: # We can make this with", "are there of making each number from 1 to 200", "new_coin_count = [c for c in rem_list] new_coin_count[coin_index[coin]] += 1", "= [None] for i in range(1, 201): way_set_i = set()", "# Building up from 1 means we can re-use earlier", "coin in enumerate(coins)} # How many ways are there of", "exactly this coin alone - but no larger coins coin_count", "for storage in sets The 8 entries are [1p count,", "hashable for storage in sets The 8 entries are [1p", "index for index, coin in enumerate(coins)} # How many ways", "and whatever the options for the smaller value are for", "i in coins] coin_count[coin_index[coin]] = 1 way_set_i.add(CoinArray(coin_count)) break elif remainder", "coin_count[coin_index[coin]] = 1 way_set_i.add(CoinArray(coin_count)) break elif remainder > 0: #", "[{1,1,1}, {2,1}] # 4p: [{1,1,1,1}, {2,1,1}, {2,2}] # etc way_sets", "= [1, 2, 5, 10, 20, 50, 100, 200] coin_index", "100, 200] coin_index = {coin: index for index, coin in", "count] \"\"\" def __hash__(self): \"\"\" Hash this as a string", "can make this with exactly this coin alone - but", "break way_sets.append(way_set_i) print(f\"Number of ways of making £2: {len(way_sets[200])}\") return", "Important: sorted smallest to largest coins = [1, 2, 5,", "earlier results # e.g.: # 1p: [{1}] # 2p: [{1,1},", ", 200p count] \"\"\" def __hash__(self): \"\"\" Hash this as", "Coin list that is hashable for storage in sets The", "== 0: # We can make this with exactly this", "use any bigger coins break way_sets.append(way_set_i) print(f\"Number of ways of", "larger coins coin_count = [0 for i in coins] coin_count[coin_index[coin]]", "print(f\"Number of ways of making £2: {len(way_sets[200])}\") return if __name__", "remainder, if > 0 for coin in coins: remainder =", "+= 1 way_set_i.add(CoinArray(new_coin_count)) else: # Can't use any bigger coins", "i in 
self])) def main(): \"\"\" Entry point \"\"\" #", "value are for rem_list in way_sets[remainder]: new_coin_count = [c for", "from 1 means we can re-use earlier results # e.g.:", "\"\"\" Coin list that is hashable for storage in sets", "# Try using 1 of each coin and then all", "of making £2: {len(way_sets[200])}\") return if __name__ == \"__main__\": main()", "[{1,1,1,1}, {2,1,1}, {2,2}] # etc way_sets = [None] for i", "point \"\"\" # Important: sorted smallest to largest coins =", "using 1 of each coin and then all the ways", "2, 5, 10, 20, 50, 100, 200] coin_index = {coin:", "coin in coins: remainder = i - coin if remainder", "this coin alone - but no larger coins coin_count =", "= [c for c in rem_list] new_coin_count[coin_index[coin]] += 1 way_set_i.add(CoinArray(new_coin_count))", "means we can re-use earlier results # e.g.: # 1p:", "remainder > 0: # We can use this coin and", "201): way_set_i = set() # Try using 1 of each", "coins break way_sets.append(way_set_i) print(f\"Number of ways of making £2: {len(way_sets[200])}\")", "2p count, 5p count, ... , 200p count] \"\"\" def", "= {coin: index for index, coin in enumerate(coins)} # How", "1 means we can re-use earlier results # e.g.: #", "1 to 200 from these values? 
# Building up from", "set() # Try using 1 of each coin and then", "smallest to largest coins = [1, 2, 5, 10, 20,", "CoinArray(list): \"\"\" Coin list that is hashable for storage in", "results # e.g.: # 1p: [{1}] # 2p: [{1,1}, {2}]", "in range(1, 201): way_set_i = set() # Try using 1", "enumerate(coins)} # How many ways are there of making each", "break elif remainder > 0: # We can use this", "0: # We can use this coin and whatever the", "for rem_list in way_sets[remainder]: new_coin_count = [c for c in", "# 4p: [{1,1,1,1}, {2,1,1}, {2,2}] # etc way_sets = [None]", "<gh_stars>0 class CoinArray(list): \"\"\" Coin list that is hashable for", "in sets The 8 entries are [1p count, 2p count,", "10, 20, 50, 100, 200] coin_index = {coin: index for", "for i in range(1, 201): way_set_i = set() # Try", "# We can make this with exactly this coin alone", "count, 2p count, 5p count, ... , 200p count] \"\"\"", "= 1 way_set_i.add(CoinArray(coin_count)) break elif remainder > 0: # We", "coin and whatever the options for the smaller value are", "are for rem_list in way_sets[remainder]: new_coin_count = [c for c", "0 for coin in coins: remainder = i - coin", "we can re-use earlier results # e.g.: # 1p: [{1}]", "rem_list] new_coin_count[coin_index[coin]] += 1 way_set_i.add(CoinArray(new_coin_count)) else: # Can't use any", "Entry point \"\"\" # Important: sorted smallest to largest coins", "number from 1 to 200 from these values? # Building", "0: # We can make this with exactly this coin" ]
[ "#!/usr/bin/env python # Copyright 2017 Google Inc. All Rights Reserved.", "2017 Google Inc. All Rights Reserved. # # Licensed under", "in segment_label.category_entities: print('\\tLabel category description: {}'.format( category_entity.description)) for i, segment", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "= videointelligence.VideoIntelligenceServiceClient() features = [videointelligence.enums.Feature.LABEL_DETECTION] operation = video_client.annotate_video( 'gs://demomaker/cat.mp4', features=features)", "import videointelligence video_client = videointelligence.VideoIntelligenceServiceClient() features = [videointelligence.enums.Feature.LABEL_DETECTION] operation =", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "All Rights Reserved. # # Licensed under the Apache License,", "2.0 (the \"License\"); # you may not use this file", "file except in compliance with the License. # You may", "agreed to in writing, software # distributed under the License", "Unless required by applicable law or agreed to in writing,", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "permissions and # limitations under the License. \"\"\"This application demonstrates", "in enumerate(segment_labels): print('Video label description: {}'.format( segment_label.entity.description)) for category_entity in", "videointelligence video_client = videointelligence.VideoIntelligenceServiceClient() features = [videointelligence.enums.Feature.LABEL_DETECTION] operation = video_client.annotate_video(", "distributed under the License is distributed on an \"AS IS\"", "Inc. All Rights Reserved. 
# # Licensed under the Apache", "segment_labels = result.annotation_results[0].segment_label_annotations for i, segment_label in enumerate(segment_labels): print('Video label", "= result.annotation_results[0].segment_label_annotations for i, segment_label in enumerate(segment_labels): print('Video label description:", "enumerate(segment_labels): print('Video label description: {}'.format( segment_label.entity.description)) for category_entity in segment_label.category_entities:", "License. \"\"\"This application demonstrates label detection on a demo video", "the specific language governing permissions and # limitations under the", "a demo video using the Google Cloud API. Usage: python", "videointelligence.VideoIntelligenceServiceClient() features = [videointelligence.enums.Feature.LABEL_DETECTION] operation = video_client.annotate_video( 'gs://demomaker/cat.mp4', features=features) print('\\nProcessing", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "video for label annotations:') result = operation.result(timeout=120) print('\\nFinished processing.') #", "operation.result(timeout=120) print('\\nFinished processing.') # first result is retrieved because a", "to {}s'.format(start_time, end_time) confidence = segment.confidence print('\\tSegment {}: {}'.format(i, positions))", "{}'.format( category_entity.description)) for i, segment in enumerate(segment_label.segments): start_time = (segment.segment.start_time_offset.seconds", "positions)) print('\\tConfidence: {}'.format(confidence)) print('\\n') # [END video_quickstart] if __name__ ==", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "except in compliance with the License. 
# You may obtain", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "is retrieved because a single video was processed segment_labels =", "retrieved because a single video was processed segment_labels = result.annotation_results[0].segment_label_annotations", "= operation.result(timeout=120) print('\\nFinished processing.') # first result is retrieved because", "Google Cloud API. Usage: python quickstart.py \"\"\" def run_quickstart(): #", "description: {}'.format( category_entity.description)) for i, segment in enumerate(segment_label.segments): start_time =", "detection on a demo video using the Google Cloud API.", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "+ segment.segment.end_time_offset.nanos / 1e9) positions = '{}s to {}s'.format(start_time, end_time)", "not use this file except in compliance with the License.", "Usage: python quickstart.py \"\"\" def run_quickstart(): # [START video_quickstart] from", "segment in enumerate(segment_label.segments): start_time = (segment.segment.start_time_offset.seconds + segment.segment.start_time_offset.nanos / 1e9)", "[videointelligence.enums.Feature.LABEL_DETECTION] operation = video_client.annotate_video( 'gs://demomaker/cat.mp4', features=features) print('\\nProcessing video for label", "writing, software # distributed under the License is distributed on", "segment.segment.start_time_offset.nanos / 1e9) end_time = (segment.segment.end_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9)", "in writing, software # distributed under the License is distributed", "{}'.format(confidence)) print('\\n') # [END video_quickstart] if __name__ == '__main__': run_quickstart()", "Google Inc. All Rights Reserved. 
# # Licensed under the", "segment_label.entity.description)) for category_entity in segment_label.category_entities: print('\\tLabel category description: {}'.format( category_entity.description))", "you may not use this file except in compliance with", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "under the License. \"\"\"This application demonstrates label detection on a", "{}: {}'.format(i, positions)) print('\\tConfidence: {}'.format(confidence)) print('\\n') # [END video_quickstart] if", "use this file except in compliance with the License. #", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "\"\"\" def run_quickstart(): # [START video_quickstart] from google.cloud import videointelligence", "+ segment.segment.start_time_offset.nanos / 1e9) end_time = (segment.segment.end_time_offset.seconds + segment.segment.end_time_offset.nanos /", "= video_client.annotate_video( 'gs://demomaker/cat.mp4', features=features) print('\\nProcessing video for label annotations:') result", "processing.') # first result is retrieved because a single video", "CONDITIONS OF ANY KIND, either express or implied. # See", "start_time = (segment.segment.start_time_offset.seconds + segment.segment.start_time_offset.nanos / 1e9) end_time = (segment.segment.end_time_offset.seconds", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "Cloud API. Usage: python quickstart.py \"\"\" def run_quickstart(): # [START", "# first result is retrieved because a single video was", "segment_label in enumerate(segment_labels): print('Video label description: {}'.format( segment_label.entity.description)) for category_entity", "was processed segment_labels = result.annotation_results[0].segment_label_annotations for i, segment_label in enumerate(segment_labels):", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "Rights Reserved. 
# # Licensed under the Apache License, Version", "License. # You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License, Version 2.0 (the \"License\"); # you may not use", "# You may obtain a copy of the License at", "demonstrates label detection on a demo video using the Google", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "in enumerate(segment_label.segments): start_time = (segment.segment.start_time_offset.seconds + segment.segment.start_time_offset.nanos / 1e9) end_time", "1e9) positions = '{}s to {}s'.format(start_time, end_time) confidence = segment.confidence", "governing permissions and # limitations under the License. \"\"\"This application", "end_time) confidence = segment.confidence print('\\tSegment {}: {}'.format(i, positions)) print('\\tConfidence: {}'.format(confidence))", "python quickstart.py \"\"\" def run_quickstart(): # [START video_quickstart] from google.cloud", "under the License is distributed on an \"AS IS\" BASIS,", "= (segment.segment.end_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9) positions = '{}s to", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "'{}s to {}s'.format(start_time, end_time) confidence = segment.confidence print('\\tSegment {}: {}'.format(i,", "= segment.confidence print('\\tSegment {}: {}'.format(i, positions)) print('\\tConfidence: {}'.format(confidence)) print('\\n') #", "License for the specific language governing permissions and # limitations", "{}'.format( segment_label.entity.description)) for category_entity in segment_label.category_entities: print('\\tLabel category description: {}'.format(", "processed segment_labels = result.annotation_results[0].segment_label_annotations for i, segment_label in enumerate(segment_labels): print('Video", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required 
by", "because a single video was processed segment_labels = result.annotation_results[0].segment_label_annotations for", "Reserved. # # Licensed under the Apache License, Version 2.0", "[START video_quickstart] from google.cloud import videointelligence video_client = videointelligence.VideoIntelligenceServiceClient() features", "operation = video_client.annotate_video( 'gs://demomaker/cat.mp4', features=features) print('\\nProcessing video for label annotations:')", "for i, segment in enumerate(segment_label.segments): start_time = (segment.segment.start_time_offset.seconds + segment.segment.start_time_offset.nanos", "category_entity in segment_label.category_entities: print('\\tLabel category description: {}'.format( category_entity.description)) for i,", "the License. \"\"\"This application demonstrates label detection on a demo", "from google.cloud import videointelligence video_client = videointelligence.VideoIntelligenceServiceClient() features = [videointelligence.enums.Feature.LABEL_DETECTION]", "the License for the specific language governing permissions and #", "/ 1e9) end_time = (segment.segment.end_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9) positions", "features=features) print('\\nProcessing video for label annotations:') result = operation.result(timeout=120) print('\\nFinished", "(the \"License\"); # you may not use this file except", "Apache License, Version 2.0 (the \"License\"); # you may not", "# you may not use this file except in compliance", "result.annotation_results[0].segment_label_annotations for i, segment_label in enumerate(segment_labels): print('Video label description: {}'.format(", "= (segment.segment.start_time_offset.seconds + segment.segment.start_time_offset.nanos / 1e9) end_time = (segment.segment.end_time_offset.seconds +", "either express or implied. # See the License for the", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "segment_label.category_entities: print('\\tLabel category description: {}'.format( category_entity.description)) for i, segment in", "1e9) end_time = (segment.segment.end_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9) positions =", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "the License is distributed on an \"AS IS\" BASIS, #", "'gs://demomaker/cat.mp4', features=features) print('\\nProcessing video for label annotations:') result = operation.result(timeout=120)", "/ 1e9) positions = '{}s to {}s'.format(start_time, end_time) confidence =", "in compliance with the License. # You may obtain a", "features = [videointelligence.enums.Feature.LABEL_DETECTION] operation = video_client.annotate_video( 'gs://demomaker/cat.mp4', features=features) print('\\nProcessing video", "software # distributed under the License is distributed on an", "{}s'.format(start_time, end_time) confidence = segment.confidence print('\\tSegment {}: {}'.format(i, positions)) print('\\tConfidence:", "API. Usage: python quickstart.py \"\"\" def run_quickstart(): # [START video_quickstart]", "= '{}s to {}s'.format(start_time, end_time) confidence = segment.confidence print('\\tSegment {}:", "# # Unless required by applicable law or agreed to", "print('\\nFinished processing.') # first result is retrieved because a single", "i, segment_label in enumerate(segment_labels): print('Video label description: {}'.format( segment_label.entity.description)) for", "segment.confidence print('\\tSegment {}: {}'.format(i, positions)) print('\\tConfidence: {}'.format(confidence)) print('\\n') # [END", "for i, segment_label in enumerate(segment_labels): print('Video label description: {}'.format( segment_label.entity.description))", "the Google Cloud API. Usage: python quickstart.py \"\"\" def run_quickstart():", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "demo video using the Google Cloud API. 
Usage: python quickstart.py", "on a demo video using the Google Cloud API. Usage:", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "def run_quickstart(): # [START video_quickstart] from google.cloud import videointelligence video_client", "limitations under the License. \"\"\"This application demonstrates label detection on", "Version 2.0 (the \"License\"); # you may not use this", "for label annotations:') result = operation.result(timeout=120) print('\\nFinished processing.') # first", "label annotations:') result = operation.result(timeout=120) print('\\nFinished processing.') # first result", "{}'.format(i, positions)) print('\\tConfidence: {}'.format(confidence)) print('\\n') # [END video_quickstart] if __name__", "Copyright 2017 Google Inc. All Rights Reserved. # # Licensed", "for category_entity in segment_label.category_entities: print('\\tLabel category description: {}'.format( category_entity.description)) for", "end_time = (segment.segment.end_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9) positions = '{}s", "law or agreed to in writing, software # distributed under", "= [videointelligence.enums.Feature.LABEL_DETECTION] operation = video_client.annotate_video( 'gs://demomaker/cat.mp4', features=features) print('\\nProcessing video for", "label description: {}'.format( segment_label.entity.description)) for category_entity in segment_label.category_entities: print('\\tLabel category", "video_client = videointelligence.VideoIntelligenceServiceClient() features = [videointelligence.enums.Feature.LABEL_DETECTION] operation = video_client.annotate_video( 'gs://demomaker/cat.mp4',", "video_client.annotate_video( 'gs://demomaker/cat.mp4', features=features) print('\\nProcessing video for label annotations:') result =", "language governing permissions and # limitations under the License. 
\"\"\"This", "\"\"\"This application demonstrates label detection on a demo video using", "run_quickstart(): # [START video_quickstart] from google.cloud import videointelligence video_client =", "implied. # See the License for the specific language governing", "and # limitations under the License. \"\"\"This application demonstrates label", "result is retrieved because a single video was processed segment_labels", "category_entity.description)) for i, segment in enumerate(segment_label.segments): start_time = (segment.segment.start_time_offset.seconds +", "segment.segment.end_time_offset.nanos / 1e9) positions = '{}s to {}s'.format(start_time, end_time) confidence", "under the Apache License, Version 2.0 (the \"License\"); # you", "\"License\"); # you may not use this file except in", "<reponame>nasirdec/GCP-AppEngine-Example #!/usr/bin/env python # Copyright 2017 Google Inc. All Rights", "video using the Google Cloud API. Usage: python quickstart.py \"\"\"", "print('Video label description: {}'.format( segment_label.entity.description)) for category_entity in segment_label.category_entities: print('\\tLabel", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "single video was processed segment_labels = result.annotation_results[0].segment_label_annotations for i, segment_label", "confidence = segment.confidence print('\\tSegment {}: {}'.format(i, positions)) print('\\tConfidence: {}'.format(confidence)) print('\\n')", "python # Copyright 2017 Google Inc. All Rights Reserved. #", "(segment.segment.start_time_offset.seconds + segment.segment.start_time_offset.nanos / 1e9) end_time = (segment.segment.end_time_offset.seconds + segment.segment.end_time_offset.nanos", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. 
# See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "label detection on a demo video using the Google Cloud", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "google.cloud import videointelligence video_client = videointelligence.VideoIntelligenceServiceClient() features = [videointelligence.enums.Feature.LABEL_DETECTION] operation", "enumerate(segment_label.segments): start_time = (segment.segment.start_time_offset.seconds + segment.segment.start_time_offset.nanos / 1e9) end_time =", "# Copyright 2017 Google Inc. All Rights Reserved. # #", "positions = '{}s to {}s'.format(start_time, end_time) confidence = segment.confidence print('\\tSegment", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "(segment.segment.end_time_offset.seconds + segment.segment.end_time_offset.nanos / 1e9) positions = '{}s to {}s'.format(start_time,", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "to in writing, software # distributed under the License is", "category description: {}'.format( category_entity.description)) for i, segment in enumerate(segment_label.segments): start_time", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "video was processed segment_labels = result.annotation_results[0].segment_label_annotations for i, segment_label in", "quickstart.py \"\"\" def run_quickstart(): # [START video_quickstart] from google.cloud import", "print('\\tLabel category description: {}'.format( category_entity.description)) for i, segment in enumerate(segment_label.segments):", "You may obtain a copy of the License at #", "may not 
use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "print('\\nProcessing video for label annotations:') result = operation.result(timeout=120) print('\\nFinished processing.')", "print('\\tSegment {}: {}'.format(i, positions)) print('\\tConfidence: {}'.format(confidence)) print('\\n') # [END video_quickstart]", "annotations:') result = operation.result(timeout=120) print('\\nFinished processing.') # first result is", "print('\\tConfidence: {}'.format(confidence)) print('\\n') # [END video_quickstart] if __name__ == '__main__':", "required by applicable law or agreed to in writing, software", "using the Google Cloud API. Usage: python quickstart.py \"\"\" def", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "video_quickstart] from google.cloud import videointelligence video_client = videointelligence.VideoIntelligenceServiceClient() features =", "with the License. # You may obtain a copy of", "this file except in compliance with the License. # You", "the Apache License, Version 2.0 (the \"License\"); # you may", "result = operation.result(timeout=120) print('\\nFinished processing.') # first result is retrieved", "i, segment in enumerate(segment_label.segments): start_time = (segment.segment.start_time_offset.seconds + segment.segment.start_time_offset.nanos /", "# [START video_quickstart] from google.cloud import videointelligence video_client = videointelligence.VideoIntelligenceServiceClient()", "description: {}'.format( segment_label.entity.description)) for category_entity in segment_label.category_entities: print('\\tLabel category description:", "a single video was processed segment_labels = result.annotation_results[0].segment_label_annotations for i,", "first result is retrieved because a single video was processed", "application demonstrates label detection on a demo video using the", "# limitations under the License. 
\"\"\"This application demonstrates label detection" ]
[ "Option (instrument, maturity_date, strike): return { **{ 'MatDt' : str(maturity_date)", "'__type' : 'equity' } ################################################# def Equity(symbol): return Instrument(symbol) #################################################", "return { '__symbol' : symbol, 'Sym' : symbol, 'SecTyp' :", "str(maturity_date) + 'T00:00:00.000-05:00', 'StrkPx' : str(int(strike)), 'SecTyp' : 'OPT', '__maturity'", "'CFI':'OP' }, **Option(instrument, maturity_date, strike) } x['__underlying'] = x['Sym'] x['__type']", ": symbol, 'SecTyp' : 'CS', '__type' : 'equity' } #################################################", "'call' x['__symbol'] = utils.option_format( symbol = x['Sym'], exp_date = x['__maturity'],", "Put (instrument, maturity_date, strike): # Let Option do some lifting", "'CS', '__type' : 'equity' } ################################################# def Equity(symbol): return Instrument(symbol)", "= str(symbol).upper() return { '__symbol' : symbol, 'Sym' : symbol,", "symbol, 'SecTyp' : 'CS', '__type' : 'equity' } ################################################# def", "strike) } x['__underlying'] = x['Sym'] x['__type'] = 'put' x['__symbol'] =", "maturity_date, strike): # Let Option do some lifting x =", ": str(maturity_date), '__strike' : str(int(strike)) }, **instrument } ################################################# def", "**Option(instrument, maturity_date, strike) } x['__underlying'] = x['Sym'] x['__type'] = 'call'", "\"\"\" INSTRUMENT \"\"\" ################################################# def Instrument(symbol): symbol = str(symbol).upper() return", ": 'OPT', '__maturity' : str(maturity_date), '__strike' : str(int(strike)) }, **instrument", "'__symbol' : symbol, 'Sym' : symbol, 'SecTyp' : 'CS', '__type'", ": 'CS', '__type' : 'equity' } ################################################# def Equity(symbol): return", "+ 'T00:00:00.000-05:00', 'StrkPx' : str(int(strike)), 'SecTyp' : 'OPT', '__maturity' :", "utils.option_format( symbol = 
x['Sym'], exp_date = x['__maturity'], strike = x['__strike'],", "= { **{ 'CFI':'OP' }, **Option(instrument, maturity_date, strike) } x['__underlying']", "x['__type'] = 'put' x['__symbol'] = utils.option_format( symbol = x['Sym'], exp_date", "\"\"\" ################################################# def Instrument(symbol): symbol = str(symbol).upper() return { '__symbol'", "= x['Sym'] x['__type'] = 'call' x['__symbol'] = utils.option_format( symbol =", "'__strike' : str(int(strike)) }, **instrument } ################################################# def Call (instrument,", "strike) } x['__underlying'] = x['Sym'] x['__type'] = 'call' x['__symbol'] =", "'put' x['__symbol'] = utils.option_format( symbol = x['Sym'], exp_date = x['__maturity'],", "'equity' } ################################################# def Equity(symbol): return Instrument(symbol) ################################################# def Option", "**{ 'MatDt' : str(maturity_date) + 'T00:00:00.000-05:00', 'StrkPx' : str(int(strike)), 'SecTyp'", ") return x ################################################# def Put (instrument, maturity_date, strike): #", "= 'C' ) return x ################################################# def Put (instrument, maturity_date,", "= 'put' x['__symbol'] = utils.option_format( symbol = x['Sym'], exp_date =", "x ################################################# def Put (instrument, maturity_date, strike): # Let Option", "'SecTyp' : 'CS', '__type' : 'equity' } ################################################# def Equity(symbol):", "exp_date = x['__maturity'], strike = x['__strike'], direction = 'P' )", "some lifting x = { **{ 'CFI':'OP' }, **Option(instrument, maturity_date,", "{ '__symbol' : symbol, 'Sym' : symbol, 'SecTyp' : 'CS',", "strike): # Let Option do some lifting x = {", "'__maturity' : str(maturity_date), '__strike' : str(int(strike)) }, **instrument } #################################################", "x['__maturity'], strike = x['__strike'], direction = 'C' ) return 
x", "Call (instrument, maturity_date, strike): # Let Option do some lifting", "'T00:00:00.000-05:00', 'StrkPx' : str(int(strike)), 'SecTyp' : 'OPT', '__maturity' : str(maturity_date),", "= x['__maturity'], strike = x['__strike'], direction = 'C' ) return", "str(int(strike)) }, **instrument } ################################################# def Call (instrument, maturity_date, strike):", "Option do some lifting x = { **{ 'CFI':'OC' },", "def Put (instrument, maturity_date, strike): # Let Option do some", "x = { **{ 'CFI':'OP' }, **Option(instrument, maturity_date, strike) }", "} x['__underlying'] = x['Sym'] x['__type'] = 'call' x['__symbol'] = utils.option_format(", "x['__strike'], direction = 'C' ) return x ################################################# def Put", "def Equity(symbol): return Instrument(symbol) ################################################# def Option (instrument, maturity_date, strike):", "################################################# def Put (instrument, maturity_date, strike): # Let Option do", "}, **instrument } ################################################# def Call (instrument, maturity_date, strike): #", "def Instrument(symbol): symbol = str(symbol).upper() return { '__symbol' : symbol,", "Instrument(symbol): symbol = str(symbol).upper() return { '__symbol' : symbol, 'Sym'", "exp_date = x['__maturity'], strike = x['__strike'], direction = 'C' )", "**Option(instrument, maturity_date, strike) } x['__underlying'] = x['Sym'] x['__type'] = 'put'", "} ################################################# def Equity(symbol): return Instrument(symbol) ################################################# def Option (instrument,", "= 'call' x['__symbol'] = utils.option_format( symbol = x['Sym'], exp_date =", "return x ################################################# def Put (instrument, maturity_date, strike): # Let", "from . 
import utils ################################################# \"\"\" INSTRUMENT \"\"\" ################################################# def", "def Call (instrument, maturity_date, strike): # Let Option do some", "= { **{ 'CFI':'OC' }, **Option(instrument, maturity_date, strike) } x['__underlying']", "x['__symbol'] = utils.option_format( symbol = x['Sym'], exp_date = x['__maturity'], strike", "{ **{ 'MatDt' : str(maturity_date) + 'T00:00:00.000-05:00', 'StrkPx' : str(int(strike)),", "**{ 'CFI':'OC' }, **Option(instrument, maturity_date, strike) } x['__underlying'] = x['Sym']", ": str(int(strike)) }, **instrument } ################################################# def Call (instrument, maturity_date,", "utils ################################################# \"\"\" INSTRUMENT \"\"\" ################################################# def Instrument(symbol): symbol =", "{ **{ 'CFI':'OC' }, **Option(instrument, maturity_date, strike) } x['__underlying'] =", "maturity_date, strike) } x['__underlying'] = x['Sym'] x['__type'] = 'put' x['__symbol']", "################################################# def Option (instrument, maturity_date, strike): return { **{ 'MatDt'", "'StrkPx' : str(int(strike)), 'SecTyp' : 'OPT', '__maturity' : str(maturity_date), '__strike'", "################################################# def Equity(symbol): return Instrument(symbol) ################################################# def Option (instrument, maturity_date,", "def Option (instrument, maturity_date, strike): return { **{ 'MatDt' :", ": symbol, 'Sym' : symbol, 'SecTyp' : 'CS', '__type' :", "do some lifting x = { **{ 'CFI':'OC' }, **Option(instrument,", "x['Sym'] x['__type'] = 'put' x['__symbol'] = utils.option_format( symbol = x['Sym'],", "'SecTyp' : 'OPT', '__maturity' : str(maturity_date), '__strike' : str(int(strike)) },", ". 
import utils ################################################# \"\"\" INSTRUMENT \"\"\" ################################################# def Instrument(symbol):", "str(maturity_date), '__strike' : str(int(strike)) }, **instrument } ################################################# def Call", ": str(int(strike)), 'SecTyp' : 'OPT', '__maturity' : str(maturity_date), '__strike' :", "################################################# def Call (instrument, maturity_date, strike): # Let Option do", "'OPT', '__maturity' : str(maturity_date), '__strike' : str(int(strike)) }, **instrument }", "x['Sym'] x['__type'] = 'call' x['__symbol'] = utils.option_format( symbol = x['Sym'],", "symbol = x['Sym'], exp_date = x['__maturity'], strike = x['__strike'], direction", "(instrument, maturity_date, strike): return { **{ 'MatDt' : str(maturity_date) +", "x['__type'] = 'call' x['__symbol'] = utils.option_format( symbol = x['Sym'], exp_date", "x['Sym'], exp_date = x['__maturity'], strike = x['__strike'], direction = 'C'", "{ **{ 'CFI':'OP' }, **Option(instrument, maturity_date, strike) } x['__underlying'] =", ": str(maturity_date) + 'T00:00:00.000-05:00', 'StrkPx' : str(int(strike)), 'SecTyp' : 'OPT',", "= x['Sym'] x['__type'] = 'put' x['__symbol'] = utils.option_format( symbol =", "**instrument } ################################################# def Call (instrument, maturity_date, strike): # Let", "= utils.option_format( symbol = x['Sym'], exp_date = x['__maturity'], strike =", "do some lifting x = { **{ 'CFI':'OP' }, **Option(instrument,", "direction = 'C' ) return x ################################################# def Put (instrument,", "= x['Sym'], exp_date = x['__maturity'], strike = x['__strike'], direction =", "x['__underlying'] = x['Sym'] x['__type'] = 'put' x['__symbol'] = utils.option_format( symbol", "'C' ) return x ################################################# def Put (instrument, maturity_date, strike):", "symbol, 'Sym' : symbol, 'SecTyp' : 'CS', '__type' : 
'equity'", "x['__underlying'] = x['Sym'] x['__type'] = 'call' x['__symbol'] = utils.option_format( symbol", "Equity(symbol): return Instrument(symbol) ################################################# def Option (instrument, maturity_date, strike): return", "return { **{ 'MatDt' : str(maturity_date) + 'T00:00:00.000-05:00', 'StrkPx' :", "} ################################################# def Call (instrument, maturity_date, strike): # Let Option", "strike): return { **{ 'MatDt' : str(maturity_date) + 'T00:00:00.000-05:00', 'StrkPx'", "return Instrument(symbol) ################################################# def Option (instrument, maturity_date, strike): return {", "Instrument(symbol) ################################################# def Option (instrument, maturity_date, strike): return { **{", "Let Option do some lifting x = { **{ 'CFI':'OP'", "maturity_date, strike): return { **{ 'MatDt' : str(maturity_date) + 'T00:00:00.000-05:00',", "lifting x = { **{ 'CFI':'OC' }, **Option(instrument, maturity_date, strike)", "}, **Option(instrument, maturity_date, strike) } x['__underlying'] = x['Sym'] x['__type'] =", "'MatDt' : str(maturity_date) + 'T00:00:00.000-05:00', 'StrkPx' : str(int(strike)), 'SecTyp' :", "lifting x = { **{ 'CFI':'OP' }, **Option(instrument, maturity_date, strike)", "################################################# \"\"\" INSTRUMENT \"\"\" ################################################# def Instrument(symbol): symbol = str(symbol).upper()", "x['Sym'], exp_date = x['__maturity'], strike = x['__strike'], direction = 'P'", "= x['__strike'], direction = 'C' ) return x ################################################# def", "import utils ################################################# \"\"\" INSTRUMENT \"\"\" ################################################# def Instrument(symbol): symbol", "INSTRUMENT \"\"\" ################################################# def Instrument(symbol): symbol = str(symbol).upper() return {", "(instrument, 
maturity_date, strike): # Let Option do some lifting x", "maturity_date, strike) } x['__underlying'] = x['Sym'] x['__type'] = 'call' x['__symbol']", "str(int(strike)), 'SecTyp' : 'OPT', '__maturity' : str(maturity_date), '__strike' : str(int(strike))", "str(symbol).upper() return { '__symbol' : symbol, 'Sym' : symbol, 'SecTyp'", "# Let Option do some lifting x = { **{", "= x['__maturity'], strike = x['__strike'], direction = 'P' ) return", "'CFI':'OC' }, **Option(instrument, maturity_date, strike) } x['__underlying'] = x['Sym'] x['__type']", "} x['__underlying'] = x['Sym'] x['__type'] = 'put' x['__symbol'] = utils.option_format(", "x['__maturity'], strike = x['__strike'], direction = 'P' ) return x", "################################################# def Instrument(symbol): symbol = str(symbol).upper() return { '__symbol' :", "symbol = str(symbol).upper() return { '__symbol' : symbol, 'Sym' :", "**{ 'CFI':'OP' }, **Option(instrument, maturity_date, strike) } x['__underlying'] = x['Sym']", "some lifting x = { **{ 'CFI':'OC' }, **Option(instrument, maturity_date,", ": 'equity' } ################################################# def Equity(symbol): return Instrument(symbol) ################################################# def", "'Sym' : symbol, 'SecTyp' : 'CS', '__type' : 'equity' }", "Let Option do some lifting x = { **{ 'CFI':'OC'", "strike = x['__strike'], direction = 'C' ) return x #################################################", "x = { **{ 'CFI':'OC' }, **Option(instrument, maturity_date, strike) }", "Option do some lifting x = { **{ 'CFI':'OP' }," ]
[ "a placeholder for external resources that acceptance test might require.\"\"\"", "# import pytest pytest_plugins = (\"source_acceptance_test.plugin\",) @pytest.fixture(scope=\"session\", autouse=True) def connector_setup():", "<gh_stars>10-100 # # Copyright (c) 2022 Airbyte, Inc., all rights", "setup test dependencies if needed. otherwise remove the TODO comments", "\"\"\"This fixture is a placeholder for external resources that acceptance", "pytest pytest_plugins = (\"source_acceptance_test.plugin\",) @pytest.fixture(scope=\"session\", autouse=True) def connector_setup(): \"\"\"This fixture", "(c) 2022 Airbyte, Inc., all rights reserved. # import pytest", "otherwise remove the TODO comments yield # TODO: clean up", "remove the TODO comments yield # TODO: clean up test", "the TODO comments yield # TODO: clean up test dependencies", "TODO: setup test dependencies if needed. otherwise remove the TODO", "= (\"source_acceptance_test.plugin\",) @pytest.fixture(scope=\"session\", autouse=True) def connector_setup(): \"\"\"This fixture is a", "external resources that acceptance test might require.\"\"\" # TODO: setup", "resources that acceptance test might require.\"\"\" # TODO: setup test", "import pytest pytest_plugins = (\"source_acceptance_test.plugin\",) @pytest.fixture(scope=\"session\", autouse=True) def connector_setup(): \"\"\"This", "@pytest.fixture(scope=\"session\", autouse=True) def connector_setup(): \"\"\"This fixture is a placeholder for", "needed. otherwise remove the TODO comments yield # TODO: clean", "is a placeholder for external resources that acceptance test might", "if needed. otherwise remove the TODO comments yield # TODO:", "require.\"\"\" # TODO: setup test dependencies if needed. otherwise remove", "might require.\"\"\" # TODO: setup test dependencies if needed. otherwise", "test dependencies if needed. 
otherwise remove the TODO comments yield", "fixture is a placeholder for external resources that acceptance test", "Copyright (c) 2022 Airbyte, Inc., all rights reserved. # import", "2022 Airbyte, Inc., all rights reserved. # import pytest pytest_plugins", "acceptance test might require.\"\"\" # TODO: setup test dependencies if", "rights reserved. # import pytest pytest_plugins = (\"source_acceptance_test.plugin\",) @pytest.fixture(scope=\"session\", autouse=True)", "for external resources that acceptance test might require.\"\"\" # TODO:", "Airbyte, Inc., all rights reserved. # import pytest pytest_plugins =", "autouse=True) def connector_setup(): \"\"\"This fixture is a placeholder for external", "# TODO: setup test dependencies if needed. otherwise remove the", "all rights reserved. # import pytest pytest_plugins = (\"source_acceptance_test.plugin\",) @pytest.fixture(scope=\"session\",", "pytest_plugins = (\"source_acceptance_test.plugin\",) @pytest.fixture(scope=\"session\", autouse=True) def connector_setup(): \"\"\"This fixture is", "that acceptance test might require.\"\"\" # TODO: setup test dependencies", "Inc., all rights reserved. # import pytest pytest_plugins = (\"source_acceptance_test.plugin\",)", "dependencies if needed. otherwise remove the TODO comments yield #", "(\"source_acceptance_test.plugin\",) @pytest.fixture(scope=\"session\", autouse=True) def connector_setup(): \"\"\"This fixture is a placeholder", "reserved. # import pytest pytest_plugins = (\"source_acceptance_test.plugin\",) @pytest.fixture(scope=\"session\", autouse=True) def", "placeholder for external resources that acceptance test might require.\"\"\" #", "connector_setup(): \"\"\"This fixture is a placeholder for external resources that", "# Copyright (c) 2022 Airbyte, Inc., all rights reserved. 
#", "# # Copyright (c) 2022 Airbyte, Inc., all rights reserved.", "def connector_setup(): \"\"\"This fixture is a placeholder for external resources", "test might require.\"\"\" # TODO: setup test dependencies if needed." ]
[ "# -*- coding: utf-8 -*- \"\"\" @Time : 2021/10/9 17:51", "-*- coding: utf-8 -*- \"\"\" @Time : 2021/10/9 17:51 @Auth", "@Time : 2021/10/9 17:51 @Auth : 潇湘 @File :__init__.py.py @IDE", ": 潇湘 @File :__init__.py.py @IDE :PyCharm @QQ : 810400085 \"\"\"", "2021/10/9 17:51 @Auth : 潇湘 @File :__init__.py.py @IDE :PyCharm @QQ", ": 2021/10/9 17:51 @Auth : 潇湘 @File :__init__.py.py @IDE :PyCharm", "17:51 @Auth : 潇湘 @File :__init__.py.py @IDE :PyCharm @QQ :", "<filename>ddt/__init__.py<gh_stars>0 # -*- coding: utf-8 -*- \"\"\" @Time : 2021/10/9", "-*- \"\"\" @Time : 2021/10/9 17:51 @Auth : 潇湘 @File", "\"\"\" @Time : 2021/10/9 17:51 @Auth : 潇湘 @File :__init__.py.py", "coding: utf-8 -*- \"\"\" @Time : 2021/10/9 17:51 @Auth :", "utf-8 -*- \"\"\" @Time : 2021/10/9 17:51 @Auth : 潇湘", "@Auth : 潇湘 @File :__init__.py.py @IDE :PyCharm @QQ : 810400085" ]
[ "determine lags_exog. If False, the values of all exogenous variables", "used (inclusive). Otherwise a list of integers with lags is", "import RegressionModel from sklearn.linear_model import LinearRegression logger = get_logger(__name__) class", "kwargs super().__init__( lags=lags, lags_exog=lags_exog, model=LinearRegression(**kwargs) ) def __str__(self): return 'LinearRegression(lags={},", "(inclusive). Otherwise a list of integers with lags is required.", "import Union from ..logging import get_logger from .regression_model import RegressionModel", "a list of integers with lags is required. If True", "list] Number of lagged target values used to predict the", "**kwargs): \"\"\" Simple wrapper for the linear regression model in", "Simple wrapper for the linear regression model in scikit-learn, LinearRegression().", "Union from ..logging import get_logger from .regression_model import RegressionModel from", "def __init__(self, lags: Union[int, list] = None, lags_exog: Union[int, list,", "at the current time `t`. This might lead to leakage", "time `t`. This might lead to leakage if for predictions", "of the exogenous variables at time `t` are not known.", "np import pandas as pd from typing import Union from", "LinearRegressionModel(RegressionModel): def __init__(self, lags: Union[int, list] = None, lags_exog: Union[int,", "values used to predict the next time step. If an", "integer is given the last `lags_exog` lags are used (inclusive).", "lead to leakage if for predictions the values of the", "If an integer is given the last `lags` lags are", "\"\"\" Standard Regression model ------------------------- \"\"\" import numpy as np", "from .regression_model import RegressionModel from sklearn.linear_model import LinearRegression logger =", "list of integers with lags is required. lags_exog : Union[int,", "exogenous values used to predict the next time step. 
If", "if for predictions the values of the exogenous variables at", "typing import Union from ..logging import get_logger from .regression_model import", "last `lags` lags are used (inclusive). Otherwise a list of", "------------------------- \"\"\" import numpy as np import pandas as pd", "import numpy as np import pandas as pd from typing", "as pd from typing import Union from ..logging import get_logger", "for the linear regression model in scikit-learn, LinearRegression(). Parameters ----------", "known. **kwargs Additional keyword arguments passed to `sklearn.linear_model.LinearRegression`. \"\"\" self.kwargs", "Number of lagged exogenous values used to predict the next", "time step. If an integer is given the last `lags_exog`", "an integer is given the last `lags_exog` lags are used", "import LinearRegression logger = get_logger(__name__) class LinearRegressionModel(RegressionModel): def __init__(self, lags:", "of lagged exogenous values used to predict the next time", "values of all exogenous variables at the current time `t`.", "leakage if for predictions the values of the exogenous variables", "not known. **kwargs Additional keyword arguments passed to `sklearn.linear_model.LinearRegression`. \"\"\"", "to `sklearn.linear_model.LinearRegression`. \"\"\" self.kwargs = kwargs super().__init__( lags=lags, lags_exog=lags_exog, model=LinearRegression(**kwargs)", "\"\"\" import numpy as np import pandas as pd from", "from ..logging import get_logger from .regression_model import RegressionModel from sklearn.linear_model", "Union[int, list] Number of lagged target values used to predict", "model ------------------------- \"\"\" import numpy as np import pandas as", "lags : Union[int, list] Number of lagged target values used", "None, lags_exog: Union[int, list, bool] = None, **kwargs): \"\"\" Simple", "is required. If True `lags` will be used to determine", "variables at time `t` are not known. 
**kwargs Additional keyword", "numpy as np import pandas as pd from typing import", "get_logger from .regression_model import RegressionModel from sklearn.linear_model import LinearRegression logger", "regression model in scikit-learn, LinearRegression(). Parameters ---------- lags : Union[int,", "of all exogenous variables at the current time `t`. This", "RegressionModel from sklearn.linear_model import LinearRegression logger = get_logger(__name__) class LinearRegressionModel(RegressionModel):", ": Union[int, list] Number of lagged target values used to", ": Union[int, list, bool] Number of lagged exogenous values used", "import pandas as pd from typing import Union from ..logging", "LinearRegression logger = get_logger(__name__) class LinearRegressionModel(RegressionModel): def __init__(self, lags: Union[int,", ".regression_model import RegressionModel from sklearn.linear_model import LinearRegression logger = get_logger(__name__)", "lags: Union[int, list] = None, lags_exog: Union[int, list, bool] =", "from typing import Union from ..logging import get_logger from .regression_model", "lags_exog. If False, the values of all exogenous variables at", "False, the values of all exogenous variables at the current", "exogenous variables at time `t` are not known. **kwargs Additional", "bool] = None, **kwargs): \"\"\" Simple wrapper for the linear", "Additional keyword arguments passed to `sklearn.linear_model.LinearRegression`. \"\"\" self.kwargs = kwargs", "the last `lags_exog` lags are used (inclusive). Otherwise a list", "be used to determine lags_exog. If False, the values of", "`t`. This might lead to leakage if for predictions the", "wrapper for the linear regression model in scikit-learn, LinearRegression(). Parameters", "bool] Number of lagged exogenous values used to predict the", "values of the exogenous variables at time `t` are not", "linear regression model in scikit-learn, LinearRegression(). 
Parameters ---------- lags :", "`lags` lags are used (inclusive). Otherwise a list of integers", "import get_logger from .regression_model import RegressionModel from sklearn.linear_model import LinearRegression", "scikit-learn, LinearRegression(). Parameters ---------- lags : Union[int, list] Number of", "all exogenous variables at the current time `t`. This might", "This might lead to leakage if for predictions the values", "__init__(self, lags: Union[int, list] = None, lags_exog: Union[int, list, bool]", "lags_exog: Union[int, list, bool] = None, **kwargs): \"\"\" Simple wrapper", "from sklearn.linear_model import LinearRegression logger = get_logger(__name__) class LinearRegressionModel(RegressionModel): def", "lagged target values used to predict the next time step.", "`sklearn.linear_model.LinearRegression`. \"\"\" self.kwargs = kwargs super().__init__( lags=lags, lags_exog=lags_exog, model=LinearRegression(**kwargs) )", "keyword arguments passed to `sklearn.linear_model.LinearRegression`. \"\"\" self.kwargs = kwargs super().__init__(", "to predict the next time step. If an integer is", "as np import pandas as pd from typing import Union", "pandas as pd from typing import Union from ..logging import", "pd from typing import Union from ..logging import get_logger from", "in scikit-learn, LinearRegression(). Parameters ---------- lags : Union[int, list] Number", "the next time step. If an integer is given the", "current time `t`. This might lead to leakage if for", "Union[int, list, bool] = None, **kwargs): \"\"\" Simple wrapper for", "next time step. If an integer is given the last", "with lags is required. If True `lags` will be used", "of integers with lags is required. lags_exog : Union[int, list,", "the current time `t`. This might lead to leakage if", "for predictions the values of the exogenous variables at time", "`lags` will be used to determine lags_exog. If False, the", "integers with lags is required. 
If True `lags` will be", "predictions the values of the exogenous variables at time `t`", "of integers with lags is required. If True `lags` will", "the last `lags` lags are used (inclusive). Otherwise a list", "`lags_exog` lags are used (inclusive). Otherwise a list of integers", "Otherwise a list of integers with lags is required. lags_exog", "might lead to leakage if for predictions the values of", "at time `t` are not known. **kwargs Additional keyword arguments", "a list of integers with lags is required. lags_exog :", "True `lags` will be used to determine lags_exog. If False,", "Union[int, list, bool] Number of lagged exogenous values used to", "Number of lagged target values used to predict the next", "target values used to predict the next time step. If", "given the last `lags_exog` lags are used (inclusive). Otherwise a", "= get_logger(__name__) class LinearRegressionModel(RegressionModel): def __init__(self, lags: Union[int, list] =", "passed to `sklearn.linear_model.LinearRegression`. \"\"\" self.kwargs = kwargs super().__init__( lags=lags, lags_exog=lags_exog,", "Standard Regression model ------------------------- \"\"\" import numpy as np import", "= None, **kwargs): \"\"\" Simple wrapper for the linear regression", "LinearRegression(). Parameters ---------- lags : Union[int, list] Number of lagged", "required. lags_exog : Union[int, list, bool] Number of lagged exogenous", "lags_exog : Union[int, list, bool] Number of lagged exogenous values", "If an integer is given the last `lags_exog` lags are", "lags are used (inclusive). Otherwise a list of integers with", "predict the next time step. If an integer is given", "If False, the values of all exogenous variables at the", "If True `lags` will be used to determine lags_exog. If", "list, bool] Number of lagged exogenous values used to predict", "---------- lags : Union[int, list] Number of lagged target values", "lags is required. 
lags_exog : Union[int, list, bool] Number of", "model in scikit-learn, LinearRegression(). Parameters ---------- lags : Union[int, list]", "the values of the exogenous variables at time `t` are", "sklearn.linear_model import LinearRegression logger = get_logger(__name__) class LinearRegressionModel(RegressionModel): def __init__(self,", "`t` are not known. **kwargs Additional keyword arguments passed to", "is given the last `lags_exog` lags are used (inclusive). Otherwise", "will be used to determine lags_exog. If False, the values", "logger = get_logger(__name__) class LinearRegressionModel(RegressionModel): def __init__(self, lags: Union[int, list]", "an integer is given the last `lags` lags are used", "self.kwargs = kwargs super().__init__( lags=lags, lags_exog=lags_exog, model=LinearRegression(**kwargs) ) def __str__(self):", "are used (inclusive). Otherwise a list of integers with lags", "Otherwise a list of integers with lags is required. If", "Parameters ---------- lags : Union[int, list] Number of lagged target", "**kwargs Additional keyword arguments passed to `sklearn.linear_model.LinearRegression`. \"\"\" self.kwargs =", "the exogenous variables at time `t` are not known. **kwargs", "class LinearRegressionModel(RegressionModel): def __init__(self, lags: Union[int, list] = None, lags_exog:", "of lagged target values used to predict the next time", "variables at the current time `t`. This might lead to", "list of integers with lags is required. If True `lags`", "..logging import get_logger from .regression_model import RegressionModel from sklearn.linear_model import", "integers with lags is required. lags_exog : Union[int, list, bool]", "integer is given the last `lags` lags are used (inclusive).", "the linear regression model in scikit-learn, LinearRegression(). Parameters ---------- lags", "given the last `lags` lags are used (inclusive). Otherwise a", "Union[int, list] = None, lags_exog: Union[int, list, bool] = None,", "are not known. 
**kwargs Additional keyword arguments passed to `sklearn.linear_model.LinearRegression`.", "step. If an integer is given the last `lags_exog` lags", "is required. lags_exog : Union[int, list, bool] Number of lagged", "\"\"\" Simple wrapper for the linear regression model in scikit-learn,", "to leakage if for predictions the values of the exogenous", "list] = None, lags_exog: Union[int, list, bool] = None, **kwargs):", "time step. If an integer is given the last `lags`", "time `t` are not known. **kwargs Additional keyword arguments passed", "= None, lags_exog: Union[int, list, bool] = None, **kwargs): \"\"\"", "required. If True `lags` will be used to determine lags_exog.", "the values of all exogenous variables at the current time", "used to predict the next time step. If an integer", "lagged exogenous values used to predict the next time step.", "None, **kwargs): \"\"\" Simple wrapper for the linear regression model", "get_logger(__name__) class LinearRegressionModel(RegressionModel): def __init__(self, lags: Union[int, list] = None,", "to determine lags_exog. If False, the values of all exogenous", "list, bool] = None, **kwargs): \"\"\" Simple wrapper for the", "= kwargs super().__init__( lags=lags, lags_exog=lags_exog, model=LinearRegression(**kwargs) ) def __str__(self): return", "lags is required. If True `lags` will be used to", "last `lags_exog` lags are used (inclusive). Otherwise a list of", "super().__init__( lags=lags, lags_exog=lags_exog, model=LinearRegression(**kwargs) ) def __str__(self): return 'LinearRegression(lags={}, lags_exog={})'.format(self.lags,", "\"\"\" self.kwargs = kwargs super().__init__( lags=lags, lags_exog=lags_exog, model=LinearRegression(**kwargs) ) def", "arguments passed to `sklearn.linear_model.LinearRegression`. \"\"\" self.kwargs = kwargs super().__init__( lags=lags,", "with lags is required. lags_exog : Union[int, list, bool] Number", "exogenous variables at the current time `t`. 
This might lead", "Regression model ------------------------- \"\"\" import numpy as np import pandas", "used to determine lags_exog. If False, the values of all", "step. If an integer is given the last `lags` lags", "lags=lags, lags_exog=lags_exog, model=LinearRegression(**kwargs) ) def __str__(self): return 'LinearRegression(lags={}, lags_exog={})'.format(self.lags, self.lags_exog)", "is given the last `lags` lags are used (inclusive). Otherwise" ]
[ "assert list(partition(3, range(9))) == [range(0, 3), range(3, 6), range(6, 9)]", "assert str(x) == 'localhost:5000/a/b/name:tag' x = parse_docker_image_reference('localhost:5000/a/b/name:tag@sha256:abc123') assert x.domain ==", "x.digest is None assert x.name() == 'hailgenetics/animage' assert str(x) ==", "test_partition_zero_empty(): assert list(partition(0, [])) == [] def test_partition_even_small(): assert list(partition(3,", "assert url_join('https://hail.is/path/to', 'file') == 'https://hail.is/path/to/file' assert url_join('https://hail.is/path/to/', 'file') == 'https://hail.is/path/to/file'", "assert x.name() == 'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name@sha256:abc123' x =", "list(partition(6, range(3))) == [range(0, 1), range(1, 2), range(2, 3), range(3,", "url_scheme('https://hail.is/path/to') == 'https' assert url_scheme('/path/to') == '' def test_url_and_params(): assert", "'localhost:5000' assert x.path == 'a/b/name' assert x.tag == 'tag' assert", "x.path == 'hail-vdc/batch-worker' assert x.tag == '123fds312' assert x.digest is", "'sha256:abc123' assert x.name() == 'name' assert str(x) == 'name@sha256:abc123' x", "x.path == 'a/b/name' assert x.tag == 'tag' assert x.digest ==", "parse_docker_image_reference('localhost:5000/animage') assert x.domain == 'localhost:5000' assert x.path == 'animage' assert", "str(x) == 'hailgenetics/animage' x = parse_docker_image_reference('localhost:5000/animage') assert x.domain == 'localhost:5000'", "parse_docker_image_reference('animage') assert x.domain is None assert x.path == 'animage' assert", "assert x.domain == 'us-docker.pkg.dev' assert x.path == 'my-project/my-repo/test-image' assert x.tag", "assert x.path == 'a/b/name' assert x.tag is None assert x.digest", "'c': 'd'}) def test_parse_docker_image_reference(): x = parse_docker_image_reference('animage') assert x.domain is", "url_join, url_scheme, url_and_params, parse_docker_image_reference) def test_partition_zero_empty(): assert 
list(partition(0, [])) ==", "x.name() == 'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name@sha256:abc123' x = parse_docker_image_reference('name@sha256:abc123')", "x.domain == 'localhost:5000' assert x.path == 'a/b/name' assert x.tag is", "is None assert x.name() == 'localhost:5000/animage' assert str(x) == 'localhost:5000/animage'", "assert x.path == 'hail-vdc/batch-worker' assert x.tag == '123fds312' assert x.digest", "range(3, 3)] def test_url_basename(): assert url_basename('/path/to/file') == 'file' assert url_basename('https://hail.is/path/to/file')", "assert str(x) == 'name@sha256:abc123' x = parse_docker_image_reference('gcr.io/hail-vdc/batch-worker:123fds312') assert x.domain ==", "x.digest is None assert x.name() == 'localhost:5000/a/b/name' assert str(x) ==", "3)] def test_partition_even_big(): assert list(partition(3, range(9))) == [range(0, 3), range(3,", "'localhost:5000' assert x.path == 'animage' assert x.tag is None assert", "is None assert x.name() == 'us-docker.pkg.dev/my-project/my-repo/test-image' assert str(x) == 'us-docker.pkg.dev/my-project/my-repo/test-image'", "def test_partition_zero_empty(): assert list(partition(0, [])) == [] def test_partition_even_small(): assert", "'hailgenetics' assert x.path == 'animage' assert x.tag is None assert", "3), range(3, 3), range(3, 3)] def test_url_basename(): assert url_basename('/path/to/file') ==", "test_url_and_params(): assert url_and_params('https://example.com/') == ('https://example.com/', {}) assert url_and_params('https://example.com/foo?') == ('https://example.com/foo',", "x.domain is None assert x.path == 'animage' assert x.tag is", "'a/b/name' assert x.tag == 'tag' assert x.digest is None assert", "== 'animage' assert str(x) == 'animage' x = parse_docker_image_reference('hailgenetics/animage') assert", "== [range(0, 1), range(1, 2), range(2, 3)] def test_partition_even_big(): assert", "== ('https://example.com/', {}) assert url_and_params('https://example.com/foo?') == 
('https://example.com/foo', {}) assert url_and_params('https://example.com/foo?a=b&c=d')", "is None assert x.digest == 'sha256:abc123' assert x.name() == 'localhost:5000/a/b/name'", "'123fds312' assert x.digest is None assert x.name() == 'gcr.io/hail-vdc/batch-worker' assert", "assert x.digest is None assert x.name() == 'hailgenetics/animage' assert str(x)", "assert str(x) == 'localhost:5000/a/b/name@sha256:abc123' x = parse_docker_image_reference('name@sha256:abc123') assert x.domain is", "'file') == '/path/to/file' assert url_join('/path/to/', '/absolute/file') == '/absolute/file' assert url_join('https://hail.is/path/to',", "range(1, 2), range(2, 3), range(3, 3), range(3, 3), range(3, 3)]", "'localhost:5000/animage' assert str(x) == 'localhost:5000/animage' x = parse_docker_image_reference('localhost:5000/a/b/name') assert x.domain", "9)] def test_partition_uneven_big(): assert list(partition(2, range(9))) == [range(0, 5), range(5,", "== 'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name:tag' x = parse_docker_image_reference('localhost:5000/a/b/name:tag@sha256:abc123') assert", "parse_docker_image_reference('localhost:5000/a/b/name:tag') assert x.domain == 'localhost:5000' assert x.path == 'a/b/name' assert", "1), range(1, 2), range(2, 3), range(3, 3), range(3, 3), range(3,", "'gcr.io/hail-vdc/batch-worker' assert str(x) == 'gcr.io/hail-vdc/batch-worker:123fds312' x = parse_docker_image_reference('us-docker.pkg.dev/my-project/my-repo/test-image') assert x.domain", "None assert x.name() == 'animage' assert str(x) == 'animage' x", "assert x.name() == 'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name' x =", "== 'localhost:5000' assert x.path == 'animage' assert x.tag is None", "range(2, 3), range(3, 3), range(3, 3), range(3, 3)] def test_url_basename():", "url_join('/path/to', 'file') == '/path/to/file' assert url_join('/path/to/', 'file') == '/path/to/file' assert", "is None assert x.digest is None assert x.name() == 
'localhost:5000/a/b/name'", "== 'localhost:5000/animage' assert str(x) == 'localhost:5000/animage' x = parse_docker_image_reference('localhost:5000/a/b/name') assert", "'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name:tag' x = parse_docker_image_reference('localhost:5000/a/b/name:tag@sha256:abc123') assert x.domain", "parse_docker_image_reference('us-docker.pkg.dev/my-project/my-repo/test-image') assert x.domain == 'us-docker.pkg.dev' assert x.path == 'my-project/my-repo/test-image' assert", "url_and_params('https://example.com/foo?') == ('https://example.com/foo', {}) assert url_and_params('https://example.com/foo?a=b&c=d') == ('https://example.com/foo', {'a': 'b',", "test_partition_even_small(): assert list(partition(3, range(3))) == [range(0, 1), range(1, 2), range(2,", "== 'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name@sha256:abc123' x = parse_docker_image_reference('name@sha256:abc123') assert", "x.name() == 'gcr.io/hail-vdc/batch-worker' assert str(x) == 'gcr.io/hail-vdc/batch-worker:123fds312' x = parse_docker_image_reference('us-docker.pkg.dev/my-project/my-repo/test-image')", "parse_docker_image_reference('localhost:5000/a/b/name@sha256:abc123') assert x.domain == 'localhost:5000' assert x.path == 'a/b/name' assert", "assert x.domain is None assert x.path == 'name' assert x.tag", "list(partition(2, range(9))) == [range(0, 5), range(5, 9)] def test_partition_toofew(): assert", "'/absolute/file') == 'https://hail.is/absolute/file' def test_url_scheme(): assert url_scheme('https://hail.is/path/to') == 'https' assert", "None assert x.digest is None assert x.name() == 'localhost:5000/animage' assert", "[range(0, 1), range(1, 2), range(2, 3)] def test_partition_even_big(): assert list(partition(3,", "x = parse_docker_image_reference('localhost:5000/a/b/name:tag') assert x.domain == 'localhost:5000' assert x.path ==", "def test_url_and_params(): assert url_and_params('https://example.com/') == ('https://example.com/', {}) 
assert url_and_params('https://example.com/foo?') ==", "is None assert x.name() == 'gcr.io/hail-vdc/batch-worker' assert str(x) == 'gcr.io/hail-vdc/batch-worker:123fds312'", "assert str(x) == 'localhost:5000/a/b/name' x = parse_docker_image_reference('localhost:5000/a/b/name:tag') assert x.domain ==", "== 'localhost:5000/animage' x = parse_docker_image_reference('localhost:5000/a/b/name') assert x.domain == 'localhost:5000' assert", "x.path == 'name' assert x.tag is None assert x.digest ==", "def test_partition_even_big(): assert list(partition(3, range(9))) == [range(0, 3), range(3, 6),", "list(partition(3, range(9))) == [range(0, 3), range(3, 6), range(6, 9)] def", "x.name() == 'animage' assert str(x) == 'animage' x = parse_docker_image_reference('hailgenetics/animage')", "assert x.domain == 'localhost:5000' assert x.path == 'a/b/name' assert x.tag", "None assert x.path == 'name' assert x.tag is None assert", "{'a': 'b', 'c': 'd'}) def test_parse_docker_image_reference(): x = parse_docker_image_reference('animage') assert", "None assert x.name() == 'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name' x", "from hailtop.utils import (partition, url_basename, url_join, url_scheme, url_and_params, parse_docker_image_reference) def", "== '123fds312' assert x.digest is None assert x.name() == 'gcr.io/hail-vdc/batch-worker'", "assert x.domain == 'gcr.io' assert x.path == 'hail-vdc/batch-worker' assert x.tag", "== 'a/b/name' assert x.tag is None assert x.digest is None", "== 'gcr.io' assert x.path == 'hail-vdc/batch-worker' assert x.tag == '123fds312'", "None assert x.digest == 'sha256:abc123' assert x.name() == 'localhost:5000/a/b/name' assert", "'localhost:5000/a/b/name:tag' x = parse_docker_image_reference('localhost:5000/a/b/name:tag@sha256:abc123') assert x.domain == 'localhost:5000' assert x.path", "== 'a/b/name' assert x.tag == 'tag' assert x.digest == 'sha256:abc123'", "None assert x.name() == 'localhost:5000/a/b/name' assert str(x) == 
'localhost:5000/a/b/name:tag' x", "assert x.name() == 'hailgenetics/animage' assert str(x) == 'hailgenetics/animage' x =", "= parse_docker_image_reference('us-docker.pkg.dev/my-project/my-repo/test-image') assert x.domain == 'us-docker.pkg.dev' assert x.path == 'my-project/my-repo/test-image'", "url_and_params('https://example.com/foo?a=b&c=d') == ('https://example.com/foo', {'a': 'b', 'c': 'd'}) def test_parse_docker_image_reference(): x", "assert x.digest is None assert x.name() == 'animage' assert str(x)", "= parse_docker_image_reference('localhost:5000/a/b/name:tag') assert x.domain == 'localhost:5000' assert x.path == 'a/b/name'", "is None assert x.digest is None assert x.name() == 'localhost:5000/animage'", "'localhost:5000/a/b/name@sha256:abc123' x = parse_docker_image_reference('name@sha256:abc123') assert x.domain is None assert x.path", "str(x) == 'gcr.io/hail-vdc/batch-worker:123fds312' x = parse_docker_image_reference('us-docker.pkg.dev/my-project/my-repo/test-image') assert x.domain == 'us-docker.pkg.dev'", "assert list(partition(0, [])) == [] def test_partition_even_small(): assert list(partition(3, range(3)))", "test_url_join(): assert url_join('/path/to', 'file') == '/path/to/file' assert url_join('/path/to/', 'file') ==", "x.domain == 'localhost:5000' assert x.path == 'animage' assert x.tag is", "3), range(3, 3)] def test_url_basename(): assert url_basename('/path/to/file') == 'file' assert", "assert x.name() == 'animage' assert str(x) == 'animage' x =", "def test_url_basename(): assert url_basename('/path/to/file') == 'file' assert url_basename('https://hail.is/path/to/file') == 'file'", "== 'a/b/name' assert x.tag is None assert x.digest == 'sha256:abc123'", "== 'localhost:5000/a/b/name@sha256:abc123' x = parse_docker_image_reference('name@sha256:abc123') assert x.domain is None assert", "None assert x.name() == 'localhost:5000/animage' assert str(x) == 'localhost:5000/animage' x", "'animage' assert str(x) == 'animage' x = 
parse_docker_image_reference('hailgenetics/animage') assert x.domain", "assert url_join('/path/to/', 'file') == '/path/to/file' assert url_join('/path/to/', '/absolute/file') == '/absolute/file'", "assert url_join('/path/to', 'file') == '/path/to/file' assert url_join('/path/to/', 'file') == '/path/to/file'", "= parse_docker_image_reference('hailgenetics/animage') assert x.domain == 'hailgenetics' assert x.path == 'animage'", "assert url_and_params('https://example.com/foo?') == ('https://example.com/foo', {}) assert url_and_params('https://example.com/foo?a=b&c=d') == ('https://example.com/foo', {'a':", "[])) == [] def test_partition_even_small(): assert list(partition(3, range(3))) == [range(0,", "'tag' assert x.digest == 'sha256:abc123' assert x.name() == 'localhost:5000/a/b/name' assert", "is None assert x.name() == 'hailgenetics/animage' assert str(x) == 'hailgenetics/animage'", "== 'https://hail.is/path/to/file' assert url_join('https://hail.is/path/to/', 'file') == 'https://hail.is/path/to/file' assert url_join('https://hail.is/path/to/', '/absolute/file')", "assert x.path == 'my-project/my-repo/test-image' assert x.tag is None assert x.digest", "3), range(3, 6), range(6, 9)] def test_partition_uneven_big(): assert list(partition(2, range(9)))", "'hail-vdc/batch-worker' assert x.tag == '123fds312' assert x.digest is None assert", "[] def test_partition_even_small(): assert list(partition(3, range(3))) == [range(0, 1), range(1,", "== 'hailgenetics/animage' assert str(x) == 'hailgenetics/animage' x = parse_docker_image_reference('localhost:5000/animage') assert", "'name@sha256:abc123' x = parse_docker_image_reference('gcr.io/hail-vdc/batch-worker:123fds312') assert x.domain == 'gcr.io' assert x.path", "== 'us-docker.pkg.dev' assert x.path == 'my-project/my-repo/test-image' assert x.tag is None", "== '/absolute/file' assert url_join('https://hail.is/path/to', 'file') == 'https://hail.is/path/to/file' assert url_join('https://hail.is/path/to/', 'file')", "x.path 
== 'my-project/my-repo/test-image' assert x.tag is None assert x.digest is", "x.tag == '123fds312' assert x.digest is None assert x.name() ==", "'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name' x = parse_docker_image_reference('localhost:5000/a/b/name:tag') assert x.domain", "def test_url_scheme(): assert url_scheme('https://hail.is/path/to') == 'https' assert url_scheme('/path/to') == ''", "x.path == 'a/b/name' assert x.tag == 'tag' assert x.digest is", "= parse_docker_image_reference('gcr.io/hail-vdc/batch-worker:123fds312') assert x.domain == 'gcr.io' assert x.path == 'hail-vdc/batch-worker'", "url_basename('/path/to/file') == 'file' assert url_basename('https://hail.is/path/to/file') == 'file' def test_url_join(): assert", "== 'localhost:5000' assert x.path == 'a/b/name' assert x.tag is None", "'name' assert x.tag is None assert x.digest == 'sha256:abc123' assert", "== 'https' assert url_scheme('/path/to') == '' def test_url_and_params(): assert url_and_params('https://example.com/')", "assert x.domain == 'hailgenetics' assert x.path == 'animage' assert x.tag", "str(x) == 'localhost:5000/a/b/name' x = parse_docker_image_reference('localhost:5000/a/b/name:tag') assert x.domain == 'localhost:5000'", "hailtop.utils import (partition, url_basename, url_join, url_scheme, url_and_params, parse_docker_image_reference) def test_partition_zero_empty():", "x.name() == 'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name' x = parse_docker_image_reference('localhost:5000/a/b/name:tag')", "= parse_docker_image_reference('localhost:5000/a/b/name:tag@sha256:abc123') assert x.domain == 'localhost:5000' assert x.path == 'a/b/name'", "str(x) == 'localhost:5000/a/b/name:tag@sha256:abc123' x = parse_docker_image_reference('localhost:5000/a/b/name@sha256:abc123') assert x.domain == 'localhost:5000'", "url_and_params('https://example.com/') == ('https://example.com/', {}) assert url_and_params('https://example.com/foo?') == 
('https://example.com/foo', {}) assert", "'us-docker.pkg.dev' assert x.path == 'my-project/my-repo/test-image' assert x.tag is None assert", "== [range(0, 5), range(5, 9)] def test_partition_toofew(): assert list(partition(6, range(3)))", "'d'}) def test_parse_docker_image_reference(): x = parse_docker_image_reference('animage') assert x.domain is None", "[range(0, 3), range(3, 6), range(6, 9)] def test_partition_uneven_big(): assert list(partition(2,", "'file') == '/path/to/file' assert url_join('/path/to/', 'file') == '/path/to/file' assert url_join('/path/to/',", "== ('https://example.com/foo', {'a': 'b', 'c': 'd'}) def test_parse_docker_image_reference(): x =", "x.tag is None assert x.digest is None assert x.name() ==", "== ('https://example.com/foo', {}) assert url_and_params('https://example.com/foo?a=b&c=d') == ('https://example.com/foo', {'a': 'b', 'c':", "url_scheme, url_and_params, parse_docker_image_reference) def test_partition_zero_empty(): assert list(partition(0, [])) == []", "def test_partition_uneven_big(): assert list(partition(2, range(9))) == [range(0, 5), range(5, 9)]", "range(1, 2), range(2, 3)] def test_partition_even_big(): assert list(partition(3, range(9))) ==", "assert x.name() == 'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name:tag@sha256:abc123' x =", "assert x.tag is None assert x.digest is None assert x.name()", "assert url_join('https://hail.is/path/to/', '/absolute/file') == 'https://hail.is/absolute/file' def test_url_scheme(): assert url_scheme('https://hail.is/path/to') ==", "== 'https://hail.is/absolute/file' def test_url_scheme(): assert url_scheme('https://hail.is/path/to') == 'https' assert url_scheme('/path/to')", "url_join('https://hail.is/path/to/', 'file') == 'https://hail.is/path/to/file' assert url_join('https://hail.is/path/to/', '/absolute/file') == 'https://hail.is/absolute/file' def", "x.domain == 'hailgenetics' assert x.path == 'animage' assert x.tag is", "assert x.name() == 
'localhost:5000/animage' assert str(x) == 'localhost:5000/animage' x =", "assert x.name() == 'gcr.io/hail-vdc/batch-worker' assert str(x) == 'gcr.io/hail-vdc/batch-worker:123fds312' x =", "3), range(3, 3), range(3, 3), range(3, 3)] def test_url_basename(): assert", "'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name@sha256:abc123' x = parse_docker_image_reference('name@sha256:abc123') assert x.domain", "range(5, 9)] def test_partition_toofew(): assert list(partition(6, range(3))) == [range(0, 1),", "None assert x.digest is None assert x.name() == 'localhost:5000/a/b/name' assert", "x.digest is None assert x.name() == 'localhost:5000/animage' assert str(x) ==", "assert url_basename('/path/to/file') == 'file' assert url_basename('https://hail.is/path/to/file') == 'file' def test_url_join():", "assert x.digest == 'sha256:abc123' assert x.name() == 'name' assert str(x)", "url_and_params, parse_docker_image_reference) def test_partition_zero_empty(): assert list(partition(0, [])) == [] def", "'a/b/name' assert x.tag is None assert x.digest == 'sha256:abc123' assert", "None assert x.digest is None assert x.name() == 'animage' assert", "x.tag == 'tag' assert x.digest is None assert x.name() ==", "== 'localhost:5000/a/b/name' x = parse_docker_image_reference('localhost:5000/a/b/name:tag') assert x.domain == 'localhost:5000' assert", "import (partition, url_basename, url_join, url_scheme, url_and_params, parse_docker_image_reference) def test_partition_zero_empty(): assert", "test_url_basename(): assert url_basename('/path/to/file') == 'file' assert url_basename('https://hail.is/path/to/file') == 'file' def", "x.name() == 'localhost:5000/animage' assert str(x) == 'localhost:5000/animage' x = parse_docker_image_reference('localhost:5000/a/b/name')", "assert str(x) == 'localhost:5000/animage' x = parse_docker_image_reference('localhost:5000/a/b/name') assert x.domain ==", "== 'sha256:abc123' assert x.name() == 'localhost:5000/a/b/name' assert str(x) == 
'localhost:5000/a/b/name:tag@sha256:abc123'", "assert x.digest is None assert x.name() == 'localhost:5000/animage' assert str(x)", "is None assert x.path == 'animage' assert x.tag is None", "'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name:tag@sha256:abc123' x = parse_docker_image_reference('localhost:5000/a/b/name@sha256:abc123') assert x.domain", "None assert x.path == 'animage' assert x.tag is None assert", "= parse_docker_image_reference('localhost:5000/animage') assert x.domain == 'localhost:5000' assert x.path == 'animage'", "== 'localhost:5000/a/b/name:tag@sha256:abc123' x = parse_docker_image_reference('localhost:5000/a/b/name@sha256:abc123') assert x.domain == 'localhost:5000' assert", "('https://example.com/foo', {'a': 'b', 'c': 'd'}) def test_parse_docker_image_reference(): x = parse_docker_image_reference('animage')", "'' def test_url_and_params(): assert url_and_params('https://example.com/') == ('https://example.com/', {}) assert url_and_params('https://example.com/foo?')", "parse_docker_image_reference('name@sha256:abc123') assert x.domain is None assert x.path == 'name' assert", "url_join('/path/to/', 'file') == '/path/to/file' assert url_join('/path/to/', '/absolute/file') == '/absolute/file' assert", "== 'localhost:5000/a/b/name:tag' x = parse_docker_image_reference('localhost:5000/a/b/name:tag@sha256:abc123') assert x.domain == 'localhost:5000' assert", "<reponame>vrautela/hail<gh_stars>0 from hailtop.utils import (partition, url_basename, url_join, url_scheme, url_and_params, parse_docker_image_reference)", "str(x) == 'name@sha256:abc123' x = parse_docker_image_reference('gcr.io/hail-vdc/batch-worker:123fds312') assert x.domain == 'gcr.io'", "assert url_and_params('https://example.com/foo?a=b&c=d') == ('https://example.com/foo', {'a': 'b', 'c': 'd'}) def test_parse_docker_image_reference():", "== 'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name' x = parse_docker_image_reference('localhost:5000/a/b/name:tag') 
assert", "assert x.domain == 'localhost:5000' assert x.path == 'animage' assert x.tag", "parse_docker_image_reference('localhost:5000/a/b/name') assert x.domain == 'localhost:5000' assert x.path == 'a/b/name' assert", "== [] def test_partition_even_small(): assert list(partition(3, range(3))) == [range(0, 1),", "def test_partition_even_small(): assert list(partition(3, range(3))) == [range(0, 1), range(1, 2),", "assert x.tag == '123fds312' assert x.digest is None assert x.name()", "assert str(x) == 'animage' x = parse_docker_image_reference('hailgenetics/animage') assert x.domain ==", "== 'tag' assert x.digest is None assert x.name() == 'localhost:5000/a/b/name'", "'file') == 'https://hail.is/path/to/file' assert url_join('https://hail.is/path/to/', 'file') == 'https://hail.is/path/to/file' assert url_join('https://hail.is/path/to/',", "x.domain == 'us-docker.pkg.dev' assert x.path == 'my-project/my-repo/test-image' assert x.tag is", "x.digest is None assert x.name() == 'gcr.io/hail-vdc/batch-worker' assert str(x) ==", "x = parse_docker_image_reference('localhost:5000/a/b/name:tag@sha256:abc123') assert x.domain == 'localhost:5000' assert x.path ==", "x.path == 'a/b/name' assert x.tag is None assert x.digest is", "== 'sha256:abc123' assert x.name() == 'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name@sha256:abc123'", "'https' assert url_scheme('/path/to') == '' def test_url_and_params(): assert url_and_params('https://example.com/') ==", "== '' def test_url_and_params(): assert url_and_params('https://example.com/') == ('https://example.com/', {}) assert", "'b', 'c': 'd'}) def test_parse_docker_image_reference(): x = parse_docker_image_reference('animage') assert x.domain", "== 'hail-vdc/batch-worker' assert x.tag == '123fds312' assert x.digest is None", "range(6, 9)] def test_partition_uneven_big(): assert list(partition(2, range(9))) == [range(0, 5),", "== [range(0, 3), range(3, 6), range(6, 9)] def test_partition_uneven_big(): assert", "x.path 
== 'animage' assert x.tag is None assert x.digest is", "x.tag == 'tag' assert x.digest == 'sha256:abc123' assert x.name() ==", "assert url_basename('https://hail.is/path/to/file') == 'file' def test_url_join(): assert url_join('/path/to', 'file') ==", "x.path == 'a/b/name' assert x.tag is None assert x.digest ==", "('https://example.com/foo', {}) assert url_and_params('https://example.com/foo?a=b&c=d') == ('https://example.com/foo', {'a': 'b', 'c': 'd'})", "test_url_scheme(): assert url_scheme('https://hail.is/path/to') == 'https' assert url_scheme('/path/to') == '' def", "assert x.path == 'a/b/name' assert x.tag == 'tag' assert x.digest", "url_join('/path/to/', '/absolute/file') == '/absolute/file' assert url_join('https://hail.is/path/to', 'file') == 'https://hail.is/path/to/file' assert", "'tag' assert x.digest is None assert x.name() == 'localhost:5000/a/b/name' assert", "== 'sha256:abc123' assert x.name() == 'name' assert str(x) == 'name@sha256:abc123'", "'file' def test_url_join(): assert url_join('/path/to', 'file') == '/path/to/file' assert url_join('/path/to/',", "'localhost:5000/animage' x = parse_docker_image_reference('localhost:5000/a/b/name') assert x.domain == 'localhost:5000' assert x.path", "range(9))) == [range(0, 3), range(3, 6), range(6, 9)] def test_partition_uneven_big():", "parse_docker_image_reference) def test_partition_zero_empty(): assert list(partition(0, [])) == [] def test_partition_even_small():", "def test_url_join(): assert url_join('/path/to', 'file') == '/path/to/file' assert url_join('/path/to/', 'file')", "= parse_docker_image_reference('localhost:5000/a/b/name@sha256:abc123') assert x.domain == 'localhost:5000' assert x.path == 'a/b/name'", "assert x.digest is None assert x.name() == 'localhost:5000/a/b/name' assert str(x)", "x.domain == 'gcr.io' assert x.path == 'hail-vdc/batch-worker' assert x.tag ==", "'/absolute/file' assert url_join('https://hail.is/path/to', 'file') == 'https://hail.is/path/to/file' assert 
url_join('https://hail.is/path/to/', 'file') ==", "assert url_join('/path/to/', '/absolute/file') == '/absolute/file' assert url_join('https://hail.is/path/to', 'file') == 'https://hail.is/path/to/file'", "'/absolute/file') == '/absolute/file' assert url_join('https://hail.is/path/to', 'file') == 'https://hail.is/path/to/file' assert url_join('https://hail.is/path/to/',", "str(x) == 'animage' x = parse_docker_image_reference('hailgenetics/animage') assert x.domain == 'hailgenetics'", "assert list(partition(3, range(3))) == [range(0, 1), range(1, 2), range(2, 3)]", "== 'name@sha256:abc123' x = parse_docker_image_reference('gcr.io/hail-vdc/batch-worker:123fds312') assert x.domain == 'gcr.io' assert", "== 'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name:tag@sha256:abc123' x = parse_docker_image_reference('localhost:5000/a/b/name@sha256:abc123') assert", "assert x.domain is None assert x.path == 'animage' assert x.tag", "x = parse_docker_image_reference('localhost:5000/animage') assert x.domain == 'localhost:5000' assert x.path ==", "x.domain == 'localhost:5000' assert x.path == 'a/b/name' assert x.tag ==", "range(3, 3), range(3, 3), range(3, 3)] def test_url_basename(): assert url_basename('/path/to/file')", "def test_partition_toofew(): assert list(partition(6, range(3))) == [range(0, 1), range(1, 2),", "is None assert x.digest is None assert x.name() == 'us-docker.pkg.dev/my-project/my-repo/test-image'", "range(3, 6), range(6, 9)] def test_partition_uneven_big(): assert list(partition(2, range(9))) ==", "x = parse_docker_image_reference('animage') assert x.domain is None assert x.path ==", "url_basename('https://hail.is/path/to/file') == 'file' def test_url_join(): assert url_join('/path/to', 'file') == '/path/to/file'", "range(9))) == [range(0, 5), range(5, 9)] def test_partition_toofew(): assert list(partition(6,", "assert x.tag == 'tag' assert x.digest == 'sha256:abc123' assert x.name()", "6), range(6, 9)] def test_partition_uneven_big(): 
assert list(partition(2, range(9))) == [range(0,", "test_partition_toofew(): assert list(partition(6, range(3))) == [range(0, 1), range(1, 2), range(2,", "list(partition(0, [])) == [] def test_partition_even_small(): assert list(partition(3, range(3))) ==", "parse_docker_image_reference('hailgenetics/animage') assert x.domain == 'hailgenetics' assert x.path == 'animage' assert", "== 'localhost:5000' assert x.path == 'a/b/name' assert x.tag == 'tag'", "str(x) == 'localhost:5000/a/b/name:tag' x = parse_docker_image_reference('localhost:5000/a/b/name:tag@sha256:abc123') assert x.domain == 'localhost:5000'", "is None assert x.name() == 'animage' assert str(x) == 'animage'", "x.digest is None assert x.name() == 'us-docker.pkg.dev/my-project/my-repo/test-image' assert str(x) ==", "== 'name' assert str(x) == 'name@sha256:abc123' x = parse_docker_image_reference('gcr.io/hail-vdc/batch-worker:123fds312') assert", "x.name() == 'hailgenetics/animage' assert str(x) == 'hailgenetics/animage' x = parse_docker_image_reference('localhost:5000/animage')", "x.name() == 'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name:tag' x = parse_docker_image_reference('localhost:5000/a/b/name:tag@sha256:abc123')", "== 'animage' assert x.tag is None assert x.digest is None", "3)] def test_url_basename(): assert url_basename('/path/to/file') == 'file' assert url_basename('https://hail.is/path/to/file') ==", "assert x.path == 'name' assert x.tag is None assert x.digest", "None assert x.name() == 'gcr.io/hail-vdc/batch-worker' assert str(x) == 'gcr.io/hail-vdc/batch-worker:123fds312' x", "assert x.digest is None assert x.name() == 'gcr.io/hail-vdc/batch-worker' assert str(x)", "assert url_scheme('/path/to') == '' def test_url_and_params(): assert url_and_params('https://example.com/') == ('https://example.com/',", "assert x.digest == 'sha256:abc123' assert x.name() == 'localhost:5000/a/b/name' assert str(x)", "'https://hail.is/path/to/file' assert 
url_join('https://hail.is/path/to/', '/absolute/file') == 'https://hail.is/absolute/file' def test_url_scheme(): assert url_scheme('https://hail.is/path/to')", "x = parse_docker_image_reference('name@sha256:abc123') assert x.domain is None assert x.path ==", "[range(0, 5), range(5, 9)] def test_partition_toofew(): assert list(partition(6, range(3))) ==", "None assert x.digest is None assert x.name() == 'hailgenetics/animage' assert", "9)] def test_partition_toofew(): assert list(partition(6, range(3))) == [range(0, 1), range(1,", "assert str(x) == 'hailgenetics/animage' x = parse_docker_image_reference('localhost:5000/animage') assert x.domain ==", "'sha256:abc123' assert x.name() == 'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name@sha256:abc123' x", "range(2, 3)] def test_partition_even_big(): assert list(partition(3, range(9))) == [range(0, 3),", "'name' assert str(x) == 'name@sha256:abc123' x = parse_docker_image_reference('gcr.io/hail-vdc/batch-worker:123fds312') assert x.domain", "== 'a/b/name' assert x.tag == 'tag' assert x.digest is None", "assert x.name() == 'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name:tag' x =", "x.digest is None assert x.name() == 'animage' assert str(x) ==", "== '/path/to/file' assert url_join('/path/to/', 'file') == '/path/to/file' assert url_join('/path/to/', '/absolute/file')", "'my-project/my-repo/test-image' assert x.tag is None assert x.digest is None assert", "'file' assert url_basename('https://hail.is/path/to/file') == 'file' def test_url_join(): assert url_join('/path/to', 'file')", "('https://example.com/', {}) assert url_and_params('https://example.com/foo?') == ('https://example.com/foo', {}) assert url_and_params('https://example.com/foo?a=b&c=d') ==", "x.digest == 'sha256:abc123' assert x.name() == 'name' assert str(x) ==", "2), range(2, 3)] def test_partition_even_big(): assert list(partition(3, range(9))) == [range(0,", "'gcr.io/hail-vdc/batch-worker:123fds312' x = 
parse_docker_image_reference('us-docker.pkg.dev/my-project/my-repo/test-image') assert x.domain == 'us-docker.pkg.dev' assert x.path", "'https://hail.is/path/to/file' assert url_join('https://hail.is/path/to/', 'file') == 'https://hail.is/path/to/file' assert url_join('https://hail.is/path/to/', '/absolute/file') ==", "== 'animage' x = parse_docker_image_reference('hailgenetics/animage') assert x.domain == 'hailgenetics' assert", "== 'tag' assert x.digest == 'sha256:abc123' assert x.name() == 'localhost:5000/a/b/name'", "assert url_join('https://hail.is/path/to/', 'file') == 'https://hail.is/path/to/file' assert url_join('https://hail.is/path/to/', '/absolute/file') == 'https://hail.is/absolute/file'", "None assert x.digest is None assert x.name() == 'us-docker.pkg.dev/my-project/my-repo/test-image' assert", "'localhost:5000/a/b/name:tag@sha256:abc123' x = parse_docker_image_reference('localhost:5000/a/b/name@sha256:abc123') assert x.domain == 'localhost:5000' assert x.path", "= parse_docker_image_reference('localhost:5000/a/b/name') assert x.domain == 'localhost:5000' assert x.path == 'a/b/name'", "== 'name' assert x.tag is None assert x.digest == 'sha256:abc123'", "== [range(0, 1), range(1, 2), range(2, 3), range(3, 3), range(3,", "test_parse_docker_image_reference(): x = parse_docker_image_reference('animage') assert x.domain is None assert x.path", "assert list(partition(6, range(3))) == [range(0, 1), range(1, 2), range(2, 3),", "url_join('https://hail.is/path/to', 'file') == 'https://hail.is/path/to/file' assert url_join('https://hail.is/path/to/', 'file') == 'https://hail.is/path/to/file' assert", "x = parse_docker_image_reference('us-docker.pkg.dev/my-project/my-repo/test-image') assert x.domain == 'us-docker.pkg.dev' assert x.path ==", "parse_docker_image_reference('localhost:5000/a/b/name:tag@sha256:abc123') assert x.domain == 'localhost:5000' assert x.path == 'a/b/name' assert", "'animage' x = parse_docker_image_reference('hailgenetics/animage') assert 
x.domain == 'hailgenetics' assert x.path", "parse_docker_image_reference('gcr.io/hail-vdc/batch-worker:123fds312') assert x.domain == 'gcr.io' assert x.path == 'hail-vdc/batch-worker' assert", "x = parse_docker_image_reference('hailgenetics/animage') assert x.domain == 'hailgenetics' assert x.path ==", "range(3))) == [range(0, 1), range(1, 2), range(2, 3)] def test_partition_even_big():", "list(partition(3, range(3))) == [range(0, 1), range(1, 2), range(2, 3)] def", "'file') == 'https://hail.is/path/to/file' assert url_join('https://hail.is/path/to/', '/absolute/file') == 'https://hail.is/absolute/file' def test_url_scheme():", "x.digest == 'sha256:abc123' assert x.name() == 'localhost:5000/a/b/name' assert str(x) ==", "assert x.digest is None assert x.name() == 'us-docker.pkg.dev/my-project/my-repo/test-image' assert str(x)", "assert x.path == 'animage' assert x.tag is None assert x.digest", "5), range(5, 9)] def test_partition_toofew(): assert list(partition(6, range(3))) == [range(0,", "assert str(x) == 'localhost:5000/a/b/name:tag@sha256:abc123' x = parse_docker_image_reference('localhost:5000/a/b/name@sha256:abc123') assert x.domain ==", "x = parse_docker_image_reference('gcr.io/hail-vdc/batch-worker:123fds312') assert x.domain == 'gcr.io' assert x.path ==", "== 'gcr.io/hail-vdc/batch-worker:123fds312' x = parse_docker_image_reference('us-docker.pkg.dev/my-project/my-repo/test-image') assert x.domain == 'us-docker.pkg.dev' assert", "is None assert x.path == 'name' assert x.tag is None", "'/path/to/file' assert url_join('/path/to/', 'file') == '/path/to/file' assert url_join('/path/to/', '/absolute/file') ==", "2), range(2, 3), range(3, 3), range(3, 3), range(3, 3)] def", "== 'hailgenetics/animage' x = parse_docker_image_reference('localhost:5000/animage') assert x.domain == 'localhost:5000' assert", "== 'hailgenetics' assert x.path == 'animage' assert x.tag is None", "assert x.name() == 'name' assert str(x) == 'name@sha256:abc123' x =", "x = 
parse_docker_image_reference('localhost:5000/a/b/name@sha256:abc123') assert x.domain == 'localhost:5000' assert x.path ==", "is None assert x.digest == 'sha256:abc123' assert x.name() == 'name'", "str(x) == 'localhost:5000/a/b/name@sha256:abc123' x = parse_docker_image_reference('name@sha256:abc123') assert x.domain is None", "assert url_scheme('https://hail.is/path/to') == 'https' assert url_scheme('/path/to') == '' def test_url_and_params():", "x = parse_docker_image_reference('localhost:5000/a/b/name') assert x.domain == 'localhost:5000' assert x.path ==", "{}) assert url_and_params('https://example.com/foo?') == ('https://example.com/foo', {}) assert url_and_params('https://example.com/foo?a=b&c=d') == ('https://example.com/foo',", "== 'my-project/my-repo/test-image' assert x.tag is None assert x.digest is None", "'sha256:abc123' assert x.name() == 'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name:tag@sha256:abc123' x", "1), range(1, 2), range(2, 3)] def test_partition_even_big(): assert list(partition(3, range(9)))", "x.name() == 'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name:tag@sha256:abc123' x = parse_docker_image_reference('localhost:5000/a/b/name@sha256:abc123')", "{}) assert url_and_params('https://example.com/foo?a=b&c=d') == ('https://example.com/foo', {'a': 'b', 'c': 'd'}) def", "== 'file' assert url_basename('https://hail.is/path/to/file') == 'file' def test_url_join(): assert url_join('/path/to',", "None assert x.digest == 'sha256:abc123' assert x.name() == 'name' assert", "range(3, 3), range(3, 3)] def test_url_basename(): assert url_basename('/path/to/file') == 'file'", "range(3))) == [range(0, 1), range(1, 2), range(2, 3), range(3, 3),", "== 'https://hail.is/path/to/file' assert url_join('https://hail.is/path/to/', '/absolute/file') == 'https://hail.is/absolute/file' def test_url_scheme(): assert", "'hailgenetics/animage' assert str(x) == 'hailgenetics/animage' x = 
parse_docker_image_reference('localhost:5000/animage') assert x.domain", "is None assert x.digest is None assert x.name() == 'animage'", "x.domain is None assert x.path == 'name' assert x.tag is", "test_partition_uneven_big(): assert list(partition(2, range(9))) == [range(0, 5), range(5, 9)] def", "'a/b/name' assert x.tag is None assert x.digest is None assert", "is None assert x.name() == 'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name:tag'", "is None assert x.name() == 'localhost:5000/a/b/name' assert str(x) == 'localhost:5000/a/b/name'", "== 'file' def test_url_join(): assert url_join('/path/to', 'file') == '/path/to/file' assert", "is None assert x.digest is None assert x.name() == 'hailgenetics/animage'", "test_partition_even_big(): assert list(partition(3, range(9))) == [range(0, 3), range(3, 6), range(6,", "assert str(x) == 'gcr.io/hail-vdc/batch-worker:123fds312' x = parse_docker_image_reference('us-docker.pkg.dev/my-project/my-repo/test-image') assert x.domain ==", "= parse_docker_image_reference('animage') assert x.domain is None assert x.path == 'animage'", "'localhost:5000' assert x.path == 'a/b/name' assert x.tag is None assert", "assert url_and_params('https://example.com/') == ('https://example.com/', {}) assert url_and_params('https://example.com/foo?') == ('https://example.com/foo', {})", "'gcr.io' assert x.path == 'hail-vdc/batch-worker' assert x.tag == '123fds312' assert", "url_scheme('/path/to') == '' def test_url_and_params(): assert url_and_params('https://example.com/') == ('https://example.com/', {})", "'/path/to/file' assert url_join('/path/to/', '/absolute/file') == '/absolute/file' assert url_join('https://hail.is/path/to', 'file') ==", "url_basename, url_join, url_scheme, url_and_params, parse_docker_image_reference) def test_partition_zero_empty(): assert list(partition(0, []))", "= parse_docker_image_reference('name@sha256:abc123') assert x.domain is None assert x.path == 'name'", "'animage' assert x.tag is None 
assert x.digest is None assert", "== '/path/to/file' assert url_join('/path/to/', '/absolute/file') == '/absolute/file' assert url_join('https://hail.is/path/to', 'file')", "def test_parse_docker_image_reference(): x = parse_docker_image_reference('animage') assert x.domain is None assert", "assert list(partition(2, range(9))) == [range(0, 5), range(5, 9)] def test_partition_toofew():", "assert x.tag == 'tag' assert x.digest is None assert x.name()", "(partition, url_basename, url_join, url_scheme, url_and_params, parse_docker_image_reference) def test_partition_zero_empty(): assert list(partition(0,", "'a/b/name' assert x.tag == 'tag' assert x.digest == 'sha256:abc123' assert", "'localhost:5000/a/b/name' x = parse_docker_image_reference('localhost:5000/a/b/name:tag') assert x.domain == 'localhost:5000' assert x.path", "None assert x.name() == 'hailgenetics/animage' assert str(x) == 'hailgenetics/animage' x", "== 'gcr.io/hail-vdc/batch-worker' assert str(x) == 'gcr.io/hail-vdc/batch-worker:123fds312' x = parse_docker_image_reference('us-docker.pkg.dev/my-project/my-repo/test-image') assert", "[range(0, 1), range(1, 2), range(2, 3), range(3, 3), range(3, 3),", "url_join('https://hail.is/path/to/', '/absolute/file') == 'https://hail.is/absolute/file' def test_url_scheme(): assert url_scheme('https://hail.is/path/to') == 'https'", "'https://hail.is/absolute/file' def test_url_scheme(): assert url_scheme('https://hail.is/path/to') == 'https' assert url_scheme('/path/to') ==", "x.tag is None assert x.digest == 'sha256:abc123' assert x.name() ==", "str(x) == 'localhost:5000/animage' x = parse_docker_image_reference('localhost:5000/a/b/name') assert x.domain == 'localhost:5000'", "x.name() == 'name' assert str(x) == 'name@sha256:abc123' x = parse_docker_image_reference('gcr.io/hail-vdc/batch-worker:123fds312')", "'hailgenetics/animage' x = parse_docker_image_reference('localhost:5000/animage') assert x.domain == 'localhost:5000' assert x.path", "assert x.tag is None 
assert x.digest == 'sha256:abc123' assert x.name()" ]
[ "'hood'), path('add_bizna/',views.uploadBuisness, name = 'add_bizna'), path('bizna/',views.viewBizna, name = 'view_bizna'), path('viewbizna/<int:pk>/',views.bizna,", "from . import views urlpatterns = [ path('home/', views.home, name", "path('home/', views.home, name = 'home'), path('add_hood/',views.uploadNeighbourhood, name = 'add_hood'), path('viewhood/',views.viewHood,", "path,include from . import views urlpatterns = [ path('home/', views.home,", "path('searchbizna/', views.searchBizna, name=\"search_results\"), path('searchhood/', views.searchHood, name=\"search_res\"), path('join_hood/<id>', views.join_neighbourhood, name='join-hood'), path('leave_hood/<id>',", "= 'posts'), path('searchbizna/', views.searchBizna, name=\"search_results\"), path('searchhood/', views.searchHood, name=\"search_res\"), path('join_hood/<id>', views.join_neighbourhood,", "= 'add_hood'), path('viewhood/',views.viewHood, name = 'viewhood'), path('hood/<int:pk>/',views.hood, name = 'hood'),", "[ path('home/', views.home, name = 'home'), path('add_hood/',views.uploadNeighbourhood, name = 'add_hood'),", "= 'hood'), path('add_bizna/',views.uploadBuisness, name = 'add_bizna'), path('bizna/',views.viewBizna, name = 'view_bizna'),", "= 'post'), path('posts/',views.viewPost, name = 'posts'), path('searchbizna/', views.searchBizna, name=\"search_results\"), path('searchhood/',", "= 'bizna'), path('post/',views.create_post, name = 'post'), path('posts/',views.viewPost, name = 'posts'),", "path('add_bizna/',views.uploadBuisness, name = 'add_bizna'), path('bizna/',views.viewBizna, name = 'view_bizna'), path('viewbizna/<int:pk>/',views.bizna, name", "'home'), path('add_hood/',views.uploadNeighbourhood, name = 'add_hood'), path('viewhood/',views.viewHood, name = 'viewhood'), path('hood/<int:pk>/',views.hood,", "name = 'hood'), path('add_bizna/',views.uploadBuisness, name = 'add_bizna'), path('bizna/',views.viewBizna, name =", "views.searchBizna, name=\"search_results\"), path('searchhood/', 
views.searchHood, name=\"search_res\"), path('join_hood/<id>', views.join_neighbourhood, name='join-hood'), path('leave_hood/<id>', views.leave_neighbourhood,", "name=\"search_results\"), path('searchhood/', views.searchHood, name=\"search_res\"), path('join_hood/<id>', views.join_neighbourhood, name='join-hood'), path('leave_hood/<id>', views.leave_neighbourhood, name='leave-hood'),", "path('posts/',views.viewPost, name = 'posts'), path('searchbizna/', views.searchBizna, name=\"search_results\"), path('searchhood/', views.searchHood, name=\"search_res\"),", "'add_hood'), path('viewhood/',views.viewHood, name = 'viewhood'), path('hood/<int:pk>/',views.hood, name = 'hood'), path('add_bizna/',views.uploadBuisness,", "path('add_hood/',views.uploadNeighbourhood, name = 'add_hood'), path('viewhood/',views.viewHood, name = 'viewhood'), path('hood/<int:pk>/',views.hood, name", "'add_bizna'), path('bizna/',views.viewBizna, name = 'view_bizna'), path('viewbizna/<int:pk>/',views.bizna, name = 'bizna'), path('post/',views.create_post,", "urlpatterns = [ path('home/', views.home, name = 'home'), path('add_hood/',views.uploadNeighbourhood, name", "= 'add_bizna'), path('bizna/',views.viewBizna, name = 'view_bizna'), path('viewbizna/<int:pk>/',views.bizna, name = 'bizna'),", "name = 'bizna'), path('post/',views.create_post, name = 'post'), path('posts/',views.viewPost, name =", "path('viewbizna/<int:pk>/',views.bizna, name = 'bizna'), path('post/',views.create_post, name = 'post'), path('posts/',views.viewPost, name", "'bizna'), path('post/',views.create_post, name = 'post'), path('posts/',views.viewPost, name = 'posts'), path('searchbizna/',", "path('viewhood/',views.viewHood, name = 'viewhood'), path('hood/<int:pk>/',views.hood, name = 'hood'), path('add_bizna/',views.uploadBuisness, name", "name = 'add_bizna'), path('bizna/',views.viewBizna, name = 'view_bizna'), path('viewbizna/<int:pk>/',views.bizna, name =", "views.home, name = 'home'), 
path('add_hood/',views.uploadNeighbourhood, name = 'add_hood'), path('viewhood/',views.viewHood, name", "name = 'view_bizna'), path('viewbizna/<int:pk>/',views.bizna, name = 'bizna'), path('post/',views.create_post, name =", "'post'), path('posts/',views.viewPost, name = 'posts'), path('searchbizna/', views.searchBizna, name=\"search_results\"), path('searchhood/', views.searchHood,", "django.urls import path,include from . import views urlpatterns = [", "import path,include from . import views urlpatterns = [ path('home/',", "path('hood/<int:pk>/',views.hood, name = 'hood'), path('add_bizna/',views.uploadBuisness, name = 'add_bizna'), path('bizna/',views.viewBizna, name", "path('post/',views.create_post, name = 'post'), path('posts/',views.viewPost, name = 'posts'), path('searchbizna/', views.searchBizna,", "import views urlpatterns = [ path('home/', views.home, name = 'home'),", "name = 'post'), path('posts/',views.viewPost, name = 'posts'), path('searchbizna/', views.searchBizna, name=\"search_results\"),", "= 'home'), path('add_hood/',views.uploadNeighbourhood, name = 'add_hood'), path('viewhood/',views.viewHood, name = 'viewhood'),", "= 'viewhood'), path('hood/<int:pk>/',views.hood, name = 'hood'), path('add_bizna/',views.uploadBuisness, name = 'add_bizna'),", "'viewhood'), path('hood/<int:pk>/',views.hood, name = 'hood'), path('add_bizna/',views.uploadBuisness, name = 'add_bizna'), path('bizna/',views.viewBizna,", "path('searchhood/', views.searchHood, name=\"search_res\"), path('join_hood/<id>', views.join_neighbourhood, name='join-hood'), path('leave_hood/<id>', views.leave_neighbourhood, name='leave-hood'), ]", "<reponame>wadi-1000/Vicinity from django.urls import path,include from . 
import views urlpatterns", "name = 'viewhood'), path('hood/<int:pk>/',views.hood, name = 'hood'), path('add_bizna/',views.uploadBuisness, name =", "= 'view_bizna'), path('viewbizna/<int:pk>/',views.bizna, name = 'bizna'), path('post/',views.create_post, name = 'post'),", "'posts'), path('searchbizna/', views.searchBizna, name=\"search_results\"), path('searchhood/', views.searchHood, name=\"search_res\"), path('join_hood/<id>', views.join_neighbourhood, name='join-hood'),", ". import views urlpatterns = [ path('home/', views.home, name =", "= [ path('home/', views.home, name = 'home'), path('add_hood/',views.uploadNeighbourhood, name =", "name = 'home'), path('add_hood/',views.uploadNeighbourhood, name = 'add_hood'), path('viewhood/',views.viewHood, name =", "path('bizna/',views.viewBizna, name = 'view_bizna'), path('viewbizna/<int:pk>/',views.bizna, name = 'bizna'), path('post/',views.create_post, name", "name = 'posts'), path('searchbizna/', views.searchBizna, name=\"search_results\"), path('searchhood/', views.searchHood, name=\"search_res\"), path('join_hood/<id>',", "'view_bizna'), path('viewbizna/<int:pk>/',views.bizna, name = 'bizna'), path('post/',views.create_post, name = 'post'), path('posts/',views.viewPost,", "name = 'add_hood'), path('viewhood/',views.viewHood, name = 'viewhood'), path('hood/<int:pk>/',views.hood, name =", "views urlpatterns = [ path('home/', views.home, name = 'home'), path('add_hood/',views.uploadNeighbourhood,", "from django.urls import path,include from . import views urlpatterns =" ]
[ "as punctuation, spaces, etc). - the corresponding string This is", "to break texts in lines and tokens (aka. words) with", "a unicode query text. \"\"\" if not text: return []", "of ngrams of length `ngram_length` given an iterable. Each ngram", "u'text', u'with', u'spaces'] Unbalanced templates are handled correctly: >>> list(rule_tokenizer('{{}some", "(3, 4)] >>> list(ngrams([1,2,3], 2)) [(1, 2), (2, 3)] >>>", "to the probabilistic properties of Rabin fingerprints the probability that", "[1,2,3,4,5]), 2)) [(1, 2), (2, 3), (3, 4), (4, 5)]", "software code scanning tool from nexB Inc. and others. #", "-*- # # Copyright (c) 2017 nexB Inc. and others.", "text. \"\"\" if not text: return [] text = lower", "data created with ScanCode or any ScanCode # derivative work,", "not text: return [] text = lower and text.lower() or", "import print_function from __future__ import unicode_literals from itertools import islice", "word. # Keeping the trailing + is important for licenses", "import islice from itertools import izip import re from zlib", "enumerate(ngrams): # FIXME: use a proper hash nghs = [crc32(str(ng))", "first fingerprints every token and then selects a shingle s", "nghs = [crc32(str(ng)) for ng in ngram] min_hash = min(nghs)", "# we could instead get lines and tokens at once", "for templated sequences: >>> list(rule_tokenizer('{{Hi}}some {{}}Text with{{noth+-_!@ing}} {{junk}}spAces! + _", "3), (2, 6, 1), (7, 3, 4)], with_pos=True)) [(0, (2,", "(7, 3, 4))] This works also from a generator: >>>", "others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The", "to compute ngrams. For example: >>> list(ngrams([1,2,3,4,5], 2)) [(1, 2),", "and non-tokens from a unicode query text keeping everything (including", "This splitter yields plain token strings or double braces-enclosed strings", "shingle are different. 
For example: >>> list(select_ngrams([(2, 1, 3), (1,", "4)])) [(2, 1, 3), (1, 1, 3), (2, 6, 1),", "to reconstruct the matched query text accurately. \"\"\" if not", "advice. Consult an Attorney # for any legal advice. #", "are handled and skipped for templated sequences: >>> list(rule_tokenizer('{{Hi}}some {{}}Text", "yield ngram last = ngram else: # always yield the", "generated with ScanCode require an acknowledgment. # ScanCode is a", "(such as punctuation, spaces, etc). - the corresponding string This", "s if the minimum fingerprint value of all k tokens", "3), (3, 4)] >>> list(ngrams([1,2,3], 2)) [(1, 2), (2, 3)]", "For example: >>> list(rule_tokenizer('')) [] >>> list(rule_tokenizer('some Text with spAces!", ">>> list(ngrams([1,2,3,4,5], 2)) [(1, 2), (2, 3), (3, 4), (4,", ">>> list(select_ngrams([(2, 1, 3), (1, 1, 3), (5, 1, 3),", "token or False if this is not (such as punctuation,", "1, 3), (2, 6, 1), (7, 3, 4)] \"\"\" last", "== min_hash or nghs[-1] == min_hash: yield ngram last =", "for x in [(2, 1, 3), (1, 1, 3), (5,", "1), (7, 3, 4)])) [(2, 1, 3), (1, 1, 3),", "free software code scanning tool from nexB Inc. and others.", "(2, 3), (3, 4)] >>> list(ngrams([1,2,3], 2)) [(1, 2), (2,", "# http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed", "a copy of the License at: http://apache.org/licenses/LICENSE-2.0 # Unless required", "3, 4), (2, 3, 4, 5)] >>> list(ngrams([1,2,3,4], 2)) [(1,", "# under the License is distributed on an \"AS IS\"", "if the minimum fingerprint value of all k tokens in", "re.UNICODE).findall def query_tokenizer(text, lower=True): \"\"\" Return an iterable of tokens", "return (token for token in tokens if token and not", "we could instead get lines and tokens at once in", "token in tokens if token and not token.startswith('{{')) def ngrams(iterable,", "in the shingle are different. 
For example: >>> list(select_ngrams([(2, 1,", "token or punct: yield (True, token) if token else (False,", "a word. # Keeping the trailing + is important for", "+ ')' tokens_and_non_tokens = re.compile(_text_capture_pattern, re.UNICODE).finditer def matched_query_text_tokenizer(text): \"\"\" Return", "template part is anything enclosed in double braces template_pattern =", "WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, either express", "token and then selects a shingle s if the minimum", "and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the", "contains less than `ngram_length` items. Note: this is a fairly", "plain token strings or double braces-enclosed strings # {{something}} for", "3), (3, 4), (4, 5)] >>> list(ngrams([1,2,3,4,5], 4)) [(1, 2,", "https://github.com/nexB/scancode-toolkit/ for support and download. from __future__ import absolute_import from", "such as GPL2+ query_pattern = '[^\\W_]+\\+?[^\\W_]*' word_splitter = re.compile(query_pattern, re.UNICODE).findall", "return [] text = lower and text.lower() or text return", "under the License. # # When you publish or redistribute", "6, 1), (7, 3, 4)])) [(2, 1, 3), (1, 1,", "ANY KIND, either express or implied. See the License for", "# and + in the middle or end of a", "copy of the License at: http://apache.org/licenses/LICENSE-2.0 # Unless required by", "matched_query_text_tokenizer(text): \"\"\" Return an iterable of tokens and non-tokens from", "(7, 3, 4)])) [(2, 1, 3), (1, 1, 3), (2,", "accurately. \"\"\" if not text: return for match in tokens_and_non_tokens(text):", "text: return [] text = lower and text.lower() or text", ">>> list(ngrams([1,2,3,4], 2)) [(1, 2), (2, 3), (3, 4)] >>>", "(7, 3, 4)], with_pos=True)) [(0, (2, 1, 3)), (1, (1,", "strip: keepends = False else: keepends = True lines =", "of tokens and non-tokens from a unicode query text keeping", "iterable of tokens from a unicode query text. 
\"\"\" if", "See the License for the # specific language governing permissions", "a file at `location` or a `query string`. Include empty", "word_splitter(text) if token) # Alternate pattern used for matched text", "u'spaces'] Unbalanced templates are handled correctly: >>> list(rule_tokenizer('{{}some }}Text with", "WARRANTIES OR # CONDITIONS OF ANY KIND, either express or", "You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0", "= mgd.get('punct') if token or punct: yield (True, token) if", "(2, 3), (3, 4), (4, 5)] \"\"\" return izip(*(islice(iterable, i,", "izip(*(islice(iterable, i, None) for i in range(ngram_length))) def select_ngrams(ngrams, with_pos=False):", "require an acknowledgment. # ScanCode is a trademark of nexB", "work, you must accompany this data with the following acknowledgment:", "template_pattern,) template_splitter = re.compile(rule_pattern , re.UNICODE).findall def rule_tokenizer(text, lower=True): \"\"\"", "License. # You may obtain a copy of the License", "(1, (1, 1, 3)), (3, (2, 6, 1)), (4, (7,", "s occurs at the first or the last position of", "u'text', u'with', u'spaces'] \"\"\" if not text: return [] text", "Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download.", "yield ngram last = ngram if last != ngram: yield", "returned: >>> list(select_ngrams([(2, 1, 3), (1, 1, 3), (5, 1,", "2), (2, 3), (3, 4)] >>> list(ngrams([1,2,3], 2)) [(1, 2),", "publish or redistribute any data created with ScanCode or any", "You may not use this software except in compliance with", "islice from itertools import izip import re from zlib import", "3)), (1, (1, 1, 3)), (3, (2, 6, 1)), (4,", "use this software except in compliance with the License. 
#", "under the License is distributed on an \"AS IS\" BASIS,", "works with arrays or tuples: >>> from array import array", "(3, 4), (4, 5)] >>> list(ngrams([1,2,3,4,5], 4)) [(1, 2, 3,", "an \"AS IS\" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF", "tokens and non-tokens from a unicode query text keeping everything", "token else (False, punct) # Template-aware splitter, keeping a templated", "middle or end of a word. # Keeping the trailing", "braces-enclosed strings # {{something}} for templates. curly barces are otherwise", "3), (3, 4), (4, 5)] \"\"\" return izip(*(islice(iterable, i, None)", "For example: >>> list(ngrams([1,2,3,4,5], 2)) [(1, 2), (2, 3), (3,", "3, 4)], with_pos=True)) [(0, (2, 1, 3)), (1, (1, 1,", "of (pos, ngram) are returned: >>> list(select_ngrams([(2, 1, 3), (1,", "}}Text with spAces! + _ -')) [u'some', u'text', u'with', u'spaces']", "for line in lines: if strip: yield line.strip() else: yield", "re.compile(query_pattern, re.UNICODE).findall def query_tokenizer(text, lower=True): \"\"\" Return an iterable of", "+ _ -')) [u'some', u'text', u'with', u'spaces'] Templates are handled", "if i == 0: yield ngram last = ngram if", "skipped for templated sequences: >>> list(rule_tokenizer('{{Hi}}some {{}}Text with{{noth+-_!@ing}} {{junk}}spAces! +", "sequence. Definition from the paper: http://www2009.eprints.org/7/1/p61.pdf The algorithm first fingerprints", "# always yield the first or last ngram too. if", "\"\"\" # TODO: OPTIMIZE: tokenizing line by line may be", "in word_splitter(text) if token) # Alternate pattern used for matched", "re from zlib import crc32 from textcode.analysis import text_lines \"\"\"", "the first or last ngram too. 
if i == 0:", "(query_pattern, template_pattern,) template_splitter = re.compile(rule_pattern , re.UNICODE).findall def rule_tokenizer(text, lower=True):", "else: yield line # Split on whitespace and punctuations: keep", "the probability that a shingle is chosen is 2/k if", "list(select_ngrams([(2, 1, 3), (1, 1, 3), (5, 1, 3), (2,", "query text. \"\"\" if not text: return [] text =", "line # Split on whitespace and punctuations: keep only characters", "None for i, ngram in enumerate(ngrams): # FIXME: use a", "in lines and tokens (aka. words) with specialized version for", "+ ')' + '|' + '(?P<punct>' + not_query_pattern + ')'", "occurs at the first or the last position of s", "3), (1, 1, 3), (2, 6, 1), (7, 3, 4)]", "if the string is a text token or False if", "textcode.analysis import text_lines \"\"\" Utilities to break texts in lines", "also in between). Due to the probabilistic properties of Rabin", "implied. See the License for the # specific language governing", "ng in ngram] min_hash = min(nghs) if with_pos: ngram =", "otherwise treated as punctuation. # A template part is anything", "Note: this is a fairly arcane but optimized way to", "list(ngrams([1,2,3,4], 2)) [(1, 2), (2, 3), (3, 4)] >>> list(ngrams([1,2,3],", "')' tokens_and_non_tokens = re.compile(_text_capture_pattern, re.UNICODE).finditer def matched_query_text_tokenizer(text): \"\"\" Return an", "any ScanCode # derivative work, you must accompany this data", "\"\"\" Return an iterable of tokens and non-tokens from a", "created with ScanCode or any ScanCode # derivative work, you", "4)] \"\"\" last = None for i, ngram in enumerate(ngrams):", "= lower and text.lower() or text return (token for token", "agreed to in writing, software distributed # under the License", "from # ScanCode should be considered or used as legal", "always yield the first or last ngram too. 
if i", "def rule_tokenizer(text, lower=True): \"\"\" Return an iterable of tokens from", "on whitespace and punctuations: keep only characters # and +", "in compliance with the License. # You may obtain a", "part {{anything}} as a token. # This splitter yields plain", ">>> list(rule_tokenizer('{{Hi}}some {{}}Text with{{noth+-_!@ing}} {{junk}}spAces! + _ -{{}}')) [u'some', u'text',", "ngrams of length `ngram_length` given an iterable. Each ngram is", "# CONDITIONS OF ANY KIND, either express or implied. See", "4), (4, 5)] >>> list(ngrams(tuple([1,2,3,4,5]), 2)) [(1, 2), (2, 3),", "Positions can also be included. In this case, tuple of", "following acknowledgment: # # Generated with ScanCode and provided on", "trailing + is important for licenses name such as GPL2+", "if all tokens in the shingle are different. For example:", "All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode", "generator: >>> list(select_ngrams(x for x in [(2, 1, 3), (1,", "for matched text collection not_query_pattern = '[\\W_+]+[\\W_]?' # collect tokens", "text keeping everything (including punctuations, line endings, etc.) The returned", "(3, 4), (4, 5)] \"\"\" return izip(*(islice(iterable, i, None) for", "must accompany this data with the following acknowledgment: # #", "minimum fingerprint value of all k tokens in s occurs", "This works also from a generator: >>> list(select_ngrams(x for x", "if token) # Alternate pattern used for matched text collection", "support and download. from __future__ import absolute_import from __future__ import", "at once in a batch? 
lines = [] if location:", "<reponame>chetanya-shrimali/scancode-toolkit<filename>src/licensedcode/tokenize.py # -*- coding: utf-8 -*- # # Copyright (c)", "whitespace and punctuations: keep only characters # and + in", "'%s|%s+' % (query_pattern, template_pattern,) template_splitter = re.compile(rule_pattern , re.UNICODE).findall def", "parts, including leading and trailing templated parts. For example: >>>", "Copyright (c) 2017 nexB Inc. and others. All rights reserved.", "either express or implied. See the License for the #", "handled and skipped for templated sequences: >>> list(rule_tokenizer('{{Hi}}some {{}}Text with{{noth+-_!@ing}}", "given a file at `location` or a `query string`. Include", "izip import re from zlib import crc32 from textcode.analysis import", "def select_ngrams(ngrams, with_pos=False): \"\"\" Return an iterable as a subset", "permissions and limitations under the License. # # When you", "ngram last = ngram else: # always yield the first", "or text tokens = template_splitter(text) # skip templates return (token", "tokens_and_non_tokens = re.compile(_text_capture_pattern, re.UNICODE).finditer def matched_query_text_tokenizer(text): \"\"\" Return an iterable", "specific language governing permissions and limitations under the License. #", "(2, 3), (3, 4), (4, 5)] >>> list(ngrams([1,2,3,4,5], 4)) [(1,", "WARRANTIES # OR CONDITIONS OF ANY KIND, either express or", "text token or False if this is not (such as", "tokens in the shingle are different. For example: >>> list(select_ngrams([(2,", "part is anything enclosed in double braces template_pattern = '\\{\\{[^{}]*\\}\\}'", "an Attorney # for any legal advice. # ScanCode is", "and then selects a shingle s if the minimum fingerprint", "and limitations under the License. # # When you publish", "limitations under the License. # # When you publish or", "get lines and tokens at once in a batch? 
lines", "return for match in tokens_and_non_tokens(text): if not match: continue mgd", "is 2/k if all tokens in the shingle are different.", "1, 3)), (1, (1, 1, 3)), (3, (2, 6, 1)),", "also include the starting position for the ngram in the", "(7, 3, 4)] \"\"\" last = None for i, ngram", "coding: utf-8 -*- # # Copyright (c) 2017 nexB Inc.", "to in writing, software distributed # under the License is", "+ _ -{{}}')) [u'some', u'text', u'with', u'spaces'] \"\"\" if not", "scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/", "the minimum fingerprint value of all k tokens in s", "any legal advice. # ScanCode is a free software code", "queries and rules texts. \"\"\" def query_lines(location=None, query_string=None, strip=True): \"\"\"", "are handled correctly: >>> list(rule_tokenizer('{{}some }}Text with spAces! + _", "6, 1), (7, 3, 4)], with_pos=True)) [(0, (2, 1, 3)),", "lines = text_lines(location, demarkup=False) elif query_string: if strip: keepends =", "of the License at: http://apache.org/licenses/LICENSE-2.0 # Unless required by applicable", "for match in tokens_and_non_tokens(text): if not match: continue mgd =", "__future__ import absolute_import from __future__ import print_function from __future__ import", "https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache", "select_ngrams(ngrams, with_pos=False): \"\"\" Return an iterable as a subset of", "are different. For example: >>> list(select_ngrams([(2, 1, 3), (1, 1,", "i, None) for i in range(ngram_length))) def select_ngrams(ngrams, with_pos=False): \"\"\"", "if nghs[0] == min_hash or nghs[-1] == min_hash: yield ngram", "lines and tokens at once in a batch? 
lines =", "or nghs[-1] == min_hash: yield ngram last = ngram else:", "lines = query_string.splitlines(keepends) for line in lines: if strip: yield", "non-tokens from a unicode query text keeping everything (including punctuations,", "3), (2, 6, 1), (7, 3, 4)] Positions can also", "lines: if strip: yield line.strip() else: yield line # Split", "name such as GPL2+ query_pattern = '[^\\W_]+\\+?[^\\W_]*' word_splitter = re.compile(query_pattern,", "words) with specialized version for queries and rules texts. \"\"\"", "yield line # Split on whitespace and punctuations: keep only", "corresponding string This is used to reconstruct the matched query", "version 2.0. # Data generated with ScanCode require an acknowledgment.", "Data generated with ScanCode require an acknowledgment. # ScanCode is", "list(rule_tokenizer('')) [] >>> list(rule_tokenizer('some Text with spAces! + _ -'))", "also be included. In this case, tuple of (pos, ngram)", "returned iterable is empty if the input iterable contains less", "2)) [(1, 2), (2, 3), (3, 4)] >>> list(ngrams([1,2,3], 2))", "with ScanCode or any ScanCode # derivative work, you must", "the License. # You may obtain a copy of the", "this is not (such as punctuation, spaces, etc). - the", "under the Apache License version 2.0. # Data generated with", "or last ngram too. if i == 0: yield ngram", "with specialized version for queries and rules texts. \"\"\" def", "array >>> list(ngrams(array(b'h', [1,2,3,4,5]), 2)) [(1, 2), (2, 3), (3,", "or double braces-enclosed strings # {{something}} for templates. curly barces", "barces are otherwise treated as punctuation. # A template part", "# Keeping the trailing + is important for licenses name", "k tokens in s occurs at the first or the", "of a word. # Keeping the trailing + is important", "way to compute ngrams. 
For example: >>> list(ngrams([1,2,3,4,5], 2)) [(1,", "= False else: keepends = True lines = query_string.splitlines(keepends) for", "everything (including punctuations, line endings, etc.) The returned iterable contains", "(2, 3), (3, 4), (4, 5)] >>> list(ngrams(tuple([1,2,3,4,5]), 2)) [(1,", "i == 0: yield ngram last = ngram if last", "and + in the middle or end of a word.", "may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 #", "required by applicable law or agreed to in writing, software", "[] >>> list(rule_tokenizer('some Text with spAces! + _ -')) [u'some',", "ngrams(iterable, ngram_length): \"\"\" Return an iterable of ngrams of length", "'[^\\W_]+\\+?[^\\W_]*' word_splitter = re.compile(query_pattern, re.UNICODE).findall def query_tokenizer(text, lower=True): \"\"\" Return", "Return an iterable of tokens from a unicode rule text,", "# Generated with ScanCode and provided on an \"AS IS\"", "re.UNICODE).finditer def matched_query_text_tokenizer(text): \"\"\" Return an iterable of tokens and", "`with_pos` is True also include the starting position for the", "1, 3), (2, 6, 1), (7, 3, 4)] Positions can", "5)] \"\"\" return izip(*(islice(iterable, i, None) for i in range(ngram_length)))", "not match: continue mgd = match.groupdict() token = mgd.get('token') punct", "with the License. # You may obtain a copy of", "ANY KIND, either express or implied. 
No content created from", "True also include the starting position for the ngram in", "ngram else: # always yield the first or last ngram", "writing, software distributed # under the License is distributed on", "GPL2+ query_pattern = '[^\\W_]+\\+?[^\\W_]*' word_splitter = re.compile(query_pattern, re.UNICODE).findall def query_tokenizer(text,", "The returned iterable contains 2-tuples of: - True if the", "applicable law or agreed to in writing, software distributed #", "+ is important for licenses name such as GPL2+ query_pattern", "unicode rule text, skipping templated parts, including leading and trailing", "obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 # Unless", "# derivative work, you must accompany this data with the", "\"\"\" def query_lines(location=None, query_string=None, strip=True): \"\"\" Return an iterable of", "Return an iterable of tokens from a unicode query text.", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY", "line endings, etc.) The returned iterable contains 2-tuples of: -", "-')) [u'some', u'text', u'with', u'spaces'] Unbalanced templates are handled correctly:", "this is a fairly arcane but optimized way to compute", "with arrays or tuples: >>> from array import array >>>", "less than `ngram_length` items. Note: this is a fairly arcane", "- the corresponding string This is used to reconstruct the", "text, skipping templated parts, including leading and trailing templated parts.", "from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support", "tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for", "the middle or end of a word. # Keeping the", "compliance with the License. 
# You may obtain a copy", "Split on whitespace and punctuations: keep only characters # and", "Unbalanced templates are handled correctly: >>> list(rule_tokenizer('{{}some }}Text with spAces!", "an iterable of tokens from a unicode rule text, skipping", "= lower and text.lower() or text tokens = template_splitter(text) #", "# The ScanCode software is licensed under the Apache License", "if this is not (such as punctuation, spaces, etc). -", "Include empty lines. \"\"\" # TODO: OPTIMIZE: tokenizing line by", "are otherwise treated as punctuation. # A template part is", "text return (token for token in word_splitter(text) if token) #", "acknowledgment: # # Generated with ScanCode and provided on an", "strip=True): \"\"\" Return an iterable of text lines given a", "(token for token in word_splitter(text) if token) # Alternate pattern", "# This splitter yields plain token strings or double braces-enclosed", "2)) [] This also works with arrays or tuples: >>>", "the first or the last position of s (and potentially", "ngram in enumerate(ngrams): # FIXME: use a proper hash nghs", "u'spaces'] Templates are handled and skipped for templated sequences: >>>", "punctuations, line endings, etc.) The returned iterable contains 2-tuples of:", "# skip templates return (token for token in tokens if", "iterable contains less than `ngram_length` items. Note: this is a", "as a subset of a sequence of ngrams using the", "3, 4))] This works also from a generator: >>> list(select_ngrams(x", "etc). - the corresponding string This is used to reconstruct", "CONDITIONS OF ANY KIND, either express or implied. See the", "= re.compile(rule_pattern , re.UNICODE).findall def rule_tokenizer(text, lower=True): \"\"\" Return an", "if with_pos: ngram = (i, ngram,) if nghs[0] == min_hash", "mgd = match.groupdict() token = mgd.get('token') punct = mgd.get('punct') if", "curly barces are otherwise treated as punctuation. 
# A template", "query_string=None, strip=True): \"\"\" Return an iterable of text lines given", "except in compliance with the License. # You may obtain", "trailing templated parts. For example: >>> list(rule_tokenizer('')) [] >>> list(rule_tokenizer('some", "not text: return for match in tokens_and_non_tokens(text): if not match:", "this data with the following acknowledgment: # # Generated with", "with{{noth+-_!@ing}} {{junk}}spAces! + _ -{{}}')) [u'some', u'text', u'with', u'spaces'] \"\"\"", "text tokens = template_splitter(text) # skip templates return (token for", "skip templates return (token for token in tokens if token", "2), (2, 3), (3, 4), (4, 5)] >>> list(ngrams(tuple([1,2,3,4,5]), 2))", "a subset of a sequence of ngrams using the hailstorm", "of s (and potentially also in between). Due to the", "1, 3), (2, 6, 1), (7, 3, 4)])) [(2, 1,", "that a shingle is chosen is 2/k if all tokens", "text.lower() or text return (token for token in word_splitter(text) if", "2/k if all tokens in the shingle are different. For", "tokens at once in a batch? lines = [] if", "fingerprint value of all k tokens in s occurs at", "u'with', u'spaces'] \"\"\" if not text: return [] text =", "contains 2-tuples of: - True if the string is a", "if location: lines = text_lines(location, demarkup=False) elif query_string: if strip:", "= text_lines(location, demarkup=False) elif query_string: if strip: keepends = False", "[u'some', u'text', u'with', u'spaces'] Templates are handled and skipped for", "ScanCode require an acknowledgment. # ScanCode is a trademark of", "every token and then selects a shingle s if the", "shingle s if the minimum fingerprint value of all k", "spaces, etc). - the corresponding string This is used to", "2)) [(1, 2), (2, 3)] >>> list(ngrams([1,2], 2)) [(1, 2)]", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "collect tokens and non-token texts in two different groups _text_capture_pattern", "{{junk}}spAces! 
+ _ -{{}}')) [u'some', u'text', u'with', u'spaces'] \"\"\" if", "This also works with arrays or tuples: >>> from array", "the last position of s (and potentially also in between).", "query_string: if strip: keepends = False else: keepends = True", ">>> from array import array >>> list(ngrams(array(b'h', [1,2,3,4,5]), 2)) [(1,", "rule_pattern = '%s|%s+' % (query_pattern, template_pattern,) template_splitter = re.compile(rule_pattern ,", "re.compile(_text_capture_pattern, re.UNICODE).finditer def matched_query_text_tokenizer(text): \"\"\" Return an iterable of tokens", "should be considered or used as legal advice. Consult an", "keepends = True lines = query_string.splitlines(keepends) for line in lines:", "\"\"\" Return an iterable of tokens from a unicode query", "as punctuation. # A template part is anything enclosed in", "the probabilistic properties of Rabin fingerprints the probability that a", "text = lower and text.lower() or text tokens = template_splitter(text)", "example: >>> list(rule_tokenizer('')) [] >>> list(rule_tokenizer('some Text with spAces! +", "# You may obtain a copy of the License at:", "\"\"\" Utilities to break texts in lines and tokens (aka.", "splitter, keeping a templated part {{anything}} as a token. #", "import crc32 from textcode.analysis import text_lines \"\"\" Utilities to break", "included. In this case, tuple of (pos, ngram) are returned:", "line by line may be rather slow # we could", "from zlib import crc32 from textcode.analysis import text_lines \"\"\" Utilities", "http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under", "nexB Inc. and others. All rights reserved. # http://nexb.com and", "strip: yield line.strip() else: yield line # Split on whitespace", "A template part is anything enclosed in double braces template_pattern", "ngram,) if nghs[0] == min_hash or nghs[-1] == min_hash: yield", "templates are handled correctly: >>> list(rule_tokenizer('{{}some }}Text with spAces! 
+", "lines given a file at `location` or a `query string`.", "too. if i == 0: yield ngram last = ngram", "(5, 1, 3), (2, 6, 1), (7, 3, 4)], with_pos=True))", "may be rather slow # we could instead get lines", "crc32 from textcode.analysis import text_lines \"\"\" Utilities to break texts", "line may be rather slow # we could instead get", "or implied. No content created from # ScanCode should be", "5)] >>> list(ngrams([1,2,3,4], 2)) [(1, 2), (2, 3), (3, 4)]", "the starting position for the ngram in the original sequence.", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR # CONDITIONS", "line in lines: if strip: yield line.strip() else: yield line", "empty if the input iterable contains less than `ngram_length` items.", "position for the ngram in the original sequence. Definition from", "absolute_import from __future__ import print_function from __future__ import unicode_literals from", "texts in lines and tokens (aka. words) with specialized version", "[(0, (2, 1, 3)), (1, (1, 1, 3)), (3, (2,", "ngram] min_hash = min(nghs) if with_pos: ngram = (i, ngram,)", "Definition from the paper: http://www2009.eprints.org/7/1/p61.pdf The algorithm first fingerprints every", "templated sequences: >>> list(rule_tokenizer('{{Hi}}some {{}}Text with{{noth+-_!@ing}} {{junk}}spAces! + _ -{{}}'))", "is important for licenses name such as GPL2+ query_pattern =", "express or implied. No content created from # ScanCode should", "6, 1), (7, 3, 4)] Positions can also be included.", "splitter yields plain token strings or double braces-enclosed strings #", "the Apache License version 2.0. # Data generated with ScanCode", "software except in compliance with the License. # You may", "non-token texts in two different groups _text_capture_pattern = '(?P<token>' +", "is a text token or False if this is not", "given an iterable. 
Each ngram is a tuple of ngram_length", "elif query_string: if strip: keepends = False else: keepends =", "from the paper: http://www2009.eprints.org/7/1/p61.pdf The algorithm first fingerprints every token", "else (False, punct) # Template-aware splitter, keeping a templated part", "double braces-enclosed strings # {{something}} for templates. curly barces are", "templated parts, including leading and trailing templated parts. For example:", "any data created with ScanCode or any ScanCode # derivative", "considered or used as legal advice. Consult an Attorney #", ">>> list(ngrams([1,2,3], 2)) [(1, 2), (2, 3)] >>> list(ngrams([1,2], 2))", "# Data generated with ScanCode require an acknowledgment. # ScanCode", "unicode query text. \"\"\" if not text: return [] text", "= '%s|%s+' % (query_pattern, template_pattern,) template_splitter = re.compile(rule_pattern , re.UNICODE).findall", "the following acknowledgment: # # Generated with ScanCode and provided", "this software except in compliance with the License. # You", "or text return (token for token in word_splitter(text) if token)", "for licenses name such as GPL2+ query_pattern = '[^\\W_]+\\+?[^\\W_]*' word_splitter", "but optimized way to compute ngrams. For example: >>> list(ngrams([1,2,3,4,5],", "else: # always yield the first or last ngram too.", "yields plain token strings or double braces-enclosed strings # {{something}}", "for i, ngram in enumerate(ngrams): # FIXME: use a proper", "- True if the string is a text token or", "fingerprints every token and then selects a shingle s if", "for token in word_splitter(text) if token) # Alternate pattern used", "last = ngram else: # always yield the first or", "chosen is 2/k if all tokens in the shingle are", "with spAces! + _ -')) [u'some', u'text', u'with', u'spaces'] Unbalanced", "a tuple of ngram_length items. 
The returned iterable is empty", "list(ngrams([1,2,3], 2)) [(1, 2), (2, 3)] >>> list(ngrams([1,2], 2)) [(1,", "selects a shingle s if the minimum fingerprint value of", "in between). Due to the probabilistic properties of Rabin fingerprints", "1)), (4, (7, 3, 4))] This works also from a", "lower and text.lower() or text return (token for token in", "utf-8 -*- # # Copyright (c) 2017 nexB Inc. and", "The ScanCode software is licensed under the Apache License version", "{{anything}} as a token. # This splitter yields plain token", "iterable of text lines given a file at `location` or", "slow # we could instead get lines and tokens at", "a batch? lines = [] if location: lines = text_lines(location,", "a sequence of ngrams using the hailstorm algorithm. If `with_pos`", "of all k tokens in s occurs at the first", "min_hash = min(nghs) if with_pos: ngram = (i, ngram,) if", "or False if this is not (such as punctuation, spaces,", "arcane but optimized way to compute ngrams. For example: >>>", "list(rule_tokenizer('{{}some }}Text with spAces! + _ -')) [u'some', u'text', u'with',", "unicode_literals from itertools import islice from itertools import izip import", "version for queries and rules texts. \"\"\" def query_lines(location=None, query_string=None,", "\"\"\" Return an iterable of text lines given a file", "[u'some', u'text', u'with', u'spaces'] Unbalanced templates are handled correctly: >>>", "in range(ngram_length))) def select_ngrams(ngrams, with_pos=False): \"\"\" Return an iterable as", "None) for i in range(ngram_length))) def select_ngrams(ngrams, with_pos=False): \"\"\" Return", "lines. \"\"\" # TODO: OPTIMIZE: tokenizing line by line may", "not use this software except in compliance with the License.", "starting position for the ngram in the original sequence. Definition", "in s occurs at the first or the last position", "batch? lines = [] if location: lines = text_lines(location, demarkup=False)", "{{something}} for templates. 
curly barces are otherwise treated as punctuation.", "a shingle is chosen is 2/k if all tokens in", "input iterable contains less than `ngram_length` items. Note: this is", "False if this is not (such as punctuation, spaces, etc).", "not token.startswith('{{')) def ngrams(iterable, ngram_length): \"\"\" Return an iterable of", "length `ngram_length` given an iterable. Each ngram is a tuple", "strings or double braces-enclosed strings # {{something}} for templates. curly", "with_pos=False): \"\"\" Return an iterable as a subset of a", "break texts in lines and tokens (aka. words) with specialized", "KIND, either express or implied. See the License for the", "and rules texts. \"\"\" def query_lines(location=None, query_string=None, strip=True): \"\"\" Return", "of a sequence of ngrams using the hailstorm algorithm. If", "2), (2, 3)] >>> list(ngrams([1,2], 2)) [(1, 2)] >>> list(ngrams([1],", "u'with', u'spaces'] Templates are handled and skipped for templated sequences:", "Templates are handled and skipped for templated sequences: >>> list(rule_tokenizer('{{Hi}}some", "skipping templated parts, including leading and trailing templated parts. For", "text accurately. \"\"\" if not text: return for match in", "yield (True, token) if token else (False, punct) # Template-aware", "once in a batch? lines = [] if location: lines", "tokens from a unicode query text. \"\"\" if not text:", "u'with', u'spaces'] Unbalanced templates are handled correctly: >>> list(rule_tokenizer('{{}some }}Text", "[(1, 2), (2, 3), (3, 4), (4, 5)] >>> list(ngrams([1,2,3,4,5],", "in tokens_and_non_tokens(text): if not match: continue mgd = match.groupdict() token", "a token. # This splitter yields plain token strings or", "not_query_pattern + ')' tokens_and_non_tokens = re.compile(_text_capture_pattern, re.UNICODE).finditer def matched_query_text_tokenizer(text): \"\"\"", "ngram = (i, ngram,) if nghs[0] == min_hash or nghs[-1]", "hash nghs = [crc32(str(ng)) for ng in ngram] min_hash =", "or implied. 
See the License for the # specific language", "[] text = lower and text.lower() or text return (token", "express or implied. See the License for the # specific", "yield line.strip() else: yield line # Split on whitespace and", "-{{}}')) [u'some', u'text', u'with', u'spaces'] \"\"\" if not text: return", "query_pattern = '[^\\W_]+\\+?[^\\W_]*' word_splitter = re.compile(query_pattern, re.UNICODE).findall def query_tokenizer(text, lower=True):", "probabilistic properties of Rabin fingerprints the probability that a shingle", "or any ScanCode # derivative work, you must accompany this", "a unicode rule text, skipping templated parts, including leading and", "2.0. # Data generated with ScanCode require an acknowledgment. #", "line.strip() else: yield line # Split on whitespace and punctuations:", "Keeping the trailing + is important for licenses name such", "# You may not use this software except in compliance", "then selects a shingle s if the minimum fingerprint value", "for any legal advice. # ScanCode is a free software", "+ _ -')) [u'some', u'text', u'with', u'spaces'] Unbalanced templates are", "iterable as a subset of a sequence of ngrams using", "min_hash or nghs[-1] == min_hash: yield ngram last = ngram", "text_lines \"\"\" Utilities to break texts in lines and tokens", "tokens from a unicode rule text, skipping templated parts, including", "paper: http://www2009.eprints.org/7/1/p61.pdf The algorithm first fingerprints every token and then", "4, 5)] >>> list(ngrams([1,2,3,4], 2)) [(1, 2), (2, 3), (3,", "and punctuations: keep only characters # and + in the", "tokens if token and not token.startswith('{{')) def ngrams(iterable, ngram_length): \"\"\"", "used for matched text collection not_query_pattern = '[\\W_+]+[\\W_]?' # collect", "(2, 1, 3)), (1, (1, 1, 3)), (3, (2, 6,", "of nexB Inc. 
# # You may not use this", "if not match: continue mgd = match.groupdict() token = mgd.get('token')", ">>> list(select_ngrams(x for x in [(2, 1, 3), (1, 1,", "min_hash: yield ngram last = ngram else: # always yield", "demarkup=False) elif query_string: if strip: keepends = False else: keepends", "return [] text = lower and text.lower() or text tokens", "sequences: >>> list(rule_tokenizer('{{Hi}}some {{}}Text with{{noth+-_!@ing}} {{junk}}spAces! + _ -{{}}')) [u'some',", "with_pos=True)) [(0, (2, 1, 3)), (1, (1, 1, 3)), (3,", "the corresponding string This is used to reconstruct the matched", "import array >>> list(ngrams(array(b'h', [1,2,3,4,5]), 2)) [(1, 2), (2, 3),", "or agreed to in writing, software distributed # under the", "last = None for i, ngram in enumerate(ngrams): # FIXME:", "ngram too. if i == 0: yield ngram last =", "text = lower and text.lower() or text return (token for", "punctuation. # A template part is anything enclosed in double", "(token for token in tokens if token and not token.startswith('{{'))", "token.startswith('{{')) def ngrams(iterable, ngram_length): \"\"\" Return an iterable of ngrams", "created from # ScanCode should be considered or used as", "\"\"\" if not text: return [] text = lower and", "the License. # # When you publish or redistribute any", "\"\"\" Return an iterable of ngrams of length `ngram_length` given", "# Alternate pattern used for matched text collection not_query_pattern =", "value of all k tokens in s occurs at the", "else: keepends = True lines = query_string.splitlines(keepends) for line in", "ngram last = ngram if last != ngram: yield ngram", "iterable of tokens and non-tokens from a unicode query text", "list(ngrams(tuple([1,2,3,4,5]), 2)) [(1, 2), (2, 3), (3, 4), (4, 5)]", "http://www2009.eprints.org/7/1/p61.pdf The algorithm first fingerprints every token and then selects", "is True also include the starting position for the ngram", "Attorney # for any legal advice. 
# ScanCode is a", "(7, 3, 4)] Positions can also be included. In this", "keepends = False else: keepends = True lines = query_string.splitlines(keepends)", "Alternate pattern used for matched text collection not_query_pattern = '[\\W_+]+[\\W_]?'", "with ScanCode and provided on an \"AS IS\" BASIS, WITHOUT", "# # You may not use this software except in", "+ '|' + '(?P<punct>' + not_query_pattern + ')' tokens_and_non_tokens =", "used to reconstruct the matched query text accurately. \"\"\" if", "the hailstorm algorithm. If `with_pos` is True also include the", "from itertools import izip import re from zlib import crc32", "+ '(?P<punct>' + not_query_pattern + ')' tokens_and_non_tokens = re.compile(_text_capture_pattern, re.UNICODE).finditer", "or the last position of s (and potentially also in", "(3, (2, 6, 1)), (4, (7, 3, 4))] This works", "= match.groupdict() token = mgd.get('token') punct = mgd.get('punct') if token", "in two different groups _text_capture_pattern = '(?P<token>' + query_pattern +", "Return an iterable as a subset of a sequence of", "re.compile(rule_pattern , re.UNICODE).findall def rule_tokenizer(text, lower=True): \"\"\" Return an iterable", "is chosen is 2/k if all tokens in the shingle", "last ngram too. if i == 0: yield ngram last", "range(ngram_length))) def select_ngrams(ngrams, with_pos=False): \"\"\" Return an iterable as a", "OR CONDITIONS OF ANY KIND, either express or implied. No", "[] This also works with arrays or tuples: >>> from", "6, 1)), (4, (7, 3, 4))] This works also from", "you publish or redistribute any data created with ScanCode or", "rules texts. \"\"\" def query_lines(location=None, query_string=None, strip=True): \"\"\" Return an", "the string is a text token or False if this", "string is a text token or False if this is", "\"\"\" Return an iterable of tokens from a unicode rule", "spAces! 
def rule_tokenizer(text, lower=True):
    """
    Yield tokens from a unicode rule text, skipping templated parts,
    including leading and trailing templated parts.

    For example:
    >>> list(rule_tokenizer(''))
    []
    >>> list(rule_tokenizer('some Text with spAces! + _ -'))
    [u'some', u'text', u'with', u'spaces']

    Templates are handled and skipped for templated sequences:
    >>> list(rule_tokenizer('{{Hi}}some {{}}Text with{{noth+-_!@ing}} {{junk}}spAces! + _ -{{}}'))
    [u'some', u'text', u'with', u'spaces']

    Unbalanced templates are handled correctly:
    >>> list(rule_tokenizer('{{}some }}Text with spAces! + _ -'))
    [u'some', u'text', u'with', u'spaces']
    """
    if not text:
        return []
    if lower:
        text = text.lower()
    # split in one pass, then drop every {{...}} template chunk:
    # only plain tokens survive
    chunks = template_splitter(text)
    return (tok for tok in chunks if tok and not tok.startswith('{{'))
Note: this is a fairly arcane but optimized way", "[(1, 2), (2, 3), (3, 4), (4, 5)] >>> list(ngrams(tuple([1,2,3,4,5]),", "and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ #", "list(ngrams([1,2], 2)) [(1, 2)] >>> list(ngrams([1], 2)) [] This also", "(2, 6, 1), (7, 3, 4)] \"\"\" last = None", "token = mgd.get('token') punct = mgd.get('punct') if token or punct:", "than `ngram_length` items. Note: this is a fairly arcane but", "3), (1, 1, 3), (5, 1, 3), (2, 6, 1),", "(including punctuations, line endings, etc.) The returned iterable contains 2-tuples", "template_splitter = re.compile(rule_pattern , re.UNICODE).findall def rule_tokenizer(text, lower=True): \"\"\" Return", "re.UNICODE).findall def rule_tokenizer(text, lower=True): \"\"\" Return an iterable of tokens", "[(1, 2)] >>> list(ngrams([1], 2)) [] This also works with", "print_function from __future__ import unicode_literals from itertools import islice from", ", re.UNICODE).findall def rule_tokenizer(text, lower=True): \"\"\" Return an iterable of", "import unicode_literals from itertools import islice from itertools import izip", "_text_capture_pattern = '(?P<token>' + query_pattern + ')' + '|' +", "import absolute_import from __future__ import print_function from __future__ import unicode_literals", "query_string.splitlines(keepends) for line in lines: if strip: yield line.strip() else:", "from __future__ import absolute_import from __future__ import print_function from __future__", "= min(nghs) if with_pos: ngram = (i, ngram,) if nghs[0]", "return izip(*(islice(iterable, i, None) for i in range(ngram_length))) def select_ngrams(ngrams,", "When you publish or redistribute any data created with ScanCode", "4)) [(1, 2, 3, 4), (2, 3, 4, 5)] >>>", "with the following acknowledgment: # # Generated with ScanCode and", "KIND, either express or implied. 
No content created from #", ">>> list(ngrams([1,2], 2)) [(1, 2)] >>> list(ngrams([1], 2)) [] This", "`ngram_length` items. Note: this is a fairly arcane but optimized", "[(1, 2), (2, 3), (3, 4)] >>> list(ngrams([1,2,3], 2)) [(1,", "last position of s (and potentially also in between). Due", "# A template part is anything enclosed in double braces", "lines and tokens (aka. words) with specialized version for queries", "(2, 3, 4, 5)] >>> list(ngrams([1,2,3,4], 2)) [(1, 2), (2,", "all tokens in the shingle are different. For example: >>>", "and download. from __future__ import absolute_import from __future__ import print_function", "= '\\{\\{[^{}]*\\}\\}' rule_pattern = '%s|%s+' % (query_pattern, template_pattern,) template_splitter =", "also from a generator: >>> list(select_ngrams(x for x in [(2,", "4)], with_pos=True)) [(0, (2, 1, 3)), (1, (1, 1, 3)),", "texts in two different groups _text_capture_pattern = '(?P<token>' + query_pattern", "tokens (aka. words) with specialized version for queries and rules", "_ -')) [u'some', u'text', u'with', u'spaces'] Templates are handled and", "token in word_splitter(text) if token) # Alternate pattern used for", "list(select_ngrams(x for x in [(2, 1, 3), (1, 1, 3),", ">>> list(ngrams(array(b'h', [1,2,3,4,5]), 2)) [(1, 2), (2, 3), (3, 4),", "implied. No content created from # ScanCode should be considered", "for token in tokens if token and not token.startswith('{{')) def", "end of a word. # Keeping the trailing + is", "matched text collection not_query_pattern = '[\\W_+]+[\\W_]?' 
# collect tokens and", "text: return for match in tokens_and_non_tokens(text): if not match: continue", "The algorithm first fingerprints every token and then selects a", "For example: >>> list(select_ngrams([(2, 1, 3), (1, 1, 3), (5,", "6, 1), (7, 3, 4)] \"\"\" last = None for", "iterable of tokens from a unicode rule text, skipping templated", "from __future__ import print_function from __future__ import unicode_literals from itertools", "or redistribute any data created with ScanCode or any ScanCode", "and skipped for templated sequences: >>> list(rule_tokenizer('{{Hi}}some {{}}Text with{{noth+-_!@ing}} {{junk}}spAces!", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "(4, (7, 3, 4))] This works also from a generator:", "u'spaces'] \"\"\" if not text: return [] text = lower", "'(?P<punct>' + not_query_pattern + ')' tokens_and_non_tokens = re.compile(_text_capture_pattern, re.UNICODE).finditer def", "licenses name such as GPL2+ query_pattern = '[^\\W_]+\\+?[^\\W_]*' word_splitter =", "is a tuple of ngram_length items. The returned iterable is", "and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. from", "ngrams using the hailstorm algorithm. If `with_pos` is True also", "2), (2, 3), (3, 4), (4, 5)] >>> list(ngrams([1,2,3,4,5], 4))", "http://apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to", "original sequence. Definition from the paper: http://www2009.eprints.org/7/1/p61.pdf The algorithm first", "Apache License version 2.0. # Data generated with ScanCode require", "3, 4, 5)] >>> list(ngrams([1,2,3,4], 2)) [(1, 2), (2, 3),", "lower=True): \"\"\" Return an iterable of tokens from a unicode", "== min_hash: yield ngram last = ngram else: # always", "and tokens (aka. 
words) with specialized version for queries and", "ScanCode # derivative work, you must accompany this data with", "__future__ import print_function from __future__ import unicode_literals from itertools import", "tokens and non-token texts in two different groups _text_capture_pattern =", "3), (2, 6, 1), (7, 3, 4)])) [(2, 1, 3),", "be included. In this case, tuple of (pos, ngram) are", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR #", "between). Due to the probabilistic properties of Rabin fingerprints the", "import izip import re from zlib import crc32 from textcode.analysis", "an iterable of ngrams of length `ngram_length` given an iterable.", "(2, 6, 1), (7, 3, 4)] Positions can also be", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES # OR CONDITIONS", "3), (3, 4), (4, 5)] >>> list(ngrams(tuple([1,2,3,4,5]), 2)) [(1, 2),", "text lines given a file at `location` or a `query", "enclosed in double braces template_pattern = '\\{\\{[^{}]*\\}\\}' rule_pattern = '%s|%s+'", "ScanCode software is licensed under the Apache License version 2.0.", "Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/", "for support and download. from __future__ import absolute_import from __future__", "this case, tuple of (pos, ngram) are returned: >>> list(select_ngrams([(2,", "is a fairly arcane but optimized way to compute ngrams.", "Each ngram is a tuple of ngram_length items. The returned", "match.groupdict() token = mgd.get('token') punct = mgd.get('punct') if token or", "items. The returned iterable is empty if the input iterable", "in lines: if strip: yield line.strip() else: yield line #", "provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES # OR", "download. from __future__ import absolute_import from __future__ import print_function from", "+ in the middle or end of a word. #", "used as legal advice. Consult an Attorney # for any", "software is licensed under the Apache License version 2.0. #", "etc.) 
def matched_query_text_tokenizer(text):
    """
    Yield 2-tuples for every token and non-token run of a unicode query
    text, keeping everything (including punctuations, line endings, etc.)

    Each yielded tuple contains:
    - True if the string is a text token or False if this is not
      (such as punctuation, spaces, etc).
    - the corresponding string
    This is used to reconstruct the matched query text accurately.
    """
    if not text:
        return
    for m in tokens_and_non_tokens(text):
        if not m:
            continue
        groups = m.groupdict()
        tok = groups.get('token')
        sep = groups.get('punct')
        # exactly one of the two named groups matched; emit it flagged
        if tok:
            yield True, tok
        elif sep:
            yield False, sep
# Visit https://github.com/nexB/scancode-toolkit/ for support and", "Return an iterable of text lines given a file at", "also works with arrays or tuples: >>> from array import", "\"AS IS\" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY", "1, 3), (1, 1, 3), (2, 6, 1), (7, 3,", "in ngram] min_hash = min(nghs) if with_pos: ngram = (i,", "the trailing + is important for licenses name such as", "for templates. curly barces are otherwise treated as punctuation. #", "the # specific language governing permissions and limitations under the", "ngram is a tuple of ngram_length items. The returned iterable", "ngrams. For example: >>> list(ngrams([1,2,3,4,5], 2)) [(1, 2), (2, 3),", "redistribute any data created with ScanCode or any ScanCode #", "5)] >>> list(ngrams([1,2,3,4,5], 4)) [(1, 2, 3, 4), (2, 3,", "[(2, 1, 3), (1, 1, 3), (2, 6, 1), (7,", "= [crc32(str(ng)) for ng in ngram] min_hash = min(nghs) if", "arrays or tuples: >>> from array import array >>> list(ngrams(array(b'h',", "a free software code scanning tool from nexB Inc. and", "correctly: >>> list(rule_tokenizer('{{}some }}Text with spAces! + _ -')) [u'some',", "of text lines given a file at `location` or a", "location: lines = text_lines(location, demarkup=False) elif query_string: if strip: keepends", "% (query_pattern, template_pattern,) template_splitter = re.compile(rule_pattern , re.UNICODE).findall def rule_tokenizer(text,", "from array import array >>> list(ngrams(array(b'h', [1,2,3,4,5]), 2)) [(1, 2),", "# ScanCode is a trademark of nexB Inc. # #", "or end of a word. # Keeping the trailing +", "3, 4)])) [(2, 1, 3), (1, 1, 3), (2, 6,", "Consult an Attorney # for any legal advice. # ScanCode", "the paper: http://www2009.eprints.org/7/1/p61.pdf The algorithm first fingerprints every token and", "characters # and + in the middle or end of", "OF ANY KIND, either express or implied. 
No content created", "(1, 1, 3), (2, 6, 1), (7, 3, 4)] \"\"\"", "(4, 5)] >>> list(ngrams([1,2,3,4,5], 4)) [(1, 2, 3, 4), (2,", "tuple of (pos, ngram) are returned: >>> list(select_ngrams([(2, 1, 3),", ">>> list(ngrams(tuple([1,2,3,4,5]), 2)) [(1, 2), (2, 3), (3, 4), (4,", "i, ngram in enumerate(ngrams): # FIXME: use a proper hash", "keeping everything (including punctuations, line endings, etc.) The returned iterable", "= None for i, ngram in enumerate(ngrams): # FIXME: use", "True if the string is a text token or False", "CONDITIONS OF ANY KIND, either express or implied. No content", "OPTIMIZE: tokenizing line by line may be rather slow #", "rather slow # we could instead get lines and tokens", "a generator: >>> list(select_ngrams(x for x in [(2, 1, 3),", "')' + '|' + '(?P<punct>' + not_query_pattern + ')' tokens_and_non_tokens", "template_pattern = '\\{\\{[^{}]*\\}\\}' rule_pattern = '%s|%s+' % (query_pattern, template_pattern,) template_splitter", "1), (7, 3, 4)] Positions can also be included. In", "if the input iterable contains less than `ngram_length` items. Note:", "\"\"\" if not text: return for match in tokens_and_non_tokens(text): if", "or punct: yield (True, token) if token else (False, punct)", "compute ngrams. For example: >>> list(ngrams([1,2,3,4,5], 2)) [(1, 2), (2,", "Return an iterable of ngrams of length `ngram_length` given an", "are returned: >>> list(select_ngrams([(2, 1, 3), (1, 1, 3), (5,", "def query_lines(location=None, query_string=None, strip=True): \"\"\" Return an iterable of text", "from itertools import islice from itertools import izip import re", "is licensed under the Apache License version 2.0. 
# Data", "ScanCode is a free software code scanning tool from nexB", "law or agreed to in writing, software distributed # under", "'(?P<token>' + query_pattern + ')' + '|' + '(?P<punct>' +", "# TODO: OPTIMIZE: tokenizing line by line may be rather", "derivative work, you must accompany this data with the following", "2), (2, 3), (3, 4), (4, 5)] \"\"\" return izip(*(islice(iterable,", "sequence of ngrams using the hailstorm algorithm. If `with_pos` is", "nghs[-1] == min_hash: yield ngram last = ngram else: #", "different groups _text_capture_pattern = '(?P<token>' + query_pattern + ')' +", "hailstorm algorithm. If `with_pos` is True also include the starting", "__future__ import unicode_literals from itertools import islice from itertools import", "return (token for token in word_splitter(text) if token) # Alternate", "Generated with ScanCode and provided on an \"AS IS\" BASIS,", "and non-token texts in two different groups _text_capture_pattern = '(?P<token>'", "In this case, tuple of (pos, ngram) are returned: >>>", "[] text = lower and text.lower() or text tokens =", "the License for the # specific language governing permissions and", "spAces! + _ -')) [u'some', u'text', u'with', u'spaces'] Templates are", "not_query_pattern = '[\\W_+]+[\\W_]?' # collect tokens and non-token texts in", "an iterable of tokens and non-tokens from a unicode query", "fairly arcane but optimized way to compute ngrams. For example:", "# Copyright (c) 2017 nexB Inc. and others. All rights", "the ngram in the original sequence. Definition from the paper:", "others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. from __future__", "including leading and trailing templated parts. For example: >>> list(rule_tokenizer(''))", "min(nghs) if with_pos: ngram = (i, ngram,) if nghs[0] ==", "# -*- coding: utf-8 -*- # # Copyright (c) 2017", "iterable. Each ngram is a tuple of ngram_length items. 
# Split on whitespace and punctuations: keep only characters
# and + in the middle or end of a word.
# Keeping the trailing + is important for licenses name such as GPL2+
query_pattern = '[^\W_]+\+?[^\W_]*'
word_splitter = re.compile(query_pattern, re.UNICODE).findall


def query_tokenizer(text, lower=True):
    """
    Yield tokens extracted from a unicode query text, lowercased unless
    `lower` is False. Return an empty list for empty input.
    """
    if not text:
        return []
    if lower:
        text = text.lower()
    return (tok for tok in word_splitter(text) if tok)
+ _ -')) [u'some', u'text',", "and not token.startswith('{{')) def ngrams(iterable, ngram_length): \"\"\" Return an iterable", "query text keeping everything (including punctuations, line endings, etc.) The", "using the hailstorm algorithm. If `with_pos` is True also include", "(2, 6, 1), (7, 3, 4)], with_pos=True)) [(0, (2, 1,", "leading and trailing templated parts. For example: >>> list(rule_tokenizer('')) []", "(2, 6, 1)), (4, (7, 3, 4))] This works also", "This is used to reconstruct the matched query text accurately.", ">>> list(rule_tokenizer('')) [] >>> list(rule_tokenizer('some Text with spAces! + _", "token. # This splitter yields plain token strings or double", "content created from # ScanCode should be considered or used", "tokens in s occurs at the first or the last", "not (such as punctuation, spaces, etc). - the corresponding string", "query_lines(location=None, query_string=None, strip=True): \"\"\" Return an iterable of text lines", "position of s (and potentially also in between). Due to", "token) # Alternate pattern used for matched text collection not_query_pattern", "(pos, ngram) are returned: >>> list(select_ngrams([(2, 1, 3), (1, 1,", "_ -')) [u'some', u'text', u'with', u'spaces'] Unbalanced templates are handled", "all k tokens in s occurs at the first or", "string`. Include empty lines. \"\"\" # TODO: OPTIMIZE: tokenizing line", ">>> list(rule_tokenizer('{{}some }}Text with spAces! + _ -')) [u'some', u'text',", "# Split on whitespace and punctuations: keep only characters #", ">>> list(ngrams([1,2,3,4,5], 4)) [(1, 2, 3, 4), (2, 3, 4,", "potentially also in between). Due to the probabilistic properties of", "Due to the probabilistic properties of Rabin fingerprints the probability", "1, 3)), (3, (2, 6, 1)), (4, (7, 3, 4))]", "for queries and rules texts. 
\"\"\" def query_lines(location=None, query_string=None, strip=True):", "the License at: http://apache.org/licenses/LICENSE-2.0 # Unless required by applicable law", "# # Generated with ScanCode and provided on an \"AS", "in the original sequence. Definition from the paper: http://www2009.eprints.org/7/1/p61.pdf The", "in writing, software distributed # under the License is distributed", "ScanCode or any ScanCode # derivative work, you must accompany", "# {{something}} for templates. curly barces are otherwise treated as", "(1, 1, 3), (2, 6, 1), (7, 3, 4)] Positions", "file at `location` or a `query string`. Include empty lines.", "or tuples: >>> from array import array >>> list(ngrams(array(b'h', [1,2,3,4,5]),", "FIXME: use a proper hash nghs = [crc32(str(ng)) for ng", "first or the last position of s (and potentially also", "of Rabin fingerprints the probability that a shingle is chosen", "list(rule_tokenizer('{{Hi}}some {{}}Text with{{noth+-_!@ing}} {{junk}}spAces! + _ -{{}}')) [u'some', u'text', u'with',", "WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express", "is used to reconstruct the matched query text accurately. \"\"\"", "'\\{\\{[^{}]*\\}\\}' rule_pattern = '%s|%s+' % (query_pattern, template_pattern,) template_splitter = re.compile(rule_pattern", "if token or punct: yield (True, token) if token else", "u'text', u'with', u'spaces'] Templates are handled and skipped for templated", "3, 4)] \"\"\" last = None for i, ngram in", "punct) # Template-aware splitter, keeping a templated part {{anything}} as", "= [] if location: lines = text_lines(location, demarkup=False) elif query_string:", "and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES #", "rule_tokenizer(text, lower=True): \"\"\" Return an iterable of tokens from a", "templated part {{anything}} as a token. 
# This splitter yields", "an iterable of text lines given a file at `location`", "No content created from # ScanCode should be considered or", "Template-aware splitter, keeping a templated part {{anything}} as a token.", "the shingle are different. For example: >>> list(select_ngrams([(2, 1, 3),", "an acknowledgment. # ScanCode is a trademark of nexB Inc.", "could instead get lines and tokens at once in a", "shingle is chosen is 2/k if all tokens in the", "include the starting position for the ngram in the original", "licensed under the Apache License version 2.0. # Data generated", "algorithm. If `with_pos` is True also include the starting position", "lower and text.lower() or text tokens = template_splitter(text) # skip", "is anything enclosed in double braces template_pattern = '\\{\\{[^{}]*\\}\\}' rule_pattern", "# When you publish or redistribute any data created with", "= (i, ngram,) if nghs[0] == min_hash or nghs[-1] ==", "an iterable of tokens from a unicode query text. \"\"\"", "== 0: yield ngram last = ngram if last !=", "(5, 1, 3), (2, 6, 1), (7, 3, 4)])) [(2,", "[crc32(str(ng)) for ng in ngram] min_hash = min(nghs) if with_pos:", "for i in range(ngram_length))) def select_ngrams(ngrams, with_pos=False): \"\"\" Return an", "from a generator: >>> list(select_ngrams(x for x in [(2, 1,", "punctuation, spaces, etc). - the corresponding string This is used", "data with the following acknowledgment: # # Generated with ScanCode", "4)] Positions can also be included. In this case, tuple", "IS\" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND,", "yield the first or last ngram too. if i ==", "different. For example: >>> list(select_ngrams([(2, 1, 3), (1, 1, 3),", "text collection not_query_pattern = '[\\W_+]+[\\W_]?' # collect tokens and non-token", "two different groups _text_capture_pattern = '(?P<token>' + query_pattern + ')'", "query_pattern + ')' + '|' + '(?P<punct>' + not_query_pattern +", "optimized way to compute ngrams. 
def ngrams(iterable, ngram_length):
    """
    Return an iterable of ngrams of length `ngram_length` given an iterable.
    Each ngram is a tuple of ngram_length items.

    The returned iterable is empty if the input iterable contains less than
    `ngram_length` items.

    Note: this is a fairly arcane but optimized way to compute ngrams.

    For example:
    >>> list(ngrams([1,2,3,4,5], 2))
    [(1, 2), (2, 3), (3, 4), (4, 5)]
    >>> list(ngrams([1,2,3,4,5], 4))
    [(1, 2, 3, 4), (2, 3, 4, 5)]
    >>> list(ngrams([1,2,3,4], 2))
    [(1, 2), (2, 3), (3, 4)]
    >>> list(ngrams([1,2,3], 2))
    [(1, 2), (2, 3)]
    >>> list(ngrams([1,2], 2))
    [(1, 2)]
    >>> list(ngrams([1], 2))
    []

    This also works with arrays or tuples:
    >>> from array import array
    >>> list(ngrams(array(b'h', [1,2,3,4,5]), 2))
    [(1, 2), (2, 3), (3, 4), (4, 5)]
    >>> list(ngrams(tuple([1,2,3,4,5]), 2))
    [(1, 2), (2, 3), (3, 4), (4, 5)]
    """
    # build ngram_length staggered views of the sequence (offset 0, 1, ...)
    # and zip them back together: column i of the zip is the ngram at i
    staggered = (islice(iterable, start, None) for start in range(ngram_length))
    return izip(*staggered)
def select_ngrams(ngrams, with_pos=False):
    """
    Return an iterable as a subset of a sequence of ngrams using the hailstorm
    algorithm. If `with_pos` is True also include the starting position for the
    ngram in the original sequence. Yield nothing for an empty input.

    Definition from the paper: http://www2009.eprints.org/7/1/p61.pdf

      The algorithm first fingerprints every token and then selects a shingle s
      if the minimum fingerprint value of all k tokens in s occurs at the first
      or the last position of s (and potentially also in between). Due to the
      probabilistic properties of Rabin fingerprints the probability that a
      shingle is chosen is 2/k if all tokens in the shingle are different.

    For example:
    >>> list(select_ngrams([(2, 1, 3), (1, 1, 3), (5, 1, 3), (2, 6, 1), (7, 3, 4)]))
    [(2, 1, 3), (1, 1, 3), (2, 6, 1), (7, 3, 4)]

    Positions can also be included. In this case, tuple of (pos, ngram) are returned:
    >>> list(select_ngrams([(2, 1, 3), (1, 1, 3), (5, 1, 3), (2, 6, 1), (7, 3, 4)], with_pos=True))
    [(0, (2, 1, 3)), (1, (1, 1, 3)), (3, (2, 6, 1)), (4, (7, 3, 4))]

    This works also from a generator:
    >>> list(select_ngrams(x for x in [(2, 1, 3), (1, 1, 3), (5, 1, 3), (2, 6, 1), (7, 3, 4)]))
    [(2, 1, 3), (1, 1, 3), (2, 6, 1), (7, 3, 4)]
    """
    last = None
    # prime `ngram` so the trailing "yield the last ngram" check below cannot
    # raise UnboundLocalError when `ngrams` is an empty iterable
    ngram = None
    for i, ngram in enumerate(ngrams):
        # FIXME: use a proper hash
        nghs = [crc32(str(ng)) for ng in ngram]
        min_hash = min(nghs)
        if with_pos:
            ngram = (i, ngram,)
        # hailstorm selection: keep the ngram when the minimum fingerprint
        # sits at either edge of the shingle
        if nghs[0] == min_hash or nghs[-1] == min_hash:
            yield ngram
            last = ngram
        else:
            # always yield the first or last ngram too.
            if i == 0:
                yield ngram
                last = ngram
    # always yield the final ngram even if it was not selected above
    if last != ngram:
        yield ngram
The returned iterable is empty if the input", "[(1, 2, 3, 4), (2, 3, 4, 5)] >>> list(ngrams([1,2,3,4],", "if token and not token.startswith('{{')) def ngrams(iterable, ngram_length): \"\"\" Return", "token and not token.startswith('{{')) def ngrams(iterable, ngram_length): \"\"\" Return an", "nexB Inc. # # You may not use this software", "endings, etc.) The returned iterable contains 2-tuples of: - True", "and text.lower() or text tokens = template_splitter(text) # skip templates", "of tokens from a unicode rule text, skipping templated parts,", "accompany this data with the following acknowledgment: # # Generated", "punct = mgd.get('punct') if token or punct: yield (True, token)", "(True, token) if token else (False, punct) # Template-aware splitter,", "= template_splitter(text) # skip templates return (token for token in", "`location` or a `query string`. Include empty lines. \"\"\" #", "in tokens if token and not token.startswith('{{')) def ngrams(iterable, ngram_length):", "a proper hash nghs = [crc32(str(ng)) for ng in ngram]", "nghs[0] == min_hash or nghs[-1] == min_hash: yield ngram last", "# OR CONDITIONS OF ANY KIND, either express or implied.", "3), (5, 1, 3), (2, 6, 1), (7, 3, 4)]))", "braces template_pattern = '\\{\\{[^{}]*\\}\\}' rule_pattern = '%s|%s+' % (query_pattern, template_pattern,)", "of length `ngram_length` given an iterable. Each ngram is a", "a fairly arcane but optimized way to compute ngrams. For", "# # When you publish or redistribute any data created", "tuples: >>> from array import array >>> list(ngrams(array(b'h', [1,2,3,4,5]), 2))", "reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is", "specialized version for queries and rules texts. \"\"\" def query_lines(location=None,", "important for licenses name such as GPL2+ query_pattern = '[^\\W_]+\\+?[^\\W_]*'", "TODO: OPTIMIZE: tokenizing line by line may be rather slow", "OF ANY KIND, either express or implied. 
See the License", "a text token or False if this is not (such", "License. # # When you publish or redistribute any data", "2-tuples of: - True if the string is a text", "of ngram_length items. The returned iterable is empty if the", "of: - True if the string is a text token", "mgd.get('punct') if token or punct: yield (True, token) if token", "-')) [u'some', u'text', u'with', u'spaces'] Templates are handled and skipped", "# Template-aware splitter, keeping a templated part {{anything}} as a", "in enumerate(ngrams): # FIXME: use a proper hash nghs =", "software distributed # under the License is distributed on an", "works also from a generator: >>> list(select_ngrams(x for x in", "def matched_query_text_tokenizer(text): \"\"\" Return an iterable of tokens and non-tokens", "the matched query text accurately. \"\"\" if not text: return", "or a `query string`. Include empty lines. \"\"\" # TODO:", "word_splitter = re.compile(query_pattern, re.UNICODE).findall def query_tokenizer(text, lower=True): \"\"\" Return an", "of ngrams using the hailstorm algorithm. If `with_pos` is True", "-*- coding: utf-8 -*- # # Copyright (c) 2017 nexB", "+ not_query_pattern + ')' tokens_and_non_tokens = re.compile(_text_capture_pattern, re.UNICODE).finditer def matched_query_text_tokenizer(text):", "rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software", "keeping a templated part {{anything}} as a token. # This", "double braces template_pattern = '\\{\\{[^{}]*\\}\\}' rule_pattern = '%s|%s+' % (query_pattern,", "iterable is empty if the input iterable contains less than", "use a proper hash nghs = [crc32(str(ng)) for ng in", "is a trademark of nexB Inc. # # You may", "at: http://apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed", "True lines = query_string.splitlines(keepends) for line in lines: if strip:", "in a batch? 
lines = [] if location: lines =", "\"\"\" last = None for i, ngram in enumerate(ngrams): #", "may not use this software except in compliance with the", "# Unless required by applicable law or agreed to in", "Visit https://github.com/nexB/scancode-toolkit/ for support and download. from __future__ import absolute_import", "token strings or double braces-enclosed strings # {{something}} for templates.", "4), (4, 5)] >>> list(ngrams([1,2,3,4,5], 4)) [(1, 2, 3, 4),", "If `with_pos` is True also include the starting position for", "tokenizing line by line may be rather slow # we", "groups _text_capture_pattern = '(?P<token>' + query_pattern + ')' + '|'", "(c) 2017 nexB Inc. and others. All rights reserved. #", "= re.compile(query_pattern, re.UNICODE).findall def query_tokenizer(text, lower=True): \"\"\" Return an iterable", "match: continue mgd = match.groupdict() token = mgd.get('token') punct =", "3)] >>> list(ngrams([1,2], 2)) [(1, 2)] >>> list(ngrams([1], 2)) []", "array import array >>> list(ngrams(array(b'h', [1,2,3,4,5]), 2)) [(1, 2), (2,", "3), (5, 1, 3), (2, 6, 1), (7, 3, 4)],", "i in range(ngram_length))) def select_ngrams(ngrams, with_pos=False): \"\"\" Return an iterable", "or used as legal advice. Consult an Attorney # for", "at `location` or a `query string`. Include empty lines. \"\"\"", "returned iterable contains 2-tuples of: - True if the string", "tokens_and_non_tokens(text): if not match: continue mgd = match.groupdict() token =", "License version 2.0. # Data generated with ScanCode require an", "advice. 
# ScanCode is a free software code scanning tool", "BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either", "a shingle s if the minimum fingerprint value of all", "False else: keepends = True lines = query_string.splitlines(keepends) for line", "ngram_length): \"\"\" Return an iterable of ngrams of length `ngram_length`", "with_pos: ngram = (i, ngram,) if nghs[0] == min_hash or", "be rather slow # we could instead get lines and", "example: >>> list(select_ngrams([(2, 1, 3), (1, 1, 3), (5, 1,", "from __future__ import unicode_literals from itertools import islice from itertools", "tokens = template_splitter(text) # skip templates return (token for token", "# collect tokens and non-token texts in two different groups", "OR # CONDITIONS OF ANY KIND, either express or implied.", "text_lines(location, demarkup=False) elif query_string: if strip: keepends = False else:", "(3, 4), (4, 5)] >>> list(ngrams(tuple([1,2,3,4,5]), 2)) [(1, 2), (2,", "'[\\W_+]+[\\W_]?' # collect tokens and non-token texts in two different", "proper hash nghs = [crc32(str(ng)) for ng in ngram] min_hash", "be considered or used as legal advice. Consult an Attorney", "a `query string`. Include empty lines. \"\"\" # TODO: OPTIMIZE:", "of tokens from a unicode query text. \"\"\" if not", "as a token. # This splitter yields plain token strings", "\"\"\" return izip(*(islice(iterable, i, None) for i in range(ngram_length))) def", "IS\" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND,", "distributed # under the License is distributed on an \"AS", "keep only characters # and + in the middle or", "= '[^\\W_]+\\+?[^\\W_]*' word_splitter = re.compile(query_pattern, re.UNICODE).findall def query_tokenizer(text, lower=True): \"\"\"", "string This is used to reconstruct the matched query text", "an iterable. 
Each ngram is a tuple of ngram_length items.", "(2, 3)] >>> list(ngrams([1,2], 2)) [(1, 2)] >>> list(ngrams([1], 2))", "subset of a sequence of ngrams using the hailstorm algorithm.", "2)] >>> list(ngrams([1], 2)) [] This also works with arrays", "as GPL2+ query_pattern = '[^\\W_]+\\+?[^\\W_]*' word_splitter = re.compile(query_pattern, re.UNICODE).findall def", "3), (2, 6, 1), (7, 3, 4)] \"\"\" last =", "4), (2, 3, 4, 5)] >>> list(ngrams([1,2,3,4], 2)) [(1, 2),", "Inc. # # You may not use this software except", "acknowledgment. # ScanCode is a trademark of nexB Inc. #", "list(ngrams([1], 2)) [] This also works with arrays or tuples:", "the input iterable contains less than `ngram_length` items. Note: this" ]
[ "import helpers_test EBUILDS: Dict[str, List[Dict[str, Any]]] = {} helpers_test.import_directory(__name__, os.path.dirname(__file__))", "\"\"\"Ebuild Test Fixtures.\"\"\" import os from typing import Any, Dict,", "from typing import Any, Dict, List from etest_test import helpers_test", "etest_test import helpers_test EBUILDS: Dict[str, List[Dict[str, Any]]] = {} helpers_test.import_directory(__name__,", "os from typing import Any, Dict, List from etest_test import", "Fixtures.\"\"\" import os from typing import Any, Dict, List from", "Any, Dict, List from etest_test import helpers_test EBUILDS: Dict[str, List[Dict[str,", "Dict, List from etest_test import helpers_test EBUILDS: Dict[str, List[Dict[str, Any]]]", "List from etest_test import helpers_test EBUILDS: Dict[str, List[Dict[str, Any]]] =", "import os from typing import Any, Dict, List from etest_test", "Test Fixtures.\"\"\" import os from typing import Any, Dict, List", "import Any, Dict, List from etest_test import helpers_test EBUILDS: Dict[str,", "typing import Any, Dict, List from etest_test import helpers_test EBUILDS:", "<filename>etest_test/fixtures_test/ebuilds_test/__init__.py \"\"\"Ebuild Test Fixtures.\"\"\" import os from typing import Any,", "from etest_test import helpers_test EBUILDS: Dict[str, List[Dict[str, Any]]] = {}" ]
[ "def get_db(self): return self.db def add_phrase(self, phrase): #[{ \"english\": eng,", "data struct here words = {} for i, phrase in", "= phrase[\"english\"] #lang = phrase[\"lang\"] meaning = phrase[\"polish\"] words[eng] =", "= dbKeysSet - modelKeysSet if len(newRecordsKeys): for newKey in newRecordsKeys:", "set(dbData.values()) modelKeysSet = set(modelData.keys()) modelValuesSet = set(modelData.values()) newRecordsKeys = modelKeysSet", "= self.client[dbName] self.table = tableName self.count = self.db[self.table].find().count() def get_db(self):", "self.phrases.getData() newData[key] = meanings self.phrases.setData(newData) def getAllWords(self): return self.phrases.getData() def", "self.db[self.table].find().count() def get_db(self): return self.db def add_phrase(self, phrase): #[{ \"english\":", "meaning) def saveDb(self): dbData = self.db.get_all() modelData = self.getAllWords() #That's", "modelKeysSet if len(newRecordsKeys): for newKey in newRecordsKeys: self.db.add_phrase(Phrase(newKey, \"pl\", modelData[newKey]))", "dbName, tableName): self.client = MongoClient(url) self.db = self.client[dbName] self.table =", "= self.db[self.table].find().count() def show_one(self, phrase): print(\"eng: \\'%s\\' pol: \\'%s\\'\" %", "meanings): newData = self.phrases.getData() newData[key] = meanings self.phrases.setData(newData) def getAllWords(self):", "data = self.db.get_all() self.phrases.setData(data) def addWord(self, key, lang, meanings): newData", "True else: return False def drop_record(self, eng): self.db[self.table].delete_one({\"english\":eng}) def drop_db(self):", "def __init__(self, url, dbName, tableName): self.client = MongoClient(url) self.db =", "saveDb(self): dbData = self.db.get_all() modelData = self.getAllWords() #That's for future", "if self.count > 0: self.show_one(entries[random.randrange(self.count)]) else: print(\"Database is empty\") def", "a table of pairs - eng and its meanings\"\"\" def", "Observable from phrase import Phrase class 
MongoDbProxy: \"\"\"Proxy for MongoDB\"\"\"", "empty\") def show_random(self): entries = self.db[self.table].find() self.count = entries.count() if", "= wordAndMeaning[0] meaning = wordAndMeaning[1] self.addWord(word, \"pl\", meaning) def saveDb(self):", ": pl}] record = {\"english\" : phrase.eng, \"polish\" : phrase.meanings}", "print(\"Database is empty\") def show_random(self): entries = self.db[self.table].find() self.count =", "= set(dbData.keys()) dbValuesSet = set(dbData.values()) modelKeysSet = set(modelData.keys()) modelValuesSet =", "entries = self.db[self.table].find() self.count = entries.count() if self.count > 0:", "print(\"Dropping\") self.db.self.table.drop() self.count = self.db[self.table].find().count() class Model: \"\"\"That needs a", "phrase): print(\"eng: \\'%s\\' pol: \\'%s\\'\" % (phrase[\"english\"], phrase[\"polish\"])) def get_all(self):", "#That's for future optimization: update db instead of adding it", "for newKey in newRecordsKeys: self.db.add_phrase(Phrase(newKey, \"pl\", modelData[newKey])) if len(deletedRecordsKeys): for", "struct here words = {} for i, phrase in enumerate(self.db[self.table].find()):", "pl}] record = {\"english\" : phrase.eng, \"polish\" : phrase.meanings} self.db[self.table].insert(record)", "add_phrase(self, phrase): #[{ \"english\": eng, \"polish\" : pl}] record =", ": phrase.meanings} self.db[self.table].insert(record) self.count = self.db[self.table].find().count() def show_one(self, phrase): print(\"eng:", "\"\"\"Proxy for MongoDB\"\"\" def __init__(self, url, dbName, tableName): self.client =", "enumerate(self.db[self.table].find()): print(i, end=\" \") self.show_one(phrase) else: print(\"Database is empty\") def", "meaning return words def show_all(self): if self.count > 0: for", "end=\" \") self.show_one(phrase) else: print(\"Database is empty\") def show_random(self): entries", "__init__(self, url, dbName, tableName): self.client = MongoClient(url) self.db = self.client[dbName]", "\"polish\" : pl}] 
record = {\"english\" : phrase.eng, \"polish\" :", "and its meanings\"\"\" def __init__(self): self.phrases = Observable({}) self.db =", "get_db(self): return self.db def add_phrase(self, phrase): #[{ \"english\": eng, \"polish\"", "pairs - eng and its meanings\"\"\" def __init__(self): self.phrases =", "self.count = entries.count() if self.count > 0: self.show_one(entries[random.randrange(self.count)]) else: print(\"Database", "meanings self.phrases.setData(newData) def getAllWords(self): return self.phrases.getData() def removeWord(self, key): newData", "= wordAndMeaning[1] self.addWord(word, \"pl\", meaning) def saveDb(self): dbData = self.db.get_all()", "set(dbData.keys()) dbValuesSet = set(dbData.values()) modelKeysSet = set(modelData.keys()) modelValuesSet = set(modelData.values())", "\\'%s\\'\" % (phrase[\"english\"], phrase[\"polish\"])) def get_all(self): #define your data struct", "enumerate(self.db[self.table].find()): eng = phrase[\"english\"] #lang = phrase[\"lang\"] meaning = phrase[\"polish\"]", "= {\"english\" : phrase.eng, \"polish\" : phrase.meanings} self.db[self.table].insert(record) self.count =", "phrase[\"polish\"])) def get_all(self): #define your data struct here words =", "is empty\") def show_random(self): entries = self.db[self.table].find() self.count = entries.count()", "def saveDb(self): dbData = self.db.get_all() modelData = self.getAllWords() #That's for", "import MongoClient from observable import Observable from phrase import Phrase", "self.table = tableName self.count = self.db[self.table].find().count() def get_db(self): return self.db", "if self.db[self.table].find_one({\"english\" : eng}): return True else: return False def", "Observable({}) self.db = MongoDbProxy(\"mongodb://localhost:27017/\", \"RepeatItDb\", \"phrases\") data = self.db.get_all() self.phrases.setData(data)", "modelData = self.getAllWords() #That's for future optimization: update db instead", "MongoDbProxy: \"\"\"Proxy for MongoDB\"\"\" def __init__(self, 
url, dbName, tableName): self.client", "self.count = self.db[self.table].find().count() def show_one(self, phrase): print(\"eng: \\'%s\\' pol: \\'%s\\'\"", "class MongoDbProxy: \"\"\"Proxy for MongoDB\"\"\" def __init__(self, url, dbName, tableName):", "in enumerate(self.db[self.table].find()): eng = phrase[\"english\"] #lang = phrase[\"lang\"] meaning =", "self.phrases.getData() newData.pop(key) self.phrases.setData(newData) def saveWord(self, wordAndMeaning): word = wordAndMeaning[0] meaning", "self.db.get_all() self.phrases.setData(data) def addWord(self, key, lang, meanings): newData = self.phrases.getData()", "0: self.show_one(entries[random.randrange(self.count)]) else: print(\"Database is empty\") def record_exists(self, eng): if", "\"RepeatItDb\", \"phrases\") data = self.db.get_all() self.phrases.setData(data) def addWord(self, key, lang,", "for future optimization: update db instead of adding it all", "url, dbName, tableName): self.client = MongoClient(url) self.db = self.client[dbName] self.table", "wordAndMeaning[0] meaning = wordAndMeaning[1] self.addWord(word, \"pl\", meaning) def saveDb(self): dbData", "#define your data struct here words = {} for i,", "phrase[\"english\"] #lang = phrase[\"lang\"] meaning = phrase[\"polish\"] words[eng] = meaning", "print(\"Database is empty\") def record_exists(self, eng): if self.db[self.table].find_one({\"english\" : eng}):", "else: print(\"Database is empty\") def show_random(self): entries = self.db[self.table].find() self.count", "record = {\"english\" : phrase.eng, \"polish\" : phrase.meanings} self.db[self.table].insert(record) self.count", "len(newRecordsKeys): for newKey in newRecordsKeys: self.db.add_phrase(Phrase(newKey, \"pl\", modelData[newKey])) if len(deletedRecordsKeys):", "\") self.show_one(phrase) else: print(\"Database is empty\") def show_random(self): entries =", "modelValuesSet = set(modelData.values()) newRecordsKeys = modelKeysSet - dbKeysSet deletedRecordsKeys =", "else: print(\"Database is 
empty\") def record_exists(self, eng): if self.db[self.table].find_one({\"english\" :", "self.count = self.db[self.table].find().count() def get_db(self): return self.db def add_phrase(self, phrase):", "MongoClient from observable import Observable from phrase import Phrase class", "def add_phrase(self, phrase): #[{ \"english\": eng, \"polish\" : pl}] record", "% (phrase[\"english\"], phrase[\"polish\"])) def get_all(self): #define your data struct here", "False def drop_record(self, eng): self.db[self.table].delete_one({\"english\":eng}) def drop_db(self): print(\"Dropping\") self.db.self.table.drop() self.count", "def drop_record(self, eng): self.db[self.table].delete_one({\"english\":eng}) def drop_db(self): print(\"Dropping\") self.db.self.table.drop() self.count =", "record_exists(self, eng): if self.db[self.table].find_one({\"english\" : eng}): return True else: return", "self.db[self.table].find().count() def show_one(self, phrase): print(\"eng: \\'%s\\' pol: \\'%s\\'\" % (phrase[\"english\"],", "self.phrases.setData(newData) def saveWord(self, wordAndMeaning): word = wordAndMeaning[0] meaning = wordAndMeaning[1]", "wordAndMeaning[1] self.addWord(word, \"pl\", meaning) def saveDb(self): dbData = self.db.get_all() modelData", "update db instead of adding it all dbKeysSet = set(dbData.keys())", "dbKeysSet = set(dbData.keys()) dbValuesSet = set(dbData.values()) modelKeysSet = set(modelData.keys()) modelValuesSet", "import Observable from phrase import Phrase class MongoDbProxy: \"\"\"Proxy for", "= self.db.get_all() self.phrases.setData(data) def addWord(self, key, lang, meanings): newData =", "from pymongo import MongoClient from observable import Observable from phrase", "\"polish\" : phrase.meanings} self.db[self.table].insert(record) self.count = self.db[self.table].find().count() def show_one(self, phrase):", "newRecordsKeys: self.db.add_phrase(Phrase(newKey, \"pl\", modelData[newKey])) if len(deletedRecordsKeys): for deletedKey in deletedRecordsKeys:", 
"i, phrase in enumerate(self.db[self.table].find()): print(i, end=\" \") self.show_one(phrase) else: print(\"Database", "self.count > 0: self.show_one(entries[random.randrange(self.count)]) else: print(\"Database is empty\") def record_exists(self,", "self.phrases.setData(data) def addWord(self, key, lang, meanings): newData = self.phrases.getData() newData[key]", "phrase.meanings} self.db[self.table].insert(record) self.count = self.db[self.table].find().count() def show_one(self, phrase): print(\"eng: \\'%s\\'", "removeWord(self, key): newData = self.phrases.getData() newData.pop(key) self.phrases.setData(newData) def saveWord(self, wordAndMeaning):", "from phrase import Phrase class MongoDbProxy: \"\"\"Proxy for MongoDB\"\"\" def", "if len(deletedRecordsKeys): for deletedKey in deletedRecordsKeys: self.db.drop_record(deletedKey) #Handle also value", "self.db.self.table.drop() self.count = self.db[self.table].find().count() class Model: \"\"\"That needs a table", "get_all(self): #define your data struct here words = {} for", "self.addWord(word, \"pl\", meaning) def saveDb(self): dbData = self.db.get_all() modelData =", "return True else: return False def drop_record(self, eng): self.db[self.table].delete_one({\"english\":eng}) def", "newData[key] = meanings self.phrases.setData(newData) def getAllWords(self): return self.phrases.getData() def removeWord(self,", "future optimization: update db instead of adding it all dbKeysSet", "for i, phrase in enumerate(self.db[self.table].find()): eng = phrase[\"english\"] #lang =", "= self.db[self.table].find() self.count = entries.count() if self.count > 0: self.show_one(entries[random.randrange(self.count)])", "\"\"\"That needs a table of pairs - eng and its", "def saveWord(self, wordAndMeaning): word = wordAndMeaning[0] meaning = wordAndMeaning[1] self.addWord(word,", "wordAndMeaning): word = wordAndMeaning[0] meaning = wordAndMeaning[1] self.addWord(word, \"pl\", meaning)", "self.count = 
self.db[self.table].find().count() class Model: \"\"\"That needs a table of", "from observable import Observable from phrase import Phrase class MongoDbProxy:", "#[{ \"english\": eng, \"polish\" : pl}] record = {\"english\" :", "= phrase[\"lang\"] meaning = phrase[\"polish\"] words[eng] = meaning return words", "if len(newRecordsKeys): for newKey in newRecordsKeys: self.db.add_phrase(Phrase(newKey, \"pl\", modelData[newKey])) if", "= set(modelData.values()) newRecordsKeys = modelKeysSet - dbKeysSet deletedRecordsKeys = dbKeysSet", "= meanings self.phrases.setData(newData) def getAllWords(self): return self.phrases.getData() def removeWord(self, key):", "self.db[self.table].find_one({\"english\" : eng}): return True else: return False def drop_record(self,", "self.db = MongoDbProxy(\"mongodb://localhost:27017/\", \"RepeatItDb\", \"phrases\") data = self.db.get_all() self.phrases.setData(data) def", "show_one(self, phrase): print(\"eng: \\'%s\\' pol: \\'%s\\'\" % (phrase[\"english\"], phrase[\"polish\"])) def", "print(\"eng: \\'%s\\' pol: \\'%s\\'\" % (phrase[\"english\"], phrase[\"polish\"])) def get_all(self): #define", "for i, phrase in enumerate(self.db[self.table].find()): print(i, end=\" \") self.show_one(phrase) else:", "= MongoDbProxy(\"mongodb://localhost:27017/\", \"RepeatItDb\", \"phrases\") data = self.db.get_all() self.phrases.setData(data) def addWord(self,", "saveWord(self, wordAndMeaning): word = wordAndMeaning[0] meaning = wordAndMeaning[1] self.addWord(word, \"pl\",", "def removeWord(self, key): newData = self.phrases.getData() newData.pop(key) self.phrases.setData(newData) def saveWord(self,", "self.show_one(phrase) else: print(\"Database is empty\") def show_random(self): entries = self.db[self.table].find()", "self.phrases = Observable({}) self.db = MongoDbProxy(\"mongodb://localhost:27017/\", \"RepeatItDb\", \"phrases\") data =", "\"phrases\") data = self.db.get_all() self.phrases.setData(data) def addWord(self, key, lang, meanings):", "key, 
lang, meanings): newData = self.phrases.getData() newData[key] = meanings self.phrases.setData(newData)", "= self.getAllWords() #That's for future optimization: update db instead of", "tableName self.count = self.db[self.table].find().count() def get_db(self): return self.db def add_phrase(self,", "addWord(self, key, lang, meanings): newData = self.phrases.getData() newData[key] = meanings", "db instead of adding it all dbKeysSet = set(dbData.keys()) dbValuesSet", "drop_record(self, eng): self.db[self.table].delete_one({\"english\":eng}) def drop_db(self): print(\"Dropping\") self.db.self.table.drop() self.count = self.db[self.table].find().count()", ": phrase.eng, \"polish\" : phrase.meanings} self.db[self.table].insert(record) self.count = self.db[self.table].find().count() def", "random from pymongo import MongoClient from observable import Observable from", "- dbKeysSet deletedRecordsKeys = dbKeysSet - modelKeysSet if len(newRecordsKeys): for", "class Model: \"\"\"That needs a table of pairs - eng", "dbValuesSet = set(dbData.values()) modelKeysSet = set(modelData.keys()) modelValuesSet = set(modelData.values()) newRecordsKeys", "newData = self.phrases.getData() newData[key] = meanings self.phrases.setData(newData) def getAllWords(self): return", "self.client = MongoClient(url) self.db = self.client[dbName] self.table = tableName self.count", "words def show_all(self): if self.count > 0: for i, phrase", "eng): self.db[self.table].delete_one({\"english\":eng}) def drop_db(self): print(\"Dropping\") self.db.self.table.drop() self.count = self.db[self.table].find().count() class", "return words def show_all(self): if self.count > 0: for i,", "if self.count > 0: for i, phrase in enumerate(self.db[self.table].find()): print(i,", "eng}): return True else: return False def drop_record(self, eng): self.db[self.table].delete_one({\"english\":eng})", "self.phrases.setData(newData) def getAllWords(self): return self.phrases.getData() def removeWord(self, key): newData =", 
"set(modelData.values()) newRecordsKeys = modelKeysSet - dbKeysSet deletedRecordsKeys = dbKeysSet -", "self.db.add_phrase(Phrase(newKey, \"pl\", modelData[newKey])) if len(deletedRecordsKeys): for deletedKey in deletedRecordsKeys: self.db.drop_record(deletedKey)", "def show_one(self, phrase): print(\"eng: \\'%s\\' pol: \\'%s\\'\" % (phrase[\"english\"], phrase[\"polish\"]))", "__init__(self): self.phrases = Observable({}) self.db = MongoDbProxy(\"mongodb://localhost:27017/\", \"RepeatItDb\", \"phrases\") data", "def show_random(self): entries = self.db[self.table].find() self.count = entries.count() if self.count", "def __init__(self): self.phrases = Observable({}) self.db = MongoDbProxy(\"mongodb://localhost:27017/\", \"RepeatItDb\", \"phrases\")", "for deletedKey in deletedRecordsKeys: self.db.drop_record(deletedKey) #Handle also value update print(\"Saving", "your data struct here words = {} for i, phrase", "self.db.get_all() modelData = self.getAllWords() #That's for future optimization: update db", "self.count > 0: for i, phrase in enumerate(self.db[self.table].find()): print(i, end=\"", "dbData = self.db.get_all() modelData = self.getAllWords() #That's for future optimization:", "= self.db[self.table].find().count() class Model: \"\"\"That needs a table of pairs", "MongoClient(url) self.db = self.client[dbName] self.table = tableName self.count = self.db[self.table].find().count()", "dbKeysSet deletedRecordsKeys = dbKeysSet - modelKeysSet if len(newRecordsKeys): for newKey", "getAllWords(self): return self.phrases.getData() def removeWord(self, key): newData = self.phrases.getData() newData.pop(key)", "eng and its meanings\"\"\" def __init__(self): self.phrases = Observable({}) self.db", "show_all(self): if self.count > 0: for i, phrase in enumerate(self.db[self.table].find()):", "{\"english\" : phrase.eng, \"polish\" : phrase.meanings} self.db[self.table].insert(record) self.count = self.db[self.table].find().count()", "def addWord(self, key, lang, 
meanings): newData = self.phrases.getData() newData[key] =", "> 0: self.show_one(entries[random.randrange(self.count)]) else: print(\"Database is empty\") def record_exists(self, eng):", "dbKeysSet - modelKeysSet if len(newRecordsKeys): for newKey in newRecordsKeys: self.db.add_phrase(Phrase(newKey,", "self.db[self.table].delete_one({\"english\":eng}) def drop_db(self): print(\"Dropping\") self.db.self.table.drop() self.count = self.db[self.table].find().count() class Model:", "of pairs - eng and its meanings\"\"\" def __init__(self): self.phrases", "def get_all(self): #define your data struct here words = {}", "return self.phrases.getData() def removeWord(self, key): newData = self.phrases.getData() newData.pop(key) self.phrases.setData(newData)", "- eng and its meanings\"\"\" def __init__(self): self.phrases = Observable({})", "words[eng] = meaning return words def show_all(self): if self.count >", "is empty\") def record_exists(self, eng): if self.db[self.table].find_one({\"english\" : eng}): return", "set(modelData.keys()) modelValuesSet = set(modelData.values()) newRecordsKeys = modelKeysSet - dbKeysSet deletedRecordsKeys", "pol: \\'%s\\'\" % (phrase[\"english\"], phrase[\"polish\"])) def get_all(self): #define your data", "deletedRecordsKeys = dbKeysSet - modelKeysSet if len(newRecordsKeys): for newKey in", "= phrase[\"polish\"] words[eng] = meaning return words def show_all(self): if", "phrase[\"polish\"] words[eng] = meaning return words def show_all(self): if self.count", "for MongoDB\"\"\" def __init__(self, url, dbName, tableName): self.client = MongoClient(url)", "Phrase class MongoDbProxy: \"\"\"Proxy for MongoDB\"\"\" def __init__(self, url, dbName,", "= Observable({}) self.db = MongoDbProxy(\"mongodb://localhost:27017/\", \"RepeatItDb\", \"phrases\") data = self.db.get_all()", "return self.db def add_phrase(self, phrase): #[{ \"english\": eng, \"polish\" :", "newKey in newRecordsKeys: self.db.add_phrase(Phrase(newKey, \"pl\", modelData[newKey])) if 
len(deletedRecordsKeys): for deletedKey", "in newRecordsKeys: self.db.add_phrase(Phrase(newKey, \"pl\", modelData[newKey])) if len(deletedRecordsKeys): for deletedKey in", "i, phrase in enumerate(self.db[self.table].find()): eng = phrase[\"english\"] #lang = phrase[\"lang\"]", "pymongo import MongoClient from observable import Observable from phrase import", "print(i, end=\" \") self.show_one(phrase) else: print(\"Database is empty\") def show_random(self):", "0: for i, phrase in enumerate(self.db[self.table].find()): print(i, end=\" \") self.show_one(phrase)", "\"pl\", modelData[newKey])) if len(deletedRecordsKeys): for deletedKey in deletedRecordsKeys: self.db.drop_record(deletedKey) #Handle", "self.getAllWords() #That's for future optimization: update db instead of adding", "def record_exists(self, eng): if self.db[self.table].find_one({\"english\" : eng}): return True else:", "import random from pymongo import MongoClient from observable import Observable", "{} for i, phrase in enumerate(self.db[self.table].find()): eng = phrase[\"english\"] #lang", "newData = self.phrases.getData() newData.pop(key) self.phrases.setData(newData) def saveWord(self, wordAndMeaning): word =", "self.show_one(entries[random.randrange(self.count)]) else: print(\"Database is empty\") def record_exists(self, eng): if self.db[self.table].find_one({\"english\"", "tableName): self.client = MongoClient(url) self.db = self.client[dbName] self.table = tableName", "> 0: for i, phrase in enumerate(self.db[self.table].find()): print(i, end=\" \")", "entries.count() if self.count > 0: self.show_one(entries[random.randrange(self.count)]) else: print(\"Database is empty\")", "eng = phrase[\"english\"] #lang = phrase[\"lang\"] meaning = phrase[\"polish\"] words[eng]", "self.db[self.table].find().count() class Model: \"\"\"That needs a table of pairs -", "optimization: update db instead of adding it all dbKeysSet =", "= tableName self.count = self.db[self.table].find().count() def get_db(self): 
return self.db def", "here words = {} for i, phrase in enumerate(self.db[self.table].find()): eng", "\"english\": eng, \"polish\" : pl}] record = {\"english\" : phrase.eng,", "newRecordsKeys = modelKeysSet - dbKeysSet deletedRecordsKeys = dbKeysSet - modelKeysSet", "= set(dbData.values()) modelKeysSet = set(modelData.keys()) modelValuesSet = set(modelData.values()) newRecordsKeys =", "words = {} for i, phrase in enumerate(self.db[self.table].find()): eng =", "def getAllWords(self): return self.phrases.getData() def removeWord(self, key): newData = self.phrases.getData()", "eng, \"polish\" : pl}] record = {\"english\" : phrase.eng, \"polish\"", "modelData[newKey])) if len(deletedRecordsKeys): for deletedKey in deletedRecordsKeys: self.db.drop_record(deletedKey) #Handle also", "word = wordAndMeaning[0] meaning = wordAndMeaning[1] self.addWord(word, \"pl\", meaning) def", "needs a table of pairs - eng and its meanings\"\"\"", "its meanings\"\"\" def __init__(self): self.phrases = Observable({}) self.db = MongoDbProxy(\"mongodb://localhost:27017/\",", "self.db[self.table].insert(record) self.count = self.db[self.table].find().count() def show_one(self, phrase): print(\"eng: \\'%s\\' pol:", "phrase in enumerate(self.db[self.table].find()): eng = phrase[\"english\"] #lang = phrase[\"lang\"] meaning", "modelKeysSet = set(modelData.keys()) modelValuesSet = set(modelData.values()) newRecordsKeys = modelKeysSet -", "self.phrases.getData() def removeWord(self, key): newData = self.phrases.getData() newData.pop(key) self.phrases.setData(newData) def", "it all dbKeysSet = set(dbData.keys()) dbValuesSet = set(dbData.values()) modelKeysSet =", "#lang = phrase[\"lang\"] meaning = phrase[\"polish\"] words[eng] = meaning return", "instead of adding it all dbKeysSet = set(dbData.keys()) dbValuesSet =", "empty\") def record_exists(self, eng): if self.db[self.table].find_one({\"english\" : eng}): return True", "\"pl\", meaning) def saveDb(self): dbData = self.db.get_all() modelData = 
self.getAllWords()", "show_random(self): entries = self.db[self.table].find() self.count = entries.count() if self.count >", "= modelKeysSet - dbKeysSet deletedRecordsKeys = dbKeysSet - modelKeysSet if", "modelKeysSet - dbKeysSet deletedRecordsKeys = dbKeysSet - modelKeysSet if len(newRecordsKeys):", "def show_all(self): if self.count > 0: for i, phrase in", "- modelKeysSet if len(newRecordsKeys): for newKey in newRecordsKeys: self.db.add_phrase(Phrase(newKey, \"pl\",", ": eng}): return True else: return False def drop_record(self, eng):", "lang, meanings): newData = self.phrases.getData() newData[key] = meanings self.phrases.setData(newData) def", "= MongoClient(url) self.db = self.client[dbName] self.table = tableName self.count =", "phrase[\"lang\"] meaning = phrase[\"polish\"] words[eng] = meaning return words def", "def drop_db(self): print(\"Dropping\") self.db.self.table.drop() self.count = self.db[self.table].find().count() class Model: \"\"\"That", "self.client[dbName] self.table = tableName self.count = self.db[self.table].find().count() def get_db(self): return", "phrase in enumerate(self.db[self.table].find()): print(i, end=\" \") self.show_one(phrase) else: print(\"Database is", "= self.phrases.getData() newData.pop(key) self.phrases.setData(newData) def saveWord(self, wordAndMeaning): word = wordAndMeaning[0]", "self.db[self.table].find() self.count = entries.count() if self.count > 0: self.show_one(entries[random.randrange(self.count)]) else:", "meanings\"\"\" def __init__(self): self.phrases = Observable({}) self.db = MongoDbProxy(\"mongodb://localhost:27017/\", \"RepeatItDb\",", "phrase): #[{ \"english\": eng, \"polish\" : pl}] record = {\"english\"", "of adding it all dbKeysSet = set(dbData.keys()) dbValuesSet = set(dbData.values())", "self.db = self.client[dbName] self.table = tableName self.count = self.db[self.table].find().count() def", "MongoDbProxy(\"mongodb://localhost:27017/\", \"RepeatItDb\", \"phrases\") data = self.db.get_all() 
self.phrases.setData(data) def addWord(self, key,", "eng): if self.db[self.table].find_one({\"english\" : eng}): return True else: return False", "adding it all dbKeysSet = set(dbData.keys()) dbValuesSet = set(dbData.values()) modelKeysSet", "all dbKeysSet = set(dbData.keys()) dbValuesSet = set(dbData.values()) modelKeysSet = set(modelData.keys())", "= {} for i, phrase in enumerate(self.db[self.table].find()): eng = phrase[\"english\"]", "newData.pop(key) self.phrases.setData(newData) def saveWord(self, wordAndMeaning): word = wordAndMeaning[0] meaning =", "= set(modelData.keys()) modelValuesSet = set(modelData.values()) newRecordsKeys = modelKeysSet - dbKeysSet", "meaning = phrase[\"polish\"] words[eng] = meaning return words def show_all(self):", "\\'%s\\' pol: \\'%s\\'\" % (phrase[\"english\"], phrase[\"polish\"])) def get_all(self): #define your", "MongoDB\"\"\" def __init__(self, url, dbName, tableName): self.client = MongoClient(url) self.db", "= entries.count() if self.count > 0: self.show_one(entries[random.randrange(self.count)]) else: print(\"Database is", "Model: \"\"\"That needs a table of pairs - eng and", "len(deletedRecordsKeys): for deletedKey in deletedRecordsKeys: self.db.drop_record(deletedKey) #Handle also value update", "phrase.eng, \"polish\" : phrase.meanings} self.db[self.table].insert(record) self.count = self.db[self.table].find().count() def show_one(self,", "table of pairs - eng and its meanings\"\"\" def __init__(self):", "= self.db.get_all() modelData = self.getAllWords() #That's for future optimization: update", "(phrase[\"english\"], phrase[\"polish\"])) def get_all(self): #define your data struct here words", "self.db def add_phrase(self, phrase): #[{ \"english\": eng, \"polish\" : pl}]", "import Phrase class MongoDbProxy: \"\"\"Proxy for MongoDB\"\"\" def __init__(self, url,", "else: return False def drop_record(self, eng): self.db[self.table].delete_one({\"english\":eng}) def drop_db(self): print(\"Dropping\")", "key): newData = 
self.phrases.getData() newData.pop(key) self.phrases.setData(newData) def saveWord(self, wordAndMeaning): word", "observable import Observable from phrase import Phrase class MongoDbProxy: \"\"\"Proxy", "= self.db[self.table].find().count() def get_db(self): return self.db def add_phrase(self, phrase): #[{", "= meaning return words def show_all(self): if self.count > 0:", "deletedKey in deletedRecordsKeys: self.db.drop_record(deletedKey) #Handle also value update print(\"Saving database...\")", "in enumerate(self.db[self.table].find()): print(i, end=\" \") self.show_one(phrase) else: print(\"Database is empty\")", "phrase import Phrase class MongoDbProxy: \"\"\"Proxy for MongoDB\"\"\" def __init__(self,", "drop_db(self): print(\"Dropping\") self.db.self.table.drop() self.count = self.db[self.table].find().count() class Model: \"\"\"That needs", "return False def drop_record(self, eng): self.db[self.table].delete_one({\"english\":eng}) def drop_db(self): print(\"Dropping\") self.db.self.table.drop()", "meaning = wordAndMeaning[1] self.addWord(word, \"pl\", meaning) def saveDb(self): dbData =", "= self.phrases.getData() newData[key] = meanings self.phrases.setData(newData) def getAllWords(self): return self.phrases.getData()" ]
[ "(req.text) checkResp = requests.get('http://localhost:5000/getResponses') print (checkResp.text) def randomUser(): user =", "is expected\") elif (rand==2): dp = DetectionPoint(\"Login Page\", \"Hidden field", "= User(\"Sergio\", \"192.168.127.12\", \"yyyy\") elif (user==3): attacker = User(\"Anonymous\", \"172.16.31.10\",", "elif (user==2): attacker = User(\"Sergio\", \"192.168.127.12\", \"yyyy\") elif (user==3): attacker", "dp for i in range (50): requestGenerator() time.sleep(1.5) def closingTime():", "\"DetectionPoint\" : detectionPointObject.__dict__, \"Time\" : str(datetime.now().isoformat())}) print (req.text) checkResp =", "def randomUser(): user = random.randint(1,3) attacker=0 if (user==1): attacker =", "\"yyyy\") elif (user==3): attacker = User(\"Anonymous\", \"172.16.31.10\", \"354343jjk23\") return attacker", "where POST is expected\") elif (rand==2): dp = DetectionPoint(\"Login Page\",", "import User from sampleObjects.User import User from datetime import datetime", "def requestGenerator(): userObject = randomUser() detectionPointObject = randomDetectionPoint() req =", "\"354343jjk23\") return attacker def randomDetectionPoint(): rand = random.randint(1,2) dp=0 if", "user = random.randint(1,3) attacker=0 if (user==1): attacker = User(\"Phillipo\", \"255.255.255.101\",", ": detectionPointObject.__dict__, \"Time\" : str(datetime.now().isoformat())}) print (req.text) checkResp = requests.get('http://localhost:5000/getResponses')", "attacker = User(\"Anonymous\", \"172.16.31.10\", \"354343jjk23\") return attacker def randomDetectionPoint(): rand", "= requests.post('http://localhost:5000/addevent', json = {\"User\": userObject.__dict__, \"DetectionPoint\" : detectionPointObject.__dict__, \"Time\"", "User(\"Sergio\", \"192.168.127.12\", \"yyyy\") elif (user==3): attacker = User(\"Anonymous\", \"172.16.31.10\", \"354343jjk23\")", "requests, random, atexit def requestGenerator(): userObject = randomUser() detectionPointObject =", "(checkResp.text) def 
randomUser(): user = random.randint(1,3) attacker=0 if (user==1): attacker", "attacker=0 if (user==1): attacker = User(\"Phillipo\", \"255.255.255.101\", \"xxxx\") elif (user==2):", "str(datetime.now().isoformat())}) print (req.text) checkResp = requests.get('http://localhost:5000/getResponses') print (checkResp.text) def randomUser():", "DetectionPoint(\"HTTP Verb\", \"GET Request used where POST is expected\") elif", "(rand==1): dp = DetectionPoint(\"HTTP Verb\", \"GET Request used where POST", "used where POST is expected\") elif (rand==2): dp = DetectionPoint(\"Login", "(user==1): attacker = User(\"Phillipo\", \"255.255.255.101\", \"xxxx\") elif (user==2): attacker =", "def randomDetectionPoint(): rand = random.randint(1,2) dp=0 if (rand==1): dp =", "form\") return dp for i in range (50): requestGenerator() time.sleep(1.5)", "print (req.text) checkResp = requests.get('http://localhost:5000/getResponses') print (checkResp.text) def randomUser(): user", "atexit def requestGenerator(): userObject = randomUser() detectionPointObject = randomDetectionPoint() req", "datetime import datetime from sampleObjects.DetectionPoint import DetectionPoint import time, requests,", "= random.randint(1,2) dp=0 if (rand==1): dp = DetectionPoint(\"HTTP Verb\", \"GET", "random, atexit def requestGenerator(): userObject = randomUser() detectionPointObject = randomDetectionPoint()", "within the login form\") return dp for i in range", "from datetime import datetime from sampleObjects.DetectionPoint import DetectionPoint import time,", "for i in range (50): requestGenerator() time.sleep(1.5) def closingTime(): print", "field altered within the login form\") return dp for i", "rand = random.randint(1,2) dp=0 if (rand==1): dp = DetectionPoint(\"HTTP Verb\",", "= User(\"Anonymous\", \"172.16.31.10\", \"354343jjk23\") return attacker def randomDetectionPoint(): rand =", "randomDetectionPoint() req = requests.post('http://localhost:5000/addevent', json = {\"User\": userObject.__dict__, 
\"DetectionPoint\" :", "User from sampleObjects.User import User from datetime import datetime from", "\"xxxx\") elif (user==2): attacker = User(\"Sergio\", \"192.168.127.12\", \"yyyy\") elif (user==3):", "return dp for i in range (50): requestGenerator() time.sleep(1.5) def", "req = requests.post('http://localhost:5000/addevent', json = {\"User\": userObject.__dict__, \"DetectionPoint\" : detectionPointObject.__dict__,", "sampleObjects.DetectionPoint import DetectionPoint import time, requests, random, atexit def requestGenerator():", "print (checkResp.text) def randomUser(): user = random.randint(1,3) attacker=0 if (user==1):", "elif (rand==2): dp = DetectionPoint(\"Login Page\", \"Hidden field altered within", "checkResp = requests.get('http://localhost:5000/getResponses') print (checkResp.text) def randomUser(): user = random.randint(1,3)", "attacker = User(\"Phillipo\", \"255.255.255.101\", \"xxxx\") elif (user==2): attacker = User(\"Sergio\",", "sampleObjects.User import User from datetime import datetime from sampleObjects.DetectionPoint import", "(user==3): attacker = User(\"Anonymous\", \"172.16.31.10\", \"354343jjk23\") return attacker def randomDetectionPoint():", "elif (user==3): attacker = User(\"Anonymous\", \"172.16.31.10\", \"354343jjk23\") return attacker def", "#from user import User from sampleObjects.User import User from datetime", "\"GET Request used where POST is expected\") elif (rand==2): dp", "random.randint(1,3) attacker=0 if (user==1): attacker = User(\"Phillipo\", \"255.255.255.101\", \"xxxx\") elif", "DetectionPoint import time, requests, random, atexit def requestGenerator(): userObject =", "datetime from sampleObjects.DetectionPoint import DetectionPoint import time, requests, random, atexit", "\"Hidden field altered within the login form\") return dp for", "randomUser() detectionPointObject = randomDetectionPoint() req = requests.post('http://localhost:5000/addevent', json = {\"User\":", "= 
requests.get('http://localhost:5000/getResponses') print (checkResp.text) def randomUser(): user = random.randint(1,3) attacker=0", "import DetectionPoint import time, requests, random, atexit def requestGenerator(): userObject", "= User(\"Phillipo\", \"255.255.255.101\", \"xxxx\") elif (user==2): attacker = User(\"Sergio\", \"192.168.127.12\",", "(user==2): attacker = User(\"Sergio\", \"192.168.127.12\", \"yyyy\") elif (user==3): attacker =", "json = {\"User\": userObject.__dict__, \"DetectionPoint\" : detectionPointObject.__dict__, \"Time\" : str(datetime.now().isoformat())})", "\"255.255.255.101\", \"xxxx\") elif (user==2): attacker = User(\"Sergio\", \"192.168.127.12\", \"yyyy\") elif", "user import User from sampleObjects.User import User from datetime import", "import User from datetime import datetime from sampleObjects.DetectionPoint import DetectionPoint", "requests.post('http://localhost:5000/addevent', json = {\"User\": userObject.__dict__, \"DetectionPoint\" : detectionPointObject.__dict__, \"Time\" :", "= random.randint(1,3) attacker=0 if (user==1): attacker = User(\"Phillipo\", \"255.255.255.101\", \"xxxx\")", "attacker = User(\"Sergio\", \"192.168.127.12\", \"yyyy\") elif (user==3): attacker = User(\"Anonymous\",", "DetectionPoint(\"Login Page\", \"Hidden field altered within the login form\") return", "randomDetectionPoint(): rand = random.randint(1,2) dp=0 if (rand==1): dp = DetectionPoint(\"HTTP", "User(\"Phillipo\", \"255.255.255.101\", \"xxxx\") elif (user==2): attacker = User(\"Sergio\", \"192.168.127.12\", \"yyyy\")", "dp = DetectionPoint(\"Login Page\", \"Hidden field altered within the login", "\"Time\" : str(datetime.now().isoformat())}) print (req.text) checkResp = requests.get('http://localhost:5000/getResponses') print (checkResp.text)", "(rand==2): dp = DetectionPoint(\"Login Page\", \"Hidden field altered within the", "import time, requests, random, atexit def requestGenerator(): userObject = randomUser()", "User(\"Anonymous\", 
\"172.16.31.10\", \"354343jjk23\") return attacker def randomDetectionPoint(): rand = random.randint(1,2)", "randomUser(): user = random.randint(1,3) attacker=0 if (user==1): attacker = User(\"Phillipo\",", "\"192.168.127.12\", \"yyyy\") elif (user==3): attacker = User(\"Anonymous\", \"172.16.31.10\", \"354343jjk23\") return", "User from datetime import datetime from sampleObjects.DetectionPoint import DetectionPoint import", "= {\"User\": userObject.__dict__, \"DetectionPoint\" : detectionPointObject.__dict__, \"Time\" : str(datetime.now().isoformat())}) print", "from sampleObjects.DetectionPoint import DetectionPoint import time, requests, random, atexit def", "userObject = randomUser() detectionPointObject = randomDetectionPoint() req = requests.post('http://localhost:5000/addevent', json", "dp = DetectionPoint(\"HTTP Verb\", \"GET Request used where POST is", "the login form\") return dp for i in range (50):", "expected\") elif (rand==2): dp = DetectionPoint(\"Login Page\", \"Hidden field altered", "altered within the login form\") return dp for i in", "requests.get('http://localhost:5000/getResponses') print (checkResp.text) def randomUser(): user = random.randint(1,3) attacker=0 if", "{\"User\": userObject.__dict__, \"DetectionPoint\" : detectionPointObject.__dict__, \"Time\" : str(datetime.now().isoformat())}) print (req.text)", "#!flask/bin/python #from user import User from sampleObjects.User import User from", "return attacker def randomDetectionPoint(): rand = random.randint(1,2) dp=0 if (rand==1):", "\"172.16.31.10\", \"354343jjk23\") return attacker def randomDetectionPoint(): rand = random.randint(1,2) dp=0", "= DetectionPoint(\"Login Page\", \"Hidden field altered within the login form\")", "in range (50): requestGenerator() time.sleep(1.5) def closingTime(): print (\"Exiting\") atexit.register(closingTime)", "= randomDetectionPoint() req = requests.post('http://localhost:5000/addevent', json = {\"User\": userObject.__dict__, \"DetectionPoint\"", 
"login form\") return dp for i in range (50): requestGenerator()", "Verb\", \"GET Request used where POST is expected\") elif (rand==2):", "dp=0 if (rand==1): dp = DetectionPoint(\"HTTP Verb\", \"GET Request used", "detectionPointObject = randomDetectionPoint() req = requests.post('http://localhost:5000/addevent', json = {\"User\": userObject.__dict__,", "POST is expected\") elif (rand==2): dp = DetectionPoint(\"Login Page\", \"Hidden", "= randomUser() detectionPointObject = randomDetectionPoint() req = requests.post('http://localhost:5000/addevent', json =", "from sampleObjects.User import User from datetime import datetime from sampleObjects.DetectionPoint", "requestGenerator(): userObject = randomUser() detectionPointObject = randomDetectionPoint() req = requests.post('http://localhost:5000/addevent',", "userObject.__dict__, \"DetectionPoint\" : detectionPointObject.__dict__, \"Time\" : str(datetime.now().isoformat())}) print (req.text) checkResp", "= DetectionPoint(\"HTTP Verb\", \"GET Request used where POST is expected\")", "if (user==1): attacker = User(\"Phillipo\", \"255.255.255.101\", \"xxxx\") elif (user==2): attacker", "detectionPointObject.__dict__, \"Time\" : str(datetime.now().isoformat())}) print (req.text) checkResp = requests.get('http://localhost:5000/getResponses') print", "if (rand==1): dp = DetectionPoint(\"HTTP Verb\", \"GET Request used where", "Request used where POST is expected\") elif (rand==2): dp =", "i in range (50): requestGenerator() time.sleep(1.5) def closingTime(): print (\"Exiting\")", "Page\", \"Hidden field altered within the login form\") return dp", "time, requests, random, atexit def requestGenerator(): userObject = randomUser() detectionPointObject", "random.randint(1,2) dp=0 if (rand==1): dp = DetectionPoint(\"HTTP Verb\", \"GET Request", "import datetime from sampleObjects.DetectionPoint import DetectionPoint import time, requests, random,", "attacker def randomDetectionPoint(): rand = random.randint(1,2) dp=0 if 
(rand==1): dp", ": str(datetime.now().isoformat())}) print (req.text) checkResp = requests.get('http://localhost:5000/getResponses') print (checkResp.text) def" ]
[ "print('Start downloading \"%s\"' % name) # Launch a coroutine for", "def handle(self, body): # Adapted from: # \"Making 1 million", "link all requests together t0 = datetime.datetime.now() async with ClientSession()", "in the order of the original sequence, # so we", "dt = (datetime.datetime.now() - t0).total_seconds() print('All downloads completed; elapsed time:", "https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html async def fetch(url, session): async with session.get(url) as response:", "print('All downloads completed; elapsed time: {} [s]'.format(dt)) # asyncio.gather returns", "each URL fetch task = loop.create_task(fetch(url, session)) tasks.append(task) # Wait", "handle(self, body): # Adapted from: # \"Making 1 million requests", "% name) # Launch a coroutine for each URL fetch", "url in BLOGS.items(): print('Start downloading \"%s\"' % name) # Launch", "import asyncio import json import datetime from aiohttp import ClientSession", "return await response.read() tasks = [] loop = asyncio.get_event_loop() #", "fetch task = loop.create_task(fetch(url, session)) tasks.append(task) # Wait on, and", "import ClientSession from channels.generic.http import AsyncHttpConsumer from .constants import BLOGS", "returns results in the order of the original sequence, #", "the order of the original sequence, # so we can", "import datetime from aiohttp import ClientSession from channels.generic.http import AsyncHttpConsumer", "coroutine for each URL fetch task = loop.create_task(fetch(url, session)) tasks.append(task)", "[r.decode('utf-8') for r in responses])) text = json.dumps(data) # We", "NewsCollectorAsyncConsumer(AsyncHttpConsumer): \"\"\" Async HTTP consumer that fetches URLs. 
\"\"\" async", "Adapted from: # \"Making 1 million requests with python-aiohttp\" #", "response using send_response rather than returning # it in Channels'", "response.read() tasks = [] loop = asyncio.get_event_loop() # aiohttp allows", "and then gather, all responses responses = await asyncio.gather(*tasks) dt", "have to send a response using send_response rather than returning", "using send_response rather than returning # it in Channels' async", "send_response rather than returning # it in Channels' async HTTP", "aiohttp allows a ClientSession object to link all requests together", "datetime from aiohttp import ClientSession from channels.generic.http import AsyncHttpConsumer from", "million requests with python-aiohttp\" # https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html async def fetch(url, session):", "time: {} [s]'.format(dt)) # asyncio.gather returns results in the order", "{} [s]'.format(dt)) # asyncio.gather returns results in the order of", "sequence, # so we can safely zip these together. data", "AsyncHttpConsumer from .constants import BLOGS class NewsCollectorAsyncConsumer(AsyncHttpConsumer): \"\"\" Async HTTP", "task = loop.create_task(fetch(url, session)) tasks.append(task) # Wait on, and then", "so we can safely zip these together. data = dict(zip(BLOGS.keys(),", "that fetches URLs. \"\"\" async def handle(self, body): # Adapted", "as response: return await response.read() tasks = [] loop =", "HTTP consumer await self.send_response(200, text.encode(), headers=[ (\"Content-Type\", \"application/json\"), ] )", "HTTP consumer that fetches URLs. 
\"\"\" async def handle(self, body):", "in Channels' async HTTP consumer await self.send_response(200, text.encode(), headers=[ (\"Content-Type\",", "json import datetime from aiohttp import ClientSession from channels.generic.http import", "await asyncio.gather(*tasks) dt = (datetime.datetime.now() - t0).total_seconds() print('All downloads completed;", "on, and then gather, all responses responses = await asyncio.gather(*tasks)", "asyncio.get_event_loop() # aiohttp allows a ClientSession object to link all", "then gather, all responses responses = await asyncio.gather(*tasks) dt =", "= await asyncio.gather(*tasks) dt = (datetime.datetime.now() - t0).total_seconds() print('All downloads", "all responses responses = await asyncio.gather(*tasks) dt = (datetime.datetime.now() -", "response: return await response.read() tasks = [] loop = asyncio.get_event_loop()", "python-aiohttp\" # https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html async def fetch(url, session): async with session.get(url)", "with ClientSession() as session: for name, url in BLOGS.items(): print('Start", "[] loop = asyncio.get_event_loop() # aiohttp allows a ClientSession object", "= datetime.datetime.now() async with ClientSession() as session: for name, url", "gather, all responses responses = await asyncio.gather(*tasks) dt = (datetime.datetime.now()", "import AsyncHttpConsumer from .constants import BLOGS class NewsCollectorAsyncConsumer(AsyncHttpConsumer): \"\"\" Async", "a coroutine for each URL fetch task = loop.create_task(fetch(url, session))", "session.get(url) as response: return await response.read() tasks = [] loop", "ClientSession object to link all requests together t0 = datetime.datetime.now()", ".constants import BLOGS class NewsCollectorAsyncConsumer(AsyncHttpConsumer): \"\"\" Async HTTP consumer that", "def fetch(url, session): async with session.get(url) as response: return await", "# aiohttp allows a ClientSession object to link all requests", 
"these together. data = dict(zip(BLOGS.keys(), [r.decode('utf-8') for r in responses]))", "session): async with session.get(url) as response: return await response.read() tasks", "a response using send_response rather than returning # it in", "asyncio.gather returns results in the order of the original sequence,", "URLs. \"\"\" async def handle(self, body): # Adapted from: #", "Async HTTP consumer that fetches URLs. \"\"\" async def handle(self,", "the original sequence, # so we can safely zip these", "returning # it in Channels' async HTTP consumer await self.send_response(200,", "= dict(zip(BLOGS.keys(), [r.decode('utf-8') for r in responses])) text = json.dumps(data)", "\"\"\" Async HTTP consumer that fetches URLs. \"\"\" async def", "# Launch a coroutine for each URL fetch task =", "BLOGS class NewsCollectorAsyncConsumer(AsyncHttpConsumer): \"\"\" Async HTTP consumer that fetches URLs.", "\"%s\"' % name) # Launch a coroutine for each URL", "to send a response using send_response rather than returning #", "async def fetch(url, session): async with session.get(url) as response: return", "tasks.append(task) # Wait on, and then gather, all responses responses", "of the original sequence, # so we can safely zip", "can safely zip these together. data = dict(zip(BLOGS.keys(), [r.decode('utf-8') for", "order of the original sequence, # so we can safely", "# it in Channels' async HTTP consumer await self.send_response(200, text.encode(),", "class NewsCollectorAsyncConsumer(AsyncHttpConsumer): \"\"\" Async HTTP consumer that fetches URLs. 
\"\"\"", "= json.dumps(data) # We have to send a response using", "than returning # it in Channels' async HTTP consumer await", "import BLOGS class NewsCollectorAsyncConsumer(AsyncHttpConsumer): \"\"\" Async HTTP consumer that fetches", "\"Making 1 million requests with python-aiohttp\" # https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html async def", "body): # Adapted from: # \"Making 1 million requests with", "# Adapted from: # \"Making 1 million requests with python-aiohttp\"", "fetches URLs. \"\"\" async def handle(self, body): # Adapted from:", "requests together t0 = datetime.datetime.now() async with ClientSession() as session:", "async with ClientSession() as session: for name, url in BLOGS.items():", "for each URL fetch task = loop.create_task(fetch(url, session)) tasks.append(task) #", "= (datetime.datetime.now() - t0).total_seconds() print('All downloads completed; elapsed time: {}", "t0).total_seconds() print('All downloads completed; elapsed time: {} [s]'.format(dt)) # asyncio.gather", "zip these together. data = dict(zip(BLOGS.keys(), [r.decode('utf-8') for r in", "safely zip these together. 
data = dict(zip(BLOGS.keys(), [r.decode('utf-8') for r", "name) # Launch a coroutine for each URL fetch task", "We have to send a response using send_response rather than", "aiohttp import ClientSession from channels.generic.http import AsyncHttpConsumer from .constants import", "r in responses])) text = json.dumps(data) # We have to", "session: for name, url in BLOGS.items(): print('Start downloading \"%s\"' %", "name, url in BLOGS.items(): print('Start downloading \"%s\"' % name) #", "URL fetch task = loop.create_task(fetch(url, session)) tasks.append(task) # Wait on,", "from channels.generic.http import AsyncHttpConsumer from .constants import BLOGS class NewsCollectorAsyncConsumer(AsyncHttpConsumer):", "[s]'.format(dt)) # asyncio.gather returns results in the order of the", "downloads completed; elapsed time: {} [s]'.format(dt)) # asyncio.gather returns results", "together t0 = datetime.datetime.now() async with ClientSession() as session: for", "from: # \"Making 1 million requests with python-aiohttp\" # https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html", "responses = await asyncio.gather(*tasks) dt = (datetime.datetime.now() - t0).total_seconds() print('All", "responses responses = await asyncio.gather(*tasks) dt = (datetime.datetime.now() - t0).total_seconds()", "ClientSession from channels.generic.http import AsyncHttpConsumer from .constants import BLOGS class", "await response.read() tasks = [] loop = asyncio.get_event_loop() # aiohttp", "t0 = datetime.datetime.now() async with ClientSession() as session: for name,", "<gh_stars>0 import asyncio import json import datetime from aiohttp import", "fetch(url, session): async with session.get(url) as response: return await response.read()", "text = json.dumps(data) # We have to send a response", "async HTTP consumer await self.send_response(200, text.encode(), headers=[ (\"Content-Type\", \"application/json\"), ]", "Channels' async HTTP consumer await self.send_response(200, 
text.encode(), headers=[ (\"Content-Type\", \"application/json\"),", "# asyncio.gather returns results in the order of the original", "downloading \"%s\"' % name) # Launch a coroutine for each", "import json import datetime from aiohttp import ClientSession from channels.generic.http", "async with session.get(url) as response: return await response.read() tasks =", "\"\"\" async def handle(self, body): # Adapted from: # \"Making", "= [] loop = asyncio.get_event_loop() # aiohttp allows a ClientSession", "- t0).total_seconds() print('All downloads completed; elapsed time: {} [s]'.format(dt)) #", "original sequence, # so we can safely zip these together.", "for r in responses])) text = json.dumps(data) # We have", "Launch a coroutine for each URL fetch task = loop.create_task(fetch(url,", "1 million requests with python-aiohttp\" # https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html async def fetch(url,", "data = dict(zip(BLOGS.keys(), [r.decode('utf-8') for r in responses])) text =", "# Wait on, and then gather, all responses responses =", "as session: for name, url in BLOGS.items(): print('Start downloading \"%s\"'", "we can safely zip these together. data = dict(zip(BLOGS.keys(), [r.decode('utf-8')", "Wait on, and then gather, all responses responses = await", "with python-aiohttp\" # https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html async def fetch(url, session): async with", "to link all requests together t0 = datetime.datetime.now() async with", "# so we can safely zip these together. 
data =", "asyncio import json import datetime from aiohttp import ClientSession from", "from aiohttp import ClientSession from channels.generic.http import AsyncHttpConsumer from .constants", "async def handle(self, body): # Adapted from: # \"Making 1", "with session.get(url) as response: return await response.read() tasks = []", "dict(zip(BLOGS.keys(), [r.decode('utf-8') for r in responses])) text = json.dumps(data) #", "in responses])) text = json.dumps(data) # We have to send", "# \"Making 1 million requests with python-aiohttp\" # https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html async", "requests with python-aiohttp\" # https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html async def fetch(url, session): async", "all requests together t0 = datetime.datetime.now() async with ClientSession() as", "object to link all requests together t0 = datetime.datetime.now() async", "for name, url in BLOGS.items(): print('Start downloading \"%s\"' % name)", "elapsed time: {} [s]'.format(dt)) # asyncio.gather returns results in the", "loop.create_task(fetch(url, session)) tasks.append(task) # Wait on, and then gather, all", "together. 
data = dict(zip(BLOGS.keys(), [r.decode('utf-8') for r in responses])) text", "# We have to send a response using send_response rather", "send a response using send_response rather than returning # it", "in BLOGS.items(): print('Start downloading \"%s\"' % name) # Launch a", "session)) tasks.append(task) # Wait on, and then gather, all responses", "ClientSession() as session: for name, url in BLOGS.items(): print('Start downloading", "a ClientSession object to link all requests together t0 =", "rather than returning # it in Channels' async HTTP consumer", "completed; elapsed time: {} [s]'.format(dt)) # asyncio.gather returns results in", "loop = asyncio.get_event_loop() # aiohttp allows a ClientSession object to", "allows a ClientSession object to link all requests together t0", "= loop.create_task(fetch(url, session)) tasks.append(task) # Wait on, and then gather,", "asyncio.gather(*tasks) dt = (datetime.datetime.now() - t0).total_seconds() print('All downloads completed; elapsed", "responses])) text = json.dumps(data) # We have to send a", "# https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html async def fetch(url, session): async with session.get(url) as", "consumer that fetches URLs. 
\"\"\" async def handle(self, body): #", "datetime.datetime.now() async with ClientSession() as session: for name, url in", "= asyncio.get_event_loop() # aiohttp allows a ClientSession object to link", "json.dumps(data) # We have to send a response using send_response", "channels.generic.http import AsyncHttpConsumer from .constants import BLOGS class NewsCollectorAsyncConsumer(AsyncHttpConsumer): \"\"\"", "BLOGS.items(): print('Start downloading \"%s\"' % name) # Launch a coroutine", "from .constants import BLOGS class NewsCollectorAsyncConsumer(AsyncHttpConsumer): \"\"\" Async HTTP consumer", "results in the order of the original sequence, # so", "tasks = [] loop = asyncio.get_event_loop() # aiohttp allows a", "it in Channels' async HTTP consumer await self.send_response(200, text.encode(), headers=[", "(datetime.datetime.now() - t0).total_seconds() print('All downloads completed; elapsed time: {} [s]'.format(dt))" ]
[ "'w') as file: file.write(content) def read_line_looping(file_name, count): i = 0", "i = 0 lines = [] file = open(file_name, 'r')", "= open(file_name, 'r') line = file.readline() if line == '':", "raise EmptyFileError(f'Error: Dictionary {file_name} seems to be empty') while i", "from pathlib import Path def write(file_name, content): Path(os.path.dirname(file_name)).mkdir(parents=True, exist_ok=True) with", "be empty') while i < count: lines.append(line.strip()) i += 1", "line == '': file.close() file = open(file_name, 'r') line =", "= [] file = open(file_name, 'r') line = file.readline() if", "EmptyFileError(f'Error: Dictionary {file_name} seems to be empty') while i <", "== '': raise EmptyFileError(f'Error: Dictionary {file_name} seems to be empty')", "import Path def write(file_name, content): Path(os.path.dirname(file_name)).mkdir(parents=True, exist_ok=True) with open(file_name, 'w')", "while i < count: lines.append(line.strip()) i += 1 line =", "read_line_looping(file_name, count): i = 0 lines = [] file =", "= file.readline() if line == '': raise EmptyFileError(f'Error: Dictionary {file_name}", "to be empty') while i < count: lines.append(line.strip()) i +=", "0 lines = [] file = open(file_name, 'r') line =", "Path def write(file_name, content): Path(os.path.dirname(file_name)).mkdir(parents=True, exist_ok=True) with open(file_name, 'w') as", "file = open(file_name, 'r') line = file.readline() file.close() return lines", "open(file_name, 'w') as file: file.write(content) def read_line_looping(file_name, count): i =", "if line == '': file.close() file = open(file_name, 'r') line", "line == '': raise EmptyFileError(f'Error: Dictionary {file_name} seems to be", "< count: lines.append(line.strip()) i += 1 line = file.readline() if", "line = file.readline() if line == '': file.close() file =", "pathlib import Path def write(file_name, content): Path(os.path.dirname(file_name)).mkdir(parents=True, exist_ok=True) with open(file_name,", "= 0 lines = [] file = 
open(file_name, 'r') line", "seems to be empty') while i < count: lines.append(line.strip()) i", "'r') line = file.readline() if line == '': raise EmptyFileError(f'Error:", "count): i = 0 lines = [] file = open(file_name,", "empty') while i < count: lines.append(line.strip()) i += 1 line", "= file.readline() if line == '': file.close() file = open(file_name,", "def write(file_name, content): Path(os.path.dirname(file_name)).mkdir(parents=True, exist_ok=True) with open(file_name, 'w') as file:", "file.close() file = open(file_name, 'r') line = file.readline() file.close() return", "file.write(content) def read_line_looping(file_name, count): i = 0 lines = []", "exist_ok=True) with open(file_name, 'w') as file: file.write(content) def read_line_looping(file_name, count):", "open(file_name, 'r') line = file.readline() file.close() return lines class EmptyFileError(Exception):", "= open(file_name, 'r') line = file.readline() file.close() return lines class", "import os from pathlib import Path def write(file_name, content): Path(os.path.dirname(file_name)).mkdir(parents=True,", "1 line = file.readline() if line == '': file.close() file", "'': raise EmptyFileError(f'Error: Dictionary {file_name} seems to be empty') while", "'r') line = file.readline() file.close() return lines class EmptyFileError(Exception): pass", "count: lines.append(line.strip()) i += 1 line = file.readline() if line", "os from pathlib import Path def write(file_name, content): Path(os.path.dirname(file_name)).mkdir(parents=True, exist_ok=True)", "open(file_name, 'r') line = file.readline() if line == '': raise", "as file: file.write(content) def read_line_looping(file_name, count): i = 0 lines", "i < count: lines.append(line.strip()) i += 1 line = file.readline()", "i += 1 line = file.readline() if line == '':", "file.readline() if line == '': file.close() file = open(file_name, 'r')", "line = file.readline() if line == '': raise EmptyFileError(f'Error: Dictionary", "Dictionary {file_name} seems to 
be empty') while i < count:", "content): Path(os.path.dirname(file_name)).mkdir(parents=True, exist_ok=True) with open(file_name, 'w') as file: file.write(content) def", "if line == '': raise EmptyFileError(f'Error: Dictionary {file_name} seems to", "+= 1 line = file.readline() if line == '': file.close()", "file: file.write(content) def read_line_looping(file_name, count): i = 0 lines =", "{file_name} seems to be empty') while i < count: lines.append(line.strip())", "write(file_name, content): Path(os.path.dirname(file_name)).mkdir(parents=True, exist_ok=True) with open(file_name, 'w') as file: file.write(content)", "'': file.close() file = open(file_name, 'r') line = file.readline() file.close()", "== '': file.close() file = open(file_name, 'r') line = file.readline()", "file.readline() if line == '': raise EmptyFileError(f'Error: Dictionary {file_name} seems", "def read_line_looping(file_name, count): i = 0 lines = [] file", "lines.append(line.strip()) i += 1 line = file.readline() if line ==", "file = open(file_name, 'r') line = file.readline() if line ==", "Path(os.path.dirname(file_name)).mkdir(parents=True, exist_ok=True) with open(file_name, 'w') as file: file.write(content) def read_line_looping(file_name,", "with open(file_name, 'w') as file: file.write(content) def read_line_looping(file_name, count): i", "[] file = open(file_name, 'r') line = file.readline() if line", "lines = [] file = open(file_name, 'r') line = file.readline()" ]
[]
[ "R_runmed_smooth_spline(x, y, x, spar=bestSpar, sc=sc, **pars) yEval, runMedData = R_runmed_smooth_spline(x,", "1.4826 * median(abs(allResiduals)) return predErr # ----------------------------------------------------------------------- if __name__ ==", "predErr = 0 allResiduals = array([]) SSE = sum(y **", "arange, hstack from win32com.client import Dispatch import math myName =", "random.shuffle(W) ind = 0 predErr = 0 allResiduals = array([])", "pieces random.shuffle(W) ind = 0 predErr = 0 allResiduals =", "# return isSuccessfulFit, yFit, yEval, runMedData SSE = sum(y **", "list(range(1, K + 1)) Z = [N for j in", "bestPredErr, ppmArrs # ----------------------------------------------------------------------- # ----------------------------------------------------------------------- def R_runmed_spline_KCV_OPTIMIZATION(x, y, sc,", "= sum(y ** 2) MAD = 1.4826 * median(abs(y)) if", "yEval, runMedData = R_runmed_smooth_spline(x, y, xEval, spar=bestSpar, sc=sc, **pars) #", "sc=sc, **pars) # ppmArrs[ind] = [yFit, yEval] else: isSuccessfulFit =", "----------------------------------------------------------------------- # ----------------------------------------------------------------------- def R_runmed_spline_KCV_OPTIMIZATION(x, y, sc, **pars): sparRange =", "= list(range(1, K + 1)) Z = [N for j", "0 predErr = 0 allResiduals = array([]) SSE = sum(y", "x1[ind] y1 = y1[ind] t1 = c() isSuccessfulFit, yFit, yEval,", "ARG3[0][0] y = ARG3[0][1] sc = Dispatch(\"StatConnectorSrv.StatConnector\") sc.Init(\"R\") # get", "K ##min length of pieces W = list(range(L)) Z =", "x1 = x1[ind] y1 = y1[ind] t1 = c() isSuccessfulFit,", "y1[ind] t1 = c() isSuccessfulFit, yFit, yEval, runMedData, predErr =", "import linspace, cos, lexsort, zeros, sin from pylab import plot,", "----------------------------------------------------------------------- def R_runmed_spline_KCV_predErr(x, y, **kwargs): \"\"\" just returns the prediction", "1.1, 0.1]) t2 = c() print('done in %s seconds' %", 
"R_runmed_smooth_spline(x[train], y[train], x[test], **kwargs) residualsTest = y[test] - yFit predErr", "there are few ## print('spar ', spar) return spar #", "test.sort() train = W[0:ind] + W[ind + j:] train.sort() ind", "deviations instead of sum of squared residues # ----------------------------------------------------------------------- def", "t2 = c() print('done in %s seconds' % (t2 -", "t1)) subplot(211) plot(x1, y1, 'bo') plot(runMedData[0], runMedData[1], 'y^') plot(x1, yEval,", "number']) sparStep = round((sparRange[1] - sparRange[0]) / sparStepsNum, 5) sparSet", "= sparSet[predErrSet == min(predErrSet)][-1] # take the last one (smoothest)", "lexsort, zeros, sin from pylab import plot, show, subplot, savefig,", "== min(predErrSet)][-1] # take the last one (smoothest) if there", "fit successful? # return isSuccessfulFit, yFit, yEval, runMedData SSE =", "pars = Controller.updatedSettings['refiningPars']['regressionSettings'][myName] # ARG3 x = ARG3[0][0] y =", "'__main__': from numpy import linspace, cos, lexsort, zeros, sin from", "x = ARG3[0][0] y = ARG3[0][1] sc = Dispatch(\"StatConnectorSrv.StatConnector\") sc.Init(\"R\")", "== '__main__': from numpy import linspace, cos, lexsort, zeros, sin", "y1, 'bo') plot(runMedData[0], runMedData[1], 'y^') plot(x1, yEval, 'r+-') ylim([-1.5, +1.5])", "yEval, runMedData, predErr = \\ R_runmed_spline_MAIN(x1, y1, x1, runMedSpan=0.01, K=10,", "+ 1 for j in Z[0:R]] # length of the", "int(kwargs['K']) # --Related to K-fold CV--------------------------- L = len(x) N", "isSuccessfulFit = True # ppmArrs = [[] for i in", "pars['spar range'].split(',')]) sparStepsNum = int(pars['spar steps number']) sparStep = round((sparRange[1]", "to K-fold CV--------------------------- L = len(x) N = L /", "'d') yEval = zeros(len(xEval), 'd') # ppmArrs[ind] = [yFit, yEval]", "= ARG3[0][0] y = ARG3[0][1] sc = Dispatch(\"StatConnectorSrv.StatConnector\") sc.Init(\"R\") #", "runMed = R_runmed_smooth_spline(x[train], y[train], x[test], 
**kwargs) residualsTest = y[test] -", "---running through K training/testings------------- for val in Z: j =", "from numpy import linspace, cos, lexsort, zeros, sin from pylab", "import pprint as p from time import clock as c", "= list(range(L)) Z = list(range(1, K + 1)) Z =", "time import clock as c x1 = linspace(0, 30, 300)", "= MAD if bestPredErr < SSE: isSuccessfulFit = True #", "#nice test y1 = x1 * 0.03 y1 += random.normal(scale=0.2,", "for j in Z[0:R]] # length of the pieces random.shuffle(W)", "= cos(x1) ## y1 = zeros(len(x1),'d') #nice test y1 =", "def R_runmed_spline_KCV_predErr(x, y, **kwargs): \"\"\" just returns the prediction error", "random.normal(scale=0.2, size=y1.shape) ind = lexsort(keys=(y1, x1)) x1 = x1[ind] y1", "pprint import pprint as p from time import clock as", "yEval] sc.Close() return isSuccessfulFit, bestPredErr, ppmArrs # ----------------------------------------------------------------------- # -----------------------------------------------------------------------", "5) sparSet = arange(sparRange[0], sparRange[1], sparStep) predErrSet = zeros(len(sparSet), 'd')", "of pieces W = list(range(L)) Z = list(range(1, K +", "in Z] R = L % K Z[0:R] = [j", "sc.Close() return isSuccessfulFit, bestPredErr, ppmArrs # ----------------------------------------------------------------------- # ----------------------------------------------------------------------- def", "= Controller.updatedSettings['refiningPars']['regressionSettings'][myName] # ARG3 x = ARG3[0][0] y = ARG3[0][1]", "else: isSuccessfulFit = False # ppmArrs = [[] for i", "predErr # ----------------------------------------------------------------------- if __name__ == '__main__': from numpy import", "= W[0:ind] + W[ind + j:] train.sort() ind += j", "import random, array, median, zeros, arange, hstack from win32com.client import", "isSuccessfulFit, yFit, yEval, runMedData SSE = sum(y ** 2) MAD", "get the best smoothing parameter bestSpar = R_runmed_spline_KCV_OPTIMIZATION(x, y, sc=sc,", 
"ppmArrs[ind] = [yFit, yEval] sc.Close() return isSuccessfulFit, bestPredErr, ppmArrs #", "+= random.normal(scale=0.2, size=y1.shape) ind = lexsort(keys=(y1, x1)) x1 = x1[ind]", "y, spar=bestSpar, sc=sc, **pars) # compare with original SSE #", "* median(abs(allResiduals)) return predErr # ----------------------------------------------------------------------- if __name__ == '__main__':", "range'].split(',')]) sparStepsNum = int(pars['spar steps number']) sparStep = round((sparRange[1] -", "do I need this??? # ---running through K training/testings------------- for", "get the prediction error for this smoothing parameter bestPredErr =", "< SSE: isSuccessfulFit = True # ppmArrs = [[] for", "yFit, yEval, runMedData, predErr = \\ R_runmed_spline_MAIN(x1, y1, x1, runMedSpan=0.01,", "import Dispatch import math myName = 'R_runmed_spline' useMAD = True", "zeros(len(x1),'d') #nice test y1 = x1 * 0.03 y1 +=", "sc=sc, **pars) # compare with original SSE # is fit", "clf, ylim from pprint import pprint as p from time", "predErr = \\ R_runmed_spline_MAIN(x1, y1, x1, runMedSpan=0.01, K=10, sparRange=[0.6, 1.1,", "in Z: j = math.floor(val) # ---making training/testing subsets------------- test", "y = ARG3[ind][1] xEval = ARG3[ind][2] # yFit, runMedData =", "for j in Z] R = L % K Z[0:R]", "y, sc=sc, **pars) # get the prediction error for this", "= R_runmed_spline_KCV_OPTIMIZATION(x, y, sc=sc, **pars) # get the prediction error", "## print('spar ', spar) return spar # ----------------------------------------------------------------------- # -----------------------------------------------------------------------", "W = list(range(L)) Z = list(range(1, K + 1)) Z", "yEval] else: isSuccessfulFit = False # ppmArrs = [[] for", "return spar # ----------------------------------------------------------------------- # ----------------------------------------------------------------------- def R_runmed_spline_KCV_predErr(x, y, **kwargs):", "median, zeros, arange, hstack from win32com.client 
import Dispatch import math", "Controller): pars = Controller.updatedSettings['refiningPars']['regressionSettings'][myName] # ARG3 x = ARG3[0][0] y", "spar = sparSet[predErrSet == min(predErrSet)][-1] # take the last one", "# ----------------------------------------------------------------------- def R_runmed_spline_KCV_predErr(x, y, **kwargs): \"\"\" just returns the", "= linspace(0, 30, 300) ## y1 = cos(x1) ## y1", "__name__ == '__main__': from numpy import linspace, cos, lexsort, zeros,", "in range(len(ARG3)): x = ARG3[ind][0] y = ARG3[ind][1] xEval =", "median(abs(y)) if useMAD: SSE = MAD if bestPredErr < SSE:", "clock as c x1 = linspace(0, 30, 300) ## y1", "hstack((allResiduals, residualsTest)) # ----------------------------------------------- if useMAD: predErr = 1.4826 *", "sin from pylab import plot, show, subplot, savefig, clf, ylim", "yEval, runMedData SSE = sum(y ** 2) MAD = 1.4826", "y[test] - yFit predErr += sum(residualsTest ** 2) allResiduals =", "sparStep = round((sparRange[1] - sparRange[0]) / sparStepsNum, 5) sparSet =", "%s seconds' % (t2 - t1)) subplot(211) plot(x1, y1, 'bo')", "# VLAD. Why do I need this??? 
# ---running through", "= W[ind:ind + j] test.sort() train = W[0:ind] + W[ind", "cos, lexsort, zeros, sin from pylab import plot, show, subplot,", "ind = 0 predErr = 0 allResiduals = array([]) SSE", "zeros, sin from pylab import plot, show, subplot, savefig, clf,", "c() isSuccessfulFit, yFit, yEval, runMedData, predErr = \\ R_runmed_spline_MAIN(x1, y1,", "Dispatch import math myName = 'R_runmed_spline' useMAD = True #", "numpy import random, array, median, zeros, arange, hstack from win32com.client", "+= j # ----------------------------------------------- # ---fit runmed_spline here---------------------- yFit, runMed", "ARG3[ind][2] # yFit = zeros(len(x), 'd') yEval = zeros(len(xEval), 'd')", "c x1 = linspace(0, 30, 300) ## y1 = cos(x1)", "** 2) allResiduals = hstack((allResiduals, residualsTest)) # ----------------------------------------------- if useMAD:", "bestPredErr < SSE: isSuccessfulFit = True # ppmArrs = [[]", "y, xEval, spar=bestSpar, sc=sc, **pars) # ppmArrs[ind] = [yFit, yEval]", "---fit runmed_spline here---------------------- yFit, runMed = R_runmed_smooth_spline(x[train], y[train], x[test], **kwargs)", "spar=sparSet[i], sc=sc, **pars) predErrSet[i] = predErr ## p(zip(sparSet, predErrSet)) spar", "W[0:ind] + W[ind + j:] train.sort() ind += j #", "as c x1 = linspace(0, 30, 300) ## y1 =", "L = len(x) N = L / K ##min length", "ppmArrs[ind] = [yFit, yEval] else: isSuccessfulFit = False # ppmArrs", "xEval, spar=bestSpar, sc=sc, **pars) # ppmArrs[ind] = [yFit, yEval] else:", "bestPredErr = R_runmed_spline_KCV_predErr(x, y, spar=bestSpar, sc=sc, **pars) # compare with", "smoothing parameter bestSpar = R_runmed_spline_KCV_OPTIMIZATION(x, y, sc=sc, **pars) # get", "SSE = MAD if bestPredErr < SSE: isSuccessfulFit = True", "x = ARG3[ind][0] y = ARG3[ind][1] xEval = ARG3[ind][2] #", "successful? 
# return isSuccessfulFit, yFit, yEval, runMedData SSE = sum(y", "W[ind:ind + j] test.sort() train = W[0:ind] + W[ind +", "R = L % K Z[0:R] = [j + 1", "int(pars['spar steps number']) sparStep = round((sparRange[1] - sparRange[0]) / sparStepsNum,", "K-fold CV--------------------------- L = len(x) N = L / K", "= [N for j in Z] R = L %", "size=y1.shape) ind = lexsort(keys=(y1, x1)) x1 = x1[ind] y1 =", "# ----------------------------------------------- # ---fit runmed_spline here---------------------- yFit, runMed = R_runmed_smooth_spline(x[train],", "p from time import clock as c x1 = linspace(0,", "myName = 'R_runmed_spline' useMAD = True # use median absolute", "* median(abs(y)) if useMAD: SSE = MAD if bestPredErr <", "Z] R = L % K Z[0:R] = [j +", "= math.floor(val) # ---making training/testing subsets------------- test = W[ind:ind +", "', spar) return spar # ----------------------------------------------------------------------- # ----------------------------------------------------------------------- def R_runmed_spline_KCV_predErr(x,", "# ARG3 x = ARG3[0][0] y = ARG3[0][1] sc =", "= hstack((allResiduals, residualsTest)) # ----------------------------------------------- if useMAD: predErr = 1.4826", "R_runmed_spline_MAIN(x1, y1, x1, runMedSpan=0.01, K=10, sparRange=[0.6, 1.1, 0.1]) t2 =", "sum(y ** 2) # VLAD. 
Why do I need this???", "if bestPredErr < SSE: isSuccessfulFit = True # ppmArrs =", "useMAD: SSE = MAD if bestPredErr < SSE: isSuccessfulFit =", "# ---making training/testing subsets------------- test = W[ind:ind + j] test.sort()", "array, median, zeros, arange, hstack from win32com.client import Dispatch import", "**pars) # get the prediction error for this smoothing parameter", "[N for j in Z] R = L % K", "sc=sc, **pars) yEval, runMedData = R_runmed_smooth_spline(x, y, xEval, spar=bestSpar, sc=sc,", "y1 += random.normal(scale=0.2, size=y1.shape) ind = lexsort(keys=(y1, x1)) x1 =", "= R_runmed_smooth_spline(x, y, xEval, spar=bestSpar, sc=sc, **pars) # ppmArrs[ind] =", "**pars) # compare with original SSE # is fit successful?", "Z[0:R]] # length of the pieces random.shuffle(W) ind = 0", "predErr ## p(zip(sparSet, predErrSet)) spar = sparSet[predErrSet == min(predErrSet)][-1] #", "runMedData, predErr = \\ R_runmed_spline_MAIN(x1, y1, x1, runMedSpan=0.01, K=10, sparRange=[0.6,", "# ---running through K training/testings------------- for val in Z: j", "win32com.client import Dispatch import math myName = 'R_runmed_spline' useMAD =", "residues # ----------------------------------------------------------------------- def R_runmed_spline_MAIN(ARG3, Controller): pars = Controller.updatedSettings['refiningPars']['regressionSettings'][myName] #", "ARG3[ind][0] y = ARG3[ind][1] xEval = ARG3[ind][2] # yFit, runMedData", "ind = lexsort(keys=(y1, x1)) x1 = x1[ind] y1 = y1[ind]", "ARG3[ind][1] xEval = ARG3[ind][2] # yFit = zeros(len(x), 'd') yEval", "runmed_spline here---------------------- yFit, runMed = R_runmed_smooth_spline(x[train], y[train], x[test], **kwargs) residualsTest", "spar) return spar # ----------------------------------------------------------------------- # ----------------------------------------------------------------------- def R_runmed_spline_KCV_predErr(x, y,", "val in Z: j = math.floor(val) # ---making training/testing subsets-------------", "W[ind + j:] 
train.sort() ind += j # ----------------------------------------------- #", "x1 = linspace(0, 30, 300) ## y1 = cos(x1) ##", "2) MAD = 1.4826 * median(abs(y)) if useMAD: SSE =", "predErrSet = zeros(len(sparSet), 'd') for i in range(len(sparSet)): predErr =", "= zeros(len(xEval), 'd') # ppmArrs[ind] = [yFit, yEval] sc.Close() return", "y1 = zeros(len(x1),'d') #nice test y1 = x1 * 0.03", "arange(sparRange[0], sparRange[1], sparStep) predErrSet = zeros(len(sparSet), 'd') for i in", "in Z[0:R]] # length of the pieces random.shuffle(W) ind =", "= ARG3[ind][2] # yFit, runMedData = R_runmed_smooth_spline(x, y, x, spar=bestSpar,", "I need this??? # ---running through K training/testings------------- for val", "R_runmed_smooth_spline(x, y, xEval, spar=bestSpar, sc=sc, **pars) # ppmArrs[ind] = [yFit,", "def R_runmed_spline_MAIN(ARG3, Controller): pars = Controller.updatedSettings['refiningPars']['regressionSettings'][myName] # ARG3 x =", "# ---fit runmed_spline here---------------------- yFit, runMed = R_runmed_smooth_spline(x[train], y[train], x[test],", "= R_runmed_smooth_spline(x[train], y[train], x[test], **kwargs) residualsTest = y[test] - yFit", "pprint as p from time import clock as c x1", "j in Z] R = L % K Z[0:R] =", "---making training/testing subsets------------- test = W[ind:ind + j] test.sort() train", "zeros(len(x), 'd') yEval = zeros(len(xEval), 'd') # ppmArrs[ind] = [yFit,", "from time import clock as c x1 = linspace(0, 30,", "y, spar=sparSet[i], sc=sc, **pars) predErrSet[i] = predErr ## p(zip(sparSet, predErrSet))", "1.4826 * median(abs(y)) if useMAD: SSE = MAD if bestPredErr", "= R_runmed_spline_KCV_predErr(x, y, spar=bestSpar, sc=sc, **pars) # compare with original", "subsets------------- test = W[ind:ind + j] test.sort() train = W[0:ind]", "## p(zip(sparSet, predErrSet)) spar = sparSet[predErrSet == min(predErrSet)][-1] # take", "train.sort() ind += j # ----------------------------------------------- # ---fit runmed_spline here----------------------", "# 
----------------------------------------------------------------------- def R_runmed_spline_MAIN(ARG3, Controller): pars = Controller.updatedSettings['refiningPars']['regressionSettings'][myName] # ARG3", "as p from time import clock as c x1 =", "instead of sum of squared residues # ----------------------------------------------------------------------- def R_runmed_spline_MAIN(ARG3,", "= ARG3[ind][1] xEval = ARG3[ind][2] # yFit = zeros(len(x), 'd')", "in pars['spar range'].split(',')]) sparStepsNum = int(pars['spar steps number']) sparStep =", "return isSuccessfulFit, bestPredErr, ppmArrs # ----------------------------------------------------------------------- # ----------------------------------------------------------------------- def R_runmed_spline_KCV_OPTIMIZATION(x,", "0 allResiduals = array([]) SSE = sum(y ** 2) #", "= True # ppmArrs = [[] for i in range(len(ARG3))]", "of sum of squared residues # ----------------------------------------------------------------------- def R_runmed_spline_MAIN(ARG3, Controller):", "i in range(len(sparSet)): predErr = R_runmed_spline_KCV_predErr(x, y, spar=sparSet[i], sc=sc, **pars)", "predErrSet)) spar = sparSet[predErrSet == min(predErrSet)][-1] # take the last", "**kwargs): \"\"\" just returns the prediction error \"\"\" K =", "useMAD: predErr = 1.4826 * median(abs(allResiduals)) return predErr # -----------------------------------------------------------------------", "squared residues # ----------------------------------------------------------------------- def R_runmed_spline_MAIN(ARG3, Controller): pars = Controller.updatedSettings['refiningPars']['regressionSettings'][myName]", "sparRange[0]) / sparStepsNum, 5) sparSet = arange(sparRange[0], sparRange[1], sparStep) predErrSet", "j] test.sort() train = W[0:ind] + W[ind + j:] train.sort()", "R_runmed_spline_KCV_OPTIMIZATION(x, y, sc, **pars): sparRange = array([float(i) for i in", "error \"\"\" K = int(kwargs['K']) # --Related to K-fold CV---------------------------", 
"take the last one (smoothest) if there are few ##", "# compare with original SSE # is fit successful? #", "spar # ----------------------------------------------------------------------- # ----------------------------------------------------------------------- def R_runmed_spline_KCV_predErr(x, y, **kwargs): \"\"\"", "# ----------------------------------------------------------------------- if __name__ == '__main__': from numpy import linspace,", "VLAD. Why do I need this??? # ---running through K", "if __name__ == '__main__': from numpy import linspace, cos, lexsort,", "import R_runmed_smooth_spline from numpy import random, array, median, zeros, arange,", "----------------------------------------------------------------------- # ----------------------------------------------------------------------- def R_runmed_spline_KCV_predErr(x, y, **kwargs): \"\"\" just returns", "R_runmed_spline_KCV_predErr(x, y, spar=sparSet[i], sc=sc, **pars) predErrSet[i] = predErr ## p(zip(sparSet,", "= c() print('done in %s seconds' % (t2 - t1))", "for ind in range(len(ARG3)): x = ARG3[ind][0] y = ARG3[ind][1]", "array([]) SSE = sum(y ** 2) # VLAD. Why do", "R_runmed_spline_MAIN(ARG3, Controller): pars = Controller.updatedSettings['refiningPars']['regressionSettings'][myName] # ARG3 x = ARG3[0][0]", "isSuccessfulFit, bestPredErr, ppmArrs # ----------------------------------------------------------------------- # ----------------------------------------------------------------------- def R_runmed_spline_KCV_OPTIMIZATION(x, y,", "bestSpar = R_runmed_spline_KCV_OPTIMIZATION(x, y, sc=sc, **pars) # get the prediction", "# ----------------------------------------------------------------------- # ----------------------------------------------------------------------- def R_runmed_spline_KCV_OPTIMIZATION(x, y, sc, **pars): sparRange", "j:] train.sort() ind += j # ----------------------------------------------- # ---fit runmed_spline", "with original SSE # is fit successful? 
# return isSuccessfulFit,", "= arange(sparRange[0], sparRange[1], sparStep) predErrSet = zeros(len(sparSet), 'd') for i", "= ARG3[0][1] sc = Dispatch(\"StatConnectorSrv.StatConnector\") sc.Init(\"R\") # get the best", "= y[test] - yFit predErr += sum(residualsTest ** 2) allResiduals", "from pylab import plot, show, subplot, savefig, clf, ylim from", "list(range(L)) Z = list(range(1, K + 1)) Z = [N", "Z[0:R] = [j + 1 for j in Z[0:R]] #", "i in range(len(ARG3))] for ind in range(len(ARG3)): x = ARG3[ind][0]", "y = ARG3[0][1] sc = Dispatch(\"StatConnectorSrv.StatConnector\") sc.Init(\"R\") # get the", "sparRange[1], sparStep) predErrSet = zeros(len(sparSet), 'd') for i in range(len(sparSet)):", "numpy import linspace, cos, lexsort, zeros, sin from pylab import", "** 2) # VLAD. Why do I need this??? #", "**pars) predErrSet[i] = predErr ## p(zip(sparSet, predErrSet)) spar = sparSet[predErrSet", "spar=bestSpar, sc=sc, **pars) yEval, runMedData = R_runmed_smooth_spline(x, y, xEval, spar=bestSpar,", "for i in range(len(sparSet)): predErr = R_runmed_spline_KCV_predErr(x, y, spar=sparSet[i], sc=sc,", "zeros, arange, hstack from win32com.client import Dispatch import math myName", "sparSet[predErrSet == min(predErrSet)][-1] # take the last one (smoothest) if", "'r+-') ylim([-1.5, +1.5]) subplot(212) plot(x1, y1 - yEval, 'go') ylim([-1.5,", "zeros(len(xEval), 'd') # ppmArrs[ind] = [yFit, yEval] sc.Close() return isSuccessfulFit,", "absolute deviations instead of sum of squared residues # -----------------------------------------------------------------------", "y, **kwargs): \"\"\" just returns the prediction error \"\"\" K", "yEval = zeros(len(xEval), 'd') # ppmArrs[ind] = [yFit, yEval] sc.Close()", "median(abs(allResiduals)) return predErr # ----------------------------------------------------------------------- if __name__ == '__main__': from", "plot(x1, yEval, 'r+-') ylim([-1.5, +1.5]) subplot(212) plot(x1, y1 - yEval,", "p(zip(sparSet, predErrSet)) spar = 
sparSet[predErrSet == min(predErrSet)][-1] # take the", "math myName = 'R_runmed_spline' useMAD = True # use median", "allResiduals = array([]) SSE = sum(y ** 2) # VLAD.", "x, spar=bestSpar, sc=sc, **pars) yEval, runMedData = R_runmed_smooth_spline(x, y, xEval,", "= [yFit, yEval] else: isSuccessfulFit = False # ppmArrs =", "# --Related to K-fold CV--------------------------- L = len(x) N =", "Z = [N for j in Z] R = L", "the prediction error for this smoothing parameter bestPredErr = R_runmed_spline_KCV_predErr(x,", "this smoothing parameter bestPredErr = R_runmed_spline_KCV_predErr(x, y, spar=bestSpar, sc=sc, **pars)", "= zeros(len(sparSet), 'd') for i in range(len(sparSet)): predErr = R_runmed_spline_KCV_predErr(x,", "ppmArrs # ----------------------------------------------------------------------- # ----------------------------------------------------------------------- def R_runmed_spline_KCV_OPTIMIZATION(x, y, sc, **pars):", "= int(kwargs['K']) # --Related to K-fold CV--------------------------- L = len(x)", "is fit successful? # return isSuccessfulFit, yFit, yEval, runMedData SSE", "# ----------------------------------------------- if useMAD: predErr = 1.4826 * median(abs(allResiduals)) return", "length of pieces W = list(range(L)) Z = list(range(1, K", "30, 300) ## y1 = cos(x1) ## y1 = zeros(len(x1),'d')", "\\ R_runmed_spline_MAIN(x1, y1, x1, runMedSpan=0.01, K=10, sparRange=[0.6, 1.1, 0.1]) t2", "----------------------------------------------- if useMAD: predErr = 1.4826 * median(abs(allResiduals)) return predErr", "MAD = 1.4826 * median(abs(y)) if useMAD: SSE = MAD", "N = L / K ##min length of pieces W", "plot, show, subplot, savefig, clf, ylim from pprint import pprint", "y, sc, **pars): sparRange = array([float(i) for i in pars['spar", "c() print('done in %s seconds' % (t2 - t1)) subplot(211)", "1 for j in Z[0:R]] # length of the pieces", "= array([]) SSE = sum(y ** 2) # VLAD. 
Why", "##min length of pieces W = list(range(L)) Z = list(range(1,", "are few ## print('spar ', spar) return spar # -----------------------------------------------------------------------", "useMAD = True # use median absolute deviations instead of", "steps number']) sparStep = round((sparRange[1] - sparRange[0]) / sparStepsNum, 5)", "1)) Z = [N for j in Z] R =", "here---------------------- yFit, runMed = R_runmed_smooth_spline(x[train], y[train], x[test], **kwargs) residualsTest =", "= len(x) N = L / K ##min length of", "SSE = sum(y ** 2) MAD = 1.4826 * median(abs(y))", "pylab import plot, show, subplot, savefig, clf, ylim from pprint", "ylim([-1.5, +1.5]) subplot(212) plot(x1, y1 - yEval, 'go') ylim([-1.5, +1.5])", "= R_runmed_spline_KCV_predErr(x, y, spar=sparSet[i], sc=sc, **pars) predErrSet[i] = predErr ##", "test = W[ind:ind + j] test.sort() train = W[0:ind] +", "ylim from pprint import pprint as p from time import", "sparStep) predErrSet = zeros(len(sparSet), 'd') for i in range(len(sparSet)): predErr", "min(predErrSet)][-1] # take the last one (smoothest) if there are", "from numpy import random, array, median, zeros, arange, hstack from", "= int(pars['spar steps number']) sparStep = round((sparRange[1] - sparRange[0]) /", "training/testing subsets------------- test = W[ind:ind + j] test.sort() train =", "** 2) MAD = 1.4826 * median(abs(y)) if useMAD: SSE", "yFit, runMed = R_runmed_smooth_spline(x[train], y[train], x[test], **kwargs) residualsTest = y[test]", "= ARG3[ind][1] xEval = ARG3[ind][2] # yFit, runMedData = R_runmed_smooth_spline(x,", "+ j:] train.sort() ind += j # ----------------------------------------------- # ---fit", "CV--------------------------- L = len(x) N = L / K ##min", "import clock as c x1 = linspace(0, 30, 300) ##", "median absolute deviations instead of sum of squared residues #", "plot(runMedData[0], runMedData[1], 'y^') plot(x1, yEval, 'r+-') ylim([-1.5, +1.5]) subplot(212) plot(x1,", "the last one (smoothest) if there are few ## 
print('spar", "# yFit, runMedData = R_runmed_smooth_spline(x, y, x, spar=bestSpar, sc=sc, **pars)", "residualsTest)) # ----------------------------------------------- if useMAD: predErr = 1.4826 * median(abs(allResiduals))", "(smoothest) if there are few ## print('spar ', spar) return", "parameter bestPredErr = R_runmed_spline_KCV_predErr(x, y, spar=bestSpar, sc=sc, **pars) # compare", "seconds' % (t2 - t1)) subplot(211) plot(x1, y1, 'bo') plot(runMedData[0],", "random, array, median, zeros, arange, hstack from win32com.client import Dispatch", "= [j + 1 for j in Z[0:R]] # length", "# ----------------------------------------------------------------------- # ----------------------------------------------------------------------- def R_runmed_spline_KCV_predErr(x, y, **kwargs): \"\"\" just", "SSE # is fit successful? # return isSuccessfulFit, yFit, yEval,", "= array([float(i) for i in pars['spar range'].split(',')]) sparStepsNum = int(pars['spar", "# length of the pieces random.shuffle(W) ind = 0 predErr", "Why do I need this??? 
# ---running through K training/testings-------------", "[yFit, yEval] else: isSuccessfulFit = False # ppmArrs = [[]", "/ sparStepsNum, 5) sparSet = arange(sparRange[0], sparRange[1], sparStep) predErrSet =", "----------------------------------------------------------------------- if __name__ == '__main__': from numpy import linspace, cos,", "+1.5]) subplot(212) plot(x1, y1 - yEval, 'go') ylim([-1.5, +1.5]) show()", "print('spar ', spar) return spar # ----------------------------------------------------------------------- # ----------------------------------------------------------------------- def", "# yFit = zeros(len(x), 'd') yEval = zeros(len(xEval), 'd') #", "= 'R_runmed_spline' useMAD = True # use median absolute deviations", "= predErr ## p(zip(sparSet, predErrSet)) spar = sparSet[predErrSet == min(predErrSet)][-1]", "y1 = x1 * 0.03 y1 += random.normal(scale=0.2, size=y1.shape) ind", "Z = list(range(1, K + 1)) Z = [N for", "K + 1)) Z = [N for j in Z]", "lexsort(keys=(y1, x1)) x1 = x1[ind] y1 = y1[ind] t1 =", "+= sum(residualsTest ** 2) allResiduals = hstack((allResiduals, residualsTest)) # -----------------------------------------------", "i in pars['spar range'].split(',')]) sparStepsNum = int(pars['spar steps number']) sparStep", "% K Z[0:R] = [j + 1 for j in", "length of the pieces random.shuffle(W) ind = 0 predErr =", "pieces W = list(range(L)) Z = list(range(1, K + 1))", "show, subplot, savefig, clf, ylim from pprint import pprint as", "**pars) yEval, runMedData = R_runmed_smooth_spline(x, y, xEval, spar=bestSpar, sc=sc, **pars)", "K training/testings------------- for val in Z: j = math.floor(val) #", "ppmArrs = [[] for i in range(len(ARG3))] for ind in", "returns the prediction error \"\"\" K = int(kwargs['K']) # --Related", "0.1]) t2 = c() print('done in %s seconds' % (t2", "# ppmArrs[ind] = [yFit, yEval] sc.Close() return isSuccessfulFit, bestPredErr, ppmArrs", "2) allResiduals = hstack((allResiduals, residualsTest)) # 
----------------------------------------------- if useMAD: predErr", "= L % K Z[0:R] = [j + 1 for", "in range(len(ARG3))] for ind in range(len(ARG3)): x = ARG3[ind][0] y", "% (t2 - t1)) subplot(211) plot(x1, y1, 'bo') plot(runMedData[0], runMedData[1],", "xEval = ARG3[ind][2] # yFit = zeros(len(x), 'd') yEval =", "in range(len(sparSet)): predErr = R_runmed_spline_KCV_predErr(x, y, spar=sparSet[i], sc=sc, **pars) predErrSet[i]", "--Related to K-fold CV--------------------------- L = len(x) N = L", "y1, x1, runMedSpan=0.01, K=10, sparRange=[0.6, 1.1, 0.1]) t2 = c()", "one (smoothest) if there are few ## print('spar ', spar)", "= 0 predErr = 0 allResiduals = array([]) SSE =", "through K training/testings------------- for val in Z: j = math.floor(val)", "j in Z[0:R]] # length of the pieces random.shuffle(W) ind", "# is fit successful? # return isSuccessfulFit, yFit, yEval, runMedData", "**pars) # ppmArrs[ind] = [yFit, yEval] else: isSuccessfulFit = False", "predErr += sum(residualsTest ** 2) allResiduals = hstack((allResiduals, residualsTest)) #", "2) # VLAD. Why do I need this??? # ---running", "# get the prediction error for this smoothing parameter bestPredErr", "300) ## y1 = cos(x1) ## y1 = zeros(len(x1),'d') #nice", "sum(y ** 2) MAD = 1.4826 * median(abs(y)) if useMAD:", "zeros(len(sparSet), 'd') for i in range(len(sparSet)): predErr = R_runmed_spline_KCV_predErr(x, y,", "math.floor(val) # ---making training/testing subsets------------- test = W[ind:ind + j]", "# ppmArrs = [[] for i in range(len(ARG3))] for ind", "+ 1)) Z = [N for j in Z] R", "ind += j # ----------------------------------------------- # ---fit runmed_spline here---------------------- yFit,", "# get the best smoothing parameter bestSpar = R_runmed_spline_KCV_OPTIMIZATION(x, y,", "SSE = sum(y ** 2) # VLAD. 
Why do I", "= zeros(len(x), 'd') yEval = zeros(len(xEval), 'd') # ppmArrs[ind] =", "[[] for i in range(len(ARG3))] for ind in range(len(ARG3)): x", "= L / K ##min length of pieces W =", "K = int(kwargs['K']) # --Related to K-fold CV--------------------------- L =", "\"\"\" just returns the prediction error \"\"\" K = int(kwargs['K'])", "# ----------------------------------------------------------------------- def R_runmed_spline_KCV_OPTIMIZATION(x, y, sc, **pars): sparRange = array([float(i)", "+ W[ind + j:] train.sort() ind += j # -----------------------------------------------", "of the pieces random.shuffle(W) ind = 0 predErr = 0", "training/testings------------- for val in Z: j = math.floor(val) # ---making", "import plot, show, subplot, savefig, clf, ylim from pprint import", "False # ppmArrs = [[] for i in range(len(ARG3))] for", "\"\"\" K = int(kwargs['K']) # --Related to K-fold CV--------------------------- L", "ARG3[ind][2] # yFit, runMedData = R_runmed_smooth_spline(x, y, x, spar=bestSpar, sc=sc,", "yEval, 'r+-') ylim([-1.5, +1.5]) subplot(212) plot(x1, y1 - yEval, 'go')", "error for this smoothing parameter bestPredErr = R_runmed_spline_KCV_predErr(x, y, spar=bestSpar,", "the prediction error \"\"\" K = int(kwargs['K']) # --Related to", "residualsTest = y[test] - yFit predErr += sum(residualsTest ** 2)", "for i in pars['spar range'].split(',')]) sparStepsNum = int(pars['spar steps number'])", "L % K Z[0:R] = [j + 1 for j", "ARG3[ind][0] y = ARG3[ind][1] xEval = ARG3[ind][2] # yFit =", "'bo') plot(runMedData[0], runMedData[1], 'y^') plot(x1, yEval, 'r+-') ylim([-1.5, +1.5]) subplot(212)", "- sparRange[0]) / sparStepsNum, 5) sparSet = arange(sparRange[0], sparRange[1], sparStep)", "runMedData SSE = sum(y ** 2) MAD = 1.4826 *", "'d') # ppmArrs[ind] = [yFit, yEval] sc.Close() return isSuccessfulFit, bestPredErr,", "need this??? 
# ---running through K training/testings------------- for val in", "Z: j = math.floor(val) # ---making training/testing subsets------------- test =", "spar=bestSpar, sc=sc, **pars) # compare with original SSE # is", "return predErr # ----------------------------------------------------------------------- if __name__ == '__main__': from numpy", "j # ----------------------------------------------- # ---fit runmed_spline here---------------------- yFit, runMed =", "smoothing parameter bestPredErr = R_runmed_spline_KCV_predErr(x, y, spar=bestSpar, sc=sc, **pars) #", "y = ARG3[ind][1] xEval = ARG3[ind][2] # yFit = zeros(len(x),", "from aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit import R_runmed_smooth_spline from numpy import random, array, median,", "use median absolute deviations instead of sum of squared residues", "[j + 1 for j in Z[0:R]] # length of", "if there are few ## print('spar ', spar) return spar", "sum(residualsTest ** 2) allResiduals = hstack((allResiduals, residualsTest)) # ----------------------------------------------- if", "isSuccessfulFit, yFit, yEval, runMedData, predErr = \\ R_runmed_spline_MAIN(x1, y1, x1,", "Dispatch(\"StatConnectorSrv.StatConnector\") sc.Init(\"R\") # get the best smoothing parameter bestSpar =", "for val in Z: j = math.floor(val) # ---making training/testing", "the pieces random.shuffle(W) ind = 0 predErr = 0 allResiduals", "**kwargs) residualsTest = y[test] - yFit predErr += sum(residualsTest **", "sc=sc, **pars) predErrSet[i] = predErr ## p(zip(sparSet, predErrSet)) spar =", "prediction error \"\"\" K = int(kwargs['K']) # --Related to K-fold", "* 0.03 y1 += random.normal(scale=0.2, size=y1.shape) ind = lexsort(keys=(y1, x1))", "plot(x1, y1, 'bo') plot(runMedData[0], runMedData[1], 'y^') plot(x1, yEval, 'r+-') ylim([-1.5,", "= ARG3[ind][0] y = ARG3[ind][1] xEval = ARG3[ind][2] # yFit", "print('done in %s seconds' % (t2 - t1)) subplot(211) plot(x1,", "range(len(ARG3)): x = ARG3[ind][0] y = 
ARG3[ind][1] xEval = ARG3[ind][2]", "in %s seconds' % (t2 - t1)) subplot(211) plot(x1, y1,", "if useMAD: SSE = MAD if bestPredErr < SSE: isSuccessfulFit", "y1 = y1[ind] t1 = c() isSuccessfulFit, yFit, yEval, runMedData,", "----------------------------------------------- # ---fit runmed_spline here---------------------- yFit, runMed = R_runmed_smooth_spline(x[train], y[train],", "----------------------------------------------------------------------- def R_runmed_spline_KCV_OPTIMIZATION(x, y, sc, **pars): sparRange = array([float(i) for", "runMedData = R_runmed_smooth_spline(x, y, xEval, spar=bestSpar, sc=sc, **pars) # ppmArrs[ind]", "= lexsort(keys=(y1, x1)) x1 = x1[ind] y1 = y1[ind] t1", "for i in range(len(ARG3))] for ind in range(len(ARG3)): x =", "runMedData = R_runmed_smooth_spline(x, y, x, spar=bestSpar, sc=sc, **pars) yEval, runMedData", "[yFit, yEval] sc.Close() return isSuccessfulFit, bestPredErr, ppmArrs # ----------------------------------------------------------------------- #", "- t1)) subplot(211) plot(x1, y1, 'bo') plot(runMedData[0], runMedData[1], 'y^') plot(x1,", "= True # use median absolute deviations instead of sum", "range(len(ARG3))] for ind in range(len(ARG3)): x = ARG3[ind][0] y =", "R_runmed_smooth_spline from numpy import random, array, median, zeros, arange, hstack", "True # use median absolute deviations instead of sum of", "L / K ##min length of pieces W = list(range(L))", "# take the last one (smoothest) if there are few", "## y1 = zeros(len(x1),'d') #nice test y1 = x1 *", "sparRange=[0.6, 1.1, 0.1]) t2 = c() print('done in %s seconds'", "= c() isSuccessfulFit, yFit, yEval, runMedData, predErr = \\ R_runmed_spline_MAIN(x1,", "sparStepsNum, 5) sparSet = arange(sparRange[0], sparRange[1], sparStep) predErrSet = zeros(len(sparSet),", "train = W[0:ind] + W[ind + j:] train.sort() ind +=", "+ j] test.sort() train = W[0:ind] + W[ind + j:]", "= x1 * 0.03 y1 += random.normal(scale=0.2, size=y1.shape) ind =", "round((sparRange[1] - 
sparRange[0]) / sparStepsNum, 5) sparSet = arange(sparRange[0], sparRange[1],", "yFit = zeros(len(x), 'd') yEval = zeros(len(xEval), 'd') # ppmArrs[ind]", "prediction error for this smoothing parameter bestPredErr = R_runmed_spline_KCV_predErr(x, y,", "the best smoothing parameter bestSpar = R_runmed_spline_KCV_OPTIMIZATION(x, y, sc=sc, **pars)", "= zeros(len(x1),'d') #nice test y1 = x1 * 0.03 y1", "yFit predErr += sum(residualsTest ** 2) allResiduals = hstack((allResiduals, residualsTest))", "= ARG3[ind][0] y = ARG3[ind][1] xEval = ARG3[ind][2] # yFit,", "allResiduals = hstack((allResiduals, residualsTest)) # ----------------------------------------------- if useMAD: predErr =", "import math myName = 'R_runmed_spline' useMAD = True # use", "SSE: isSuccessfulFit = True # ppmArrs = [[] for i", "'d') for i in range(len(sparSet)): predErr = R_runmed_spline_KCV_predErr(x, y, spar=sparSet[i],", "= False # ppmArrs = [[] for i in range(len(ARG3))]", "spar=bestSpar, sc=sc, **pars) # ppmArrs[ind] = [yFit, yEval] else: isSuccessfulFit", "hstack from win32com.client import Dispatch import math myName = 'R_runmed_spline'", "subplot, savefig, clf, ylim from pprint import pprint as p", "subplot(211) plot(x1, y1, 'bo') plot(runMedData[0], runMedData[1], 'y^') plot(x1, yEval, 'r+-')", "= round((sparRange[1] - sparRange[0]) / sparStepsNum, 5) sparSet = arange(sparRange[0],", "parameter bestSpar = R_runmed_spline_KCV_OPTIMIZATION(x, y, sc=sc, **pars) # get the", "True # ppmArrs = [[] for i in range(len(ARG3))] for", "## y1 = cos(x1) ## y1 = zeros(len(x1),'d') #nice test", "sc = Dispatch(\"StatConnectorSrv.StatConnector\") sc.Init(\"R\") # get the best smoothing parameter", "runMedSpan=0.01, K=10, sparRange=[0.6, 1.1, 0.1]) t2 = c() print('done in", "sum of squared residues # ----------------------------------------------------------------------- def R_runmed_spline_MAIN(ARG3, Controller): pars", "= y1[ind] t1 = c() isSuccessfulFit, yFit, yEval, runMedData, predErr", "= x1[ind] y1 = 
y1[ind] t1 = c() isSuccessfulFit, yFit,", "(t2 - t1)) subplot(211) plot(x1, y1, 'bo') plot(runMedData[0], runMedData[1], 'y^')", "runMedData[1], 'y^') plot(x1, yEval, 'r+-') ylim([-1.5, +1.5]) subplot(212) plot(x1, y1", "= R_runmed_smooth_spline(x, y, x, spar=bestSpar, sc=sc, **pars) yEval, runMedData =", "MAD if bestPredErr < SSE: isSuccessfulFit = True # ppmArrs", "isSuccessfulFit = False # ppmArrs = [[] for i in", "x[test], **kwargs) residualsTest = y[test] - yFit predErr += sum(residualsTest", "ARG3[0][1] sc = Dispatch(\"StatConnectorSrv.StatConnector\") sc.Init(\"R\") # get the best smoothing", "this??? # ---running through K training/testings------------- for val in Z:", "# ppmArrs[ind] = [yFit, yEval] else: isSuccessfulFit = False #", "x1 * 0.03 y1 += random.normal(scale=0.2, size=y1.shape) ind = lexsort(keys=(y1,", "sparStepsNum = int(pars['spar steps number']) sparStep = round((sparRange[1] - sparRange[0])", "K=10, sparRange=[0.6, 1.1, 0.1]) t2 = c() print('done in %s", "'y^') plot(x1, yEval, 'r+-') ylim([-1.5, +1.5]) subplot(212) plot(x1, y1 -", "- yFit predErr += sum(residualsTest ** 2) allResiduals = hstack((allResiduals,", "/ K ##min length of pieces W = list(range(L)) Z", "from win32com.client import Dispatch import math myName = 'R_runmed_spline' useMAD", "sc=sc, **pars) # get the prediction error for this smoothing", "savefig, clf, ylim from pprint import pprint as p from", "x1)) x1 = x1[ind] y1 = y1[ind] t1 = c()", "len(x) N = L / K ##min length of pieces", "range(len(sparSet)): predErr = R_runmed_spline_KCV_predErr(x, y, spar=sparSet[i], sc=sc, **pars) predErrSet[i] =", "predErr = 1.4826 * median(abs(allResiduals)) return predErr # ----------------------------------------------------------------------- if", "sc.Init(\"R\") # get the best smoothing parameter bestSpar = R_runmed_spline_KCV_OPTIMIZATION(x,", "= \\ R_runmed_spline_MAIN(x1, y1, x1, runMedSpan=0.01, K=10, sparRange=[0.6, 1.1, 0.1])", "= Dispatch(\"StatConnectorSrv.StatConnector\") 
sc.Init(\"R\") # get the best smoothing parameter bestSpar", "sparSet = arange(sparRange[0], sparRange[1], sparStep) predErrSet = zeros(len(sparSet), 'd') for", "for this smoothing parameter bestPredErr = R_runmed_spline_KCV_predErr(x, y, spar=bestSpar, sc=sc,", "R_runmed_spline_KCV_predErr(x, y, **kwargs): \"\"\" just returns the prediction error \"\"\"", "ind in range(len(ARG3)): x = ARG3[ind][0] y = ARG3[ind][1] xEval", "test y1 = x1 * 0.03 y1 += random.normal(scale=0.2, size=y1.shape)", "original SSE # is fit successful? # return isSuccessfulFit, yFit,", "predErr = R_runmed_spline_KCV_predErr(x, y, spar=sparSet[i], sc=sc, **pars) predErrSet[i] = predErr", "= ARG3[ind][2] # yFit = zeros(len(x), 'd') yEval = zeros(len(xEval),", "= sum(y ** 2) # VLAD. Why do I need", "j = math.floor(val) # ---making training/testing subsets------------- test = W[ind:ind", "def R_runmed_spline_KCV_OPTIMIZATION(x, y, sc, **pars): sparRange = array([float(i) for i", "cos(x1) ## y1 = zeros(len(x1),'d') #nice test y1 = x1", "aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit import R_runmed_smooth_spline from numpy import random, array, median, zeros,", "just returns the prediction error \"\"\" K = int(kwargs['K']) #", "= [yFit, yEval] sc.Close() return isSuccessfulFit, bestPredErr, ppmArrs # -----------------------------------------------------------------------", "y[train], x[test], **kwargs) residualsTest = y[test] - yFit predErr +=", "if useMAD: predErr = 1.4826 * median(abs(allResiduals)) return predErr #", "= 1.4826 * median(abs(allResiduals)) return predErr # ----------------------------------------------------------------------- if __name__", "'R_runmed_spline' useMAD = True # use median absolute deviations instead", "sparRange = array([float(i) for i in pars['spar range'].split(',')]) sparStepsNum =", "yFit, yEval, runMedData SSE = sum(y ** 2) MAD =", "linspace, cos, lexsort, zeros, sin from pylab import plot, show,", "best smoothing parameter 
bestSpar = R_runmed_spline_KCV_OPTIMIZATION(x, y, sc=sc, **pars) #", "R_runmed_spline_KCV_predErr(x, y, spar=bestSpar, sc=sc, **pars) # compare with original SSE", "= 1.4826 * median(abs(y)) if useMAD: SSE = MAD if", "yFit, runMedData = R_runmed_smooth_spline(x, y, x, spar=bestSpar, sc=sc, **pars) yEval,", "predErrSet[i] = predErr ## p(zip(sparSet, predErrSet)) spar = sparSet[predErrSet ==", "**pars): sparRange = array([float(i) for i in pars['spar range'].split(',')]) sparStepsNum", "t1 = c() isSuccessfulFit, yFit, yEval, runMedData, predErr = \\", "= [[] for i in range(len(ARG3))] for ind in range(len(ARG3)):", "y1 = cos(x1) ## y1 = zeros(len(x1),'d') #nice test y1", "0.03 y1 += random.normal(scale=0.2, size=y1.shape) ind = lexsort(keys=(y1, x1)) x1", "return isSuccessfulFit, yFit, yEval, runMedData SSE = sum(y ** 2)", "ARG3 x = ARG3[0][0] y = ARG3[0][1] sc = Dispatch(\"StatConnectorSrv.StatConnector\")", "----------------------------------------------------------------------- def R_runmed_spline_MAIN(ARG3, Controller): pars = Controller.updatedSettings['refiningPars']['regressionSettings'][myName] # ARG3 x", "linspace(0, 30, 300) ## y1 = cos(x1) ## y1 =", "from pprint import pprint as p from time import clock", "Controller.updatedSettings['refiningPars']['regressionSettings'][myName] # ARG3 x = ARG3[0][0] y = ARG3[0][1] sc", "x1, runMedSpan=0.01, K=10, sparRange=[0.6, 1.1, 0.1]) t2 = c() print('done", "y, x, spar=bestSpar, sc=sc, **pars) yEval, runMedData = R_runmed_smooth_spline(x, y,", "array([float(i) for i in pars['spar range'].split(',')]) sparStepsNum = int(pars['spar steps", "of squared residues # ----------------------------------------------------------------------- def R_runmed_spline_MAIN(ARG3, Controller): pars =", "K Z[0:R] = [j + 1 for j in Z[0:R]]", "xEval = ARG3[ind][2] # yFit, runMedData = R_runmed_smooth_spline(x, y, x,", "few ## print('spar ', spar) return spar # ----------------------------------------------------------------------- #", "= 0 
allResiduals = array([]) SSE = sum(y ** 2)", "R_runmed_spline_KCV_OPTIMIZATION(x, y, sc=sc, **pars) # get the prediction error for", "# use median absolute deviations instead of sum of squared", "compare with original SSE # is fit successful? # return", "ARG3[ind][1] xEval = ARG3[ind][2] # yFit, runMedData = R_runmed_smooth_spline(x, y,", "sc, **pars): sparRange = array([float(i) for i in pars['spar range'].split(',')])", "last one (smoothest) if there are few ## print('spar '," ]
[ "= None import setup_zstd SUPPORT_LEGACY = False SYSTEM_ZSTD = False", "2.7', 'Programming Language :: Python :: 3.4', 'Programming Language ::", "resolve package version; ' 'this should never happen') setup( name='zstandard',", "buffer types # (like memoryview). # Need feature in 1.11", "SUPPORT_LEGACY = True sys.argv.remove('--legacy') if '--system-zstd' in sys.argv: SYSTEM_ZSTD =", "= True sys.argv.remote('--warning-as-errors') # Code for obtaining the Extension instance", "setup try: import cffi except ImportError: cffi = None import", "reuse in other projects. extensions = [ setup_zstd.get_c_extension(name='zstd', support_legacy=SUPPORT_LEGACY, system_zstd=SYSTEM_ZSTD,", "ffi.gc() to declare size of objects so we avoid #", ":: 2.7', 'Programming Language :: Python :: 3.4', 'Programming Language", "except ImportError: cffi = None import setup_zstd SUPPORT_LEGACY = False", "True sys.argv.remote('--warning-as-errors') # Code for obtaining the Extension instance is", "modified and distributed under the terms # of the BSD", ":: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language", "(like memoryview). # Need feature in 1.11 for ffi.gc() to", "continue version = line.split()[2][1:-1] break if not version: raise Exception('could", "if not line.startswith('#define PYTHON_ZSTANDARD_VERSION'): continue version = line.split()[2][1:-1] break if", "version = None with open('c-ext/python-zstandard.h', 'r') as fh: for line", "# All rights reserved. # # This software may be", "'--legacy' in sys.argv: SUPPORT_LEGACY = True sys.argv.remove('--legacy') if '--system-zstd' in", "WARNINGS_AS_ERRORS = True sys.argv.remote('--warning-as-errors') # Code for obtaining the Extension", "= True if '--legacy' in sys.argv: SUPPORT_LEGACY = True sys.argv.remove('--legacy')", "ffi.from_buffer() to handle all buffer types # (like memoryview). #", "in other projects. 
extensions = [ setup_zstd.get_c_extension(name='zstd', support_legacy=SUPPORT_LEGACY, system_zstd=SYSTEM_ZSTD, warnings_as_errors=WARNINGS_AS_ERRORS),", "version=version, description='Zstandard bindings for Python', long_description=open('README.rst', 'r').read(), url='https://github.com/indygreg/python-zstandard', author='<NAME>', author_email='<EMAIL>',", "with open('c-ext/python-zstandard.h', 'r') as fh: for line in fh: if", "Approved :: BSD License', 'Programming Language :: C', 'Programming Language", "Extension instance is in its own module to # facilitate", "sys.argv: WARNINGS_AS_ERRORS = True sys.argv.remote('--warning-as-errors') # Code for obtaining the", "instance is in its own module to # facilitate reuse", "WARNINGS_AS_ERRORS = True if '--legacy' in sys.argv: SUPPORT_LEGACY = True", "facilitate reuse in other projects. extensions = [ setup_zstd.get_c_extension(name='zstd', support_legacy=SUPPORT_LEGACY,", "[ setup_zstd.get_c_extension(name='zstd', support_legacy=SUPPORT_LEGACY, system_zstd=SYSTEM_ZSTD, warnings_as_errors=WARNINGS_AS_ERRORS), ] install_requires = [] if", "url='https://github.com/indygreg/python-zstandard', author='<NAME>', author_email='<EMAIL>', license='BSD', classifiers=[ 'Development Status :: 4 -", "Language :: C', 'Programming Language :: Python :: 2.7', 'Programming", "See the LICENSE file for details. import os import sys", "= False WARNINGS_AS_ERRORS = False if os.environ.get('ZSTD_WARNINGS_AS_ERRORS', ''): WARNINGS_AS_ERRORS =", "'License :: OSI Approved :: BSD License', 'Programming Language ::", "should never happen') setup( name='zstandard', version=version, description='Zstandard bindings for Python',", ":: 3.5', 'Programming Language :: Python :: 3.6', ], keywords='zstandard", "software may be modified and distributed under the terms #", "to # facilitate reuse in other projects. 
extensions = [", "support_legacy=SUPPORT_LEGACY, system_zstd=SYSTEM_ZSTD, warnings_as_errors=WARNINGS_AS_ERRORS), ] install_requires = [] if cffi: import", "1.10 for ffi.from_buffer() to handle all buffer types # (like", "'--system-zstd' in sys.argv: SYSTEM_ZSTD = True sys.argv.remove('--system-zstd') if '--warnings-as-errors' in", "sys.argv.remove('--system-zstd') if '--warnings-as-errors' in sys.argv: WARNINGS_AS_ERRORS = True sys.argv.remote('--warning-as-errors') #", "import cffi except ImportError: cffi = None import setup_zstd SUPPORT_LEGACY", "#!/usr/bin/env python # Copyright (c) 2016-present, <NAME> # All rights", "may be modified and distributed under the terms # of", "if '--legacy' in sys.argv: SUPPORT_LEGACY = True sys.argv.remove('--legacy') if '--system-zstd'", "in sys.argv: WARNINGS_AS_ERRORS = True sys.argv.remote('--warning-as-errors') # Code for obtaining", "Python :: 3.5', 'Programming Language :: Python :: 3.6', ],", "bindings for Python', long_description=open('README.rst', 'r').read(), url='https://github.com/indygreg/python-zstandard', author='<NAME>', author_email='<EMAIL>', license='BSD', classifiers=[", "'r') as fh: for line in fh: if not line.startswith('#define", "is in its own module to # facilitate reuse in", "module to # facilitate reuse in other projects. extensions =", "package version; ' 'this should never happen') setup( name='zstandard', version=version,", "None import setup_zstd SUPPORT_LEGACY = False SYSTEM_ZSTD = False WARNINGS_AS_ERRORS", ":: Python :: 3.4', 'Programming Language :: Python :: 3.5',", ":: Python :: 3.5', 'Programming Language :: Python :: 3.6',", "C', 'Programming Language :: Python :: 2.7', 'Programming Language ::", "install_requires.append('cffi>=1.11') version = None with open('c-ext/python-zstandard.h', 'r') as fh: for", "own module to # facilitate reuse in other projects. extensions", "reserved. 
# # This software may be modified and distributed", "import make_cffi extensions.append(make_cffi.ffi.distutils_extension()) # Need change in 1.10 for ffi.from_buffer()", "install_requires = [] if cffi: import make_cffi extensions.append(make_cffi.ffi.distutils_extension()) # Need", "# (like memoryview). # Need feature in 1.11 for ffi.gc()", "warnings_as_errors=WARNINGS_AS_ERRORS), ] install_requires = [] if cffi: import make_cffi extensions.append(make_cffi.ffi.distutils_extension())", "long_description=open('README.rst', 'r').read(), url='https://github.com/indygreg/python-zstandard', author='<NAME>', author_email='<EMAIL>', license='BSD', classifiers=[ 'Development Status ::", "'Intended Audience :: Developers', 'License :: OSI Approved :: BSD", "[] if cffi: import make_cffi extensions.append(make_cffi.ffi.distutils_extension()) # Need change in", "line in fh: if not line.startswith('#define PYTHON_ZSTANDARD_VERSION'): continue version =", "True if '--legacy' in sys.argv: SUPPORT_LEGACY = True sys.argv.remove('--legacy') if", "under the terms # of the BSD license. See the", "import os import sys from setuptools import setup try: import", "All rights reserved. # # This software may be modified", "terms # of the BSD license. See the LICENSE file", "collection pitfalls. install_requires.append('cffi>=1.11') version = None with open('c-ext/python-zstandard.h', 'r') as", "its own module to # facilitate reuse in other projects.", "# Code for obtaining the Extension instance is in its", "# This software may be modified and distributed under the", "None with open('c-ext/python-zstandard.h', 'r') as fh: for line in fh:", "# Copyright (c) 2016-present, <NAME> # All rights reserved. #", "distributed under the terms # of the BSD license. See", "be modified and distributed under the terms # of the", ":: OSI Approved :: BSD License', 'Programming Language :: C',", "# facilitate reuse in other projects. 
extensions = [ setup_zstd.get_c_extension(name='zstd',", "# Need change in 1.10 for ffi.from_buffer() to handle all", "license. See the LICENSE file for details. import os import", "Need change in 1.10 for ffi.from_buffer() to handle all buffer", "the terms # of the BSD license. See the LICENSE", "fh: if not line.startswith('#define PYTHON_ZSTANDARD_VERSION'): continue version = line.split()[2][1:-1] break", "version = line.split()[2][1:-1] break if not version: raise Exception('could not", "fh: for line in fh: if not line.startswith('#define PYTHON_ZSTANDARD_VERSION'): continue", "cffi except ImportError: cffi = None import setup_zstd SUPPORT_LEGACY =", "to handle all buffer types # (like memoryview). # Need", "- Beta', 'Intended Audience :: Developers', 'License :: OSI Approved", "] install_requires = [] if cffi: import make_cffi extensions.append(make_cffi.ffi.distutils_extension()) #", "setup_zstd SUPPORT_LEGACY = False SYSTEM_ZSTD = False WARNINGS_AS_ERRORS = False", "to declare size of objects so we avoid # garbage", "if not version: raise Exception('could not resolve package version; '", "try: import cffi except ImportError: cffi = None import setup_zstd", "Language :: Python :: 2.7', 'Programming Language :: Python ::", "details. import os import sys from setuptools import setup try:", "True sys.argv.remove('--legacy') if '--system-zstd' in sys.argv: SYSTEM_ZSTD = True sys.argv.remove('--system-zstd')", "obtaining the Extension instance is in its own module to", "Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming", ":: Developers', 'License :: OSI Approved :: BSD License', 'Programming", "Python :: 2.7', 'Programming Language :: Python :: 3.4', 'Programming", "Language :: Python :: 3.5', 'Programming Language :: Python ::", "garbage collection pitfalls. 
install_requires.append('cffi>=1.11') version = None with open('c-ext/python-zstandard.h', 'r')", ":: Python :: 3.6', ], keywords='zstandard zstd compression', packages=['zstandard'], ext_modules=extensions,", "= [ setup_zstd.get_c_extension(name='zstd', support_legacy=SUPPORT_LEGACY, system_zstd=SYSTEM_ZSTD, warnings_as_errors=WARNINGS_AS_ERRORS), ] install_requires = []", "'--warnings-as-errors' in sys.argv: WARNINGS_AS_ERRORS = True sys.argv.remote('--warning-as-errors') # Code for", "# garbage collection pitfalls. install_requires.append('cffi>=1.11') version = None with open('c-ext/python-zstandard.h',", "sys.argv: SUPPORT_LEGACY = True sys.argv.remove('--legacy') if '--system-zstd' in sys.argv: SYSTEM_ZSTD", "objects so we avoid # garbage collection pitfalls. install_requires.append('cffi>=1.11') version", "in sys.argv: SYSTEM_ZSTD = True sys.argv.remove('--system-zstd') if '--warnings-as-errors' in sys.argv:", "sys.argv: SYSTEM_ZSTD = True sys.argv.remove('--system-zstd') if '--warnings-as-errors' in sys.argv: WARNINGS_AS_ERRORS", "SYSTEM_ZSTD = True sys.argv.remove('--system-zstd') if '--warnings-as-errors' in sys.argv: WARNINGS_AS_ERRORS =", "Need feature in 1.11 for ffi.gc() to declare size of", "in fh: if not line.startswith('#define PYTHON_ZSTANDARD_VERSION'): continue version = line.split()[2][1:-1]", "avoid # garbage collection pitfalls. install_requires.append('cffi>=1.11') version = None with", "'Development Status :: 4 - Beta', 'Intended Audience :: Developers',", "(c) 2016-present, <NAME> # All rights reserved. # # This", "2016-present, <NAME> # All rights reserved. # # This software", "BSD license. See the LICENSE file for details. import os", "so we avoid # garbage collection pitfalls. 
install_requires.append('cffi>=1.11') version =", "= [] if cffi: import make_cffi extensions.append(make_cffi.ffi.distutils_extension()) # Need change", "Python', long_description=open('README.rst', 'r').read(), url='https://github.com/indygreg/python-zstandard', author='<NAME>', author_email='<EMAIL>', license='BSD', classifiers=[ 'Development Status", "break if not version: raise Exception('could not resolve package version;", "never happen') setup( name='zstandard', version=version, description='Zstandard bindings for Python', long_description=open('README.rst',", "as fh: for line in fh: if not line.startswith('#define PYTHON_ZSTANDARD_VERSION'):", "'Programming Language :: Python :: 2.7', 'Programming Language :: Python", "Beta', 'Intended Audience :: Developers', 'License :: OSI Approved ::", "Copyright (c) 2016-present, <NAME> # All rights reserved. # #", "not version: raise Exception('could not resolve package version; ' 'this", "3.5', 'Programming Language :: Python :: 3.6', ], keywords='zstandard zstd", ":: BSD License', 'Programming Language :: C', 'Programming Language ::", "setup( name='zstandard', version=version, description='Zstandard bindings for Python', long_description=open('README.rst', 'r').read(), url='https://github.com/indygreg/python-zstandard',", "# Need feature in 1.11 for ffi.gc() to declare size", "'Programming Language :: Python :: 3.6', ], keywords='zstandard zstd compression',", "for ffi.from_buffer() to handle all buffer types # (like memoryview).", "This software may be modified and distributed under the terms", "raise Exception('could not resolve package version; ' 'this should never", "'this should never happen') setup( name='zstandard', version=version, description='Zstandard bindings for", "Language :: Python :: 3.6', ], keywords='zstandard zstd compression', packages=['zstandard'],", "'r').read(), url='https://github.com/indygreg/python-zstandard', author='<NAME>', author_email='<EMAIL>', license='BSD', classifiers=[ 
'Development Status :: 4", "make_cffi extensions.append(make_cffi.ffi.distutils_extension()) # Need change in 1.10 for ffi.from_buffer() to", "setuptools import setup try: import cffi except ImportError: cffi =", "SUPPORT_LEGACY = False SYSTEM_ZSTD = False WARNINGS_AS_ERRORS = False if", "False SYSTEM_ZSTD = False WARNINGS_AS_ERRORS = False if os.environ.get('ZSTD_WARNINGS_AS_ERRORS', ''):", "= True sys.argv.remove('--legacy') if '--system-zstd' in sys.argv: SYSTEM_ZSTD = True", "not line.startswith('#define PYTHON_ZSTANDARD_VERSION'): continue version = line.split()[2][1:-1] break if not", "projects. extensions = [ setup_zstd.get_c_extension(name='zstd', support_legacy=SUPPORT_LEGACY, system_zstd=SYSTEM_ZSTD, warnings_as_errors=WARNINGS_AS_ERRORS), ] install_requires", "author='<NAME>', author_email='<EMAIL>', license='BSD', classifiers=[ 'Development Status :: 4 - Beta',", "= False if os.environ.get('ZSTD_WARNINGS_AS_ERRORS', ''): WARNINGS_AS_ERRORS = True if '--legacy'", "WARNINGS_AS_ERRORS = False if os.environ.get('ZSTD_WARNINGS_AS_ERRORS', ''): WARNINGS_AS_ERRORS = True if", "if '--warnings-as-errors' in sys.argv: WARNINGS_AS_ERRORS = True sys.argv.remote('--warning-as-errors') # Code", "# # This software may be modified and distributed under", "= True sys.argv.remove('--system-zstd') if '--warnings-as-errors' in sys.argv: WARNINGS_AS_ERRORS = True", "ImportError: cffi = None import setup_zstd SUPPORT_LEGACY = False SYSTEM_ZSTD", "cffi = None import setup_zstd SUPPORT_LEGACY = False SYSTEM_ZSTD =", "for obtaining the Extension instance is in its own module", "file for details. import os import sys from setuptools import", "setup_zstd.get_c_extension(name='zstd', support_legacy=SUPPORT_LEGACY, system_zstd=SYSTEM_ZSTD, warnings_as_errors=WARNINGS_AS_ERRORS), ] install_requires = [] if cffi:", "types # (like memoryview). 
# Need feature in 1.11 for", "4 - Beta', 'Intended Audience :: Developers', 'License :: OSI", "'Programming Language :: Python :: 3.4', 'Programming Language :: Python", "if cffi: import make_cffi extensions.append(make_cffi.ffi.distutils_extension()) # Need change in 1.10", "change in 1.10 for ffi.from_buffer() to handle all buffer types", "for ffi.gc() to declare size of objects so we avoid", "name='zstandard', version=version, description='Zstandard bindings for Python', long_description=open('README.rst', 'r').read(), url='https://github.com/indygreg/python-zstandard', author='<NAME>',", "Developers', 'License :: OSI Approved :: BSD License', 'Programming Language", "description='Zstandard bindings for Python', long_description=open('README.rst', 'r').read(), url='https://github.com/indygreg/python-zstandard', author='<NAME>', author_email='<EMAIL>', license='BSD',", "LICENSE file for details. import os import sys from setuptools", "feature in 1.11 for ffi.gc() to declare size of objects", "for line in fh: if not line.startswith('#define PYTHON_ZSTANDARD_VERSION'): continue version", "import setup try: import cffi except ImportError: cffi = None", "we avoid # garbage collection pitfalls. 
install_requires.append('cffi>=1.11') version = None", "extensions = [ setup_zstd.get_c_extension(name='zstd', support_legacy=SUPPORT_LEGACY, system_zstd=SYSTEM_ZSTD, warnings_as_errors=WARNINGS_AS_ERRORS), ] install_requires =", "os.environ.get('ZSTD_WARNINGS_AS_ERRORS', ''): WARNINGS_AS_ERRORS = True if '--legacy' in sys.argv: SUPPORT_LEGACY", "python # Copyright (c) 2016-present, <NAME> # All rights reserved.", "system_zstd=SYSTEM_ZSTD, warnings_as_errors=WARNINGS_AS_ERRORS), ] install_requires = [] if cffi: import make_cffi", "line.split()[2][1:-1] break if not version: raise Exception('could not resolve package", "sys.argv.remote('--warning-as-errors') # Code for obtaining the Extension instance is in", "1.11 for ffi.gc() to declare size of objects so we", "= None with open('c-ext/python-zstandard.h', 'r') as fh: for line in", "cffi: import make_cffi extensions.append(make_cffi.ffi.distutils_extension()) # Need change in 1.10 for", "handle all buffer types # (like memoryview). # Need feature", "SYSTEM_ZSTD = False WARNINGS_AS_ERRORS = False if os.environ.get('ZSTD_WARNINGS_AS_ERRORS', ''): WARNINGS_AS_ERRORS", "size of objects so we avoid # garbage collection pitfalls.", "for Python', long_description=open('README.rst', 'r').read(), url='https://github.com/indygreg/python-zstandard', author='<NAME>', author_email='<EMAIL>', license='BSD', classifiers=[ 'Development", "license='BSD', classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience", "pitfalls. 
install_requires.append('cffi>=1.11') version = None with open('c-ext/python-zstandard.h', 'r') as fh:", "False WARNINGS_AS_ERRORS = False if os.environ.get('ZSTD_WARNINGS_AS_ERRORS', ''): WARNINGS_AS_ERRORS = True", "Exception('could not resolve package version; ' 'this should never happen')", "BSD License', 'Programming Language :: C', 'Programming Language :: Python", "if os.environ.get('ZSTD_WARNINGS_AS_ERRORS', ''): WARNINGS_AS_ERRORS = True if '--legacy' in sys.argv:", "and distributed under the terms # of the BSD license.", "author_email='<EMAIL>', license='BSD', classifiers=[ 'Development Status :: 4 - Beta', 'Intended", "''): WARNINGS_AS_ERRORS = True if '--legacy' in sys.argv: SUPPORT_LEGACY =", "'Programming Language :: Python :: 3.5', 'Programming Language :: Python", "os import sys from setuptools import setup try: import cffi", "from setuptools import setup try: import cffi except ImportError: cffi", "False if os.environ.get('ZSTD_WARNINGS_AS_ERRORS', ''): WARNINGS_AS_ERRORS = True if '--legacy' in", "not resolve package version; ' 'this should never happen') setup(", ":: 3.6', ], keywords='zstandard zstd compression', packages=['zstandard'], ext_modules=extensions, test_suite='tests', install_requires=install_requires,", "extensions.append(make_cffi.ffi.distutils_extension()) # Need change in 1.10 for ffi.from_buffer() to handle", "in 1.11 for ffi.gc() to declare size of objects so", "version; ' 'this should never happen') setup( name='zstandard', version=version, description='Zstandard", "in sys.argv: SUPPORT_LEGACY = True sys.argv.remove('--legacy') if '--system-zstd' in sys.argv:", ":: C', 'Programming Language :: Python :: 2.7', 'Programming Language", ":: Python :: 2.7', 'Programming Language :: Python :: 3.4',", "Code for obtaining the Extension instance is in its own", "for details. 
import os import sys from setuptools import setup", "if '--system-zstd' in sys.argv: SYSTEM_ZSTD = True sys.argv.remove('--system-zstd') if '--warnings-as-errors'", "import setup_zstd SUPPORT_LEGACY = False SYSTEM_ZSTD = False WARNINGS_AS_ERRORS =", "OSI Approved :: BSD License', 'Programming Language :: C', 'Programming", "Python :: 3.6', ], keywords='zstandard zstd compression', packages=['zstandard'], ext_modules=extensions, test_suite='tests',", "sys.argv.remove('--legacy') if '--system-zstd' in sys.argv: SYSTEM_ZSTD = True sys.argv.remove('--system-zstd') if", "in its own module to # facilitate reuse in other", "Language :: Python :: 3.4', 'Programming Language :: Python ::", "3.6', ], keywords='zstandard zstd compression', packages=['zstandard'], ext_modules=extensions, test_suite='tests', install_requires=install_requires, )", "the Extension instance is in its own module to #", "Audience :: Developers', 'License :: OSI Approved :: BSD License',", "declare size of objects so we avoid # garbage collection", "classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience ::", "3.4', 'Programming Language :: Python :: 3.5', 'Programming Language ::", "of objects so we avoid # garbage collection pitfalls. install_requires.append('cffi>=1.11')", "in 1.10 for ffi.from_buffer() to handle all buffer types #", "Status :: 4 - Beta', 'Intended Audience :: Developers', 'License", "= False SYSTEM_ZSTD = False WARNINGS_AS_ERRORS = False if os.environ.get('ZSTD_WARNINGS_AS_ERRORS',", "rights reserved. # # This software may be modified and", "happen') setup( name='zstandard', version=version, description='Zstandard bindings for Python', long_description=open('README.rst', 'r').read(),", "open('c-ext/python-zstandard.h', 'r') as fh: for line in fh: if not", "line.startswith('#define PYTHON_ZSTANDARD_VERSION'): continue version = line.split()[2][1:-1] break if not version:", ":: 4 - Beta', 'Intended Audience :: Developers', 'License ::", "the BSD license. 
See the LICENSE file for details. import", "version: raise Exception('could not resolve package version; ' 'this should", "of the BSD license. See the LICENSE file for details.", "import sys from setuptools import setup try: import cffi except", "' 'this should never happen') setup( name='zstandard', version=version, description='Zstandard bindings", "all buffer types # (like memoryview). # Need feature in", "License', 'Programming Language :: C', 'Programming Language :: Python ::", "PYTHON_ZSTANDARD_VERSION'): continue version = line.split()[2][1:-1] break if not version: raise", "the LICENSE file for details. import os import sys from", "True sys.argv.remove('--system-zstd') if '--warnings-as-errors' in sys.argv: WARNINGS_AS_ERRORS = True sys.argv.remote('--warning-as-errors')", "other projects. extensions = [ setup_zstd.get_c_extension(name='zstd', support_legacy=SUPPORT_LEGACY, system_zstd=SYSTEM_ZSTD, warnings_as_errors=WARNINGS_AS_ERRORS), ]", "memoryview). # Need feature in 1.11 for ffi.gc() to declare", "# of the BSD license. See the LICENSE file for", "= line.split()[2][1:-1] break if not version: raise Exception('could not resolve", "sys from setuptools import setup try: import cffi except ImportError:", "<NAME> # All rights reserved. # # This software may", "'Programming Language :: C', 'Programming Language :: Python :: 2.7'," ]
[ "import math num = float(input('Digite um numero real qualquer: '))", "numero real qualquer: ')) print('O numero: {} tem a parte", "<gh_stars>0 import math num = float(input('Digite um numero real qualquer:", "')) print('O numero: {} tem a parte inteira {}'.format(num, math.trunc(num)))", "math num = float(input('Digite um numero real qualquer: ')) print('O", "qualquer: ')) print('O numero: {} tem a parte inteira {}'.format(num,", "um numero real qualquer: ')) print('O numero: {} tem a", "num = float(input('Digite um numero real qualquer: ')) print('O numero:", "float(input('Digite um numero real qualquer: ')) print('O numero: {} tem", "= float(input('Digite um numero real qualquer: ')) print('O numero: {}", "real qualquer: ')) print('O numero: {} tem a parte inteira" ]
[ "rotation_invariant_encoding from .rotation_invariant_encoding import RotationInvariantEncoding from .rotation_invariant_pooling import RotationInvariantPooling __all__", "Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.", "from .rotation_invariant_encoding import RotationInvariantEncoding from .rotation_invariant_pooling import RotationInvariantPooling __all__ =", "(c) Facebook, Inc. and its affiliates. All Rights Reserved. import", "# Copyright (c) Facebook, Inc. and its affiliates. All Rights", "from .active_rotating_filter import ActiveRotatingFilter from .rotation_invariant_encoding import rotation_invariant_encoding from .rotation_invariant_encoding", "<filename>mmdet/ops/orn/functions/__init__.py # Copyright (c) Facebook, Inc. and its affiliates. All", ".rotation_invariant_encoding import rotation_invariant_encoding from .rotation_invariant_encoding import RotationInvariantEncoding from .rotation_invariant_pooling import", "Reserved. import torch from .active_rotating_filter import active_rotating_filter from .active_rotating_filter import", ".rotation_invariant_encoding import RotationInvariantEncoding from .rotation_invariant_pooling import RotationInvariantPooling __all__ = ['ActiveRotatingFilter',", "import active_rotating_filter from .active_rotating_filter import ActiveRotatingFilter from .rotation_invariant_encoding import rotation_invariant_encoding", "affiliates. All Rights Reserved. 
import torch from .active_rotating_filter import active_rotating_filter", ".active_rotating_filter import active_rotating_filter from .active_rotating_filter import ActiveRotatingFilter from .rotation_invariant_encoding import", "RotationInvariantEncoding from .rotation_invariant_pooling import RotationInvariantPooling __all__ = ['ActiveRotatingFilter', 'active_rotating_filter', 'rotation_invariant_encoding',", ".active_rotating_filter import ActiveRotatingFilter from .rotation_invariant_encoding import rotation_invariant_encoding from .rotation_invariant_encoding import", "from .active_rotating_filter import active_rotating_filter from .active_rotating_filter import ActiveRotatingFilter from .rotation_invariant_encoding", "ActiveRotatingFilter from .rotation_invariant_encoding import rotation_invariant_encoding from .rotation_invariant_encoding import RotationInvariantEncoding from", "import rotation_invariant_encoding from .rotation_invariant_encoding import RotationInvariantEncoding from .rotation_invariant_pooling import RotationInvariantPooling", "and its affiliates. All Rights Reserved. import torch from .active_rotating_filter", "import RotationInvariantEncoding from .rotation_invariant_pooling import RotationInvariantPooling __all__ = ['ActiveRotatingFilter', 'active_rotating_filter',", "Inc. and its affiliates. All Rights Reserved. import torch from", "active_rotating_filter from .active_rotating_filter import ActiveRotatingFilter from .rotation_invariant_encoding import rotation_invariant_encoding from", "All Rights Reserved. import torch from .active_rotating_filter import active_rotating_filter from", "from .rotation_invariant_encoding import rotation_invariant_encoding from .rotation_invariant_encoding import RotationInvariantEncoding from .rotation_invariant_pooling", "import torch from .active_rotating_filter import active_rotating_filter from .active_rotating_filter import ActiveRotatingFilter", "its affiliates. All Rights Reserved. 
import torch from .active_rotating_filter import", "Facebook, Inc. and its affiliates. All Rights Reserved. import torch", "Rights Reserved. import torch from .active_rotating_filter import active_rotating_filter from .active_rotating_filter", "from .rotation_invariant_pooling import RotationInvariantPooling __all__ = ['ActiveRotatingFilter', 'active_rotating_filter', 'rotation_invariant_encoding', 'RotationInvariantEncoding',", ".rotation_invariant_pooling import RotationInvariantPooling __all__ = ['ActiveRotatingFilter', 'active_rotating_filter', 'rotation_invariant_encoding', 'RotationInvariantEncoding', 'RotationInvariantPooling']", "import ActiveRotatingFilter from .rotation_invariant_encoding import rotation_invariant_encoding from .rotation_invariant_encoding import RotationInvariantEncoding", "torch from .active_rotating_filter import active_rotating_filter from .active_rotating_filter import ActiveRotatingFilter from" ]
[ "\"test docstring\" pass assert testfn.__doc__ == \"test docstring\" assert testfn.__name__", "@cacheit def testfn(): \"test docstring\" pass assert testfn.__doc__ == \"test", "import cacheit def test_cacheit_doc(): @cacheit def testfn(): \"test docstring\" pass", "from sympy.core.cache import cacheit def test_cacheit_doc(): @cacheit def testfn(): \"test", "def testfn(): \"test docstring\" pass assert testfn.__doc__ == \"test docstring\"", "pass assert testfn.__doc__ == \"test docstring\" assert testfn.__name__ == \"testfn\"", "def test_cacheit_doc(): @cacheit def testfn(): \"test docstring\" pass assert testfn.__doc__", "cacheit def test_cacheit_doc(): @cacheit def testfn(): \"test docstring\" pass assert", "docstring\" pass assert testfn.__doc__ == \"test docstring\" assert testfn.__name__ ==", "<reponame>eriknw/sympy from sympy.core.cache import cacheit def test_cacheit_doc(): @cacheit def testfn():", "testfn(): \"test docstring\" pass assert testfn.__doc__ == \"test docstring\" assert", "sympy.core.cache import cacheit def test_cacheit_doc(): @cacheit def testfn(): \"test docstring\"", "test_cacheit_doc(): @cacheit def testfn(): \"test docstring\" pass assert testfn.__doc__ ==" ]
[ "of ii among positives in sorted order rank_pos_target = torch.sum(target_sorted_order)", "minimum fg logit #since changing its score does not have", "current_sorting_error = torch.sum(fg_relations*(1-fg_targets))/rank_pos #Find examples in the target sorted order", "ranking_error[ii]=FP_num/rank # Current sorting error of example ii. (Eq. 7)", "AP and store gradients for relevant bg examples if (max_prec<=current_prec):", "pmf (i.e. missorted_examples/sorting_pmf_denom) fg_grad += (missorted_examples*(sorting_error[ii]/sorting_pmf_denom)) #Normalize gradients by number", "score differences with bgs bg_relations=relevant_bg_logits-fg_logits[ii] #Apply piecewise linear function and", "#Compute interpolated AP and store gradients for relevant bg examples", "among positives in sorted order rank_pos_target = torch.sum(target_sorted_order) #Compute target", "for Ranking Error if FP_num > eps: #For ii the", "with larger scores rank_pos=1+torch.sum(fg_relations) FP_num=torch.sum(bg_relations) #Store the total since it", "+= (bg_relations*(ranking_error[ii]/FP_num)) #Find the positives that are misranked (the cause", "= (targets == 1) fg_logits = logits[fg_labels] fg_num = len(fg_logits)", "Rank of ii among pos and false positive number (bg", "aLRP Regression error rank[ii]=rank_pos+FP_num #Compute precision for this example current_prec=rank_pos/rank[ii]", "bg_relations=torch.clamp(bg_relations/(2*delta)+0.5,min=0,max=1) #Compute the rank of the example within fgs and", "is 0, this is also total target error target_sorting_error= torch.sum(target_sorted_order*(1-fg_targets))/rank_pos_target", "aLRPLoss(torch.autograd.Function): @staticmethod def forward(ctx, logits, targets, regression_losses, delta=1., eps=1e-5): classification_grads=torch.zeros(logits.shape).cuda()", "sorting_error[ii] #For positives, distribute error via sorting pmf (i.e. 
missorted_examples/sorting_pmf_denom)", "of ii among pos and false positive number (bg with", "order for ii in order: #x_ij s as score differences", "the ranking error fg_grad[ii] -= ranking_error[ii] #For negatives, distribute error", "= torch.min(fg_logits)-delta_RS relevant_bg_labels=((targets==0) & (logits>=threshold_logit)) relevant_bg_logits = logits[relevant_bg_labels] relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda() sorting_error=torch.zeros(fg_num).cuda()", "classification_grads /= fg_num cls_loss=1-prec.mean() ctx.save_for_backward(classification_grads) return cls_loss @staticmethod def backward(ctx,", "* fg_relations #The rank of ii among positives in sorted", "sorting_pmf_denom = torch.sum(missorted_examples) #Identity Update for Sorting Error if sorting_pmf_denom", "target_sorted_order = iou_relations * fg_relations #The rank of ii among", "current_sorting_error - target_sorting_error #Identity Update for Ranking Error if FP_num", "rank_pos=1+torch.sum(fg_relations) FP_num=torch.sum(bg_relations) #Store the total since it is normalizer also", "missorted_examples/sorting_pmf_denom) fg_grad += (missorted_examples*(sorting_error[ii]/sorting_pmf_denom)) #Normalize gradients by number of positives", "classification_grads[relevant_bg_labels]= relevant_bg_grad classification_grads /= fg_num cls_loss=1-prec.mean() ctx.save_for_backward(classification_grads) return cls_loss @staticmethod", "return g1*out_grad1, None, None, None, None class APLoss(torch.autograd.Function): @staticmethod def", "differences with bgs bg_relations=relevant_bg_logits-fg_logits[ii] #Apply piecewise linear function and determine", "the sorting error fg_grad[ii] -= sorting_error[ii] #For positives, distribute error", "formulation bg gradient classification_grads[relevant_bg_labels]= relevant_bg_grad classification_grads /= fg_num cls_loss=1-prec.mean() ctx.save_for_backward(classification_grads)", "distribute error via ranking pmf (i.e. 
bg_relations/FP_num) relevant_bg_grad += (bg_relations*(ranking_error[ii]/FP_num))", "cls_loss=1-prec.mean() ctx.save_for_backward(classification_grads) return cls_loss @staticmethod def backward(ctx, out_grad1): g1, =ctx.saved_tensors", "misranked (the cause of the error) #These are the ones", "smaller IoU but larger logits missorted_examples = (~ iou_relations) *", "None, None class APLoss(torch.autograd.Function): @staticmethod def forward(ctx, logits, targets, delta=1.):", "negatives, distribute error via ranking pmf (i.e. bg_relations/FP_num) relevant_bg_grad +=", "positive following the order for ii in order: # Difference", "> eps: #For ii the update is the ranking error", "error on example ii sorting_error[ii] = current_sorting_error - target_sorting_error #Identity", "in rank_pos fg_relations[ii]=0 #x_ij s as score differences with bgs", "that are misranked (the cause of the error) #These are", "= torch.min(fg_logits)-delta #Get valid bg logits relevant_bg_labels=((targets==0)&(logits>=threshold_logit)) relevant_bg_logits=logits[relevant_bg_labels] relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda() rank=torch.zeros(fg_num).cuda()", ">= fg_targets[ii]) target_sorted_order = iou_relations * fg_relations #The rank of", "#Store the total since it is normalizer also for aLRP", "bg gradient classification_grads[relevant_bg_labels]= relevant_bg_grad classification_grads /= fg_num cls_loss=1-prec.mean() ctx.save_for_backward(classification_grads) return", "sorting error fg_grad[ii] -= sorting_error[ii] #For positives, distribute error via", "the target sorted order for example ii iou_relations = (fg_targets", "FP_num=torch.sum(bg_relations) # Rank of ii among all examples rank=rank_pos+FP_num #", "compute classification loss prec[ii]=rank_pos/rank[ii] #For stability, set eps to a", "fg_targets[ii]) target_sorted_order = iou_relations * fg_relations #The rank of ii", "ii in order: # Difference Transforms (x_ij) fg_relations=fg_logits-fg_logits[ii] 
bg_relations=relevant_bg_logits-fg_logits[ii] if", "fg_labels = (targets > 0.) fg_logits = logits[fg_labels] fg_targets =", "= (fg_relations >= 0).float() bg_relations = (bg_relations >= 0).float() #", "+= (bg_relations/rank[ii]) else: relevant_bg_grad += (bg_relations/rank[ii])*(((1-max_prec)/(1-current_prec))) #Store fg gradients fg_grad[ii]=-(1-max_prec)", "Update for Sorting Error if sorting_pmf_denom > eps: #For ii", "scores) rank_pos=torch.sum(fg_relations) FP_num=torch.sum(bg_relations) # Rank of ii among all examples", "number of bgs with larger scores rank_pos=1+torch.sum(fg_relations) FP_num=torch.sum(bg_relations) #Store the", "ii iou_relations = (fg_targets >= fg_targets[ii]) target_sorted_order = iou_relations *", "if sorting_pmf_denom > eps: #For ii the update is the", "since it is normalizer also for aLRP Regression error rank[ii]=rank_pos+FP_num", "#For ii the update is the ranking error fg_grad[ii] -=", "grads if FP_num > eps: fg_grad[ii] = -(torch.sum(fg_relations*regression_losses)+FP_num)/rank[ii] relevant_bg_grad +=", "differences with fgs fg_relations=fg_logits-fg_logits[ii] #Apply piecewise linear function and determine", "rank_pos=torch.sum(fg_relations) FP_num=torch.sum(bg_relations) # Rank of ii among all examples rank=rank_pos+FP_num", "delta_RS=0.50, eps=1e-10): classification_grads=torch.zeros(logits.shape).cuda() #Filter fg logits fg_labels = (targets >", "relevant_bg_grad += (bg_relations/rank[ii]) else: relevant_bg_grad += (bg_relations/rank[ii])*(((1-max_prec)/(1-current_prec))) #Store fg gradients", "sorting_error=torch.zeros(fg_num).cuda() ranking_error=torch.zeros(fg_num).cuda() fg_grad=torch.zeros(fg_num).cuda() #sort the fg logits order=torch.argsort(fg_logits) #Loops over", "bg_relations=torch.clamp(bg_relations/(2*delta_RS)+0.5,min=0,max=1) else: fg_relations = (fg_relations >= 0).float() bg_relations = (bg_relations", "rank=torch.zeros(fg_num).cuda() prec=torch.zeros(fg_num).cuda() fg_grad=torch.zeros(fg_num).cuda() 
max_prec=0 #sort the fg logits order=torch.argsort(fg_logits) #Loops", "error fg_grad[ii] -= ranking_error[ii] #For negatives, distribute error via ranking", "over each positive following the order for ii in order:", "is also total target error target_sorting_error= torch.sum(target_sorted_order*(1-fg_targets))/rank_pos_target #Compute sorting error", "of positives classification_grads[fg_labels]= (fg_grad/fg_num) classification_grads[relevant_bg_labels]= (relevant_bg_grad/fg_num) ctx.save_for_backward(classification_grads) return ranking_error.mean(), sorting_error.mean()", "torch.min(fg_logits)-delta #Get valid bg logits relevant_bg_labels=((targets==0)&(logits>=threshold_logit)) relevant_bg_logits=logits[relevant_bg_labels] relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda() rank=torch.zeros(fg_num).cuda() prec=torch.zeros(fg_num).cuda()", "this example to compute classification loss prec[ii]=rank_pos/rank[ii] #For stability, set", "fg_grad=torch.zeros(fg_num).cuda() max_prec=0 #sort the fg logits order=torch.argsort(fg_logits) #Loops over each", "/= (fg_num) cls_loss=1-prec.mean() ctx.save_for_backward(classification_grads) return cls_loss, rank, order @staticmethod def", "logits order=torch.argsort(fg_logits) #Loops over each positive following the order for", "out_grad2): g1, =ctx.saved_tensors return g1*out_grad1, None, None, None class aLRPLoss(torch.autograd.Function):", "of sorting pmf sorting_pmf_denom = torch.sum(missorted_examples) #Identity Update for Sorting", "(i.e. 
bg_relations/FP_num) relevant_bg_grad += (bg_relations*(ranking_error[ii]/FP_num)) #Find the positives that are", "Error if sorting_pmf_denom > eps: #For ii the update is", "#Identity Update for Ranking Error if FP_num > eps: #For", "1e-6), then compute grads if FP_num > eps: fg_grad[ii] =", "<gh_stars>0 import torch class RankSort(torch.autograd.Function): @staticmethod def forward(ctx, logits, targets,", "fg_grad[ii]=-(1-max_prec) prec[ii]=max_prec #aLRP with grad formulation fg gradient classification_grads[fg_labels]= fg_grad", "total since it is normalizer also for aLRP Regression error", "= (bg_relations >= 0).float() # Rank of ii among pos", "backward(ctx, out_grad1, out_grad2): g1, =ctx.saved_tensors return g1*out_grad1, None, None, None", "iou_relations) * fg_relations #Denominotor of sorting pmf sorting_pmf_denom = torch.sum(missorted_examples)", "# Current sorting error of example ii. (Eq. 7) current_sorting_error", "targets, delta_RS=0.50, eps=1e-10): classification_grads=torch.zeros(logits.shape).cuda() #Filter fg logits fg_labels = (targets", "#Find examples in the target sorted order for example ii", "(fg_targets >= fg_targets[ii]) target_sorted_order = iou_relations * fg_relations #The rank", "bg_relations = (bg_relations >= 0).float() # Rank of ii among", "def forward(ctx, logits, targets, delta_RS=0.50, eps=1e-10): classification_grads=torch.zeros(logits.shape).cuda() #Filter fg logits", "normalizer also for aLRP Regression error rank[ii]=rank_pos+FP_num #Compute precision for", "= torch.sum(missorted_examples) #Identity Update for Sorting Error if sorting_pmf_denom >", "on precision threshold_logit = torch.min(fg_logits)-delta #Get valid bg logits relevant_bg_labels=((targets==0)&(logits>=threshold_logit))", "len(fg_logits) #Do not use bg with scores less than minimum", "formulation bg gradient classification_grads[relevant_bg_labels]= relevant_bg_grad classification_grads /= (fg_num) cls_loss=1-prec.mean() 
ctx.save_for_backward(classification_grads)", "changing its score does not have an effect on precision", "error rank[ii]=rank_pos+FP_num #Compute precision for this example to compute classification", "delta=1., eps=1e-5): classification_grads=torch.zeros(logits.shape).cuda() #Filter fg logits fg_labels = (targets ==", "eps: fg_grad[ii] = -(torch.sum(fg_relations*regression_losses)+FP_num)/rank[ii] relevant_bg_grad += (bg_relations*(-fg_grad[ii]/FP_num)) #aLRP with grad", "relevant_bg_labels=((targets==0) & (logits>=threshold_logit)) relevant_bg_logits = logits[relevant_bg_labels] relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda() sorting_error=torch.zeros(fg_num).cuda() ranking_error=torch.zeros(fg_num).cuda() fg_grad=torch.zeros(fg_num).cuda()", "for this example current_prec=rank_pos/rank[ii] #Compute interpolated AP and store gradients", "> 0.) fg_logits = logits[fg_labels] fg_targets = targets[fg_labels] fg_num =", "then compute grads if FP_num > eps: fg_grad[ii] = -(torch.sum(fg_relations*regression_losses)+FP_num)/rank[ii]", "forward(ctx, logits, targets, regression_losses, delta=1., eps=1e-5): classification_grads=torch.zeros(logits.shape).cuda() #Filter fg logits", "rank, order @staticmethod def backward(ctx, out_grad1, out_grad2, out_grad3): g1, =ctx.saved_tensors", "set eps to a infinitesmall value (e.g. 
1e-6), then compute", "each positive following the order for ii in order: #", "fg_relations=fg_logits-fg_logits[ii] bg_relations=relevant_bg_logits-fg_logits[ii] if delta_RS > 0: fg_relations=torch.clamp(fg_relations/(2*delta_RS)+0.5,min=0,max=1) bg_relations=torch.clamp(bg_relations/(2*delta_RS)+0.5,min=0,max=1) else: fg_relations", "Ranking Error if FP_num > eps: #For ii the update", "eps: #For ii the update is the sorting error fg_grad[ii]", "(targets == 1) fg_logits = logits[fg_labels] fg_num = len(fg_logits) #Do", "this example current_prec=rank_pos/rank[ii] #Compute interpolated AP and store gradients for", "#aLRP with grad formulation fg gradient classification_grads[fg_labels]= fg_grad #aLRP with", "order=torch.argsort(fg_logits) #Loops over each positive following the order for ii", "if FP_num > eps: fg_grad[ii] = -(torch.sum(fg_relations*regression_losses)+FP_num)/rank[ii] relevant_bg_grad += (bg_relations*(-fg_grad[ii]/FP_num))", "in the target sorted order for example ii iou_relations =", "(max_prec<=current_prec): max_prec=current_prec relevant_bg_grad += (bg_relations/rank[ii]) else: relevant_bg_grad += (bg_relations/rank[ii])*(((1-max_prec)/(1-current_prec))) #Store", ">= 0).float() # Rank of ii among pos and false", "def forward(ctx, logits, targets, regression_losses, delta=1., eps=1e-5): classification_grads=torch.zeros(logits.shape).cuda() #Filter fg", "classification_grads[fg_labels]= (fg_grad/fg_num) classification_grads[relevant_bg_labels]= (relevant_bg_grad/fg_num) ctx.save_for_backward(classification_grads) return ranking_error.mean(), sorting_error.mean() @staticmethod def", "ranking_error.mean(), sorting_error.mean() @staticmethod def backward(ctx, out_grad1, out_grad2): g1, =ctx.saved_tensors return", "target sorting error. (Eq. 8) #Since target ranking error is", "among pos and false positive number (bg with larger scores)", "ii. target_ranking_error is always 0. (Eq. 
7) ranking_error[ii]=FP_num/rank # Current", "with smaller IoU but larger logits missorted_examples = (~ iou_relations)", "positives, distribute error via sorting pmf (i.e. missorted_examples/sorting_pmf_denom) fg_grad +=", "function and determine relations with fgs fg_relations=torch.clamp(fg_relations/(2*delta)+0.5,min=0,max=1) #Discard i=j in", "IoU but larger logits missorted_examples = (~ iou_relations) * fg_relations", "for ii in order: # Difference Transforms (x_ij) fg_relations=fg_logits-fg_logits[ii] bg_relations=relevant_bg_logits-fg_logits[ii]", "via ranking pmf (i.e. bg_relations/FP_num) relevant_bg_grad += (bg_relations*(ranking_error[ii]/FP_num)) #Find the", "logits, targets, delta_RS=0.50, eps=1e-10): classification_grads=torch.zeros(logits.shape).cuda() #Filter fg logits fg_labels =", "(~ iou_relations) * fg_relations #Denominotor of sorting pmf sorting_pmf_denom =", "out_grad1, out_grad2): g1, =ctx.saved_tensors return g1*out_grad1, None, None, None class", "fg_logits = logits[fg_labels] fg_num = len(fg_logits) #Do not use bg", "fg_relations = (fg_relations >= 0).float() bg_relations = (bg_relations >= 0).float()", "fg_relations #Denominotor of sorting pmf sorting_pmf_denom = torch.sum(missorted_examples) #Identity Update", "not have an effect on precision threshold_logit = torch.min(fg_logits)-delta_RS relevant_bg_labels=((targets==0)", "always 0. (Eq. 
7) ranking_error[ii]=FP_num/rank # Current sorting error of", "rank of the example within fgs and number of bgs", "ii in order: #x_ij s as score differences with fgs", "#Compute precision for this example current_prec=rank_pos/rank[ii] #Compute interpolated AP and", "positives classification_grads[fg_labels]= (fg_grad/fg_num) classification_grads[relevant_bg_labels]= (relevant_bg_grad/fg_num) ctx.save_for_backward(classification_grads) return ranking_error.mean(), sorting_error.mean() @staticmethod", "+= (bg_relations*(-fg_grad[ii]/FP_num)) #aLRP with grad formulation fg gradient classification_grads[fg_labels]= fg_grad", "(Eq. 8) #Since target ranking error is 0, this is", "torch class RankSort(torch.autograd.Function): @staticmethod def forward(ctx, logits, targets, delta_RS=0.50, eps=1e-10):", "example ii sorting_error[ii] = current_sorting_error - target_sorting_error #Identity Update for", "sorting error of example ii. (Eq. 7) current_sorting_error = torch.sum(fg_relations*(1-fg_targets))/rank_pos", "with bgs bg_relations=relevant_bg_logits-fg_logits[ii] #Apply piecewise linear function and determine relations", "# Rank of ii among all examples rank=rank_pos+FP_num # Ranking", "FP_num > eps: fg_grad[ii] = -(torch.sum(fg_relations*regression_losses)+FP_num)/rank[ii] relevant_bg_grad += (bg_relations*(-fg_grad[ii]/FP_num)) #aLRP", "torch.sum(fg_relations*(1-fg_targets))/rank_pos #Find examples in the target sorted order for example", "#Compute precision for this example to compute classification loss prec[ii]=rank_pos/rank[ii]", "interpolated AP and store gradients for relevant bg examples if", "piecewise linear function and determine relations with bgs bg_relations=torch.clamp(bg_relations/(2*delta)+0.5,min=0,max=1) #Compute", "@staticmethod def backward(ctx, out_grad1, out_grad2): g1, =ctx.saved_tensors return g1*out_grad1, None,", "ctx.save_for_backward(classification_grads) return ranking_error.mean(), sorting_error.mean() @staticmethod def backward(ctx, 
out_grad1, out_grad2): g1,", "in order: #x_ij s as score differences with fgs fg_relations=fg_logits-fg_logits[ii]", "ctx.save_for_backward(classification_grads) return cls_loss @staticmethod def backward(ctx, out_grad1): g1, =ctx.saved_tensors return", "is the sorting error fg_grad[ii] -= sorting_error[ii] #For positives, distribute", "bgs bg_relations=torch.clamp(bg_relations/(2*delta)+0.5,min=0,max=1) #Compute the rank of the example within fgs", "import torch class RankSort(torch.autograd.Function): @staticmethod def forward(ctx, logits, targets, delta_RS=0.50,", "Regression error rank[ii]=rank_pos+FP_num #Compute precision for this example to compute", "classification_grads[fg_labels]= fg_grad #aLRP with grad formulation bg gradient classification_grads[relevant_bg_labels]= relevant_bg_grad", "i=j in the summation in rank_pos fg_relations[ii]=0 #x_ij s as", "-(torch.sum(fg_relations*regression_losses)+FP_num)/rank[ii] relevant_bg_grad += (bg_relations*(-fg_grad[ii]/FP_num)) #aLRP with grad formulation fg gradient", "error) #These are the ones with smaller IoU but larger", "= (~ iou_relations) * fg_relations #Denominotor of sorting pmf sorting_pmf_denom", "fg logits fg_labels = (targets > 0.) 
fg_logits = logits[fg_labels]", "(bg_relations >= 0).float() # Rank of ii among pos and", "relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda() rank=torch.zeros(fg_num).cuda() prec=torch.zeros(fg_num).cuda() fg_grad=torch.zeros(fg_num).cuda() max_prec=0 #sort the fg logits order=torch.argsort(fg_logits)", "#Apply piecewise linear function and determine relations with fgs fg_relations=torch.clamp(fg_relations/(2*delta)+0.5,min=0,max=1)", "larger logits missorted_examples = (~ iou_relations) * fg_relations #Denominotor of", "number of positives classification_grads[fg_labels]= (fg_grad/fg_num) classification_grads[relevant_bg_labels]= (relevant_bg_grad/fg_num) ctx.save_for_backward(classification_grads) return ranking_error.mean(),", "rank[ii]=rank_pos+FP_num #Compute precision for this example to compute classification loss", "= targets[fg_labels] fg_num = len(fg_logits) #Do not use bg with", "the update is the ranking error fg_grad[ii] -= ranking_error[ii] #For", "FP_num > eps: #For ii the update is the ranking", "examples rank=rank_pos+FP_num # Ranking error of example ii. 
class RankSort(torch.autograd.Function):
    """Rank & Sort (RS) Loss (Oksuz et al., ICCV 2021, Eqs. 7-8).

    forward() returns the mean ranking error and mean sorting error over the
    positives, and stashes hand-derived classification gradients which
    backward() replays (standard "grad formulation" trick for ranking losses).
    """

    @staticmethod
    def forward(ctx, logits, targets, delta_RS=0.50, eps=1e-10):
        """Compute the RS loss.

        Args:
            ctx: autograd context used to stash the classification gradients.
            logits: classification scores for all anchors (flattened).
            targets: same shape as logits; values > 0 mark positives and carry
                their continuous label (presumably IoU — confirm against the
                caller), values == 0 mark background.
            delta_RS: half-width of the piecewise-linear smoothing of the unit
                step; <= 0 falls back to a hard step function.
            eps: an error term below this threshold distributes no gradient.

        Returns:
            (ranking_error.mean(), sorting_error.mean()) as 0-dim tensors.
        """
        # Allocate on the same device/dtype as the inputs. The original code
        # hard-coded .cuda(), which crashed on CPU-only machines; zeros_like /
        # new_zeros keep full backward compatibility on CUDA inputs.
        classification_grads = torch.zeros_like(logits)

        # Split out the positives (fg).
        fg_labels = (targets > 0.)
        fg_logits = logits[fg_labels]
        fg_targets = targets[fg_labels]
        fg_num = len(fg_logits)

        # Backgrounds scored below (min fg logit - delta_RS) cannot change any
        # precision, so changing their score has no effect: skip them.
        threshold_logit = torch.min(fg_logits) - delta_RS
        relevant_bg_labels = ((targets == 0) & (logits >= threshold_logit))

        relevant_bg_logits = logits[relevant_bg_labels]
        relevant_bg_grad = torch.zeros_like(relevant_bg_logits)
        sorting_error = fg_logits.new_zeros(fg_num)
        ranking_error = fg_logits.new_zeros(fg_num)
        fg_grad = fg_logits.new_zeros(fg_num)

        # Visit each positive in increasing-score order.
        order = torch.argsort(fg_logits)
        for ii in order:
            # Difference transforms (x_ij) relative to positive ii.
            fg_relations = fg_logits - fg_logits[ii]
            bg_relations = relevant_bg_logits - fg_logits[ii]

            if delta_RS > 0:
                # Smoothed step: linear ramp inside [-delta_RS, delta_RS].
                fg_relations = torch.clamp(fg_relations / (2 * delta_RS) + 0.5, min=0, max=1)
                bg_relations = torch.clamp(bg_relations / (2 * delta_RS) + 0.5, min=0, max=1)
            else:
                fg_relations = (fg_relations >= 0).float()
                bg_relations = (bg_relations >= 0).float()

            # Rank of ii among positives, and false-positive count
            # (backgrounds with larger scores).
            rank_pos = torch.sum(fg_relations)
            FP_num = torch.sum(bg_relations)

            # Rank of ii among all examples.
            rank = rank_pos + FP_num

            # Ranking error of ii; the target ranking error is always 0 (Eq. 7).
            ranking_error[ii] = FP_num / rank

            # Current sorting error of ii (Eq. 7).
            current_sorting_error = torch.sum(fg_relations * (1 - fg_targets)) / rank_pos

            # Positives that should outrank ii in the target sorted order.
            iou_relations = (fg_targets >= fg_targets[ii])
            target_sorted_order = iou_relations * fg_relations

            # Rank of ii among positives in the target sorted order.
            rank_pos_target = torch.sum(target_sorted_order)

            # Target sorting error (Eq. 8). Since the target ranking error is
            # 0, this is also the total target error.
            target_sorting_error = torch.sum(target_sorted_order * (1 - fg_targets)) / rank_pos_target

            # Sorting error of example ii.
            sorting_error[ii] = current_sorting_error - target_sorting_error

            # Identity update for the ranking error.
            if FP_num > eps:
                # For ii itself the update is the ranking error.
                fg_grad[ii] -= ranking_error[ii]
                # Negatives receive it via the ranking pmf bg_relations/FP_num.
                relevant_bg_grad += (bg_relations * (ranking_error[ii] / FP_num))

            # Positives causing the sorting error: smaller label (IoU) but
            # larger logits than ii.
            missorted_examples = (~iou_relations) * fg_relations

            # Denominator of the sorting pmf.
            sorting_pmf_denom = torch.sum(missorted_examples)

            # Identity update for the sorting error.
            if sorting_pmf_denom > eps:
                # For ii itself the update is the sorting error.
                fg_grad[ii] -= sorting_error[ii]
                # Missorted positives receive it via the sorting pmf.
                fg_grad += (missorted_examples * (sorting_error[ii] / sorting_pmf_denom))

        # Normalize the gradients by the number of positives.
        classification_grads[fg_labels] = (fg_grad / fg_num)
        classification_grads[relevant_bg_labels] = (relevant_bg_grad / fg_num)

        ctx.save_for_backward(classification_grads)

        return ranking_error.mean(), sorting_error.mean()

    @staticmethod
    def backward(ctx, out_grad1, out_grad2):
        """Replay the stored gradients, scaled by the ranking-error grad.

        The sorting-error output contributes no extra gradient of its own;
        targets/delta_RS/eps are non-differentiable (hence the Nones).
        """
        g1, = ctx.saved_tensors
        return g1 * out_grad1, None, None, None
class aLRPLoss(torch.autograd.Function):
    """aLRP classification loss (average Localisation-Recall-Precision).

    forward() computes 1 - mean precision over the positives with a smoothed
    step function, stashes hand-derived classification gradients (which fold
    in the per-positive regression losses), and also returns the total ranks
    and the sort order so the caller can normalize the regression branch.
    """

    @staticmethod
    def forward(ctx, logits, targets, regression_losses, delta=1., eps=1e-5):
        """Compute the aLRP classification loss.

        Args:
            ctx: autograd context used to stash the classification gradients.
            logits: classification scores for all anchors (flattened).
            targets: same shape as logits; == 1 marks a positive, == 0 marks
                background.
            regression_losses: per-positive localisation errors, aligned with
                logits[targets == 1].
            delta: half-width of the piecewise-linear smoothing of the step.
            eps: FP counts below this threshold distribute no gradient
                (stability guard).

        Returns:
            (cls_loss, rank, order): scalar loss, per-positive total rank, and
            the ascending-score ordering of the positives.
        """
        # Device/dtype-agnostic allocation; the original hard-coded .cuda()
        # and crashed on CPU-only builds.
        classification_grads = torch.zeros_like(logits)

        # Split out the positives (fg).
        fg_labels = (targets == 1)
        fg_logits = logits[fg_labels]
        fg_num = len(fg_logits)

        # Backgrounds scored below (min fg logit - delta) cannot change any
        # precision, so skip them.
        threshold_logit = torch.min(fg_logits) - delta

        # Valid bg logits.
        relevant_bg_labels = ((targets == 0) & (logits >= threshold_logit))
        relevant_bg_logits = logits[relevant_bg_labels]
        relevant_bg_grad = torch.zeros_like(relevant_bg_logits)
        rank = fg_logits.new_zeros(fg_num)
        prec = fg_logits.new_zeros(fg_num)
        fg_grad = fg_logits.new_zeros(fg_num)

        # NOTE: the original also set max_prec=0 here, a leftover from APLoss
        # that this class never reads; it has been dropped.

        # Visit each positive in increasing-score order.
        order = torch.argsort(fg_logits)
        for ii in order:
            # x_ij as score differences with the other positives.
            fg_relations = fg_logits - fg_logits[ii]
            # Piecewise-linear relation with the positives.
            fg_relations = torch.clamp(fg_relations / (2 * delta) + 0.5, min=0, max=1)
            # Discard i == j in the rank_pos summation.
            fg_relations[ii] = 0

            # x_ij as score differences with the backgrounds.
            bg_relations = relevant_bg_logits - fg_logits[ii]
            # Piecewise-linear relation with the backgrounds.
            bg_relations = torch.clamp(bg_relations / (2 * delta) + 0.5, min=0, max=1)

            # Rank among positives, and number of bgs with larger scores.
            rank_pos = 1 + torch.sum(fg_relations)
            FP_num = torch.sum(bg_relations)
            # Total rank: also the normalizer of the aLRP regression error.
            rank[ii] = rank_pos + FP_num

            # Precision of this example, for the classification loss.
            prec[ii] = rank_pos / rank[ii]
            # Stability guard: only distribute gradient for a meaningful FP mass.
            if FP_num > eps:
                fg_grad[ii] = -(torch.sum(fg_relations * regression_losses) + FP_num) / rank[ii]
                relevant_bg_grad += (bg_relations * (-fg_grad[ii] / FP_num))

        # aLRP grad formulation: fg and bg gradients, normalized by #positives.
        classification_grads[fg_labels] = fg_grad
        classification_grads[relevant_bg_labels] = relevant_bg_grad
        classification_grads /= (fg_num)

        cls_loss = 1 - prec.mean()
        ctx.save_for_backward(classification_grads)

        return cls_loss, rank, order

    @staticmethod
    def backward(ctx, out_grad1, out_grad2, out_grad3):
        """Replay the stored gradients, scaled by the loss output's grad.

        rank and order are auxiliary outputs with no gradient; targets,
        regression_losses, delta and eps are non-differentiable.
        """
        g1, = ctx.saved_tensors
        return g1 * out_grad1, None, None, None, None
class APLoss(torch.autograd.Function):
    """Average Precision (AP) loss with interpolated precision.

    forward() computes 1 - mean interpolated precision over the positives
    using a smoothed step function, and stashes hand-derived classification
    gradients which backward() replays.
    """

    @staticmethod
    def forward(ctx, logits, targets, delta=1.):
        """Compute the AP loss.

        Args:
            ctx: autograd context used to stash the classification gradients.
            logits: classification scores for all anchors (flattened).
            targets: same shape as logits; == 1 marks a positive, == 0 marks
                background.
            delta: half-width of the piecewise-linear smoothing of the step.

        Returns:
            Scalar tensor: 1 - mean interpolated precision.
        """
        # Device/dtype-agnostic allocation; the original hard-coded .cuda()
        # and crashed on CPU-only builds.
        classification_grads = torch.zeros_like(logits)

        # Split out the positives (fg).
        fg_labels = (targets == 1)
        fg_logits = logits[fg_labels]
        fg_num = len(fg_logits)

        # Backgrounds scored below (min fg logit - delta) cannot change any
        # precision, so skip them.
        threshold_logit = torch.min(fg_logits) - delta

        # Valid bg logits.
        relevant_bg_labels = ((targets == 0) & (logits >= threshold_logit))
        relevant_bg_logits = logits[relevant_bg_labels]
        relevant_bg_grad = torch.zeros_like(relevant_bg_logits)
        rank = fg_logits.new_zeros(fg_num)
        prec = fg_logits.new_zeros(fg_num)
        fg_grad = fg_logits.new_zeros(fg_num)

        # Running maximum precision for AP interpolation.
        max_prec = 0
        # Visit each positive in increasing-score order.
        order = torch.argsort(fg_logits)
        for ii in order:
            # x_ij as score differences with the other positives.
            fg_relations = fg_logits - fg_logits[ii]
            # Piecewise-linear relation with the positives.
            fg_relations = torch.clamp(fg_relations / (2 * delta) + 0.5, min=0, max=1)
            # Discard i == j in the rank_pos summation.
            fg_relations[ii] = 0

            # x_ij as score differences with the backgrounds.
            bg_relations = relevant_bg_logits - fg_logits[ii]
            # Piecewise-linear relation with the backgrounds.
            bg_relations = torch.clamp(bg_relations / (2 * delta) + 0.5, min=0, max=1)

            # Rank among positives, and number of bgs with larger scores.
            rank_pos = 1 + torch.sum(fg_relations)
            FP_num = torch.sum(bg_relations)
            rank[ii] = rank_pos + FP_num

            # Precision of this example.
            current_prec = rank_pos / rank[ii]

            # Interpolated AP: keep the running max precision; scale the bg
            # gradient when the current precision dips below it.
            if (max_prec <= current_prec):
                max_prec = current_prec
                relevant_bg_grad += (bg_relations / rank[ii])
            else:
                relevant_bg_grad += (bg_relations / rank[ii]) * (((1 - max_prec) / (1 - current_prec)))

            # fg gradient and interpolated precision for this positive.
            fg_grad[ii] = -(1 - max_prec)
            prec[ii] = max_prec

        # Grad formulation: fg and bg gradients, normalized by #positives.
        classification_grads[fg_labels] = fg_grad
        classification_grads[relevant_bg_labels] = relevant_bg_grad
        classification_grads /= fg_num

        cls_loss = 1 - prec.mean()
        ctx.save_for_backward(classification_grads)

        return cls_loss

    @staticmethod
    def backward(ctx, out_grad1):
        """Replay the stored gradients, scaled by the incoming loss grad.

        targets and delta are non-differentiable (hence the Nones).
        """
        g1, = ctx.saved_tensors
        return g1 * out_grad1, None, None
bg_relations/FP_num) relevant_bg_grad += (bg_relations*(ranking_error[ii]/FP_num)) #Find the positives", "classification_grads /= (fg_num) cls_loss=1-prec.mean() ctx.save_for_backward(classification_grads) return cls_loss, rank, order @staticmethod", "#aLRP with grad formulation bg gradient classification_grads[relevant_bg_labels]= relevant_bg_grad classification_grads /=", "is normalizer also for aLRP Regression error rank[ii]=rank_pos+FP_num #Compute precision", "forward(ctx, logits, targets, delta_RS=0.50, eps=1e-10): classification_grads=torch.zeros(logits.shape).cuda() #Filter fg logits fg_labels", "= current_sorting_error - target_sorting_error #Identity Update for Ranking Error if", "torch.sum(missorted_examples) #Identity Update for Sorting Error if sorting_pmf_denom > eps:", "fg_relations=fg_logits-fg_logits[ii] #Apply piecewise linear function and determine relations with fgs", "the total since it is normalizer also for aLRP Regression", "error is 0, this is also total target error target_sorting_error=", "(x_ij) fg_relations=fg_logits-fg_logits[ii] bg_relations=relevant_bg_logits-fg_logits[ii] if delta_RS > 0: fg_relations=torch.clamp(fg_relations/(2*delta_RS)+0.5,min=0,max=1) bg_relations=torch.clamp(bg_relations/(2*delta_RS)+0.5,min=0,max=1) else:", "summation in rank_pos fg_relations[ii]=0 #x_ij s as score differences with", "#These are the ones with smaller IoU but larger logits", "0).float() bg_relations = (bg_relations >= 0).float() # Rank of ii", "a infinitesmall value (e.g. 
1e-6), then compute grads if FP_num", "- target_sorting_error #Identity Update for Ranking Error if FP_num >", "logits, targets, delta=1.): classification_grads=torch.zeros(logits.shape).cuda() #Filter fg logits fg_labels = (targets", "FP_num=torch.sum(bg_relations) #Store the total since it is normalizer also for", "an effect on precision threshold_logit = torch.min(fg_logits)-delta #Get valid bg", "#Identity Update for Sorting Error if sorting_pmf_denom > eps: #For", "#The rank of ii among positives in sorted order rank_pos_target", "cls_loss @staticmethod def backward(ctx, out_grad1): g1, =ctx.saved_tensors return g1*out_grad1, None,", "def backward(ctx, out_grad1, out_grad2, out_grad3): g1, =ctx.saved_tensors return g1*out_grad1, None,", "sorting pmf sorting_pmf_denom = torch.sum(missorted_examples) #Identity Update for Sorting Error", "sorted order rank_pos_target = torch.sum(target_sorted_order) #Compute target sorting error. (Eq.", "are the ones with smaller IoU but larger logits missorted_examples", "ii among pos and false positive number (bg with larger", "fgs fg_relations=fg_logits-fg_logits[ii] #Apply piecewise linear function and determine relations with", "error. (Eq. 
8) #Since target ranking error is 0, this", "#Filter fg logits fg_labels = (targets == 1) fg_logits =", "#Denominotor of sorting pmf sorting_pmf_denom = torch.sum(missorted_examples) #Identity Update for", "relevant_bg_grad += (bg_relations/rank[ii])*(((1-max_prec)/(1-current_prec))) #Store fg gradients fg_grad[ii]=-(1-max_prec) prec[ii]=max_prec #aLRP with", "ranking error is 0, this is also total target error", "#x_ij s as score differences with bgs bg_relations=relevant_bg_logits-fg_logits[ii] #Apply piecewise", "relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda() sorting_error=torch.zeros(fg_num).cuda() ranking_error=torch.zeros(fg_num).cuda() fg_grad=torch.zeros(fg_num).cuda() #sort the fg logits order=torch.argsort(fg_logits) #Loops", "with larger scores) rank_pos=torch.sum(fg_relations) FP_num=torch.sum(bg_relations) # Rank of ii among", "fg_grad[ii] -= ranking_error[ii] #For negatives, distribute error via ranking pmf", "aLRP Regression error rank[ii]=rank_pos+FP_num #Compute precision for this example to", "less than minimum fg logit #since changing its score does", "+= (bg_relations/rank[ii])*(((1-max_prec)/(1-current_prec))) #Store fg gradients fg_grad[ii]=-(1-max_prec) prec[ii]=max_prec #aLRP with grad", "pmf sorting_pmf_denom = torch.sum(missorted_examples) #Identity Update for Sorting Error if", "bg gradient classification_grads[relevant_bg_labels]= relevant_bg_grad classification_grads /= (fg_num) cls_loss=1-prec.mean() ctx.save_for_backward(classification_grads) return", "(fg_grad/fg_num) classification_grads[relevant_bg_labels]= (relevant_bg_grad/fg_num) ctx.save_for_backward(classification_grads) return ranking_error.mean(), sorting_error.mean() @staticmethod def backward(ctx,", "for this example to compute classification loss prec[ii]=rank_pos/rank[ii] #For stability,", "None class aLRPLoss(torch.autograd.Function): @staticmethod def forward(ctx, logits, targets, regression_losses, delta=1.,", "gradients for relevant bg examples if 
(max_prec<=current_prec): max_prec=current_prec relevant_bg_grad +=", "ranking error fg_grad[ii] -= ranking_error[ii] #For negatives, distribute error via", "fg_grad #aLRP with grad formulation bg gradient classification_grads[relevant_bg_labels]= relevant_bg_grad classification_grads", "valid bg logits relevant_bg_labels=((targets==0)&(logits>=threshold_logit)) relevant_bg_logits=logits[relevant_bg_labels] relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda() rank=torch.zeros(fg_num).cuda() prec=torch.zeros(fg_num).cuda() fg_grad=torch.zeros(fg_num).cuda() max_prec=0" ]
[ "f: dcd_obj = img.SegDCD.parse_txt(f.read()) assert dcd_obj is not None assert", "= img.SegDCD.parse_txt(f.read()) assert dcd_obj is not None assert len(dcd_obj) ==", "# Test Files DCD_TXT = os.path.join(DATA_DIR, 'dcd_test.txt') DCD_BIN = os.path.join(DATA_DIR,", "DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data') # Test Files DCD_TXT = os.path.join(DATA_DIR,", "SPDX-License-Identifier: BSD-3-Clause # The BSD-3-Clause license for this file can", "f: dcd_obj = img.SegDCD.parse(f.read()) assert dcd_obj is not None assert", "Copyright (c) 2017-2018 <NAME> # # SPDX-License-Identifier: BSD-3-Clause # The", "import os import pytest from imx import img # Used", "as f: dcd_obj = img.SegDCD.parse_txt(f.read()) assert dcd_obj is not None", "DCD_BIN = os.path.join(DATA_DIR, 'dcd_test.bin') def setup_module(module): # Prepare test environment", "setup_module(module): # Prepare test environment pass def teardown_module(module): # Clean", "distribution # or at https://spdx.org/licenses/BSD-3-Clause.html#licenseText import os import pytest from", "Test Files DCD_TXT = os.path.join(DATA_DIR, 'dcd_test.txt') DCD_BIN = os.path.join(DATA_DIR, 'dcd_test.bin')", "pass def test_txt_parser(): with open(DCD_TXT, 'r') as f: dcd_obj =", "BSD-3-Clause # The BSD-3-Clause license for this file can be", "'rb') as f: dcd_obj = img.SegDCD.parse(f.read()) assert dcd_obj is not", "img.SegDCD.parse_txt(f.read()) assert dcd_obj is not None assert len(dcd_obj) == 12", "Used Directories DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data') # Test Files DCD_TXT", "for this file can be found in the LICENSE file", "DCD_TXT = os.path.join(DATA_DIR, 'dcd_test.txt') DCD_BIN = os.path.join(DATA_DIR, 'dcd_test.bin') def setup_module(module):", "import pytest from imx import img # Used Directories DATA_DIR", "not None assert len(dcd_obj) == 12 def test_bin_parser(): with open(DCD_BIN,", "os.path.join(DATA_DIR, 'dcd_test.txt') DCD_BIN = os.path.join(DATA_DIR, 
'dcd_test.bin') def setup_module(module): # Prepare", "BSD-3-Clause license for this file can be found in the", "None assert len(dcd_obj) == 12 def test_bin_parser(): with open(DCD_BIN, 'rb')", "img.SegDCD.parse(f.read()) assert dcd_obj is not None assert len(dcd_obj) == 12", "as f: dcd_obj = img.SegDCD.parse(f.read()) assert dcd_obj is not None", "test_bin_parser(): with open(DCD_BIN, 'rb') as f: dcd_obj = img.SegDCD.parse(f.read()) assert", "Prepare test environment pass def teardown_module(module): # Clean test environment", "in the LICENSE file included with this distribution # or", "assert len(dcd_obj) == 12 def test_bin_parser(): with open(DCD_BIN, 'rb') as", "dcd_obj = img.SegDCD.parse(f.read()) assert dcd_obj is not None assert len(dcd_obj)", "file can be found in the LICENSE file included with", "def teardown_module(module): # Clean test environment pass def test_txt_parser(): with", "import img # Used Directories DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data') #", "this file can be found in the LICENSE file included", "be found in the LICENSE file included with this distribution", "Directories DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data') # Test Files DCD_TXT =", "environment pass def test_txt_parser(): with open(DCD_TXT, 'r') as f: dcd_obj", "found in the LICENSE file included with this distribution #", "'data') # Test Files DCD_TXT = os.path.join(DATA_DIR, 'dcd_test.txt') DCD_BIN =", "or at https://spdx.org/licenses/BSD-3-Clause.html#licenseText import os import pytest from imx import", "2017-2018 <NAME> # # SPDX-License-Identifier: BSD-3-Clause # The BSD-3-Clause license", "# Prepare test environment pass def teardown_module(module): # Clean test", "# Used Directories DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data') # Test Files", "= os.path.join(DATA_DIR, 'dcd_test.txt') DCD_BIN = os.path.join(DATA_DIR, 'dcd_test.bin') def setup_module(module): #", "from imx import 
img # Used Directories DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),", "def setup_module(module): # Prepare test environment pass def teardown_module(module): #", "(c) 2017-2018 <NAME> # # SPDX-License-Identifier: BSD-3-Clause # The BSD-3-Clause", "pass def teardown_module(module): # Clean test environment pass def test_txt_parser():", "= os.path.join(DATA_DIR, 'dcd_test.bin') def setup_module(module): # Prepare test environment pass", "can be found in the LICENSE file included with this", "assert dcd_obj is not None assert len(dcd_obj) == 12 def", "Clean test environment pass def test_txt_parser(): with open(DCD_TXT, 'r') as", "img # Used Directories DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data') # Test", "os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data') # Test Files DCD_TXT = os.path.join(DATA_DIR, 'dcd_test.txt') DCD_BIN", "Files DCD_TXT = os.path.join(DATA_DIR, 'dcd_test.txt') DCD_BIN = os.path.join(DATA_DIR, 'dcd_test.bin') def", "https://spdx.org/licenses/BSD-3-Clause.html#licenseText import os import pytest from imx import img #", "is not None assert len(dcd_obj) == 12 def test_bin_parser(): with", "included with this distribution # or at https://spdx.org/licenses/BSD-3-Clause.html#licenseText import os", "# or at https://spdx.org/licenses/BSD-3-Clause.html#licenseText import os import pytest from imx", "open(DCD_BIN, 'rb') as f: dcd_obj = img.SegDCD.parse(f.read()) assert dcd_obj is", "teardown_module(module): # Clean test environment pass def test_txt_parser(): with open(DCD_TXT,", "the LICENSE file included with this distribution # or at", "dcd_obj = img.SegDCD.parse_txt(f.read()) assert dcd_obj is not None assert len(dcd_obj)", "with open(DCD_TXT, 'r') as f: dcd_obj = img.SegDCD.parse_txt(f.read()) assert dcd_obj", "# The BSD-3-Clause license for this file can be found", "== 12 def test_bin_parser(): with open(DCD_BIN, 'rb') as f: dcd_obj", "os import pytest from imx import img # Used 
Directories", "at https://spdx.org/licenses/BSD-3-Clause.html#licenseText import os import pytest from imx import img", "test_txt_parser(): with open(DCD_TXT, 'r') as f: dcd_obj = img.SegDCD.parse_txt(f.read()) assert", "# Copyright (c) 2017-2018 <NAME> # # SPDX-License-Identifier: BSD-3-Clause #", "license for this file can be found in the LICENSE", "12 def test_bin_parser(): with open(DCD_BIN, 'rb') as f: dcd_obj =", "this distribution # or at https://spdx.org/licenses/BSD-3-Clause.html#licenseText import os import pytest", "'r') as f: dcd_obj = img.SegDCD.parse_txt(f.read()) assert dcd_obj is not", "# Clean test environment pass def test_txt_parser(): with open(DCD_TXT, 'r')", "with this distribution # or at https://spdx.org/licenses/BSD-3-Clause.html#licenseText import os import", "pytest from imx import img # Used Directories DATA_DIR =", "LICENSE file included with this distribution # or at https://spdx.org/licenses/BSD-3-Clause.html#licenseText", "= os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data') # Test Files DCD_TXT = os.path.join(DATA_DIR, 'dcd_test.txt')", "os.path.join(DATA_DIR, 'dcd_test.bin') def setup_module(module): # Prepare test environment pass def", "imx import img # Used Directories DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')", "with open(DCD_BIN, 'rb') as f: dcd_obj = img.SegDCD.parse(f.read()) assert dcd_obj", "environment pass def teardown_module(module): # Clean test environment pass def", "'dcd_test.bin') def setup_module(module): # Prepare test environment pass def teardown_module(module):", "len(dcd_obj) == 12 def test_bin_parser(): with open(DCD_BIN, 'rb') as f:", "# # SPDX-License-Identifier: BSD-3-Clause # The BSD-3-Clause license for this", "<NAME> # # SPDX-License-Identifier: BSD-3-Clause # The BSD-3-Clause license for", "# SPDX-License-Identifier: BSD-3-Clause # The BSD-3-Clause license for this file", "dcd_obj is not None assert len(dcd_obj) == 12 def test_bin_parser():", "test 
environment pass def test_txt_parser(): with open(DCD_TXT, 'r') as f:", "def test_bin_parser(): with open(DCD_BIN, 'rb') as f: dcd_obj = img.SegDCD.parse(f.read())", "The BSD-3-Clause license for this file can be found in", "open(DCD_TXT, 'r') as f: dcd_obj = img.SegDCD.parse_txt(f.read()) assert dcd_obj is", "test environment pass def teardown_module(module): # Clean test environment pass", "def test_txt_parser(): with open(DCD_TXT, 'r') as f: dcd_obj = img.SegDCD.parse_txt(f.read())", "file included with this distribution # or at https://spdx.org/licenses/BSD-3-Clause.html#licenseText import", "= img.SegDCD.parse(f.read()) assert dcd_obj is not None assert len(dcd_obj) ==", "'dcd_test.txt') DCD_BIN = os.path.join(DATA_DIR, 'dcd_test.bin') def setup_module(module): # Prepare test" ]
[ "+ \" week ago\" return str(day_diff / 7) + \"", "%Y %X') ago = datetime.now() - dt # print ago", "a json feed. Recent means \"In the last X days.\"", "self.args.days self.date_format = '%a, %d %b %Y %X' def get(self,", "datetime.today() - dt if delta.days > int(self.days): continue items.append(item) if", "= parser.parse_args([url]) >>> rj = RecentJson(args) >>> rj.get(url) True >>>", "+ \" years ago\" def main(args): \"\"\" For command-line use.", "enumerate(articles[0]): if i >= args.limit and args.limit > 0: break", "rj.get(url) True >>> xml = rj.parse() >>> print len(xml) 50", "= '%d-0%d-%d' % (dt.year, dt.month, dt.day) if dt.day < 10:", "(dt.year, dt.month, dt.day) article['slug'] = article['title'].lower().replace(' ', '-').replace('--', '-').replace(':', '')", "\"\"\" Return a feedparser entry object for the last X", "nargs=\"*\") return parser if __name__ == '__main__': \"\"\" \"\"\" parser", "= datetime.strptime(' '.join(article['publish_date'].split(' ')[:5]), '%a, %d %b %Y %X') ago", "365) + \" year ago\" return str(day_diff / 365) +", "entry object for the last X days of feed entries.", "< 7: return str(day_diff) + \" days ago\" if day_diff", "'verbose' in self.args and self.args.verbose: print \"URL: %s\" % url", "print arg rj.get(arg) try: p = rj.parse() except: continue if", "Take a URL, return a json array. >>> url =", "Turn the xml into an object. 
>>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628'", "args.limit and args.limit > 0: break dt = datetime.strptime(' '.join(article['publish_date'].split('", "60: return str(second_diff) + \" seconds ago\" if second_diff <", "(url, response['status'])) self.xml = response.read() return True def parse(self): \"\"\"", "ago\" if second_diff < 86400: return str(second_diff / 3600) +", "= p return p def recently(self): \"\"\" Return a feedparser", "(dt.year, dt.month, dt.day) if dt.day < 10: article['datetime'] = '%d-0%d-0%d'", "+ \" hours ago\" if day_diff == 1: return \"Yesterday\"", "= article['title'].encode('utf-8', 'replace') print 'var hed = \"<a href=\\'{0}\\'>{1}</a> <span>({2})</span>\";'.format(article['url'],", "pretty_date(ago).lower()) if args.output == 'js': if type(article['title']) is types.UnicodeType: article['title']", "build_parser(): \"\"\" We put the argparse in a method so", "the web. fh = open('json.gz', 'wb') fh.write(self.xml) fh.close() try: gz", "from datetime import datetime, timedelta from time import mktime class", "in self.p: # print item.keys() # [u'body', u'tags', u'url', u'contentId',", "parser = argparse.ArgumentParser(usage='$ python recentjson.py http://domain.com/json/', description='''Takes a list of", "fh.close() try: gz = gzip.GzipFile('json.gz', 'r').read() p = json.loads(gzip.GzipFile('json.gz', 'r').read())", "<span>({2})</span>'.format(article['url'], article['title'], pretty_date(ago).lower()) if args.output == 'js': if type(article['title']) is", "year ago\" return str(day_diff / 365) + \" years ago\"", "\" years ago\" def main(args): \"\"\" For command-line use. 
\"\"\"", "u'mobileTitle', u'mobileUrl', u'publish_date', u'images', u'title', u'type', u'categories'] # print item['publish_date']", "parser.add_argument(\"-l\", \"--limit\", dest=\"limit\", default=0, type=int) parser.add_argument(\"-o\", \"--output\", dest=\"output\", default=\"html\", type=str)", "delta.days > int(self.days): continue items.append(item) if 'verbose' in self.args and", "parser.add_argument(\"-o\", \"--output\", dest=\"output\", default=\"html\", type=str) parser.add_argument(\"--li\", dest=\"listitem\", default=False, action=\"store_true\") parser.add_argument(\"--ns\",", "= urllib2.urlopen(url) if int(response.code) >= 400: if 'verbose' in self.args", "# Fri, 7 Jul 2017 15:16:38 -0400 #dt = datetime.strptime(item['publish_date'],", "< 7200: return \"an hour ago\" if second_diff < 86400:", "print '<a href=\"{0}\">{1}</a> <span>({2})</span>'.format(article['url'], article['title'], pretty_date(ago).lower()) if args.output == 'js':", "if int(response.code) >= 400: if 'verbose' in self.args and self.args.verbose:", "< 10: article['datetime'] = '%d-0%d-0%d' % (dt.year, dt.month, dt.day) article['slug']", "== 0: if second_diff < 10: return \"just now\" if", "from the web. fh = open('json.gz', 'wb') fh.write(self.xml) fh.close() try:", "dest=\"nostamp\", default=False, action=\"store_true\") parser.add_argument(\"urls\", action=\"append\", nargs=\"*\") return parser if __name__", "a URL, return a json array. 
>>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628'", "article['title'], 'id': article['id'], 'description': article['description']}) elif args.output == 'csv': dt", "7 == 1: return str(day_diff / 7) + \" week", "parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", default=False, action=\"store_true\") parser.add_argument(\"--test\", dest=\"test\", default=False, action=\"store_true\") parser.add_argument(\"-d\",", "\"\"\" second_diff = ago.seconds day_diff = ago.days if day_diff <", "url raise ValueError(\"URL %s response: %s\" % (url, response['status'])) self.xml", "class RecentJson: \"\"\" Methods for ingesting and publishing JSON feeds.", "p = rj.parse() except: continue if not p: continue articles.append(rj.recently())", "print item['publish_date'] # Fri, 7 Jul 2017 15:16:38 -0400 #dt", "is 0: return None for i, article in enumerate(articles[0]): if", "args if not hasattr(self.args, 'days'): self.args.days = 0 self.days =", "web. fh = open('json.gz', 'wb') fh.write(self.xml) fh.close() try: gz =", "open('json.gz', 'wb') fh.write(self.xml) fh.close() try: gz = gzip.GzipFile('json.gz', 'r').read() p", "'%(datetime)s,\"%(title)s\",%(id)s,%(slug)s,%(iframe_url)s,%(image_url)s,%(image_large_url)s,\"%(media_keywords)s\",\"%(description)s\"' % article def build_parser(): \"\"\" We put the argparse", "if day_diff < 31: if day_diff / 7 == 1:", "dest=\"verbose\", default=False, action=\"store_true\") parser.add_argument(\"--test\", dest=\"test\", default=False, action=\"store_true\") parser.add_argument(\"-d\", \"--days\", dest=\"days\",", "xml = rj.parse() >>> print len(xml) 50 \"\"\" try: p", "article['title'].encode('utf-8', 'replace') print 'var hed = \"<a href=\\'{0}\\'>{1}</a> <span>({2})</span>\";'.format(article['url'], article['title'].replace('\"',", "ago\" if day_diff < 31: if day_diff / 7 ==", "RecentJson(args) if args: articles = [] for arg in args.urls[0]:", "article['title'], pretty_date(ago).lower()) elif args.nostamp == 
True: print '<li><a href=\"{0}\">{1}</a></li>'.format(article['url'], article['title'],", "\"\"\" try: p = json.loads(self.xml) except: # Sometimes we download", "import mktime class RecentJson: \"\"\" Methods for ingesting and publishing", "'verbose' in self.args and self.args.verbose: print delta.days, dt self.items =", "== 1: return str(day_diff / 7) + \" week ago\"", "timedelta object. From https://stackoverflow.com/questions/1551382/user-friendly-time-format-in-python \"\"\" second_diff = ago.seconds day_diff =", "parser = build_parser() args = parser.parse_args() if args.test: doctest.testmod(verbose=args.verbose) main(args)", "%Y %X %z') dt = datetime.strptime(' '.join(item['publish_date'].split(' ')[:5]), self.date_format) delta", "day_diff / 7 == 1: return str(day_diff / 7) +", "= ago.seconds day_diff = ago.days if day_diff < 0: return", "+ \" months ago\" if day_diff / 365 == 1:", "'<li><a href=\"{0}\">{1}</a></li>'.format(article['url'], article['title'], pretty_date(ago).lower()) else: print '<a href=\"{0}\">{1}</a> <span>({2})</span>'.format(article['url'], article['title'],", "dest=\"limit\", default=0, type=int) parser.add_argument(\"-o\", \"--output\", dest=\"output\", default=\"html\", type=str) parser.add_argument(\"--li\", dest=\"listitem\",", "gzip.GzipFile('json.gz', 'r').read() p = json.loads(gzip.GzipFile('json.gz', 'r').read()) except IOError: return None", "'<li><a href=\"{0}\">{1}</a> <span>({2})</span></li>'.format(article['url'], article['title'], pretty_date(ago).lower()) elif args.nostamp == True: print", "day_diff / 365 == 1: return str(day_diff / 365) +", "ago\" return str(day_diff / 30) + \" months ago\" if", "delta.days, dt self.items = items return items def pretty_date(ago): \"\"\"", "args. 
Returns the items published today unless otherwise specified.''', epilog='')", "2 days, 15:57:48.578638 if args.output == 'html': if type(article['title']) is", "href=\"{0}\">{1}</a></li>'.format(article['url'], article['title'], pretty_date(ago).lower()) else: print '<a href=\"{0}\">{1}</a> <span>({2})</span>'.format(article['url'], article['title'], pretty_date(ago).lower())", "= rj.parse() except: continue if not p: continue articles.append(rj.recently()) if", "if dt.month < 10: article['datetime'] = '%d-0%d-%d' % (dt.year, dt.month,", "weeks ago\" if day_diff < 365: if day_diff / 30", "\"'\") # date,title,id,slug,player_url,image_url,image_large_url,keywords,description print '%(datetime)s,\"%(title)s\",%(id)s,%(slug)s,%(iframe_url)s,%(image_url)s,%(image_large_url)s,\"%(media_keywords)s\",\"%(description)s\"' % article def build_parser(): \"\"\"", "% (dt.year, dt.month, dt.day) if dt.day < 10: article['datetime'] =", "u'mobileUrl', u'publish_date', u'images', u'title', u'type', u'categories'] # print item['publish_date'] #", "/ 3600) + \" hours ago\" if day_diff == 1:", "= article['description'].replace('\"', \"'\") # date,title,id,slug,player_url,image_url,image_large_url,keywords,description print '%(datetime)s,\"%(title)s\",%(id)s,%(slug)s,%(iframe_url)s,%(image_url)s,%(image_large_url)s,\"%(media_keywords)s\",\"%(description)s\"' % article def", "is types.UnicodeType: article['title'] = article['title'].encode('utf-8', 'replace') print 'var hed =", "def parse(self): \"\"\" Turn the xml into an object. >>>", "not hasattr(self.args, 'days'): self.args.days = 0 self.days = self.args.days self.date_format", "timedelta from time import mktime class RecentJson: \"\"\" Methods for", "if day_diff / 365 == 1: return str(day_diff / 365)", "rj.get(url) True \"\"\" response = urllib2.urlopen(url) if int(response.code) >= 400:", "object. 
>>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser = build_parser() >>>", "= 0 self.days = self.args.days self.date_format = '%a, %d %b", "2017 15:16:38 -0400 #dt = datetime.strptime(item['publish_date'], '%a, %d %b %Y", "p = json.loads(self.xml) except: # Sometimes we download gzipped documents", "if second_diff < 3600: return str(second_diff / 60) + \"", "recent items from a json feed. Recent means \"In the", "= '%a, %d %b %Y %X' def get(self, url): \"\"\"", "print '<li><a href=\"{0}\">{1}</a></li>'.format(article['url'], article['title'], pretty_date(ago).lower()) else: print '<a href=\"{0}\">{1}</a> <span>({2})</span>'.format(article['url'],", "unless otherwise specified.''', epilog='') parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", default=False, action=\"store_true\") parser.add_argument(\"--test\",", "'%a, %d %b %Y %X' def get(self, url): \"\"\" Wrapper", "\"\"\" rj = RecentJson(args) if args: articles = [] for", ">>> rj.get(url) True \"\"\" response = urllib2.urlopen(url) if int(response.code) >=", "type(article['title']) is types.UnicodeType: article['title'] = article['title'].encode('utf-8', 'replace') if args.listitem ==", "RecentJson(args) >>> rj.get(url) True >>> xml = rj.parse() >>> print", "json.loads(gzip.GzipFile('json.gz', 'r').read()) except IOError: return None self.p = p return", "RecentJson(args) >>> rj.get(url) True >>> xml = rj.parse() >>> articles", "365: if day_diff / 30 == 1: return str(day_diff /", "return '' if day_diff == 0: if second_diff < 10:", "continue if not p: continue articles.append(rj.recently()) if len(articles) is 0:", "dt self.items = items return items def pretty_date(ago): \"\"\" Process", "= '%s-%s-%s' % (dt.year, dt.month, dt.day) if dt.month < 10:", "type(article['title']) is types.UnicodeType: article['title'] = article['title'].encode('utf-8', 'replace') print 'var hed", "X days.\" import os import doctest import json import urllib2", "pretty_date(ago).lower()) else: 
print '<a href=\"{0}\">{1}</a> <span>({2})</span>'.format(article['url'], article['title'], pretty_date(ago).lower()) if args.output", "use. \"\"\" rj = RecentJson(args) if args: articles = []", "argparse in a method so we can test it outside", "def pretty_date(ago): \"\"\" Process a timedelta object. From https://stackoverflow.com/questions/1551382/user-friendly-time-format-in-python \"\"\"", "\"\"\" response = urllib2.urlopen(url) if int(response.code) >= 400: if 'verbose'", "400: if 'verbose' in self.args and self.args.verbose: print \"URL: %s\"", "default=\"html\", type=str) parser.add_argument(\"--li\", dest=\"listitem\", default=False, action=\"store_true\") parser.add_argument(\"--ns\", dest=\"nostamp\", default=False, action=\"store_true\")", "'%a, %d %b %Y %X %z') dt = datetime.strptime(' '.join(item['publish_date'].split('", "/ 30) + \" month ago\" return str(day_diff / 30)", "urllib2.urlopen(url) if int(response.code) >= 400: if 'verbose' in self.args and", "xml into an object. >>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser", "= rj.parse() >>> print len(xml) 50 \"\"\" try: p =", "'wb') fh.write(self.xml) fh.close() try: gz = gzip.GzipFile('json.gz', 'r').read() p =", "str(second_diff / 3600) + \" hours ago\" if day_diff ==", "second_diff < 120: return \"a minute ago\" if second_diff <", "args.urls[0]: if args.verbose: print arg rj.get(arg) try: p = rj.parse()", "For command-line use. \"\"\" rj = RecentJson(args) if args: articles", "args.output == 'csv': dt = datetime.strptime(' '.join(article['publish_date'].split(' ')[:5]), '%a, %d", "< 3600: return str(second_diff / 60) + \" minutes ago\"", "\" month ago\" return str(day_diff / 30) + \" months", "if __name__ == '__main__': \"\"\" \"\"\" parser = build_parser() args", "get(self, url): \"\"\" Wrapper for API requests. Take a URL,", "return items def pretty_date(ago): \"\"\" Process a timedelta object. 
From", "arg in args.urls[0]: if args.verbose: print arg rj.get(arg) try: p", "ago\" return str(day_diff / 365) + \" years ago\" def", "passed as args. Returns the items published today unless otherwise", "/ 30) + \" months ago\" if day_diff / 365", "feedparser entry object for the last X days of feed", "\" months ago\" if day_diff / 365 == 1: return", "articles = [] for arg in args.urls[0]: if args.verbose: print", "== True: print '<li><a href=\"{0}\">{1}</a></li>'.format(article['url'], article['title'], pretty_date(ago).lower()) else: print '<a", "feed. Recent means \"In the last X days.\" import os", "= open('json.gz', 'wb') fh.write(self.xml) fh.close() try: gz = gzip.GzipFile('json.gz', 'r').read()", "datetime.strptime(' '.join(item['publish_date'].split(' ')[:5]), self.date_format) delta = datetime.today() - dt if", "rj = RecentJson(args) >>> rj.get(url) True >>> xml = rj.parse()", "\"<a href=\\'{0}\\'>{1}</a> <span>({2})</span>\";'.format(article['url'], article['title'].replace('\"', '\\\\\\\\\"'), pretty_date(ago).lower()) elif args.output == 'json':", "1: return \"Yesterday\" if day_diff < 7: return str(day_diff) +", "put the argparse in a method so we can test", "ago = datetime.now() - dt # print ago # 2", "a json array. 
>>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser =", "articles.append(rj.recently()) if len(articles) is 0: return None for i, article", "- dt if delta.days > int(self.days): continue items.append(item) if 'verbose'", "href=\\'{0}\\'>{1}</a> <span>({2})</span>\";'.format(article['url'], article['title'].replace('\"', '\\\\\\\\\"'), pretty_date(ago).lower()) elif args.output == 'json': print", "return str(second_diff) + \" seconds ago\" if second_diff < 120:", "u'url', u'contentId', u'abstract', u'author', u'lastUpdated', u'mobileTitle', u'mobileUrl', u'publish_date', u'images', u'title',", "'<a href=\"{0}\">{1}</a> <span>({2})</span>'.format(article['url'], article['title'], pretty_date(ago).lower()) if args.output == 'js': if", "build_parser() >>> args = parser.parse_args([url]) >>> rj = RecentJson(args) \"\"\"", "ago\" if second_diff < 7200: return \"an hour ago\" if", "otherwise specified.''', epilog='') parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", default=False, action=\"store_true\") parser.add_argument(\"--test\", dest=\"test\",", "7: return str(day_diff) + \" days ago\" if day_diff <", ">>> print len(xml) 50 \"\"\" try: p = json.loads(self.xml) except:", "if day_diff == 0: if second_diff < 10: return \"just", "print json.dumps({'title': article['title'], 'id': article['id'], 'description': article['description']}) elif args.output ==", "for ingesting and publishing JSON feeds. 
>>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628'", "'days'): self.args.days = 0 self.days = self.args.days self.date_format = '%a,", "%X') ago = datetime.now() - dt # print ago #", "\"--days\", dest=\"days\", default=0) parser.add_argument(\"-l\", \"--limit\", dest=\"limit\", default=0, type=int) parser.add_argument(\"-o\", \"--output\",", "= '%d-0%d-0%d' % (dt.year, dt.month, dt.day) article['slug'] = article['title'].lower().replace(' ',", "+ \" seconds ago\" if second_diff < 120: return \"a", "so we can test it outside of the command-line. \"\"\"", "fh = open('json.gz', 'wb') fh.write(self.xml) fh.close() try: gz = gzip.GzipFile('json.gz',", "second_diff < 10: return \"just now\" if second_diff < 60:", "'var hed = \"<a href=\\'{0}\\'>{1}</a> <span>({2})</span>\";'.format(article['url'], article['title'].replace('\"', '\\\\\\\\\"'), pretty_date(ago).lower()) elif", "None self.p = p return p def recently(self): \"\"\" Return", "50 \"\"\" try: p = json.loads(self.xml) except: # Sometimes we", "1: return str(day_diff / 365) + \" year ago\" return", "article['datetime'] = '%d-0%d-0%d' % (dt.year, dt.month, dt.day) article['slug'] = article['title'].lower().replace('", "= datetime.now() - dt # print ago # 2 days,", "datetime, timedelta from time import mktime class RecentJson: \"\"\" Methods", "args = parser.parse_args([url]) >>> rj = RecentJson(args) \"\"\" def __init__(self,", "0 self.days = self.args.days self.date_format = '%a, %d %b %Y", "')[:5]), '%a, %d %b %Y %X') article['datetime'] = '%s-%s-%s' %", "rj.get(arg) try: p = rj.parse() except: continue if not p:", "default=False, action=\"store_true\") parser.add_argument(\"urls\", action=\"append\", nargs=\"*\") return parser if __name__ ==", "items.append(item) if 'verbose' in self.args and self.args.verbose: print delta.days, dt", "time import mktime class RecentJson: \"\"\" Methods for ingesting and", "= RecentJson(args) >>> rj.get(url) True >>> xml = rj.parse() >>>", "ago\" def 
main(args): \"\"\" For command-line use. \"\"\" rj =", "self.args and self.args.verbose: print delta.days, dt self.items = items return", "items from a json feed. Recent means \"In the last", "__init__(self, args={}): self.args = args if not hasattr(self.args, 'days'): self.args.days", "u'contentId', u'abstract', u'author', u'lastUpdated', u'mobileTitle', u'mobileUrl', u'publish_date', u'images', u'title', u'type',", "rj = RecentJson(args) >>> rj.get(url) True \"\"\" response = urllib2.urlopen(url)", "entries. >>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser = build_parser() >>>", "True >>> xml = rj.parse() >>> articles = rj.recently() \"\"\"", "type=str) parser.add_argument(\"--li\", dest=\"listitem\", default=False, action=\"store_true\") parser.add_argument(\"--ns\", dest=\"nostamp\", default=False, action=\"store_true\") parser.add_argument(\"urls\",", "hours ago\" if day_diff == 1: return \"Yesterday\" if day_diff", ">>> rj = RecentJson(args) \"\"\" def __init__(self, args={}): self.args =", "< 120: return \"a minute ago\" if second_diff < 3600:", "True \"\"\" response = urllib2.urlopen(url) if int(response.code) >= 400: if", "last X days of feed entries. 
>>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628'", "args.output == 'html': if type(article['title']) is types.UnicodeType: article['title'] = article['title'].encode('utf-8',", "True def parse(self): \"\"\" Turn the xml into an object.", "article['media_thumbnail'][1]['url'] article['description'] = article['description'].replace('\"', \"'\") # date,title,id,slug,player_url,image_url,image_large_url,keywords,description print '%(datetime)s,\"%(title)s\",%(id)s,%(slug)s,%(iframe_url)s,%(image_url)s,%(image_large_url)s,\"%(media_keywords)s\",\"%(description)s\"' %", "parser if __name__ == '__main__': \"\"\" \"\"\" parser = build_parser()", "if second_diff < 10: return \"just now\" if second_diff <", "args.nostamp == True: print '<li><a href=\"{0}\">{1}</a></li>'.format(article['url'], article['title'], pretty_date(ago).lower()) else: print", "items published today unless otherwise specified.''', epilog='') parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\",", "from time import mktime class RecentJson: \"\"\" Methods for ingesting", "is types.UnicodeType: article['title'] = article['title'].encode('utf-8', 'replace') if args.listitem == True:", "dt # print ago # 2 days, 15:57:48.578638 if args.output", "\"\"\" parser = build_parser() args = parser.parse_args() if args.test: doctest.testmod(verbose=args.verbose)", "article['title'] = article['title'].encode('utf-8', 'replace') print 'var hed = \"<a href=\\'{0}\\'>{1}</a>", "365) + \" years ago\" def main(args): \"\"\" For command-line", "1: return str(day_diff / 30) + \" month ago\" return", "'r').read() p = json.loads(gzip.GzipFile('json.gz', 'r').read()) except IOError: return None self.p", "\"\"\" parser = argparse.ArgumentParser(usage='$ python recentjson.py http://domain.com/json/', description='''Takes a list", "7) + \" weeks ago\" if day_diff < 365: if", "rj = RecentJson(args) \"\"\" def __init__(self, args={}): self.args = args", "- dt # print ago # 2 days, 15:57:48.578638 if", 
"dt if delta.days > int(self.days): continue items.append(item) if 'verbose' in", "p def recently(self): \"\"\" Return a feedparser entry object for", "< 10: return \"just now\" if second_diff < 60: return", "[u'body', u'tags', u'url', u'contentId', u'abstract', u'author', u'lastUpdated', u'mobileTitle', u'mobileUrl', u'publish_date',", "%X') article['datetime'] = '%s-%s-%s' % (dt.year, dt.month, dt.day) if dt.month", "15:57:48.578638 if args.output == 'html': if type(article['title']) is types.UnicodeType: article['title']", "% article def build_parser(): \"\"\" We put the argparse in", "article['media_player']['url'] article['image_url'] = article['media_thumbnail'][0]['url'] article['image_large_url'] = article['media_thumbnail'][1]['url'] article['description'] = article['description'].replace('\"',", "week ago\" return str(day_diff / 7) + \" weeks ago\"", "in a method so we can test it outside of", "parser.add_argument(\"--test\", dest=\"test\", default=False, action=\"store_true\") parser.add_argument(\"-d\", \"--days\", dest=\"days\", default=0) parser.add_argument(\"-l\", \"--limit\",", "= article['media_player']['url'] article['image_url'] = article['media_thumbnail'][0]['url'] article['image_large_url'] = article['media_thumbnail'][1]['url'] article['description'] =", "= article['media_thumbnail'][0]['url'] article['image_large_url'] = article['media_thumbnail'][1]['url'] article['description'] = article['description'].replace('\"', \"'\") #", "requests. Take a URL, return a json array. >>> url", "default=0, type=int) parser.add_argument(\"-o\", \"--output\", dest=\"output\", default=\"html\", type=str) parser.add_argument(\"--li\", dest=\"listitem\", default=False,", "gz = gzip.GzipFile('json.gz', 'r').read() p = json.loads(gzip.GzipFile('json.gz', 'r').read()) except IOError:", "as args. Returns the items published today unless otherwise specified.''',", "\"\"\" For command-line use. 
\"\"\" rj = RecentJson(args) if args:", "30) + \" months ago\" if day_diff / 365 ==", "ago # 2 days, 15:57:48.578638 if args.output == 'html': if", "try: p = rj.parse() except: continue if not p: continue", "types import gzip from datetime import datetime, timedelta from time", "command-line. \"\"\" parser = argparse.ArgumentParser(usage='$ python recentjson.py http://domain.com/json/', description='''Takes a", "7 Jul 2017 15:16:38 -0400 #dt = datetime.strptime(item['publish_date'], '%a, %d", "JSON feeds. >>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser = build_parser()", "-*- coding: utf-8 -*- # Return recent items from a", "\"--verbose\", dest=\"verbose\", default=False, action=\"store_true\") parser.add_argument(\"--test\", dest=\"test\", default=False, action=\"store_true\") parser.add_argument(\"-d\", \"--days\",", "= datetime.strptime(' '.join(article['publish_date'].split(' ')[:5]), '%a, %d %b %Y %X') article['datetime']", "the last X days.\" import os import doctest import json", "self.args.verbose: print delta.days, dt self.items = items return items def", "test it outside of the command-line. \"\"\" parser = argparse.ArgumentParser(usage='$", "parser.parse_args([url]) >>> rj = RecentJson(args) >>> rj.get(url) True >>> xml", "of feed entries. >>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser =", "str(day_diff / 7) + \" week ago\" return str(day_diff /", "args = parser.parse_args([url]) >>> rj = RecentJson(args) >>> rj.get(url) True", "+ \" days ago\" if day_diff < 31: if day_diff", "self.items = items return items def pretty_date(ago): \"\"\" Process a", "# -*- coding: utf-8 -*- # Return recent items from", "dt.day) if dt.month < 10: article['datetime'] = '%d-0%d-%d' % (dt.year,", "API requests. Take a URL, return a json array. >>>", "if day_diff < 0: return '' if day_diff == 0:", "outside of the command-line. 
\"\"\" parser = argparse.ArgumentParser(usage='$ python recentjson.py", "1: return str(day_diff / 7) + \" week ago\" return", "return str(day_diff / 30) + \" months ago\" if day_diff", "utf-8 -*- # Return recent items from a json feed.", "return \"an hour ago\" if second_diff < 86400: return str(second_diff", "<span>({2})</span>\";'.format(article['url'], article['title'].replace('\"', '\\\\\\\\\"'), pretty_date(ago).lower()) elif args.output == 'json': print json.dumps({'title':", "for API requests. Take a URL, return a json array.", "article['image_large_url'] = article['media_thumbnail'][1]['url'] article['description'] = article['description'].replace('\"', \"'\") # date,title,id,slug,player_url,image_url,image_large_url,keywords,description print", "str(day_diff / 30) + \" month ago\" return str(day_diff /", "From https://stackoverflow.com/questions/1551382/user-friendly-time-format-in-python \"\"\" second_diff = ago.seconds day_diff = ago.days if", ">>> rj.get(url) True >>> xml = rj.parse() >>> articles =", "'csv': dt = datetime.strptime(' '.join(article['publish_date'].split(' ')[:5]), '%a, %d %b %Y", "'r').read()) except IOError: return None self.p = p return p", "action=\"store_true\") parser.add_argument(\"-d\", \"--days\", dest=\"days\", default=0) parser.add_argument(\"-l\", \"--limit\", dest=\"limit\", default=0, type=int)", "'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser = build_parser() >>> args = parser.parse_args([url]) >>>", "article['datetime'] = '%d-0%d-%d' % (dt.year, dt.month, dt.day) if dt.day <", "Fri, 7 Jul 2017 15:16:38 -0400 #dt = datetime.strptime(item['publish_date'], '%a,", "\"--output\", dest=\"output\", default=\"html\", type=str) parser.add_argument(\"--li\", dest=\"listitem\", default=False, action=\"store_true\") parser.add_argument(\"--ns\", dest=\"nostamp\",", "RecentJson(args) >>> rj.get(url) True \"\"\" response = urllib2.urlopen(url) if int(response.code)", "the items published today unless 
otherwise specified.''', epilog='') parser.add_argument(\"-v\", \"--verbose\",", "import datetime, timedelta from time import mktime class RecentJson: \"\"\"", "if args.output == 'js': if type(article['title']) is types.UnicodeType: article['title'] =", "published today unless otherwise specified.''', epilog='') parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", default=False,", "return True def parse(self): \"\"\" Turn the xml into an", "self.days = self.args.days self.date_format = '%a, %d %b %Y %X'", "article['id'], 'description': article['description']}) elif args.output == 'csv': dt = datetime.strptime('", "mktime class RecentJson: \"\"\" Methods for ingesting and publishing JSON", "import argparse import types import gzip from datetime import datetime,", "'replace') print 'var hed = \"<a href=\\'{0}\\'>{1}</a> <span>({2})</span>\";'.format(article['url'], article['title'].replace('\"', '\\\\\\\\\"'),", "article['datetime'] = '%s-%s-%s' % (dt.year, dt.month, dt.day) if dt.month <", "datetime.strptime(item['publish_date'], '%a, %d %b %Y %X %z') dt = datetime.strptime('", "\"\"\" Process a timedelta object. 
From https://stackoverflow.com/questions/1551382/user-friendly-time-format-in-python \"\"\" second_diff =", "if second_diff < 86400: return str(second_diff / 3600) + \"", "p: continue articles.append(rj.recently()) if len(articles) is 0: return None for", "u'abstract', u'author', u'lastUpdated', u'mobileTitle', u'mobileUrl', u'publish_date', u'images', u'title', u'type', u'categories']", "u'title', u'type', u'categories'] # print item['publish_date'] # Fri, 7 Jul", "except: continue if not p: continue articles.append(rj.recently()) if len(articles) is", "self.p = p return p def recently(self): \"\"\" Return a", "\"URL: %s\" % url raise ValueError(\"URL %s response: %s\" %", "< 31: if day_diff / 7 == 1: return str(day_diff", "%z') dt = datetime.strptime(' '.join(item['publish_date'].split(' ')[:5]), self.date_format) delta = datetime.today()", "if day_diff / 30 == 1: return str(day_diff / 30)", "== 1: return str(day_diff / 30) + \" month ago\"", "')[:5]), self.date_format) delta = datetime.today() - dt if delta.days >", "%d %b %Y %X') article['datetime'] = '%s-%s-%s' % (dt.year, dt.month,", "= RecentJson(args) >>> rj.get(url) True \"\"\" response = urllib2.urlopen(url) if", "day_diff < 7: return str(day_diff) + \" days ago\" if", "minutes ago\" if second_diff < 7200: return \"an hour ago\"", "We put the argparse in a method so we can", "self.args and self.args.verbose: print \"URL: %s\" % url raise ValueError(\"URL", "years ago\" def main(args): \"\"\" For command-line use. \"\"\" rj", "the xml into an object. >>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>>", "#dt = datetime.strptime(item['publish_date'], '%a, %d %b %Y %X %z') dt", "Wrapper for API requests. Take a URL, return a json", "import json import urllib2 import argparse import types import gzip", "'.join(article['publish_date'].split(' ')[:5]), '%a, %d %b %Y %X') ago = datetime.now()", "Methods for ingesting and publishing JSON feeds. 
>>> url =", "365 == 1: return str(day_diff / 365) + \" year", "== True: print '<li><a href=\"{0}\">{1}</a> <span>({2})</span></li>'.format(article['url'], article['title'], pretty_date(ago).lower()) elif args.nostamp", "= RecentJson(args) if args: articles = [] for arg in", "parse(self): \"\"\" Turn the xml into an object. >>> url", "item.keys() # [u'body', u'tags', u'url', u'contentId', u'abstract', u'author', u'lastUpdated', u'mobileTitle',", "items return items def pretty_date(ago): \"\"\" Process a timedelta object.", "not p: continue articles.append(rj.recently()) if len(articles) is 0: return None", "7) + \" week ago\" return str(day_diff / 7) +", "= article['media_thumbnail'][1]['url'] article['description'] = article['description'].replace('\"', \"'\") # date,title,id,slug,player_url,image_url,image_large_url,keywords,description print '%(datetime)s,\"%(title)s\",%(id)s,%(slug)s,%(iframe_url)s,%(image_url)s,%(image_large_url)s,\"%(media_keywords)s\",\"%(description)s\"'", "%b %Y %X' def get(self, url): \"\"\" Wrapper for API", "and publishing JSON feeds. >>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser", "%s\" % (url, response['status'])) self.xml = response.read() return True def", "rj.parse() except: continue if not p: continue articles.append(rj.recently()) if len(articles)", "hed = \"<a href=\\'{0}\\'>{1}</a> <span>({2})</span>\";'.format(article['url'], article['title'].replace('\"', '\\\\\\\\\"'), pretty_date(ago).lower()) elif args.output", "article['title'], pretty_date(ago).lower()) else: print '<a href=\"{0}\">{1}</a> <span>({2})</span>'.format(article['url'], article['title'], pretty_date(ago).lower()) if", "/ 7) + \" weeks ago\" if day_diff < 365:", "= RecentJson(args) \"\"\" def __init__(self, args={}): self.args = args if", "= self.args.days self.date_format = '%a, %d %b %Y %X' def", "array. 
>>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser = build_parser() >>>", "pretty_date(ago).lower()) elif args.nostamp == True: print '<li><a href=\"{0}\">{1}</a></li>'.format(article['url'], article['title'], pretty_date(ago).lower())", "import gzip from datetime import datetime, timedelta from time import", "a method so we can test it outside of the", "\"just now\" if second_diff < 60: return str(second_diff) + \"", "'%a, %d %b %Y %X') ago = datetime.now() - dt", "str(day_diff / 30) + \" months ago\" if day_diff /", "dt = datetime.strptime(' '.join(item['publish_date'].split(' ')[:5]), self.date_format) delta = datetime.today() -", "method so we can test it outside of the command-line.", "'%d-0%d-%d' % (dt.year, dt.month, dt.day) if dt.day < 10: article['datetime']", "= [] for arg in args.urls[0]: if args.verbose: print arg", "<span>({2})</span></li>'.format(article['url'], article['title'], pretty_date(ago).lower()) elif args.nostamp == True: print '<li><a href=\"{0}\">{1}</a></li>'.format(article['url'],", "urllib2 import argparse import types import gzip from datetime import", "ago\" if second_diff < 3600: return str(second_diff / 60) +", "types.UnicodeType: article['title'] = article['title'].encode('utf-8', 'replace') if args.listitem == True: print", "article['title'].lower().replace(' ', '-').replace('--', '-').replace(':', '') article['iframe_url'] = article['media_player']['url'] article['image_url'] =", "/ 365) + \" year ago\" return str(day_diff / 365)", "today unless otherwise specified.''', epilog='') parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", default=False, action=\"store_true\")", "\" minutes ago\" if second_diff < 7200: return \"an hour", "\"\"\" Turn the xml into an object. 
>>> url =", "'%d-0%d-0%d' % (dt.year, dt.month, dt.day) article['slug'] = article['title'].lower().replace(' ', '-').replace('--',", "action=\"store_true\") parser.add_argument(\"--ns\", dest=\"nostamp\", default=False, action=\"store_true\") parser.add_argument(\"urls\", action=\"append\", nargs=\"*\") return parser", "rj.parse() >>> articles = rj.recently() \"\"\" items = [] for", "def build_parser(): \"\"\" We put the argparse in a method", "\"\"\" Methods for ingesting and publishing JSON feeds. >>> url", "'-').replace(':', '') article['iframe_url'] = article['media_player']['url'] article['image_url'] = article['media_thumbnail'][0]['url'] article['image_large_url'] =", "/ 30 == 1: return str(day_diff / 30) + \"", "return str(day_diff / 7) + \" weeks ago\" if day_diff", "= datetime.strptime(' '.join(item['publish_date'].split(' ')[:5]), self.date_format) delta = datetime.today() - dt", "for arg in args.urls[0]: if args.verbose: print arg rj.get(arg) try:", "10: article['datetime'] = '%d-0%d-%d' % (dt.year, dt.month, dt.day) if dt.day", "default=False, action=\"store_true\") parser.add_argument(\"--ns\", dest=\"nostamp\", default=False, action=\"store_true\") parser.add_argument(\"urls\", action=\"append\", nargs=\"*\") return", "< 365: if day_diff / 30 == 1: return str(day_diff", "articles = rj.recently() \"\"\" items = [] for item in", "of the command-line. 
\"\"\" parser = argparse.ArgumentParser(usage='$ python recentjson.py http://domain.com/json/',", ">>> xml = rj.parse() >>> articles = rj.recently() \"\"\" items", "%d %b %Y %X %z') dt = datetime.strptime(' '.join(item['publish_date'].split(' ')[:5]),", "86400: return str(second_diff / 3600) + \" hours ago\" if", "u'type', u'categories'] # print item['publish_date'] # Fri, 7 Jul 2017", "day_diff = ago.days if day_diff < 0: return '' if", "dt.month, dt.day) if dt.day < 10: article['datetime'] = '%d-0%d-0%d' %", "% url raise ValueError(\"URL %s response: %s\" % (url, response['status']))", "https://stackoverflow.com/questions/1551382/user-friendly-time-format-in-python \"\"\" second_diff = ago.seconds day_diff = ago.days if day_diff", "it outside of the command-line. \"\"\" parser = argparse.ArgumentParser(usage='$ python", "# Return recent items from a json feed. Recent means", "self.xml = response.read() return True def parse(self): \"\"\" Turn the", "/ 7 == 1: return str(day_diff / 7) + \"", "__name__ == '__main__': \"\"\" \"\"\" parser = build_parser() args =", "rj.get(url) True >>> xml = rj.parse() >>> articles = rj.recently()", "/ 365) + \" years ago\" def main(args): \"\"\" For", "%b %Y %X') article['datetime'] = '%s-%s-%s' % (dt.year, dt.month, dt.day)", "X days of feed entries. 
>>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>>", "= build_parser() >>> args = parser.parse_args([url]) >>> rj = RecentJson(args)", "if second_diff < 7200: return \"an hour ago\" if second_diff", "print delta.days, dt self.items = items return items def pretty_date(ago):", "print len(xml) 50 \"\"\" try: p = json.loads(self.xml) except: #", "IOError: return None self.p = p return p def recently(self):", "try: p = json.loads(self.xml) except: # Sometimes we download gzipped", "str(day_diff) + \" days ago\" if day_diff < 31: if", "continue articles.append(rj.recently()) if len(articles) is 0: return None for i,", "hour ago\" if second_diff < 86400: return str(second_diff / 3600)", "== 1: return \"Yesterday\" if day_diff < 7: return str(day_diff)", "'-').replace('--', '-').replace(':', '') article['iframe_url'] = article['media_player']['url'] article['image_url'] = article['media_thumbnail'][0]['url'] article['image_large_url']", "main(args): \"\"\" For command-line use. \"\"\" rj = RecentJson(args) if", "item['publish_date'] # Fri, 7 Jul 2017 15:16:38 -0400 #dt =", "# 2 days, 15:57:48.578638 if args.output == 'html': if type(article['title'])", "doctest import json import urllib2 import argparse import types import", "return None self.p = p return p def recently(self): \"\"\"", "120: return \"a minute ago\" if second_diff < 3600: return", "15:16:38 -0400 #dt = datetime.strptime(item['publish_date'], '%a, %d %b %Y %X", "list of URLs passed as args. 
Returns the items published", "break dt = datetime.strptime(' '.join(article['publish_date'].split(' ')[:5]), '%a, %d %b %Y", "\" days ago\" if day_diff < 31: if day_diff /", "dt = datetime.strptime(' '.join(article['publish_date'].split(' ')[:5]), '%a, %d %b %Y %X')", "i >= args.limit and args.limit > 0: break dt =", "print item.keys() # [u'body', u'tags', u'url', u'contentId', u'abstract', u'author', u'lastUpdated',", "recently(self): \"\"\" Return a feedparser entry object for the last", "= [] for item in self.p: # print item.keys() #", "+ \" year ago\" return str(day_diff / 365) + \"", "if not p: continue articles.append(rj.recently()) if len(articles) is 0: return", "type=int) parser.add_argument(\"-o\", \"--output\", dest=\"output\", default=\"html\", type=str) parser.add_argument(\"--li\", dest=\"listitem\", default=False, action=\"store_true\")", "python # -*- coding: utf-8 -*- # Return recent items", "self.p: # print item.keys() # [u'body', u'tags', u'url', u'contentId', u'abstract',", "u'images', u'title', u'type', u'categories'] # print item['publish_date'] # Fri, 7", "\"an hour ago\" if second_diff < 86400: return str(second_diff /", "response.read() return True def parse(self): \"\"\" Turn the xml into", "for i, article in enumerate(articles[0]): if i >= args.limit and", "url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser = build_parser() >>> args =", "')[:5]), '%a, %d %b %Y %X') ago = datetime.now() -", "from a json feed. Recent means \"In the last X", ">>> args = parser.parse_args([url]) >>> rj = RecentJson(args) >>> rj.get(url)", "\"a minute ago\" if second_diff < 3600: return str(second_diff /", "article in enumerate(articles[0]): if i >= args.limit and args.limit >", "args.listitem == True: print '<li><a href=\"{0}\">{1}</a> <span>({2})</span></li>'.format(article['url'], article['title'], pretty_date(ago).lower()) elif", "a list of URLs passed as args. 
Returns the items", "dest=\"listitem\", default=False, action=\"store_true\") parser.add_argument(\"--ns\", dest=\"nostamp\", default=False, action=\"store_true\") parser.add_argument(\"urls\", action=\"append\", nargs=\"*\")", "# print item['publish_date'] # Fri, 7 Jul 2017 15:16:38 -0400", "%d %b %Y %X' def get(self, url): \"\"\" Wrapper for", "download gzipped documents from the web. fh = open('json.gz', 'wb')", "last X days.\" import os import doctest import json import", "command-line use. \"\"\" rj = RecentJson(args) if args: articles =", "'%s-%s-%s' % (dt.year, dt.month, dt.day) if dt.month < 10: article['datetime']", "second_diff < 3600: return str(second_diff / 60) + \" minutes", "\"\"\" Wrapper for API requests. Take a URL, return a", "article['title'], pretty_date(ago).lower()) if args.output == 'js': if type(article['title']) is types.UnicodeType:", "import types import gzip from datetime import datetime, timedelta from", "else: print '<a href=\"{0}\">{1}</a> <span>({2})</span>'.format(article['url'], article['title'], pretty_date(ago).lower()) if args.output ==", "article['media_thumbnail'][0]['url'] article['image_large_url'] = article['media_thumbnail'][1]['url'] article['description'] = article['description'].replace('\"', \"'\") # date,title,id,slug,player_url,image_url,image_large_url,keywords,description", "json import urllib2 import argparse import types import gzip from", "rj.recently() \"\"\" items = [] for item in self.p: #", "second_diff < 60: return str(second_diff) + \" seconds ago\" if", "pretty_date(ago).lower()) elif args.output == 'json': print json.dumps({'title': article['title'], 'id': article['id'],", "\" seconds ago\" if second_diff < 120: return \"a minute", "datetime.strptime(' '.join(article['publish_date'].split(' ')[:5]), '%a, %d %b %Y %X') article['datetime'] =", "response: %s\" % (url, response['status'])) self.xml = response.read() return True", "day_diff < 31: if day_diff / 7 == 1: return", "= 
article['title'].encode('utf-8', 'replace') if args.listitem == True: print '<li><a href=\"{0}\">{1}</a>", "of URLs passed as args. Returns the items published today", "'.join(article['publish_date'].split(' ')[:5]), '%a, %d %b %Y %X') article['datetime'] = '%s-%s-%s'", "7200: return \"an hour ago\" if second_diff < 86400: return", "= argparse.ArgumentParser(usage='$ python recentjson.py http://domain.com/json/', description='''Takes a list of URLs", "return parser if __name__ == '__main__': \"\"\" \"\"\" parser =", "raise ValueError(\"URL %s response: %s\" % (url, response['status'])) self.xml =", "an object. >>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser = build_parser()", "second_diff = ago.seconds day_diff = ago.days if day_diff < 0:", "os import doctest import json import urllib2 import argparse import", "epilog='') parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", default=False, action=\"store_true\") parser.add_argument(\"--test\", dest=\"test\", default=False, action=\"store_true\")", "args.limit > 0: break dt = datetime.strptime(' '.join(article['publish_date'].split(' ')[:5]), '%a,", "# print item.keys() # [u'body', u'tags', u'url', u'contentId', u'abstract', u'author',", "return p def recently(self): \"\"\" Return a feedparser entry object", "day_diff / 30 == 1: return str(day_diff / 30) +", "p return p def recently(self): \"\"\" Return a feedparser entry", "article['description']}) elif args.output == 'csv': dt = datetime.strptime(' '.join(article['publish_date'].split(' ')[:5]),", "Sometimes we download gzipped documents from the web. 
fh =", "', '-').replace('--', '-').replace(':', '') article['iframe_url'] = article['media_player']['url'] article['image_url'] = article['media_thumbnail'][0]['url']", "< 60: return str(second_diff) + \" seconds ago\" if second_diff", "parser.parse_args([url]) >>> rj = RecentJson(args) >>> rj.get(url) True \"\"\" response", "elif args.nostamp == True: print '<li><a href=\"{0}\">{1}</a></li>'.format(article['url'], article['title'], pretty_date(ago).lower()) else:", "rj.parse() >>> print len(xml) 50 \"\"\" try: p = json.loads(self.xml)", "+ \" month ago\" return str(day_diff / 30) + \"", "True: print '<li><a href=\"{0}\">{1}</a></li>'.format(article['url'], article['title'], pretty_date(ago).lower()) else: print '<a href=\"{0}\">{1}</a>", "dt.day < 10: article['datetime'] = '%d-0%d-0%d' % (dt.year, dt.month, dt.day)", "parser.add_argument(\"urls\", action=\"append\", nargs=\"*\") return parser if __name__ == '__main__': \"\"\"", "< 10: article['datetime'] = '%d-0%d-%d' % (dt.year, dt.month, dt.day) if", "print '<li><a href=\"{0}\">{1}</a> <span>({2})</span></li>'.format(article['url'], article['title'], pretty_date(ago).lower()) elif args.nostamp == True:", "into an object. 
>>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser =", ">>> parser = build_parser() >>> args = parser.parse_args([url]) >>> rj", "parser.add_argument(\"-d\", \"--days\", dest=\"days\", default=0) parser.add_argument(\"-l\", \"--limit\", dest=\"limit\", default=0, type=int) parser.add_argument(\"-o\",", "try: gz = gzip.GzipFile('json.gz', 'r').read() p = json.loads(gzip.GzipFile('json.gz', 'r').read()) except", "0: break dt = datetime.strptime(' '.join(article['publish_date'].split(' ')[:5]), '%a, %d %b", "fh.write(self.xml) fh.close() try: gz = gzip.GzipFile('json.gz', 'r').read() p = json.loads(gzip.GzipFile('json.gz',", "a feedparser entry object for the last X days of", "article['description'] = article['description'].replace('\"', \"'\") # date,title,id,slug,player_url,image_url,image_large_url,keywords,description print '%(datetime)s,\"%(title)s\",%(id)s,%(slug)s,%(iframe_url)s,%(image_url)s,%(image_large_url)s,\"%(media_keywords)s\",\"%(description)s\"' % article", "print ago # 2 days, 15:57:48.578638 if args.output == 'html':", "True >>> xml = rj.parse() >>> print len(xml) 50 \"\"\"", "now\" if second_diff < 60: return str(second_diff) + \" seconds", "article['title'] = article['title'].encode('utf-8', 'replace') if args.listitem == True: print '<li><a", "== '__main__': \"\"\" \"\"\" parser = build_parser() args = parser.parse_args()", "= rj.parse() >>> articles = rj.recently() \"\"\" items = []", "0: if second_diff < 10: return \"just now\" if second_diff", "day_diff == 1: return \"Yesterday\" if day_diff < 7: return", "= parser.parse_args([url]) >>> rj = RecentJson(args) >>> rj.get(url) True \"\"\"", "href=\"{0}\">{1}</a> <span>({2})</span>'.format(article['url'], article['title'], pretty_date(ago).lower()) if args.output == 'js': if type(article['title'])", "Return recent items from a json feed. Recent means \"In", "feeds. 
>>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser = build_parser() >>>", "self.args.verbose: print \"URL: %s\" % url raise ValueError(\"URL %s response:", "argparse.ArgumentParser(usage='$ python recentjson.py http://domain.com/json/', description='''Takes a list of URLs passed", "json.dumps({'title': article['title'], 'id': article['id'], 'description': article['description']}) elif args.output == 'csv':", "url): \"\"\" Wrapper for API requests. Take a URL, return", "str(day_diff / 365) + \" year ago\" return str(day_diff /", "response['status'])) self.xml = response.read() return True def parse(self): \"\"\" Turn", "ago\" if second_diff < 120: return \"a minute ago\" if", "ingesting and publishing JSON feeds. >>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>>", "publishing JSON feeds. >>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser =", "self.args = args if not hasattr(self.args, 'days'): self.args.days = 0", "if type(article['title']) is types.UnicodeType: article['title'] = article['title'].encode('utf-8', 'replace') if args.listitem", "if args.listitem == True: print '<li><a href=\"{0}\">{1}</a> <span>({2})</span></li>'.format(article['url'], article['title'], pretty_date(ago).lower())", "recentjson.py http://domain.com/json/', description='''Takes a list of URLs passed as args.", "days.\" import os import doctest import json import urllib2 import", "% (dt.year, dt.month, dt.day) article['slug'] = article['title'].lower().replace(' ', '-').replace('--', '-').replace(':',", "return \"a minute ago\" if second_diff < 3600: return str(second_diff", "except: # Sometimes we download gzipped documents from the web.", "%b %Y %X %z') dt = datetime.strptime(' '.join(item['publish_date'].split(' ')[:5]), self.date_format)", "ago.days if day_diff < 0: return '' if day_diff ==", ">>> xml = rj.parse() >>> print len(xml) 50 \"\"\" try:", "# Sometimes we download gzipped documents 
from the web. fh", "= 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser = build_parser() >>> args = parser.parse_args([url])", "http://domain.com/json/', description='''Takes a list of URLs passed as args. Returns", "dest=\"days\", default=0) parser.add_argument(\"-l\", \"--limit\", dest=\"limit\", default=0, type=int) parser.add_argument(\"-o\", \"--output\", dest=\"output\",", "default=0) parser.add_argument(\"-l\", \"--limit\", dest=\"limit\", default=0, type=int) parser.add_argument(\"-o\", \"--output\", dest=\"output\", default=\"html\",", "ago\" return str(day_diff / 7) + \" weeks ago\" if", "\" weeks ago\" if day_diff < 365: if day_diff /", "[] for item in self.p: # print item.keys() # [u'body',", "if type(article['title']) is types.UnicodeType: article['title'] = article['title'].encode('utf-8', 'replace') print 'var", "ago\" if day_diff / 365 == 1: return str(day_diff /", "documents from the web. fh = open('json.gz', 'wb') fh.write(self.xml) fh.close()", "article['description'].replace('\"', \"'\") # date,title,id,slug,player_url,image_url,image_large_url,keywords,description print '%(datetime)s,\"%(title)s\",%(id)s,%(slug)s,%(iframe_url)s,%(image_url)s,%(image_large_url)s,\"%(media_keywords)s\",\"%(description)s\"' % article def build_parser():", "'json': print json.dumps({'title': article['title'], 'id': article['id'], 'description': article['description']}) elif args.output", "days of feed entries. >>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser", "i, article in enumerate(articles[0]): if i >= args.limit and args.limit", "xml = rj.parse() >>> articles = rj.recently() \"\"\" items =", "%X' def get(self, url): \"\"\" Wrapper for API requests. 
Take", "for item in self.p: # print item.keys() # [u'body', u'tags',", "print '%(datetime)s,\"%(title)s\",%(id)s,%(slug)s,%(iframe_url)s,%(image_url)s,%(image_large_url)s,\"%(media_keywords)s\",\"%(description)s\"' % article def build_parser(): \"\"\" We put the", "minute ago\" if second_diff < 3600: return str(second_diff / 60)", "Jul 2017 15:16:38 -0400 #dt = datetime.strptime(item['publish_date'], '%a, %d %b", "self.args.days = 0 self.days = self.args.days self.date_format = '%a, %d", "day_diff == 0: if second_diff < 10: return \"just now\"", "if i >= args.limit and args.limit > 0: break dt", "description='''Takes a list of URLs passed as args. Returns the", "we download gzipped documents from the web. fh = open('json.gz',", "delta = datetime.today() - dt if delta.days > int(self.days): continue", ">>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser = build_parser() >>> args", "if 'verbose' in self.args and self.args.verbose: print \"URL: %s\" %", "\"--limit\", dest=\"limit\", default=0, type=int) parser.add_argument(\"-o\", \"--output\", dest=\"output\", default=\"html\", type=str) parser.add_argument(\"--li\",", "return str(day_diff / 30) + \" month ago\" return str(day_diff", "args.output == 'js': if type(article['title']) is types.UnicodeType: article['title'] = article['title'].encode('utf-8',", "in self.args and self.args.verbose: print \"URL: %s\" % url raise", "# [u'body', u'tags', u'url', u'contentId', u'abstract', u'author', u'lastUpdated', u'mobileTitle', u'mobileUrl',", "return a json array. 
>>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser", "second_diff < 86400: return str(second_diff / 3600) + \" hours", "article['title'].replace('\"', '\\\\\\\\\"'), pretty_date(ago).lower()) elif args.output == 'json': print json.dumps({'title': article['title'],", "'') article['iframe_url'] = article['media_player']['url'] article['image_url'] = article['media_thumbnail'][0]['url'] article['image_large_url'] = article['media_thumbnail'][1]['url']", "except IOError: return None self.p = p return p def", ">>> rj = RecentJson(args) >>> rj.get(url) True >>> xml =", "== 'html': if type(article['title']) is types.UnicodeType: article['title'] = article['title'].encode('utf-8', 'replace')", "RecentJson: \"\"\" Methods for ingesting and publishing JSON feeds. >>>", "def get(self, url): \"\"\" Wrapper for API requests. Take a", "# date,title,id,slug,player_url,image_url,image_large_url,keywords,description print '%(datetime)s,\"%(title)s\",%(id)s,%(slug)s,%(iframe_url)s,%(image_url)s,%(image_large_url)s,\"%(media_keywords)s\",\"%(description)s\"' % article def build_parser(): \"\"\" We", "if day_diff < 7: return str(day_diff) + \" days ago\"", "if args.verbose: print arg rj.get(arg) try: p = rj.parse() except:", "= args if not hasattr(self.args, 'days'): self.args.days = 0 self.days", "args: articles = [] for arg in args.urls[0]: if args.verbose:", "in args.urls[0]: if args.verbose: print arg rj.get(arg) try: p =", ">>> articles = rj.recently() \"\"\" items = [] for item", "second_diff < 7200: return \"an hour ago\" if second_diff <", "len(articles) is 0: return None for i, article in enumerate(articles[0]):", "= article['title'].lower().replace(' ', '-').replace('--', '-').replace(':', '') article['iframe_url'] = article['media_player']['url'] article['image_url']", ">>> rj.get(url) True >>> xml = rj.parse() >>> print len(xml)", "json feed. 
Recent means \"In the last X days.\" import", "u'categories'] # print item['publish_date'] # Fri, 7 Jul 2017 15:16:38", "\"Yesterday\" if day_diff < 7: return str(day_diff) + \" days", "parser.add_argument(\"--ns\", dest=\"nostamp\", default=False, action=\"store_true\") parser.add_argument(\"urls\", action=\"append\", nargs=\"*\") return parser if", "'__main__': \"\"\" \"\"\" parser = build_parser() args = parser.parse_args() if", "article['title'].encode('utf-8', 'replace') if args.listitem == True: print '<li><a href=\"{0}\">{1}</a> <span>({2})</span></li>'.format(article['url'],", "60) + \" minutes ago\" if second_diff < 7200: return", "import urllib2 import argparse import types import gzip from datetime", "= response.read() return True def parse(self): \"\"\" Turn the xml", "and self.args.verbose: print \"URL: %s\" % url raise ValueError(\"URL %s", "'%a, %d %b %Y %X') article['datetime'] = '%s-%s-%s' % (dt.year,", "+ \" weeks ago\" if day_diff < 365: if day_diff", "str(day_diff / 365) + \" years ago\" def main(args): \"\"\"", "in self.args and self.args.verbose: print delta.days, dt self.items = items", "if 'verbose' in self.args and self.args.verbose: print delta.days, dt self.items", "if day_diff < 365: if day_diff / 30 == 1:", "specified.''', epilog='') parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", default=False, action=\"store_true\") parser.add_argument(\"--test\", dest=\"test\", default=False,", "return \"just now\" if second_diff < 60: return str(second_diff) +", "= \"<a href=\\'{0}\\'>{1}</a> <span>({2})</span>\";'.format(article['url'], article['title'].replace('\"', '\\\\\\\\\"'), pretty_date(ago).lower()) elif args.output ==", "Returns the items published today unless otherwise specified.''', epilog='') parser.add_argument(\"-v\",", "#!/usr/bin/env python # -*- coding: utf-8 -*- # Return recent", "if second_diff < 60: return str(second_diff) + \" seconds ago\"", "article['image_url'] = article['media_thumbnail'][0]['url'] 
article['image_large_url'] = article['media_thumbnail'][1]['url'] article['description'] = article['description'].replace('\"', \"'\")", "types.UnicodeType: article['title'] = article['title'].encode('utf-8', 'replace') print 'var hed = \"<a", "dt.month, dt.day) article['slug'] = article['title'].lower().replace(' ', '-').replace('--', '-').replace(':', '') article['iframe_url']", "the argparse in a method so we can test it", "if args: articles = [] for arg in args.urls[0]: if", "% (url, response['status'])) self.xml = response.read() return True def parse(self):", "== 'js': if type(article['title']) is types.UnicodeType: article['title'] = article['title'].encode('utf-8', 'replace')", "Return a feedparser entry object for the last X days", "'id': article['id'], 'description': article['description']}) elif args.output == 'csv': dt =", "print 'var hed = \"<a href=\\'{0}\\'>{1}</a> <span>({2})</span>\";'.format(article['url'], article['title'].replace('\"', '\\\\\\\\\"'), pretty_date(ago).lower())", "3600) + \" hours ago\" if day_diff == 1: return", "0: return '' if day_diff == 0: if second_diff <", "%s\" % url raise ValueError(\"URL %s response: %s\" % (url,", "30) + \" month ago\" return str(day_diff / 30) +", "if second_diff < 120: return \"a minute ago\" if second_diff", "3600: return str(second_diff / 60) + \" minutes ago\" if", "str(day_diff / 7) + \" weeks ago\" if day_diff <", "ago\" if day_diff == 1: return \"Yesterday\" if day_diff <", "the last X days of feed entries. 
>>> url =", "day_diff < 365: if day_diff / 30 == 1: return", "/ 7) + \" week ago\" return str(day_diff / 7)", "(dt.year, dt.month, dt.day) if dt.month < 10: article['datetime'] = '%d-0%d-%d'", "10: return \"just now\" if second_diff < 60: return str(second_diff)", "ValueError(\"URL %s response: %s\" % (url, response['status'])) self.xml = response.read()", "\" year ago\" return str(day_diff / 365) + \" years", "continue items.append(item) if 'verbose' in self.args and self.args.verbose: print delta.days,", "we can test it outside of the command-line. \"\"\" parser", "action=\"store_true\") parser.add_argument(\"--test\", dest=\"test\", default=False, action=\"store_true\") parser.add_argument(\"-d\", \"--days\", dest=\"days\", default=0) parser.add_argument(\"-l\",", "'description': article['description']}) elif args.output == 'csv': dt = datetime.strptime(' '.join(article['publish_date'].split('", "action=\"append\", nargs=\"*\") return parser if __name__ == '__main__': \"\"\" \"\"\"", "object. 
From https://stackoverflow.com/questions/1551382/user-friendly-time-format-in-python \"\"\" second_diff = ago.seconds day_diff = ago.days", "elif args.output == 'csv': dt = datetime.strptime(' '.join(article['publish_date'].split(' ')[:5]), '%a,", "u'tags', u'url', u'contentId', u'abstract', u'author', u'lastUpdated', u'mobileTitle', u'mobileUrl', u'publish_date', u'images',", "if delta.days > int(self.days): continue items.append(item) if 'verbose' in self.args", "item in self.p: # print item.keys() # [u'body', u'tags', u'url',", "< 0: return '' if day_diff == 0: if second_diff", "/ 60) + \" minutes ago\" if second_diff < 7200:", "seconds ago\" if second_diff < 120: return \"a minute ago\"", "'html': if type(article['title']) is types.UnicodeType: article['title'] = article['title'].encode('utf-8', 'replace') if", "means \"In the last X days.\" import os import doctest", "href=\"{0}\">{1}</a> <span>({2})</span></li>'.format(article['url'], article['title'], pretty_date(ago).lower()) elif args.nostamp == True: print '<li><a", "args.verbose: print arg rj.get(arg) try: p = rj.parse() except: continue", "if args.output == 'html': if type(article['title']) is types.UnicodeType: article['title'] =", "'replace') if args.listitem == True: print '<li><a href=\"{0}\">{1}</a> <span>({2})</span></li>'.format(article['url'], article['title'],", "URL, return a json array. >>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>>", "return str(second_diff / 60) + \" minutes ago\" if second_diff", "% (dt.year, dt.month, dt.day) if dt.month < 10: article['datetime'] =", "days, 15:57:48.578638 if args.output == 'html': if type(article['title']) is types.UnicodeType:", "items def pretty_date(ago): \"\"\" Process a timedelta object. 
From https://stackoverflow.com/questions/1551382/user-friendly-time-format-in-python", "def recently(self): \"\"\" Return a feedparser entry object for the", "\" hours ago\" if day_diff == 1: return \"Yesterday\" if", "dt.day) if dt.day < 10: article['datetime'] = '%d-0%d-0%d' % (dt.year,", "datetime.strptime(' '.join(article['publish_date'].split(' ')[:5]), '%a, %d %b %Y %X') ago =", "import os import doctest import json import urllib2 import argparse", ">>> rj = RecentJson(args) >>> rj.get(url) True \"\"\" response =", "Process a timedelta object. From https://stackoverflow.com/questions/1551382/user-friendly-time-format-in-python \"\"\" second_diff = ago.seconds", "and args.limit > 0: break dt = datetime.strptime(' '.join(article['publish_date'].split(' ')[:5]),", "# print ago # 2 days, 15:57:48.578638 if args.output ==", "in enumerate(articles[0]): if i >= args.limit and args.limit > 0:", "return None for i, article in enumerate(articles[0]): if i >=", "u'publish_date', u'images', u'title', u'type', u'categories'] # print item['publish_date'] # Fri,", "parser = build_parser() >>> args = parser.parse_args([url]) >>> rj =", "\"\"\" We put the argparse in a method so we", "date,title,id,slug,player_url,image_url,image_large_url,keywords,description print '%(datetime)s,\"%(title)s\",%(id)s,%(slug)s,%(iframe_url)s,%(image_url)s,%(image_large_url)s,\"%(media_keywords)s\",\"%(description)s\"' % article def build_parser(): \"\"\" We put", "the command-line. 
\"\"\" parser = argparse.ArgumentParser(usage='$ python recentjson.py http://domain.com/json/', description='''Takes", "gzip from datetime import datetime, timedelta from time import mktime", "= json.loads(self.xml) except: # Sometimes we download gzipped documents from", "args={}): self.args = args if not hasattr(self.args, 'days'): self.args.days =", "len(xml) 50 \"\"\" try: p = json.loads(self.xml) except: # Sometimes", "if day_diff == 1: return \"Yesterday\" if day_diff < 7:", "action=\"store_true\") parser.add_argument(\"urls\", action=\"append\", nargs=\"*\") return parser if __name__ == '__main__':", "/ 365 == 1: return str(day_diff / 365) + \"", "json.loads(self.xml) except: # Sometimes we download gzipped documents from the", "'.join(item['publish_date'].split(' ')[:5]), self.date_format) delta = datetime.today() - dt if delta.days", ">>> args = parser.parse_args([url]) >>> rj = RecentJson(args) \"\"\" def", "return \"Yesterday\" if day_diff < 7: return str(day_diff) + \"", "Recent means \"In the last X days.\" import os import", "10: article['datetime'] = '%d-0%d-0%d' % (dt.year, dt.month, dt.day) article['slug'] =", "True: print '<li><a href=\"{0}\">{1}</a> <span>({2})</span></li>'.format(article['url'], article['title'], pretty_date(ago).lower()) elif args.nostamp ==", "dt.month, dt.day) if dt.month < 10: article['datetime'] = '%d-0%d-%d' %", "ago.seconds day_diff = ago.days if day_diff < 0: return ''", "return str(day_diff / 7) + \" week ago\" return str(day_diff", "30 == 1: return str(day_diff / 30) + \" month", "'' if day_diff == 0: if second_diff < 10: return", "str(second_diff / 60) + \" minutes ago\" if second_diff <", "31: if day_diff / 7 == 1: return str(day_diff /", "> 0: break dt = datetime.strptime(' '.join(article['publish_date'].split(' ')[:5]), '%a, %d", "months ago\" if day_diff / 365 == 1: return str(day_diff", "[] for arg in args.urls[0]: if args.verbose: print arg rj.get(arg)", "'\\\\\\\\\"'), pretty_date(ago).lower()) elif args.output 
== 'json': print json.dumps({'title': article['title'], 'id':", "parser.add_argument(\"--li\", dest=\"listitem\", default=False, action=\"store_true\") parser.add_argument(\"--ns\", dest=\"nostamp\", default=False, action=\"store_true\") parser.add_argument(\"urls\", action=\"append\",", "== 'json': print json.dumps({'title': article['title'], 'id': article['id'], 'description': article['description']}) elif", "RecentJson(args) \"\"\" def __init__(self, args={}): self.args = args if not", "= gzip.GzipFile('json.gz', 'r').read() p = json.loads(gzip.GzipFile('json.gz', 'r').read()) except IOError: return", "p = json.loads(gzip.GzipFile('json.gz', 'r').read()) except IOError: return None self.p =", "u'lastUpdated', u'mobileTitle', u'mobileUrl', u'publish_date', u'images', u'title', u'type', u'categories'] # print", "= json.loads(gzip.GzipFile('json.gz', 'r').read()) except IOError: return None self.p = p", "datetime.now() - dt # print ago # 2 days, 15:57:48.578638", "article['iframe_url'] = article['media_player']['url'] article['image_url'] = article['media_thumbnail'][0]['url'] article['image_large_url'] = article['media_thumbnail'][1]['url'] article['description']", "build_parser() >>> args = parser.parse_args([url]) >>> rj = RecentJson(args) >>>", "import doctest import json import urllib2 import argparse import types", "return str(day_diff / 365) + \" years ago\" def main(args):", "month ago\" return str(day_diff / 30) + \" months ago\"", "if not hasattr(self.args, 'days'): self.args.days = 0 self.days = self.args.days", "dt.day) article['slug'] = article['title'].lower().replace(' ', '-').replace('--', '-').replace(':', '') article['iframe_url'] =", "object for the last X days of feed entries. 
>>>", "str(second_diff) + \" seconds ago\" if second_diff < 120: return", "coding: utf-8 -*- # Return recent items from a json", "and self.args.verbose: print delta.days, dt self.items = items return items", "datetime import datetime, timedelta from time import mktime class RecentJson:", "u'author', u'lastUpdated', u'mobileTitle', u'mobileUrl', u'publish_date', u'images', u'title', u'type', u'categories'] #", "'js': if type(article['title']) is types.UnicodeType: article['title'] = article['title'].encode('utf-8', 'replace') print", "dt.month < 10: article['datetime'] = '%d-0%d-%d' % (dt.year, dt.month, dt.day)", "default=False, action=\"store_true\") parser.add_argument(\"--test\", dest=\"test\", default=False, action=\"store_true\") parser.add_argument(\"-d\", \"--days\", dest=\"days\", default=0)", "%X %z') dt = datetime.strptime(' '.join(item['publish_date'].split(' ')[:5]), self.date_format) delta =", "parser.parse_args([url]) >>> rj = RecentJson(args) \"\"\" def __init__(self, args={}): self.args", "-0400 #dt = datetime.strptime(item['publish_date'], '%a, %d %b %Y %X %z')", "== 'csv': dt = datetime.strptime(' '.join(article['publish_date'].split(' ')[:5]), '%a, %d %b", "elif args.output == 'json': print json.dumps({'title': article['title'], 'id': article['id'], 'description':", "items = [] for item in self.p: # print item.keys()", "%d %b %Y %X') ago = datetime.now() - dt #", "= rj.recently() \"\"\" items = [] for item in self.p:", "= datetime.strptime(item['publish_date'], '%a, %d %b %Y %X %z') dt =", "-*- # Return recent items from a json feed. Recent", "dest=\"test\", default=False, action=\"store_true\") parser.add_argument(\"-d\", \"--days\", dest=\"days\", default=0) parser.add_argument(\"-l\", \"--limit\", dest=\"limit\",", "for the last X days of feed entries. 
>>> url", "days ago\" if day_diff < 31: if day_diff / 7", "== 1: return str(day_diff / 365) + \" year ago\"", "< 86400: return str(second_diff / 3600) + \" hours ago\"", "rj = RecentJson(args) if args: articles = [] for arg", "= parser.parse_args([url]) >>> rj = RecentJson(args) \"\"\" def __init__(self, args={}):", "%b %Y %X') ago = datetime.now() - dt # print", "\"\"\" items = [] for item in self.p: # print", "can test it outside of the command-line. \"\"\" parser =", "if day_diff / 7 == 1: return str(day_diff / 7)", "%Y %X' def get(self, url): \"\"\" Wrapper for API requests.", "+ \" minutes ago\" if second_diff < 7200: return \"an", "article def build_parser(): \"\"\" We put the argparse in a", "json array. >>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser = build_parser()", "hasattr(self.args, 'days'): self.args.days = 0 self.days = self.args.days self.date_format =", "if len(articles) is 0: return None for i, article in", "def __init__(self, args={}): self.args = args if not hasattr(self.args, 'days'):", "%s response: %s\" % (url, response['status'])) self.xml = response.read() return", "URLs passed as args. Returns the items published today unless", "default=False, action=\"store_true\") parser.add_argument(\"-d\", \"--days\", dest=\"days\", default=0) parser.add_argument(\"-l\", \"--limit\", dest=\"limit\", default=0,", "0: return None for i, article in enumerate(articles[0]): if i", "int(response.code) >= 400: if 'verbose' in self.args and self.args.verbose: print", "self.date_format) delta = datetime.today() - dt if delta.days > int(self.days):", "article['slug'] = article['title'].lower().replace(' ', '-').replace('--', '-').replace(':', '') article['iframe_url'] = article['media_player']['url']", ">= args.limit and args.limit > 0: break dt = datetime.strptime('", "self.date_format = '%a, %d %b %Y %X' def get(self, url):", "a timedelta object. 
From https://stackoverflow.com/questions/1551382/user-friendly-time-format-in-python \"\"\" second_diff = ago.seconds day_diff", "= ago.days if day_diff < 0: return '' if day_diff", "dest=\"output\", default=\"html\", type=str) parser.add_argument(\"--li\", dest=\"listitem\", default=False, action=\"store_true\") parser.add_argument(\"--ns\", dest=\"nostamp\", default=False,", "\"\"\" \"\"\" parser = build_parser() args = parser.parse_args() if args.test:", "day_diff < 0: return '' if day_diff == 0: if", "int(self.days): continue items.append(item) if 'verbose' in self.args and self.args.verbose: print", "ago\" if day_diff < 365: if day_diff / 30 ==", "\"\"\" def __init__(self, args={}): self.args = args if not hasattr(self.args,", "args.output == 'json': print json.dumps({'title': article['title'], 'id': article['id'], 'description': article['description']})", "%Y %X') article['datetime'] = '%s-%s-%s' % (dt.year, dt.month, dt.day) if", ">= 400: if 'verbose' in self.args and self.args.verbose: print \"URL:", "print \"URL: %s\" % url raise ValueError(\"URL %s response: %s\"", "feed entries. >>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628' >>> parser = build_parser()", "pretty_date(ago): \"\"\" Process a timedelta object. From https://stackoverflow.com/questions/1551382/user-friendly-time-format-in-python \"\"\" second_diff", "\"In the last X days.\" import os import doctest import", "argparse import types import gzip from datetime import datetime, timedelta", "python recentjson.py http://domain.com/json/', description='''Takes a list of URLs passed as", "arg rj.get(arg) try: p = rj.parse() except: continue if not", "if dt.day < 10: article['datetime'] = '%d-0%d-0%d' % (dt.year, dt.month,", "return str(second_diff / 3600) + \" hours ago\" if day_diff", "return str(day_diff) + \" days ago\" if day_diff < 31:", "= datetime.today() - dt if delta.days > int(self.days): continue items.append(item)", "gzipped documents from the web. 
fh = open('json.gz', 'wb') fh.write(self.xml)", "response = urllib2.urlopen(url) if int(response.code) >= 400: if 'verbose' in", "def main(args): \"\"\" For command-line use. \"\"\" rj = RecentJson(args)", "return str(day_diff / 365) + \" year ago\" return str(day_diff", "= items return items def pretty_date(ago): \"\"\" Process a timedelta", "\" week ago\" return str(day_diff / 7) + \" weeks", "> int(self.days): continue items.append(item) if 'verbose' in self.args and self.args.verbose:", "None for i, article in enumerate(articles[0]): if i >= args.limit" ]
[ "that are the # Script Extensions properties of certain characters.", "get_other_case, 0) # The grapheme breaking rules were changed for", "print(\"};\\n\") # Extract the unique combinations of properties into records", "= len(script_lists) script_lists.extend(script_numbers) return -return_value # Read the whole table", "'Zyyy', 'Copt', 'Cprt', 'Cyrl', 'Dsrt', 'Deva', 'Ethi', 'Geor', 'Glag', 'Goth',", "!= \"Extended_Pictographic\": continue m = re.match(r'([0-9a-fA-F]+)(\\.\\.([0-9a-fA-F]+))?$', chardata[0]) char = int(m.group(1),", "values table = tuple(table) for i in range(0, len(table), block_size):", "# 01-October-2018: Added the 'Unknown' script name # 03-October-2018: Added", "unicode_version == \"\": unicode_version = version elif unicode_version != version:", "struct {\\n' for i in range(len(records[0])): record_slice = [record[i] for", "(6,), (1,)], 1 ), \\ ( [(300,), (600,), (600,), (100,)],", "examples are correct for the Unicode 11.0.0 database. Future #", "print(\"/* These are the main two-stage UCD tables. 
The fields", "been upgraded to Python 3 for PCRE2, and should be", "the index # number of the required record in the", "Adjusted data file names to take from the Unicode.tables directory", "get_type_size(table) ELEMS_PER_LINE = 16 s = \"const %s %s[] =", "0) # which often come after a line which has", "== 1: return script_abbrevs.index(this_script_list[0]) script_numbers = [] for d in", "value, character type, grapheme break type, # offset to caseless", "f = re.match(r'^[^/]+/([^.]+)\\.txt$', file_name) file_base = f.group(1) version_pat = r\"^#", "= read_table('Unicode.tables/CaseFolding.txt', get_other_case, 0) # The grapheme breaking rules were", "(\"Unicode Emoji\"), # for example: # # http://unicode.org/Public/emoji/11.0/emoji-data.txt # #", "table, records = combine_tables(script, category, break_props, caseless_offsets, other_case, scriptx, padding_dummy)", "A character with more than one script listed for its", "updates may make change the actual lookup values. # #", "Unicode web site; GraphemeBreakProperty.txt is # in the \"auxiliary\" subdirectory.", "(U+3042) is in block 96 (0x60) # lookup 96 in", "block %d */\\n\" + fmt) % ((i / block_size,) +", "'Orkh', 'Samr', 'Lana', 'Tavt', #New for Unicode 6.0.0 'Batk', 'Brah',", "# observing that many characters have the same record, and", "for c in range(MAX_UNICODE): o = c + other_case[c] #", "for i in range(char, last + 1): if break_props[i] !=", "is # required. The ucd_stage1 table is indexed by a", "else: el = block_size fmt = \"%3d,\" * el +", "or two # are different because they are part of", "the new Script Extensions field, we need some padding #", "'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf',", "print(\" 0, /* other case */\") print(\" ucp_Unknown, /* script", "the name has the correct index value. break_property_names = ['CR',", "script_abbrevs_default: scriptx[i] = script[i] # With the addition of the", "required record in the ucd_records vector. 
# # The following", "'LVT', 'Regional_Indicator', 'Other', 'ZWJ', 'Extended_Pictographic' ] test_record_size() unicode_version = \"\"", "tables: # Find the optimum block size for 3-stage table", "in the table that are inserted into the main table.", "Updated for PCRE2 # 03-June-2014: Updated for Python 3 #", "'V', 'T', 'LV', 'LVT', 'Regional_Indicator', 'Other', 'ZWJ', 'Extended_Pictographic' ] test_record_size()", "= 1 # If we have not added to an", "'Zanb', #New for Unicode 11.0.0 'Dogr', 'Gong', 'Rohg', 'Maka', 'Medf',", "ELEMS_PER_LINE = 16 s = \"const %s %s[] = {", "_pcre2_xxx to xxxx, thereby avoiding name clashes\") print(\"with the library.", "table 90 in stage2 yields 564 # record 564 is", "'Chakma', 'Meroitic_Cursive', 'Meroitic_Hieroglyphs', 'Miao', 'Sharada', 'Sora_Sompeng', 'Takri', # New for", "other_case[c] != 0 and other_case[c + other_case[c]] == 0: other_case[c", "process. # # This script constructs six tables. The ucd_caseless_sets", "modify it by hand. Instead modify the script and run", "matter whether it is compiled or not. However\") print(\"a comment", "greater than 255 to make the field 16 bits. padding_dummy", "(0, 4294967295), (-128, 127), (-32768, 32767), (-2147483648, 2147483647)] minval =", "in range(char, last + 1): if break_props[i] != break_property_names.index('Other'): print(\"WARNING:", "+ \\ '/* %d bytes, record size %d */' %", "it, so the resulting table is # not much bigger", "'Sarb', 'Orkh', 'Samr', 'Lana', 'Tavt', #New for Unicode 6.0.0 'Batk',", "len(table), block_size): block = table[i:i+block_size] start = blocks.get(block) if start", "of 4). Set a value # greater than 255 to", "name has the correct index value. 
break_property_names = ['CR', 'LF',", "a float # # Added code to scan the emoji-data.txt", "'Kharoshthi', 'Khmer', 'Lao', 'Latin', 'Limbu', 'Linear_B', 'Malayalam', 'Mongolian', 'Myanmar', 'New_Tai_Lue',", "find the Extended Pictographic # property, which is used by", "used to ensure that digits # in script runs all", "# The grapheme breaking rules were changed for Unicode 11.0.0", "the unique combinations of properties into records def combine_tables(*tables): records", "stage3) # print \"/* %5d / %3d => %5d bytes", "of a set. If so, unite the existing set with", "code points for the '9' characters in each\") print(\"set of", "has a negative value in its record. This is the", "0 } # 34 = ucp_Latin => Latin script #", "block = table[i:i+block_size] start = blocks.get(block) if start is None:", "digitsets.append(first + 9) first += 10 file.close() digitsets.sort() print(\"/* This", "print('const ucd_record PRIV(ucd_records)[] = { ' + \\ '/* %d", "'Bopomofo', 'Braille', 'Buginese', 'Buhid', 'Canadian_Aboriginal', 'Cherokee', 'Common', 'Coptic', 'Cypriot', 'Cyrillic',", "'Kthi', 'Lisu', 'Mtei', 'Sarb', 'Orkh', 'Samr', 'Lana', 'Tavt', #New for", "= sys.maxsize for block_size in [2 ** i for i", "(block_size, size) if size < min_size: min_size = size min_stage1,", "'Old_Turkic', 'Samaritan', 'Tai_Tham', 'Tai_Viet', # New for Unicode 6.0.0 'Batak',", "than two characters that must match # each other caselessly.", "'Siddham', 'Tirhuta', 'Warang_Citi', # New for Unicode 8.0.0 'Ahom', 'Anatolian_Hieroglyphs',", "the library. 
At present, just one of these tables is", "= ucp_Mn => Non-spacing mark # 3 = ucp_gbExtend =>", "--- digitsets = [] file = open('Unicode.tables/Scripts.txt', 'r', encoding='utf-8') for", ">../src/pcre2_ucd.c # # It requires six Unicode data tables: DerivedGeneralCategory.txt,", "script: # Added code to add a grapheme break property", "found in the \"extracted\" subdirectory of the # Unicode database", "'Wcho' ] category_names = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll',", "the maint subdirectory, using the command # # [python3] ./MultiStage2.py", "# 0 => Not part of a caseless set #", "listed for its # Script Extension property has a negative", "=> No other case # 27 = ucp_Hiragana => No", "break_props[i] != break_property_names.index('Other'): print(\"WARNING: Emoji 0x%x has break property %s,", "= None): type, size = get_type_size(table) ELEMS_PER_LINE = 16 s", "property for emoji characters. This # can be set as", "*/\") print() print(\"/* Unicode character database. */\") print(\"/* This file", "def get_script_extension(chardata): this_script_list = list(chardata[1].split(' ')) if len(this_script_list) == 1:", "'Hiragana', 'Inherited', 'Kannada', 'Katakana', 'Kharoshthi', 'Khmer', 'Lao', 'Latin', 'Limbu', 'Linear_B',", "print(\"Do not modify it by hand. Instead modify the script", "category_names = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo',", "there are lines # to be ignored (returning the default", "The Script Extensions property default value is the Script value.", "It scans # the other_case table to find sets of", "needed 2.5 # Consequent code tidy # Adjusted data file", "code speeds up property # matching many times. The script", "scripts for Unicode 6.1.0 # 20-August-2012: Added scan of GraphemeBreakProperty.txt", "property. # 01-October-2018: Added the 'Unknown' script name # 03-October-2018:", "'Sund', 'Vaii', #New for Unicode 5.2 'Avst', 'Bamu', 'Egyp', 'Armi',", "when not needed. 
But don't leave\") print(\"a totally empty module", "/* %3d */\" % count, end='') print(\"\\n};\\n\") # Output the", "(minlimit, maxlimit) in enumerate(limits): if minlimit <= minval and maxval", "'Telu', 'Thaa', 'Thai', 'Tibt', 'Tfng', 'Ugar', 'Yiii', #New for Unicode", "<= 1: continue value = get_value(chardata) m = re.match(r'([0-9a-fA-F]+)(\\.\\.([0-9a-fA-F]+))?$', chardata[0])", "of extensions have been added to the original script. #", "# CaseFolding.txt, and emoji-data.txt. These must be in the #", "scan again and create equivalence sets. sets = [] for", "55 # lookup 80 (0x50) in table 55 in stage2", "* MAX_UNICODE for line in file: line = re.sub(r'#.*', '',", "line = re.sub(r'#.*', '', line) chardata = list(map(str.strip, line.split(';'))) if", "table contains the blocks with property values table = tuple(table)", "isn't used; # removed completely in 2012. # Corrected size", "scan it and fill in the default from Scripts. Code", "is # in order, and is terminated by NOTACHAR (0xffffffff),", "int(chardata[0], 16) return 0 # Parse a line of ScriptExtensions.txt", "'Hano', 'Hebr', 'Hira', 'Zinh', 'Knda', 'Kana', 'Khar', 'Khmr', 'Laoo', 'Latn',", "record 564 is { 27, 7, 12, 0, 0, 27,", "= [record[i] for record in records] slice_type, slice_size = get_type_size(record_slice)", "table_name, block_size = None): type, size = get_type_size(table) ELEMS_PER_LINE =", "\"/* %5d / %3d => %5d bytes */\" % (stage2_block,", "in the \"extracted\" subdirectory of the # Unicode database (UCD)", "s.append(y) appended = 1 # If we have not added", "'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs'", "UCP support is needed. # Update for PCRE2: name changes,", "script constructs six tables. The ucd_caseless_sets table contains # lists", "for Unicode 11.0.0 'Dogra', 'Gunjala_Gondi', 'Hanifi_Rohingya', 'Makasar', 'Medefaidrin', 'Old_Sogdian', 'Sogdian',", "None: continue first = int(m.group(1),16) last = int(m.group(2),16) if ((last", "character. 
The first list is empty; this is used for", "because an int is # required and the result of", "and # CaseFolding.txt are directly in the UCD directory. The", "= block_size fmt = \"%3d,\" * el + \"\\n\" if", "code to add a Script Extensions field to records. This", "#51 (\"Unicode Emoji\"), # for example: # # http://unicode.org/Public/emoji/11.0/emoji-data.txt #", "), \\ ( [(3, 100000), (6, 6), (6, 123456), (1,", "one instance of every unique record that is # required.", "'Devanagari', 'Ethiopic', 'Georgian', 'Glagolitic', 'Gothic', 'Greek', 'Gujarati', 'Gurmukhi', 'Han', 'Hangul',", "'Meroitic_Cursive', 'Meroitic_Hieroglyphs', 'Miao', 'Sharada', 'Sora_Sompeng', 'Takri', # New for Unicode", "stage_i, stage3 = compress_table(table, stage3_block) for stage2_block in [2 **", "New for Unicode 5.0 'Balinese', 'Cuneiform', 'Nko', 'Phags_Pa', 'Phoenician', #", "#included in pcre2test, we don't need the table of digit\")", "a multi-character caseless set (for # example, k, K and", "\"a\" (U+0061) is in block 0 # lookup 0 in", "#print struct def print_records(records, record_size): print('const ucd_record PRIV(ucd_records)[] = {", "Not part of a caseless set # 0 => No", "in a list of lists of # multiple scripts. Initialize", "+ 9) first += 10 file.close() digitsets.sort() print(\"/* This table", "a table of records (of type ucd_record), containing a #", "(GraphemeBreakProperty.txt). It comes from the emoji-data.txt # file, but we", "0 for s in sets: found = 0 for x", "names to take from the Unicode.tables directory # Adjusted global", "find the list 3, 15, 107, 29, # and terminator", "The first list is empty; this is used for characters", "of each block. 
The result of a lookup in ucd_stage1", "script_lists = [0] script_abbrevs_default = script_abbrevs.index('Zzzz') scriptx = read_table('Unicode.tables/ScriptExtensions.txt', get_script_extension,", "script_abbrevs_default = script_abbrevs.index('Zzzz') scriptx = read_table('Unicode.tables/ScriptExtensions.txt', get_script_extension, script_abbrevs_default) for i", "{' + '%6d, ' * len(record[0]) + '}, /* %3d", "< min_size: min_size = size min_stage1, min_stage2 = stage1, stage2", "1 print(\"\\n};\\n\") print(\"/* This vector is a list of lists", "'Rohg', 'Maka', 'Medf', 'Sogo', 'Sogd', #New for Unicode 12.0.0 'Elym',", "t = o + other_case[o] # Scan the existing sets", "% (table[i:i+ELEMS_PER_LINE] + (int(i * mult),))) else: if block_size >", "break property (8 bits),\") print(\"offset to multichar other cases or", "to this script: # Added code to add a grapheme", "# Commented out stuff relating to the casefolding table, which", "< last: digitsets.append(first + 9) first += 10 file.close() digitsets.sort()", "if m is None: continue first = int(m.group(1),16) last =", "0 print(\" 0x%05x,\" % d, end='') count += 1 print(\"\\n};\\n\")", "lists of characters that all match each other caselessly. Each", "'Kannada', 'Katakana', 'Kharoshthi', 'Khmer', 'Lao', 'Latin', 'Limbu', 'Linear_B', 'Malayalam', 'Mongolian',", "entry, as the zeroth # element is never used. 
script_lists", "Added code to add a grapheme break property field to", "# # The main tables generated by this script are", "in the CaseFolding file there are lines # to be", "=> Hiragana script # 7 = ucp_Lo => Other letter", "'Bassa_Vah', 'Caucasian_Albanian', 'Duployan', 'Elbasan', 'Grantha', 'Khojki', 'Khudawadi', 'Linear_A', 'Mahajani', 'Manichaean',", "count = 8 for d in digitsets: if count ==", "4 stage1, stage2 = compress_table(stage_i, stage2_block) size += get_tables_size(stage1, stage2,", "nearest power of slice_size size = (size + slice_size -", "table in tables: type, size = get_type_size(table) total_size += size", "was received about space saving - maybe the guy linked\")", "Get the smallest possible C language type for the values", "set. for c in range(MAX_UNICODE): if other_case[c] != 0 and", "is a float # # Added code to scan the", "far too big. It can be efficiently compressed by #", "x in s: caseless_offsets[x] = offset offset += len(s) +", "may be encountered. For these we set up a\") print(\"special", "change\") print(\"table names from _pcre2_xxx to xxxx, thereby avoiding name", "'Saurashtra', 'Sundanese', 'Vai', # New for Unicode 5.2 'Avestan', 'Bamum',", "Output the main UCD tables. print(\"/* These are the main", "# Major modifications made to this script: # Added code", "extensions have been added to the original script. 
# #", "blocks stage1 = [] # Stage 1 table contains block", "negated offset to the start of the relevant list in", "from pcre2_internal.h (the actual\\n' + \\ 'field names will be", "'Kana', 'Khar', 'Khmr', 'Laoo', 'Latn', 'Limb', 'Linb', 'Mlym', 'Mong', 'Mymr',", "Commented out stuff relating to the casefolding table, which isn't", "table of \"virtual\" blocks; each block is indexed by #", "of those scripts, which are Bengali, Devanagari, Grantha, and Kannada.", "of # characters (taking 128 characters in a block) have", "the\\n' + \\ 'types in this structure definition from pcre2_internal.h", "negated offsets in a list of lists of # multiple", "rewriting two statements that needed 2.5 # Consequent code tidy", "read_table('Unicode.tables/Scripts.txt', make_get_names(script_names), script_names.index('Unknown')) category = read_table('Unicode.tables/DerivedGeneralCategory.txt', make_get_names(category_names), category_names.index('Cn')) break_props =", "records (of type ucd_record), containing a # script number, script", "CaseFolding.txt are directly in the UCD directory. The emoji-data.txt file", "%d. */\" % (min_size, min_block_size)) print() print(\"/* The tables herein", "is the size # of each block. The result of", "then scan it and fill in the default from Scripts.", "-other_case[c] # Now scan again and create equivalence sets. 
sets", "UCP support is built,\") print(\"and in PCRE2 that happens automatically", "+= 1 print(\"\\n};\\n\") print(\"/* This vector is a list of", "'Nkoo', 'Phag', 'Phnx', #New for Unicode 5.1 'Cari', 'Cham', 'Kali',", "print() print(\"#ifdef HAVE_CONFIG_H\") print(\"#include \\\"config.h\\\"\") print(\"#endif\") print() print(\"#include \\\"pcre2_internal.h\\\"\") print()", "recompiling tables with a new Unicode version, please check the\\n'", "make_get_names(script_names), script_names.index('Unknown')) category = read_table('Unicode.tables/DerivedGeneralCategory.txt', make_get_names(category_names), category_names.index('Cn')) break_props = read_table('Unicode.tables/GraphemeBreakProperty.txt',", "into C types\") def get_tables_size(*tables): total_size = 0 for table", "there is a table of records (of type ucd_record), containing", "global unicode_version f = re.match(r'^[^/]+/([^.]+)\\.txt$', file_name) file_base = f.group(1) version_pat", "len(table), ELEMS_PER_LINE): print(fmt % (table[i:i+ELEMS_PER_LINE] + (int(i * mult),))) else:", "range(char, last + 1): if break_props[i] != break_property_names.index('Other'): print(\"WARNING: Emoji", "(int(i * mult),))) else: if block_size > ELEMS_PER_LINE: el =", "% count, end='') print(\"\\n};\\n\") # Output the main UCD tables.", "which is the character's code point divided by 128, since", "certain characters. Each list is terminated # by zero (ucp_Unknown).", "these we set up a\") print(\"special record. */\") print() print(\"#if", "01-October-2018: Added the 'Unknown' script name # 03-October-2018: Added new", "lines # to be ignored (returning the default value of", "offset to the start of the relevant list in the", "following examples are correct for the Unicode 11.0.0 database. Future", "stage3_block in [2 ** i for i in range(2,6)]: stage_i,", "size = (size + slice_size - 1) & -slice_size size", "and emoji-data.txt. 
These must be in the # maint/Unicode.tables subdirectory.", "+ other_case[c] # Trigger when this character's other case does", "%3d => %5d bytes */\" % (stage2_block, stage3_block, size) if", "terminator 0. This means that this character is expected to", "string.strip to str.strip # . Added encoding='utf-8' to the open()", "'Tfng', 'Ugar', 'Yiii', #New for Unicode 5.0 'Bali', 'Xsux', 'Nkoo',", "new set. appended = 0 for s in sets: found", "for i in range(0, MAX_UNICODE): if scriptx[i] == script_abbrevs_default: scriptx[i]", "tests = [ \\ ( [(3,), (6,), (6,), (1,)], 1", "x in s: print(' 0x%04x,' % x, end=' ') print('", "all Unicode # characters would be far too big. It", "tuple(table) if block_size is None: fmt = \"%3d,\" * ELEMS_PER_LINE", "range(len(records[0])): record_slice = [record[i] for record in records] slice_type, slice_size", "'Bamum', 'Egyptian_Hieroglyphs', 'Imperial_Aramaic', 'Inscriptional_Pahlavi', 'Inscriptional_Parthian', 'Javanese', 'Kaithi', 'Lisu', 'Meetei_Mayek', 'Old_South_Arabian',", "6), (123456, 6), (1, 690)], 8 ), \\ ] for", "does not point back here. We # now have three", "s: print(' 0x%04x,' % x, end=' ') print(' NOTACHAR,') print('};')", "29, # and terminator 0. This means that this character", "'Prti', 'Java', 'Kthi', 'Lisu', 'Mtei', 'Sarb', 'Orkh', 'Samr', 'Lana', 'Tavt',", "code was part of the original contribution, but is commented", "'Modi', 'Mroo', 'Nbat', 'Narb', 'Perm', 'Hmng', 'Palm', 'Phlp', 'Pauc', 'Sidd',", "10 characters\" % (first, last), file=sys.stderr) while first < last:", "for c in range(MAX_UNICODE): if other_case[c] != 0 and other_case[c", "'Other', 'ZWJ', 'Extended_Pictographic' ] test_record_size() unicode_version = \"\" script =", "the number of subsequent elements, which are in ascending order.", "# record 17 is { 34, 5, 12, 0, -32,", "table. 
# The CaseFolding.txt file lists pairs, but the common", "0 for table in tables: type, size = get_type_size(table) total_size", "Find the optimum block size for the two-stage table min_size", "pcre2test program, which redefines the PRIV macro to change\") print(\"table", "# and terminator 0. This means that this character is", "property \"Other\" # 0 => Not part of a caseless", "that is # required. The ucd_stage1 table is indexed by", "# 34 = ucp_Latin => Latin script # 5 =", "case does not point back here. We # now have", "= '/* When recompiling tables with a new Unicode version,", "record in enumerate(records): print((' {' + '%6d, ' * len(record[0])", "*/\" mult = MAX_UNICODE / len(table) for i in range(0,", "'Runr', 'Shaw', 'Sinh', 'Sylo', 'Syrc', 'Tglg', 'Tagb', 'Tale', 'Taml', 'Telu',", "case, for # every Unicode character. However, a real table", "#New for Unicode 5.2 'Avst', 'Bamu', 'Egyp', 'Armi', 'Phli', 'Prti',", "used for just a single script for a # code", "10 digits. --- digitsets = [] file = open('Unicode.tables/Scripts.txt', 'r',", "first += 10 file.close() digitsets.sort() print(\"/* This table lists the", "'Knda', 'Kana', 'Khar', 'Khmr', 'Laoo', 'Latn', 'Limb', 'Linb', 'Mlym', 'Mong',", "and other_case[c + other_case[c]] == 0: other_case[c + other_case[c]] =", "80 (0x50) in table 55 in stage2 yields 458 #", "# Read the whole table in memory, setting/checking the Unicode", "re.match(r'([0-9a-fA-F]+)(\\.\\.([0-9a-fA-F]+))?$', chardata[0]) char = int(m.group(1), 16) if m.group(3) is None:", "character type, grapheme break type, # offset to caseless matching", "the common logic for reading data # sets only one", "ucp_Lo => Other letter # 12 = ucp_gbOther => Grapheme", "'Ital', 'Xpeo', 'Orya', 'Osma', 'Runr', 'Shaw', 'Sinh', 'Sylo', 'Syrc', 'Tglg',", "sets only one value, so first we go through the", "by the MultiStage2.py script. */\") print(\"/* Total size: %d bytes,", "dummy tables. 
*/\") print() print(\"#ifndef SUPPORT_UNICODE\") print(\"const ucd_record PRIV(ucd_records)[] =", "'Gujr', 'Guru', 'Hani', 'Hang', 'Hano', 'Hebr', 'Hira', 'Zinh', 'Knda', 'Kana',", "Unicode 11.0.0 'Dogra', 'Gunjala_Gondi', 'Hanifi_Rohingya', 'Makasar', 'Medefaidrin', 'Old_Sogdian', 'Sogdian', #", "# With the addition of the new Script Extensions field,", "emoji characters. This # can be set as an additional", "in each\") print(\"set of decimal digits. It is used to", "# # The ucd_script_sets vector contains lists of script numbers", "size, struct = get_record_size_struct(test[0]) assert(size == test[1]) #print struct def", "is found in the \"extracted\" subdirectory of the # Unicode", "True if script_lists[i+j] != script_numbers[j]: found = False break if", "those scripts, which are Bengali, Devanagari, Grantha, and Kannada. #", "This vector is a list of lists of scripts for", "are:\") print(\"script (8 bits), character type (8 bits), grapheme break", "min_size: min_size = size min_stage1, min_stage2, min_stage3 = stage1, stage2,", "'Marc', 'Newa', 'Osge', 'Tang', 'Gonm', 'Nshu', 'Soyo', 'Zanb', #New for", "ucd_stage1 a \"virtual\" block number. # # The ucd_stage2 table", "break_property_names.index('Other')) other_case = read_table('Unicode.tables/CaseFolding.txt', get_other_case, 0) # The grapheme breaking", "generated by the maint/MultiStage2.py script.\") print(\"Do not modify it by", "=> %5d bytes */\" % (block_size, size) if size <", "same record. One or two # are different because they", "One or two # are different because they are part", "Kannada. # # <NAME>, 03 July 2008 # Last Updated:", "in tests: size, struct = get_record_size_struct(test[0]) assert(size == test[1]) #print", "same set. The first element in the vector # contains", "Unicode property support. The new code speeds up property #", "the UCD directory. 
The emoji-data.txt file is # in files", "if block_size > ELEMS_PER_LINE: fmt = fmt * int(block_size /", "(6, 6), (6, 123456), (1, 690)], 8 ), \\ (", "print_records(records, record_size) print_table(min_stage1, 'PRIV(ucd_stage1)') print_table(min_stage2, 'PRIV(ucd_stage2)', min_block_size) print(\"#if UCD_BLOCK_SIZE !=", "to 12 bytes (multiple of 4). Set a value #", "if other_case[o] != -other_case[c]: t = o + other_case[o] #", "unicode_version = version elif unicode_version != version: print(\"WARNING: Unicode version", "have been added to the original script. # # The", "latin characters resolve to the same record. One or two", "# New for Unicode 7.0.0 'Bassa_Vah', 'Caucasian_Albanian', 'Duployan', 'Elbasan', 'Grantha',", "*/\") print() print(\"#ifndef PCRE2_PCRE2TEST\") print() print(\"#ifdef HAVE_CONFIG_H\") print(\"#include \\\"config.h\\\"\") print(\"#endif\")", "== 8: print(\"\\n \", end='') count = 0 print(\" 0x%05x,\"", "chardata[0]) char = int(m.group(1), 16) if m.group(3) is None: last", "see if any of the three characters are already #", "% d, end='') count += 1 if d == 0:", "stage2_block) size += get_tables_size(stage1, stage2, stage3) # print \"/* %5d", "HAVE_CONFIG_H\") print(\"#include \\\"config.h\\\"\") print(\"#endif\") print() print(\"#include \\\"pcre2_internal.h\\\"\") print() print(\"#endif /*", "# Print a table def print_table(table, table_name, block_size = None):", "'Saur', 'Sund', 'Vaii', #New for Unicode 5.2 'Avst', 'Bamu', 'Egyp',", "= read_table('Unicode.tables/GraphemeBreakProperty.txt', make_get_names(break_property_names), break_property_names.index('Other')) other_case = read_table('Unicode.tables/CaseFolding.txt', get_other_case, 0) #", "'Mand', #New for Unicode 6.1.0 'Cakm', 'Merc', 'Mero', 'Plrd', 'Shrd',", "record_size, record_struct = get_record_size_struct(list(records.keys())) # Find the optimum block size", "if count == 8: print(\"\\n \", end='') count = 0", "12.0.0 'Elym', 'Nand', 'Hmnp', 'Wcho' ] category_names = ['Cc', 
'Cf',", "referenced otherwise, so\") print(\"it should not matter whether it is", "Processed with 2to3, but that didn't fix everything # .", "its record. This is the # negated offset to the", "len(table) return total_size # Compress the table into the two", "it and fill in the default from Scripts. Code added", "Unicode 12.0.0 'Elym', 'Nand', 'Hmnp', 'Wcho' ] category_names = ['Cc',", "caseless_offsets, other_case, scriptx, padding_dummy) record_size, record_struct = get_record_size_struct(list(records.keys())) # Find", "dummy filler */\") print(\" }};\") print(\"#endif\") print() print(record_struct) # ---", "This table lists the code points for the '9' characters", "*/\" % len(digitsets), end='') count = 8 for d in", "its own block, and the result is the index #", "'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl',", "script. */\") print(\"/* Total size: %d bytes, block size: %d.", "and the result of the division is a float #", "a line of CaseFolding.txt def get_other_case(chardata): if chardata[1] == 'C'", "6), (690, 1)], 4 ), \\ ( [(3, 300), (6,", "to this script: # Added #! line at start #", "subdirectory. # # DerivedGeneralCategory.txt is found in the \"extracted\" subdirectory", "prefixing _pcre_. 
# Commented out stuff relating to the casefolding", "# 13-May-2014: Updated for PCRE2 # 03-June-2014: Updated for Python", "*/\") # This code was part of the original contribution,", "size min_stage1, min_stage2, min_stage3 = stage1, stage2, stage3 min_stage2_block, min_stage3_block", "{} index = [] for t in zip(*tables): i =", "into the file # 19-June-2015: Updated for Unicode 8.0.0 #", "* MAX_UNICODE offset = 1; for s in sets: for", "'PRIV(ucd_stage2)', min_block_size) print(\"#if UCD_BLOCK_SIZE != %d\" % min_block_size) print(\"#error Please", "+ \\ 'types in this structure definition from pcre2_internal.h (the", "# Adjusted data file names to take from the Unicode.tables", "we find the list 3, 15, 107, 29, # and", "type_size = [(\"uint8_t\", 1), (\"uint16_t\", 2), (\"uint32_t\", 4), (\"signed char\",", "= sorted(s) for x in s: print(' 0x%04x,' % x,", "17 is { 34, 5, 12, 0, -32, 34, 0", "'/* %d bytes, record size %d */' % (len(records) *", "same set. */\\n\") print(\"const uint32_t PRIV(ucd_digit_sets)[] = {\") print(\" %d,", "# sets only one value, so first we go through", "range(0, len(table), block_size): block = table[i:i+block_size] start = blocks.get(block) if", "create a new one. if not appended: sets.append([c, o, t])", "to make the whole thing a multiple of 4 bytes.", "definition from pcre2_internal.h (the actual\\n' + \\ 'field names will", "10 of which are currently used. # # 01-March-2010: Updated", "any of the three characters are already # part of", "of 10 characters\" % (first, last), file=sys.stderr) while first <", "by # observing that many characters have the same record,", "255), (0, 65535), (0, 4294967295), (-128, 127), (-32768, 32767), (-2147483648,", "block_size,) + table[i:i+block_size])) print(\"};\\n\") # Extract the unique combinations of", "are inserted into the main table. 
# The CaseFolding.txt file", "leave\") print(\"a totally empty module because some compilers barf at", "Added RegionalIndicator break property from Unicode 6.2.0 # 13-May-2014: Updated", "module is #included\") print(\"by the pcre2test program, which redefines the", "= ucp_gbExtend => Grapheme break property \"Extend\" # 0 =>", "file.readline()) version = f.group(1) if unicode_version == \"\": unicode_version =", "print(\"#endif\") print() print(\"#include \\\"pcre2_internal.h\\\"\") print() print(\"#endif /* PCRE2_PCRE2TEST */\") print()", "But don't leave\") print(\"a totally empty module because some compilers", "32-bit library is run in non-32-bit mode, character values\") print(\"greater", "has increased # their size from 8 to 12 bytes,", "= [0] script_abbrevs_default = script_abbrevs.index('Zzzz') scriptx = read_table('Unicode.tables/ScriptExtensions.txt', get_script_extension, script_abbrevs_default)", "offsets in a list of lists of # multiple scripts.", "'Sidd', 'Tirh', 'Wara', #New for Unicode 8.0.0 'Ahom', 'Hluw', 'Hatr',", "print(record_struct) # --- Added by PH: output the table of", "case # 27 = ucp_Hiragana => No special Script Extension", "of code that contains no branches, which makes for greater", "Code added by PH # in October 2018. Positive values", "[2 ** i for i in range(5,10)]: size = len(records)", "0 for y in [c, o, t]: for x in", "database (UCD) on the Unicode web site; GraphemeBreakProperty.txt is #", "ELEMS_PER_LINE: el = ELEMS_PER_LINE else: el = block_size fmt =", "in block 96 (0x60) # lookup 96 in stage1 table", "for Unicode 10.0.0 'Adlam', 'Bhaiksuki', 'Marchen', 'Newa', 'Osage', 'Tangut', 'Masaram_Gondi',", "7 = ucp_Lo => Other letter # 12 = ucp_gbOther", "Unicode 8.0.0 'Ahom', 'Hluw', 'Hatr', 'Mult', 'Hung', 'Sgnw', #New for", "-slice_size size += slice_size structure += '%s property_%d;\\n' % (slice_type,", "all # match each other caselessly. 
Later in this script", "'Osage', 'Tangut', 'Masaram_Gondi', 'Nushu', 'Soyombo', 'Zanabazar_Square', # New for Unicode", "print(\"const uint16_t PRIV(ucd_stage1)[] = {0};\") print(\"const uint16_t PRIV(ucd_stage2)[] = {0};\")", "print() print(\"/* If the 32-bit library is run in non-32-bit", "break_props = read_table('Unicode.tables/GraphemeBreakProperty.txt', make_get_names(break_property_names), break_property_names.index('Other')) other_case = read_table('Unicode.tables/CaseFolding.txt', get_other_case, 0)", "Unicode 11.0.0 # 07-July-2018: Added code to scan emoji-data.txt for", "= 1 # Add new characters to an existing set", "# # Conceptually, there is a table of records (of", "file there are lines # to be ignored (returning the", "yields 0 # lookup 97 (0x61) in the first table", "'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd',", "fix everything # . Changed string.strip to str.strip # .", "fill in the default from Scripts. Code added by PH", "appropriate offsets for the characters. caseless_offsets = [0] * MAX_UNICODE", "SUPPORT_UNICODE\") print(\"const ucd_record PRIV(ucd_records)[] = {{0,0,0,0,0,0,0 }};\") print(\"const uint16_t PRIV(ucd_stage1)[]", "characters. Each list is terminated # by zero (ucp_Unknown). A", "the field 16 bits. padding_dummy = [0] * MAX_UNICODE padding_dummy[0]", "1; for s in sets: for x in s: caseless_offsets[x]", "} # 27 = ucp_Hiragana => Hiragana script # 7", "print(\"As well as being part of the PCRE2 library, this", "+= get_tables_size(stage1, stage2, stage3) # print \"/* %5d / %3d", "is a table of \"virtual\" blocks; each block is indexed", "bytes, block size: %d. */\" % (min_size, min_block_size)) print() print(\"/*", "# 02-July-2017: Updated for Unicode 10.0.0 # 03-July-2018: Updated for", "'Deva', 'Ethi', 'Geor', 'Glag', 'Goth', 'Grek', 'Gujr', 'Guru', 'Hani', 'Hang',", "= get_record_size_struct(test[0]) assert(size == test[1]) #print struct def print_records(records, record_size):", "each other caselessly. 
Later in this script a table of", "Extensions properties of certain characters. Each list is terminated #", "size) if size < min_size: min_size = size min_stage1, min_stage2,", "table are added to the main output records. This new", "be efficiently compressed by # observing that many characters have", "slice_size - 1) & -slice_size size += slice_size structure +=", "just a single script for a # code point. Negative", "slice_type, slice_size = get_type_size(record_slice) # add padding: round up to", "characters are already # part of a set. If so,", "of a caseless set # 0 => No other case", "Added code to scan the emoji-data.txt file to find the", "Unicode 6.1.0 'Chakma', 'Meroitic_Cursive', 'Meroitic_Hieroglyphs', 'Miao', 'Sharada', 'Sora_Sompeng', 'Takri', #", "= False break if found: return -i # Not found", "% min_block_size) print(\"#error Please correct UCD_BLOCK_SIZE in pcre2_internal.h\") print(\"#endif\") print(\"#endif", "the Unicode web site; GraphemeBreakProperty.txt is # in the \"auxiliary\"", "# code scans CaseFolding.txt instead of UnicodeData.txt, which is no", "commented out as it # was never used. 
A two-stage", "d in this_script_list: script_numbers.append(script_abbrevs.index(d)) script_numbers.append(0) script_numbers_length = len(script_numbers) for i", "need the table of digit\") print(\"sets, nor the the large", "Added code to search for sets of more than two", "= ucp_gbOther => Grapheme break property \"Other\" # 0 =>", "( [(3, 300), (6, 6), (6, 340), (1, 690)], 4", "for line in file: m = re.match(r'([0-9a-fA-F]+)\\.\\.([0-9a-fA-F]+)\\s+;\\s+\\S+\\s+#\\s+Nd\\s+', line) if m", "\"virtual\" blocks; each block is indexed by # the offset", "for i in range(5,10)]: size = len(records) * record_size stage1,", "line.split(';'))) if len(chardata) <= 1: continue if chardata[1] != \"Extended_Pictographic\":", "*/\" % (block_size, size) if size < min_size: min_size =", "== default_value: table[i] = value file.close() return table # Get", "of scripts for Unicode 6.0.0 # July-2012: Updated list of", "This file was autogenerated by the MultiStage2.py script. */\") print(\"/*", "+ 1 # End of block of code for creating", "the use of PCRE maintainers, to # generate the pcre2_ucd.c", "'Telugu', 'Thaana', 'Thai', 'Tibetan', 'Tifinagh', 'Ugaritic', 'Yi', # New for", "in digitsets: if count == 8: print(\"\\n \", end='') count", "the addition of the new Script Extensions field, we need", "'Buhid', 'Canadian_Aboriginal', 'Cherokee', 'Common', 'Coptic', 'Cypriot', 'Cyrillic', 'Deseret', 'Devanagari', 'Ethiopic',", "instance of every unique record that is # required. The", "same set of records as # other blocks. This leads", "f.group(1) version_pat = r\"^# \" + re.escape(file_base) + r\"-(\\d+\\.\\d+\\.\\d+)\\.txt$\" file", "new field for Script Extensions # 27-July-2019: Updated for Unicode", "records = combine_tables(script, category, break_props, caseless_offsets, other_case, scriptx, padding_dummy) record_size,", "characters. caseless_offsets = [0] * MAX_UNICODE offset = 1; for", "of the Unicode # data tables. 
A number of extensions", "line of Scripts.txt, GraphemeBreakProperty.txt or DerivedGeneralCategory.txt def make_get_names(enum): return lambda", "# greater than 255 to make the field 16 bits.", "=> Latin script # 5 = ucp_Ll => Lower case", "'Mroo', 'Nbat', 'Narb', 'Perm', 'Hmng', 'Palm', 'Phlp', 'Pauc', 'Sidd', 'Tirh',", "Add #ifndef SUPPORT_UCP to use dummy tables when no UCP", "--- Added by PH: output the table of caseless character", "grapheme break property, because the default for # all the", "property values table = tuple(table) for i in range(0, len(table),", "to put Unicode version into the file # 19-June-2015: Updated", "bytes, only 10 of which are currently used. # #", "'Unknown' script name # 03-October-2018: Added new field for Script", "5.2.0 # 30-April-2011: Updated list of scripts for Unicode 6.0.0", "saving - maybe the guy linked\") print(\"all the modules rather", "'Balinese', 'Cuneiform', 'Nko', 'Phags_Pa', 'Phoenician', # New for Unicode 5.1", "is #included\") print(\"by the pcre2test program, which redefines the PRIV", "records. This new # code scans CaseFolding.txt instead of UnicodeData.txt,", "12-August-2014: Updated to put Unicode version into the file #", "'Myanmar', 'New_Tai_Lue', 'Ogham', 'Old_Italic', 'Old_Persian', 'Oriya', 'Osmanya', 'Runic', 'Shavian', 'Sinhala',", "block numbers (indices into stage 2 table) stage2 = []", "\\ 'types in this structure definition from pcre2_internal.h (the actual\\n'", "=> Dummy value, unused at present # # Example: vedic", "the new set. appended = 0 for s in sets:", "'Phlp', 'Pauc', 'Sidd', 'Tirh', 'Wara', #New for Unicode 8.0.0 'Ahom',", "not. However\") print(\"a comment was received about space saving -", "'Medf', 'Sogo', 'Sogd', #New for Unicode 12.0.0 'Elym', 'Nand', 'Hmnp',", "'', line) chardata = list(map(str.strip, line.split(';'))) if len(chardata) <= 1:", "containing a # script number, script extension value, character type,", "too big. 
It can be efficiently compressed by # observing", "the file # 19-June-2015: Updated for Unicode 8.0.0 # 02-July-2017:", "[0] script_abbrevs_default = script_abbrevs.index('Zzzz') scriptx = read_table('Unicode.tables/ScriptExtensions.txt', get_script_extension, script_abbrevs_default) for", "# written out. However, we have to do this work", "For these we set up a\") print(\"special record. */\") print()", "is in block 57 (0x39) # lookup 57 in stage1", "UCD directory. The emoji-data.txt file is # in files associated", "# Added code to add a Script Extensions field to", "break property \"Extend\" # 0 => Not part of a", "Unicode character properties using short # sequences of code that", "\\ ( [(3, 100000), (6, 6), (6, 123456), (1, 690)],", "'Gong', 'Rohg', 'Maka', 'Medf', 'Sogo', 'Sogd', #New for Unicode 12.0.0", "of the relevant list in the ucd_script_sets # vector. #", "file, but we list it here so that the name", "record_slice = [record[i] for record in records] slice_type, slice_size =", "for Unicode 5.1 'Cari', 'Cham', 'Kali', 'Lepc', 'Lyci', 'Lydi', 'Olck',", "letter # 12 = ucp_gbOther => Grapheme break property \"Other\"", "in order, and is terminated by NOTACHAR (0xffffffff), which is", "sublist is zero-terminated. 
*/\\n\") print(\"const uint8_t PRIV(ucd_script_sets)[] = {\") count", "table contains # lists of characters that all match each", "optimum block size for 3-stage table min_size = sys.maxint for", "return script_abbrevs.index(this_script_list[0]) script_numbers = [] for d in this_script_list: script_numbers.append(script_abbrevs.index(d))", "print() print(\"#if PCRE2_CODE_UNIT_WIDTH == 32\") print(\"const ucd_record PRIV(dummy_ucd_record)[] = {{\")", "len(chardata) <= 1: continue value = get_value(chardata) m = re.match(r'([0-9a-fA-F]+)(\\.\\.([0-9a-fA-F]+))?$',", "m.group(3) is None: last = char else: last = int(m.group(3),", "(len(records) * record_size, record_size)) records = list(zip(list(records.keys()), list(records.values()))) records.sort(key =", "if found: return -i # Not found in existing lists", "character's block number, # which is the character's code point", "else: last = int(m.group(3), 16) for i in range(char, last", "from the same set. The first element in the vector", "support is needed. # Update for PCRE2: name changes, and", "= f.group(1) version_pat = r\"^# \" + re.escape(file_base) + r\"-(\\d+\\.\\d+\\.\\d+)\\.txt$\"", "use of PCRE maintainers, to # generate the pcre2_ucd.c file", "we need to find the Extended_Pictographic property for emoji characters.", "be a Script Extension # value), then scan it and", "'Zanabazar_Square', # New for Unicode 11.0.0 'Dogra', 'Gunjala_Gondi', 'Hanifi_Rohingya', 'Makasar',", "(UCD) on the Unicode web site; GraphemeBreakProperty.txt is # in", "<= minval and maxval <= maxlimit: return type_size[num] else: raise", "Updated for Unicode 7.0.0 # 12-August-2014: Updated to put Unicode", "value. break_property_names = ['CR', 'LF', 'Control', 'Extend', 'Prepend', 'SpacingMark', 'L',", "look up Unicode character properties using short # sequences of", "done when updating to Unicode 11.0.0 (July 2018). 
# #", "type for the values def get_type_size(table): type_size = [(\"uint8_t\", 1),", "# # <NAME>, 03 July 2008 # Last Updated: 07", "for the Extended # Pictographic property. # 01-October-2018: Added the", "/* script extension */\") print(\" 0, /* dummy filler */\")", "\"other\". We scan the emoji-data.txt file and modify the #", "large main UCD tables. */\") print() print(\"#ifndef PCRE2_PCRE2TEST\") print() #", "pcre2_internal.h\") print(\"#endif\") print(\"#endif /* SUPPORT_UNICODE */\") print() print(\"#endif /* PCRE2_PCRE2TEST", "+= '%s property_%d;\\n' % (slice_type, i) # round up to", "'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc', 'Me',", "for s in sets: for x in s: caseless_offsets[x] =", "lookup values. # # Example: lowercase \"a\" (U+0061) is in", "print(\"#if UCD_BLOCK_SIZE != %d\" % min_block_size) print(\"#error Please correct UCD_BLOCK_SIZE", "2018 ############################################################################## import re import string import sys MAX_UNICODE =", "\"return\" # offsets for those that are not already set.", "PCRE2_PCRE2TEST */\") print() print(\"/* Unicode character database. */\") print(\"/* This", "support. The new code speeds up property # matching many", "+= \", block = %d\" % block_size print(s + \"", "print(\"#ifndef PCRE2_PCRE2TEST\") print() print(\"#ifdef HAVE_CONFIG_H\") print(\"#include \\\"config.h\\\"\") print(\"#endif\") print() print(\"#include", "of Scripts.txt, GraphemeBreakProperty.txt or DerivedGeneralCategory.txt def make_get_names(enum): return lambda chardata:", "(6, 340), (1, 690)], 4 ), \\ ( [(300, 300),", "code scans CaseFolding.txt instead of UnicodeData.txt, which is no longer", "# Update for Python3: # . 
Processed with 2to3, but", "print('};\\n') script_names = ['Unknown', 'Arabic', 'Armenian', 'Bengali', 'Bopomofo', 'Braille', 'Buginese',", "= ucp_Latin => No special Script Extension property # 0", "Unicode version differs in %s\", file_name, file=sys.stderr) table = [default_value]", "digitsets.sort() print(\"/* This table lists the code points for the", "set if found: found = 0 for y in [c,", "first + 1) % 10) != 0: print(\"ERROR: %04x..%04x does", "(6, 6), (340, 6), (690, 1)], 4 ), \\ (", "script_numbers_length): found = True if script_lists[i+j] != script_numbers[j]: found =", "if ((last - first + 1) % 10) != 0:", "to get the Unicode records up to 12 bytes (multiple", "end='') print(\"\\n};\\n\") # Output the main UCD tables. print(\"/* These", "%5d => %5d bytes */\" % (block_size, size) if size", "Extended_Pictographic property for emoji characters. This # can be set", "# in October 2018. Positive values are used for just", "of scripts for Unicode 5.2.0 # 30-April-2011: Updated list of", "uses the # final hole in the structure. # 30-September-2012:", "sets is # written out. 
However, we have to do", "d in script_lists: print(\" %3d,\" % d, end='') count +=", "for the values def get_type_size(table): type_size = [(\"uint8_t\", 1), (\"uint16_t\",", "<= maxlimit: return type_size[num] else: raise OverflowError(\"Too large to fit", "range(2,6)]: stage_i, stage3 = compress_table(table, stage3_block) for stage2_block in [2", "1): if break_props[i] != break_property_names.index('Other'): print(\"WARNING: Emoji 0x%x has break", "chardata: enum.index(chardata[1]) # Parse a line of CaseFolding.txt def get_other_case(chardata):", "stuff relating to the casefolding table, which isn't used; #", "first we go through the table and set \"return\" #", "OverflowError(\"Too large to fit into C types\") def get_tables_size(*tables): total_size", "'Cypriot', 'Cyrillic', 'Deseret', 'Devanagari', 'Ethiopic', 'Georgian', 'Glagolitic', 'Gothic', 'Greek', 'Gujarati',", "'Wara', #New for Unicode 8.0.0 'Ahom', 'Hluw', 'Hatr', 'Mult', 'Hung',", "def get_other_case(chardata): if chardata[1] == 'C' or chardata[1] == 'S':", "[ \\ ( [(3,), (6,), (6,), (1,)], 1 ), \\", "characters in each\") print(\"set of decimal digits. It is used", "different because they are part of a multi-character caseless set", "is empty; this is used for characters # that are", "(34, 6), (68, 1)], 2 ), \\ ( [(300, 3),", "set). # # Example: hiragana letter A (U+3042) is in", "PRIV(ucd_stage1)[] = {0};\") print(\"const uint16_t PRIV(ucd_stage2)[] = {0};\") print(\"const uint32_t", "_pcre_. # Commented out stuff relating to the casefolding table,", "file to find the Extended Pictographic # property, which is", "the same record. 
One or two # are different because", "min_size = sys.maxsize for block_size in [2 ** i for", "# Update for PCRE2: name changes, and SUPPORT_UCP is abolished.", "guy linked\") print(\"all the modules rather than using a library", "code to search for sets of more than two characters", "code tidy # Adjusted data file names to take from", "min_size: min_size = size min_stage1, min_stage2 = stage1, stage2 min_block_size", "print() print(record_struct) # --- Added by PH: output the table", "'Sogdian', # New for Unicode 12.0.0 'Elymaic', 'Nandinagari', 'Nyiakeng_Puachue_Hmong', 'Wancho'", "(multiple of 4). Set a value # greater than 255", "'Sind', 'Lina', 'Mahj', 'Mani', 'Mend', 'Modi', 'Mroo', 'Nbat', 'Narb', 'Perm',", "#New for Unicode 6.0.0 'Batk', 'Brah', 'Mand', #New for Unicode", "total_size = 0 for table in tables: type, size =", "t]: for x in s: if x == y: found", "16 bits. padding_dummy = [0] * MAX_UNICODE padding_dummy[0] = 256", "# 0 => No other case # 27 = ucp_Hiragana", "list of scripts for Unicode 6.1.0 # 20-August-2012: Added scan", "This leads to a 2-stage lookup process. # # This", "optimum block size for the two-stage table min_size = sys.maxsize", "= {0};\") print(\"#else\") print() print(\"const char *PRIV(unicode_version) = \\\"{}\\\";\".format(unicode_version)) print()", "/* %3d */') % (record[0] + (i,))) print('};\\n') script_names =", "block_size stage2 += block blocks[block] = start stage1.append(start) return stage1,", "command # # [python3] ./MultiStage2.py >../src/pcre2_ucd.c # # It requires", "to the open() call # . 
Inserted 'int' before blocksize/ELEMS_PER_LINE", "0 structure = '/* When recompiling tables with a new", "The main tables generated by this script are used by", "print() print(\"#endif /* PCRE2_PCRE2TEST */\") print() print(\"/* Unicode character database.", "# Multistage table builder # (c) <NAME>, 2008 ############################################################################## #", "contains the number of subsequent elements, which are in ascending", "is { 34, 5, 12, 0, -32, 34, 0 }", "# Find the optimum block size for the two-stage table", "special Script Extension property # 0 => Dummy value, unused", "change the actual lookup values. # # Example: lowercase \"a\"", "emoji-data.txt for the Extended # Pictographic property. # 01-October-2018: Added", "ucp_Hiragana => Hiragana script # 7 = ucp_Lo => Other", "for Unicode 11.0.0 # 07-July-2018: Added code to scan emoji-data.txt", "in tables: type, size = get_type_size(table) total_size += size *", "[default_value] * MAX_UNICODE for line in file: line = re.sub(r'#.*',", "len(s) + 1 # End of block of code for", "Number of subsequent values */\" % len(digitsets), end='') count =", "the sets of 10 digits. 
--- digitsets = [] file", "record_struct = get_record_size_struct(list(records.keys())) # Find the optimum block size for", "ucd_record PRIV(ucd_records)[] = { ' + \\ '/* %d bytes,", "+ slice_size - 1) & -slice_size structure += '} ucd_record;\\n*/\\n'", "category_names.index('Cn')) break_props = read_table('Unicode.tables/GraphemeBreakProperty.txt', make_get_names(break_property_names), break_property_names.index('Other')) other_case = read_table('Unicode.tables/CaseFolding.txt', get_other_case,", "file, setting 'Unknown' as the default (this will never be", "= {{\") print(\" ucp_Unknown, /* script */\") print(\" ucp_Cn, /*", "[python3] ./MultiStage2.py >../src/pcre2_ucd.c # # It requires six Unicode data", "def make_get_names(enum): return lambda chardata: enum.index(chardata[1]) # Parse a line", "of lists of # multiple scripts. Initialize this list with", "'Tagb', 'Tale', 'Taml', 'Telu', 'Thaa', 'Thai', 'Tibt', 'Tfng', 'Ugar', 'Yiii',", "Script Extensions # 27-July-2019: Updated for Unicode 12.1.0 # ----------------------------------------------------------------------------", "print(\"Instead, just supply some small dummy tables. */\") print() print(\"#ifndef", "is used for characters # that are not part of", "two-stage UCD tables. The fields in each record are:\") print(\"script", "Parse a line of ScriptExtensions.txt def get_script_extension(chardata): this_script_list = list(chardata[1].split('", "= char else: last = int(m.group(3), 16) for i in", "line in file: m = re.match(r'([0-9a-fA-F]+)\\.\\.([0-9a-fA-F]+)\\s+;\\s+\\S+\\s+#\\s+Nd\\s+', line) if m is", "'Runic', 'Shavian', 'Sinhala', 'Syloti_Nagri', 'Syriac', 'Tagalog', 'Tagbanwa', 'Tai_Le', 'Tamil', 'Telugu',", "in the default from Scripts. Code added by PH #", "now been upgraded to Python 3 for PCRE2, and should", "code that contains no branches, which makes for greater speed.", "caseless sets. 
# Now scan the sets and set appropriate", "4294967295), (-128, 127), (-32768, 32767), (-2147483648, 2147483647)] minval = min(table)", "'Tai_Le', 'Tamil', 'Telugu', 'Thaana', 'Thai', 'Tibetan', 'Tifinagh', 'Ugaritic', 'Yi', #", "# New for Unicode 11.0.0 'Dogra', 'Gunjala_Gondi', 'Hanifi_Rohingya', 'Makasar', 'Medefaidrin',", "chardata[1] == 'S': return int(chardata[2], 16) - int(chardata[0], 16) return", "values are negated offsets in a list of lists of", "of the three characters are already # part of a", "UTF support.\") print(\"This module should not be referenced otherwise, so\")", "55 in stage2 yields 458 # record 458 is {", "'int' before blocksize/ELEMS_PER_LINE because an int is # required and", "57 in stage1 table yields 55 # lookup 80 (0x50)", "two characters that must all # match each other caselessly.", "tables. */\") print() print(\"#ifndef SUPPORT_UNICODE\") print(\"const ucd_record PRIV(ucd_records)[] = {{0,0,0,0,0,0,0", "'PRIV(ucd_stage1)') print_table(min_stage2, 'PRIV(ucd_stage2)', min_block_size) print(\"#if UCD_BLOCK_SIZE != %d\" % min_block_size)", "uint32_t PRIV(ucd_digit_sets)[] = {\") print(\" %d, /* Number of subsequent", "out as it # was never used. A two-stage table", "the optimum block size for 3-stage table min_size = sys.maxint", "lists of script numbers that are the # Script Extensions", "the actual lookup values. # # Example: lowercase \"a\" (U+0061)", "len(digitsets), end='') count = 8 for d in digitsets: if", "had a hole in it, so the resulting table is", "'Hung', 'Sgnw', #New for Unicode 10.0.0 'Adlm', 'Bhks', 'Marc', 'Newa',", "cut out the tables when not needed. But don't leave\")", "6.0.0 'Batk', 'Brah', 'Mand', #New for Unicode 6.1.0 'Cakm', 'Merc',", "(8 bits), offset to other case\") print(\"or zero (32 bits,", "tables. 
The ucd_caseless_sets table contains # lists of characters that", "+= block blocks[block] = start stage1.append(start) return stage1, stage2 #", "addition of the new Script Extensions field, we need some", "'L', 'V', 'T', 'LV', 'LVT', 'Regional_Indicator', 'Other', 'ZWJ', 'Extended_Pictographic' ]", "print(\" %3d,\" % d, end='') count += 1 if d", "one. if not appended: sets.append([c, o, t]) # End of", "./MultiStage2.py >../src/pcre2_ucd.c # # It requires six Unicode data tables:", "'Thaa', 'Thai', 'Tibt', 'Tfng', 'Ugar', 'Yiii', #New for Unicode 5.0", "other case */\") print(\" ucp_Unknown, /* script extension */\") print(\"", "of more than two characters that must match # each", "'S': return int(chardata[2], 16) - int(chardata[0], 16) return 0 #", "'Cuneiform', 'Nko', 'Phags_Pa', 'Phoenician', # New for Unicode 5.1 'Carian',", "logic for reading data # sets only one value, so", "in range(MAX_UNICODE): o = c + other_case[c] # Trigger when", "value in its record. This is the # negated offset", "[(300,), (600,), (600,), (100,)], 2 ), \\ ( [(25, 3),", "'Lyci', 'Lydi', 'Olck', 'Rjng', 'Saur', 'Sund', 'Vaii', #New for Unicode", "Now scan again and create equivalence sets. sets = []", "Not found in existing lists return_value = len(script_lists) script_lists.extend(script_numbers) return", "a new Unicode version, please check the\\n' + \\ 'types", "Stage 2 table contains the blocks with property values table", "# file, setting 'Unknown' as the default (this will never", "lists of scripts for the Script Extension\") print(\"property. 
Each sublist", "+= 1 if d == 0: print(\"\\n /* %3d */\"", "last = int(m.group(3), 16) for i in range(char, last +", "# 27 = ucp_Hiragana => No special Script Extension property", "# New for Unicode 6.1.0 'Chakma', 'Meroitic_Cursive', 'Meroitic_Hieroglyphs', 'Miao', 'Sharada',", "'Inscriptional_Pahlavi', 'Inscriptional_Parthian', 'Javanese', 'Kaithi', 'Lisu', 'Meetei_Mayek', 'Old_South_Arabian', 'Old_Turkic', 'Samaritan', 'Tai_Tham',", "actual lookup values. # # Example: lowercase \"a\" (U+0061) is", "read Scripts.txt again for the sets of 10 digits. ---", "characters in a block) have the same set of records", "block. The result of a lookup in ucd_stage1 a \"virtual\"", "UCD tables. The fields in each record are:\") print(\"script (8", "/* Number of subsequent values */\" % len(digitsets), end='') count", "'%6d, ' * len(record[0]) + '}, /* %3d */') %", "'Dupl', 'Elba', 'Gran', 'Khoj', 'Sind', 'Lina', 'Mahj', 'Mani', 'Mend', 'Modi',", "# that are not part of any list. # #", "and run it\") print(\"to regenerate this code.\") print() print(\"As well", "case-equivalent. if other_case[o] != -other_case[c]: t = o + other_case[o]", "one character. Each list is terminated by NOTACHAR. */\\n\") print(\"const", "# 03-July-2018: Updated for Unicode 11.0.0 # 07-July-2018: Added code", "Set a value # greater than 255 to make the", "in range(0, MAX_UNICODE): if scriptx[i] == script_abbrevs_default: scriptx[i] = script[i]", "1 if not found: s.append(y) appended = 1 # If", "def get_record_size_struct(records): size = 0 structure = '/* When recompiling", "#! line at start # Removed tabs # Made it", "previously set # value because in the CaseFolding file there", "= re.sub(r'#.*', '', line) chardata = list(map(str.strip, line.split(';'))) if len(chardata)", "at that.\") print(\"Instead, just supply some small dummy tables. */\")", "# now have three characters that are case-equivalent. 
if other_case[o]", "table contains one instance of every unique record that is", "the sets and set appropriate offsets for the characters. caseless_offsets", "%5d bytes */\" % (block_size, size) if size < min_size:", "records as # other blocks. This leads to a 2-stage", "*/\") print() print(\"#if PCRE2_CODE_UNIT_WIDTH == 32\") print(\"const ucd_record PRIV(dummy_ucd_record)[] =", "0: print(\"ERROR: %04x..%04x does not contain a multiple of 10", "one value, so first we go through the table and", "big. It can be efficiently compressed by # observing that", "'Thaana', 'Thai', 'Tibetan', 'Tifinagh', 'Ugaritic', 'Yi', # New for Unicode", "%d\" % block_size print(s + \" */\") table = tuple(table)", "script a table of these sets is # written out.", "structure in array record_slice = [record[0] for record in records]", "= [default_value] * MAX_UNICODE for line in file: line =", "minlimit <= minval and maxval <= maxlimit: return type_size[num] else:", "updating to Unicode 11.0.0 (July 2018). # # Added code", "used to ensure that all the digits in\") print(\"a script", "0 } # 28 = ucp_Inherited => Script inherited from", "5.1 'Cari', 'Cham', 'Kali', 'Lepc', 'Lyci', 'Lydi', 'Olck', 'Rjng', 'Saur',", "the # maint/Unicode.tables subdirectory. # # DerivedGeneralCategory.txt is found in", "range(char, last + 1): # It is important not to", "such a set). # # Example: hiragana letter A (U+3042)", "# http://unicode.org/Public/emoji/11.0/emoji-data.txt # # ----------------------------------------------------------------------------- # Minor modifications made to", "{ 27, 7, 12, 0, 0, 27, 0 } #", "we set up a\") print(\"special record. */\") print() print(\"#if PCRE2_CODE_UNIT_WIDTH", "script extension */\") print(\" 0, /* dummy filler */\") print(\"", "'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp',", "up to 12 bytes (multiple of 4). 
Set a value", "NOTACHAR,\") for s in sets: s = sorted(s) for x", "False break if found: return -i # Not found in", "97 (0x61) in the first table in stage2 yields 17", "some padding # to get the Unicode records up to", "= size min_stage1, min_stage2 = stage1, stage2 min_block_size = block_size", "# (c) <NAME>, 2008 ############################################################################## # This script was submitted", "please check the\\n' + \\ 'types in this structure definition", "[(300, 300), (6, 6), (6, 340), (1, 690)], 4 ),", "UnicodeData.txt, which is no longer # used. # # Update", "stage1 = [] # Stage 1 table contains block numbers", "must match # each other caselessly. A new table is", "set (for # example, k, K and the Kelvin symbol", "of # multiple scripts. Initialize this list with a single", "min_block_size) print(\"#if UCD_BLOCK_SIZE != %d\" % min_block_size) print(\"#error Please correct", "ucd_caseless_sets table contains # lists of characters that all match", "el = ELEMS_PER_LINE else: el = block_size fmt = \"%3d,\"", ". Processed with 2to3, but that didn't fix everything #", "Extensions # 27-July-2019: Updated for Unicode 12.1.0 # ---------------------------------------------------------------------------- #", "No other case # 27 = ucp_Hiragana => No special", "if chardata[1] != \"Extended_Pictographic\": continue m = re.match(r'([0-9a-fA-F]+)(\\.\\.([0-9a-fA-F]+))?$', chardata[0]) char", "size # of each block. The result of a lookup", "stage2_block, stage3_block print \"/* Total size: %d bytes\" % min_size", "break_property_names[break_props[i]], file=sys.stderr) break_props[i] = break_property_names.index('Extended_Pictographic') file.close() # The Script Extensions", "= [0] * MAX_UNICODE offset = 1; for s in", "690)], 8 ), \\ ( [(100000, 300), (6, 6), (123456,", "0x10ffff may be encountered. 
For these we set up a\")", "non-32-bit mode, character values\") print(\"greater than 0x10ffff may be encountered.", "len(script_numbers) for i in range(1, len(script_lists) - script_numbers_length + 1):", "property # matching many times. The script is for the", "is # in the \"auxiliary\" subdirectory. Scripts.txt, ScriptExtensions.txt, and #", "file=sys.stderr) table = [default_value] * MAX_UNICODE for line in file:", "4 ), \\ ( [(3, 300), (6, 6), (6, 340),", "Unicode Technical Standard #51 (\"Unicode Emoji\"), # for example: #", "are used for just a single script for a #", "# If we have not added to an existing set,", "script_numbers_length = len(script_numbers) for i in range(1, len(script_lists) - script_numbers_length", "= [] file = open('Unicode.tables/Scripts.txt', 'r', encoding='utf-8') for line in", "character type (8 bits), grapheme break property (8 bits),\") print(\"offset", "# print \"/* %5d / %3d => %5d bytes */\"", "'Mend', 'Modi', 'Mroo', 'Nbat', 'Narb', 'Perm', 'Hmng', 'Palm', 'Phlp', 'Pauc',", "subsequent elements, which are in ascending order. # # The", "set with the new set. appended = 0 for s", "for the two-stage table min_size = sys.maxsize for block_size in", "10.0.0 # 03-July-2018: Updated for Unicode 11.0.0 # 07-July-2018: Added", "to multichar other cases or zero (8 bits), offset to", "caseless set # 0 => No other case # -101", "return stage1, stage2 # Print a table def print_table(table, table_name,", "Unicode 6.0.0 'Batak', 'Brahmi', 'Mandaic', # New for Unicode 6.1.0", "correct for the Unicode 11.0.0 database. 
Future # updates may", "'Hatr', 'Mult', 'Hung', 'Sgnw', #New for Unicode 10.0.0 'Adlm', 'Bhks',", "(16 bits, signed), and a dummy\") print(\"16-bit field to make", "for Unicode 12.0.0 'Elym', 'Nand', 'Hmnp', 'Wcho' ] category_names =", "20-June-2014: Updated for Unicode 7.0.0 # 12-August-2014: Updated to put", "or zero (8 bits), offset to other case\") print(\"or zero", "version = f.group(1) if unicode_version == \"\": unicode_version = version", "return index, records def get_record_size_struct(records): size = 0 structure =", "(-0x20) => Other case is U+0041 # 34 = ucp_Latin", "for line in file: line = re.sub(r'#.*', '', line) chardata", "i in range(len(records[0])): record_slice = [record[i] for record in records]", "other_case = read_table('Unicode.tables/CaseFolding.txt', get_other_case, 0) # The grapheme breaking rules", "# any valid character. The first list is empty; this", "abolished. # # Major modifications made to this script: #", "more than two characters that must all # match each", "record 458 is { 28, 12, 3, 0, 0, -101,", "for PCRE2 # 03-June-2014: Updated for Python 3 # 20-June-2014:", "690)], 4 ), \\ ( [(300, 300), (6, 6), (6,", "# negated offset to the start of the relevant list", "New for Unicode 6.1.0 'Chakma', 'Meroitic_Cursive', 'Meroitic_Hieroglyphs', 'Miao', 'Sharada', 'Sora_Sompeng',", "or not. However\") print(\"a comment was received about space saving", "None: # Allocate a new block start = len(stage2) /", "script name # 03-October-2018: Added new field for Script Extensions", "for greater speed. # # Conceptually, there is a table", "observing that many characters have the same record, and many", "is commented out as it # was never used. A", "with Unicode Technical Standard #51 (\"Unicode Emoji\"), # for example:", "break property %s, not 'Other'\", i, break_property_names[break_props[i]], file=sys.stderr) break_props[i] =", "bigger than before. 
# 18-September-2012: Added code for multiple caseless", "*/\") print(\" }};\") print(\"#endif\") print() print(record_struct) # --- Added by", "to do this work here in order to compute the", "valid character. The first list is empty; this is used", "== 32\") print(\"const ucd_record PRIV(dummy_ucd_record)[] = {{\") print(\" ucp_Unknown, /*", "'Old_North_Arabian', 'Old_Permic', 'Pahawh_Hmong', 'Palmyrene', 'Psalter_Pahlavi', 'Pau_Cin_Hau', 'Siddham', 'Tirhuta', 'Warang_Citi', #", "size * len(table) return total_size # Compress the table into", "file = open(file_name, 'r', encoding='utf-8') f = re.match(version_pat, file.readline()) version", "96 (0x60) # lookup 96 in stage1 table yields 90", "already set # data. if table[i] == default_value: table[i] =", "'Psalter_Pahlavi', 'Pau_Cin_Hau', 'Siddham', 'Tirhuta', 'Warang_Citi', # New for Unicode 8.0.0", "to an existing set, create a new one. if not", "o + other_case[o] # Scan the existing sets to see", "first item of the next structure in array record_slice =", "offsets into the table are added to the main output", "ELEMS_PER_LINE): print(fmt % (table[i:i+ELEMS_PER_LINE] + (int(i * mult),))) else: if", "efficiently compressed by # observing that many characters have the", "Example: hiragana letter A (U+3042) is in block 96 (0x60)", "Grapheme break property \"Extend\" # 0 => Not part of", "%d */\\n\" + fmt) % ((i / block_size,) + table[i:i+block_size]))", "is actually\") print(\"needed. */\") print() print(\"#ifndef PCRE2_PCRE2TEST\") print() print(\"#ifdef HAVE_CONFIG_H\")", "part of the original contribution, but is commented out as", "'Aghb', 'Dupl', 'Elba', 'Gran', 'Khoj', 'Sind', 'Lina', 'Mahj', 'Mani', 'Mend',", "Script value. Parse the # file, setting 'Unknown' as the", "PCRE maintainers, to # generate the pcre2_ucd.c file that contains", "to be used with # any of those scripts, which", "are the # Script Extensions properties of certain characters. 
Each", "*PRIV(unicode_version) = \\\"{}\\\";\".format(unicode_version)) print() print(\"/* If the 32-bit library is", "the CaseFolding file there are lines # to be ignored", "'Katakana', 'Kharoshthi', 'Khmer', 'Lao', 'Latin', 'Limbu', 'Linear_B', 'Malayalam', 'Mongolian', 'Myanmar',", "\" */\") table = tuple(table) if block_size is None: fmt", "list of lists of # multiple scripts. Initialize this list", "in s: caseless_offsets[x] = offset offset += len(s) + 1", "- maybe the guy linked\") print(\"all the modules rather than", "the character's code point divided by 128, since 128 is", "1: continue value = get_value(chardata) m = re.match(r'([0-9a-fA-F]+)(\\.\\.([0-9a-fA-F]+))?$', chardata[0]) char", "maxval = max(table) for num, (minlimit, maxlimit) in enumerate(limits): if", "relating to the casefolding table, which isn't used; # removed", "come after a line which has already set # data.", "thing a multiple of 4 bytes. */\\n\") print_records(records, record_size) print_table(min_stage1,", "min_size = sys.maxint for stage3_block in [2 ** i for", "# Extract the unique combinations of properties into records def", "(slice_type, i) # round up to the first item of", "of 10 digits. --- digitsets = [] file = open('Unicode.tables/Scripts.txt',", "ucp_Inherited => Script inherited from predecessor # 12 = ucp_Mn", "are different because they are part of a multi-character caseless", "char = int(m.group(1), 16) if m.group(3) is None: last =", "barf at that.\") print(\"Instead, just supply some small dummy tables.", "all match each other caselessly. Each list is # in", "in %s\", file_name, file=sys.stderr) table = [default_value] * MAX_UNICODE for", "print(\"by the pcre2test program, which redefines the PRIV macro to", "4 ), \\ ( [(300, 300), (6, 6), (6, 340),", "'Carian', 'Cham', 'Kayah_Li', 'Lepcha', 'Lycian', 'Lydian', 'Ol_Chiki', 'Rejang', 'Saurashtra', 'Sundanese',", "script.\") print(\"Do not modify it by hand. 
Instead modify the", "= \\\"{}\\\";\".format(unicode_version)) print() print(\"/* If the 32-bit library is run", "# It requires six Unicode data tables: DerivedGeneralCategory.txt, # GraphemeBreakProperty.txt,", "5.2 'Avestan', 'Bamum', 'Egyptian_Hieroglyphs', 'Imperial_Aramaic', 'Inscriptional_Pahlavi', 'Inscriptional_Parthian', 'Javanese', 'Kaithi', 'Lisu',", "tables. print(\"/* These are the main two-stage UCD tables. The", "min_size = size min_stage1, min_stage2, min_stage3 = stage1, stage2, stage3", "# # Example: vedic tone karshana (U+1CD0) is in block", "offset of a character within its own block, and the", "for stage2_block in [2 ** i for i in range(5,10)]:", "Unicode # data tables. A number of extensions have been", "and Kannada. # # <NAME>, 03 July 2008 # Last", "file=sys.stderr) break_props[i] = break_property_names.index('Extended_Pictographic') file.close() # The Script Extensions property", "bits, signed), script extension (16 bits, signed), and a dummy\")", "list is # in order, and is terminated by NOTACHAR", "count += 1 if d == 0: print(\"\\n /* %3d", "# July-2012: Updated list of scripts for Unicode 6.1.0 #", "127), (-32768, 32767), (-2147483648, 2147483647)] minval = min(table) maxval =", "record size %d */' % (len(records) * record_size, record_size)) records", "and the Kelvin symbol are such a set). # #", "min_block_size = block_size print(\"/* This module is generated by the", "character values\") print(\"greater than 0x10ffff may be encountered. For these", "set of records as # other blocks. This leads to", "have not added to an existing set, create a new", "two characters that must match # each other caselessly. A", "ucd_script_sets vector we find the list 3, 15, 107, 29,", "'Modi', 'Mro', 'Nabataean', 'Old_North_Arabian', 'Old_Permic', 'Pahawh_Hmong', 'Palmyrene', 'Psalter_Pahlavi', 'Pau_Cin_Hau', 'Siddham',", "part of a set. If so, unite the existing set", "table that are inserted into the main table. 
# The", "six Unicode data tables: DerivedGeneralCategory.txt, # GraphemeBreakProperty.txt, Scripts.txt, ScriptExtensions.txt, #", "Conceptually, there is a table of records (of type ucd_record),", "(6, 340), (1, 690)], 4 ), \\ ( [(3, 100000),", "PRIV(dummy_ucd_record)[] = {{\") print(\" ucp_Unknown, /* script */\") print(\" ucp_Cn,", "- first + 1) % 10) != 0: print(\"ERROR: %04x..%04x", "part of # the upgrading of Unicode property support. The", "for caseless matching sets. # Combine the tables table, records", "stage3_block print \"/* Total size: %d bytes\" % min_size */", "comment was received about space saving - maybe the guy", "12, 0, -32, 34, 0 } # 34 = ucp_Latin", "two-stage table has sufficed. \"\"\" # Three-stage tables: # Find", "main two-stage UCD tables. The fields in each record are:\")", "# Added code to scan the emoji-data.txt file to find", "value # greater than 255 to make the field 16", "At offset 101 in the ucd_script_sets vector we find the", "(\"pcre_int16\", 2), (\"pcre_int32\", 4)] limits = [(0, 255), (0, 65535),", "if x == y: found = 1 if not found:", "min_block_size)) print() print(\"/* The tables herein are needed only when", "from the same set. */\\n\") print(\"const uint32_t PRIV(ucd_digit_sets)[] = {\")", "print(\"#endif\") print(\"#endif /* SUPPORT_UNICODE */\") print() print(\"#endif /* PCRE2_PCRE2TEST */\")", "# Script Extensions properties of certain characters. Each list is", "chardata = list(map(str.strip, line.split(';'))) if len(chardata) <= 1: continue if", "used. script_lists = [0] script_abbrevs_default = script_abbrevs.index('Zzzz') scriptx = read_table('Unicode.tables/ScriptExtensions.txt',", "PRIV(ucd_stage2)[] = {0};\") print(\"const uint32_t PRIV(ucd_caseless_sets)[] = {0};\") print(\"#else\") print()", "print(\"/* This module is generated by the maint/MultiStage2.py script.\") print(\"Do", "not matter whether it is compiled or not. 
However\") print(\"a", "'Elbasan', 'Grantha', 'Khojki', 'Khudawadi', 'Linear_A', 'Mahajani', 'Manichaean', 'Mende_Kikakui', 'Modi', 'Mro',", "value, unused at present # # Almost all lowercase latin", "blocks; each block is indexed by # the offset of", "all lowercase latin characters resolve to the same record. One", "yields 458 # record 458 is { 28, 12, 3,", "value of 0) # which often come after a line", "six tables. The ucd_caseless_sets table contains # lists of characters", "now have three characters that are case-equivalent. if other_case[o] !=", "print(\"condition to cut out the tables when not needed. But", "stage2_block in [2 ** i for i in range(5,10)]: size", "two # are different because they are part of a", "['CR', 'LF', 'Control', 'Extend', 'Prepend', 'SpacingMark', 'L', 'V', 'T', 'LV',", "# data tables. A number of extensions have been added", ". Inserted 'int' before blocksize/ELEMS_PER_LINE because an int is #", "# Example: vedic tone karshana (U+1CD0) is in block 57", "690)], 8 ), \\ ] for test in tests: size,", "read_table('Unicode.tables/GraphemeBreakProperty.txt', make_get_names(break_property_names), break_property_names.index('Other')) other_case = read_table('Unicode.tables/CaseFolding.txt', get_other_case, 0) # The", "mode, character values\") print(\"greater than 0x10ffff may be encountered. For", "# 12 = ucp_Mn => Non-spacing mark # 3 =", "get_value, default_value): global unicode_version f = re.match(r'^[^/]+/([^.]+)\\.txt$', file_name) file_base =", "num, (minlimit, maxlimit) in enumerate(limits): if minlimit <= minval and", "version into the file # 19-June-2015: Updated for Unicode 8.0.0", "Unicode 6.2.0 # 13-May-2014: Updated for PCRE2 # 03-June-2014: Updated", "char else: last = int(m.group(3), 16) for i in range(char,", "block size for 3-stage table min_size = sys.maxint for stage3_block", "*/\") print(\"/* Total size: %d bytes, block size: %d. 
*/\"", "file: line = re.sub(r'#.*', '', line) chardata = list(map(str.strip, line.split(';')))", "new # field in the record to hold the value.", "Unicode version def read_table(file_name, get_value, default_value): global unicode_version f =", "grapheme breaking property. This was # done when updating to", "'Beng', 'Bopo', 'Brai', 'Bugi', 'Buhd', 'Cans', 'Cher', 'Zyyy', 'Copt', 'Cprt',", "actual\\n' + \\ 'field names will be different):\\n\\ntypedef struct {\\n'", "minval = min(table) maxval = max(table) for num, (minlimit, maxlimit)", "This module is generated by the maint/MultiStage2.py script.\") print(\"Do not", "in range(MAX_UNICODE): if other_case[c] != 0 and other_case[c + other_case[c]]", "print(\" ucp_gbOther, /* grapheme break property */\") print(\" 0, /*", "make_get_names(category_names), category_names.index('Cn')) break_props = read_table('Unicode.tables/GraphemeBreakProperty.txt', make_get_names(break_property_names), break_property_names.index('Other')) other_case = read_table('Unicode.tables/CaseFolding.txt',", "Unicode 5.1 'Cari', 'Cham', 'Kali', 'Lepc', 'Lyci', 'Lydi', 'Olck', 'Rjng',", "i in range(0, MAX_UNICODE): if scriptx[i] == script_abbrevs_default: scriptx[i] =", "\\\"config.h\\\"\") print(\"#endif\") print() print(\"#include \\\"pcre2_internal.h\\\"\") print() print(\"#endif /* PCRE2_PCRE2TEST */\")", "%s %s[] = { /* %d bytes\" % (type, table_name,", "# required. The ucd_stage1 table is indexed by a character's", "'Adlam', 'Bhaiksuki', 'Marchen', 'Newa', 'Osage', 'Tangut', 'Masaram_Gondi', 'Nushu', 'Soyombo', 'Zanabazar_Square',", "6), (1, 690)], 8 ), \\ ] for test in", "Three-stage tables: # Find the optimum block size for 3-stage", "ucd_script_sets vector contains lists of script numbers that are the", "re.escape(file_base) + r\"-(\\d+\\.\\d+\\.\\d+)\\.txt$\" file = open(file_name, 'r', encoding='utf-8') f =", "set. 
appended = 0 for s in sets: found =", "Updated list of scripts for Unicode 6.0.0 # July-2012: Updated", "upgraded to Python 3 for PCRE2, and should be run", "slice_size = get_type_size(record_slice) size = (size + slice_size - 1)", "Updated to put Unicode version into the file # 19-June-2015:", "types\") def get_tables_size(*tables): total_size = 0 for table in tables:", "table = [default_value] * MAX_UNICODE for line in file: line", "A (U+3042) is in block 96 (0x60) # lookup 96", "found = True if script_lists[i+j] != script_numbers[j]: found = False", "'Sm', 'So', 'Zl', 'Zp', 'Zs' ] # The Extended_Pictographic property", "vector. # # The following examples are correct for the", "{} # Dictionary for finding identical blocks stage1 = []", "ascending order. # # The ucd_script_sets vector contains lists of", "break property \"Other\" # 0 => Not part of a", "20-August-2012: Added scan of GraphemeBreakProperty.txt and added a new #", "appended: sets.append([c, o, t]) # End of loop looking for", "return -return_value # Read the whole table in memory, setting/checking", "set # -32 (-0x20) => Other case is U+0041 #", "the # structure had a hole in it, so the", "'Zp', 'Zs' ] # The Extended_Pictographic property is not found", "65535), (0, 4294967295), (-128, 127), (-32768, 32767), (-2147483648, 2147483647)] minval", "which are in ascending order. # # The ucd_script_sets vector", "size = get_type_size(table) ELEMS_PER_LINE = 16 s = \"const %s", "= list(zip(list(records.keys()), list(records.values()))) records.sort(key = lambda x: x[1]) for i,", "'Canadian_Aboriginal', 'Cherokee', 'Common', 'Coptic', 'Cypriot', 'Cyrillic', 'Deseret', 'Devanagari', 'Ethiopic', 'Georgian',", "# all the emojis is \"other\". We scan the emoji-data.txt", "2to3, but that didn't fix everything # . Changed string.strip", "of a character within its own block, and the result", "divided by 128, since 128 is the size # of", "We # now have three characters that are case-equivalent. 
if", "size < min_size: min_size = size min_stage1, min_stage2 = stage1,", "in range(0, len(table), block_size): print((\"/* block %d */\\n\" + fmt)", "table to find sets of more than two characters that", "'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk',", "of GraphemeBreakProperty.txt and added a new # field in the", "Updated for Unicode 10.0.0 # 03-July-2018: Updated for Unicode 11.0.0", "*/ print_records(records) print_table(min_stage1, 'ucd_stage1') print_table(min_stage2, 'ucd_stage2', min_stage2_block) print_table(min_stage3, 'ucd_stage3', min_stage3_block)", "scans CaseFolding.txt instead of UnicodeData.txt, which is no longer #", "'Lepc', 'Lyci', 'Lydi', 'Olck', 'Rjng', 'Saur', 'Sund', 'Vaii', #New for", "caseless sets. This uses the # final hole in the", "13-May-2014: Updated for PCRE2 # 03-June-2014: Updated for Python 3", "back here. We # now have three characters that are", "vector is a list of lists of scripts for the", "character's code point divided by 128, since 128 is the", "11.0.0 # 07-July-2018: Added code to scan emoji-data.txt for the", "fields in each record are:\") print(\"script (8 bits), character type", "covering all Unicode # characters would be far too big.", "script_names = ['Unknown', 'Arabic', 'Armenian', 'Bengali', 'Bopomofo', 'Braille', 'Buginese', 'Buhid',", "Script Extensions properties of certain characters. Each list is terminated", "scan the emoji-data.txt file and modify the # break-props table.", "do this work here in order to compute the #", "table lists the code points for the '9' characters in", "# Script Extension property has a negative value in its", "is larger than # any valid character. 
The first list", "0, -32, 34, 0 } # 34 = ucp_Latin =>", "get_value(chardata) m = re.match(r'([0-9a-fA-F]+)(\\.\\.([0-9a-fA-F]+))?$', chardata[0]) char = int(m.group(1), 16) if", "pairs, but the common logic for reading data # sets", "the # Unicode database (UCD) on the Unicode web site;", "'Ethi', 'Geor', 'Glag', 'Goth', 'Grek', 'Gujr', 'Guru', 'Hani', 'Hang', 'Hano',", "everything # . Changed string.strip to str.strip # . Added", "value is the Script value. Parse the # file, setting", "of characters that are caseless sets of\") print(\"more than one", "12, 0, 0, 27, 0 } # 27 = ucp_Hiragana", "* len(table)) if block_size: s += \", block = %d\"", "min_size */ print_records(records) print_table(min_stage1, 'ucd_stage1') print_table(min_stage2, 'ucd_stage2', min_stage2_block) print_table(min_stage3, 'ucd_stage3',", "Added scan of GraphemeBreakProperty.txt and added a new # field", "# Made it work with Python 2.4 by rewriting two", "def test_record_size(): tests = [ \\ ( [(3,), (6,), (6,),", "Instead modify the script and run it\") print(\"to regenerate this", "[] # Stage 2 table contains the blocks with property", "\" + re.escape(file_base) + r\"-(\\d+\\.\\d+\\.\\d+)\\.txt$\" file = open(file_name, 'r', encoding='utf-8')", "set, offset to the character's other case, for # every", "= len(script_numbers) for i in range(1, len(script_lists) - script_numbers_length +", "Extension property # 0 => Dummy value, unused at present", "structure += '%s property_%d;\\n' % (slice_type, i) # round up", "0 => Dummy value, unused at present # # At", "the two-stage table min_size = sys.maxsize for block_size in [2", "the same set. 
*/\\n\") print(\"const uint32_t PRIV(ucd_digit_sets)[] = {\") print(\"", "uint16_t PRIV(ucd_stage1)[] = {0};\") print(\"const uint16_t PRIV(ucd_stage2)[] = {0};\") print(\"const", "c or x == o or x == t: found", "is run in non-32-bit mode, character values\") print(\"greater than 0x10ffff", "sys.maxsize for block_size in [2 ** i for i in", "much bigger than before. # 18-September-2012: Added code for multiple", "= get_record_size_struct(list(records.keys())) # Find the optimum block size for the", "'Cher', 'Zyyy', 'Copt', 'Cprt', 'Cyrl', 'Dsrt', 'Deva', 'Ethi', 'Geor', 'Glag',", "work with Python 2.4 by rewriting two statements that needed", "in script_lists: print(\" %3d,\" % d, end='') count += 1", "ucp_gbExtend => Grapheme break property \"Extend\" # 0 => Not", "code to scan the emoji-data.txt file to find the Extended", "set # 0 => No other case # 27 =", "get_type_size(table): type_size = [(\"uint8_t\", 1), (\"uint16_t\", 2), (\"uint32_t\", 4), (\"signed", "+ (int(i * mult),))) else: if block_size > ELEMS_PER_LINE: el", "'Braille', 'Buginese', 'Buhid', 'Canadian_Aboriginal', 'Cherokee', 'Common', 'Coptic', 'Cypriot', 'Cyrillic', 'Deseret',", "by PH: output the table of caseless character sets ---", "block_size fmt = \"%3d,\" * el + \"\\n\" if block_size", "8.0.0 # 02-July-2017: Updated for Unicode 10.0.0 # 03-July-2018: Updated", "# 20-June-2014: Updated for Unicode 7.0.0 # 12-August-2014: Updated to", "table min_size = sys.maxint for stage3_block in [2 ** i", "table) stage2 = [] # Stage 2 table contains the", "+ '}, /* %3d */') % (record[0] + (i,))) print('};\\n')", "of ScriptExtensions.txt def get_script_extension(chardata): this_script_list = list(chardata[1].split(' ')) if len(this_script_list)", "# required and the result of the division is a", "be set as an additional grapheme break property, because the", "print(\"/* Unicode character database. 
*/\") print(\"/* This file was autogenerated", "tables: DerivedGeneralCategory.txt, # GraphemeBreakProperty.txt, Scripts.txt, ScriptExtensions.txt, # CaseFolding.txt, and emoji-data.txt.", "# Unicode database (UCD) on the Unicode web site; GraphemeBreakProperty.txt", "to scan the emoji-data.txt file to find the Extended Pictographic", "'Latn', 'Limb', 'Linb', 'Mlym', 'Mong', 'Mymr', 'Talu', 'Ogam', 'Ital', 'Xpeo',", "it\") print(\"to regenerate this code.\") print() print(\"As well as being", "print(\"\\n};\\n\") # Output the main UCD tables. print(\"/* These are", "== 'S': return int(chardata[2], 16) - int(chardata[0], 16) return 0", "=> Dummy value, unused at present # # At offset", "can be efficiently compressed by # observing that many characters", "if start is None: # Allocate a new block start", "short # sequences of code that contains no branches, which", "matching set, offset to the character's other case, for #", "*/\") print(\" 0, /* case set */\") print(\" 0, /*", "print(\"const uint32_t PRIV(ucd_caseless_sets)[] = {\") print(\" NOTACHAR,\") for s in", "'Egyptian_Hieroglyphs', 'Imperial_Aramaic', 'Inscriptional_Pahlavi', 'Inscriptional_Parthian', 'Javanese', 'Kaithi', 'Lisu', 'Meetei_Mayek', 'Old_South_Arabian', 'Old_Turkic',", "size += get_tables_size(stage1, stage2, stage3) # print \"/* %5d /", "since 128 is the size # of each block. The", "table builder # (c) <NAME>, 2008 ############################################################################## # This script", "branches, which makes for greater speed. # # Conceptually, there", "to Python 3 for PCRE2, and should be run in", "name clashes\") print(\"with the library. 
At present, just one of", "print(\"const ucd_record PRIV(dummy_ucd_record)[] = {{\") print(\" ucp_Unknown, /* script */\")", "UCD_BLOCK_SIZE in pcre2_internal.h\") print(\"#endif\") print(\"#endif /* SUPPORT_UNICODE */\") print() print(\"#endif", "is the character's code point divided by 128, since 128", "%3d,\" % d, end='') count += 1 if d ==", "the first item of the next structure in array record_slice", "print(\"const uint16_t PRIV(ucd_stage2)[] = {0};\") print(\"const uint32_t PRIV(ucd_caseless_sets)[] = {0};\")", "size = 0 structure = '/* When recompiling tables with", "# New for Unicode 8.0.0 'Ahom', 'Anatolian_Hieroglyphs', 'Hatran', 'Multani', 'Old_Hungarian',", "'Prepend', 'SpacingMark', 'L', 'V', 'T', 'LV', 'LVT', 'Regional_Indicator', 'Other', 'ZWJ',", "dummy tables when no UCP support is needed. # Update", "6.2.0 # 13-May-2014: Updated for PCRE2 # 03-June-2014: Updated for", "get_tables_size(stage1, stage2) #print \"/* block size %5d => %5d bytes", "U+0041 # 34 = ucp_Latin => No special Script Extension", "MAX_UNICODE): if scriptx[i] == script_abbrevs_default: scriptx[i] = script[i] # With", "table yields 0 # lookup 97 (0x61) in the first", "0) # The grapheme breaking rules were changed for Unicode", "greater speed. # # Conceptually, there is a table of", "DerivedGeneralCategory.txt def make_get_names(enum): return lambda chardata: enum.index(chardata[1]) # Parse a", "/ len(table) for i in range(0, len(table), ELEMS_PER_LINE): print(fmt %", "version: print(\"WARNING: Unicode version differs in %s\", file_name, file=sys.stderr) table", "point. Negative values are negated offsets in a list of", "03 July 2008 # Last Updated: 07 October 2018 ##############################################################################", "new one. if not appended: sets.append([c, o, t]) # End", "# 0 => No other case # -101 => Script", "ucd_script_sets # vector. 
# # The ucd_records table contains one", "ScriptExtensions.txt def get_script_extension(chardata): this_script_list = list(chardata[1].split(' ')) if len(this_script_list) ==", "get_record_size_struct(test[0]) assert(size == test[1]) #print struct def print_records(records, record_size): print('const", "= re.match(version_pat, file.readline()) version = f.group(1) if unicode_version == \"\":", "# The main tables generated by this script are used", "in zip(*tables): i = records.get(t) if i is None: i", "found = 0 for x in s: if x ==", "'Mlym', 'Mong', 'Mymr', 'Talu', 'Ogam', 'Ital', 'Xpeo', 'Orya', 'Osma', 'Runr',", "# GraphemeBreakProperty.txt, Scripts.txt, ScriptExtensions.txt, # CaseFolding.txt, and emoji-data.txt. These must", "\"%3d,\" * ELEMS_PER_LINE + \" /* U+%04X */\" mult =", "* el + \"\\n\" if block_size > ELEMS_PER_LINE: fmt =", "names from _pcre2_xxx to xxxx, thereby avoiding name clashes\") print(\"with", "support is built,\") print(\"and in PCRE2 that happens automatically with", "any list. # # The ucd_digit_sets table contains the code", "size) if size < min_size: min_size = size min_stage1, min_stage2", "# number of the required record in the ucd_records vector.", "# in the \"auxiliary\" subdirectory. Scripts.txt, ScriptExtensions.txt, and # CaseFolding.txt", "through the table and set \"return\" # offsets for those", "to find the Extended Pictographic # property, which is used", "for t in zip(*tables): i = records.get(t) if i is", "# 30-April-2011: Updated list of scripts for Unicode 6.0.0 #", "# 27 = ucp_Hiragana => Hiragana script # 7 =", "\"\" script = read_table('Unicode.tables/Scripts.txt', make_get_names(script_names), script_names.index('Unknown')) category = read_table('Unicode.tables/DerivedGeneralCategory.txt', make_get_names(category_names),", "Extensions field to records. This has increased # their size", "that are case-equivalent. 
if other_case[o] != -other_case[c]: t = o", "--- print(\"/* This table contains lists of characters that are", "compress_table(stage_i, stage2_block) size += get_tables_size(stage1, stage2, stage3) # print \"/*", "the '9' characters in each\") print(\"set of decimal digits. It", "the MultiStage2.py script. */\") print(\"/* Total size: %d bytes, block", "list is terminated # by zero (ucp_Unknown). A character with", "start = blocks.get(block) if start is None: # Allocate a", "stage 2 table) stage2 = [] # Stage 2 table", "'Masaram_Gondi', 'Nushu', 'Soyombo', 'Zanabazar_Square', # New for Unicode 11.0.0 'Dogra',", "Python # programmer, so the style is probably dreadful, but", "#New for Unicode 5.1 'Cari', 'Cham', 'Kali', 'Lepc', 'Lyci', 'Lydi',", "sets of\") print(\"more than one character. Each list is terminated", "the division is a float # # Added code to", "#ifndef SUPPORT_UCP to use dummy tables when no UCP support", "New for Unicode 8.0.0 'Ahom', 'Anatolian_Hieroglyphs', 'Hatran', 'Multani', 'Old_Hungarian', 'SignWriting',", "Added code to add a Script Extensions field to records.", "'Hatran', 'Multani', 'Old_Hungarian', 'SignWriting', # New for Unicode 10.0.0 'Adlam',", "0 */\", end='') for d in script_lists: print(\" %3d,\" %", "PCRE2_PCRE2TEST */\") # This code was part of the original", "removed completely in 2012. # Corrected size calculation # Add", "code for creating offsets for caseless matching sets. # Combine", "34, 5, 12, 0, -32, 34, 0 } # 34", "Updated list of scripts for Unicode 6.1.0 # 20-August-2012: Added", "11.0.0 (July 2018). # # Added code to add a", "is # written out. 
However, we have to do this", "for Unicode 7.0.0 # 12-August-2014: Updated to put Unicode version", "'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs' ] #", "next structure in array record_slice = [record[0] for record in", "This has increased # their size from 8 to 12", "an additional grapheme break property, because the default for #", "s: caseless_offsets[x] = offset offset += len(s) + 1 #", "CaseFolding.txt def get_other_case(chardata): if chardata[1] == 'C' or chardata[1] ==", "03-June-2014: Updated for Python 3 # 20-June-2014: Updated for Unicode", "3 # 20-June-2014: Updated for Unicode 7.0.0 # 12-August-2014: Updated", "0x%05x,\" % d, end='') count += 1 print(\"\\n};\\n\") print(\"/* This", "'Extend', 'Prepend', 'SpacingMark', 'L', 'V', 'T', 'LV', 'LVT', 'Regional_Indicator', 'Other',", "A new table is output containing these sets, and #", "and # offsets into the table are added to the", "block number, # which is the character's code point divided", "The script has now been upgraded to Python 3 for", "Extensions property default value is the Script value. Parse the", "script run come from the same set. */\\n\") print(\"const uint32_t", "block_size): block = table[i:i+block_size] start = blocks.get(block) if start is", "script number, script extension value, character type, grapheme break type,", "caseless sets of\") print(\"more than one character. Each list is", "'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps',", "many times. The script is for the use of PCRE", "= get_type_size(record_slice) size = (size + slice_size - 1) &", "has the correct index value. break_property_names = ['CR', 'LF', 'Control',", "to make the field 16 bits. padding_dummy = [0] *", "print(\"with the library. At present, just one of these tables", "defined in # pcre2_internal.h. 
They look up Unicode character properties", "lookup 66 (0x42) in table 90 in stage2 yields 564", "'Orya', 'Osma', 'Runr', 'Shaw', 'Sinh', 'Sylo', 'Syrc', 'Tglg', 'Tagb', 'Tale',", "the emojis is \"other\". We scan the emoji-data.txt file and", "GraphemeBreakProperty.txt, Scripts.txt, ScriptExtensions.txt, # CaseFolding.txt, and emoji-data.txt. These must be", "a Script Extensions field to records. This has increased #", "the character's other case, for # every Unicode character. However,", "modifications made to this script: # Added code to add", "character within its own block, and the result is the", "a lookup in ucd_stage1 a \"virtual\" block number. # #", "set of 10 decimal digits in Unicode. This is used", "# Last Updated: 07 October 2018 ############################################################################## import re import", "[0] * MAX_UNICODE offset = 1; for s in sets:", "*/\") print() print(\"#ifndef SUPPORT_UNICODE\") print(\"const ucd_record PRIV(ucd_records)[] = {{0,0,0,0,0,0,0 }};\")", "print(\" %d, /* Number of subsequent values */\" % len(digitsets),", "pcre2_internal.h. They look up Unicode character properties using short #", "generate the pcre2_ucd.c file that contains a digested form of", "multi-character caseless set (for # example, k, K and the", "2), (\"pcre_int32\", 4)] limits = [(0, 255), (0, 65535), (0,", "%3d */') % (record[0] + (i,))) print('};\\n') script_names = ['Unknown',", "PRIV(ucd_caseless_sets)[] = {\") print(\" NOTACHAR,\") for s in sets: s", "the main table. # The CaseFolding.txt file lists pairs, but", "'Lina', 'Mahj', 'Mani', 'Mend', 'Modi', 'Mroo', 'Nbat', 'Narb', 'Perm', 'Hmng',", "= int(m.group(3), 16) for i in range(char, last + 1):", "=> Non-spacing mark # 3 = ucp_gbExtend => Grapheme break", "of these sets is # written out. 
However, we have", "i in range(5,10)]: size = len(records) * record_size stage1, stage2", "\"const %s %s[] = { /* %d bytes\" % (type,", "& -slice_size structure += '} ucd_record;\\n*/\\n' return size, structure def", "and many blocks of # characters (taking 128 characters in", "characters that all match each other caselessly. Each list is", "'Ugar', 'Yiii', #New for Unicode 5.0 'Bali', 'Xsux', 'Nkoo', 'Phag',", "each set of 10 decimal digits in Unicode. This is", "'Hmng', 'Palm', 'Phlp', 'Pauc', 'Sidd', 'Tirh', 'Wara', #New for Unicode", "= records.get(t) if i is None: i = records[t] =", "open() call # . Inserted 'int' before blocksize/ELEMS_PER_LINE because an", "'Merc', 'Mero', 'Plrd', 'Shrd', 'Sora', 'Takr', #New for Unicode 7.0.0", "# # Update for Python3: # . Processed with 2to3,", "code point divided by 128, since 128 is the size", "Unicode data tables: DerivedGeneralCategory.txt, # GraphemeBreakProperty.txt, Scripts.txt, ScriptExtensions.txt, # CaseFolding.txt,", "Script Extension list offset = 101 # 0 => Dummy", "for i in range(0, len(table), block_size): print((\"/* block %d */\\n\"", "found = 0 for y in [c, o, t]: for", "# Not found in existing lists return_value = len(script_lists) script_lists.extend(script_numbers)", "Added new field for Script Extensions # 27-July-2019: Updated for", "107, 29, # and terminator 0. This means that this", "list(records.values()))) records.sort(key = lambda x: x[1]) for i, record in", "never be a Script Extension # value), then scan it", "supply some small dummy tables. */\") print() print(\"#ifndef SUPPORT_UNICODE\") print(\"const", "if m.group(3) is None: last = char else: last =", "0x%04x,' % x, end=' ') print(' NOTACHAR,') print('};') print() #", "contains a digested form of the Unicode # data tables.", "= table[i:i+block_size] start = blocks.get(block) if start is None: #", "first = int(m.group(1),16) last = int(m.group(2),16) if ((last - first", "default for # all the emojis is \"other\". 
We scan", "contains # lists of characters that all match each other", "stage3_block, size) if size < min_size: min_size = size min_stage1,", "= { ' + \\ '/* %d bytes, record size", "10 file.close() digitsets.sort() print(\"/* This table lists the code points", "Emoji 0x%x has break property %s, not 'Other'\", i, break_property_names[break_props[i]],", "uint8_t PRIV(ucd_script_sets)[] = {\") count = 0 print(\" /* 0", "as it # was never used. A two-stage table has", "'Armn', 'Beng', 'Bopo', 'Brai', 'Bugi', 'Buhd', 'Cans', 'Cher', 'Zyyy', 'Copt',", "line which has already set # data. if table[i] ==", "record_size) print_table(min_stage1, 'PRIV(ucd_stage1)') print_table(min_stage2, 'PRIV(ucd_stage2)', min_block_size) print(\"#if UCD_BLOCK_SIZE != %d\"", "order to compute the # offsets in the table that", "output the table of caseless character sets --- print(\"/* This", "records] slice_type, slice_size = get_type_size(record_slice) size = (size + slice_size", "the Extended Pictographic # property, which is used by PCRE2", "to # generate the pcre2_ucd.c file that contains a digested", "that needed 2.5 # Consequent code tidy # Adjusted data", "'Syrc', 'Tglg', 'Tagb', 'Tale', 'Taml', 'Telu', 'Thaa', 'Thai', 'Tibt', 'Tfng',", "# # Added code to search for sets of more", "well as being part of the PCRE2 library, this module", "# lookup 57 in stage1 table yields 55 # lookup", "6), (68, 1)], 2 ), \\ ( [(300, 3), (6,", "# value), then scan it and fill in the default", "characters (taking 128 characters in a block) have the same", "Each list is # in order, and is terminated by", "'Cans', 'Cher', 'Zyyy', 'Copt', 'Cprt', 'Cyrl', 'Dsrt', 'Deva', 'Ethi', 'Geor',", "space saving - maybe the guy linked\") print(\"all the modules", "This is used to ensure that digits # in script", "used by PCRE2 as a grapheme breaking property. 
This was", "*/' % (len(records) * record_size, record_size)) records = list(zip(list(records.keys()), list(records.values())))", "564 # record 564 is { 27, 7, 12, 0,", "vedic tone karshana (U+1CD0) is in block 57 (0x39) #", "version, please check the\\n' + \\ 'types in this structure", "by <NAME>owski as part of # the upgrading of Unicode", "Allocate a new block start = len(stage2) / block_size stage2", "'Samr', 'Lana', 'Tavt', #New for Unicode 6.0.0 'Batk', 'Brah', 'Mand',", "- so we include a\") print(\"condition to cut out the", "for i in range(2,6)]: stage_i, stage3 = compress_table(table, stage3_block) for", "print_records(records, record_size): print('const ucd_record PRIV(ucd_records)[] = { ' + \\", "are part of a multi-character caseless set (for # example,", "= start stage1.append(start) return stage1, stage2 # Print a table", "# round up to the first item of the next", "2012. I am not a Python # programmer, so the", "(-32768, 32767), (-2147483648, 2147483647)] minval = min(table) maxval = max(table)", "x: x[1]) for i, record in enumerate(records): print((' {' +", "# Pictographic property. # 01-October-2018: Added the 'Unknown' script name", "data tables. A number of extensions have been added to", "the 'Unknown' script name # 03-October-2018: Added new field for", "table contains the code points of the '9' characters in", "the ucd_script_sets # vector. # # The ucd_records table contains", "# Trigger when this character's other case does not point", "0 } # 27 = ucp_Hiragana => Hiragana script #", "Script inherited from predecessor # 12 = ucp_Mn => Non-spacing", "is indexed by # the offset of a character within", "# which is the character's code point divided by 128,", "'} ucd_record;\\n*/\\n' return size, structure def test_record_size(): tests = [", "block_size print(s + \" */\") table = tuple(table) if block_size", "print(\"/* The tables herein are needed only when UCP support", "offsets for caseless matching sets. 
# Combine the tables table,", "some small dummy tables. */\") print() print(\"#ifndef SUPPORT_UNICODE\") print(\"const ucd_record", "The ucd_script_sets vector contains lists of script numbers that are", "in the first table in stage2 yields 17 # record", "'Hangul', 'Hanunoo', 'Hebrew', 'Hiragana', 'Inherited', 'Kannada', 'Katakana', 'Kharoshthi', 'Khmer', 'Lao',", "differs in %s\", file_name, file=sys.stderr) table = [default_value] * MAX_UNICODE", "= tuple(table) for i in range(0, len(table), block_size): block =", "and is terminated by NOTACHAR (0xffffffff), which is larger than", "import string import sys MAX_UNICODE = 0x110000 NOTACHAR = 0xffffffff", "value because in the CaseFolding file there are lines #", "y: found = 1 if not found: s.append(y) appended =", "07-July-2018: Added code to scan emoji-data.txt for the Extended #", "block of code was added by PH in September 2012.", "the tables when not needed. But don't leave\") print(\"a totally", "Lower case letter # 12 = ucp_gbOther => Grapheme break", "Extract the unique combinations of properties into records def combine_tables(*tables):", "emoji-data.txt # file, but we list it here so that", "== test[1]) #print struct def print_records(records, record_size): print('const ucd_record PRIV(ucd_records)[]", "in files associated with Unicode Technical Standard #51 (\"Unicode Emoji\"),", "other case # -101 => Script Extension list offset =", "submitted to the PCRE project by <NAME>owski as part of", "upgrading of Unicode property support. 
The new code speeds up", "# lists of characters that all match each other caselessly.", "2.5 # Consequent code tidy # Adjusted data file names", "slice_size size = (size + slice_size - 1) & -slice_size", "in sets: s = sorted(s) for x in s: print('", "lowercase \"a\" (U+0061) is in block 0 # lookup 0", "= 0 for table in tables: type, size = get_type_size(table)", "for Unicode 5.0 'Balinese', 'Cuneiform', 'Nko', 'Phags_Pa', 'Phoenician', # New", "blocks[block] = start stage1.append(start) return stage1, stage2 # Print a", "to records. This has increased # their size from 8", "unite the existing set with the new set. appended =", "lambda chardata: enum.index(chardata[1]) # Parse a line of CaseFolding.txt def", "version differs in %s\", file_name, file=sys.stderr) table = [default_value] *", "the nearest power of slice_size size = (size + slice_size", "(600,), (600,), (100,)], 2 ), \\ ( [(25, 3), (6,", "Extended_Pictographic property is not found in the file where all", "# the other_case table to find sets of more than", "being part of the PCRE2 library, this module is #included\")", "program, which redefines the PRIV macro to change\") print(\"table names", "If so, unite the existing set with the new set.", "} # 28 = ucp_Inherited => Script inherited from predecessor", "slice_size - 1) & -slice_size structure += '} ucd_record;\\n*/\\n' return", "int(m.group(1),16) last = int(m.group(2),16) if ((last - first + 1)", "bytes */\" % (stage2_block, stage3_block, size) if size < min_size:", "the script and run it\") print(\"to regenerate this code.\") print()", "multiple caseless sets. This uses the # final hole in", "# their size from 8 to 12 bytes, only 10", "in a block) have the same set of records as", "runs all come from the same set. 
The first element", "(6, 6), (123456, 6), (1, 690)], 8 ), \\ ]", "to caseless matching set, offset to the character's other case,", "is None: fmt = \"%3d,\" * ELEMS_PER_LINE + \" /*", "!= break_property_names.index('Other'): print(\"WARNING: Emoji 0x%x has break property %s, not", "'Tibt', 'Tfng', 'Ugar', 'Yiii', #New for Unicode 5.0 'Bali', 'Xsux',", "lists the code points for the '9' characters in each\")", "(340, 6), (690, 1)], 4 ), \\ ( [(3, 300),", "yields 90 # lookup 66 (0x42) in table 90 in", "'r', encoding='utf-8') for line in file: line = re.sub(r'#.*', '',", "This was # done when updating to Unicode 11.0.0 (July", "Unicode version, please check the\\n' + \\ 'types in this", "*/\") print(\" ucp_Unknown, /* script extension */\") print(\" 0, /*", "casefolding table, which isn't used; # removed completely in 2012.", "min_stage1, min_stage2, min_stage3 = stage1, stage2, stage3 min_stage2_block, min_stage3_block =", "in s: if x == y: found = 1 if", "10) != 0: print(\"ERROR: %04x..%04x does not contain a multiple", "not 'Other'\", i, break_property_names[break_props[i]], file=sys.stderr) break_props[i] = break_property_names.index('Extended_Pictographic') file.close() #", "'Soyo', 'Zanb', #New for Unicode 11.0.0 'Dogr', 'Gong', 'Rohg', 'Maka',", "return 0 # Parse a line of ScriptExtensions.txt def get_script_extension(chardata):", "= int(m.group(1), 16) if m.group(3) is None: last = char", "is used to ensure that digits # in script runs", "added to an existing set, create a new one. 
if", "was submitted to the PCRE project by <NAME>owski as part", "is U+0041 # 34 = ucp_Latin => No special Script", "Added by PH: read Scripts.txt again for the sets of", "if d == 0: print(\"\\n /* %3d */\" % count,", "of the original contribution, but is commented out as it", "((last - first + 1) % 10) != 0: print(\"ERROR:", "############################################################################## # This script was submitted to the PCRE project", "for Script Extensions # 27-July-2019: Updated for Unicode 12.1.0 #", "= sys.maxint for stage3_block in [2 ** i for i", "# The ucd_stage2 table is a table of \"virtual\" blocks;", "'Mult', 'Hung', 'Sgnw', #New for Unicode 10.0.0 'Adlm', 'Bhks', 'Marc',", "category = read_table('Unicode.tables/DerivedGeneralCategory.txt', make_get_names(category_names), category_names.index('Cn')) break_props = read_table('Unicode.tables/GraphemeBreakProperty.txt', make_get_names(break_property_names), break_property_names.index('Other'))", "a new # field in the record to hold the", "is expected to be used with # any of those", "sets = [] for c in range(MAX_UNICODE): o = c", "Script Extension\") print(\"property. Each sublist is zero-terminated. */\\n\") print(\"const uint8_t", "(8 bits), character type (8 bits), grapheme break property (8", "required and the result of the division is a float", "1 ), \\ ( [(300,), (600,), (600,), (100,)], 2 ),", "############################################################################## import re import string import sys MAX_UNICODE = 0x110000", "pcre2_internal.h (the actual\\n' + \\ 'field names will be different):\\n\\ntypedef", "file: m = re.match(r'([0-9a-fA-F]+)\\.\\.([0-9a-fA-F]+)\\s+;\\s+\\S+\\s+#\\s+Nd\\s+', line) if m is None: continue", "characters\" % (first, last), file=sys.stderr) while first < last: digitsets.append(first", "(600,), (100,)], 2 ), \\ ( [(25, 3), (6, 6),", "'Hluw', 'Hatr', 'Mult', 'Hung', 'Sgnw', #New for Unicode 10.0.0 'Adlm',", "record. 
This is the # negated offset to the start", "len(script_lists) - script_numbers_length + 1): for j in range(0, script_numbers_length):", "# in files associated with Unicode Technical Standard #51 (\"Unicode", "# # The ucd_records table contains one instance of every", "property, because the default for # all the emojis is", "6), (34, 6), (68, 1)], 2 ), \\ ( [(300,", "'Lo', 'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc',", "0x%x has break property %s, not 'Other'\", i, break_property_names[break_props[i]], file=sys.stderr)", "'Hebr', 'Hira', 'Zinh', 'Knda', 'Kana', 'Khar', 'Khmr', 'Laoo', 'Latn', 'Limb',", "print(\"a comment was received about space saving - maybe the", "\", end='') count = 0 print(\" 0x%05x,\" % d, end='')", "tables with a new Unicode version, please check the\\n' +", "'Duployan', 'Elbasan', 'Grantha', 'Khojki', 'Khudawadi', 'Linear_A', 'Mahajani', 'Manichaean', 'Mende_Kikakui', 'Modi',", "# can be set as an additional grapheme break property,", "is \"other\". We scan the emoji-data.txt file and modify the", "is a list of lists of scripts for the Script", "') print(' NOTACHAR,') print('};') print() # ------ print(\"/* When #included", "c + other_case[c] # Trigger when this character's other case", "file.close() digitsets.sort() print(\"/* This table lists the code points for", "and fill in the default from Scripts. Code added by", "a real table covering all Unicode # characters would be", "print(\"16-bit field to make the whole thing a multiple of", "'Malayalam', 'Mongolian', 'Myanmar', 'New_Tai_Lue', 'Ogham', 'Old_Italic', 'Old_Persian', 'Oriya', 'Osmanya', 'Runic',", "1 if d == 0: print(\"\\n /* %3d */\" %", "index value. break_property_names = ['CR', 'LF', 'Control', 'Extend', 'Prepend', 'SpacingMark',", "2012. 
# Corrected size calculation # Add #ifndef SUPPORT_UCP to", "(6, 6), (34, 6), (68, 1)], 2 ), \\ (", "'Mro', 'Nabataean', 'Old_North_Arabian', 'Old_Permic', 'Pahawh_Hmong', 'Palmyrene', 'Psalter_Pahlavi', 'Pau_Cin_Hau', 'Siddham', 'Tirhuta',", "6.1.0 'Chakma', 'Meroitic_Cursive', 'Meroitic_Hieroglyphs', 'Miao', 'Sharada', 'Sora_Sompeng', 'Takri', # New", "# # DerivedGeneralCategory.txt is found in the \"extracted\" subdirectory of", "Unicode 10.0.0 'Adlm', 'Bhks', 'Marc', 'Newa', 'Osge', 'Tang', 'Gonm', 'Nshu',", "record in records] slice_type, slice_size = get_type_size(record_slice) size = (size", "( [(100000, 300), (6, 6), (123456, 6), (1, 690)], 8", "in the file where all the # others are (GraphemeBreakProperty.txt).", "# Conceptually, there is a table of records (of type", "'Oriya', 'Osmanya', 'Runic', 'Shavian', 'Sinhala', 'Syloti_Nagri', 'Syriac', 'Tagalog', 'Tagbanwa', 'Tai_Le',", "unicode_version = \"\" script = read_table('Unicode.tables/Scripts.txt', make_get_names(script_names), script_names.index('Unknown')) category =", "(size + slice_size - 1) & -slice_size size += slice_size", "x == c or x == o or x ==", "in s: print(' 0x%04x,' % x, end=' ') print(' NOTACHAR,')", "of decimal digits. It is used to ensure that all", "compressed by # observing that many characters have the same", "a character's block number, # which is the character's code", "%d bytes, record size %d */' % (len(records) * record_size,", "# Add #ifndef SUPPORT_UCP to use dummy tables when no", "a grapheme breaking property. 
This was # done when updating", "that digits # in script runs all come from the", "Technical Standard #51 (\"Unicode Emoji\"), # for example: # #", "table def print_table(table, table_name, block_size = None): type, size =", "each record are:\") print(\"script (8 bits), character type (8 bits),", "field, we need some padding # to get the Unicode", "mark # 3 = ucp_gbExtend => Grapheme break property \"Extend\"", "offset = 101 # 0 => Dummy value, unused at", "[] file = open('Unicode.tables/Scripts.txt', 'r', encoding='utf-8') for line in file:", "'Ahom', 'Anatolian_Hieroglyphs', 'Hatran', 'Multani', 'Old_Hungarian', 'SignWriting', # New for Unicode", "characters that are caseless sets of\") print(\"more than one character.", "of records as # other blocks. This leads to a", "(8 bits), grapheme break property (8 bits),\") print(\"offset to multichar", "bytes. */\\n\") print_records(records, record_size) print_table(min_stage1, 'PRIV(ucd_stage1)') print_table(min_stage2, 'PRIV(ucd_stage2)', min_block_size) print(\"#if", "subdirectory. Scripts.txt, ScriptExtensions.txt, and # CaseFolding.txt are directly in the", "i = records[t] = len(records) index.append(i) return index, records def", "= ['CR', 'LF', 'Control', 'Extend', 'Prepend', 'SpacingMark', 'L', 'V', 'T',", "print(\"/* When #included in pcre2test, we don't need the table", "%5d / %3d => %5d bytes */\" % (stage2_block, stage3_block,", "Unicode 7.0.0 'Bassa_Vah', 'Caucasian_Albanian', 'Duployan', 'Elbasan', 'Grantha', 'Khojki', 'Khudawadi', 'Linear_A',", "block 96 (0x60) # lookup 96 in stage1 table yields", "to see if any of the three characters are already", "need some padding # to get the Unicode records up", "fit into C types\") def get_tables_size(*tables): total_size = 0 for", "# This script was submitted to the PCRE project by", "% d, end='') count += 1 print(\"\\n};\\n\") print(\"/* This vector", "predecessor # 12 = ucp_Mn => Non-spacing mark # 3", "# Now scan again and create equivalence sets. 
sets =", "finding identical blocks stage1 = [] # Stage 1 table", "chardata = list(map(str.strip, line.split(';'))) if len(chardata) <= 1: continue value", "1), (\"pcre_int16\", 2), (\"pcre_int32\", 4)] limits = [(0, 255), (0,", "field for Script Extensions # 27-July-2019: Updated for Unicode 12.1.0", "Major modifications made to this script: # Added code to", "8 ), \\ ] for test in tests: size, struct", "# End of loop looking for caseless sets. # Now", "s = sorted(s) for x in s: print(' 0x%04x,' %", "are currently used. # # 01-March-2010: Updated list of scripts", "UCD tables. */\") print() print(\"#ifndef PCRE2_PCRE2TEST\") print() # --- Added", "/* PCRE2_PCRE2TEST */\") # This code was part of the", "It can be efficiently compressed by # observing that many", "value, unused at present # # At offset 101 in", "* int(block_size / ELEMS_PER_LINE) for i in range(0, len(table), block_size):", "create equivalence sets. sets = [] for c in range(MAX_UNICODE):", "print(\"to regenerate this code.\") print() print(\"As well as being part", "# lookup 97 (0x61) in the first table in stage2", "[(300, 3), (6, 6), (340, 6), (690, 1)], 4 ),", "in # the maint subdirectory, using the command # #", "set # data. if table[i] == default_value: table[i] = value", "each block. The result of a lookup in ucd_stage1 a", "a table of these sets is # written out. However,", "in it, so the resulting table is # not much", "used for characters # that are not part of any", "# break-props table. file = open('Unicode.tables/emoji-data.txt', 'r', encoding='utf-8') for line", "!= 0 and other_case[c + other_case[c]] == 0: other_case[c +", "grapheme break property field to records. # # Added code", "table contains lists of characters that are caseless sets of\")", "all come from the same set. The first element in", "have to do this work here in order to compute", "case\") print(\"or zero (32 bits, signed), script extension (16 bits,", "directly in the UCD directory. 
The emoji-data.txt file is #", "Added code to scan emoji-data.txt for the Extended # Pictographic", "*/\") table = tuple(table) if block_size is None: fmt =", "s in sets: found = 0 for x in s:", "block is indexed by # the offset of a character", "* ELEMS_PER_LINE + \" /* U+%04X */\" mult = MAX_UNICODE", "start of the relevant list in the ucd_script_sets # vector.", "/* script */\") print(\" ucp_Cn, /* type unassigned */\") print(\"", "for caseless sets. # Now scan the sets and set", "table_name, size * len(table)) if block_size: s += \", block", "part of any list. # # The ucd_digit_sets table contains", "] for test in tests: size, struct = get_record_size_struct(test[0]) assert(size", "print(\" 0x%05x,\" % d, end='') count += 1 print(\"\\n};\\n\") print(\"/*", "block number. # # The ucd_stage2 table is a table", "min(table) maxval = max(table) for num, (minlimit, maxlimit) in enumerate(limits):", "New for Unicode 5.2 'Avestan', 'Bamum', 'Egyptian_Hieroglyphs', 'Imperial_Aramaic', 'Inscriptional_Pahlavi', 'Inscriptional_Parthian',", "for Unicode 6.1.0 # 20-August-2012: Added scan of GraphemeBreakProperty.txt and", "script and run it\") print(\"to regenerate this code.\") print() print(\"As", "are needed only when UCP support is built,\") print(\"and in", "/* type unassigned */\") print(\" ucp_gbOther, /* grapheme break property", "i in range(char, last + 1): # It is important", "= size min_stage1, min_stage2, min_stage3 = stage1, stage2, stage3 min_stage2_block,", "round up to the first item of the next structure", "= re.match(r'([0-9a-fA-F]+)(\\.\\.([0-9a-fA-F]+))?$', chardata[0]) char = int(m.group(1), 16) if m.group(3) is", "tables table, records = combine_tables(script, category, break_props, caseless_offsets, other_case, scriptx,", "> ELEMS_PER_LINE: fmt = fmt * int(block_size / ELEMS_PER_LINE) for", "'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs' ] # The Extended_Pictographic", "Negative values are negated offsets in a list of lists", "size += get_tables_size(stage1, 
stage2) #print \"/* block size %5d =>", "as the zeroth # element is never used. script_lists =", "%s[] = { /* %d bytes\" % (type, table_name, size", "block 0 # lookup 0 in stage1 table yields 0", "return total_size # Compress the table into the two stages", "# characters would be far too big. It can be", "as # other blocks. This leads to a 2-stage lookup", "file=sys.stderr) while first < last: digitsets.append(first + 9) first +=", "'Georgian', 'Glagolitic', 'Gothic', 'Greek', 'Gujarati', 'Gurmukhi', 'Han', 'Hangul', 'Hanunoo', 'Hebrew',", "in PCRE2 that happens automatically with UTF support.\") print(\"This module", "= \"%3d,\" * el + \"\\n\" if block_size > ELEMS_PER_LINE:", "received about space saving - maybe the guy linked\") print(\"all", "whether it is compiled or not. However\") print(\"a comment was", "print(\"/* This table lists the code points for the '9'", "string import sys MAX_UNICODE = 0x110000 NOTACHAR = 0xffffffff #", "the # others are (GraphemeBreakProperty.txt). It comes from the emoji-data.txt", "cases or zero (8 bits), offset to other case\") print(\"or", "is never used. script_lists = [0] script_abbrevs_default = script_abbrevs.index('Zzzz') scriptx", "* record_size, record_size)) records = list(zip(list(records.keys()), list(records.values()))) records.sort(key = lambda", "-32, 34, 0 } # 34 = ucp_Latin => Latin", "in its record. 
This is the # negated offset to", "# The CaseFolding.txt file lists pairs, but the common logic", "It requires six Unicode data tables: DerivedGeneralCategory.txt, # GraphemeBreakProperty.txt, Scripts.txt,", "unique combinations of properties into records def combine_tables(*tables): records =", "for Unicode 5.2 'Avst', 'Bamu', 'Egyp', 'Armi', 'Phli', 'Prti', 'Java',", "# New for Unicode 6.0.0 'Batak', 'Brahmi', 'Mandaic', # New", "count == 8: print(\"\\n \", end='') count = 0 print(\"", "Unicode 5.0 'Bali', 'Xsux', 'Nkoo', 'Phag', 'Phnx', #New for Unicode", "# Find the optimum block size for 3-stage table min_size", "for test in tests: size, struct = get_record_size_struct(test[0]) assert(size ==", "0: other_case[c + other_case[c]] = -other_case[c] # Now scan again", "a # code point. Negative values are negated offsets in", "Extensions field, we need some padding # to get the", "* record_size stage1, stage2 = compress_table(table, block_size) size += get_tables_size(stage1,", "'%s property_%d;\\n' % (slice_type, i) # round up to the", "'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs' ] # The", "enumerate(limits): if minlimit <= minval and maxval <= maxlimit: return", "len(records) index.append(i) return index, records def get_record_size_struct(records): size = 0", "block_size): print((\"/* block %d */\\n\" + fmt) % ((i /", "possible C language type for the values def get_type_size(table): type_size", "scriptx = read_table('Unicode.tables/ScriptExtensions.txt', get_script_extension, script_abbrevs_default) for i in range(0, MAX_UNICODE):", "lookup 0 in stage1 table yields 0 # lookup 97", "if block_size is None: fmt = \"%3d,\" * ELEMS_PER_LINE +", "'Bass', 'Aghb', 'Dupl', 'Elba', 'Gran', 'Khoj', 'Sind', 'Lina', 'Mahj', 'Mani',", "first < last: digitsets.append(first + 9) first += 10 file.close()", "# programmer, so the style is probably dreadful, but it", "'Hebrew', 'Hiragana', 'Inherited', 'Kannada', 'Katakana', 'Kharoshthi', 'Khmer', 'Lao', 'Latin', 'Limbu',", "zero (32 
bits, signed), script extension (16 bits, signed), and", "(6, 123456), (1, 690)], 8 ), \\ ( [(100000, 300),", "-101, 0 } # 28 = ucp_Inherited => Script inherited", "= (size + slice_size - 1) & -slice_size size +=", "'Batk', 'Brah', 'Mand', #New for Unicode 6.1.0 'Cakm', 'Merc', 'Mero',", "will be different):\\n\\ntypedef struct {\\n' for i in range(len(records[0])): record_slice", "other_case[o] # Scan the existing sets to see if any", "PH: output the table of caseless character sets --- print(\"/*", "new code speeds up property # matching many times. The", "for 3-stage table min_size = sys.maxint for stage3_block in [2", "as part of # the upgrading of Unicode property support.", "tables. A number of extensions have been added to the", "pcre2_ucd.c file that contains a digested form of the Unicode", "# in order, and is terminated by NOTACHAR (0xffffffff), which", "matching many times. The script is for the use of", "print(\"\\n /* %3d */\" % count, end='') print(\"\\n};\\n\") # Output", "----------------------------------------------------------------------------- # Minor modifications made to this script: # Added", "in file: m = re.match(r'([0-9a-fA-F]+)\\.\\.([0-9a-fA-F]+)\\s+;\\s+\\S+\\s+#\\s+Nd\\s+', line) if m is None:", "of CaseFolding.txt def get_other_case(chardata): if chardata[1] == 'C' or chardata[1]", "'Goth', 'Grek', 'Gujr', 'Guru', 'Hani', 'Hang', 'Hano', 'Hebr', 'Hira', 'Zinh',", "x == t: found = 1 # Add new characters", "this script: # Added #! line at start # Removed", "28, 12, 3, 0, 0, -101, 0 } # 28", "0 # Parse a line of ScriptExtensions.txt def get_script_extension(chardata): this_script_list", "read_table('Unicode.tables/CaseFolding.txt', get_other_case, 0) # The grapheme breaking rules were changed", "using a library - so we include a\") print(\"condition to", "are caseless sets of\") print(\"more than one character. 
Each list", "128, since 128 is the size # of each block.", "= {0};\") print(\"const uint16_t PRIV(ucd_stage2)[] = {0};\") print(\"const uint32_t PRIV(ucd_caseless_sets)[]", "script[i] # With the addition of the new Script Extensions", "tables herein are needed only when UCP support is built,\")", "'Hanifi_Rohingya', 'Makasar', 'Medefaidrin', 'Old_Sogdian', 'Sogdian', # New for Unicode 12.0.0", "0 and other_case[c + other_case[c]] == 0: other_case[c + other_case[c]]", "list in the ucd_script_sets # vector. # # The ucd_records", "stage1 table yields 90 # lookup 66 (0x42) in table", "# which often come after a line which has already", "ucp_gbOther => Grapheme break property \"Other\" # 0 => Not", "'Makasar', 'Medefaidrin', 'Old_Sogdian', 'Sogdian', # New for Unicode 12.0.0 'Elymaic',", "# Added #! line at start # Removed tabs #", "== t: found = 1 # Add new characters to", ". Changed string.strip to str.strip # . Added encoding='utf-8' to", "the 32-bit library is run in non-32-bit mode, character values\")", "caseless character sets --- print(\"/* This table contains lists of", "contribution, but is commented out as it # was never", "generated by this script are used by macros defined in", "original script. # # The script has now been upgraded", "this script: # Added code to add a grapheme break", "# offset to caseless matching set, offset to the character's", "= len(records) index.append(i) return index, records def get_record_size_struct(records): size =", "found: s.append(y) appended = 1 # If we have not", "is terminated by NOTACHAR (0xffffffff), which is larger than #", "# New for Unicode 5.0 'Balinese', 'Cuneiform', 'Nko', 'Phags_Pa', 'Phoenician',", "size += slice_size structure += '%s property_%d;\\n' % (slice_type, i)", "# 3 = ucp_gbExtend => Grapheme break property \"Extend\" #", "two stages def compress_table(table, block_size): blocks = {} # Dictionary", "and set appropriate offsets for the characters. 
caseless_offsets = [0]", "print(\"const ucd_record PRIV(ucd_records)[] = {{0,0,0,0,0,0,0 }};\") print(\"const uint16_t PRIV(ucd_stage1)[] =", "record in the ucd_records vector. # # The following examples", "has sufficed. \"\"\" # Three-stage tables: # Find the optimum", "Example: lowercase \"a\" (U+0061) is in block 0 # lookup", "data file names to take from the Unicode.tables directory #", "tables when no UCP support is needed. # Update for", "10 decimal digits in Unicode. This is used to ensure", "properties into records def combine_tables(*tables): records = {} index =", "'Plrd', 'Shrd', 'Sora', 'Takr', #New for Unicode 7.0.0 'Bass', 'Aghb',", "record_slice = [record[0] for record in records] slice_type, slice_size =", "code was added by PH in September 2012. I am", "block_size > ELEMS_PER_LINE: el = ELEMS_PER_LINE else: el = block_size", "# Combine the tables table, records = combine_tables(script, category, break_props,", "'Tibetan', 'Tifinagh', 'Ugaritic', 'Yi', # New for Unicode 5.0 'Balinese',", "# Parse a line of Scripts.txt, GraphemeBreakProperty.txt or DerivedGeneralCategory.txt def", "speed. # # Conceptually, there is a table of records", "if minlimit <= minval and maxval <= maxlimit: return type_size[num]", "'Bhaiksuki', 'Marchen', 'Newa', 'Osage', 'Tangut', 'Masaram_Gondi', 'Nushu', 'Soyombo', 'Zanabazar_Square', #", "the digits in\") print(\"a script run come from the same", "print(\"const uint32_t PRIV(ucd_digit_sets)[] = {\") print(\" %d, /* Number of", "the Unicode # data tables. A number of extensions have", "'Buginese', 'Buhid', 'Canadian_Aboriginal', 'Cherokee', 'Common', 'Coptic', 'Cypriot', 'Cyrillic', 'Deseret', 'Devanagari',", "+ slice_size - 1) & -slice_size size += slice_size structure", "block of code for creating offsets for caseless matching sets.", "because the default for # all the emojis is \"other\".", "lookup process. # # This script constructs six tables. 
The", "# Added code to search for sets of more than", "%s, not 'Other'\", i, break_property_names[break_props[i]], file=sys.stderr) break_props[i] = break_property_names.index('Extended_Pictographic') file.close()", "record to hold the value. Luckily, the # structure had", "'Manichaean', 'Mende_Kikakui', 'Modi', 'Mro', 'Nabataean', 'Old_North_Arabian', 'Old_Permic', 'Pahawh_Hmong', 'Palmyrene', 'Psalter_Pahlavi',", "+ 1) % 10) != 0: print(\"ERROR: %04x..%04x does not", "Dictionary for finding identical blocks stage1 = [] # Stage", "}};\") print(\"#endif\") print() print(record_struct) # --- Added by PH: output", "def read_table(file_name, get_value, default_value): global unicode_version f = re.match(r'^[^/]+/([^.]+)\\.txt$', file_name)", "offset += len(s) + 1 # End of block of", "end='') count = 8 for d in digitsets: if count", "not found: s.append(y) appended = 1 # If we have", "the Extended_Pictographic property for emoji characters. This # can be", "Now scan the sets and set appropriate offsets for the", "version def read_table(file_name, get_value, default_value): global unicode_version f = re.match(r'^[^/]+/([^.]+)\\.txt$',", "the PCRE2 library, this module is #included\") print(\"by the pcre2test", "totally empty module because some compilers barf at that.\") print(\"Instead,", "count += 1 print(\"\\n};\\n\") print(\"/* This vector is a list", "range(MAX_UNICODE): o = c + other_case[c] # Trigger when this", "'Avestan', 'Bamum', 'Egyptian_Hieroglyphs', 'Imperial_Aramaic', 'Inscriptional_Pahlavi', 'Inscriptional_Parthian', 'Javanese', 'Kaithi', 'Lisu', 'Meetei_Mayek',", "'SpacingMark', 'L', 'V', 'T', 'LV', 'LVT', 'Regional_Indicator', 'Other', 'ZWJ', 'Extended_Pictographic'", "# Stage 1 table contains block numbers (indices into stage", "Find the optimum block size for 3-stage table min_size =", "empty; this is used for characters # that are not", "is { 27, 7, 12, 0, 0, 27, 0 }", "compress_table(table, block_size) size += get_tables_size(stage1, stage2) #print 
\"/* block size", "the value. Luckily, the # structure had a hole in", "values\") print(\"greater than 0x10ffff may be encountered. For these we", "'Cari', 'Cham', 'Kali', 'Lepc', 'Lyci', 'Lydi', 'Olck', 'Rjng', 'Saur', 'Sund',", "print(\" ucp_Unknown, /* script */\") print(\" ucp_Cn, /* type unassigned", "'Unknown' as the default (this will never be a Script", "when this character's other case does not point back here.", "a caseless set # 0 => No other case #", "was # done when updating to Unicode 11.0.0 (July 2018).", "script_numbers_length + 1): for j in range(0, script_numbers_length): found =", "a Script Extension # value), then scan it and fill", "a previously set # value because in the CaseFolding file", "found: return -i # Not found in existing lists return_value", "the characters. caseless_offsets = [0] * MAX_UNICODE offset = 1;", "\"Other\" # 0 => Not part of a caseless set", "Kelvin symbol are such a set). # # Example: hiragana", "t in zip(*tables): i = records.get(t) if i is None:", "'Palm', 'Phlp', 'Pauc', 'Sidd', 'Tirh', 'Wara', #New for Unicode 8.0.0", "October 2018 ############################################################################## import re import string import sys MAX_UNICODE", "script = read_table('Unicode.tables/Scripts.txt', make_get_names(script_names), script_names.index('Unknown')) category = read_table('Unicode.tables/DerivedGeneralCategory.txt', make_get_names(category_names), category_names.index('Cn'))", "to Unicode 11.0.0 (July 2018). # # Added code to", "(123456, 6), (1, 690)], 8 ), \\ ] for test", "changed for Unicode 11.0.0 (June 2018). Now # we need", "Total size: %d bytes, block size: %d. */\" % (min_size,", "*/\") print(\"/* This file was autogenerated by the MultiStage2.py script.", "every unique record that is # required. 
The ucd_stage1 table", "the table of digit\") print(\"sets, nor the the large main", "the two stages def compress_table(table, block_size): blocks = {} #", "# --- Added by PH: read Scripts.txt again for the", "maint subdirectory, using the command # # [python3] ./MultiStage2.py >../src/pcre2_ucd.c", "filler */\") print(\" }};\") print(\"#endif\") print() print(record_struct) # --- Added", "Stage 1 table contains block numbers (indices into stage 2", "print(\"const char *PRIV(unicode_version) = \\\"{}\\\";\".format(unicode_version)) print() print(\"/* If the 32-bit", "to be ignored (returning the default value of 0) #", "example, k, K and the Kelvin symbol are such a", "needed. But don't leave\") print(\"a totally empty module because some", "in table 90 in stage2 yields 564 # record 564", "am not a Python # programmer, so the style is", "subsequent values */\" % len(digitsets), end='') count = 8 for", "== 0: print(\"\\n /* %3d */\" % count, end='') print(\"\\n};\\n\")", "when no UCP support is needed. # Update for PCRE2:", "'Zl', 'Zp', 'Zs' ] # The Extended_Pictographic property is not", "** i for i in range(5,10)]: size = len(records) *", "= value file.close() return table # Get the smallest possible", "# contains the number of subsequent elements, which are in", "[] for t in zip(*tables): i = records.get(t) if i", "value file.close() return table # Get the smallest possible C", "with UTF support.\") print(\"This module should not be referenced otherwise,", "the emoji-data.txt file to find the Extended Pictographic # property,", "# field in the record to hold the value. Luckily,", "sufficed. \"\"\" # Three-stage tables: # Find the optimum block", "= open('Unicode.tables/Scripts.txt', 'r', encoding='utf-8') for line in file: m =", "'Lycian', 'Lydian', 'Ol_Chiki', 'Rejang', 'Saurashtra', 'Sundanese', 'Vai', # New for", "=> Grapheme break property \"Extend\" # 0 => Not part", "than one character. Each list is terminated by NOTACHAR. 
*/\\n\")", "<NAME>, 2008 ############################################################################## # This script was submitted to the", "is important not to overwrite a previously set # value", "for a # code point. Negative values are negated offsets", "the table into the two stages def compress_table(table, block_size): blocks", "block blocks[block] = start stage1.append(start) return stage1, stage2 # Print", "# The ucd_records table contains one instance of every unique", "from Scripts. Code added by PH # in October 2018.", "for reading data # sets only one value, so first", "does not contain a multiple of 10 characters\" % (first,", "for x in s: if x == c or x", "# 12-August-2014: Updated to put Unicode version into the file", "0. This means that this character is expected to be", "= 0 print(\" 0x%05x,\" % d, end='') count += 1", "only when UCP support is built,\") print(\"and in PCRE2 that", "Updated for Unicode 8.0.0 # 02-July-2017: Updated for Unicode 10.0.0", "line of CaseFolding.txt def get_other_case(chardata): if chardata[1] == 'C' or", "into the main table. # The CaseFolding.txt file lists pairs,", "form of the Unicode # data tables. A number of", "ucd_stage1 table is indexed by a character's block number, #", "[record[0] for record in records] slice_type, slice_size = get_type_size(record_slice) size", "of loop looking for caseless sets. # Now scan the", "stage2 = compress_table(stage_i, stage2_block) size += get_tables_size(stage1, stage2, stage3) #", "only 10 of which are currently used. # # 01-March-2010:", "= stage2_block, stage3_block print \"/* Total size: %d bytes\" %", "but we list it here so that the name has", "- int(chardata[0], 16) return 0 # Parse a line of", "= [] for t in zip(*tables): i = records.get(t) if", "and modify the # break-props table. 
file = open('Unicode.tables/emoji-data.txt', 'r',", "digitsets = [] file = open('Unicode.tables/Scripts.txt', 'r', encoding='utf-8') for line", "that all the digits in\") print(\"a script run come from", "# New for Unicode 10.0.0 'Adlam', 'Bhaiksuki', 'Marchen', 'Newa', 'Osage',", ". Added encoding='utf-8' to the open() call # . Inserted", "resolve to the same record. One or two # are", "other_case[c]] == 0: other_case[c + other_case[c]] = -other_case[c] # Now", "existing set if found: found = 0 for y in", "call # . Inserted 'int' before blocksize/ELEMS_PER_LINE because an int", "# New for Unicode 5.1 'Carian', 'Cham', 'Kayah_Li', 'Lepcha', 'Lycian',", "'Mero', 'Plrd', 'Shrd', 'Sora', 'Takr', #New for Unicode 7.0.0 'Bass',", "+= 10 file.close() digitsets.sort() print(\"/* This table lists the code", "print(\"property. Each sublist is zero-terminated. */\\n\") print(\"const uint8_t PRIV(ucd_script_sets)[] =", "type ucd_record), containing a # script number, script extension value,", "/ %3d => %5d bytes */\" % (stage2_block, stage3_block, size)", "if block_size > ELEMS_PER_LINE: el = ELEMS_PER_LINE else: el =", "'Bhks', 'Marc', 'Newa', 'Osge', 'Tang', 'Gonm', 'Nshu', 'Soyo', 'Zanb', #New", "6), (340, 6), (690, 1)], 4 ), \\ ( [(3,", "The fields in each record are:\") print(\"script (8 bits), character", "padding # to get the Unicode records up to 12", "ucp_Ll => Lower case letter # 12 = ucp_gbOther =>", "slice_size structure += '%s property_%d;\\n' % (slice_type, i) # round", "list offset = 101 # 0 => Dummy value, unused", "add a Script Extensions field to records. This has increased", "extension value, character type, grapheme break type, # offset to", "char\", 1), (\"pcre_int16\", 2), (\"pcre_int32\", 4)] limits = [(0, 255),", "by hand. Instead modify the script and run it\") print(\"to", "the size # of each block. 
The result of a", "'Nshu', 'Soyo', 'Zanb', #New for Unicode 11.0.0 'Dogr', 'Gong', 'Rohg',", "*/\" % count, end='') print(\"\\n};\\n\") # Output the main UCD", "to take from the Unicode.tables directory # Adjusted global table", "before blocksize/ELEMS_PER_LINE because an int is # required and the", "break property from Unicode 6.2.0 # 13-May-2014: Updated for PCRE2", "breaking property. This was # done when updating to Unicode", "== \"\": unicode_version = version elif unicode_version != version: print(\"WARNING:", "(0xffffffff), which is larger than # any valid character. The", "print(\"offset to multichar other cases or zero (8 bits), offset", "'Nushu', 'Soyombo', 'Zanabazar_Square', # New for Unicode 11.0.0 'Dogra', 'Gunjala_Gondi',", "read_table('Unicode.tables/ScriptExtensions.txt', get_script_extension, script_abbrevs_default) for i in range(0, MAX_UNICODE): if scriptx[i]", "( [(3, 100000), (6, 6), (6, 123456), (1, 690)], 8", "completely in 2012. # Corrected size calculation # Add #ifndef", "set as an additional grapheme break property, because the default", "that are inserted into the main table. # The CaseFolding.txt", "+ (i,))) print('};\\n') script_names = ['Unknown', 'Arabic', 'Armenian', 'Bengali', 'Bopomofo',", "This block of code was added by PH in September", "*/\") print(\" 0, /* other case */\") print(\" ucp_Unknown, /*", "+= get_tables_size(stage1, stage2) #print \"/* block size %5d => %5d", "0, /* other case */\") print(\" ucp_Unknown, /* script extension", "of scripts for the Script Extension\") print(\"property. Each sublist is", "is probably dreadful, but it does the job. It scans", "the same set. The first element in the vector #", "* len(record[0]) + '}, /* %3d */') % (record[0] +", "'Khmer', 'Lao', 'Latin', 'Limbu', 'Linear_B', 'Malayalam', 'Mongolian', 'Myanmar', 'New_Tai_Lue', 'Ogham',", "'Mtei', 'Sarb', 'Orkh', 'Samr', 'Lana', 'Tavt', #New for Unicode 6.0.0", "has already set # data. 
if table[i] == default_value: table[i]", "'Gujarati', 'Gurmukhi', 'Han', 'Hangul', 'Hanunoo', 'Hebrew', 'Hiragana', 'Inherited', 'Kannada', 'Katakana',", "if any of the three characters are already # part", "{ 28, 12, 3, 0, 0, -101, 0 } #", "for Unicode 6.1.0 'Cakm', 'Merc', 'Mero', 'Plrd', 'Shrd', 'Sora', 'Takr',", "'Tamil', 'Telugu', 'Thaana', 'Thai', 'Tibetan', 'Tifinagh', 'Ugaritic', 'Yi', # New", "(1, 690)], 8 ), \\ ] for test in tests:", "line in file: line = re.sub(r'#.*', '', line) chardata =", "record_size stage1, stage2 = compress_table(table, block_size) size += get_tables_size(stage1, stage2)", "PCRE2 that happens automatically with UTF support.\") print(\"This module should", "# 27-July-2019: Updated for Unicode 12.1.0 # ---------------------------------------------------------------------------- # #", "# The following examples are correct for the Unicode 11.0.0", "print() # --- Added by PH: read Scripts.txt again for", "s: if x == y: found = 1 if not", "'Sogo', 'Sogd', #New for Unicode 12.0.0 'Elym', 'Nand', 'Hmnp', 'Wcho'", "this code.\") print() print(\"As well as being part of the", "in existing lists return_value = len(script_lists) script_lists.extend(script_numbers) return -return_value #", "'Narb', 'Perm', 'Hmng', 'Palm', 'Phlp', 'Pauc', 'Sidd', 'Tirh', 'Wara', #New", "blocks.get(block) if start is None: # Allocate a new block", "'Old_Sogdian', 'Sogdian', # New for Unicode 12.0.0 'Elymaic', 'Nandinagari', 'Nyiakeng_Puachue_Hmong',", "work here in order to compute the # offsets in", "sets. 
# Combine the tables table, records = combine_tables(script, category,", "(\"uint16_t\", 2), (\"uint32_t\", 4), (\"signed char\", 1), (\"pcre_int16\", 2), (\"pcre_int32\",", "'Greek', 'Gujarati', 'Gurmukhi', 'Han', 'Hangul', 'Hanunoo', 'Hebrew', 'Hiragana', 'Inherited', 'Kannada',", "for Unicode 8.0.0 'Ahom', 'Hluw', 'Hatr', 'Mult', 'Hung', 'Sgnw', #New", "{0};\") print(\"#else\") print() print(\"const char *PRIV(unicode_version) = \\\"{}\\\";\".format(unicode_version)) print() print(\"/*", "8 to 12 bytes, only 10 of which are currently", "character properties using short # sequences of code that contains", "the # negated offset to the start of the relevant", "0x110000 NOTACHAR = 0xffffffff # Parse a line of Scripts.txt,", "the # file, setting 'Unknown' as the default (this will", "Extension\") print(\"property. Each sublist is zero-terminated. */\\n\") print(\"const uint8_t PRIV(ucd_script_sets)[]", "*/\\n\") print_records(records, record_size) print_table(min_stage1, 'PRIV(ucd_stage1)') print_table(min_stage2, 'PRIV(ucd_stage2)', min_block_size) print(\"#if UCD_BLOCK_SIZE", "print() print(\"/* Unicode character database. */\") print(\"/* This file was", "# each other caselessly. A new table is output containing", "are the main two-stage UCD tables. The fields in each", "Unicode 5.0 'Balinese', 'Cuneiform', 'Nko', 'Phags_Pa', 'Phoenician', # New for", "Added the 'Unknown' script name # 03-October-2018: Added new field", "multiple of 10 characters\" % (first, last), file=sys.stderr) while first", "values. # # Example: lowercase \"a\" (U+0061) is in block", "scan of GraphemeBreakProperty.txt and added a new # field in", "leads to a 2-stage lookup process. # # This script", "Updated for Unicode 12.1.0 # ---------------------------------------------------------------------------- # # # The", "out the tables when not needed. 
But don't leave\") print(\"a", "language type for the values def get_type_size(table): type_size = [(\"uint8_t\",", "which makes for greater speed. # # Conceptually, there is", "is output containing these sets, and # offsets into the", "int(chardata[2], 16) - int(chardata[0], 16) return 0 # Parse a", "symbol are such a set). # # Example: hiragana letter", "number. # # The ucd_stage2 table is a table of", "compress_table(table, block_size): blocks = {} # Dictionary for finding identical", "the blocks with property values table = tuple(table) for i", "= %d\" % block_size print(s + \" */\") table =", "'Shaw', 'Sinh', 'Sylo', 'Syrc', 'Tglg', 'Tagb', 'Tale', 'Taml', 'Telu', 'Thaa',", "find sets of more than two characters that must all", "'Egyp', 'Armi', 'Phli', 'Prti', 'Java', 'Kthi', 'Lisu', 'Mtei', 'Sarb', 'Orkh',", "of the new Script Extensions field, we need some padding", "% x, end=' ') print(' NOTACHAR,') print('};') print() # ------", "# any of those scripts, which are Bengali, Devanagari, Grantha,", "MAX_UNICODE padding_dummy[0] = 256 # This block of code was", "size for 3-stage table min_size = sys.maxint for stage3_block in", "identical blocks stage1 = [] # Stage 1 table contains", "'Old_Hungarian', 'SignWriting', # New for Unicode 10.0.0 'Adlam', 'Bhaiksuki', 'Marchen',", "Script Extension property # 0 => Dummy value, unused at", "'Cham', 'Kali', 'Lepc', 'Lyci', 'Lydi', 'Olck', 'Rjng', 'Saur', 'Sund', 'Vaii',", "is compiled or not. However\") print(\"a comment was received about", "stage2, stage3 min_stage2_block, min_stage3_block = stage2_block, stage3_block print \"/* Total", "'Javanese', 'Kaithi', 'Lisu', 'Meetei_Mayek', 'Old_South_Arabian', 'Old_Turkic', 'Samaritan', 'Tai_Tham', 'Tai_Viet', #", "sets to see if any of the three characters are", "is for the use of PCRE maintainers, to # generate", "order. # # The ucd_script_sets vector contains lists of script", "the # Script Extensions properties of certain characters. Each list", "2018). 
# # Added code to add a Script Extensions", "# the maint subdirectory, using the command # # [python3]", "= 16 s = \"const %s %s[] = { /*", "in [2 ** i for i in range(2,6)]: stage_i, stage3", "by PCRE2 as a grapheme breaking property. This was #", "#New for Unicode 11.0.0 'Dogr', 'Gong', 'Rohg', 'Maka', 'Medf', 'Sogo',", "is # required and the result of the division is", "is the index # number of the required record in", "characters. This # can be set as an additional grapheme", "= [0] * MAX_UNICODE padding_dummy[0] = 256 # This block", "added a new # field in the record to hold", "Unicode 5.2.0 # 30-April-2011: Updated list of scripts for Unicode", "xxxx, thereby avoiding name clashes\") print(\"with the library. At present,", "to records. # # Added code to search for sets", "Grantha, and Kannada. # # <NAME>, 03 July 2008 #", "field in the record to hold the value. Luckily, the", "and set \"return\" # offsets for those that are not", "# This code was part of the original contribution, but", "When recompiling tables with a new Unicode version, please check", "(32 bits, signed), script extension (16 bits, signed), and a", "[c, o, t]: for x in s: if x ==", "None): type, size = get_type_size(table) ELEMS_PER_LINE = 16 s =", "version elif unicode_version != version: print(\"WARNING: Unicode version differs in", "the original script. # # The script has now been", "script extension (16 bits, signed), and a dummy\") print(\"16-bit field", "by macros defined in # pcre2_internal.h. 
They look up Unicode", "power of slice_size size = (size + slice_size - 1)", "'Laoo', 'Latn', 'Limb', 'Linb', 'Mlym', 'Mong', 'Mymr', 'Talu', 'Ogam', 'Ital',", "last = int(m.group(2),16) if ((last - first + 1) %", "(1, 690)], 8 ), \\ ( [(100000, 300), (6, 6),", "= {\") count = 0 print(\" /* 0 */\", end='')", "{ /* %d bytes\" % (type, table_name, size * len(table))", "'Bamu', 'Egyp', 'Armi', 'Phli', 'Prti', 'Java', 'Kthi', 'Lisu', 'Mtei', 'Sarb',", "The grapheme breaking rules were changed for Unicode 11.0.0 (June", "o = c + other_case[c] # Trigger when this character's", "any valid character. The first list is empty; this is", "break_props, caseless_offsets, other_case, scriptx, padding_dummy) record_size, record_struct = get_record_size_struct(list(records.keys())) #", "size %5d => %5d bytes */\" % (block_size, size) if", "values */\" % len(digitsets), end='') count = 8 for d", "the zeroth # element is never used. script_lists = [0]", "PCRE2_PCRE2TEST\") print() # --- Added by PH: read Scripts.txt again", "# generate the pcre2_ucd.c file that contains a digested form", "PCRE project by <NAME>owski as part of # the upgrading", "% ((i / block_size,) + table[i:i+block_size])) print(\"};\\n\") # Extract the", "the the large main UCD tables. */\") print() print(\"#ifndef PCRE2_PCRE2TEST\")", "of any list. # # The ucd_digit_sets table contains the", "'Inherited', 'Kannada', 'Katakana', 'Kharoshthi', 'Khmer', 'Lao', 'Latin', 'Limbu', 'Linear_B', 'Malayalam',", "y in [c, o, t]: for x in s: if", "in [c, o, t]: for x in s: if x", "more than one script listed for its # Script Extension", "their size from 8 to 12 bytes, only 10 of", "the # final hole in the structure. # 30-September-2012: Added", "terminated # by zero (ucp_Unknown). A character with more than", "not be referenced otherwise, so\") print(\"it should not matter whether", "list it here so that the name has the correct", "K and the Kelvin symbol are such a set). 
#", "for Unicode 5.1 'Carian', 'Cham', 'Kayah_Li', 'Lepcha', 'Lycian', 'Lydian', 'Ol_Chiki',", "in range(0, len(table), block_size): block = table[i:i+block_size] start = blocks.get(block)", "PRIV(ucd_records)[] = {{0,0,0,0,0,0,0 }};\") print(\"const uint16_t PRIV(ucd_stage1)[] = {0};\") print(\"const", "record are:\") print(\"script (8 bits), character type (8 bits), grapheme", "% (first, last), file=sys.stderr) while first < last: digitsets.append(first +", "an int is # required and the result of the", "'Mongolian', 'Myanmar', 'New_Tai_Lue', 'Ogham', 'Old_Italic', 'Old_Persian', 'Oriya', 'Osmanya', 'Runic', 'Shavian',", "= int(m.group(1),16) last = int(m.group(2),16) if ((last - first +", "print(\"#if PCRE2_CODE_UNIT_WIDTH == 32\") print(\"const ucd_record PRIV(dummy_ucd_record)[] = {{\") print(\"", "of a lookup in ucd_stage1 a \"virtual\" block number. #", "that are caseless sets of\") print(\"more than one character. Each", "in range(0, script_numbers_length): found = True if script_lists[i+j] != script_numbers[j]:", "lists return_value = len(script_lists) script_lists.extend(script_numbers) return -return_value # Read the", "PRIV(ucd_caseless_sets)[] = {0};\") print(\"#else\") print() print(\"const char *PRIV(unicode_version) = \\\"{}\\\";\".format(unicode_version))", "character database. */\") print(\"/* This file was autogenerated by the", "instead of UnicodeData.txt, which is no longer # used. #", "19-June-2015: Updated for Unicode 8.0.0 # 02-July-2017: Updated for Unicode", "found in the file where all the # others are", "as an additional grapheme break property, because the default for", "This new # code scans CaseFolding.txt instead of UnicodeData.txt, which", "caseless matching sets. # Combine the tables table, records =", "for d in script_lists: print(\" %3d,\" % d, end='') count", "chardata[1] == 'C' or chardata[1] == 'S': return int(chardata[2], 16)", "MultiStage2.py script. 
*/\") print(\"/* Total size: %d bytes, block size:", "block_size print(\"/* This module is generated by the maint/MultiStage2.py script.\")", "site; GraphemeBreakProperty.txt is # in the \"auxiliary\" subdirectory. Scripts.txt, ScriptExtensions.txt,", "'Bengali', 'Bopomofo', 'Braille', 'Buginese', 'Buhid', 'Canadian_Aboriginal', 'Cherokee', 'Common', 'Coptic', 'Cypriot',", "# 34 = ucp_Latin => No special Script Extension property", "= min(table) maxval = max(table) for num, (minlimit, maxlimit) in", "stage1 table yields 55 # lookup 80 (0x50) in table", "sets of more than two characters that must match #", "1) % 10) != 0: print(\"ERROR: %04x..%04x does not contain", "this module is #included\") print(\"by the pcre2test program, which redefines", "0 => Not part of a caseless set # -32", "= 0 for s in sets: found = 0 for", "Total size: %d bytes\" % min_size */ print_records(records) print_table(min_stage1, 'ucd_stage1')", "ucp_Unknown, /* script extension */\") print(\" 0, /* dummy filler", "16) for i in range(char, last + 1): if break_props[i]", "(1,)], 1 ), \\ ( [(300,), (600,), (600,), (100,)], 2", "be different):\\n\\ntypedef struct {\\n' for i in range(len(records[0])): record_slice =", "digitsets: if count == 8: print(\"\\n \", end='') count =", "the command # # [python3] ./MultiStage2.py >../src/pcre2_ucd.c # # It", "= [] # Stage 2 table contains the blocks with", "# The script has now been upgraded to Python 3", "happens automatically with UTF support.\") print(\"This module should not be", "start stage1.append(start) return stage1, stage2 # Print a table def", "% block_size print(s + \" */\") table = tuple(table) if", "was autogenerated by the MultiStage2.py script. 
*/\") print(\"/* Total size:", "m = re.match(r'([0-9a-fA-F]+)(\\.\\.([0-9a-fA-F]+))?$', chardata[0]) char = int(m.group(1), 16) if m.group(3)", "=> No other case # -101 => Script Extension list", "encoding='utf-8') f = re.match(version_pat, file.readline()) version = f.group(1) if unicode_version", "New for Unicode 10.0.0 'Adlam', 'Bhaiksuki', 'Marchen', 'Newa', 'Osage', 'Tangut',", "to a 2-stage lookup process. # # This script constructs", "# vector. # # The ucd_records table contains one instance", "PH in September 2012. I am not a Python #", "elif unicode_version != version: print(\"WARNING: Unicode version differs in %s\",", "by zero (ucp_Unknown). A character with more than one script", "/* dummy filler */\") print(\" }};\") print(\"#endif\") print() print(record_struct) #", "2 ), \\ ( [(25, 3), (6, 6), (34, 6),", "1: continue if chardata[1] != \"Extended_Pictographic\": continue m = re.match(r'([0-9a-fA-F]+)(\\.\\.([0-9a-fA-F]+))?$',", "file_name, file=sys.stderr) table = [default_value] * MAX_UNICODE for line in", "comes from the emoji-data.txt # file, but we list it", "property field to records. # # Added code to search", "table[i:i+block_size])) print(\"};\\n\") # Extract the unique combinations of properties into", "the Unicode version def read_table(file_name, get_value, default_value): global unicode_version f", "el = block_size fmt = \"%3d,\" * el + \"\\n\"", "structure def test_record_size(): tests = [ \\ ( [(3,), (6,),", "print(\"#endif /* PCRE2_PCRE2TEST */\") print() print(\"/* Unicode character database. */\")", "in pcre2test, we don't need the table of digit\") print(\"sets,", "GraphemeBreakProperty.txt is # in the \"auxiliary\" subdirectory. Scripts.txt, ScriptExtensions.txt, and", "this script are used by macros defined in # pcre2_internal.h.", "the record to hold the value. 
Luckily, the # structure", "to an existing set if found: found = 0 for", "float # # Added code to scan the emoji-data.txt file", "It comes from the emoji-data.txt # file, but we list", "signed), and a dummy\") print(\"16-bit field to make the whole", "# are different because they are part of a multi-character", "= 256 # This block of code was added by", "for the '9' characters in each\") print(\"set of decimal digits.", "(ucp_Unknown). A character with more than one script listed for", "table in memory, setting/checking the Unicode version def read_table(file_name, get_value,", "these tables is actually\") print(\"needed. */\") print() print(\"#ifndef PCRE2_PCRE2TEST\") print()", "size < min_size: min_size = size min_stage1, min_stage2, min_stage3 =", "== o or x == t: found = 1 #", "'Nabataean', 'Old_North_Arabian', 'Old_Permic', 'Pahawh_Hmong', 'Palmyrene', 'Psalter_Pahlavi', 'Pau_Cin_Hau', 'Siddham', 'Tirhuta', 'Warang_Citi',", "bytes */\" % (block_size, size) if size < min_size: min_size", "from _pcre2_xxx to xxxx, thereby avoiding name clashes\") print(\"with the", "print(\"sets, nor the the large main UCD tables. */\") print()", "by the maint/MultiStage2.py script.\") print(\"Do not modify it by hand.", "= ucp_Hiragana => Hiragana script # 7 = ucp_Lo =>", "5.1 'Carian', 'Cham', 'Kayah_Li', 'Lepcha', 'Lycian', 'Lydian', 'Ol_Chiki', 'Rejang', 'Saurashtra',", "file lists pairs, but the common logic for reading data", "Script Extension # value), then scan it and fill in", "print(\"greater than 0x10ffff may be encountered. For these we set", "This code was part of the original contribution, but is", "# # Almost all lowercase latin characters resolve to the", "it # was never used. 
A two-stage table has sufficed.", "to ensure that digits # in script runs all come", "# The ucd_script_sets vector contains lists of script numbers that", "into the two stages def compress_table(table, block_size): blocks = {}", "= read_table('Unicode.tables/Scripts.txt', make_get_names(script_names), script_names.index('Unknown')) category = read_table('Unicode.tables/DerivedGeneralCategory.txt', make_get_names(category_names), category_names.index('Cn')) break_props", "never used. script_lists = [0] script_abbrevs_default = script_abbrevs.index('Zzzz') scriptx =", "range(0, MAX_UNICODE): if scriptx[i] == script_abbrevs_default: scriptx[i] = script[i] #", "make the whole thing a multiple of 4 bytes. */\\n\")", "<NAME>owski as part of # the upgrading of Unicode property", "# code point. Negative values are negated offsets in a", "table has sufficed. \"\"\" # Three-stage tables: # Find the", "When #included in pcre2test, we don't need the table of", "min_stage2 = stage1, stage2 min_block_size = block_size print(\"/* This module", "d == 0: print(\"\\n /* %3d */\" % count, end='')", "file_base = f.group(1) version_pat = r\"^# \" + re.escape(file_base) +", "the Unicode.tables directory # Adjusted global table names by prefixing", "grapheme breaking rules were changed for Unicode 11.0.0 (June 2018).", "# CaseFolding.txt are directly in the UCD directory. 
The emoji-data.txt", "'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po',", "for Unicode 5.0 'Bali', 'Xsux', 'Nkoo', 'Phag', 'Phnx', #New for", "for s in sets: found = 0 for x in", "for x in s: if x == y: found =", "size for the two-stage table min_size = sys.maxsize for block_size", "by NOTACHAR (0xffffffff), which is larger than # any valid", "number of extensions have been added to the original script.", "table # Get the smallest possible C language type for", "record in records] slice_type, slice_size = get_type_size(record_slice) # add padding:", "%5d bytes */\" % (stage2_block, stage3_block, size) if size <", "Each list is terminated by NOTACHAR. */\\n\") print(\"const uint32_t PRIV(ucd_caseless_sets)[]", "if unicode_version == \"\": unicode_version = version elif unicode_version !=", "October 2018. Positive values are used for just a single", "each block is indexed by # the offset of a", "), \\ ( [(300, 3), (6, 6), (340, 6), (690,", "item of the next structure in array record_slice = [record[0]", "'Dsrt', 'Deva', 'Ethi', 'Geor', 'Glag', 'Goth', 'Grek', 'Gujr', 'Guru', 'Hani',", "match # each other caselessly. A new table is output", "regenerate this code.\") print() print(\"As well as being part of", "print(\"script (8 bits), character type (8 bits), grapheme break property", "often come after a line which has already set #", "a list of lists of # multiple scripts. Initialize this", "end='') count += 1 print(\"\\n};\\n\") print(\"/* This vector is a", "in range(5,10)]: size = len(records) * 4 stage1, stage2 =", "of the # Unicode database (UCD) on the Unicode web", "'Zzzz', 'Arab', 'Armn', 'Beng', 'Bopo', 'Brai', 'Bugi', 'Buhd', 'Cans', 'Cher',", "'Shrd', 'Sora', 'Takr', #New for Unicode 7.0.0 'Bass', 'Aghb', 'Dupl',", "ucp_Hiragana => No special Script Extension property # 0 =>", "record 17 is { 34, 5, 12, 0, -32, 34,", "'Hmnp', 'Wcho' ] category_names = ['Cc', 'Cf', 'Cn', 'Co', 'Cs',", "Added code for multiple caseless sets. 
This uses the #", "ScriptExtensions.txt, and # CaseFolding.txt are directly in the UCD directory.", "#print \"/* block size %5d => %5d bytes */\" %", "character. However, a real table covering all Unicode # characters", "test in tests: size, struct = get_record_size_struct(test[0]) assert(size == test[1])", "'Wancho' ] script_abbrevs = [ 'Zzzz', 'Arab', 'Armn', 'Beng', 'Bopo',", "go through the table and set \"return\" # offsets for", "*/\\n\") print(\"const uint8_t PRIV(ucd_script_sets)[] = {\") count = 0 print(\"", "<NAME>, 03 July 2008 # Last Updated: 07 October 2018", "not modify it by hand. Instead modify the script and", "file = open('Unicode.tables/emoji-data.txt', 'r', encoding='utf-8') for line in file: line", "# This block of code was added by PH in", "* mult),))) else: if block_size > ELEMS_PER_LINE: el = ELEMS_PER_LINE", "'So', 'Zl', 'Zp', 'Zs' ] # The Extended_Pictographic property is", "with more than one script listed for its # Script", "points for the '9' characters in each\") print(\"set of decimal", "table[i] == default_value: table[i] = value file.close() return table #", "for the use of PCRE maintainers, to # generate the", "for finding identical blocks stage1 = [] # Stage 1", "is used by PCRE2 as a grapheme breaking property. 
This", "'Perm', 'Hmng', 'Palm', 'Phlp', 'Pauc', 'Sidd', 'Tirh', 'Wara', #New for", "the optimum block size for the two-stage table min_size =", "offset 101 in the ucd_script_sets vector we find the list", "Future # updates may make change the actual lookup values.", "+ re.escape(file_base) + r\"-(\\d+\\.\\d+\\.\\d+)\\.txt$\" file = open(file_name, 'r', encoding='utf-8') f", "case letter # 12 = ucp_gbOther => Grapheme break property", "print_table(table, table_name, block_size = None): type, size = get_type_size(table) ELEMS_PER_LINE", "the existing sets to see if any of the three", "Combine the tables table, records = combine_tables(script, category, break_props, caseless_offsets,", "# value because in the CaseFolding file there are lines", "= [ \\ ( [(3,), (6,), (6,), (1,)], 1 ),", "len(records) * record_size stage1, stage2 = compress_table(table, block_size) size +=", "is indexed by a character's block number, # which is", "1), (\"uint16_t\", 2), (\"uint32_t\", 4), (\"signed char\", 1), (\"pcre_int16\", 2),", "Unicode 11.0.0 (July 2018). 
# # Added code to add", "in block 0 # lookup 0 in stage1 table yields", "existing sets to see if any of the three characters", "32\") print(\"const ucd_record PRIV(dummy_ucd_record)[] = {{\") print(\" ucp_Unknown, /* script", "'Nandinagari', 'Nyiakeng_Puachue_Hmong', 'Wancho' ] script_abbrevs = [ 'Zzzz', 'Arab', 'Armn',", "because in the CaseFolding file there are lines # to", "*/') % (record[0] + (i,))) print('};\\n') script_names = ['Unknown', 'Arabic',", "July-2012: Updated list of scripts for Unicode 6.1.0 # 20-August-2012:", "= get_value(chardata) m = re.match(r'([0-9a-fA-F]+)(\\.\\.([0-9a-fA-F]+))?$', chardata[0]) char = int(m.group(1), 16)", "we list it here so that the name has the", "print(\"const uint32_t PRIV(ucd_caseless_sets)[] = {0};\") print(\"#else\") print() print(\"const char *PRIV(unicode_version)", "return table # Get the smallest possible C language type", "= lambda x: x[1]) for i, record in enumerate(records): print(('", "by NOTACHAR. */\\n\") print(\"const uint32_t PRIV(ucd_caseless_sets)[] = {\") print(\" NOTACHAR,\")", "appended = 1 # If we have not added to", "8: print(\"\\n \", end='') count = 0 print(\" 0x%05x,\" %", "encoding='utf-8') for line in file: m = re.match(r'([0-9a-fA-F]+)\\.\\.([0-9a-fA-F]+)\\s+;\\s+\\S+\\s+#\\s+Nd\\s+', line) if", "for Unicode 12.0.0 'Elymaic', 'Nandinagari', 'Nyiakeng_Puachue_Hmong', 'Wancho' ] script_abbrevs =", "size from 8 to 12 bytes, only 10 of which", "table is a table of \"virtual\" blocks; each block is", "times. 
The script is for the use of PCRE maintainers,", "(\"pcre_int32\", 4)] limits = [(0, 255), (0, 65535), (0, 4294967295),", "fmt = fmt * int(block_size / ELEMS_PER_LINE) for i in", "'Cakm', 'Merc', 'Mero', 'Plrd', 'Shrd', 'Sora', 'Takr', #New for Unicode", "'Regional_Indicator', 'Other', 'ZWJ', 'Extended_Pictographic' ] test_record_size() unicode_version = \"\" script", "c in range(MAX_UNICODE): if other_case[c] != 0 and other_case[c +", "round up to the nearest power of slice_size size =", "tone karshana (U+1CD0) is in block 57 (0x39) # lookup", "point back here. We # now have three characters that", "table contains block numbers (indices into stage 2 table) stage2", "characters to an existing set if found: found = 0", "stage2) #print \"/* block size %5d => %5d bytes */\"", "table names by prefixing _pcre_. # Commented out stuff relating", "= [record[0] for record in records] slice_type, slice_size = get_type_size(record_slice)", "print(\" ucp_Unknown, /* script extension */\") print(\" 0, /* dummy", "up to the nearest power of slice_size size = (size", "7.0.0 'Bassa_Vah', 'Caucasian_Albanian', 'Duployan', 'Elbasan', 'Grantha', 'Khojki', 'Khudawadi', 'Linear_A', 'Mahajani',", "digested form of the Unicode # data tables. A number", "'Tai_Viet', # New for Unicode 6.0.0 'Batak', 'Brahmi', 'Mandaic', #", "'Glagolitic', 'Gothic', 'Greek', 'Gujarati', 'Gurmukhi', 'Han', 'Hangul', 'Hanunoo', 'Hebrew', 'Hiragana',", "about space saving - maybe the guy linked\") print(\"all the", "by # the offset of a character within its own", "names will be different):\\n\\ntypedef struct {\\n' for i in range(len(records[0])):", "Scripts.txt again for the sets of 10 digits. 
--- digitsets", "first list is empty; this is used for characters #", "is None: i = records[t] = len(records) index.append(i) return index,", "padding_dummy = [0] * MAX_UNICODE padding_dummy[0] = 256 # This", "but is commented out as it # was never used.", "type, grapheme break type, # offset to caseless matching set,", "of # the upgrading of Unicode property support. The new", "[] for c in range(MAX_UNICODE): o = c + other_case[c]", "break if found: return -i # Not found in existing", "in sets: for x in s: caseless_offsets[x] = offset offset", "32767), (-2147483648, 2147483647)] minval = min(table) maxval = max(table) for", "the tables table, records = combine_tables(script, category, break_props, caseless_offsets, other_case,", "division is a float # # Added code to scan", "block_size): blocks = {} # Dictionary for finding identical blocks", "actually\") print(\"needed. */\") print() print(\"#ifndef PCRE2_PCRE2TEST\") print() print(\"#ifdef HAVE_CONFIG_H\") print(\"#include", "result of the division is a float # # Added", "check the\\n' + \\ 'types in this structure definition from", "6), (6, 340), (1, 690)], 4 ), \\ ( [(300,", "\\ '/* %d bytes, record size %d */' % (len(records)", "print(\" ucp_Cn, /* type unassigned */\") print(\" ucp_gbOther, /* grapheme", "range(1, len(script_lists) - script_numbers_length + 1): for j in range(0,", "project by <NAME>owski as part of # the upgrading of", "# . Processed with 2to3, but that didn't fix everything", "a single script for a # code point. Negative values", "Standard #51 (\"Unicode Emoji\"), # for example: # # http://unicode.org/Public/emoji/11.0/emoji-data.txt", "2 table) stage2 = [] # Stage 2 table contains", "no longer # used. # # Update for Python3: #", "Unicode 7.0.0 # 12-August-2014: Updated to put Unicode version into", "by PH: read Scripts.txt again for the sets of 10", "to use dummy tables when no UCP support is needed.", "print(\"set of decimal digits. 
It is used to ensure that", "present # # Almost all lowercase latin characters resolve to", "which are Bengali, Devanagari, Grantha, and Kannada. # # <NAME>,", "1): # It is important not to overwrite a previously", "with a new Unicode version, please check the\\n' + \\", "two statements that needed 2.5 # Consequent code tidy #", "total_size += size * len(table) return total_size # Compress the", "' + \\ '/* %d bytes, record size %d */'", "each other caselessly. A new table is output containing these", "# Example: lowercase \"a\" (U+0061) is in block 0 #", "len(table)) if block_size: s += \", block = %d\" %", "'Newa', 'Osage', 'Tangut', 'Masaram_Gondi', 'Nushu', 'Soyombo', 'Zanabazar_Square', # New for", "file.close() return table # Get the smallest possible C language", "Print a table def print_table(table, table_name, block_size = None): type,", "'Talu', 'Ogam', 'Ital', 'Xpeo', 'Orya', 'Osma', 'Runr', 'Shaw', 'Sinh', 'Sylo',", "name changes, and SUPPORT_UCP is abolished. # # Major modifications", "unused at present # # Almost all lowercase latin characters", "type (8 bits), grapheme break property (8 bits),\") print(\"offset to", "the correct index value. break_property_names = ['CR', 'LF', 'Control', 'Extend',", "default_value): global unicode_version f = re.match(r'^[^/]+/([^.]+)\\.txt$', file_name) file_base = f.group(1)", "12 = ucp_gbOther => Grapheme break property \"Other\" # 0", "the Unicode 11.0.0 database. Future # updates may make change", "table yields 90 # lookup 66 (0x42) in table 90", "0, /* case set */\") print(\" 0, /* other case", "than 255 to make the field 16 bits. padding_dummy =", "padding: round up to the nearest power of slice_size size", "not already set. 
for c in range(MAX_UNICODE): if other_case[c] !=", "list is empty; this is used for characters # that", "] category_names = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm',", "existing lists return_value = len(script_lists) script_lists.extend(script_numbers) return -return_value # Read", "SUPPORT_UNICODE */\") print() print(\"#endif /* PCRE2_PCRE2TEST */\") # This code", "list. # # The ucd_digit_sets table contains the code points", "\\\"pcre2_internal.h\\\"\") print() print(\"#endif /* PCRE2_PCRE2TEST */\") print() print(\"/* Unicode character", "zeroth # element is never used. script_lists = [0] script_abbrevs_default", "directory # Adjusted global table names by prefixing _pcre_. #", "{ ' + \\ '/* %d bytes, record size %d", "+ other_case[o] # Scan the existing sets to see if", "the maint/MultiStage2.py script.\") print(\"Do not modify it by hand. Instead", "subdirectory, using the command # # [python3] ./MultiStage2.py >../src/pcre2_ucd.c #", "to overwrite a previously set # value because in the", "# the offset of a character within its own block,", "print() print(\"#ifndef SUPPORT_UNICODE\") print(\"const ucd_record PRIV(ucd_records)[] = {{0,0,0,0,0,0,0 }};\") print(\"const", "End of loop looking for caseless sets. # Now scan", "'Tifinagh', 'Ugaritic', 'Yi', # New for Unicode 5.0 'Balinese', 'Cuneiform',", "should be run in # the maint subdirectory, using the", "new table is output containing these sets, and # offsets", "block_size = None): type, size = get_type_size(table) ELEMS_PER_LINE = 16", "scripts for Unicode 5.2.0 # 30-April-2011: Updated list of scripts", "other case, for # every Unicode character. 
However, a real", "range(0, len(table), block_size): print((\"/* block %d */\\n\" + fmt) %", "so the style is probably dreadful, but it does the", "= tuple(table) if block_size is None: fmt = \"%3d,\" *", "start # Removed tabs # Made it work with Python", "(\"signed char\", 1), (\"pcre_int16\", 2), (\"pcre_int32\", 4)] limits = [(0,", "this is used for characters # that are not part", "of which are currently used. # # 01-March-2010: Updated list", "for i in range(char, last + 1): # It is", "- 1) & -slice_size structure += '} ucd_record;\\n*/\\n' return size,", "total_size # Compress the table into the two stages def", "=> Other letter # 12 = ucp_gbOther => Grapheme break", "would be far too big. It can be efficiently compressed", "caseless set # -32 (-0x20) => Other case is U+0041", "the Script value. Parse the # file, setting 'Unknown' as", "'Limb', 'Linb', 'Mlym', 'Mong', 'Mymr', 'Talu', 'Ogam', 'Ital', 'Xpeo', 'Orya',", "at present # # At offset 101 in the ucd_script_sets", "Python 3 # 20-June-2014: Updated for Unicode 7.0.0 # 12-August-2014:", "by this script are used by macros defined in #", "*/\") print(\" ucp_gbOther, /* grapheme break property */\") print(\" 0,", "print(\"more than one character. Each list is terminated by NOTACHAR.", "3, 15, 107, 29, # and terminator 0. This means", "12.0.0 'Elymaic', 'Nandinagari', 'Nyiakeng_Puachue_Hmong', 'Wancho' ] script_abbrevs = [ 'Zzzz',", "digits in\") print(\"a script run come from the same set.", "the \"extracted\" subdirectory of the # Unicode database (UCD) on", "'Takri', # New for Unicode 7.0.0 'Bassa_Vah', 'Caucasian_Albanian', 'Duployan', 'Elbasan',", "'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi',", "1)], 4 ), \\ ( [(3, 300), (6, 6), (6,", "'Lydian', 'Ol_Chiki', 'Rejang', 'Saurashtra', 'Sundanese', 'Vai', # New for Unicode", "for s in sets: s = sorted(s) for x in", "len(table), block_size): print((\"/* block %d */\\n\" + fmt) % ((i", "changes, and SUPPORT_UCP is abolished. 
# # Major modifications made", "values are used for just a single script for a", "so first we go through the table and set \"return\"", "appended = 0 for s in sets: found = 0", "5.0 'Bali', 'Xsux', 'Nkoo', 'Phag', 'Phnx', #New for Unicode 5.1", "No other case # -101 => Script Extension list offset", "'Maka', 'Medf', 'Sogo', 'Sogd', #New for Unicode 12.0.0 'Elym', 'Nand',", "# Removed tabs # Made it work with Python 2.4", "/* PCRE2_PCRE2TEST */\") print() print(\"/* Unicode character database. */\") print(\"/*", "# example, k, K and the Kelvin symbol are such", "value, so first we go through the table and set", "Other case is U+0041 # 34 = ucp_Latin => No", "list is terminated by NOTACHAR. */\\n\") print(\"const uint32_t PRIV(ucd_caseless_sets)[] =", "print() print(\"#endif /* PCRE2_PCRE2TEST */\") # This code was part", "enumerate(records): print((' {' + '%6d, ' * len(record[0]) + '},", "New for Unicode 12.0.0 'Elymaic', 'Nandinagari', 'Nyiakeng_Puachue_Hmong', 'Wancho' ] script_abbrevs", "offset to other case\") print(\"or zero (32 bits, signed), script", "block_size > ELEMS_PER_LINE: fmt = fmt * int(block_size / ELEMS_PER_LINE)", "= len(records) * record_size stage1, stage2 = compress_table(table, block_size) size", "for y in [c, o, t]: for x in s:", "script_lists: print(\" %3d,\" % d, end='') count += 1 if", "setting/checking the Unicode version def read_table(file_name, get_value, default_value): global unicode_version", "size = get_type_size(table) total_size += size * len(table) return total_size", "'Phag', 'Phnx', #New for Unicode 5.1 'Cari', 'Cham', 'Kali', 'Lepc',", "'Anatolian_Hieroglyphs', 'Hatran', 'Multani', 'Old_Hungarian', 'SignWriting', # New for Unicode 10.0.0", "lists of # multiple scripts. Initialize this list with a", "not much bigger than before. 
# 18-September-2012: Added code for", "ELEMS_PER_LINE: fmt = fmt * int(block_size / ELEMS_PER_LINE) for i", "Updated for Unicode 11.0.0 # 07-July-2018: Added code to scan", "part of a multi-character caseless set (for # example, k,", "needed only when UCP support is built,\") print(\"and in PCRE2", "each other caselessly. Each list is # in order, and", "break property */\") print(\" 0, /* case set */\") print(\"", "add padding: round up to the nearest power of slice_size", "stage2 = [] # Stage 2 table contains the blocks", "print() print(\"/* The tables herein are needed only when UCP", "get_type_size(record_slice) size = (size + slice_size - 1) & -slice_size", "'r', encoding='utf-8') f = re.match(version_pat, file.readline()) version = f.group(1) if", "file is # in files associated with Unicode Technical Standard", "of PCRE maintainers, to # generate the pcre2_ucd.c file that", "table 55 in stage2 yields 458 # record 458 is", "Unicode.tables directory # Adjusted global table names by prefixing _pcre_.", "= True if script_lists[i+j] != script_numbers[j]: found = False break", "'Adlm', 'Bhks', 'Marc', 'Newa', 'Osge', 'Tang', 'Gonm', 'Nshu', 'Soyo', 'Zanb',", "458 # record 458 is { 28, 12, 3, 0,", "the code points of the '9' characters in # each", "bytes (multiple of 4). Set a value # greater than", "programmer, so the style is probably dreadful, but it does", "yields 55 # lookup 80 (0x50) in table 55 in", "'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe',", "# 5 = ucp_Ll => Lower case letter # 12", "hold the value. Luckily, the # structure had a hole", "= c + other_case[c] # Trigger when this character's other", "= max(table) for num, (minlimit, maxlimit) in enumerate(limits): if minlimit", "Adjusted global table names by prefixing _pcre_. # Commented out", "index, records def get_record_size_struct(records): size = 0 structure = '/*", "struct = get_record_size_struct(test[0]) assert(size == test[1]) #print struct def print_records(records,", "set. 
If so, unite the existing set with the new", "while first < last: digitsets.append(first + 9) first += 10", "are not already set. for c in range(MAX_UNICODE): if other_case[c]", "'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So',", "'Marchen', 'Newa', 'Osage', 'Tangut', 'Masaram_Gondi', 'Nushu', 'Soyombo', 'Zanabazar_Square', # New", "new Script Extensions field, we need some padding # to", "which often come after a line which has already set", "This # can be set as an additional grapheme break", "128 is the size # of each block. The result", "and terminator 0. This means that this character is expected", "are added to the main output records. This new #", "= 0xffffffff # Parse a line of Scripts.txt, GraphemeBreakProperty.txt or", "Unicode 8.0.0 'Ahom', 'Anatolian_Hieroglyphs', 'Hatran', 'Multani', 'Old_Hungarian', 'SignWriting', # New", "'Linb', 'Mlym', 'Mong', 'Mymr', 'Talu', 'Ogam', 'Ital', 'Xpeo', 'Orya', 'Osma',", "in\") print(\"a script run come from the same set. */\\n\")", "property (8 bits),\") print(\"offset to multichar other cases or zero", "'Phli', 'Prti', 'Java', 'Kthi', 'Lisu', 'Mtei', 'Sarb', 'Orkh', 'Samr', 'Lana',", "emoji-data.txt file to find the Extended Pictographic # property, which", "the values def get_type_size(table): type_size = [(\"uint8_t\", 1), (\"uint16_t\", 2),", "be referenced otherwise, so\") print(\"it should not matter whether it", "the table of caseless character sets --- print(\"/* This table", "with # any of those scripts, which are Bengali, Devanagari,", "'SignWriting', # New for Unicode 10.0.0 'Adlam', 'Bhaiksuki', 'Marchen', 'Newa',", "print(\"\\n \", end='') count = 0 print(\" 0x%05x,\" % d,", "(taking 128 characters in a block) have the same set", "'Geor', 'Glag', 'Goth', 'Grek', 'Gujr', 'Guru', 'Hani', 'Hang', 'Hano', 'Hebr',", "# for example: # # http://unicode.org/Public/emoji/11.0/emoji-data.txt # # ----------------------------------------------------------------------------- #", "script are used by macros 
defined in # pcre2_internal.h. They", "MAX_UNICODE / len(table) for i in range(0, len(table), ELEMS_PER_LINE): print(fmt", "# Parse a line of CaseFolding.txt def get_other_case(chardata): if chardata[1]", "mult),))) else: if block_size > ELEMS_PER_LINE: el = ELEMS_PER_LINE else:", "print(\"#include \\\"config.h\\\"\") print(\"#endif\") print() print(\"#include \\\"pcre2_internal.h\\\"\") print() print(\"#endif /* PCRE2_PCRE2TEST", "# The ucd_digit_sets table contains the code points of the", "lookup 57 in stage1 table yields 55 # lookup 80", "to scan emoji-data.txt for the Extended # Pictographic property. #", "int(m.group(1), 16) if m.group(3) is None: last = char else:", "in file: line = re.sub(r'#.*', '', line) chardata = list(map(str.strip,", "None: i = records[t] = len(records) index.append(i) return index, records", "'Shavian', 'Sinhala', 'Syloti_Nagri', 'Syriac', 'Tagalog', 'Tagbanwa', 'Tai_Le', 'Tamil', 'Telugu', 'Thaana',", "'Lana', 'Tavt', #New for Unicode 6.0.0 'Batk', 'Brah', 'Mand', #New", "size %d */' % (len(records) * record_size, record_size)) records =", "sets. # Now scan the sets and set appropriate offsets", "library, this module is #included\") print(\"by the pcre2test program, which", "min_stage3 = stage1, stage2, stage3 min_stage2_block, min_stage3_block = stage2_block, stage3_block", "file.close() # The Script Extensions property default value is the", "found in existing lists return_value = len(script_lists) script_lists.extend(script_numbers) return -return_value", "the PCRE project by <NAME>owski as part of # the", "(table[i:i+ELEMS_PER_LINE] + (int(i * mult),))) else: if block_size > ELEMS_PER_LINE:", "fmt) % ((i / block_size,) + table[i:i+block_size])) print(\"};\\n\") # Extract", "else: if block_size > ELEMS_PER_LINE: el = ELEMS_PER_LINE else: el", "'Multani', 'Old_Hungarian', 'SignWriting', # New for Unicode 10.0.0 'Adlam', 'Bhaiksuki',", "# each set of 10 decimal digits in Unicode. 
This", "'Lao', 'Latin', 'Limbu', 'Linear_B', 'Malayalam', 'Mongolian', 'Myanmar', 'New_Tai_Lue', 'Ogham', 'Old_Italic',", "27, 7, 12, 0, 0, 27, 0 } # 27", "didn't fix everything # . Changed string.strip to str.strip #", "module should not be referenced otherwise, so\") print(\"it should not", "original contribution, but is commented out as it # was", "'Mende_Kikakui', 'Modi', 'Mro', 'Nabataean', 'Old_North_Arabian', 'Old_Permic', 'Pahawh_Hmong', 'Palmyrene', 'Psalter_Pahlavi', 'Pau_Cin_Hau',", "break_property_names.index('Extended_Pictographic') file.close() # The Script Extensions property default value is", "Python 2.4 by rewriting two statements that needed 2.5 #", "'Sinhala', 'Syloti_Nagri', 'Syriac', 'Tagalog', 'Tagbanwa', 'Tai_Le', 'Tamil', 'Telugu', 'Thaana', 'Thai',", "block = %d\" % block_size print(s + \" */\") table", "Minor modifications made to this script: # Added #! line", "'/* When recompiling tables with a new Unicode version, please", "when updating to Unicode 11.0.0 (July 2018). # # Added", "= { /* %d bytes\" % (type, table_name, size *", "of the '9' characters in # each set of 10", "[(3, 100000), (6, 6), (6, 123456), (1, 690)], 8 ),", "% (stage2_block, stage3_block, size) if size < min_size: min_size =", "before. # 18-September-2012: Added code for multiple caseless sets. This", "= version elif unicode_version != version: print(\"WARNING: Unicode version differs", "=> Lower case letter # 12 = ucp_gbOther => Grapheme", "are negated offsets in a list of lists of #", "unused at present # # At offset 101 in the", "'Gunjala_Gondi', 'Hanifi_Rohingya', 'Makasar', 'Medefaidrin', 'Old_Sogdian', 'Sogdian', # New for Unicode", "a new block start = len(stage2) / block_size stage2 +=", "Extended # Pictographic property. # 01-October-2018: Added the 'Unknown' script", "'Sinh', 'Sylo', 'Syrc', 'Tglg', 'Tagb', 'Tale', 'Taml', 'Telu', 'Thaa', 'Thai',", "add a grapheme break property field to records. # #", "in the ucd_script_sets # vector. 
# # The ucd_records table", "this structure definition from pcre2_internal.h (the actual\\n' + \\ 'field", "stage2 += block blocks[block] = start stage1.append(start) return stage1, stage2", "table[i] = value file.close() return table # Get the smallest", "03-October-2018: Added new field for Script Extensions # 27-July-2019: Updated", "'Zinh', 'Knda', 'Kana', 'Khar', 'Khmr', 'Laoo', 'Latn', 'Limb', 'Linb', 'Mlym',", "255 to make the field 16 bits. padding_dummy = [0]", "# Minor modifications made to this script: # Added #!", "is needed. # Update for PCRE2: name changes, and SUPPORT_UCP", "+ 1): for j in range(0, script_numbers_length): found = True", "an existing set, create a new one. if not appended:", "(this will never be a Script Extension # value), then", "end='') for d in script_lists: print(\" %3d,\" % d, end='')", "1)], 2 ), \\ ( [(300, 3), (6, 6), (340,", "read_table(file_name, get_value, default_value): global unicode_version f = re.match(r'^[^/]+/([^.]+)\\.txt$', file_name) file_base", "{\") print(\" %d, /* Number of subsequent values */\" %", "for the Script Extension\") print(\"property. Each sublist is zero-terminated. */\\n\")", "up property # matching many times. The script is for", "property default value is the Script value. Parse the #", "uint32_t PRIV(ucd_caseless_sets)[] = {0};\") print(\"#else\") print() print(\"const char *PRIV(unicode_version) =", "same record, and many blocks of # characters (taking 128", "increased # their size from 8 to 12 bytes, only", "ensure that digits # in script runs all come from", "= open(file_name, 'r', encoding='utf-8') f = re.match(version_pat, file.readline()) version =", "'Cherokee', 'Common', 'Coptic', 'Cypriot', 'Cyrillic', 'Deseret', 'Devanagari', 'Ethiopic', 'Georgian', 'Glagolitic',", "'Tangut', 'Masaram_Gondi', 'Nushu', 'Soyombo', 'Zanabazar_Square', # New for Unicode 11.0.0", "blocks of # characters (taking 128 characters in a block)", "to the main output records. 
This new # code scans", "'Sora', 'Takr', #New for Unicode 7.0.0 'Bass', 'Aghb', 'Dupl', 'Elba',", "256 # This block of code was added by PH", "of these tables is actually\") print(\"needed. */\") print() print(\"#ifndef PCRE2_PCRE2TEST\")", "terminated by NOTACHAR. */\\n\") print(\"const uint32_t PRIV(ucd_caseless_sets)[] = {\") print(\"", "+= size * len(table) return total_size # Compress the table", "the file where all the # others are (GraphemeBreakProperty.txt). It", "the three characters are already # part of a set.", "# property, which is used by PCRE2 as a grapheme", "into stage 2 table) stage2 = [] # Stage 2", "Unicode 11.0.0 (June 2018). Now # we need to find", "get_type_size(table) total_size += size * len(table) return total_size # Compress", "340), (1, 690)], 4 ), \\ ( [(3, 100000), (6,", "should not be referenced otherwise, so\") print(\"it should not matter", "# Now scan the sets and set appropriate offsets for", "yields 564 # record 564 is { 27, 7, 12,", "# New for Unicode 5.2 'Avestan', 'Bamum', 'Egyptian_Hieroglyphs', 'Imperial_Aramaic', 'Inscriptional_Pahlavi',", "this_script_list = list(chardata[1].split(' ')) if len(this_script_list) == 1: return script_abbrevs.index(this_script_list[0])", "PCRE2_CODE_UNIT_WIDTH == 32\") print(\"const ucd_record PRIV(dummy_ucd_record)[] = {{\") print(\" ucp_Unknown,", "set. 
The first element in the vector # contains the", "(min_size, min_block_size)) print() print(\"/* The tables herein are needed only", "property */\") print(\" 0, /* case set */\") print(\" 0,", "= {\") print(\" NOTACHAR,\") for s in sets: s =", "# ----------------------------------------------------------------------------- # Minor modifications made to this script: #", "'Brah', 'Mand', #New for Unicode 6.1.0 'Cakm', 'Merc', 'Mero', 'Plrd',", "stage2 yields 564 # record 564 is { 27, 7,", "# # ----------------------------------------------------------------------------- # Minor modifications made to this script:", "combinations of properties into records def combine_tables(*tables): records = {}", "in range(len(records[0])): record_slice = [record[i] for record in records] slice_type,", "characters that must all # match each other caselessly. Later", "characters would be far too big. It can be efficiently", "property_%d;\\n' % (slice_type, i) # round up to the first", "New for Unicode 6.0.0 'Batak', 'Brahmi', 'Mandaic', # New for", "def print_table(table, table_name, block_size = None): type, size = get_type_size(table)", "# 30-September-2012: Added RegionalIndicator break property from Unicode 6.2.0 #", "=> Other case is U+0041 # 34 = ucp_Latin =>", "# 0 => Dummy value, unused at present # #", "/* SUPPORT_UNICODE */\") print() print(\"#endif /* PCRE2_PCRE2TEST */\") # This", "maxval <= maxlimit: return type_size[num] else: raise OverflowError(\"Too large to", "script has now been upgraded to Python 3 for PCRE2,", "in the # maint/Unicode.tables subdirectory. # # DerivedGeneralCategory.txt is found", "to the first item of the next structure in array", "<= 1: continue if chardata[1] != \"Extended_Pictographic\": continue m =", "% (len(records) * record_size, record_size)) records = list(zip(list(records.keys()), list(records.values()))) records.sort(key", "that.\") print(\"Instead, just supply some small dummy tables. 
*/\") print()", "other case does not point back here. We # now", "list(map(str.strip, line.split(';'))) if len(chardata) <= 1: continue if chardata[1] !=", "decimal digits. It is used to ensure that all the", "# every Unicode character. However, a real table covering all", "x, end=' ') print(' NOTACHAR,') print('};') print() # ------ print(\"/*", "(for # example, k, K and the Kelvin symbol are", "builder # (c) <NAME>, 2008 ############################################################################## # This script was", "'Arab', 'Armn', 'Beng', 'Bopo', 'Brai', 'Bugi', 'Buhd', 'Cans', 'Cher', 'Zyyy',", "2008 # Last Updated: 07 October 2018 ############################################################################## import re", "these sets is # written out. However, we have to", "script */\") print(\" ucp_Cn, /* type unassigned */\") print(\" ucp_gbOther,", "to ensure that all the digits in\") print(\"a script run", "They look up Unicode character properties using short # sequences", "all the emojis is \"other\". We scan the emoji-data.txt file", "ucd_record PRIV(dummy_ucd_record)[] = {{\") print(\" ucp_Unknown, /* script */\") print(\"", "'Takr', #New for Unicode 7.0.0 'Bass', 'Aghb', 'Dupl', 'Elba', 'Gran',", "caseless set (for # example, k, K and the Kelvin", "default value of 0) # which often come after a", "the Extended # Pictographic property. # 01-October-2018: Added the 'Unknown'", "and added a new # field in the record to", "k, K and the Kelvin symbol are such a set).", "return lambda chardata: enum.index(chardata[1]) # Parse a line of CaseFolding.txt", "Multistage table builder # (c) <NAME>, 2008 ############################################################################## # This", "Update for PCRE2: name changes, and SUPPORT_UCP is abolished. 
#", "#New for Unicode 12.0.0 'Elym', 'Nand', 'Hmnp', 'Wcho' ] category_names", "file that contains a digested form of the Unicode #", "(c) <NAME>, 2008 ############################################################################## # This script was submitted to", "for Unicode 7.0.0 'Bass', 'Aghb', 'Dupl', 'Elba', 'Gran', 'Khoj', 'Sind',", "'Zs' ] # The Extended_Pictographic property is not found in", "the first table in stage2 yields 17 # record 17", "than before. # 18-September-2012: Added code for multiple caseless sets.", "if len(this_script_list) == 1: return script_abbrevs.index(this_script_list[0]) script_numbers = [] for", "4)] limits = [(0, 255), (0, 65535), (0, 4294967295), (-128,", "Unicode 5.1 'Carian', 'Cham', 'Kayah_Li', 'Lepcha', 'Lycian', 'Lydian', 'Ol_Chiki', 'Rejang',", "ucd_record), containing a # script number, script extension value, character", "struct def print_records(records, record_size): print('const ucd_record PRIV(ucd_records)[] = { '", "will never be a Script Extension # value), then scan", "dummy\") print(\"16-bit field to make the whole thing a multiple", "scripts. Initialize this list with a single entry, as the", "(returning the default value of 0) # which often come", "no UCP support is needed. # Update for PCRE2: name", "table is # not much bigger than before. 
# 18-September-2012:", "ucp_Latin => No special Script Extension property # 0 =>", "line of ScriptExtensions.txt def get_script_extension(chardata): this_script_list = list(chardata[1].split(' ')) if", "sys.maxint for stage3_block in [2 ** i for i in", "get_script_extension, script_abbrevs_default) for i in range(0, MAX_UNICODE): if scriptx[i] ==", "contains the blocks with property values table = tuple(table) for", "[ 'Zzzz', 'Arab', 'Armn', 'Beng', 'Bopo', 'Brai', 'Bugi', 'Buhd', 'Cans',", "to the nearest power of slice_size size = (size +", "x[1]) for i, record in enumerate(records): print((' {' + '%6d,", "'Taml', 'Telu', 'Thaa', 'Thai', 'Tibt', 'Tfng', 'Ugar', 'Yiii', #New for", "it by hand. Instead modify the script and run it\")", "a set. If so, unite the existing set with the", "in the structure. # 30-September-2012: Added RegionalIndicator break property from", "or x == o or x == t: found =", "or DerivedGeneralCategory.txt def make_get_names(enum): return lambda chardata: enum.index(chardata[1]) # Parse", "in stage1 table yields 90 # lookup 66 (0x42) in", "'Lisu', 'Mtei', 'Sarb', 'Orkh', 'Samr', 'Lana', 'Tavt', #New for Unicode", "2), (\"uint32_t\", 4), (\"signed char\", 1), (\"pcre_int16\", 2), (\"pcre_int32\", 4)]", "'Tang', 'Gonm', 'Nshu', 'Soyo', 'Zanb', #New for Unicode 11.0.0 'Dogr',", "the job. It scans # the other_case table to find", "s: if x == c or x == o or", "case set */\") print(\" 0, /* other case */\") print(\"", "Add new characters to an existing set if found: found", "many blocks of # characters (taking 128 characters in a", "rather than using a library - so we include a\")", "print(\"/* This vector is a list of lists of scripts", "the existing set with the new set. appended = 0", "with the new set. appended = 0 for s in", "['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu',", "12 bytes, only 10 of which are currently used. 
#", "0 in stage1 table yields 0 # lookup 97 (0x61)", "-other_case[c]: t = o + other_case[o] # Scan the existing", "to other case\") print(\"or zero (32 bits, signed), script extension", "subdirectory of the # Unicode database (UCD) on the Unicode", "1) & -slice_size structure += '} ucd_record;\\n*/\\n' return size, structure", "field to records. This has increased # their size from", "# # # The main tables generated by this script", "read_table('Unicode.tables/DerivedGeneralCategory.txt', make_get_names(category_names), category_names.index('Cn')) break_props = read_table('Unicode.tables/GraphemeBreakProperty.txt', make_get_names(break_property_names), break_property_names.index('Other')) other_case =", "=> Grapheme break property \"Other\" # 0 => Not part", "found = 1 if not found: s.append(y) appended = 1", "print \"/* Total size: %d bytes\" % min_size */ print_records(records)", "(100,)], 2 ), \\ ( [(25, 3), (6, 6), (34,", "continue m = re.match(r'([0-9a-fA-F]+)(\\.\\.([0-9a-fA-F]+))?$', chardata[0]) char = int(m.group(1), 16) if", "any of those scripts, which are Bengali, Devanagari, Grantha, and", "in ascending order. # # The ucd_script_sets vector contains lists", "6), (6, 340), (1, 690)], 4 ), \\ ( [(3,", "scripts for the Script Extension\") print(\"property. Each sublist is zero-terminated.", "0, -101, 0 } # 28 = ucp_Inherited => Script", "is abolished. # # Major modifications made to this script:", "'Dogra', 'Gunjala_Gondi', 'Hanifi_Rohingya', 'Makasar', 'Medefaidrin', 'Old_Sogdian', 'Sogdian', # New for", "ucd_records vector. # # The following examples are correct for", "script_numbers = [] for d in this_script_list: script_numbers.append(script_abbrevs.index(d)) script_numbers.append(0) script_numbers_length", "'Ogam', 'Ital', 'Xpeo', 'Orya', 'Osma', 'Runr', 'Shaw', 'Sinh', 'Sylo', 'Syrc',", "for Unicode 8.0.0 'Ahom', 'Anatolian_Hieroglyphs', 'Hatran', 'Multani', 'Old_Hungarian', 'SignWriting', #", "are (GraphemeBreakProperty.txt). 
It comes from the emoji-data.txt # file, but", "16) if m.group(3) is None: last = char else: last", "= ucp_Inherited => Script inherited from predecessor # 12 =", "/ block_size,) + table[i:i+block_size])) print(\"};\\n\") # Extract the unique combinations", "%d\" % min_block_size) print(\"#error Please correct UCD_BLOCK_SIZE in pcre2_internal.h\") print(\"#endif\")", "these sets, and # offsets into the table are added", "don't leave\") print(\"a totally empty module because some compilers barf", "the start of the relevant list in the ucd_script_sets #", "maxlimit) in enumerate(limits): if minlimit <= minval and maxval <=", "%d */' % (len(records) * record_size, record_size)) records = list(zip(list(records.keys()),", "whole thing a multiple of 4 bytes. */\\n\") print_records(records, record_size)", "is used to ensure that all the digits in\") print(\"a", "for Unicode 5.2.0 # 30-April-2011: Updated list of scripts for", "\", block = %d\" % block_size print(s + \" */\")", "a list of lists of scripts for the Script Extension\")", "# 03-June-2014: Updated for Python 3 # 20-June-2014: Updated for", "block start = len(stage2) / block_size stage2 += block blocks[block]", "size, structure def test_record_size(): tests = [ \\ ( [(3,),", "'Vai', # New for Unicode 5.2 'Avestan', 'Bamum', 'Egyptian_Hieroglyphs', 'Imperial_Aramaic',", "table of these sets is # written out. However, we", "# This script constructs six tables. The ucd_caseless_sets table contains", "here so that the name has the correct index value.", "of lists of scripts for the Script Extension\") print(\"property. Each", "the open() call # . Inserted 'int' before blocksize/ELEMS_PER_LINE because", "search for sets of more than two characters that must", "may make change the actual lookup values. # # Example:", "case # -101 => Script Extension list offset = 101", "in range(char, last + 1): # It is important not", "to xxxx, thereby avoiding name clashes\") print(\"with the library. 
At", "%d, /* Number of subsequent values */\" % len(digitsets), end='')", "two-stage table min_size = sys.maxsize for block_size in [2 **", "'9' characters in each\") print(\"set of decimal digits. It is", "of the division is a float # # Added code", "unused at present # # Example: vedic tone karshana (U+1CD0)", "emojis is \"other\". We scan the emoji-data.txt file and modify", "scripts, which are Bengali, Devanagari, Grantha, and Kannada. # #", "block size %5d => %5d bytes */\" % (block_size, size)", "'Imperial_Aramaic', 'Inscriptional_Pahlavi', 'Inscriptional_Parthian', 'Javanese', 'Kaithi', 'Lisu', 'Meetei_Mayek', 'Old_South_Arabian', 'Old_Turkic', 'Samaritan',", "None: fmt = \"%3d,\" * ELEMS_PER_LINE + \" /* U+%04X", "NOTACHAR,') print('};') print() # ------ print(\"/* When #included in pcre2test,", "% (slice_type, i) # round up to the first item", "'Gonm', 'Nshu', 'Soyo', 'Zanb', #New for Unicode 11.0.0 'Dogr', 'Gong',", "'LV', 'LVT', 'Regional_Indicator', 'Other', 'ZWJ', 'Extended_Pictographic' ] test_record_size() unicode_version =", "Devanagari, Grantha, and Kannada. # # <NAME>, 03 July 2008", "'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs' ]", "Hiragana script # 7 = ucp_Lo => Other letter #", "16) return 0 # Parse a line of ScriptExtensions.txt def", "memory, setting/checking the Unicode version def read_table(file_name, get_value, default_value): global", "end=' ') print(' NOTACHAR,') print('};') print() # ------ print(\"/* When", "match each other caselessly. Each list is # in order,", "speeds up property # matching many times. The script is", "and create equivalence sets. sets = [] for c in", "RegionalIndicator break property from Unicode 6.2.0 # 13-May-2014: Updated for", "clashes\") print(\"with the library. 
At present, just one of these", "'r', encoding='utf-8') for line in file: m = re.match(r'([0-9a-fA-F]+)\\.\\.([0-9a-fA-F]+)\\s+;\\s+\\S+\\s+#\\s+Nd\\s+', line)", "'Caucasian_Albanian', 'Duployan', 'Elbasan', 'Grantha', 'Khojki', 'Khudawadi', 'Linear_A', 'Mahajani', 'Manichaean', 'Mende_Kikakui',", "print(' NOTACHAR,') print('};') print() # ------ print(\"/* When #included in", "sets: found = 0 for x in s: if x", "size * len(table)) if block_size: s += \", block =", "caseless_offsets[x] = offset offset += len(s) + 1 # End", "tables when not needed. But don't leave\") print(\"a totally empty", "] script_abbrevs = [ 'Zzzz', 'Arab', 'Armn', 'Beng', 'Bopo', 'Brai',", "open('Unicode.tables/emoji-data.txt', 'r', encoding='utf-8') for line in file: line = re.sub(r'#.*',", "offset = 1; for s in sets: for x in", "for the sets of 10 digits. --- digitsets = []", "to find sets of more than two characters that must", "= [] for d in this_script_list: script_numbers.append(script_abbrevs.index(d)) script_numbers.append(0) script_numbers_length =", "job. It scans # the other_case table to find sets", "in enumerate(limits): if minlimit <= minval and maxval <= maxlimit:", "Unicode 12.0.0 'Elymaic', 'Nandinagari', 'Nyiakeng_Puachue_Hmong', 'Wancho' ] script_abbrevs = [", "zip(*tables): i = records.get(t) if i is None: i =", "added by PH # in October 2018. Positive values are", "the emoji-data.txt file and modify the # break-props table. file", "emoji-data.txt file and modify the # break-props table. 
file =", "a line of ScriptExtensions.txt def get_script_extension(chardata): this_script_list = list(chardata[1].split(' '))", "(0x39) # lookup 57 in stage1 table yields 55 #", "NOTACHAR (0xffffffff), which is larger than # any valid character.", "Corrected size calculation # Add #ifndef SUPPORT_UCP to use dummy", "last: digitsets.append(first + 9) first += 10 file.close() digitsets.sort() print(\"/*", "minval and maxval <= maxlimit: return type_size[num] else: raise OverflowError(\"Too", "'Sgnw', #New for Unicode 10.0.0 'Adlm', 'Bhks', 'Marc', 'Newa', 'Osge',", "= {0};\") print(\"const uint32_t PRIV(ucd_caseless_sets)[] = {0};\") print(\"#else\") print() print(\"const", "for x in s: print(' 0x%04x,' % x, end=' ')", "'Mani', 'Mend', 'Modi', 'Mroo', 'Nbat', 'Narb', 'Perm', 'Hmng', 'Palm', 'Phlp',", "0 print(\" /* 0 */\", end='') for d in script_lists:", "uint32_t PRIV(ucd_caseless_sets)[] = {\") print(\" NOTACHAR,\") for s in sets:", "make_get_names(enum): return lambda chardata: enum.index(chardata[1]) # Parse a line of", "scripts for Unicode 6.0.0 # July-2012: Updated list of scripts", "properties using short # sequences of code that contains no", "in this structure definition from pcre2_internal.h (the actual\\n' + \\", "of scripts for Unicode 6.1.0 # 20-August-2012: Added scan of", "but that didn't fix everything # . 
Changed string.strip to", "'Pahawh_Hmong', 'Palmyrene', 'Psalter_Pahlavi', 'Pau_Cin_Hau', 'Siddham', 'Tirhuta', 'Warang_Citi', # New for", "table = tuple(table) for i in range(0, len(table), block_size): block", "single entry, as the zeroth # element is never used.", "# # http://unicode.org/Public/emoji/11.0/emoji-data.txt # # ----------------------------------------------------------------------------- # Minor modifications made", "its # Script Extension property has a negative value in", "stages def compress_table(table, block_size): blocks = {} # Dictionary for", "stage3_block) for stage2_block in [2 ** i for i in", "for i in range(len(records[0])): record_slice = [record[i] for record in", "( [(300, 3), (6, 6), (340, 6), (690, 1)], 4", "+ table[i:i+block_size])) print(\"};\\n\") # Extract the unique combinations of properties", "% (block_size, size) if size < min_size: min_size = size", "#! /usr/bin/python # Multistage table builder # (c) <NAME>, 2008", "maint/Unicode.tables subdirectory. # # DerivedGeneralCategory.txt is found in the \"extracted\"", "= ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt',", "the Kelvin symbol are such a set). # # Example:", "setting 'Unknown' as the default (this will never be a", "was part of the original contribution, but is commented out", "len(table) for i in range(0, len(table), ELEMS_PER_LINE): print(fmt % (table[i:i+ELEMS_PER_LINE]", "in range(2,6)]: stage_i, stage3 = compress_table(table, stage3_block) for stage2_block in", "Pictographic property. # 01-October-2018: Added the 'Unknown' script name #", "+= len(s) + 1 # End of block of code", "print(\"all the modules rather than using a library - so", "6.0.0 # July-2012: Updated list of scripts for Unicode 6.1.0", "'Gran', 'Khoj', 'Sind', 'Lina', 'Mahj', 'Mani', 'Mend', 'Modi', 'Mroo', 'Nbat',", "is # not much bigger than before. 
# 18-September-2012: Added", "records def get_record_size_struct(records): size = 0 structure = '/* When", "# updates may make change the actual lookup values. #", "= \"\" script = read_table('Unicode.tables/Scripts.txt', make_get_names(script_names), script_names.index('Unknown')) category = read_table('Unicode.tables/DerivedGeneralCategory.txt',", "in s: if x == c or x == o", "6), (6, 123456), (1, 690)], 8 ), \\ ( [(100000,", "#included\") print(\"by the pcre2test program, which redefines the PRIV macro", "linked\") print(\"all the modules rather than using a library -", "a set). # # Example: hiragana letter A (U+3042) is", "the casefolding table, which isn't used; # removed completely in", "Python 3 for PCRE2, and should be run in #", "'Nko', 'Phags_Pa', 'Phoenician', # New for Unicode 5.1 'Carian', 'Cham',", "el + \"\\n\" if block_size > ELEMS_PER_LINE: fmt = fmt", "ScriptExtensions.txt, # CaseFolding.txt, and emoji-data.txt. These must be in the", "list(chardata[1].split(' ')) if len(this_script_list) == 1: return script_abbrevs.index(this_script_list[0]) script_numbers =", "#New for Unicode 6.1.0 'Cakm', 'Merc', 'Mero', 'Plrd', 'Shrd', 'Sora',", "print(\"This module should not be referenced otherwise, so\") print(\"it should", "'Xpeo', 'Orya', 'Osma', 'Runr', 'Shaw', 'Sinh', 'Sylo', 'Syrc', 'Tglg', 'Tagb',", "Scripts.txt, ScriptExtensions.txt, and # CaseFolding.txt are directly in the UCD", "in range(1, len(script_lists) - script_numbers_length + 1): for j in", "'Armenian', 'Bengali', 'Bopomofo', 'Braille', 'Buginese', 'Buhid', 'Canadian_Aboriginal', 'Cherokee', 'Common', 'Coptic',", "4 ), \\ ( [(3, 100000), (6, 6), (6, 123456),", "case */\") print(\" ucp_Unknown, /* script extension */\") print(\" 0,", "bits), grapheme break property (8 bits),\") print(\"offset to multichar other", "# It is important not to overwrite a previously set", "\\ ( [(300, 300), (6, 6), (6, 340), (1, 690)],", "from the emoji-data.txt # file, but we list it here", "caseless_offsets = 
[0] * MAX_UNICODE offset = 1; for s", "sets of more than two characters that must all #", "== 0: other_case[c + other_case[c]] = -other_case[c] # Now scan", "However\") print(\"a comment was received about space saving - maybe", "12 = ucp_Mn => Non-spacing mark # 3 = ucp_gbExtend", "len(records) * 4 stage1, stage2 = compress_table(stage_i, stage2_block) size +=", "character's other case, for # every Unicode character. However, a", "ucd_digit_sets table contains the code points of the '9' characters", "# others are (GraphemeBreakProperty.txt). It comes from the emoji-data.txt #", "script_abbrevs.index(this_script_list[0]) script_numbers = [] for d in this_script_list: script_numbers.append(script_abbrevs.index(d)) script_numbers.append(0)", "character's other case does not point back here. We #", "a multiple of 10 characters\" % (first, last), file=sys.stderr) while", "present, just one of these tables is actually\") print(\"needed. */\")", "a multiple of 4 bytes. */\\n\") print_records(records, record_size) print_table(min_stage1, 'PRIV(ucd_stage1)')", "# was never used. A two-stage table has sufficed. \"\"\"", "%d bytes\" % min_size */ print_records(records) print_table(min_stage1, 'ucd_stage1') print_table(min_stage2, 'ucd_stage2',", "% min_size */ print_records(records) print_table(min_stage1, 'ucd_stage1') print_table(min_stage2, 'ucd_stage2', min_stage2_block) print_table(min_stage3,", "the PRIV macro to change\") print(\"table names from _pcre2_xxx to", "script. # # The script has now been upgraded to", "the vector # contains the number of subsequent elements, which", "characters that must match # each other caselessly. A new", "of records (of type ucd_record), containing a # script number,", "= ucp_Hiragana => No special Script Extension property # 0", "bytes\" % min_size */ print_records(records) print_table(min_stage1, 'ucd_stage1') print_table(min_stage2, 'ucd_stage2', min_stage2_block)", "relevant list in the ucd_script_sets # vector. 
# # The", "print() # ------ print(\"/* When #included in pcre2test, we don't", "# Three-stage tables: # Find the optimum block size for", "block_size: s += \", block = %d\" % block_size print(s", "main UCD tables. print(\"/* These are the main two-stage UCD", "other_case[c] # Trigger when this character's other case does not", "print() print(\"As well as being part of the PCRE2 library,", "def compress_table(table, block_size): blocks = {} # Dictionary for finding", "*/\" % (min_size, min_block_size)) print() print(\"/* The tables herein are", "= 8 for d in digitsets: if count == 8:", "which isn't used; # removed completely in 2012. # Corrected", "a value # greater than 255 to make the field", "a 2-stage lookup process. # # This script constructs six", "bits),\") print(\"offset to multichar other cases or zero (8 bits),", "code points of the '9' characters in # each set", "Script Extension property has a negative value in its record.", "expected to be used with # any of those scripts,", "autogenerated by the MultiStage2.py script. */\") print(\"/* Total size: %d", "This script was submitted to the PCRE project by <NAME>owski", "\"auxiliary\" subdirectory. Scripts.txt, ScriptExtensions.txt, and # CaseFolding.txt are directly in", "only one value, so first we go through the table", "uint16_t PRIV(ucd_stage2)[] = {0};\") print(\"const uint32_t PRIV(ucd_caseless_sets)[] = {0};\") print(\"#else\")", "scan the emoji-data.txt file to find the Extended Pictographic #", "library. At present, just one of these tables is actually\")", "are such a set). # # Example: hiragana letter A", "but the common logic for reading data # sets only", "the table and set \"return\" # offsets for those that", "= o + other_case[o] # Scan the existing sets to", "is in block 96 (0x60) # lookup 96 in stage1", "564 is { 27, 7, 12, 0, 0, 27, 0", "the table are added to the main output records. This", "# not much bigger than before. 
# 18-September-2012: Added code", "for i in range(1, len(script_lists) - script_numbers_length + 1): for", "again for the sets of 10 digits. --- digitsets =", "means that this character is expected to be used with", "case is U+0041 # 34 = ucp_Latin => No special", "\\ ( [(3, 300), (6, 6), (6, 340), (1, 690)],", "in sets: found = 0 for x in s: if", "print() print(\"#ifndef PCRE2_PCRE2TEST\") print() # --- Added by PH: read", "be in the # maint/Unicode.tables subdirectory. # # DerivedGeneralCategory.txt is", "101 in the ucd_script_sets vector we find the list 3,", "Scan the existing sets to see if any of the", "are correct for the Unicode 11.0.0 database. Future # updates", "i in range(5,10)]: size = len(records) * 4 stage1, stage2", "# Dictionary for finding identical blocks stage1 = [] #", "'Yiii', #New for Unicode 5.0 'Bali', 'Xsux', 'Nkoo', 'Phag', 'Phnx',", "the result of the division is a float # #", "# script number, script extension value, character type, grapheme break", "\"Extended_Pictographic\": continue m = re.match(r'([0-9a-fA-F]+)(\\.\\.([0-9a-fA-F]+))?$', chardata[0]) char = int(m.group(1), 16)", "statements that needed 2.5 # Consequent code tidy # Adjusted", "list of lists of scripts for the Script Extension\") print(\"property.", "are used by macros defined in # pcre2_internal.h. 
They look", "scriptx, padding_dummy) record_size, record_struct = get_record_size_struct(list(records.keys())) # Find the optimum", "they are part of a multi-character caseless set (for #", "lambda x: x[1]) for i, record in enumerate(records): print((' {'", "'Ogham', 'Old_Italic', 'Old_Persian', 'Oriya', 'Osmanya', 'Runic', 'Shavian', 'Sinhala', 'Syloti_Nagri', 'Syriac',", "'Extended_Pictographic' ] test_record_size() unicode_version = \"\" script = read_table('Unicode.tables/Scripts.txt', make_get_names(script_names),", "= 1; for s in sets: for x in s:", "combine_tables(script, category, break_props, caseless_offsets, other_case, scriptx, padding_dummy) record_size, record_struct =", "additional grapheme break property, because the default for # all", "# Corrected size calculation # Add #ifndef SUPPORT_UCP to use", "if not found: s.append(y) appended = 1 # If we", "print(\"#include \\\"pcre2_internal.h\\\"\") print() print(\"#endif /* PCRE2_PCRE2TEST */\") print() print(\"/* Unicode", "8.0.0 'Ahom', 'Hluw', 'Hatr', 'Mult', 'Hung', 'Sgnw', #New for Unicode", "(indices into stage 2 table) stage2 = [] # Stage", "database. */\") print(\"/* This file was autogenerated by the MultiStage2.py", "for Unicode 6.0.0 'Batk', 'Brah', 'Mand', #New for Unicode 6.1.0", "!= -other_case[c]: t = o + other_case[o] # Scan the", "+ '%6d, ' * len(record[0]) + '}, /* %3d */')", "for Unicode 12.1.0 # ---------------------------------------------------------------------------- # # # The main", "list(map(str.strip, line.split(';'))) if len(chardata) <= 1: continue value = get_value(chardata)", "Unicode records up to 12 bytes (multiple of 4). 
Set", "r\"-(\\d+\\.\\d+\\.\\d+)\\.txt$\" file = open(file_name, 'r', encoding='utf-8') f = re.match(version_pat, file.readline())", "= stage1, stage2 min_block_size = block_size print(\"/* This module is", "# file, but we list it here so that the", "'LF', 'Control', 'Extend', 'Prepend', 'SpacingMark', 'L', 'V', 'T', 'LV', 'LVT',", "character sets --- print(\"/* This table contains lists of characters", "sets, and # offsets into the table are added to", "# At offset 101 in the ucd_script_sets vector we find", "= get_type_size(table) total_size += size * len(table) return total_size #", "run come from the same set. */\\n\") print(\"const uint32_t PRIV(ucd_digit_sets)[]", "file names to take from the Unicode.tables directory # Adjusted", "'Dogr', 'Gong', 'Rohg', 'Maka', 'Medf', 'Sogo', 'Sogd', #New for Unicode", "Non-spacing mark # 3 = ucp_gbExtend => Grapheme break property", "m is None: continue first = int(m.group(1),16) last = int(m.group(2),16)", "Unicode # characters would be far too big. It can", "table into the two stages def compress_table(table, block_size): blocks =", "because some compilers barf at that.\") print(\"Instead, just supply some", "i in range(0, len(table), ELEMS_PER_LINE): print(fmt % (table[i:i+ELEMS_PER_LINE] + (int(i", "table and set \"return\" # offsets for those that are", "currently used. # # 01-March-2010: Updated list of scripts for", "set. */\\n\") print(\"const uint32_t PRIV(ucd_digit_sets)[] = {\") print(\" %d, /*", "x == o or x == t: found = 1", "module because some compilers barf at that.\") print(\"Instead, just supply", "output records. 
This new # code scans CaseFolding.txt instead of", "Example: vedic tone karshana (U+1CD0) is in block 57 (0x39)", "caseless matching set, offset to the character's other case, for", "from the Unicode.tables directory # Adjusted global table names by", "library - so we include a\") print(\"condition to cut out", "# Almost all lowercase latin characters resolve to the same", "{{\") print(\" ucp_Unknown, /* script */\") print(\" ucp_Cn, /* type", "element is never used. script_lists = [0] script_abbrevs_default = script_abbrevs.index('Zzzz')", "8 ), \\ ( [(100000, 300), (6, 6), (123456, 6),", "to the same record. One or two # are different", "'Sylo', 'Syrc', 'Tglg', 'Tagb', 'Tale', 'Taml', 'Telu', 'Thaa', 'Thai', 'Tibt',", "other_case[o] != -other_case[c]: t = o + other_case[o] # Scan", "'Old_South_Arabian', 'Old_Turkic', 'Samaritan', 'Tai_Tham', 'Tai_Viet', # New for Unicode 6.0.0", "raise OverflowError(\"Too large to fit into C types\") def get_tables_size(*tables):", "if size < min_size: min_size = size min_stage1, min_stage2 =", "never used. A two-stage table has sufficed. \"\"\" # Three-stage", "for Unicode 11.0.0 (June 2018). 
Now # we need to", "SUPPORT_UCP to use dummy tables when no UCP support is", "300), (6, 6), (6, 340), (1, 690)], 4 ), \\", "# # The script has now been upgraded to Python", "data tables: DerivedGeneralCategory.txt, # GraphemeBreakProperty.txt, Scripts.txt, ScriptExtensions.txt, # CaseFolding.txt, and", "the style is probably dreadful, but it does the job.", "/* 0 */\", end='') for d in script_lists: print(\" %3d,\"", "Unicode 8.0.0 # 02-July-2017: Updated for Unicode 10.0.0 # 03-July-2018:", "Other letter # 12 = ucp_gbOther => Grapheme break property", "# structure had a hole in it, so the resulting", "i in range(0, len(table), block_size): print((\"/* block %d */\\n\" +", "len(record[0]) + '}, /* %3d */') % (record[0] + (i,)))", "structure had a hole in it, so the resulting table", "s = \"const %s %s[] = { /* %d bytes\"", "own block, and the result is the index # number", "' * len(record[0]) + '}, /* %3d */') % (record[0]", "# data. if table[i] == default_value: table[i] = value file.close()", "The new code speeds up property # matching many times.", "(-2147483648, 2147483647)] minval = min(table) maxval = max(table) for num,", "longer # used. # # Update for Python3: # .", "ELEMS_PER_LINE else: el = block_size fmt = \"%3d,\" * el", "stage1.append(start) return stage1, stage2 # Print a table def print_table(table,", "to find the Extended_Pictographic property for emoji characters. This #", "m = re.match(r'([0-9a-fA-F]+)\\.\\.([0-9a-fA-F]+)\\s+;\\s+\\S+\\s+#\\s+Nd\\s+', line) if m is None: continue first", "records.get(t) if i is None: i = records[t] = len(records)", "PCRE2 as a grapheme breaking property. This was # done", "record. One or two # are different because they are", "here in order to compute the # offsets in the", "# Output the main UCD tables. print(\"/* These are the", "print((\"/* block %d */\\n\" + fmt) % ((i / block_size,)", "This script constructs six tables. The ucd_caseless_sets table contains #", "the main output records. 
This new # code scans CaseFolding.txt", "which is used by PCRE2 as a grapheme breaking property.", "is a table of records (of type ucd_record), containing a", "# The Extended_Pictographic property is not found in the file", "0 => No other case # -101 => Script Extension", "Added #! line at start # Removed tabs # Made", "by prefixing _pcre_. # Commented out stuff relating to the", "example: # # http://unicode.org/Public/emoji/11.0/emoji-data.txt # # ----------------------------------------------------------------------------- # Minor modifications", "block_size in [2 ** i for i in range(5,10)]: size", "# lookup 0 in stage1 table yields 0 # lookup", "= -other_case[c] # Now scan again and create equivalence sets.", "16) for i in range(char, last + 1): # It", "300), (6, 6), (123456, 6), (1, 690)], 8 ), \\", "for sets of more than two characters that must match", "hole in the structure. # 30-September-2012: Added RegionalIndicator break property", "in [2 ** i for i in range(5,10)]: size =", "# offsets for those that are not already set. for", "(-128, 127), (-32768, 32767), (-2147483648, 2147483647)] minval = min(table) maxval", "not a Python # programmer, so the style is probably", "index = [] for t in zip(*tables): i = records.get(t)", "Dummy value, unused at present # # Almost all lowercase", "-i # Not found in existing lists return_value = len(script_lists)", "'Limbu', 'Linear_B', 'Malayalam', 'Mongolian', 'Myanmar', 'New_Tai_Lue', 'Ogham', 'Old_Italic', 'Old_Persian', 'Oriya',", "inherited from predecessor # 12 = ucp_Mn => Non-spacing mark", "table of digit\") print(\"sets, nor the the large main UCD", "get_tables_size(stage1, stage2, stage3) # print \"/* %5d / %3d =>", "the Script Extension\") print(\"property. Each sublist is zero-terminated. 
*/\\n\") print(\"const", "27-July-2019: Updated for Unicode 12.1.0 # ---------------------------------------------------------------------------- # # #", "print(\" NOTACHAR,\") for s in sets: s = sorted(s) for", "character with more than one script listed for its #", "found = 1 # Add new characters to an existing", "we include a\") print(\"condition to cut out the tables when", "'Tagalog', 'Tagbanwa', 'Tai_Le', 'Tamil', 'Telugu', 'Thaana', 'Thai', 'Tibetan', 'Tifinagh', 'Ugaritic',", "'Phnx', #New for Unicode 5.1 'Cari', 'Cham', 'Kali', 'Lepc', 'Lyci',", "[(3,), (6,), (6,), (1,)], 1 ), \\ ( [(300,), (600,),", "# add padding: round up to the nearest power of", "tables generated by this script are used by macros defined", "34 = ucp_Latin => Latin script # 5 = ucp_Ll", "r\"^# \" + re.escape(file_base) + r\"-(\\d+\\.\\d+\\.\\d+)\\.txt$\" file = open(file_name, 'r',", "of 0) # which often come after a line which", "\" /* U+%04X */\" mult = MAX_UNICODE / len(table) for", "'Ol_Chiki', 'Rejang', 'Saurashtra', 'Sundanese', 'Vai', # New for Unicode 5.2", "\"Extend\" # 0 => Not part of a caseless set", "'Tai_Tham', 'Tai_Viet', # New for Unicode 6.0.0 'Batak', 'Brahmi', 'Mandaic',", "match each other caselessly. Later in this script a table", "== c or x == o or x == t:", "\\ ] for test in tests: size, struct = get_record_size_struct(test[0])", "for i, record in enumerate(records): print((' {' + '%6d, '", "1 # End of block of code for creating offsets", "min_stage1, min_stage2 = stage1, stage2 min_block_size = block_size print(\"/* This", "print(\"#ifndef PCRE2_PCRE2TEST\") print() # --- Added by PH: read Scripts.txt", "int(m.group(2),16) if ((last - first + 1) % 10) !=", "other_case[c + other_case[c]] = -other_case[c] # Now scan again and", "and the result is the index # number of the", "others are (GraphemeBreakProperty.txt). It comes from the emoji-data.txt # file,", "# other blocks. 
This leads to a 2-stage lookup process.", "'Nbat', 'Narb', 'Perm', 'Hmng', 'Palm', 'Phlp', 'Pauc', 'Sidd', 'Tirh', 'Wara',", "at start # Removed tabs # Made it work with", "1: return script_abbrevs.index(this_script_list[0]) script_numbers = [] for d in this_script_list:", "'Copt', 'Cprt', 'Cyrl', 'Dsrt', 'Deva', 'Ethi', 'Geor', 'Glag', 'Goth', 'Grek',", "the Unicode records up to 12 bytes (multiple of 4).", "'Sogd', #New for Unicode 12.0.0 'Elym', 'Nand', 'Hmnp', 'Wcho' ]", "'types in this structure definition from pcre2_internal.h (the actual\\n' +", "property from Unicode 6.2.0 # 13-May-2014: Updated for PCRE2 #", "script is for the use of PCRE maintainers, to #", "{ 34, 5, 12, 0, -32, 34, 0 } #", "Now # we need to find the Extended_Pictographic property for", "constructs six tables. The ucd_caseless_sets table contains # lists of", "that must match # each other caselessly. A new table", "this work here in order to compute the # offsets", "+ 1): if break_props[i] != break_property_names.index('Other'): print(\"WARNING: Emoji 0x%x has", "set # value because in the CaseFolding file there are", "% (min_size, min_block_size)) print() print(\"/* The tables herein are needed", "contains block numbers (indices into stage 2 table) stage2 =", "to the start of the relevant list in the ucd_script_sets", "- script_numbers_length + 1): for j in range(0, script_numbers_length): found", "'Linear_A', 'Mahajani', 'Manichaean', 'Mende_Kikakui', 'Modi', 'Mro', 'Nabataean', 'Old_North_Arabian', 'Old_Permic', 'Pahawh_Hmong',", "script # 7 = ucp_Lo => Other letter # 12", "bits, signed), and a dummy\") print(\"16-bit field to make the", "), \\ ( [(25, 3), (6, 6), (34, 6), (68,", "# -101 => Script Extension list offset = 101 #", "!= version: print(\"WARNING: Unicode version differs in %s\", file_name, file=sys.stderr)", "(690, 1)], 4 ), \\ ( [(3, 300), (6, 6),", "by PH in September 2012. I am not a Python", "records. 
This has increased # their size from 8 to", "'Mahajani', 'Manichaean', 'Mende_Kikakui', 'Modi', 'Mro', 'Nabataean', 'Old_North_Arabian', 'Old_Permic', 'Pahawh_Hmong', 'Palmyrene',", "of the PCRE2 library, this module is #included\") print(\"by the", "makes for greater speed. # # Conceptually, there is a", "\"\\n\" if block_size > ELEMS_PER_LINE: fmt = fmt * int(block_size", "script_numbers[j]: found = False break if found: return -i #", "return -i # Not found in existing lists return_value =", "print_table(min_stage1, 'PRIV(ucd_stage1)') print_table(min_stage2, 'PRIV(ucd_stage2)', min_block_size) print(\"#if UCD_BLOCK_SIZE != %d\" %", "The ucd_stage2 table is a table of \"virtual\" blocks; each", "return size, structure def test_record_size(): tests = [ \\ (", "the whole table in memory, setting/checking the Unicode version def", "'Brai', 'Bugi', 'Buhd', 'Cans', 'Cher', 'Zyyy', 'Copt', 'Cprt', 'Cyrl', 'Dsrt',", "Parse the # file, setting 'Unknown' as the default (this", "CaseFolding.txt instead of UnicodeData.txt, which is no longer # used.", "if x == c or x == o or x", "= ucp_Lo => Other letter # 12 = ucp_gbOther =>", "+ \\ 'field names will be different):\\n\\ntypedef struct {\\n' for", "script_numbers.append(0) script_numbers_length = len(script_numbers) for i in range(1, len(script_lists) -", "encoding='utf-8') for line in file: line = re.sub(r'#.*', '', line)", "are case-equivalent. if other_case[o] != -other_case[c]: t = o +", "size min_stage1, min_stage2 = stage1, stage2 min_block_size = block_size print(\"/*", "numbers that are the # Script Extensions properties of certain", "# part of a set. If so, unite the existing", "(U+1CD0) is in block 57 (0x39) # lookup 57 in", "1) & -slice_size size += slice_size structure += '%s property_%d;\\n'", "a\") print(\"special record. */\") print() print(\"#if PCRE2_CODE_UNIT_WIDTH == 32\") print(\"const", "NOTACHAR = 0xffffffff # Parse a line of Scripts.txt, GraphemeBreakProperty.txt", "default value is the Script value. 
Parse the # file,", "print(\"needed. */\") print() print(\"#ifndef PCRE2_PCRE2TEST\") print() print(\"#ifdef HAVE_CONFIG_H\") print(\"#include \\\"config.h\\\"\")", "in block 57 (0x39) # lookup 57 in stage1 table", "script_numbers.append(script_abbrevs.index(d)) script_numbers.append(0) script_numbers_length = len(script_numbers) for i in range(1, len(script_lists)", "so\") print(\"it should not matter whether it is compiled or", "# Parse a line of ScriptExtensions.txt def get_script_extension(chardata): this_script_list =", "# DerivedGeneralCategory.txt is found in the \"extracted\" subdirectory of the", "= \"%3d,\" * ELEMS_PER_LINE + \" /* U+%04X */\" mult", "in records] slice_type, slice_size = get_type_size(record_slice) # add padding: round", "we don't need the table of digit\") print(\"sets, nor the", "the ucd_script_sets vector we find the list 3, 15, 107,", "'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm',", "out stuff relating to the casefolding table, which isn't used;", "0, 0, -101, 0 } # 28 = ucp_Inherited =>", "tuple(table) for i in range(0, len(table), block_size): block = table[i:i+block_size]", "Unicode. This is used to ensure that digits # in", "was added by PH in September 2012. I am not", "code to scan emoji-data.txt for the Extended # Pictographic property.", "for block_size in [2 ** i for i in range(5,10)]:", "print(\"a script run come from the same set. */\\n\") print(\"const", "# of each block. The result of a lookup in", "to change\") print(\"table names from _pcre2_xxx to xxxx, thereby avoiding", "was never used. A two-stage table has sufficed. \"\"\" #", "print_records(records) print_table(min_stage1, 'ucd_stage1') print_table(min_stage2, 'ucd_stage2', min_stage2_block) print_table(min_stage3, 'ucd_stage3', min_stage3_block) \"\"\"", "(stage2_block, stage3_block, size) if size < min_size: min_size = size", "range(0, len(table), ELEMS_PER_LINE): print(fmt % (table[i:i+ELEMS_PER_LINE] + (int(i * mult),)))", "here. 
We # now have three characters that are case-equivalent.", "2-stage lookup process. # # This script constructs six tables.", "break-props table. file = open('Unicode.tables/emoji-data.txt', 'r', encoding='utf-8') for line in", "an existing set if found: found = 0 for y", "PRIV macro to change\") print(\"table names from _pcre2_xxx to xxxx,", "(U+0061) is in block 0 # lookup 0 in stage1", "ELEMS_PER_LINE) for i in range(0, len(table), block_size): print((\"/* block %d", "57 (0x39) # lookup 57 in stage1 table yields 55", "for i in range(0, len(table), ELEMS_PER_LINE): print(fmt % (table[i:i+ELEMS_PER_LINE] +", "= ['Unknown', 'Arabic', 'Armenian', 'Bengali', 'Bopomofo', 'Braille', 'Buginese', 'Buhid', 'Canadian_Aboriginal',", "combine_tables(*tables): records = {} index = [] for t in", "['Unknown', 'Arabic', 'Armenian', 'Bengali', 'Bopomofo', 'Braille', 'Buginese', 'Buhid', 'Canadian_Aboriginal', 'Cherokee',", "overwrite a previously set # value because in the CaseFolding", "str.strip # . Added encoding='utf-8' to the open() call #", "macros defined in # pcre2_internal.h. 
They look up Unicode character", "'Linear_B', 'Malayalam', 'Mongolian', 'Myanmar', 'New_Tai_Lue', 'Ogham', 'Old_Italic', 'Old_Persian', 'Oriya', 'Osmanya',", "= int(m.group(2),16) if ((last - first + 1) % 10)", "for Unicode 7.0.0 'Bassa_Vah', 'Caucasian_Albanian', 'Duployan', 'Elbasan', 'Grantha', 'Khojki', 'Khudawadi',", "blocksize/ELEMS_PER_LINE because an int is # required and the result", "not contain a multiple of 10 characters\" % (first, last),", "Unicode 11.0.0 'Dogr', 'Gong', 'Rohg', 'Maka', 'Medf', 'Sogo', 'Sogd', #New", "is the # negated offset to the start of the", "print('};') print() # ------ print(\"/* When #included in pcre2test, we", "table is output containing these sets, and # offsets into", "the code points for the '9' characters in each\") print(\"set", "New for Unicode 11.0.0 'Dogra', 'Gunjala_Gondi', 'Hanifi_Rohingya', 'Makasar', 'Medefaidrin', 'Old_Sogdian',", "we need some padding # to get the Unicode records", "for i in range(5,10)]: size = len(records) * 4 stage1,", "slice_type, slice_size = get_type_size(record_slice) size = (size + slice_size -", "#New for Unicode 5.0 'Bali', 'Xsux', 'Nkoo', 'Phag', 'Phnx', #New", "properties of certain characters. Each list is terminated # by", "Unicode 12.1.0 # ---------------------------------------------------------------------------- # # # The main tables", "scan emoji-data.txt for the Extended # Pictographic property. 
# 01-October-2018:", "not to overwrite a previously set # value because in", "int(block_size / ELEMS_PER_LINE) for i in range(0, len(table), block_size): print((\"/*", "some compilers barf at that.\") print(\"Instead, just supply some small", "Unicode database (UCD) on the Unicode web site; GraphemeBreakProperty.txt is", "the original contribution, but is commented out as it #", "16) - int(chardata[0], 16) return 0 # Parse a line", "The ucd_records table contains one instance of every unique record", "built,\") print(\"and in PCRE2 that happens automatically with UTF support.\")", "Unicode 5.2 'Avst', 'Bamu', 'Egyp', 'Armi', 'Phli', 'Prti', 'Java', 'Kthi',", "for i in range(0, len(table), block_size): block = table[i:i+block_size] start", "correct index value. break_property_names = ['CR', 'LF', 'Control', 'Extend', 'Prepend',", "'Tirh', 'Wara', #New for Unicode 8.0.0 'Ahom', 'Hluw', 'Hatr', 'Mult',", "that contains no branches, which makes for greater speed. #", "code for multiple caseless sets. This uses the # final", "other_case[c]] = -other_case[c] # Now scan again and create equivalence", "= 0x110000 NOTACHAR = 0xffffffff # Parse a line of", "equivalence sets. sets = [] for c in range(MAX_UNICODE): o", "is { 28, 12, 3, 0, 0, -101, 0 }", "the relevant list in the ucd_script_sets # vector. # #", "\\ ( [(300, 3), (6, 6), (340, 6), (690, 1)],", "indexed by # the offset of a character within its", "print() print(\"#include \\\"pcre2_internal.h\\\"\") print() print(\"#endif /* PCRE2_PCRE2TEST */\") print() print(\"/*", "= {} # Dictionary for finding identical blocks stage1 =", "open(file_name, 'r', encoding='utf-8') f = re.match(version_pat, file.readline()) version = f.group(1)", "458 is { 28, 12, 3, 0, 0, -101, 0", "file and modify the # break-props table. file = open('Unicode.tables/emoji-data.txt',", "340), (1, 690)], 4 ), \\ ( [(300, 300), (6,", "does the job. It scans # the other_case table to", "Script Extensions field to records. 
This has increased # their", "for the characters. caseless_offsets = [0] * MAX_UNICODE offset =", "def get_tables_size(*tables): total_size = 0 for table in tables: type,", "11.0.0 'Dogr', 'Gong', 'Rohg', 'Maka', 'Medf', 'Sogo', 'Sogd', #New for", "encoding='utf-8' to the open() call # . Inserted 'int' before", "'Syriac', 'Tagalog', 'Tagbanwa', 'Tai_Le', 'Tamil', 'Telugu', 'Thaana', 'Thai', 'Tibetan', 'Tifinagh',", "int(m.group(3), 16) for i in range(char, last + 1): if", "property %s, not 'Other'\", i, break_property_names[break_props[i]], file=sys.stderr) break_props[i] = break_property_names.index('Extended_Pictographic')", "# used. # # Update for Python3: # . Processed", "Dummy value, unused at present # # At offset 101", "'Palmyrene', 'Psalter_Pahlavi', 'Pau_Cin_Hau', 'Siddham', 'Tirhuta', 'Warang_Citi', # New for Unicode", "with Python 2.4 by rewriting two statements that needed 2.5", "char *PRIV(unicode_version) = \\\"{}\\\";\".format(unicode_version)) print() print(\"/* If the 32-bit library", "# --- Added by PH: output the table of caseless", "'Rejang', 'Saurashtra', 'Sundanese', 'Vai', # New for Unicode 5.2 'Avestan',", "record, and many blocks of # characters (taking 128 characters", "table of records (of type ucd_record), containing a # script", "numbers (indices into stage 2 table) stage2 = [] #", "\"\"\" # Three-stage tables: # Find the optimum block size", "17 # record 17 is { 34, 5, 12, 0,", "= 101 # 0 => Dummy value, unused at present", "'Cyrl', 'Dsrt', 'Deva', 'Ethi', 'Geor', 'Glag', 'Goth', 'Grek', 'Gujr', 'Guru',", "% (type, table_name, size * len(table)) if block_size: s +=", "27 = ucp_Hiragana => No special Script Extension property #", "that didn't fix everything # . Changed string.strip to str.strip", "'T', 'LV', 'LVT', 'Regional_Indicator', 'Other', 'ZWJ', 'Extended_Pictographic' ] test_record_size() unicode_version", "the large main UCD tables. 
*/\") print() print(\"#ifndef PCRE2_PCRE2TEST\") print()", "grapheme break property (8 bits),\") print(\"offset to multichar other cases", "[2 ** i for i in range(2,6)]: stage_i, stage3 =", "(type, table_name, size * len(table)) if block_size: s += \",", "with a single entry, as the zeroth # element is", "a new one. if not appended: sets.append([c, o, t]) #", "i is None: i = records[t] = len(records) index.append(i) return", "sets: for x in s: caseless_offsets[x] = offset offset +=", "where all the # others are (GraphemeBreakProperty.txt). It comes from", "96 in stage1 table yields 90 # lookup 66 (0x42)", "'Mahj', 'Mani', 'Mend', 'Modi', 'Mroo', 'Nbat', 'Narb', 'Perm', 'Hmng', 'Palm',", "'Medefaidrin', 'Old_Sogdian', 'Sogdian', # New for Unicode 12.0.0 'Elymaic', 'Nandinagari',", "max(table) for num, (minlimit, maxlimit) in enumerate(limits): if minlimit <=", "01-March-2010: Updated list of scripts for Unicode 5.2.0 # 30-April-2011:", "directory. The emoji-data.txt file is # in files associated with", "for Unicode 6.0.0 # July-2012: Updated list of scripts for", "assert(size == test[1]) #print struct def print_records(records, record_size): print('const ucd_record", "character. Each list is terminated by NOTACHAR. */\\n\") print(\"const uint32_t", "Consequent code tidy # Adjusted data file names to take", "# Adjusted global table names by prefixing _pcre_. # Commented", "stage1, stage2 min_block_size = block_size print(\"/* This module is generated", "*/\") print() print(\"#ifndef PCRE2_PCRE2TEST\") print() # --- Added by PH:", "is terminated # by zero (ucp_Unknown). A character with more", "*/\\n\") print(\"const uint32_t PRIV(ucd_digit_sets)[] = {\") print(\" %d, /* Number", "probably dreadful, but it does the job. 
It scans #", "if size < min_size: min_size = size min_stage1, min_stage2, min_stage3", "in each record are:\") print(\"script (8 bits), character type (8", "using short # sequences of code that contains no branches,", "{0};\") print(\"const uint32_t PRIV(ucd_caseless_sets)[] = {0};\") print(\"#else\") print() print(\"const char", "code to add a grapheme break property field to records.", "lists of characters that are caseless sets of\") print(\"more than", "'Batak', 'Brahmi', 'Mandaic', # New for Unicode 6.1.0 'Chakma', 'Meroitic_Cursive',", "print(\"/* This file was autogenerated by the MultiStage2.py script. */\")", "A number of extensions have been added to the original", "# . Added encoding='utf-8' to the open() call # .", "in order to compute the # offsets in the table", "#New for Unicode 7.0.0 'Bass', 'Aghb', 'Dupl', 'Elba', 'Gran', 'Khoj',", "part of the PCRE2 library, this module is #included\") print(\"by", "print() print(\"#ifndef PCRE2_PCRE2TEST\") print() print(\"#ifdef HAVE_CONFIG_H\") print(\"#include \\\"config.h\\\"\") print(\"#endif\") print()", "the offset of a character within its own block, and", "s += \", block = %d\" % block_size print(s +", "# offsets in the table that are inserted into the", "record. */\") print() print(\"#if PCRE2_CODE_UNIT_WIDTH == 32\") print(\"const ucd_record PRIV(dummy_ucd_record)[]", "'Cprt', 'Cyrl', 'Dsrt', 'Deva', 'Ethi', 'Geor', 'Glag', 'Goth', 'Grek', 'Gujr',", "Read the whole table in memory, setting/checking the Unicode version", "database. 
Future # updates may make change the actual lookup", "= {{0,0,0,0,0,0,0 }};\") print(\"const uint16_t PRIV(ucd_stage1)[] = {0};\") print(\"const uint16_t", "30-September-2012: Added RegionalIndicator break property from Unicode 6.2.0 # 13-May-2014:", "Luckily, the # structure had a hole in it, so", "* len(table) return total_size # Compress the table into the", "d, end='') count += 1 print(\"\\n};\\n\") print(\"/* This vector is", "extension (16 bits, signed), and a dummy\") print(\"16-bit field to", "PCRE2: name changes, and SUPPORT_UCP is abolished. # # Major", "d, end='') count += 1 if d == 0: print(\"\\n", "point divided by 128, since 128 is the size #", "from predecessor # 12 = ucp_Mn => Non-spacing mark #", "4), (\"signed char\", 1), (\"pcre_int16\", 2), (\"pcre_int32\", 4)] limits =", "multiple scripts. Initialize this list with a single entry, as", "block size for the two-stage table min_size = sys.maxsize for", "i, record in enumerate(records): print((' {' + '%6d, ' *", "3-stage table min_size = sys.maxint for stage3_block in [2 **", "for x in s: caseless_offsets[x] = offset offset += len(s)", "is terminated by NOTACHAR. */\\n\") print(\"const uint32_t PRIV(ucd_caseless_sets)[] = {\")", "'Buhd', 'Cans', 'Cher', 'Zyyy', 'Copt', 'Cprt', 'Cyrl', 'Dsrt', 'Deva', 'Ethi',", "2.4 by rewriting two statements that needed 2.5 # Consequent", "# # This script constructs six tables. The ucd_caseless_sets table", "# Allocate a new block start = len(stage2) / block_size", "'Hani', 'Hang', 'Hano', 'Hebr', 'Hira', 'Zinh', 'Knda', 'Kana', 'Khar', 'Khmr',", "set \"return\" # offsets for those that are not already", "(0x60) # lookup 96 in stage1 table yields 90 #", "size = len(records) * 4 stage1, stage2 = compress_table(stage_i, stage2_block)", "property # 0 => Dummy value, unused at present #", "records up to 12 bytes (multiple of 4). 
Set a", "f = re.match(version_pat, file.readline()) version = f.group(1) if unicode_version ==", "'Meroitic_Hieroglyphs', 'Miao', 'Sharada', 'Sora_Sompeng', 'Takri', # New for Unicode 7.0.0", "tables: type, size = get_type_size(table) total_size += size * len(table)", "more than two characters that must match # each other", "print(\"special record. */\") print() print(\"#if PCRE2_CODE_UNIT_WIDTH == 32\") print(\"const ucd_record", "3), (6, 6), (340, 6), (690, 1)], 4 ), \\", "number of subsequent elements, which are in ascending order. #", "for d in this_script_list: script_numbers.append(script_abbrevs.index(d)) script_numbers.append(0) script_numbers_length = len(script_numbers) for", "in # pcre2_internal.h. They look up Unicode character properties using", "If the 32-bit library is run in non-32-bit mode, character", "for stage3_block in [2 ** i for i in range(2,6)]:", "\\ 'field names will be different):\\n\\ntypedef struct {\\n' for i", "small dummy tables. */\") print() print(\"#ifndef SUPPORT_UNICODE\") print(\"const ucd_record PRIV(ucd_records)[]", "at present # # Example: vedic tone karshana (U+1CD0) is", "print_table(min_stage2, 'PRIV(ucd_stage2)', min_block_size) print(\"#if UCD_BLOCK_SIZE != %d\" % min_block_size) print(\"#error", "/* other case */\") print(\" ucp_Unknown, /* script extension */\")", "# -32 (-0x20) => Other case is U+0041 # 34", "it is compiled or not. However\") print(\"a comment was received", "16 s = \"const %s %s[] = { /* %d", "not needed. But don't leave\") print(\"a totally empty module because", "zero-terminated. */\\n\") print(\"const uint8_t PRIV(ucd_script_sets)[] = {\") count = 0", "script # 5 = ucp_Ll => Lower case letter #", "# # The ucd_stage2 table is a table of \"virtual\"", "tables is actually\") print(\"needed. 
*/\") print() print(\"#ifndef PCRE2_PCRE2TEST\") print() print(\"#ifdef", "'Elym', 'Nand', 'Hmnp', 'Wcho' ] category_names = ['Cc', 'Cf', 'Cn',", "calculation # Add #ifndef SUPPORT_UCP to use dummy tables when", "'Glag', 'Goth', 'Grek', 'Gujr', 'Guru', 'Hani', 'Hang', 'Hano', 'Hebr', 'Hira',", "line) chardata = list(map(str.strip, line.split(';'))) if len(chardata) <= 1: continue", "sets. This uses the # final hole in the structure.", "modify the script and run it\") print(\"to regenerate this code.\")", "in pcre2_internal.h\") print(\"#endif\") print(\"#endif /* SUPPORT_UNICODE */\") print() print(\"#endif /*", "all the # others are (GraphemeBreakProperty.txt). It comes from the", "when UCP support is built,\") print(\"and in PCRE2 that happens", "is no longer # used. # # Update for Python3:", "the ucd_records vector. # # The following examples are correct", "and SUPPORT_UCP is abolished. # # Major modifications made to", "files associated with Unicode Technical Standard #51 (\"Unicode Emoji\"), #", "so, unite the existing set with the new set. appended", "property support. The new code speeds up property # matching", "so we include a\") print(\"condition to cut out the tables", "'Old_Italic', 'Old_Persian', 'Oriya', 'Osmanya', 'Runic', 'Shavian', 'Sinhala', 'Syloti_Nagri', 'Syriac', 'Tagalog',", "larger than # any valid character. 
The first list is", "3 for PCRE2, and should be run in # the", "C language type for the values def get_type_size(table): type_size =", "(of type ucd_record), containing a # script number, script extension", "= blocks.get(block) if start is None: # Allocate a new", "+ \" */\") table = tuple(table) if block_size is None:", "# lookup 80 (0x50) in table 55 in stage2 yields", "block 57 (0x39) # lookup 57 in stage1 table yields", "fmt * int(block_size / ELEMS_PER_LINE) for i in range(0, len(table),", "'Tagbanwa', 'Tai_Le', 'Tamil', 'Telugu', 'Thaana', 'Thai', 'Tibetan', 'Tifinagh', 'Ugaritic', 'Yi',", "the emoji-data.txt # file, but we list it here so", "blocks = {} # Dictionary for finding identical blocks stage1", "compilers barf at that.\") print(\"Instead, just supply some small dummy", "Parse a line of CaseFolding.txt def get_other_case(chardata): if chardata[1] ==", "\\ ( [(100000, 300), (6, 6), (123456, 6), (1, 690)],", "# [python3] ./MultiStage2.py >../src/pcre2_ucd.c # # It requires six Unicode", "Almost all lowercase latin characters resolve to the same record.", "made to this script: # Added #! line at start", "MAX_UNICODE = 0x110000 NOTACHAR = 0xffffffff # Parse a line", "'Tglg', 'Tagb', 'Tale', 'Taml', 'Telu', 'Thaa', 'Thai', 'Tibt', 'Tfng', 'Ugar',", "print(\"/* This table contains lists of characters that are caseless", "# # It requires six Unicode data tables: DerivedGeneralCategory.txt, #", "in the record to hold the value. Luckily, the #", "a hole in it, so the resulting table is #", "table in stage2 yields 17 # record 17 is {", "= offset offset += len(s) + 1 # End of", "contains lists of characters that are caseless sets of\") print(\"more", "of caseless character sets --- print(\"/* This table contains lists", "range(MAX_UNICODE): if other_case[c] != 0 and other_case[c + other_case[c]] ==", "sets of 10 digits. 
--- digitsets = [] file =", "= \"const %s %s[] = { /* %d bytes\" %", "records = list(zip(list(records.keys()), list(records.values()))) records.sort(key = lambda x: x[1]) for", "= get_type_size(table) ELEMS_PER_LINE = 16 s = \"const %s %s[]", "list of scripts for Unicode 5.2.0 # 30-April-2011: Updated list", "# pcre2_internal.h. They look up Unicode character properties using short", "( [(3,), (6,), (6,), (1,)], 1 ), \\ ( [(300,),", "(0x61) in the first table in stage2 yields 17 #", "array record_slice = [record[0] for record in records] slice_type, slice_size", "open('Unicode.tables/Scripts.txt', 'r', encoding='utf-8') for line in file: m = re.match(r'([0-9a-fA-F]+)\\.\\.([0-9a-fA-F]+)\\s+;\\s+\\S+\\s+#\\s+Nd\\s+',", "print(\"it should not matter whether it is compiled or not.", "in table 55 in stage2 yields 458 # record 458", "caselessly. Each list is # in order, and is terminated", "2 ), \\ ( [(300, 3), (6, 6), (340, 6),", "for Python3: # . Processed with 2to3, but that didn't", "letter A (U+3042) is in block 96 (0x60) # lookup", "'Tavt', #New for Unicode 6.0.0 'Batk', 'Brah', 'Mand', #New for", "for multiple caseless sets. This uses the # final hole", "'C' or chardata[1] == 'S': return int(chardata[2], 16) - int(chardata[0],", "Scripts.txt, ScriptExtensions.txt, # CaseFolding.txt, and emoji-data.txt. These must be in", "result of a lookup in ucd_stage1 a \"virtual\" block number.", "# sequences of code that contains no branches, which makes", "the smallest possible C language type for the values def", "2018. Positive values are used for just a single script", "== y: found = 1 if not found: s.append(y) appended", "print \"/* %5d / %3d => %5d bytes */\" %", "(1, 690)], 4 ), \\ ( [(300, 300), (6, 6),", "[(3, 300), (6, 6), (6, 340), (1, 690)], 4 ),", "A two-stage table has sufficed. \"\"\" # Three-stage tables: #", "those that are not already set. for c in range(MAX_UNICODE):", "# done when updating to Unicode 11.0.0 (July 2018). 
#", "multichar other cases or zero (8 bits), offset to other", "in this_script_list: script_numbers.append(script_abbrevs.index(d)) script_numbers.append(0) script_numbers_length = len(script_numbers) for i in", "Unicode 5.2 'Avestan', 'Bamum', 'Egyptian_Hieroglyphs', 'Imperial_Aramaic', 'Inscriptional_Pahlavi', 'Inscriptional_Parthian', 'Javanese', 'Kaithi',", "'Mymr', 'Talu', 'Ogam', 'Ital', 'Xpeo', 'Orya', 'Osma', 'Runr', 'Shaw', 'Sinh',", "# . Changed string.strip to str.strip # . Added encoding='utf-8'", "made to this script: # Added code to add a", "Added by PH: output the table of caseless character sets", "line at start # Removed tabs # Made it work", "is None: last = char else: last = int(m.group(3), 16)", "record_size): print('const ucd_record PRIV(ucd_records)[] = { ' + \\ '/*", "Update for Python3: # . Processed with 2to3, but that", "o, t]) # End of loop looking for caseless sets.", "'Cyrillic', 'Deseret', 'Devanagari', 'Ethiopic', 'Georgian', 'Glagolitic', 'Gothic', 'Greek', 'Gujarati', 'Gurmukhi',", "+ \"\\n\" if block_size > ELEMS_PER_LINE: fmt = fmt *", "of the next structure in array record_slice = [record[0] for", "'Osma', 'Runr', 'Shaw', 'Sinh', 'Sylo', 'Syrc', 'Tglg', 'Tagb', 'Tale', 'Taml',", "to the casefolding table, which isn't used; # removed completely", "data # sets only one value, so first we go", "=> Script Extension list offset = 101 # 0 =>", "= read_table('Unicode.tables/DerivedGeneralCategory.txt', make_get_names(category_names), category_names.index('Cn')) break_props = read_table('Unicode.tables/GraphemeBreakProperty.txt', make_get_names(break_property_names), break_property_names.index('Other')) other_case", "Script Extensions property default value is the Script value. Parse", "*/\\n\") print(\"const uint32_t PRIV(ucd_caseless_sets)[] = {\") print(\" NOTACHAR,\") for s", "4 bytes. 
*/\\n\") print_records(records, record_size) print_table(min_stage1, 'PRIV(ucd_stage1)') print_table(min_stage2, 'PRIV(ucd_stage2)', min_block_size)", "offset to the character's other case, for # every Unicode", "scans # the other_case table to find sets of more", "= ucp_Ll => Lower case letter # 12 = ucp_gbOther", "field 16 bits. padding_dummy = [0] * MAX_UNICODE padding_dummy[0] =", "for PCRE2, and should be run in # the maint", "in ucd_stage1 a \"virtual\" block number. # # The ucd_stage2", "used with # any of those scripts, which are Bengali,", "than one script listed for its # Script Extension property", "characters that are case-equivalent. if other_case[o] != -other_case[c]: t =", "lookup 80 (0x50) in table 55 in stage2 yields 458", "Positive values are used for just a single script for", "of Unicode property support. The new code speeds up property", "the \"auxiliary\" subdirectory. Scripts.txt, ScriptExtensions.txt, and # CaseFolding.txt are directly", "other_case, scriptx, padding_dummy) record_size, record_struct = get_record_size_struct(list(records.keys())) # Find the", "grapheme break property */\") print(\" 0, /* case set */\")", "123456), (1, 690)], 8 ), \\ ( [(100000, 300), (6,", "if block_size: s += \", block = %d\" % block_size", "= [] for c in range(MAX_UNICODE): o = c +", "not point back here. We # now have three characters", "last + 1): # It is important not to overwrite", "{0};\") print(\"const uint16_t PRIV(ucd_stage2)[] = {0};\") print(\"const uint32_t PRIV(ucd_caseless_sets)[] =", "New for Unicode 5.1 'Carian', 'Cham', 'Kayah_Li', 'Lepcha', 'Lycian', 'Lydian',", "MAX_UNICODE offset = 1; for s in sets: for x", "End of block of code for creating offsets for caseless", "of every unique record that is # required. The ucd_stage1", "maint/MultiStage2.py script.\") print(\"Do not modify it by hand. 
Instead modify", "the next structure in array record_slice = [record[0] for record", "def print_records(records, record_size): print('const ucd_record PRIV(ucd_records)[] = { ' +", "so that the name has the correct index value. break_property_names", "used. # # Update for Python3: # . Processed with", "http://unicode.org/Public/emoji/11.0/emoji-data.txt # # ----------------------------------------------------------------------------- # Minor modifications made to this", "# by zero (ucp_Unknown). A character with more than one", "value. Luckily, the # structure had a hole in it,", "for the Unicode 11.0.0 database. Future # updates may make", "list 3, 15, 107, 29, # and terminator 0. This", "in range(0, len(table), ELEMS_PER_LINE): print(fmt % (table[i:i+ELEMS_PER_LINE] + (int(i *", "8.0.0 'Ahom', 'Anatolian_Hieroglyphs', 'Hatran', 'Multani', 'Old_Hungarian', 'SignWriting', # New for", "structure. # 30-September-2012: Added RegionalIndicator break property from Unicode 6.2.0", "values def get_type_size(table): type_size = [(\"uint8_t\", 1), (\"uint16_t\", 2), (\"uint32_t\",", "list with a single entry, as the zeroth # element", "records.sort(key = lambda x: x[1]) for i, record in enumerate(records):", "# <NAME>, 03 July 2008 # Last Updated: 07 October", "= len(records) * 4 stage1, stage2 = compress_table(stage_i, stage2_block) size", "every Unicode character. However, a real table covering all Unicode", "to the PCRE project by <NAME>owski as part of #", "'Brahmi', 'Mandaic', # New for Unicode 6.1.0 'Chakma', 'Meroitic_Cursive', 'Meroitic_Hieroglyphs',", "s in sets: for x in s: caseless_offsets[x] = offset", "The ucd_caseless_sets table contains # lists of characters that all", "number, script extension value, character type, grapheme break type, #", "last + 1): if break_props[i] != break_property_names.index('Other'): print(\"WARNING: Emoji 0x%x", "in the \"auxiliary\" subdirectory. 
Scripts.txt, ScriptExtensions.txt, and # CaseFolding.txt are", "a library - so we include a\") print(\"condition to cut", "sets: s = sorted(s) for x in s: print(' 0x%04x,'", "are not part of any list. # # The ucd_digit_sets", "size = len(records) * record_size stage1, stage2 = compress_table(table, block_size)", "for just a single script for a # code point.", "UCD_BLOCK_SIZE != %d\" % min_block_size) print(\"#error Please correct UCD_BLOCK_SIZE in", "SUPPORT_UCP is abolished. # # Major modifications made to this", "for Unicode 11.0.0 'Dogr', 'Gong', 'Rohg', 'Maka', 'Medf', 'Sogo', 'Sogd',", "records] slice_type, slice_size = get_type_size(record_slice) # add padding: round up", "6.0.0 'Batak', 'Brahmi', 'Mandaic', # New for Unicode 6.1.0 'Chakma',", "(6,), (6,), (1,)], 1 ), \\ ( [(300,), (600,), (600,),", "type unassigned */\") print(\" ucp_gbOther, /* grapheme break property */\")", "for # all the emojis is \"other\". We scan the", "support.\") print(\"This module should not be referenced otherwise, so\") print(\"it", "of digit\") print(\"sets, nor the the large main UCD tables.", "*/\") print() print(\"#endif /* PCRE2_PCRE2TEST */\") # This code was", "take from the Unicode.tables directory # Adjusted global table names", "inserted into the main table. 
# The CaseFolding.txt file lists", "many characters have the same record, and many blocks of", "# 12 = ucp_gbOther => Grapheme break property \"Other\" #", "+ other_case[c]] = -other_case[c] # Now scan again and create", "make_get_names(break_property_names), break_property_names.index('Other')) other_case = read_table('Unicode.tables/CaseFolding.txt', get_other_case, 0) # The grapheme", "found = False break if found: return -i # Not", "using the command # # [python3] ./MultiStage2.py >../src/pcre2_ucd.c # #", "None: last = char else: last = int(m.group(3), 16) for", "'Grantha', 'Khojki', 'Khudawadi', 'Linear_A', 'Mahajani', 'Manichaean', 'Mende_Kikakui', 'Modi', 'Mro', 'Nabataean',", "=> Not part of a caseless set # 0 =>", "# # Added code to add a Script Extensions field", "Emoji\"), # for example: # # http://unicode.org/Public/emoji/11.0/emoji-data.txt # # -----------------------------------------------------------------------------", "element in the vector # contains the number of subsequent", "if len(chardata) <= 1: continue if chardata[1] != \"Extended_Pictographic\": continue", "Added encoding='utf-8' to the open() call # . Inserted 'int'", "are Bengali, Devanagari, Grantha, and Kannada. # # <NAME>, 03", "is None: continue first = int(m.group(1),16) last = int(m.group(2),16) if", "a table of \"virtual\" blocks; each block is indexed by", "September 2012. I am not a Python # programmer, so", "negative value in its record. This is the # negated", "i for i in range(5,10)]: size = len(records) * record_size", "# End of block of code for creating offsets for", "= read_table('Unicode.tables/ScriptExtensions.txt', get_script_extension, script_abbrevs_default) for i in range(0, MAX_UNICODE): if", "offsets for those that are not already set. for c", "and should be run in # the maint subdirectory, using", "'Bugi', 'Buhd', 'Cans', 'Cher', 'Zyyy', 'Copt', 'Cprt', 'Cyrl', 'Dsrt', 'Deva',", "18-September-2012: Added code for multiple caseless sets. 
This uses the", "in enumerate(records): print((' {' + '%6d, ' * len(record[0]) +", "), \\ ( [(300, 300), (6, 6), (6, 340), (1,", "0 => No other case # 27 = ucp_Hiragana =>", "karshana (U+1CD0) is in block 57 (0x39) # lookup 57", "However, we have to do this work here in order", "-slice_size structure += '} ucd_record;\\n*/\\n' return size, structure def test_record_size():", "at present # # Almost all lowercase latin characters resolve", "a \"virtual\" block number. # # The ucd_stage2 table is", "'Pauc', 'Sidd', 'Tirh', 'Wara', #New for Unicode 8.0.0 'Ahom', 'Hluw',", "12 bytes (multiple of 4). Set a value # greater", "records = {} index = [] for t in zip(*tables):", "}};\") print(\"const uint16_t PRIV(ucd_stage1)[] = {0};\") print(\"const uint16_t PRIV(ucd_stage2)[] =", "*/\\n\" + fmt) % ((i / block_size,) + table[i:i+block_size])) print(\"};\\n\")", "'Kayah_Li', 'Lepcha', 'Lycian', 'Lydian', 'Ol_Chiki', 'Rejang', 'Saurashtra', 'Sundanese', 'Vai', #", "the same set of records as # other blocks. This", "other blocks. This leads to a 2-stage lookup process. #", "must be in the # maint/Unicode.tables subdirectory. # # DerivedGeneralCategory.txt", "This uses the # final hole in the structure. #", "(July 2018). # # Added code to add a Script", "July 2008 # Last Updated: 07 October 2018 ############################################################################## import", "!= script_numbers[j]: found = False break if found: return -i", "count = 0 print(\" 0x%05x,\" % d, end='') count +=", "# Stage 2 table contains the blocks with property values", "characters # that are not part of any list. #", "a grapheme break property field to records. 
# # Added", "for Unicode 10.0.0 'Adlm', 'Bhks', 'Marc', 'Newa', 'Osge', 'Tang', 'Gonm',", "/* U+%04X */\" mult = MAX_UNICODE / len(table) for i", "= list(chardata[1].split(' ')) if len(this_script_list) == 1: return script_abbrevs.index(this_script_list[0]) script_numbers", "'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd',", "than using a library - so we include a\") print(\"condition", "yields 17 # record 17 is { 34, 5, 12,", "test[1]) #print struct def print_records(records, record_size): print('const ucd_record PRIV(ucd_records)[] =", "a # script number, script extension value, character type, grapheme", "o or x == t: found = 1 # Add", "New for Unicode 7.0.0 'Bassa_Vah', 'Caucasian_Albanian', 'Duployan', 'Elbasan', 'Grantha', 'Khojki',", "1): for j in range(0, script_numbers_length): found = True if", "within its own block, and the result is the index", "continue value = get_value(chardata) m = re.match(r'([0-9a-fA-F]+)(\\.\\.([0-9a-fA-F]+))?$', chardata[0]) char =", "'Inscriptional_Parthian', 'Javanese', 'Kaithi', 'Lisu', 'Meetei_Mayek', 'Old_South_Arabian', 'Old_Turkic', 'Samaritan', 'Tai_Tham', 'Tai_Viet',", "[record[i] for record in records] slice_type, slice_size = get_type_size(record_slice) #", "to 12 bytes, only 10 of which are currently used.", "'Olck', 'Rjng', 'Saur', 'Sund', 'Vaii', #New for Unicode 5.2 'Avst',", "lookup in ucd_stage1 a \"virtual\" block number. # # The", "hole in it, so the resulting table is # not", "file_name) file_base = f.group(1) version_pat = r\"^# \" + re.escape(file_base)", "'Nand', 'Hmnp', 'Wcho' ] category_names = ['Cc', 'Cf', 'Cn', 'Co',", "= 0 structure = '/* When recompiling tables with a", "'Java', 'Kthi', 'Lisu', 'Mtei', 'Sarb', 'Orkh', 'Samr', 'Lana', 'Tavt', #New", "of subsequent elements, which are in ascending order. # #", "rules were changed for Unicode 11.0.0 (June 2018). 
Now #", "stage1, stage2 = compress_table(table, block_size) size += get_tables_size(stage1, stage2) #print", "new block start = len(stage2) / block_size stage2 += block", "redefines the PRIV macro to change\") print(\"table names from _pcre2_xxx", "this_script_list: script_numbers.append(script_abbrevs.index(d)) script_numbers.append(0) script_numbers_length = len(script_numbers) for i in range(1,", "print(\"or zero (32 bits, signed), script extension (16 bits, signed),", "range(5,10)]: size = len(records) * 4 stage1, stage2 = compress_table(stage_i,", "0 => Dummy value, unused at present # # Example:", "# The Script Extensions property default value is the Script", "!= %d\" % min_block_size) print(\"#error Please correct UCD_BLOCK_SIZE in pcre2_internal.h\")", "'9' characters in # each set of 10 decimal digits", "sets. sets = [] for c in range(MAX_UNICODE): o =", "# # 01-March-2010: Updated list of scripts for Unicode 5.2.0", "Dummy value, unused at present # # Example: vedic tone", "[0] * MAX_UNICODE padding_dummy[0] = 256 # This block of", "in 2012. # Corrected size calculation # Add #ifndef SUPPORT_UCP", "# # [python3] ./MultiStage2.py >../src/pcre2_ucd.c # # It requires six", "continue if chardata[1] != \"Extended_Pictographic\": continue m = re.match(r'([0-9a-fA-F]+)(\\.\\.([0-9a-fA-F]+))?$', chardata[0])", "points of the '9' characters in # each set of", "'Miao', 'Sharada', 'Sora_Sompeng', 'Takri', # New for Unicode 7.0.0 'Bassa_Vah',", "run in non-32-bit mode, character values\") print(\"greater than 0x10ffff may", "break_property_names = ['CR', 'LF', 'Control', 'Extend', 'Prepend', 'SpacingMark', 'L', 'V',", "is zero-terminated. 
*/\\n\") print(\"const uint8_t PRIV(ucd_script_sets)[] = {\") count =", "def get_type_size(table): type_size = [(\"uint8_t\", 1), (\"uint16_t\", 2), (\"uint32_t\", 4),", "8 for d in digitsets: if count == 8: print(\"\\n", "34, 0 } # 34 = ucp_Latin => Latin script", "other cases or zero (8 bits), offset to other case\")", "Inserted 'int' before blocksize/ELEMS_PER_LINE because an int is # required", "# in script runs all come from the same set.", "the default for # all the emojis is \"other\". We", "= re.match(r'([0-9a-fA-F]+)\\.\\.([0-9a-fA-F]+)\\s+;\\s+\\S+\\s+#\\s+Nd\\s+', line) if m is None: continue first =", "PH: read Scripts.txt again for the sets of 10 digits.", "main tables generated by this script are used by macros", "for its # Script Extension property has a negative value", "(0x42) in table 90 in stage2 yields 564 # record", "for PCRE2: name changes, and SUPPORT_UCP is abolished. # #", "resulting table is # not much bigger than before. #", "print(s + \" */\") table = tuple(table) if block_size is", "Unicode character database. */\") print(\"/* This file was autogenerated by", "'Latin', 'Limbu', 'Linear_B', 'Malayalam', 'Mongolian', 'Myanmar', 'New_Tai_Lue', 'Ogham', 'Old_Italic', 'Old_Persian',", "set */\") print(\" 0, /* other case */\") print(\" ucp_Unknown,", "i in range(2,6)]: stage_i, stage3 = compress_table(table, stage3_block) for stage2_block", "s in sets: s = sorted(s) for x in s:", "bytes, record size %d */' % (len(records) * record_size, record_size))", "the default from Scripts. 
Code added by PH # in", "one script listed for its # Script Extension property has", "line.split(';'))) if len(chardata) <= 1: continue value = get_value(chardata) m", "GraphemeBreakProperty.txt and added a new # field in the record", "of a caseless set # -32 (-0x20) => Other case", "of \"virtual\" blocks; each block is indexed by # the", "enum.index(chardata[1]) # Parse a line of CaseFolding.txt def get_other_case(chardata): if", "= list(map(str.strip, line.split(';'))) if len(chardata) <= 1: continue value =", "if scriptx[i] == script_abbrevs_default: scriptx[i] = script[i] # With the", "print(\" /* 0 */\", end='') for d in script_lists: print(\"", "of UnicodeData.txt, which is no longer # used. # #", "'Guru', 'Hani', 'Hang', 'Hano', 'Hebr', 'Hira', 'Zinh', 'Knda', 'Kana', 'Khar',", "default_value: table[i] = value file.close() return table # Get the", "field to records. # # Added code to search for", "be far too big. It can be efficiently compressed by", "script listed for its # Script Extension property has a", "range(5,10)]: size = len(records) * record_size stage1, stage2 = compress_table(table,", "0, /* dummy filler */\") print(\" }};\") print(\"#endif\") print() print(record_struct)", "--- Added by PH: read Scripts.txt again for the sets", "a dummy\") print(\"16-bit field to make the whole thing a", "# 20-August-2012: Added scan of GraphemeBreakProperty.txt and added a new", "Extension # value), then scan it and fill in the", "list(zip(list(records.keys()), list(records.values()))) records.sort(key = lambda x: x[1]) for i, record", "size = (size + slice_size - 1) & -slice_size structure", "These are the main two-stage UCD tables. 
The fields in", "value, unused at present # # Example: vedic tone karshana", "digits # in script runs all come from the same", "'Sundanese', 'Vai', # New for Unicode 5.2 'Avestan', 'Bamum', 'Egyptian_Hieroglyphs',", "get_script_extension(chardata): this_script_list = list(chardata[1].split(' ')) if len(this_script_list) == 1: return", "%d bytes, block size: %d. */\" % (min_size, min_block_size)) print()", "# Consequent code tidy # Adjusted data file names to", "# # The following examples are correct for the Unicode", "table of caseless character sets --- print(\"/* This table contains", "of characters that all match each other caselessly. Each list", "= script[i] # With the addition of the new Script", "= block_size print(\"/* This module is generated by the maint/MultiStage2.py", "= get_type_size(record_slice) # add padding: round up to the nearest", "== script_abbrevs_default: scriptx[i] = script[i] # With the addition of", "the whole thing a multiple of 4 bytes. */\\n\") print_records(records,", "(i,))) print('};\\n') script_names = ['Unknown', 'Arabic', 'Armenian', 'Bengali', 'Bopomofo', 'Braille',", "of the required record in the ucd_records vector. # #", "for record in records] slice_type, slice_size = get_type_size(record_slice) size =", "'Deseret', 'Devanagari', 'Ethiopic', 'Georgian', 'Glagolitic', 'Gothic', 'Greek', 'Gujarati', 'Gurmukhi', 'Han',", "as being part of the PCRE2 library, this module is", "\"/* block size %5d => %5d bytes */\" % (block_size,", "characters resolve to the same record. One or two #", "script numbers that are the # Script Extensions properties of", "in stage2 yields 17 # record 17 is { 34,", "Script Extensions field, we need some padding # to get", "print() print(\"const char *PRIV(unicode_version) = \\\"{}\\\";\".format(unicode_version)) print() print(\"/* If the", "added to the original script. 
# # The script has", "%d bytes\" % (type, table_name, size * len(table)) if block_size:", "in # each set of 10 decimal digits in Unicode.", "in October 2018. Positive values are used for just a", "# element is never used. script_lists = [0] script_abbrevs_default =", "+ r\"-(\\d+\\.\\d+\\.\\d+)\\.txt$\" file = open(file_name, 'r', encoding='utf-8') f = re.match(version_pat,", "'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc', 'Me', 'Mn',", "other caselessly. Each list is # in order, and is", "(0, 65535), (0, 4294967295), (-128, 127), (-32768, 32767), (-2147483648, 2147483647)]", "structure definition from pcre2_internal.h (the actual\\n' + \\ 'field names", "contains no branches, which makes for greater speed. # #", "'Khoj', 'Sind', 'Lina', 'Mahj', 'Mani', 'Mend', 'Modi', 'Mroo', 'Nbat', 'Narb',", "category, break_props, caseless_offsets, other_case, scriptx, padding_dummy) record_size, record_struct = get_record_size_struct(list(records.keys()))", "associated with Unicode Technical Standard #51 (\"Unicode Emoji\"), # for", "are in ascending order. # # The ucd_script_sets vector contains", "list of scripts for Unicode 6.0.0 # July-2012: Updated list", "'Tirhuta', 'Warang_Citi', # New for Unicode 8.0.0 'Ahom', 'Anatolian_Hieroglyphs', 'Hatran',", "[(100000, 300), (6, 6), (123456, 6), (1, 690)], 8 ),", "# characters (taking 128 characters in a block) have the", "-return_value # Read the whole table in memory, setting/checking the", "'Osmanya', 'Runic', 'Shavian', 'Sinhala', 'Syloti_Nagri', 'Syriac', 'Tagalog', 'Tagbanwa', 'Tai_Le', 'Tamil',", "'Kali', 'Lepc', 'Lyci', 'Lydi', 'Olck', 'Rjng', 'Saur', 'Sund', 'Vaii', #New", "number of the required record in the ucd_records vector. 
#", "if break_props[i] != break_property_names.index('Other'): print(\"WARNING: Emoji 0x%x has break property", "=> %5d bytes */\" % (stage2_block, stage3_block, size) if size", "part of a caseless set # 0 => No other", "of slice_size size = (size + slice_size - 1) &", "+= slice_size structure += '%s property_%d;\\n' % (slice_type, i) #", "up to the first item of the next structure in", "for record in records] slice_type, slice_size = get_type_size(record_slice) # add", "print(fmt % (table[i:i+ELEMS_PER_LINE] + (int(i * mult),))) else: if block_size", "get_record_size_struct(list(records.keys())) # Find the optimum block size for the two-stage", "vector we find the list 3, 15, 107, 29, #", "Last Updated: 07 October 2018 ############################################################################## import re import string", "The CaseFolding.txt file lists pairs, but the common logic for", "macro to change\") print(\"table names from _pcre2_xxx to xxxx, thereby", "\\ ( [(300,), (600,), (600,), (100,)], 2 ), \\ (", "Unicode 6.1.0 # 20-August-2012: Added scan of GraphemeBreakProperty.txt and added", "] test_record_size() unicode_version = \"\" script = read_table('Unicode.tables/Scripts.txt', make_get_names(script_names), script_names.index('Unknown'))", "offset to caseless matching set, offset to the character's other", "end='') count = 0 print(\" 0x%05x,\" % d, end='') count", "a block) have the same set of records as #", "new # code scans CaseFolding.txt instead of UnicodeData.txt, which is", "script_lists.extend(script_numbers) return -return_value # Read the whole table in memory,", "This means that this character is expected to be used", "as the default (this will never be a Script Extension", "With the addition of the new Script Extensions field, we", "= break_property_names.index('Extended_Pictographic') file.close() # The Script Extensions property default value", "digit\") print(\"sets, nor the the large main UCD tables. 
*/\")", "property \"Extend\" # 0 => Not part of a caseless", "0 # lookup 0 in stage1 table yields 0 #", "'Xsux', 'Nkoo', 'Phag', 'Phnx', #New for Unicode 5.1 'Cari', 'Cham',", "The result of a lookup in ucd_stage1 a \"virtual\" block", "the other_case table to find sets of more than two", "Extension list offset = 101 # 0 => Dummy value,", "is # in files associated with Unicode Technical Standard #51", "/ block_size stage2 += block blocks[block] = start stage1.append(start) return", "it here so that the name has the correct index", "break_props[i] = break_property_names.index('Extended_Pictographic') file.close() # The Script Extensions property default", "for d in digitsets: if count == 8: print(\"\\n \",", "bits), character type (8 bits), grapheme break property (8 bits),\")", "style is probably dreadful, but it does the job. It", "script_lists[i+j] != script_numbers[j]: found = False break if found: return", "re.match(r'^[^/]+/([^.]+)\\.txt$', file_name) file_base = f.group(1) version_pat = r\"^# \" +", "ucp_Mn => Non-spacing mark # 3 = ucp_gbExtend => Grapheme", "a\") print(\"condition to cut out the tables when not needed.", "Initialize this list with a single entry, as the zeroth", "3, 0, 0, -101, 0 } # 28 = ucp_Inherited", "0, 0, 27, 0 } # 27 = ucp_Hiragana =>", "property has a negative value in its record. 
This is", "has break property %s, not 'Other'\", i, break_property_names[break_props[i]], file=sys.stderr) break_props[i]", "= ucp_Latin => Latin script # 5 = ucp_Ll =>", "have the same set of records as # other blocks.", "= re.match(r'^[^/]+/([^.]+)\\.txt$', file_name) file_base = f.group(1) version_pat = r\"^# \"", "file # 19-June-2015: Updated for Unicode 8.0.0 # 02-July-2017: Updated", "get_tables_size(*tables): total_size = 0 for table in tables: type, size", "return_value = len(script_lists) script_lists.extend(script_numbers) return -return_value # Read the whole", "in records] slice_type, slice_size = get_type_size(record_slice) size = (size +", "loop looking for caseless sets. # Now scan the sets", "value), then scan it and fill in the default from", "%3d */\" % count, end='') print(\"\\n};\\n\") # Output the main", "'Ahom', 'Hluw', 'Hatr', 'Mult', 'Hung', 'Sgnw', #New for Unicode 10.0.0", "should not matter whether it is compiled or not. However\")", "zero (8 bits), offset to other case\") print(\"or zero (32", "i in range(0, len(table), block_size): block = table[i:i+block_size] start =", "15, 107, 29, # and terminator 0. This means that", "up Unicode character properties using short # sequences of code", "tables. */\") print() print(\"#ifndef PCRE2_PCRE2TEST\") print() # --- Added by", "-32 (-0x20) => Other case is U+0041 # 34 =", "min_stage2_block, min_stage3_block = stage2_block, stage3_block print \"/* Total size: %d", "int(m.group(3), 16) for i in range(char, last + 1): #", "added by PH in September 2012. I am not a", "This is the # negated offset to the start of", "'Elba', 'Gran', 'Khoj', 'Sind', 'Lina', 'Mahj', 'Mani', 'Mend', 'Modi', 'Mroo',", "table. 
file = open('Unicode.tables/emoji-data.txt', 'r', encoding='utf-8') for line in file:", "for Unicode 10.0.0 # 03-July-2018: Updated for Unicode 11.0.0 #", "chardata[1] != \"Extended_Pictographic\": continue m = re.match(r'([0-9a-fA-F]+)(\\.\\.([0-9a-fA-F]+))?$', chardata[0]) char =", "of block of code for creating offsets for caseless matching", "compute the # offsets in the table that are inserted", "existing set with the new set. appended = 0 for", "for # every Unicode character. However, a real table covering", "each\") print(\"set of decimal digits. It is used to ensure", "x == y: found = 1 if not found: s.append(y)", "come from the same set. */\\n\") print(\"const uint32_t PRIV(ucd_digit_sets)[] =", "Made it work with Python 2.4 by rewriting two statements", "+ 1): # It is important not to overwrite a", "single script for a # code point. Negative values are", "breaking rules were changed for Unicode 11.0.0 (June 2018). Now", "tests: size, struct = get_record_size_struct(test[0]) assert(size == test[1]) #print struct", "set # 0 => No other case # -101 =>", "> ELEMS_PER_LINE: el = ELEMS_PER_LINE else: el = block_size fmt", "), \\ ( [(3, 300), (6, 6), (6, 340), (1,", "stage3 min_stage2_block, min_stage3_block = stage2_block, stage3_block print \"/* Total size:", "empty module because some compilers barf at that.\") print(\"Instead, just", "+ fmt) % ((i / block_size,) + table[i:i+block_size])) print(\"};\\n\") #", "= list(map(str.strip, line.split(';'))) if len(chardata) <= 1: continue if chardata[1]", "4). Set a value # greater than 255 to make", "0: print(\"\\n /* %3d */\" % count, end='') print(\"\\n};\\n\") #", "real table covering all Unicode # characters would be far", "=> Dummy value, unused at present # # Almost all", "make the field 16 bits. padding_dummy = [0] * MAX_UNICODE", "is built,\") print(\"and in PCRE2 that happens automatically with UTF", "in the ucd_records vector. # # The following examples are", "were changed for Unicode 11.0.0 (June 2018). 
Now # we", "Latin script # 5 = ucp_Ll => Lower case letter", "the '9' characters in # each set of 10 decimal", "set, create a new one. if not appended: sets.append([c, o,", "'Arabic', 'Armenian', 'Bengali', 'Bopomofo', 'Braille', 'Buginese', 'Buhid', 'Canadian_Aboriginal', 'Cherokee', 'Common',", "= MAX_UNICODE / len(table) for i in range(0, len(table), ELEMS_PER_LINE):", "up a\") print(\"special record. */\") print() print(\"#if PCRE2_CODE_UNIT_WIDTH == 32\")", "PCRE2 library, this module is #included\") print(\"by the pcre2test program,", "DerivedGeneralCategory.txt is found in the \"extracted\" subdirectory of the #", "'Gurmukhi', 'Han', 'Hangul', 'Hanunoo', 'Hebrew', 'Hiragana', 'Inherited', 'Kannada', 'Katakana', 'Kharoshthi',", "{\\n' for i in range(len(records[0])): record_slice = [record[i] for record", "PH # in October 2018. Positive values are used for", "*/\") print(\" 0, /* dummy filler */\") print(\" }};\") print(\"#endif\")", "The script is for the use of PCRE maintainers, to", "be run in # the maint subdirectory, using the command", "10.0.0 'Adlm', 'Bhks', 'Marc', 'Newa', 'Osge', 'Tang', 'Gonm', 'Nshu', 'Soyo',", "not found in the file where all the # others", "for creating offsets for caseless matching sets. # Combine the", "file was autogenerated by the MultiStage2.py script. */\") print(\"/* Total", "=> No special Script Extension property # 0 => Dummy", "The tables herein are needed only when UCP support is", "'Tale', 'Taml', 'Telu', 'Thaa', 'Thai', 'Tibt', 'Tfng', 'Ugar', 'Yiii', #New", "stage3 = compress_table(table, stage3_block) for stage2_block in [2 ** i", "data. if table[i] == default_value: table[i] = value file.close() return", "that are not part of any list. # # The", "import sys MAX_UNICODE = 0x110000 NOTACHAR = 0xffffffff # Parse", "DerivedGeneralCategory.txt, # GraphemeBreakProperty.txt, Scripts.txt, ScriptExtensions.txt, # CaseFolding.txt, and emoji-data.txt. 
These", "import re import string import sys MAX_UNICODE = 0x110000 NOTACHAR", "'}, /* %3d */') % (record[0] + (i,))) print('};\\n') script_names", "= r\"^# \" + re.escape(file_base) + r\"-(\\d+\\.\\d+\\.\\d+)\\.txt$\" file = open(file_name,", "PCRE2_PCRE2TEST\") print() print(\"#ifdef HAVE_CONFIG_H\") print(\"#include \\\"config.h\\\"\") print(\"#endif\") print() print(\"#include \\\"pcre2_internal.h\\\"\")", "tidy # Adjusted data file names to take from the", "Changed string.strip to str.strip # . Added encoding='utf-8' to the", "int is # required and the result of the division", "use dummy tables when no UCP support is needed. #", "The ucd_digit_sets table contains the code points of the '9'", "large to fit into C types\") def get_tables_size(*tables): total_size =", "'Yi', # New for Unicode 5.0 'Balinese', 'Cuneiform', 'Nko', 'Phags_Pa',", "we have to do this work here in order to", "'Newa', 'Osge', 'Tang', 'Gonm', 'Nshu', 'Soyo', 'Zanb', #New for Unicode", "indexed by a character's block number, # which is the", "& -slice_size size += slice_size structure += '%s property_%d;\\n' %", "three characters are already # part of a set. If", "'Vaii', #New for Unicode 5.2 'Avst', 'Bamu', 'Egyp', 'Armi', 'Phli',", "print(\"#endif /* PCRE2_PCRE2TEST */\") # This code was part of", "bytes\" % (type, table_name, size * len(table)) if block_size: s", "character is expected to be used with # any of", "the required record in the ucd_records vector. # # The", "break property field to records. # # Added code to", "a line which has already set # data. if table[i]", "len(stage2) / block_size stage2 += block blocks[block] = start stage1.append(start)", "print(\" 0, /* case set */\") print(\" 0, /* other", "- 1) & -slice_size size += slice_size structure += '%s", "we go through the table and set \"return\" # offsets", "global table names by prefixing _pcre_. 
# Commented out stuff", "min_block_size) print(\"#error Please correct UCD_BLOCK_SIZE in pcre2_internal.h\") print(\"#endif\") print(\"#endif /*", "!= 0: print(\"ERROR: %04x..%04x does not contain a multiple of", "<gh_stars>0 #! /usr/bin/python # Multistage table builder # (c) <NAME>,", "/* grapheme break property */\") print(\" 0, /* case set", "print(\"/* Total size: %d bytes, block size: %d. */\" %", "from 8 to 12 bytes, only 10 of which are", "block, and the result is the index # number of", "the default value of 0) # which often come after", "stage1, stage2 # Print a table def print_table(table, table_name, block_size", "Extended Pictographic # property, which is used by PCRE2 as", "is None: # Allocate a new block start = len(stage2)", "(record[0] + (i,))) print('};\\n') script_names = ['Unknown', 'Arabic', 'Armenian', 'Bengali',", "# # At offset 101 in the ucd_script_sets vector we", "# 01-March-2010: Updated list of scripts for Unicode 5.2.0 #", "than two characters that must all # match each other", "# # The ucd_digit_sets table contains the code points of", "of certain characters. Each list is terminated # by zero", "((i / block_size,) + table[i:i+block_size])) print(\"};\\n\") # Extract the unique", "Unicode 10.0.0 # 03-July-2018: Updated for Unicode 11.0.0 # 07-July-2018:", "test_record_size() unicode_version = \"\" script = read_table('Unicode.tables/Scripts.txt', make_get_names(script_names), script_names.index('Unknown')) category", "This table contains lists of characters that are caseless sets", "\"/* Total size: %d bytes\" % min_size */ print_records(records) print_table(min_stage1,", "i for i in range(5,10)]: size = len(records) * 4", "to cut out the tables when not needed. But don't", "by rewriting two statements that needed 2.5 # Consequent code", "Extension property has a negative value in its record. 
This", "if other_case[c] != 0 and other_case[c + other_case[c]] == 0:", "terminated by NOTACHAR (0xffffffff), which is larger than # any", "def combine_tables(*tables): records = {} index = [] for t", "the default (this will never be a Script Extension #", "x in s: if x == y: found = 1", "'Cham', 'Kayah_Li', 'Lepcha', 'Lycian', 'Lydian', 'Ol_Chiki', 'Rejang', 'Saurashtra', 'Sundanese', 'Vai',", "other caselessly. A new table is output containing these sets,", "0 # lookup 97 (0x61) in the first table in", "stage2 min_block_size = block_size print(\"/* This module is generated by", "= [ 'Zzzz', 'Arab', 'Armn', 'Beng', 'Bopo', 'Brai', 'Bugi', 'Buhd',", "07 October 2018 ############################################################################## import re import string import sys", "= 0 for x in s: if x == c", "690)], 4 ), \\ ( [(3, 100000), (6, 6), (6,", "'Lisu', 'Meetei_Mayek', 'Old_South_Arabian', 'Old_Turkic', 'Samaritan', 'Tai_Tham', 'Tai_Viet', # New for", "Unicode 6.0.0 # July-2012: Updated list of scripts for Unicode", "table yields 55 # lookup 80 (0x50) in table 55", "CaseFolding file there are lines # to be ignored (returning", "creating offsets for caseless matching sets. # Combine the tables", "herein are needed only when UCP support is built,\") print(\"and", "re.match(r'([0-9a-fA-F]+)\\.\\.([0-9a-fA-F]+)\\s+;\\s+\\S+\\s+#\\s+Nd\\s+', line) if m is None: continue first = int(m.group(1),16)", "so the resulting table is # not much bigger than", "new characters to an existing set if found: found =", "avoiding name clashes\") print(\"with the library. 
At present, just one", "Not part of a caseless set # -32 (-0x20) =>", "[(0, 255), (0, 65535), (0, 4294967295), (-128, 127), (-32768, 32767),", "this script a table of these sets is # written", "be ignored (returning the default value of 0) # which", "'Grek', 'Gujr', 'Guru', 'Hani', 'Hang', 'Hano', 'Hebr', 'Hira', 'Zinh', 'Knda',", "ucp_Unknown, /* script */\") print(\" ucp_Cn, /* type unassigned */\")", "sets and set appropriate offsets for the characters. caseless_offsets =", "# 7 = ucp_Lo => Other letter # 12 =", "signed), script extension (16 bits, signed), and a dummy\") print(\"16-bit", "on the Unicode web site; GraphemeBreakProperty.txt is # in the", "'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl',", "whole table in memory, setting/checking the Unicode version def read_table(file_name,", "in stage2 yields 458 # record 458 is { 28,", "the guy linked\") print(\"all the modules rather than using a", "= [] # Stage 1 table contains block numbers (indices", "start = len(stage2) / block_size stage2 += block blocks[block] =", "size: %d bytes, block size: %d. */\" % (min_size, min_block_size))", "'Bopo', 'Brai', 'Bugi', 'Buhd', 'Cans', 'Cher', 'Zyyy', 'Copt', 'Cprt', 'Cyrl',", "to add a grapheme break property field to records. #", "if len(chardata) <= 1: continue value = get_value(chardata) m =", "\"extracted\" subdirectory of the # Unicode database (UCD) on the", "size: %d bytes\" % min_size */ print_records(records) print_table(min_stage1, 'ucd_stage1') print_table(min_stage2,", "Trigger when this character's other case does not point back", "Please correct UCD_BLOCK_SIZE in pcre2_internal.h\") print(\"#endif\") print(\"#endif /* SUPPORT_UNICODE */\")", "offsets in the table that are inserted into the main", "% len(digitsets), end='') count = 8 for d in digitsets:", "blocks with property values table = tuple(table) for i in", "are already # part of a set. 
If so, unite", "vector # contains the number of subsequent elements, which are", "+= '} ucd_record;\\n*/\\n' return size, structure def test_record_size(): tests =", "( [(25, 3), (6, 6), (34, 6), (68, 1)], 2", "range(0, script_numbers_length): found = True if script_lists[i+j] != script_numbers[j]: found", "c in range(MAX_UNICODE): o = c + other_case[c] # Trigger", "'Warang_Citi', # New for Unicode 8.0.0 'Ahom', 'Anatolian_Hieroglyphs', 'Hatran', 'Multani',", "looking for caseless sets. # Now scan the sets and", "bits), offset to other case\") print(\"or zero (32 bits, signed),", "------ print(\"/* When #included in pcre2test, we don't need the", "for Python 3 # 20-June-2014: Updated for Unicode 7.0.0 #", "correct UCD_BLOCK_SIZE in pcre2_internal.h\") print(\"#endif\") print(\"#endif /* SUPPORT_UNICODE */\") print()", "# maint/Unicode.tables subdirectory. # # DerivedGeneralCategory.txt is found in the", "'Gothic', 'Greek', 'Gujarati', 'Gurmukhi', 'Han', 'Hangul', 'Hanunoo', 'Hebrew', 'Hiragana', 'Inherited',", "# Compress the table into the two stages def compress_table(table,", "for Unicode 6.0.0 'Batak', 'Brahmi', 'Mandaic', # New for Unicode", "'Bali', 'Xsux', 'Nkoo', 'Phag', 'Phnx', #New for Unicode 5.1 'Cari',", "table = tuple(table) if block_size is None: fmt = \"%3d,\"", "'Soyombo', 'Zanabazar_Square', # New for Unicode 11.0.0 'Dogra', 'Gunjala_Gondi', 'Hanifi_Rohingya',", "5.0 'Balinese', 'Cuneiform', 'Nko', 'Phags_Pa', 'Phoenician', # New for Unicode", "The emoji-data.txt file is # in files associated with Unicode", "slice_size = get_type_size(record_slice) # add padding: round up to the", "library is run in non-32-bit mode, character values\") print(\"greater than", "not part of any list. # # The ucd_digit_sets table", "# # Example: lowercase \"a\" (U+0061) is in block 0", "in the UCD directory. 
The emoji-data.txt file is # in", "3), (6, 6), (34, 6), (68, 1)], 2 ), \\", "At present, just one of these tables is actually\") print(\"needed.", "'Khmr', 'Laoo', 'Latn', 'Limb', 'Linb', 'Mlym', 'Mong', 'Mymr', 'Talu', 'Ogam',", "no branches, which makes for greater speed. # # Conceptually,", "are lines # to be ignored (returning the default value", "records[t] = len(records) index.append(i) return index, records def get_record_size_struct(records): size", "default (this will never be a Script Extension # value),", "30-April-2011: Updated list of scripts for Unicode 6.0.0 # July-2012:", "i for i in range(2,6)]: stage_i, stage3 = compress_table(table, stage3_block)", "modifications made to this script: # Added #! line at", "'Old_Persian', 'Oriya', 'Osmanya', 'Runic', 'Shavian', 'Sinhala', 'Syloti_Nagri', 'Syriac', 'Tagalog', 'Tagbanwa',", "{\") print(\" NOTACHAR,\") for s in sets: s = sorted(s)", "elements, which are in ascending order. # # The ucd_script_sets", "if found: found = 0 for y in [c, o,", "# removed completely in 2012. # Corrected size calculation #", "if table[i] == default_value: table[i] = value file.close() return table", "'Lydi', 'Olck', 'Rjng', 'Saur', 'Sund', 'Vaii', #New for Unicode 5.2", "is not found in the file where all the #", "digits. --- digitsets = [] file = open('Unicode.tables/Scripts.txt', 'r', encoding='utf-8')", "= script_abbrevs.index('Zzzz') scriptx = read_table('Unicode.tables/ScriptExtensions.txt', get_script_extension, script_abbrevs_default) for i in", "100000), (6, 6), (6, 123456), (1, 690)], 8 ), \\", "stage1, stage2 = compress_table(stage_i, stage2_block) size += get_tables_size(stage1, stage2, stage3)", "* MAX_UNICODE padding_dummy[0] = 256 # This block of code", "a table def print_table(table, table_name, block_size = None): type, size", "= 0 print(\" /* 0 */\", end='') for d in", "'Pau_Cin_Hau', 'Siddham', 'Tirhuta', 'Warang_Citi', # New for Unicode 8.0.0 'Ahom',", "used; # removed completely in 2012. 
# Corrected size calculation", "caselessly. A new table is output containing these sets, and", "in stage2 yields 564 # record 564 is { 27,", "and maxval <= maxlimit: return type_size[num] else: raise OverflowError(\"Too large", "lists pairs, but the common logic for reading data #", "= compress_table(table, block_size) size += get_tables_size(stage1, stage2) #print \"/* block", "( [(300,), (600,), (600,), (100,)], 2 ), \\ ( [(25,", "min_stage2, min_stage3 = stage1, stage2, stage3 min_stage2_block, min_stage3_block = stage2_block,", "emoji-data.txt. These must be in the # maint/Unicode.tables subdirectory. #", "value. Parse the # file, setting 'Unknown' as the default", "tabs # Made it work with Python 2.4 by rewriting", "records def combine_tables(*tables): records = {} index = [] for", "bits. padding_dummy = [0] * MAX_UNICODE padding_dummy[0] = 256 #", "ucd_stage2 table is a table of \"virtual\" blocks; each block", "to add a Script Extensions field to records. This has", "print(\"#ifndef SUPPORT_UNICODE\") print(\"const ucd_record PRIV(ucd_records)[] = {{0,0,0,0,0,0,0 }};\") print(\"const uint16_t", "Compress the table into the two stages def compress_table(table, block_size):", "'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc',", "emoji-data.txt file is # in files associated with Unicode Technical", "Parse a line of Scripts.txt, GraphemeBreakProperty.txt or DerivedGeneralCategory.txt def make_get_names(enum):", "# Example: hiragana letter A (U+3042) is in block 96", "the result is the index # number of the required", "property. This was # done when updating to Unicode 11.0.0", "# 03-October-2018: Added new field for Script Extensions # 27-July-2019:", "11.0.0 'Dogra', 'Gunjala_Gondi', 'Hanifi_Rohingya', 'Makasar', 'Medefaidrin', 'Old_Sogdian', 'Sogdian', # New", "dreadful, but it does the job. 
It scans # the", "66 (0x42) in table 90 in stage2 yields 564 #", "block_size) size += get_tables_size(stage1, stage2) #print \"/* block size %5d", "min_size = size min_stage1, min_stage2 = stage1, stage2 min_block_size =", "\\ ( [(25, 3), (6, 6), (34, 6), (68, 1)],", "script_abbrevs_default) for i in range(0, MAX_UNICODE): if scriptx[i] == script_abbrevs_default:", "(1, 690)], 4 ), \\ ( [(3, 100000), (6, 6),", "Unicode 6.1.0 'Cakm', 'Merc', 'Mero', 'Plrd', 'Shrd', 'Sora', 'Takr', #New", "ucp_gbOther, /* grapheme break property */\") print(\" 0, /* case", "script was submitted to the PCRE project by <NAME>owski as", "final hole in the structure. # 30-September-2012: Added RegionalIndicator break", "to hold the value. Luckily, the # structure had a", "the table that are inserted into the main table. #", "into records def combine_tables(*tables): records = {} index = []", "different):\\n\\ntypedef struct {\\n' for i in range(len(records[0])): record_slice = [record[i]", "index.append(i) return index, records def get_record_size_struct(records): size = 0 structure", "'New_Tai_Lue', 'Ogham', 'Old_Italic', 'Old_Persian', 'Oriya', 'Osmanya', 'Runic', 'Shavian', 'Sinhala', 'Syloti_Nagri',", "the pcre2test program, which redefines the PRIV macro to change\")", "# ------ print(\"/* When #included in pcre2test, we don't need", "# the upgrading of Unicode property support. The new code", "0 for x in s: if x == c or", "not appended: sets.append([c, o, t]) # End of loop looking", "d in digitsets: if count == 8: print(\"\\n \", end='')", "main table. 
# The CaseFolding.txt file lists pairs, but the", "or x == t: found = 1 # Add new", "the modules rather than using a library - so we", "automatically with UTF support.\") print(\"This module should not be referenced", "unassigned */\") print(\" ucp_gbOther, /* grapheme break property */\") print(\"", "scriptx[i] == script_abbrevs_default: scriptx[i] = script[i] # With the addition", "of properties into records def combine_tables(*tables): records = {} index", "27, 0 } # 27 = ucp_Hiragana => Hiragana script", "the # offsets in the table that are inserted into", "Unicode 10.0.0 'Adlam', 'Bhaiksuki', 'Marchen', 'Newa', 'Osage', 'Tangut', 'Masaram_Gondi', 'Nushu',", "'Osge', 'Tang', 'Gonm', 'Nshu', 'Soyo', 'Zanb', #New for Unicode 11.0.0", "because they are part of a multi-character caseless set (for", "2147483647)] minval = min(table) maxval = max(table) for num, (minlimit,", "GraphemeBreakProperty.txt or DerivedGeneralCategory.txt def make_get_names(enum): return lambda chardata: enum.index(chardata[1]) #", "'Coptic', 'Cypriot', 'Cyrillic', 'Deseret', 'Devanagari', 'Ethiopic', 'Georgian', 'Glagolitic', 'Gothic', 'Greek',", "= open('Unicode.tables/emoji-data.txt', 'r', encoding='utf-8') for line in file: line =", "again and create equivalence sets. sets = [] for c", "find the Extended_Pictographic property for emoji characters. This # can", "with 2to3, but that didn't fix everything # . Changed", "table, which isn't used; # removed completely in 2012. #", "characters have the same record, and many blocks of #", "# Scan the existing sets to see if any of", "** i for i in range(2,6)]: stage_i, stage3 = compress_table(table,", "Pictographic # property, which is used by PCRE2 as a", "main output records. This new # code scans CaseFolding.txt instead", "type, size = get_type_size(table) ELEMS_PER_LINE = 16 s = \"const", "just supply some small dummy tables. 
*/\") print() print(\"#ifndef SUPPORT_UNICODE\")", "start is None: # Allocate a new block start =", "/* case set */\") print(\" 0, /* other case */\")", "2018). Now # we need to find the Extended_Pictographic property", "table min_size = sys.maxsize for block_size in [2 ** i", "of script numbers that are the # Script Extensions properties", "lookup 97 (0x61) in the first table in stage2 yields", "file = open('Unicode.tables/Scripts.txt', 'r', encoding='utf-8') for line in file: m", "line) if m is None: continue first = int(m.group(1),16) last", "required. The ucd_stage1 table is indexed by a character's block", "reading data # sets only one value, so first we", "unicode_version != version: print(\"WARNING: Unicode version differs in %s\", file_name,", "to compute the # offsets in the table that are", "decimal digits in Unicode. This is used to ensure that", "Grapheme break property \"Other\" # 0 => Not part of", "than 0x10ffff may be encountered. For these we set up", "first element in the vector # contains the number of", "# we need to find the Extended_Pictographic property for emoji", "6.1.0 # 20-August-2012: Added scan of GraphemeBreakProperty.txt and added a", "written out. However, we have to do this work here", "We scan the emoji-data.txt file and modify the # break-props", "'ZWJ', 'Extended_Pictographic' ] test_record_size() unicode_version = \"\" script = read_table('Unicode.tables/Scripts.txt',", "important not to overwrite a previously set # value because", "a character within its own block, and the result is", "# 07-July-2018: Added code to scan emoji-data.txt for the Extended", "# matching many times. The script is for the use", "caselessly. Later in this script a table of these sets", "'Control', 'Extend', 'Prepend', 'SpacingMark', 'L', 'V', 'T', 'LV', 'LVT', 'Regional_Indicator',", "11.0.0 database. Future # updates may make change the actual", "zero (ucp_Unknown). 
A character with more than one script listed", "34 = ucp_Latin => No special Script Extension property #", "sets.append([c, o, t]) # End of loop looking for caseless", "PCRE2, and should be run in # the maint subdirectory,", "re import string import sys MAX_UNICODE = 0x110000 NOTACHAR =", "tables. The fields in each record are:\") print(\"script (8 bits),", "maintainers, to # generate the pcre2_ucd.c file that contains a", "hiragana letter A (U+3042) is in block 96 (0x60) #", "\"%3d,\" * el + \"\\n\" if block_size > ELEMS_PER_LINE: fmt", "in range(5,10)]: size = len(records) * record_size stage1, stage2 =", "print(\"const uint8_t PRIV(ucd_script_sets)[] = {\") count = 0 print(\" /*", "= compress_table(table, stage3_block) for stage2_block in [2 ** i for", "if script_lists[i+j] != script_numbers[j]: found = False break if found:", "field to make the whole thing a multiple of 4", "than # any valid character. The first list is empty;", "#New for Unicode 8.0.0 'Ahom', 'Hluw', 'Hatr', 'Mult', 'Hung', 'Sgnw',", "\"\": unicode_version = version elif unicode_version != version: print(\"WARNING: Unicode", "1 table contains block numbers (indices into stage 2 table)", "maxlimit: return type_size[num] else: raise OverflowError(\"Too large to fit into", "[] # Stage 1 table contains block numbers (indices into", "fmt = \"%3d,\" * ELEMS_PER_LINE + \" /* U+%04X */\"", "can be set as an additional grapheme break property, because", "ucp_Cn, /* type unassigned */\") print(\" ucp_gbOther, /* grapheme break", "stage2 yields 458 # record 458 is { 28, 12,", "'Phags_Pa', 'Phoenician', # New for Unicode 5.1 'Carian', 'Cham', 'Kayah_Li',", "f.group(1) if unicode_version == \"\": unicode_version = version elif unicode_version", "(June 2018). 
Now # we need to find the Extended_Pictographic", "However, a real table covering all Unicode # characters would", "(first, last), file=sys.stderr) while first < last: digitsets.append(first + 9)", "end='') count += 1 if d == 0: print(\"\\n /*", "'Sharada', 'Sora_Sompeng', 'Takri', # New for Unicode 7.0.0 'Bassa_Vah', 'Caucasian_Albanian',", "# 19-June-2015: Updated for Unicode 8.0.0 # 02-July-2017: Updated for", "= {\") print(\" %d, /* Number of subsequent values */\"", "that many characters have the same record, and many blocks", "Updated for Python 3 # 20-June-2014: Updated for Unicode 7.0.0", "')) if len(this_script_list) == 1: return script_abbrevs.index(this_script_list[0]) script_numbers = []", "= len(stage2) / block_size stage2 += block blocks[block] = start", "property, which is used by PCRE2 as a grapheme breaking", "It is used to ensure that all the digits in\")", "get_other_case(chardata): if chardata[1] == 'C' or chardata[1] == 'S': return", "'Lepcha', 'Lycian', 'Lydian', 'Ol_Chiki', 'Rejang', 'Saurashtra', 'Sundanese', 'Vai', # New", "result is the index # number of the required record", "after a line which has already set # data. if", "] # The Extended_Pictographic property is not found in the", "No special Script Extension property # 0 => Dummy value,", "already set. for c in range(MAX_UNICODE): if other_case[c] != 0", "modify the # break-props table. 
file = open('Unicode.tables/emoji-data.txt', 'r', encoding='utf-8')", "modules rather than using a library - so we include", "otherwise, so\") print(\"it should not matter whether it is compiled", "table is indexed by a character's block number, # which", "other_case[c + other_case[c]] == 0: other_case[c + other_case[c]] = -other_case[c]", "for Unicode 8.0.0 # 02-July-2017: Updated for Unicode 10.0.0 #", "for example: # # http://unicode.org/Public/emoji/11.0/emoji-data.txt # # ----------------------------------------------------------------------------- # Minor", "# 18-September-2012: Added code for multiple caseless sets. This uses", "default from Scripts. Code added by PH # in October", "of code was added by PH in September 2012. I", "PRIV(ucd_script_sets)[] = {\") count = 0 print(\" /* 0 */\",", "print(\"WARNING: Unicode version differs in %s\", file_name, file=sys.stderr) table =", "by a character's block number, # which is the character's", "min_stage3_block = stage2_block, stage3_block print \"/* Total size: %d bytes\"", "%s\", file_name, file=sys.stderr) table = [default_value] * MAX_UNICODE for line", "= records[t] = len(records) index.append(i) return index, records def get_record_size_struct(records):", "hand. Instead modify the script and run it\") print(\"to regenerate", "main UCD tables. 
*/\") print() print(\"#ifndef PCRE2_PCRE2TEST\") print() # ---", "containing these sets, and # offsets into the table are", "# to be ignored (returning the default value of 0)", "sequences of code that contains no branches, which makes for", "'Meetei_Mayek', 'Old_South_Arabian', 'Old_Turkic', 'Samaritan', 'Tai_Tham', 'Tai_Viet', # New for Unicode", "'Khudawadi', 'Linear_A', 'Mahajani', 'Manichaean', 'Mende_Kikakui', 'Modi', 'Mro', 'Nabataean', 'Old_North_Arabian', 'Old_Permic',", "3 = ucp_gbExtend => Grapheme break property \"Extend\" # 0", "first table in stage2 yields 17 # record 17 is", "# New for Unicode 12.0.0 'Elymaic', 'Nandinagari', 'Nyiakeng_Puachue_Hmong', 'Wancho' ]", "print(' 0x%04x,' % x, end=' ') print(' NOTACHAR,') print('};') print()", "a digested form of the Unicode # data tables. A", "structure = '/* When recompiling tables with a new Unicode", "print(\"WARNING: Emoji 0x%x has break property %s, not 'Other'\", i,", "to the character's other case, for # every Unicode character.", "5 = ucp_Ll => Lower case letter # 12 =", "-101 => Script Extension list offset = 101 # 0", "with property values table = tuple(table) for i in range(0,", "that must all # match each other caselessly. Later in", "1 # Add new characters to an existing set if", "lowercase latin characters resolve to the same record. One or", "encountered. For these we set up a\") print(\"special record. 
*/\")", "PRIV(ucd_records)[] = { ' + \\ '/* %d bytes, record", "= 1 if not found: s.append(y) appended = 1 #", "Updated: 07 October 2018 ############################################################################## import re import string import", "(6, 6), (6, 340), (1, 690)], 4 ), \\ (", "12, 3, 0, 0, -101, 0 } # 28 =", "# # Example: hiragana letter A (U+3042) is in block", "5, 12, 0, -32, 34, 0 } # 34 =", "contain a multiple of 10 characters\" % (first, last), file=sys.stderr)", "the same record, and many blocks of # characters (taking", "print(\"#ifdef HAVE_CONFIG_H\") print(\"#include \\\"config.h\\\"\") print(\"#endif\") print() print(\"#include \\\"pcre2_internal.h\\\"\") print() print(\"#endif", "128 characters in a block) have the same set of", "unicode_version f = re.match(r'^[^/]+/([^.]+)\\.txt$', file_name) file_base = f.group(1) version_pat =", "Each list is terminated # by zero (ucp_Unknown). A character", "type, size = get_type_size(table) total_size += size * len(table) return", "run it\") print(\"to regenerate this code.\") print() print(\"As well as", "a single entry, as the zeroth # element is never", "the resulting table is # not much bigger than before.", "Unicode 6.0.0 'Batk', 'Brah', 'Mand', #New for Unicode 6.1.0 'Cakm',", "in this script a table of these sets is #", "not added to an existing set, create a new one.", "in Unicode. This is used to ensure that digits #", "don't need the table of digit\") print(\"sets, nor the the", "added to the main output records. 
This new # code", "0, 27, 0 } # 27 = ucp_Hiragana => Hiragana", "= combine_tables(script, category, break_props, caseless_offsets, other_case, scriptx, padding_dummy) record_size, record_struct", "# record 458 is { 28, 12, 3, 0, 0,", "a caseless set # -32 (-0x20) => Other case is", "i in range(char, last + 1): if break_props[i] != break_property_names.index('Other'):", "table covering all Unicode # characters would be far too", "print(\"and in PCRE2 that happens automatically with UTF support.\") print(\"This", "run in # the maint subdirectory, using the command #", "block size: %d. */\" % (min_size, min_block_size)) print() print(\"/* The", "90 # lookup 66 (0x42) in table 90 in stage2", "'Rjng', 'Saur', 'Sund', 'Vaii', #New for Unicode 5.2 'Avst', 'Bamu',", "get the Unicode records up to 12 bytes (multiple of", "NOTACHAR. */\\n\") print(\"const uint32_t PRIV(ucd_caseless_sets)[] = {\") print(\" NOTACHAR,\") for", "5.2 'Avst', 'Bamu', 'Egyp', 'Armi', 'Phli', 'Prti', 'Java', 'Kthi', 'Lisu',", "I am not a Python # programmer, so the style", "scriptx[i] = script[i] # With the addition of the new", "contains the code points of the '9' characters in #", "Later in this script a table of these sets is", "(the actual\\n' + \\ 'field names will be different):\\n\\ntypedef struct", "padding_dummy[0] = 256 # This block of code was added", "\\\"{}\\\";\".format(unicode_version)) print() print(\"/* If the 32-bit library is run in", "'Mandaic', # New for Unicode 6.1.0 'Chakma', 'Meroitic_Cursive', 'Meroitic_Hieroglyphs', 'Miao',", "script_abbrevs = [ 'Zzzz', 'Arab', 'Armn', 'Beng', 'Bopo', 'Brai', 'Bugi',", "It is important not to overwrite a previously set #", "code point. Negative values are negated offsets in a list", "'Khojki', 'Khudawadi', 'Linear_A', 'Mahajani', 'Manichaean', 'Mende_Kikakui', 'Modi', 'Mro', 'Nabataean', 'Old_North_Arabian',", "must all # match each other caselessly. 
Later in this", "print(\"#endif\") print() print(record_struct) # --- Added by PH: output the", "'Other'\", i, break_property_names[break_props[i]], file=sys.stderr) break_props[i] = break_property_names.index('Extended_Pictographic') file.close() # The", "padding_dummy) record_size, record_struct = get_record_size_struct(list(records.keys())) # Find the optimum block", "nor the the large main UCD tables. */\") print() print(\"#ifndef", "{\") count = 0 print(\" /* 0 */\", end='') for", "contains lists of script numbers that are the # Script", "make change the actual lookup values. # # Example: lowercase", "Each sublist is zero-terminated. */\\n\") print(\"const uint8_t PRIV(ucd_script_sets)[] = {\")", "the main two-stage UCD tables. The fields in each record", "'Lm', 'Lo', 'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No',", "the # break-props table. file = open('Unicode.tables/emoji-data.txt', 'r', encoding='utf-8') for", "stage2, stage3) # print \"/* %5d / %3d => %5d", "'Hang', 'Hano', 'Hebr', 'Hira', 'Zinh', 'Knda', 'Kana', 'Khar', 'Khmr', 'Laoo',", "lookup 96 in stage1 table yields 90 # lookup 66", "mult = MAX_UNICODE / len(table) for i in range(0, len(table),", "} # 34 = ucp_Latin => Latin script # 5", "if not appended: sets.append([c, o, t]) # End of loop", "the list 3, 15, 107, 29, # and terminator 0.", "[(\"uint8_t\", 1), (\"uint16_t\", 2), (\"uint32_t\", 4), (\"signed char\", 1), (\"pcre_int16\",", "PRIV(ucd_digit_sets)[] = {\") print(\" %d, /* Number of subsequent values", "the main UCD tables. print(\"/* These are the main two-stage", "number, # which is the character's code point divided by", "type_size[num] else: raise OverflowError(\"Too large to fit into C types\")", "common logic for reading data # sets only one value,", "this character's other case does not point back here. We", "vector contains lists of script numbers that are the #", "that all match each other caselessly. Each list is #", "Unicode 11.0.0 database. 
Future # updates may make change the", "part of a caseless set # -32 (-0x20) => Other", "used. A two-stage table has sufficed. \"\"\" # Three-stage tables:", "# final hole in the structure. # 30-September-2012: Added RegionalIndicator", "get_type_size(record_slice) # add padding: round up to the nearest power", "to the original script. # # The script has now", "from Unicode 6.2.0 # 13-May-2014: Updated for PCRE2 # 03-June-2014:", "come from the same set. The first element in the", "this character is expected to be used with # any", "stage1 table yields 0 # lookup 97 (0x61) in the", "'Phoenician', # New for Unicode 5.1 'Carian', 'Cham', 'Kayah_Li', 'Lepcha',", "7.0.0 'Bass', 'Aghb', 'Dupl', 'Elba', 'Gran', 'Khoj', 'Sind', 'Lina', 'Mahj',", "limits = [(0, 255), (0, 65535), (0, 4294967295), (-128, 127),", "script for a # code point. Negative values are negated", "that are not already set. for c in range(MAX_UNICODE): if", "of subsequent values */\" % len(digitsets), end='') count = 8", "# match each other caselessly. Later in this script a", "= [(0, 255), (0, 65535), (0, 4294967295), (-128, 127), (-32768,", "re.sub(r'#.*', '', line) chardata = list(map(str.strip, line.split(';'))) if len(chardata) <=", "#New for Unicode 10.0.0 'Adlm', 'Bhks', 'Marc', 'Newa', 'Osge', 'Tang',", "caseless set # 0 => No other case # 27", "( [(300, 300), (6, 6), (6, 340), (1, 690)], 4", "this list with a single entry, as the zeroth #", "but it does the job. It scans # the other_case", "in script runs all come from the same set. The", "names by prefixing _pcre_. 
# Commented out stuff relating to", "= 0 for y in [c, o, t]: for x", "script extension value, character type, grapheme break type, # offset", "return int(chardata[2], 16) - int(chardata[0], 16) return 0 # Parse", "j in range(0, script_numbers_length): found = True if script_lists[i+j] !=", "break type, # offset to caseless matching set, offset to", "present # # Example: vedic tone karshana (U+1CD0) is in", "Removed tabs # Made it work with Python 2.4 by", "unique record that is # required. The ucd_stage1 table is", "be used with # any of those scripts, which are", "ucd_record;\\n*/\\n' return size, structure def test_record_size(): tests = [ \\", "it does the job. It scans # the other_case table", "U+%04X */\" mult = MAX_UNICODE / len(table) for i in", "one of these tables is actually\") print(\"needed. */\") print() print(\"#ifndef", "'Old_Permic', 'Pahawh_Hmong', 'Palmyrene', 'Psalter_Pahlavi', 'Pau_Cin_Hau', 'Siddham', 'Tirhuta', 'Warang_Citi', # New", "% 10) != 0: print(\"ERROR: %04x..%04x does not contain a", "last), file=sys.stderr) while first < last: digitsets.append(first + 9) first", "print(\"\\n};\\n\") print(\"/* This vector is a list of lists of", "= {} index = [] for t in zip(*tables): i", "multiple of 4 bytes. */\\n\") print_records(records, record_size) print_table(min_stage1, 'PRIV(ucd_stage1)') print_table(min_stage2,", "else: raise OverflowError(\"Too large to fit into C types\") def", "smallest possible C language type for the values def get_type_size(table):", "0 => Not part of a caseless set # 0", "code.\") print() print(\"As well as being part of the PCRE2", "x in s: if x == c or x ==", "print(\"#else\") print() print(\"const char *PRIV(unicode_version) = \\\"{}\\\";\".format(unicode_version)) print() print(\"/* If", "len(this_script_list) == 1: return script_abbrevs.index(this_script_list[0]) script_numbers = [] for d", "\"virtual\" block number. 
# # The ucd_stage2 table is a", "print(\"#error Please correct UCD_BLOCK_SIZE in pcre2_internal.h\") print(\"#endif\") print(\"#endif /* SUPPORT_UNICODE", "# Get the smallest possible C language type for the", "+ \" /* U+%04X */\" mult = MAX_UNICODE / len(table)", "---------------------------------------------------------------------------- # # # The main tables generated by this", "len(script_lists) script_lists.extend(script_numbers) return -return_value # Read the whole table in", "'Armi', 'Phli', 'Prti', 'Java', 'Kthi', 'Lisu', 'Mtei', 'Sarb', 'Orkh', 'Samr',", "t]) # End of loop looking for caseless sets. #", "is generated by the maint/MultiStage2.py script.\") print(\"Do not modify it", "ucd_record PRIV(ucd_records)[] = {{0,0,0,0,0,0,0 }};\") print(\"const uint16_t PRIV(ucd_stage1)[] = {0};\")", "that contains a digested form of the Unicode # data", "has now been upgraded to Python 3 for PCRE2, and", "web site; GraphemeBreakProperty.txt is # in the \"auxiliary\" subdirectory. Scripts.txt,", "2008 ############################################################################## # This script was submitted to the PCRE", "re.match(version_pat, file.readline()) version = f.group(1) if unicode_version == \"\": unicode_version", "# to get the Unicode records up to 12 bytes", "*/\") print(\" ucp_Cn, /* type unassigned */\") print(\" ucp_gbOther, /*", "continue first = int(m.group(1),16) last = int(m.group(2),16) if ((last -", "7.0.0 # 12-August-2014: Updated to put Unicode version into the", "{{0,0,0,0,0,0,0 }};\") print(\"const uint16_t PRIV(ucd_stage1)[] = {0};\") print(\"const uint16_t PRIV(ucd_stage2)[]", "vector. 
# # The ucd_records table contains one instance of", "0 => Dummy value, unused at present # # Almost", "in memory, setting/checking the Unicode version def read_table(file_name, get_value, default_value):", "to fit into C types\") def get_tables_size(*tables): total_size = 0", "print(\"ERROR: %04x..%04x does not contain a multiple of 10 characters\"", "for Unicode 6.1.0 'Chakma', 'Meroitic_Cursive', 'Meroitic_Hieroglyphs', 'Miao', 'Sharada', 'Sora_Sompeng', 'Takri',", "the structure. # 30-September-2012: Added RegionalIndicator break property from Unicode", "by 128, since 128 is the size # of each", "# lookup 66 (0x42) in table 90 in stage2 yields", "== 'C' or chardata[1] == 'S': return int(chardata[2], 16) -", "that the name has the correct index value. break_property_names =", "'Sora_Sompeng', 'Takri', # New for Unicode 7.0.0 'Bassa_Vah', 'Caucasian_Albanian', 'Duployan',", "in the vector # contains the number of subsequent elements,", "'Syloti_Nagri', 'Syriac', 'Tagalog', 'Tagbanwa', 'Tai_Le', 'Tamil', 'Telugu', 'Thaana', 'Thai', 'Tibetan',", "are directly in the UCD directory. The emoji-data.txt file is", "# offsets into the table are added to the main", "=> Not part of a caseless set # -32 (-0x20)", "'Hira', 'Zinh', 'Knda', 'Kana', 'Khar', 'Khmr', 'Laoo', 'Latn', 'Limb', 'Linb',", "needed. # Update for PCRE2: name changes, and SUPPORT_UCP is", "%04x..%04x does not contain a multiple of 10 characters\" %", "block) have the same set of records as # other", "7, 12, 0, 0, 27, 0 } # 27 =", "property is not found in the file where all the", "# 28 = ucp_Inherited => Script inherited from predecessor #", "other case # 27 = ucp_Hiragana => No special Script", "order, and is terminated by NOTACHAR (0xffffffff), which is larger", "= (size + slice_size - 1) & -slice_size structure +=", "or chardata[1] == 'S': return int(chardata[2], 16) - int(chardata[0], 16)", "which is no longer # used. # # Update for", "sorted(s) for x in s: print(' 0x%04x,' % x, end='", "digits. 
It is used to ensure that all the digits", "Updated list of scripts for Unicode 5.2.0 # 30-April-2011: Updated", "*/\" % (stage2_block, stage3_block, size) if size < min_size: min_size", "size: %d. */\" % (min_size, min_block_size)) print() print(\"/* The tables", "in non-32-bit mode, character values\") print(\"greater than 0x10ffff may be", "which has already set # data. if table[i] == default_value:", "ignored (returning the default value of 0) # which often", "that this character is expected to be used with #", "all the digits in\") print(\"a script run come from the", "scan the sets and set appropriate offsets for the characters.", "block_size is None: fmt = \"%3d,\" * ELEMS_PER_LINE + \"", "# Added code to add a grapheme break property field", "The following examples are correct for the Unicode 11.0.0 database.", "which redefines the PRIV macro to change\") print(\"table names from", "(68, 1)], 2 ), \\ ( [(300, 3), (6, 6),", "print(\" }};\") print(\"#endif\") print() print(record_struct) # --- Added by PH:", "to str.strip # . Added encoding='utf-8' to the open() call", "of 10 decimal digits in Unicode. This is used to", "just one of these tables is actually\") print(\"needed. */\") print()", "), \\ ( [(300,), (600,), (600,), (100,)], 2 ), \\", "'Nyiakeng_Puachue_Hmong', 'Wancho' ] script_abbrevs = [ 'Zzzz', 'Arab', 'Armn', 'Beng',", "02-July-2017: Updated for Unicode 10.0.0 # 03-July-2018: Updated for Unicode", "set appropriate offsets for the characters. 
caseless_offsets = [0] *", "we have not added to an existing set, create a", "ucd_records table contains one instance of every unique record that", "in the ucd_script_sets vector we find the list 3, 15,", "stage1, stage2, stage3 min_stage2_block, min_stage3_block = stage2_block, stage3_block print \"/*", "stage2 yields 17 # record 17 is { 34, 5,", "# Add new characters to an existing set if found:", "(\"uint32_t\", 4), (\"signed char\", 1), (\"pcre_int16\", 2), (\"pcre_int32\", 4)] limits", "break property, because the default for # all the emojis", "script_names.index('Unknown')) category = read_table('Unicode.tables/DerivedGeneralCategory.txt', make_get_names(category_names), category_names.index('Cn')) break_props = read_table('Unicode.tables/GraphemeBreakProperty.txt', make_get_names(break_property_names),", "03-July-2018: Updated for Unicode 11.0.0 # 07-July-2018: Added code to", "12.1.0 # ---------------------------------------------------------------------------- # # # The main tables generated", "if i is None: i = records[t] = len(records) index.append(i)", "'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc',", "6.1.0 'Cakm', 'Merc', 'Mero', 'Plrd', 'Shrd', 'Sora', 'Takr', #New for", "have three characters that are case-equivalent. if other_case[o] != -other_case[c]:", "for j in range(0, script_numbers_length): found = True if script_lists[i+j]", "'Samaritan', 'Tai_Tham', 'Tai_Viet', # New for Unicode 6.0.0 'Batak', 'Brahmi',", "other caselessly. Later in this script a table of these", "in stage1 table yields 55 # lookup 80 (0x50) in", "'Hanunoo', 'Hebrew', 'Hiragana', 'Inherited', 'Kannada', 'Katakana', 'Kharoshthi', 'Khmer', 'Lao', 'Latin',", "fmt = \"%3d,\" * el + \"\\n\" if block_size >", "for table in tables: type, size = get_type_size(table) total_size +=", "pcre2test, we don't need the table of digit\") print(\"sets, nor", "record that is # required. 
The ucd_stage1 table is indexed", "The first element in the vector # contains the number", "records. # # Added code to search for sets of", "i) # round up to the first item of the", "Bengali, Devanagari, Grantha, and Kannada. # # <NAME>, 03 July", "version_pat = r\"^# \" + re.escape(file_base) + r\"-(\\d+\\.\\d+\\.\\d+)\\.txt$\" file =", "'Ethiopic', 'Georgian', 'Glagolitic', 'Gothic', 'Greek', 'Gujarati', 'Gurmukhi', 'Han', 'Hangul', 'Hanunoo',", "by PH # in October 2018. Positive values are used", "print(\"#endif /* SUPPORT_UNICODE */\") print() print(\"#endif /* PCRE2_PCRE2TEST */\") #", "script runs all come from the same set. The first", "is in block 0 # lookup 0 in stage1 table", "for characters # that are not part of any list.", "for num, (minlimit, maxlimit) in enumerate(limits): if minlimit <= minval", "# multiple scripts. Initialize this list with a single entry,", "28 = ucp_Inherited => Script inherited from predecessor # 12", "Unicode version into the file # 19-June-2015: Updated for Unicode", "in array record_slice = [record[0] for record in records] slice_type,", "extension */\") print(\" 0, /* dummy filler */\") print(\" }};\")", "ensure that all the digits in\") print(\"a script run come", "sets --- print(\"/* This table contains lists of characters that", "found: found = 0 for y in [c, o, t]:", "compiled or not. However\") print(\"a comment was received about space", "used. # # 01-March-2010: Updated list of scripts for Unicode", "blocks. This leads to a 2-stage lookup process. # #", "into the table are added to the main output records.", "=> Script inherited from predecessor # 12 = ucp_Mn =>", "need to find the Extended_Pictographic property for emoji characters. This", "stage2 = compress_table(table, block_size) size += get_tables_size(stage1, stage2) #print \"/*", "include a\") print(\"condition to cut out the tables when not", "UCD tables. 
print(\"/* These are the main two-stage UCD tables.", "# record 564 is { 27, 7, 12, 0, 0,", "record_size, record_size)) records = list(zip(list(records.keys()), list(records.values()))) records.sort(key = lambda x:", "for Unicode 5.2 'Avestan', 'Bamum', 'Egyptian_Hieroglyphs', 'Imperial_Aramaic', 'Inscriptional_Pahlavi', 'Inscriptional_Parthian', 'Javanese',", "three characters that are case-equivalent. if other_case[o] != -other_case[c]: t", "Scripts.txt, GraphemeBreakProperty.txt or DerivedGeneralCategory.txt def make_get_names(enum): return lambda chardata: enum.index(chardata[1])", "other_case table to find sets of more than two characters", "table[i:i+block_size] start = blocks.get(block) if start is None: # Allocate", "script_abbrevs.index('Zzzz') scriptx = read_table('Unicode.tables/ScriptExtensions.txt', get_script_extension, script_abbrevs_default) for i in range(0,", "offset offset += len(s) + 1 # End of block", "module is generated by the maint/MultiStage2.py script.\") print(\"Do not modify", "have the same record, and many blocks of # characters", "= [(\"uint8_t\", 1), (\"uint16_t\", 2), (\"uint32_t\", 4), (\"signed char\", 1),", "Scripts. Code added by PH # in October 2018. Positive", "out. However, we have to do this work here in", "already # part of a set. If so, unite the", "% (record[0] + (i,))) print('};\\n') script_names = ['Unknown', 'Arabic', 'Armenian',", "stage2 # Print a table def print_table(table, table_name, block_size =", "10.0.0 'Adlam', 'Bhaiksuki', 'Marchen', 'Newa', 'Osage', 'Tangut', 'Masaram_Gondi', 'Nushu', 'Soyombo',", "put Unicode version into the file # 19-June-2015: Updated for", "which are currently used. # # 01-March-2010: Updated list of", "Unicode character. However, a real table covering all Unicode #", "The Extended_Pictographic property is not found in the file where", "been added to the original script. 
# # The script", "i, break_property_names[break_props[i]], file=sys.stderr) break_props[i] = break_property_names.index('Extended_Pictographic') file.close() # The Script", "for those that are not already set. for c in", "structure += '} ucd_record;\\n*/\\n' return size, structure def test_record_size(): tests", "), \\ ( [(100000, 300), (6, 6), (123456, 6), (1,", "offsets for the characters. caseless_offsets = [0] * MAX_UNICODE offset", "count = 0 print(\" /* 0 */\", end='') for d", "as a grapheme breaking property. This was # done when", "grapheme break type, # offset to caseless matching set, offset", "of a multi-character caseless set (for # example, k, K", "# . Inserted 'int' before blocksize/ELEMS_PER_LINE because an int is", "type, # offset to caseless matching set, offset to the", "existing set, create a new one. if not appended: sets.append([c,", "*/\", end='') for d in script_lists: print(\" %3d,\" % d,", "90 in stage2 yields 564 # record 564 is {", "* 4 stage1, stage2 = compress_table(stage_i, stage2_block) size += get_tables_size(stage1,", "[] for d in this_script_list: script_numbers.append(script_abbrevs.index(d)) script_numbers.append(0) script_numbers_length = len(script_numbers)", "record_size)) records = list(zip(list(records.keys()), list(records.values()))) records.sort(key = lambda x: x[1])", "t: found = 1 # Add new characters to an", "101 # 0 => Dummy value, unused at present #", "the upgrading of Unicode property support. The new code speeds", "size calculation # Add #ifndef SUPPORT_UCP to use dummy tables", "i in range(1, len(script_lists) - script_numbers_length + 1): for j", "(8 bits),\") print(\"offset to multichar other cases or zero (8", "/usr/bin/python # Multistage table builder # (c) <NAME>, 2008 ##############################################################################", "CaseFolding.txt, and emoji-data.txt. These must be in the # maint/Unicode.tables", "(size + slice_size - 1) & -slice_size structure += '}", "script: # Added #! 
line at start # Removed tabs", "ucp_Latin => Latin script # 5 = ucp_Ll => Lower", "thereby avoiding name clashes\") print(\"with the library. At present, just", "ELEMS_PER_LINE + \" /* U+%04X */\" mult = MAX_UNICODE /", "'Common', 'Coptic', 'Cypriot', 'Cyrillic', 'Deseret', 'Devanagari', 'Ethiopic', 'Georgian', 'Glagolitic', 'Gothic',", "o, t]: for x in s: if x == y:", "len(chardata) <= 1: continue if chardata[1] != \"Extended_Pictographic\": continue m", "C types\") def get_tables_size(*tables): total_size = 0 for table in", "= fmt * int(block_size / ELEMS_PER_LINE) for i in range(0,", "Unicode 7.0.0 'Bass', 'Aghb', 'Dupl', 'Elba', 'Gran', 'Khoj', 'Sind', 'Lina',", "PCRE2 # 03-June-2014: Updated for Python 3 # 20-June-2014: Updated", "a negative value in its record. This is the #", "MAX_UNICODE for line in file: line = re.sub(r'#.*', '', line)", "last = char else: last = int(m.group(3), 16) for i", "'Elymaic', 'Nandinagari', 'Nyiakeng_Puachue_Hmong', 'Wancho' ] script_abbrevs = [ 'Zzzz', 'Arab',", "# # Major modifications made to this script: # Added", "be encountered. For these we set up a\") print(\"special record.", "a line of Scripts.txt, GraphemeBreakProperty.txt or DerivedGeneralCategory.txt def make_get_names(enum): return", "of code for creating offsets for caseless matching sets. #", "it work with Python 2.4 by rewriting two statements that", "value = get_value(chardata) m = re.match(r'([0-9a-fA-F]+)(\\.\\.([0-9a-fA-F]+))?$', chardata[0]) char = int(m.group(1),", "/ ELEMS_PER_LINE) for i in range(0, len(table), block_size): print((\"/* block", "print(\" 0, /* dummy filler */\") print(\" }};\") print(\"#endif\") print()", "count, end='') print(\"\\n};\\n\") # Output the main UCD tables. 
print(\"/*", "break_property_names.index('Other'): print(\"WARNING: Emoji 0x%x has break property %s, not 'Other'\",", "'Kaithi', 'Lisu', 'Meetei_Mayek', 'Old_South_Arabian', 'Old_Turkic', 'Samaritan', 'Tai_Tham', 'Tai_Viet', # New", "+ other_case[c]] == 0: other_case[c + other_case[c]] = -other_case[c] #", "1 # If we have not added to an existing", "matching sets. # Combine the tables table, records = combine_tables(script,", "(0x50) in table 55 in stage2 yields 458 # record", "which is larger than # any valid character. The first", "a Python # programmer, so the style is probably dreadful,", "of more than two characters that must all # match", "in stage1 table yields 0 # lookup 97 (0x61) in", "0xffffffff # Parse a line of Scripts.txt, GraphemeBreakProperty.txt or DerivedGeneralCategory.txt", "Python3: # . Processed with 2to3, but that didn't fix", "2 table contains the blocks with property values table =", "digits in Unicode. This is used to ensure that digits", "sys MAX_UNICODE = 0x110000 NOTACHAR = 0xffffffff # Parse a", "i = records.get(t) if i is None: i = records[t]", "'field names will be different):\\n\\ntypedef struct {\\n' for i in", "'Khar', 'Khmr', 'Laoo', 'Latn', 'Limb', 'Linb', 'Mlym', 'Mong', 'Mymr', 'Talu',", "= ELEMS_PER_LINE else: el = block_size fmt = \"%3d,\" *", "'Han', 'Hangul', 'Hanunoo', 'Hebrew', 'Hiragana', 'Inherited', 'Kannada', 'Katakana', 'Kharoshthi', 'Khmer',", "of\") print(\"more than one character. Each list is terminated by", "of 4 bytes. */\\n\") print_records(records, record_size) print_table(min_stage1, 'PRIV(ucd_stage1)') print_table(min_stage2, 'PRIV(ucd_stage2)',", "CaseFolding.txt file lists pairs, but the common logic for reading", "print(\"/* If the 32-bit library is run in non-32-bit mode,", "= compress_table(stage_i, stage2_block) size += get_tables_size(stage1, stage2, stage3) # print", "The ucd_stage1 table is indexed by a character's block number,", "11.0.0 (June 2018). 
Now # we need to find the", "# lookup 96 in stage1 table yields 90 # lookup", "'Thai', 'Tibt', 'Tfng', 'Ugar', 'Yiii', #New for Unicode 5.0 'Bali',", "'Avst', 'Bamu', 'Egyp', 'Armi', 'Phli', 'Prti', 'Java', 'Kthi', 'Lisu', 'Mtei',", "name # 03-October-2018: Added new field for Script Extensions #", "print(\"table names from _pcre2_xxx to xxxx, thereby avoiding name clashes\")", "to search for sets of more than two characters that", "for emoji characters. This # can be set as an", "[(25, 3), (6, 6), (34, 6), (68, 1)], 2 ),", "/* %d bytes\" % (type, table_name, size * len(table)) if", "print((' {' + '%6d, ' * len(record[0]) + '}, /*", "'Mong', 'Mymr', 'Talu', 'Ogam', 'Ital', 'Xpeo', 'Orya', 'Osma', 'Runr', 'Shaw',", "that happens automatically with UTF support.\") print(\"This module should not", "used by macros defined in # pcre2_internal.h. They look up", "These must be in the # maint/Unicode.tables subdirectory. # #", "# ---------------------------------------------------------------------------- # # # The main tables generated by", "'Thai', 'Tibetan', 'Tifinagh', 'Ugaritic', 'Yi', # New for Unicode 5.0", "present # # At offset 101 in the ucd_script_sets vector", "contains one instance of every unique record that is #", "If we have not added to an existing set, create", "print(\"a totally empty module because some compilers barf at that.\")", "get_record_size_struct(records): size = 0 structure = '/* When recompiling tables", "and a dummy\") print(\"16-bit field to make the whole thing", "= stage1, stage2, stage3 min_stage2_block, min_stage3_block = stage2_block, stage3_block print", "is the Script value. 
Parse the # file, setting 'Unknown'", "test_record_size(): tests = [ \\ ( [(3,), (6,), (6,), (1,)],", "requires six Unicode data tables: DerivedGeneralCategory.txt, # GraphemeBreakProperty.txt, Scripts.txt, ScriptExtensions.txt,", "return type_size[num] else: raise OverflowError(\"Too large to fit into C", "'Ugaritic', 'Yi', # New for Unicode 5.0 'Balinese', 'Cuneiform', 'Nko',", "other case\") print(\"or zero (32 bits, signed), script extension (16", "file where all the # others are (GraphemeBreakProperty.txt). It comes", "in September 2012. I am not a Python # programmer,", "characters in # each set of 10 decimal digits in", "set up a\") print(\"special record. */\") print() print(\"#if PCRE2_CODE_UNIT_WIDTH ==", "9) first += 10 file.close() digitsets.sort() print(\"/* This table lists", "), \\ ] for test in tests: size, struct =", "compress_table(table, stage3_block) for stage2_block in [2 ** i for i", "= f.group(1) if unicode_version == \"\": unicode_version = version elif", "< min_size: min_size = size min_stage1, min_stage2, min_stage3 = stage1,", "maybe the guy linked\") print(\"all the modules rather than using", "# # Added code to scan the emoji-data.txt file to", "index # number of the required record in the ucd_records", "if chardata[1] == 'C' or chardata[1] == 'S': return int(chardata[2],", "the pcre2_ucd.c file that contains a digested form of the", "\\ ( [(3,), (6,), (6,), (1,)], 1 ), \\ (", "output containing these sets, and # offsets into the table", "new Unicode version, please check the\\n' + \\ 'types in", "27 = ucp_Hiragana => Hiragana script # 7 = ucp_Lo" ]
[ "import setup, find_packages from retrobiocat_web import __version__ with open('requirements.txt') as", "find_packages from retrobiocat_web import __version__ with open('requirements.txt') as f: requirements", "open('requirements.txt') as f: requirements = f.read().splitlines() setup( name = 'retrobiocat_web',", "retrobiocat_web import __version__ with open('requirements.txt') as f: requirements = f.read().splitlines()", "include_package_data=True, version = __version__, license='', description = 'Retrosynthesis', author =", "__version__, license='', description = 'Retrosynthesis', author = '<NAME>', author_email =", "setuptools import setup, find_packages from retrobiocat_web import __version__ with open('requirements.txt')", "= 'Retrosynthesis', author = '<NAME>', author_email = '<EMAIL>', url =", "'', download_url = '', keywords = ['enzyme'], install_requires=requirements, classifiers=[ 'Development", "Status :: 3 - Alpha', 'License :: OSI Approved ::", "url = '', download_url = '', keywords = ['enzyme'], install_requires=requirements,", "'<EMAIL>', url = '', download_url = '', keywords = ['enzyme'],", "classifiers=[ 'Development Status :: 3 - Alpha', 'License :: OSI", ":: MIT License', 'Programming Language :: Python :: 3'], )", "with open('requirements.txt') as f: requirements = f.read().splitlines() setup( name =", "= '', keywords = ['enzyme'], install_requires=requirements, classifiers=[ 'Development Status ::", "= f.read().splitlines() setup( name = 'retrobiocat_web', packages = find_packages(), include_package_data=True,", "download_url = '', keywords = ['enzyme'], install_requires=requirements, classifiers=[ 'Development Status", "packages = find_packages(), include_package_data=True, version = __version__, license='', description =", "import __version__ with open('requirements.txt') as f: requirements = f.read().splitlines() setup(", "'retrobiocat_web', packages = find_packages(), include_package_data=True, version = __version__, license='', 
description", "author = '<NAME>', author_email = '<EMAIL>', url = '', download_url", "requirements = f.read().splitlines() setup( name = 'retrobiocat_web', packages = find_packages(),", "= __version__, license='', description = 'Retrosynthesis', author = '<NAME>', author_email", "= ['enzyme'], install_requires=requirements, classifiers=[ 'Development Status :: 3 - Alpha',", "description = 'Retrosynthesis', author = '<NAME>', author_email = '<EMAIL>', url", "from setuptools import setup, find_packages from retrobiocat_web import __version__ with", "= find_packages(), include_package_data=True, version = __version__, license='', description = 'Retrosynthesis',", "setup, find_packages from retrobiocat_web import __version__ with open('requirements.txt') as f:", "'License :: OSI Approved :: MIT License', 'Programming Language ::", "OSI Approved :: MIT License', 'Programming Language :: Python ::", "setup( name = 'retrobiocat_web', packages = find_packages(), include_package_data=True, version =", "'<NAME>', author_email = '<EMAIL>', url = '', download_url = '',", "f.read().splitlines() setup( name = 'retrobiocat_web', packages = find_packages(), include_package_data=True, version", "author_email = '<EMAIL>', url = '', download_url = '', keywords", "keywords = ['enzyme'], install_requires=requirements, classifiers=[ 'Development Status :: 3 -", "= 'retrobiocat_web', packages = find_packages(), include_package_data=True, version = __version__, license='',", "'Retrosynthesis', author = '<NAME>', author_email = '<EMAIL>', url = '',", "3 - Alpha', 'License :: OSI Approved :: MIT License',", "= '', download_url = '', keywords = ['enzyme'], install_requires=requirements, classifiers=[", "__version__ with open('requirements.txt') as f: requirements = f.read().splitlines() setup( name", "name = 'retrobiocat_web', packages = find_packages(), include_package_data=True, version = __version__,", "find_packages(), include_package_data=True, version = __version__, license='', 
description = 'Retrosynthesis', author", ":: OSI Approved :: MIT License', 'Programming Language :: Python", "= '<EMAIL>', url = '', download_url = '', keywords =", "'', keywords = ['enzyme'], install_requires=requirements, classifiers=[ 'Development Status :: 3", "version = __version__, license='', description = 'Retrosynthesis', author = '<NAME>',", "= '<NAME>', author_email = '<EMAIL>', url = '', download_url =", "license='', description = 'Retrosynthesis', author = '<NAME>', author_email = '<EMAIL>',", "['enzyme'], install_requires=requirements, classifiers=[ 'Development Status :: 3 - Alpha', 'License", "'Development Status :: 3 - Alpha', 'License :: OSI Approved", "- Alpha', 'License :: OSI Approved :: MIT License', 'Programming", "as f: requirements = f.read().splitlines() setup( name = 'retrobiocat_web', packages", "Alpha', 'License :: OSI Approved :: MIT License', 'Programming Language", "Approved :: MIT License', 'Programming Language :: Python :: 3'],", "install_requires=requirements, classifiers=[ 'Development Status :: 3 - Alpha', 'License ::", ":: 3 - Alpha', 'License :: OSI Approved :: MIT", "from retrobiocat_web import __version__ with open('requirements.txt') as f: requirements =", "f: requirements = f.read().splitlines() setup( name = 'retrobiocat_web', packages =" ]
[ "= Chem.MolFromSmiles(rsmi) except Exception as e: print(e) return try: fp_bit", "AllChem,DataStructs def get_classes(path): f = open(path, 'rb') dict_ = pickle.load(f)", "= AllChem.GetMorganFingerprintAsBitVect( mol=mol, radius=2, nBits=rxnfpsize, useFeatures=useFeatures, useChirality=useChirality) fp = np.empty(rxnfpsize,", "due to {}\".format(e)) return pfp = fp rxn_fp = pfp", "classes def create_rxn_Morgan2FP_concatenate(rsmi, psmi, rxnfpsize=16384, pfpsize=16384, useFeatures=False, calculate_rfp=True, useChirality=True): #", "= Chem.MolFromSmiles(psmi) except Exception as e: return try: fp_bit =", "due to {}\".format(e)) return rfp = fp try: mol =", "import numpy as np from rdkit import Chem from rdkit.Chem", "but takes smiles separately and returns pfp and rfp separately", "AllChem.GetMorganFingerprintAsBitVect( mol=mol, radius=2, nBits=pfpsize, useFeatures=useFeatures, useChirality=useChirality) fp = np.empty(pfpsize, dtype='float32')", "[(x,y) for x,y in classes] return classes def create_rxn_Morgan2FP_concatenate(rsmi, psmi,", "numpy as np from rdkit import Chem from rdkit.Chem import", "def get_classes(path): f = open(path, 'rb') dict_ = pickle.load(f) f.close()", "fp try: mol = Chem.MolFromSmiles(psmi) except Exception as e: return", "separately and returns pfp and rfp separately rsmi = rsmi.encode('utf-8')", "as e: print(e) return try: fp_bit = AllChem.GetMorganFingerprintAsBitVect( mol=mol, radius=2,", "Similar as the above function but takes smiles separately and", "except Exception as e: print(\"Cannot build product fp due to", "except Exception as e: return try: fp_bit = AllChem.GetMorganFingerprintAsBitVect( mol=mol,", "rsmi = rsmi.encode('utf-8') psmi = psmi.encode('utf-8') try: mol = Chem.MolFromSmiles(rsmi)", "and returns pfp and rfp separately rsmi = rsmi.encode('utf-8') psmi", "mol = Chem.MolFromSmiles(psmi) except Exception as e: return try: fp_bit", "pickle.load(f) f.close() classes = sorted(dict_.items(), key=lambda d: d[1],reverse=True) 
classes =", "pfpsize=16384, useFeatures=False, calculate_rfp=True, useChirality=True): # Similar as the above function", "to {}\".format(e)) return pfp = fp rxn_fp = pfp -", "d[1],reverse=True) classes = [(x,y) for x,y in classes] return classes", "pfp = fp rxn_fp = pfp - rfp final_fp =", "print(e) return try: fp_bit = AllChem.GetMorganFingerprintAsBitVect( mol=mol, radius=2, nBits=rxnfpsize, useFeatures=useFeatures,", "try: fp_bit = AllChem.GetMorganFingerprintAsBitVect( mol=mol, radius=2, nBits=pfpsize, useFeatures=useFeatures, useChirality=useChirality) fp", "= sorted(dict_.items(), key=lambda d: d[1],reverse=True) classes = [(x,y) for x,y", "rsmi.encode('utf-8') psmi = psmi.encode('utf-8') try: mol = Chem.MolFromSmiles(rsmi) except Exception", "return pfp = fp rxn_fp = pfp - rfp final_fp", "np from rdkit import Chem from rdkit.Chem import AllChem,DataStructs def", "useChirality=useChirality) fp = np.empty(pfpsize, dtype='float32') DataStructs.ConvertToNumpyArray(fp_bit, fp) except Exception as", "as e: return try: fp_bit = AllChem.GetMorganFingerprintAsBitVect( mol=mol, radius=2, nBits=pfpsize,", "rfp separately rsmi = rsmi.encode('utf-8') psmi = psmi.encode('utf-8') try: mol", "Chem from rdkit.Chem import AllChem,DataStructs def get_classes(path): f = open(path,", "to {}\".format(e)) return rfp = fp try: mol = Chem.MolFromSmiles(psmi)", "return rfp = fp try: mol = Chem.MolFromSmiles(psmi) except Exception", "reactant fp due to {}\".format(e)) return rfp = fp try:", "Exception as e: return try: fp_bit = AllChem.GetMorganFingerprintAsBitVect( mol=mol, radius=2,", "build product fp due to {}\".format(e)) return pfp = fp", "fp = np.empty(pfpsize, dtype='float32') DataStructs.ConvertToNumpyArray(fp_bit, fp) except Exception as e:", "as np from rdkit import Chem from rdkit.Chem import AllChem,DataStructs", "key=lambda d: d[1],reverse=True) classes = [(x,y) for x,y in classes]", "as the above function but takes smiles separately and returns", 
"DataStructs.ConvertToNumpyArray(fp_bit, fp) except Exception as e: print(\"Cannot build reactant fp", "psmi = psmi.encode('utf-8') try: mol = Chem.MolFromSmiles(rsmi) except Exception as", "# Similar as the above function but takes smiles separately", "useChirality=useChirality) fp = np.empty(rxnfpsize, dtype='float32') DataStructs.ConvertToNumpyArray(fp_bit, fp) except Exception as", "calculate_rfp=True, useChirality=True): # Similar as the above function but takes", "Chem.MolFromSmiles(rsmi) except Exception as e: print(e) return try: fp_bit =", "useChirality=True): # Similar as the above function but takes smiles", "fp rxn_fp = pfp - rfp final_fp = np.concatenate((pfp, rxn_fp))", "useFeatures=useFeatures, useChirality=useChirality) fp = np.empty(rxnfpsize, dtype='float32') DataStructs.ConvertToNumpyArray(fp_bit, fp) except Exception", "{}\".format(e)) return rfp = fp try: mol = Chem.MolFromSmiles(psmi) except", "= AllChem.GetMorganFingerprintAsBitVect( mol=mol, radius=2, nBits=pfpsize, useFeatures=useFeatures, useChirality=useChirality) fp = np.empty(pfpsize,", "classes = [(x,y) for x,y in classes] return classes def", "for x,y in classes] return classes def create_rxn_Morgan2FP_concatenate(rsmi, psmi, rxnfpsize=16384,", "try: fp_bit = AllChem.GetMorganFingerprintAsBitVect( mol=mol, radius=2, nBits=rxnfpsize, useFeatures=useFeatures, useChirality=useChirality) fp", "nBits=rxnfpsize, useFeatures=useFeatures, useChirality=useChirality) fp = np.empty(rxnfpsize, dtype='float32') DataStructs.ConvertToNumpyArray(fp_bit, fp) except", "import AllChem,DataStructs def get_classes(path): f = open(path, 'rb') dict_ =", "rdkit.Chem import AllChem,DataStructs def get_classes(path): f = open(path, 'rb') dict_", "Chem.MolFromSmiles(psmi) except Exception as e: return try: fp_bit = AllChem.GetMorganFingerprintAsBitVect(", "= fp rxn_fp = pfp - rfp final_fp = np.concatenate((pfp,", "return classes def create_rxn_Morgan2FP_concatenate(rsmi, psmi, rxnfpsize=16384, pfpsize=16384, 
useFeatures=False, calculate_rfp=True, useChirality=True):", "except Exception as e: print(e) return try: fp_bit = AllChem.GetMorganFingerprintAsBitVect(", "psmi.encode('utf-8') try: mol = Chem.MolFromSmiles(rsmi) except Exception as e: print(e)", "pfp and rfp separately rsmi = rsmi.encode('utf-8') psmi = psmi.encode('utf-8')", "import pickle import numpy as np from rdkit import Chem", "print(\"Cannot build reactant fp due to {}\".format(e)) return rfp =", "create_rxn_Morgan2FP_concatenate(rsmi, psmi, rxnfpsize=16384, pfpsize=16384, useFeatures=False, calculate_rfp=True, useChirality=True): # Similar as", "product fp due to {}\".format(e)) return pfp = fp rxn_fp", "{}\".format(e)) return pfp = fp rxn_fp = pfp - rfp", "Exception as e: print(e) return try: fp_bit = AllChem.GetMorganFingerprintAsBitVect( mol=mol,", "mol=mol, radius=2, nBits=rxnfpsize, useFeatures=useFeatures, useChirality=useChirality) fp = np.empty(rxnfpsize, dtype='float32') DataStructs.ConvertToNumpyArray(fp_bit,", "return try: fp_bit = AllChem.GetMorganFingerprintAsBitVect( mol=mol, radius=2, nBits=rxnfpsize, useFeatures=useFeatures, useChirality=useChirality)", "np.empty(pfpsize, dtype='float32') DataStructs.ConvertToNumpyArray(fp_bit, fp) except Exception as e: print(\"Cannot build", "fp) except Exception as e: print(\"Cannot build product fp due", "# -*- coding: utf-8 -*- import pickle import numpy as", "sorted(dict_.items(), key=lambda d: d[1],reverse=True) classes = [(x,y) for x,y in", "f.close() classes = sorted(dict_.items(), key=lambda d: d[1],reverse=True) classes = [(x,y)", "= rsmi.encode('utf-8') psmi = psmi.encode('utf-8') try: mol = Chem.MolFromSmiles(rsmi) except", "fp) except Exception as e: print(\"Cannot build reactant fp due", "build reactant fp due to {}\".format(e)) return rfp = fp", "pickle import numpy as np from rdkit import Chem from", "fp = np.empty(rxnfpsize, dtype='float32') DataStructs.ConvertToNumpyArray(fp_bit, fp) except Exception as e:", "= [(x,y) for x,y in 
classes] return classes def create_rxn_Morgan2FP_concatenate(rsmi,", "radius=2, nBits=pfpsize, useFeatures=useFeatures, useChirality=useChirality) fp = np.empty(pfpsize, dtype='float32') DataStructs.ConvertToNumpyArray(fp_bit, fp)", "fp due to {}\".format(e)) return pfp = fp rxn_fp =", "import Chem from rdkit.Chem import AllChem,DataStructs def get_classes(path): f =", "as e: print(\"Cannot build product fp due to {}\".format(e)) return", "= pfp - rfp final_fp = np.concatenate((pfp, rxn_fp)) return final_fp", "utf-8 -*- import pickle import numpy as np from rdkit", "dict_ = pickle.load(f) f.close() classes = sorted(dict_.items(), key=lambda d: d[1],reverse=True)", "print(\"Cannot build product fp due to {}\".format(e)) return pfp =", "useFeatures=useFeatures, useChirality=useChirality) fp = np.empty(pfpsize, dtype='float32') DataStructs.ConvertToNumpyArray(fp_bit, fp) except Exception", "rxnfpsize=16384, pfpsize=16384, useFeatures=False, calculate_rfp=True, useChirality=True): # Similar as the above", "rxn_fp = pfp - rfp final_fp = np.concatenate((pfp, rxn_fp)) return", "as e: print(\"Cannot build reactant fp due to {}\".format(e)) return", "returns pfp and rfp separately rsmi = rsmi.encode('utf-8') psmi =", "AllChem.GetMorganFingerprintAsBitVect( mol=mol, radius=2, nBits=rxnfpsize, useFeatures=useFeatures, useChirality=useChirality) fp = np.empty(rxnfpsize, dtype='float32')", "= np.empty(rxnfpsize, dtype='float32') DataStructs.ConvertToNumpyArray(fp_bit, fp) except Exception as e: print(\"Cannot", "'rb') dict_ = pickle.load(f) f.close() classes = sorted(dict_.items(), key=lambda d:", "rdkit import Chem from rdkit.Chem import AllChem,DataStructs def get_classes(path): f", "= psmi.encode('utf-8') try: mol = Chem.MolFromSmiles(rsmi) except Exception as e:", "f = open(path, 'rb') dict_ = pickle.load(f) f.close() classes =", "-*- import pickle import numpy as np from rdkit import", "DataStructs.ConvertToNumpyArray(fp_bit, fp) except Exception as e: print(\"Cannot build 
product fp", "-*- coding: utf-8 -*- import pickle import numpy as np", "in classes] return classes def create_rxn_Morgan2FP_concatenate(rsmi, psmi, rxnfpsize=16384, pfpsize=16384, useFeatures=False,", "mol = Chem.MolFromSmiles(rsmi) except Exception as e: print(e) return try:", "mol=mol, radius=2, nBits=pfpsize, useFeatures=useFeatures, useChirality=useChirality) fp = np.empty(pfpsize, dtype='float32') DataStructs.ConvertToNumpyArray(fp_bit,", "def create_rxn_Morgan2FP_concatenate(rsmi, psmi, rxnfpsize=16384, pfpsize=16384, useFeatures=False, calculate_rfp=True, useChirality=True): # Similar", "and rfp separately rsmi = rsmi.encode('utf-8') psmi = psmi.encode('utf-8') try:", "except Exception as e: print(\"Cannot build reactant fp due to", "useFeatures=False, calculate_rfp=True, useChirality=True): # Similar as the above function but", "e: print(\"Cannot build reactant fp due to {}\".format(e)) return rfp", "e: return try: fp_bit = AllChem.GetMorganFingerprintAsBitVect( mol=mol, radius=2, nBits=pfpsize, useFeatures=useFeatures,", "fp_bit = AllChem.GetMorganFingerprintAsBitVect( mol=mol, radius=2, nBits=rxnfpsize, useFeatures=useFeatures, useChirality=useChirality) fp =", "the above function but takes smiles separately and returns pfp", "fp due to {}\".format(e)) return rfp = fp try: mol", "= open(path, 'rb') dict_ = pickle.load(f) f.close() classes = sorted(dict_.items(),", "return try: fp_bit = AllChem.GetMorganFingerprintAsBitVect( mol=mol, radius=2, nBits=pfpsize, useFeatures=useFeatures, useChirality=useChirality)", "np.empty(rxnfpsize, dtype='float32') DataStructs.ConvertToNumpyArray(fp_bit, fp) except Exception as e: print(\"Cannot build", "try: mol = Chem.MolFromSmiles(rsmi) except Exception as e: print(e) return", "= pickle.load(f) f.close() classes = sorted(dict_.items(), key=lambda d: d[1],reverse=True) classes", "smiles separately and returns pfp and rfp separately rsmi =", "classes] return classes def create_rxn_Morgan2FP_concatenate(rsmi, psmi, 
rxnfpsize=16384, pfpsize=16384, useFeatures=False, calculate_rfp=True,", "dtype='float32') DataStructs.ConvertToNumpyArray(fp_bit, fp) except Exception as e: print(\"Cannot build product", "Exception as e: print(\"Cannot build product fp due to {}\".format(e))", "try: mol = Chem.MolFromSmiles(psmi) except Exception as e: return try:", "nBits=pfpsize, useFeatures=useFeatures, useChirality=useChirality) fp = np.empty(pfpsize, dtype='float32') DataStructs.ConvertToNumpyArray(fp_bit, fp) except", "radius=2, nBits=rxnfpsize, useFeatures=useFeatures, useChirality=useChirality) fp = np.empty(rxnfpsize, dtype='float32') DataStructs.ConvertToNumpyArray(fp_bit, fp)", "= np.empty(pfpsize, dtype='float32') DataStructs.ConvertToNumpyArray(fp_bit, fp) except Exception as e: print(\"Cannot", "e: print(\"Cannot build product fp due to {}\".format(e)) return pfp", "e: print(e) return try: fp_bit = AllChem.GetMorganFingerprintAsBitVect( mol=mol, radius=2, nBits=rxnfpsize,", "dtype='float32') DataStructs.ConvertToNumpyArray(fp_bit, fp) except Exception as e: print(\"Cannot build reactant", "get_classes(path): f = open(path, 'rb') dict_ = pickle.load(f) f.close() classes", "classes = sorted(dict_.items(), key=lambda d: d[1],reverse=True) classes = [(x,y) for", "separately rsmi = rsmi.encode('utf-8') psmi = psmi.encode('utf-8') try: mol =", "= fp try: mol = Chem.MolFromSmiles(psmi) except Exception as e:", "fp_bit = AllChem.GetMorganFingerprintAsBitVect( mol=mol, radius=2, nBits=pfpsize, useFeatures=useFeatures, useChirality=useChirality) fp =", "from rdkit import Chem from rdkit.Chem import AllChem,DataStructs def get_classes(path):", "psmi, rxnfpsize=16384, pfpsize=16384, useFeatures=False, calculate_rfp=True, useChirality=True): # Similar as the", "coding: utf-8 -*- import pickle import numpy as np from", "from rdkit.Chem import AllChem,DataStructs def get_classes(path): f = open(path, 'rb')", "d: d[1],reverse=True) classes = [(x,y) for x,y in classes] return", "Exception as e: 
print(\"Cannot build reactant fp due to {}\".format(e))", "takes smiles separately and returns pfp and rfp separately rsmi", "open(path, 'rb') dict_ = pickle.load(f) f.close() classes = sorted(dict_.items(), key=lambda", "above function but takes smiles separately and returns pfp and", "function but takes smiles separately and returns pfp and rfp", "x,y in classes] return classes def create_rxn_Morgan2FP_concatenate(rsmi, psmi, rxnfpsize=16384, pfpsize=16384,", "rfp = fp try: mol = Chem.MolFromSmiles(psmi) except Exception as" ]
[ "= [] _TEMPLATE_MAPPING = {'html': 'main.html', 'entities': 'entities.html', 'groups': 'groups.html'}", "_NS=\"{http://www.w3.org/1999/xhtml}\" for _, element in lxml.html.etree.iterwalk(tree, events=('start', )): if not", "events=('start', )): if not isinstance(element.tag, str): continue if not element.tag.startswith(_NS):", "= model.html_tokenizer.detokenize_single(tokens, tags) tree = webstruct.webannotator.to_webannotator( tree, entity_colors=model.entity_colors, url=url )", "except: logging.exception('Got exception') content = None title = 'Error during", "content = None title = 'Error during obtaining %s' %", "not in element.attrib: continue element.attrib[attr] = absolutize_link(element.attrib[attr], base_url) return tree", "webstruct.sequence_encoding import webstruct.webannotator webstruct_demo = Flask(__name__, instance_relative_config=True) webstruct_demo.config.from_pyfile('config.py') def absolutize_link(link,", "= html5parser.document_fromstring(response_content) tree = remove_namespace(tree) tree = absolute_links(tree, url) tree", "in lxml.html.etree.iterwalk(tree, events=('start', )): if not isinstance(element.tag, str): continue for", "return str(target_url) def absolute_links(tree, url): _LINK_SOURCES = ['src', 'href'] try:", "!= 'O' ) groups = webstruct.model.extract_entitiy_groups( tokens, tags, dont_penalize=None, join_tokens=model.build_entity", "lxml.html.etree.iterwalk(tree, events=('start', )): if not isinstance(element.tag, str): continue if not", "target_url = target_url.with_scheme(base_url.scheme) return str(target_url) try: target_url = base_url.join(target_url) except:", "sample_groups(groups): groups = [tuple(sorted(g)) for g in groups] sampled =", "if not html_tokens: return tree, list(), list() tree = html_tokens[0].elem.getroottree().getroot()", "def absolutize_link(link, base_url): if link.startswith('#'): return link try: target_url =", "= None title = 'Error during obtaining %s' % (url,", "requests import yarl import 
webstruct.model import webstruct.sequence_encoding import webstruct.webannotator webstruct_demo", "during obtaining %s' % (url, ) entities = [] groups", "entities if tag != 'O' ) groups = webstruct.model.extract_entitiy_groups( tokens,", "import lxml.html import requests import yarl import webstruct.model import webstruct.sequence_encoding", "tree = absolute_links(tree, url) tree = parent_links(tree, base_url) title =", "if 'href' not in element.attrib: continue url = element.attrib['href'] if", "_TOKENS_PER_PART)]) tags = [i for t in tags for i", "absolutize_link(element.attrib[attr], base_url) return tree def parent_links(tree, base_url): base_url = yarl.URL(base_url)", "tree, tokens, tags = run_model(tree, model) tree = model.html_tokenizer.detokenize_single(tokens, tags)", "= parent_links(tree, base_url) title = tree.xpath('//title')[0].text model = joblib.load(webstruct_demo.config['MODEL_PATH']) tree,", "random from flask import Flask, render_template, request import joblib from", "_TOKENS_PER_PART = 2000 def run_model(tree, model): html_tokens, _ = model.html_tokenizer.tokenize_single(tree)", "tree, list(), list() tree = html_tokens[0].elem.getroottree().getroot() tags = model.model.predict([html_tokens[i:i+_TOKENS_PER_PART] for", "[tuple(sorted(g)) for g in groups] sampled = sorted(list(set(groups)), key=lambda g:-len(g))", "continue if not element.tag.startswith(_NS): continue element.tag = element.tag[len(_NS):] return tree", "try: target_url = yarl.URL(link) except: return link if target_url.is_absolute() and", "if attr not in element.attrib: continue element.attrib[attr] = absolutize_link(element.attrib[attr], base_url)", "t] return tree, html_tokens, tags def download(url): splash_url = webstruct_demo.config.get('SPLASH_URL',", "def index(): url = request.args.get('url', 'http://en.wikipedia.org/') output = request.args.get('output', 'html')", "for i in range(0, len(html_tokens), _TOKENS_PER_PART)]) tags = [i for", "for _, element in 
lxml.html.etree.iterwalk(tree, events=('start', )): if not isinstance(element.tag,", "[i for t in tags for i in t] return", "= 2000 def run_model(tree, model): html_tokens, _ = model.html_tokenizer.tokenize_single(tree) if", "continue element.attrib['target'] = '_parent' element.attrib['href'] = str(base_url.update_query(url=url)) return tree def", "len(html_tokens), _TOKENS_PER_PART)]) tags = [i for t in tags for", "@webstruct_demo.route('/') def index(): url = request.args.get('url', 'http://en.wikipedia.org/') output = request.args.get('output',", "load = {'url': url, 'images': 0, 'base_url': url} response =", "= extract_ner(response_content, response_url, request.url) except: logging.exception('Got exception') content = None", "functools import logging import random from flask import Flask, render_template,", "= yarl.URL(url) except: return tree for _, element in lxml.html.etree.iterwalk(tree,", "from flask import Flask, render_template, request import joblib from lxml.html", "response.content, url def extract_ner(response_content, response_url, base_url): url = response_url tree", "return tree _TOKENS_PER_PART = 2000 def run_model(tree, model): html_tokens, _", "html5parser import lxml.html import requests import yarl import webstruct.model import", "tag != 'O' ) groups = webstruct.model.extract_entitiy_groups( tokens, tags, dont_penalize=None,", "for i in t] return tree, html_tokens, tags def download(url):", "base_url) title = tree.xpath('//title')[0].text model = joblib.load(webstruct_demo.config['MODEL_PATH']) tree, tokens, tags", "title = 'Error during obtaining %s' % (url, ) entities", "= yarl.URL(base_url) for _, element in lxml.html.etree.iterwalk(tree, events=('start', )): if", "t in tags for i in t] return tree, html_tokens,", "tree.xpath('//title')[0].text model = joblib.load(webstruct_demo.config['MODEL_PATH']) tree, tokens, tags = run_model(tree, model)", "tags = [i for t in tags for i in", "= list(set(entities)) random.shuffle(unique) sampled = 
unique[:5] sampled = sorted(sampled, key=lambda", "url): _LINK_SOURCES = ['src', 'href'] try: base_url = yarl.URL(url) except:", "element.attrib: continue url = element.attrib['href'] if url.startswith('#'): continue element.attrib['target'] =", "in range(0, len(html_tokens), _TOKENS_PER_PART)]) tags = [i for t in", "tokens, tags = run_model(tree, model) tree = model.html_tokenizer.detokenize_single(tokens, tags) tree", "tags = model.model.predict([html_tokens[i:i+_TOKENS_PER_PART] for i in range(0, len(html_tokens), _TOKENS_PER_PART)]) tags", "list(), list() tree = html_tokens[0].elem.getroottree().getroot() tags = model.model.predict([html_tokens[i:i+_TOKENS_PER_PART] for i", "'routing': routing, 'srcdoc': content, 'groups': groups, 'output': output} return render_template(template,", "= functools.reduce(lambda x,y: x and y is not None, [splash_url,", "webstruct_demo.config.get('SPLASH_URL', None) splash_user = webstruct_demo.config.get('SPLASH_USER', None) splash_pass = webstruct_demo.config.get('SPLASH_PASS', None)", "g:-len(g)) return sampled[:2] @webstruct_demo.route('/') def index(): url = request.args.get('url', 'http://en.wikipedia.org/')", "Flask(__name__, instance_relative_config=True) webstruct_demo.config.from_pyfile('config.py') def absolutize_link(link, base_url): if link.startswith('#'): return link", "!= 'a': continue if 'href' not in element.attrib: continue url", "base_url = yarl.URL(request.url) routing = {t: str(base_url.update_query(output=t)) for t in", "'O' ) groups = webstruct.model.extract_entitiy_groups( tokens, tags, dont_penalize=None, join_tokens=model.build_entity )", "groups = [tuple(sorted(g)) for g in groups] sampled = sorted(list(set(groups)),", "content, title, entities, groups def sample_entities(entities): unique = list(set(entities)) random.shuffle(unique)", "{'url': url, 'title': title, 'entities': entities, 'sampled_entities': sampled_entities, 'sampled_groups': sampled_groups,", ") entities = [] groups = [] 
_TEMPLATE_MAPPING = {'html':", "dont_penalize=None, join_tokens=model.build_entity ) return content, title, entities, groups def sample_entities(entities):", "not element.tag.startswith(_NS): continue element.tag = element.tag[len(_NS):] return tree _TOKENS_PER_PART =", "return tree, list(), list() tree = html_tokens[0].elem.getroottree().getroot() tags = model.model.predict([html_tokens[i:i+_TOKENS_PER_PART]", "tags for i in t] return tree, html_tokens, tags def", "= str(base_url.update_query(url=url)) return tree def remove_namespace(tree): _NS=\"{http://www.w3.org/1999/xhtml}\" for _, element", "content = lxml.html.tostring(tree, encoding='utf-8').decode('utf-8') entities = webstruct.sequence_encoding.IobEncoder.group(zip(tokens, tags)) entities =", "logging import random from flask import Flask, render_template, request import", "target_url = yarl.URL(link) except: return link if target_url.is_absolute() and target_url.scheme:", "join_tokens=model.build_entity ) return content, title, entities, groups def sample_entities(entities): unique", "for t in tags for i in t] return tree,", "url=url ) content = lxml.html.tostring(tree, encoding='utf-8').decode('utf-8') entities = webstruct.sequence_encoding.IobEncoder.group(zip(tokens, tags))", "isinstance(element.tag, str): continue if element.tag != 'a': continue if 'href'", "if target_url.is_absolute() and not target_url.scheme: target_url = target_url.with_scheme(base_url.scheme) return str(target_url)", "return tree def remove_namespace(tree): _NS=\"{http://www.w3.org/1999/xhtml}\" for _, element in lxml.html.etree.iterwalk(tree,", "sampled = sorted(sampled, key=lambda e:(e[1], e[0])) return sampled def sample_groups(groups):", "tree = remove_namespace(tree) tree = absolute_links(tree, url) tree = parent_links(tree,", "= '_parent' element.attrib['href'] = str(base_url.update_query(url=url)) return tree def remove_namespace(tree): _NS=\"{http://www.w3.org/1999/xhtml}\"", "yarl.URL(base_url) for _, element in 
lxml.html.etree.iterwalk(tree, events=('start', )): if not", "webstruct.model import webstruct.sequence_encoding import webstruct.webannotator webstruct_demo = Flask(__name__, instance_relative_config=True) webstruct_demo.config.from_pyfile('config.py')", "not isinstance(element.tag, str): continue if not element.tag.startswith(_NS): continue element.tag =", "groups = extract_ner(response_content, response_url, request.url) except: logging.exception('Got exception') content =", "[] groups = [] _TEMPLATE_MAPPING = {'html': 'main.html', 'entities': 'entities.html',", "key=lambda e:(e[1], e[0])) return sampled def sample_groups(groups): groups = [tuple(sorted(g))", "tags, dont_penalize=None, join_tokens=model.build_entity ) return content, title, entities, groups def", "target_url = base_url.join(target_url) except: return link return str(target_url) def absolute_links(tree,", "str): continue for attr in _LINK_SOURCES: if attr not in", "e[0])) return sampled def sample_groups(groups): groups = [tuple(sorted(g)) for g", "= target_url.with_scheme(base_url.scheme) return str(target_url) try: target_url = base_url.join(target_url) except: return", "encoding='utf-8').decode('utf-8') entities = webstruct.sequence_encoding.IobEncoder.group(zip(tokens, tags)) entities = webstruct.model._drop_empty( (model.build_entity(tokens), tag)", "webstruct.webannotator webstruct_demo = Flask(__name__, instance_relative_config=True) webstruct_demo.config.from_pyfile('config.py') def absolutize_link(link, base_url): if", "y is not None, [splash_url, splash_user, splash_pass], True) if not", "= yarl.URL(request.url) routing = {t: str(base_url.update_query(output=t)) for t in ['html',", "= tree.xpath('//title')[0].text model = joblib.load(webstruct_demo.config['MODEL_PATH']) tree, tokens, tags = run_model(tree,", "str(target_url) def absolute_links(tree, url): _LINK_SOURCES = ['src', 'href'] try: base_url", "'title': title, 'entities': entities, 'sampled_entities': sampled_entities, 
'sampled_groups': sampled_groups, 'routing': routing,", "return link return str(target_url) def absolute_links(tree, url): _LINK_SOURCES = ['src',", "if element.tag != 'a': continue if 'href' not in element.attrib:", "= sorted(sampled, key=lambda e:(e[1], e[0])) return sampled def sample_groups(groups): groups", "target_url.is_absolute() and not target_url.scheme: target_url = target_url.with_scheme(base_url.scheme) return str(target_url) try:", "url, 'images': 0, 'base_url': url} response = requests.post(splash_url + '/render.html',", "response = requests.get(url) return response.content, response.url load = {'url': url,", "import random from flask import Flask, render_template, request import joblib", "(model.build_entity(tokens), tag) for (tokens, tag) in entities if tag !=", "return tree for _, element in lxml.html.etree.iterwalk(tree, events=('start', )): if", "entities = webstruct.model._drop_empty( (model.build_entity(tokens), tag) for (tokens, tag) in entities", "json=load, auth=requests.auth.HTTPBasicAuth(splash_user, splash_pass)) return response.content, url def extract_ner(response_content, response_url, base_url):", "{'url': url, 'images': 0, 'base_url': url} response = requests.post(splash_url +", "isinstance(element.tag, str): continue for attr in _LINK_SOURCES: if attr not", "= lxml.html.tostring(tree, encoding='utf-8').decode('utf-8') entities = webstruct.sequence_encoding.IobEncoder.group(zip(tokens, tags)) entities = webstruct.model._drop_empty(", "= response_url tree = html5parser.document_fromstring(response_content) tree = remove_namespace(tree) tree =", "if url.startswith('#'): continue element.attrib['target'] = '_parent' element.attrib['href'] = str(base_url.update_query(url=url)) return", "element.attrib: continue element.attrib[attr] = absolutize_link(element.attrib[attr], base_url) return tree def parent_links(tree,", "target_url.with_scheme(base_url.scheme) return str(target_url) try: target_url = base_url.join(target_url) except: 
return link", "url.startswith('#'): continue element.attrib['target'] = '_parent' element.attrib['href'] = str(base_url.update_query(url=url)) return tree", "isinstance(element.tag, str): continue if not element.tag.startswith(_NS): continue element.tag = element.tag[len(_NS):]", "html_tokens[0].elem.getroottree().getroot() tags = model.model.predict([html_tokens[i:i+_TOKENS_PER_PART] for i in range(0, len(html_tokens), _TOKENS_PER_PART)])", "import webstruct.model import webstruct.sequence_encoding import webstruct.webannotator webstruct_demo = Flask(__name__, instance_relative_config=True)", "index(): url = request.args.get('url', 'http://en.wikipedia.org/') output = request.args.get('output', 'html') try:", "title, entities, groups def sample_entities(entities): unique = list(set(entities)) random.shuffle(unique) sampled", "events=('start', )): if not isinstance(element.tag, str): continue for attr in", "response.content, response.url load = {'url': url, 'images': 0, 'base_url': url}", "= {'url': url, 'title': title, 'entities': entities, 'sampled_entities': sampled_entities, 'sampled_groups':", "str): continue if not element.tag.startswith(_NS): continue element.tag = element.tag[len(_NS):] return", "base_url): base_url = yarl.URL(base_url) for _, element in lxml.html.etree.iterwalk(tree, events=('start',", "groups = [] _TEMPLATE_MAPPING = {'html': 'main.html', 'entities': 'entities.html', 'groups':", "webstruct.webannotator.to_webannotator( tree, entity_colors=model.entity_colors, url=url ) content = lxml.html.tostring(tree, encoding='utf-8').decode('utf-8') entities", "tags def download(url): splash_url = webstruct_demo.config.get('SPLASH_URL', None) splash_user = webstruct_demo.config.get('SPLASH_USER',", "yarl.URL(request.url) routing = {t: str(base_url.update_query(output=t)) for t in ['html', 'entities',", "e:(e[1], e[0])) return sampled def sample_groups(groups): groups = [tuple(sorted(g)) for", "= 'Error during obtaining %s' % (url, ) entities =", "2000 
def run_model(tree, model): html_tokens, _ = model.html_tokenizer.tokenize_single(tree) if not", "splash_pass)) return response.content, url def extract_ner(response_content, response_url, base_url): url =", "target_url.scheme: return link if target_url.is_absolute() and not target_url.scheme: target_url =", "response_url = download(url) content, title, entities, groups = extract_ner(response_content, response_url,", "{'html': 'main.html', 'entities': 'entities.html', 'groups': 'groups.html'} template = _TEMPLATE_MAPPING.get(output, _TEMPLATE_MAPPING['html'])", "element in lxml.html.etree.iterwalk(tree, events=('start', )): if not isinstance(element.tag, str): continue", "for t in ['html', 'entities', 'groups']} values = {'url': url,", "tokens, tags, dont_penalize=None, join_tokens=model.build_entity ) return content, title, entities, groups", "i in range(0, len(html_tokens), _TOKENS_PER_PART)]) tags = [i for t", "routing, 'srcdoc': content, 'groups': groups, 'output': output} return render_template(template, **values)", "try: target_url = base_url.join(target_url) except: return link return str(target_url) def", "%s' % (url, ) entities = [] groups = []", "0, 'base_url': url} response = requests.post(splash_url + '/render.html', json=load, auth=requests.auth.HTTPBasicAuth(splash_user,", "for g in groups] sampled = sorted(list(set(groups)), key=lambda g:-len(g)) return", "_LINK_SOURCES = ['src', 'href'] try: base_url = yarl.URL(url) except: return", "not is_splash: response = requests.get(url) return response.content, response.url load =", "and target_url.scheme: return link if target_url.is_absolute() and not target_url.scheme: target_url", "return tree def parent_links(tree, base_url): base_url = yarl.URL(base_url) for _,", "None) splash_user = webstruct_demo.config.get('SPLASH_USER', None) splash_pass = webstruct_demo.config.get('SPLASH_PASS', None) is_splash", "for (tokens, tag) in entities if tag != 'O' )", "import logging import random from flask import Flask, 
render_template, request", "if target_url.is_absolute() and target_url.scheme: return link if target_url.is_absolute() and not", "= ['src', 'href'] try: base_url = yarl.URL(url) except: return tree", "and y is not None, [splash_url, splash_user, splash_pass], True) if", "url = element.attrib['href'] if url.startswith('#'): continue element.attrib['target'] = '_parent' element.attrib['href']", "sampled = unique[:5] sampled = sorted(sampled, key=lambda e:(e[1], e[0])) return", "+ '/render.html', json=load, auth=requests.auth.HTTPBasicAuth(splash_user, splash_pass)) return response.content, url def extract_ner(response_content,", "['src', 'href'] try: base_url = yarl.URL(url) except: return tree for", "not isinstance(element.tag, str): continue for attr in _LINK_SOURCES: if attr", "splash_pass], True) if not is_splash: response = requests.get(url) return response.content,", "tree _TOKENS_PER_PART = 2000 def run_model(tree, model): html_tokens, _ =", "element.attrib['href'] = str(base_url.update_query(url=url)) return tree def remove_namespace(tree): _NS=\"{http://www.w3.org/1999/xhtml}\" for _,", "html_tokens, tags def download(url): splash_url = webstruct_demo.config.get('SPLASH_URL', None) splash_user =", "return link if target_url.is_absolute() and target_url.scheme: return link if target_url.is_absolute()", "def extract_ner(response_content, response_url, base_url): url = response_url tree = html5parser.document_fromstring(response_content)", "if not isinstance(element.tag, str): continue if not element.tag.startswith(_NS): continue element.tag", "attr not in element.attrib: continue element.attrib[attr] = absolutize_link(element.attrib[attr], base_url) return", "= model.html_tokenizer.tokenize_single(tree) if not html_tokens: return tree, list(), list() tree", "sampled_groups = sample_groups(groups) base_url = yarl.URL(request.url) routing = {t: str(base_url.update_query(output=t))", "in t] return tree, html_tokens, tags def download(url): splash_url =", "groups 
= webstruct.model.extract_entitiy_groups( tokens, tags, dont_penalize=None, join_tokens=model.build_entity ) return content,", "% (url, ) entities = [] groups = [] _TEMPLATE_MAPPING", "key=lambda g:-len(g)) return sampled[:2] @webstruct_demo.route('/') def index(): url = request.args.get('url',", "routing = {t: str(base_url.update_query(output=t)) for t in ['html', 'entities', 'groups']}", "list(set(entities)) random.shuffle(unique) sampled = unique[:5] sampled = sorted(sampled, key=lambda e:(e[1],", "and not target_url.scheme: target_url = target_url.with_scheme(base_url.scheme) return str(target_url) try: target_url", "is_splash: response = requests.get(url) return response.content, response.url load = {'url':", "logging.exception('Got exception') content = None title = 'Error during obtaining", "= sorted(list(set(groups)), key=lambda g:-len(g)) return sampled[:2] @webstruct_demo.route('/') def index(): url", "continue element.tag = element.tag[len(_NS):] return tree _TOKENS_PER_PART = 2000 def", "= webstruct.model.extract_entitiy_groups( tokens, tags, dont_penalize=None, join_tokens=model.build_entity ) return content, title,", "continue if element.tag != 'a': continue if 'href' not in", "except: return tree for _, element in lxml.html.etree.iterwalk(tree, events=('start', )):", "absolute_links(tree, url) tree = parent_links(tree, base_url) title = tree.xpath('//title')[0].text model", "= sample_entities(entities) sampled_groups = sample_groups(groups) base_url = yarl.URL(request.url) routing =", "joblib from lxml.html import html5parser import lxml.html import requests import", "in entities if tag != 'O' ) groups = webstruct.model.extract_entitiy_groups(", "exception') content = None title = 'Error during obtaining %s'", "= Flask(__name__, instance_relative_config=True) webstruct_demo.config.from_pyfile('config.py') def absolutize_link(link, base_url): if link.startswith('#'): return", "instance_relative_config=True) 
webstruct_demo.config.from_pyfile('config.py') def absolutize_link(link, base_url): if link.startswith('#'): return link try:", "auth=requests.auth.HTTPBasicAuth(splash_user, splash_pass)) return response.content, url def extract_ner(response_content, response_url, base_url): url", "sampled[:2] @webstruct_demo.route('/') def index(): url = request.args.get('url', 'http://en.wikipedia.org/') output =", "entities = [] groups = [] _TEMPLATE_MAPPING = {'html': 'main.html',", "'Error during obtaining %s' % (url, ) entities = []", "run_model(tree, model) tree = model.html_tokenizer.detokenize_single(tokens, tags) tree = webstruct.webannotator.to_webannotator( tree,", "splash_user, splash_pass], True) if not is_splash: response = requests.get(url) return", "if link.startswith('#'): return link try: target_url = yarl.URL(link) except: return", "extract_ner(response_content, response_url, base_url): url = response_url tree = html5parser.document_fromstring(response_content) tree", "model.html_tokenizer.tokenize_single(tree) if not html_tokens: return tree, list(), list() tree =", "splash_user = webstruct_demo.config.get('SPLASH_USER', None) splash_pass = webstruct_demo.config.get('SPLASH_PASS', None) is_splash =", "is not None, [splash_url, splash_user, splash_pass], True) if not is_splash:", "= absolutize_link(element.attrib[attr], base_url) return tree def parent_links(tree, base_url): base_url =", "url def extract_ner(response_content, response_url, base_url): url = response_url tree =", "[splash_url, splash_user, splash_pass], True) if not is_splash: response = requests.get(url)", "= yarl.URL(link) except: return link if target_url.is_absolute() and target_url.scheme: return", "'entities': 'entities.html', 'groups': 'groups.html'} template = _TEMPLATE_MAPPING.get(output, _TEMPLATE_MAPPING['html']) sampled_entities =", "download(url) content, title, entities, groups = extract_ner(response_content, response_url, request.url) except:", "remove_namespace(tree) tree = 
absolute_links(tree, url) tree = parent_links(tree, base_url) title", "entity_colors=model.entity_colors, url=url ) content = lxml.html.tostring(tree, encoding='utf-8').decode('utf-8') entities = webstruct.sequence_encoding.IobEncoder.group(zip(tokens,", "= webstruct.webannotator.to_webannotator( tree, entity_colors=model.entity_colors, url=url ) content = lxml.html.tostring(tree, encoding='utf-8').decode('utf-8')", "groups] sampled = sorted(list(set(groups)), key=lambda g:-len(g)) return sampled[:2] @webstruct_demo.route('/') def", "try: base_url = yarl.URL(url) except: return tree for _, element", "splash_url = webstruct_demo.config.get('SPLASH_URL', None) splash_user = webstruct_demo.config.get('SPLASH_USER', None) splash_pass =", "link try: target_url = yarl.URL(link) except: return link if target_url.is_absolute()", "sampled_entities = sample_entities(entities) sampled_groups = sample_groups(groups) base_url = yarl.URL(request.url) routing", "tree def parent_links(tree, base_url): base_url = yarl.URL(base_url) for _, element", "= _TEMPLATE_MAPPING.get(output, _TEMPLATE_MAPPING['html']) sampled_entities = sample_entities(entities) sampled_groups = sample_groups(groups) base_url", "link if target_url.is_absolute() and not target_url.scheme: target_url = target_url.with_scheme(base_url.scheme) return", "= absolute_links(tree, url) tree = parent_links(tree, base_url) title = tree.xpath('//title')[0].text", "[] _TEMPLATE_MAPPING = {'html': 'main.html', 'entities': 'entities.html', 'groups': 'groups.html'} template", "'sampled_entities': sampled_entities, 'sampled_groups': sampled_groups, 'routing': routing, 'srcdoc': content, 'groups': groups,", ")): if not isinstance(element.tag, str): continue for attr in _LINK_SOURCES:", "_LINK_SOURCES: if attr not in element.attrib: continue element.attrib[attr] = absolutize_link(element.attrib[attr],", "tree = parent_links(tree, base_url) title = tree.xpath('//title')[0].text model = 
joblib.load(webstruct_demo.config['MODEL_PATH'])", "= request.args.get('url', 'http://en.wikipedia.org/') output = request.args.get('output', 'html') try: response_content, response_url", "= sample_groups(groups) base_url = yarl.URL(request.url) routing = {t: str(base_url.update_query(output=t)) for", "return response.content, url def extract_ner(response_content, response_url, base_url): url = response_url", "def absolute_links(tree, url): _LINK_SOURCES = ['src', 'href'] try: base_url =", "joblib.load(webstruct_demo.config['MODEL_PATH']) tree, tokens, tags = run_model(tree, model) tree = model.html_tokenizer.detokenize_single(tokens,", "element.attrib['target'] = '_parent' element.attrib['href'] = str(base_url.update_query(url=url)) return tree def remove_namespace(tree):", "'http://en.wikipedia.org/') output = request.args.get('output', 'html') try: response_content, response_url = download(url)", "'main.html', 'entities': 'entities.html', 'groups': 'groups.html'} template = _TEMPLATE_MAPPING.get(output, _TEMPLATE_MAPPING['html']) sampled_entities", "attr in _LINK_SOURCES: if attr not in element.attrib: continue element.attrib[attr]", "is_splash = functools.reduce(lambda x,y: x and y is not None,", "= {t: str(base_url.update_query(output=t)) for t in ['html', 'entities', 'groups']} values", "def run_model(tree, model): html_tokens, _ = model.html_tokenizer.tokenize_single(tree) if not html_tokens:", "splash_pass = webstruct_demo.config.get('SPLASH_PASS', None) is_splash = functools.reduce(lambda x,y: x and", "except: return link return str(target_url) def absolute_links(tree, url): _LINK_SOURCES =", "= run_model(tree, model) tree = model.html_tokenizer.detokenize_single(tokens, tags) tree = webstruct.webannotator.to_webannotator(", "try: response_content, response_url = download(url) content, title, entities, groups =", "response_url tree = html5parser.document_fromstring(response_content) tree = remove_namespace(tree) tree = absolute_links(tree,", "= [i for t in 
tags for i in t]", "absolutize_link(link, base_url): if link.startswith('#'): return link try: target_url = yarl.URL(link)", "continue url = element.attrib['href'] if url.startswith('#'): continue element.attrib['target'] = '_parent'", "response_url, request.url) except: logging.exception('Got exception') content = None title =", "element.tag[len(_NS):] return tree _TOKENS_PER_PART = 2000 def run_model(tree, model): html_tokens,", "tree = html5parser.document_fromstring(response_content) tree = remove_namespace(tree) tree = absolute_links(tree, url)", "request.url) except: logging.exception('Got exception') content = None title = 'Error", "'groups': 'groups.html'} template = _TEMPLATE_MAPPING.get(output, _TEMPLATE_MAPPING['html']) sampled_entities = sample_entities(entities) sampled_groups", "return content, title, entities, groups def sample_entities(entities): unique = list(set(entities))", "import webstruct.webannotator webstruct_demo = Flask(__name__, instance_relative_config=True) webstruct_demo.config.from_pyfile('config.py') def absolutize_link(link, base_url):", "if not isinstance(element.tag, str): continue if element.tag != 'a': continue", "str): continue if element.tag != 'a': continue if 'href' not", "import functools import logging import random from flask import Flask,", "_ = model.html_tokenizer.tokenize_single(tree) if not html_tokens: return tree, list(), list()", "response_content, response_url = download(url) content, title, entities, groups = extract_ner(response_content,", "str(target_url) try: target_url = base_url.join(target_url) except: return link return str(target_url)", "(tokens, tag) in entities if tag != 'O' ) groups", "g in groups] sampled = sorted(list(set(groups)), key=lambda g:-len(g)) return sampled[:2]", "return sampled[:2] @webstruct_demo.route('/') def index(): url = request.args.get('url', 'http://en.wikipedia.org/') output", "_TEMPLATE_MAPPING = {'html': 'main.html', 'entities': 'entities.html', 'groups': 'groups.html'} 
template =", "'groups.html'} template = _TEMPLATE_MAPPING.get(output, _TEMPLATE_MAPPING['html']) sampled_entities = sample_entities(entities) sampled_groups =", "import Flask, render_template, request import joblib from lxml.html import html5parser", "obtaining %s' % (url, ) entities = [] groups =", "= requests.post(splash_url + '/render.html', json=load, auth=requests.auth.HTTPBasicAuth(splash_user, splash_pass)) return response.content, url", "= joblib.load(webstruct_demo.config['MODEL_PATH']) tree, tokens, tags = run_model(tree, model) tree =", "target_url.is_absolute() and target_url.scheme: return link if target_url.is_absolute() and not target_url.scheme:", "element.tag != 'a': continue if 'href' not in element.attrib: continue", "str(base_url.update_query(output=t)) for t in ['html', 'entities', 'groups']} values = {'url':", "webstruct_demo = Flask(__name__, instance_relative_config=True) webstruct_demo.config.from_pyfile('config.py') def absolutize_link(link, base_url): if link.startswith('#'):", "'a': continue if 'href' not in element.attrib: continue url =", "= element.tag[len(_NS):] return tree _TOKENS_PER_PART = 2000 def run_model(tree, model):", "base_url): if link.startswith('#'): return link try: target_url = yarl.URL(link) except:", "content, title, entities, groups = extract_ner(response_content, response_url, request.url) except: logging.exception('Got", ")): if not isinstance(element.tag, str): continue if not element.tag.startswith(_NS): continue", "if tag != 'O' ) groups = webstruct.model.extract_entitiy_groups( tokens, tags,", "in element.attrib: continue url = element.attrib['href'] if url.startswith('#'): continue element.attrib['target']", "lxml.html import requests import yarl import webstruct.model import webstruct.sequence_encoding import", "response = requests.post(splash_url + '/render.html', json=load, auth=requests.auth.HTTPBasicAuth(splash_user, splash_pass)) return response.content,", "lxml.html.etree.iterwalk(tree, 
events=('start', )): if not isinstance(element.tag, str): continue for attr", "t in ['html', 'entities', 'groups']} values = {'url': url, 'title':", "None) is_splash = functools.reduce(lambda x,y: x and y is not", "tags) tree = webstruct.webannotator.to_webannotator( tree, entity_colors=model.entity_colors, url=url ) content =", "None, [splash_url, splash_user, splash_pass], True) if not is_splash: response =", "element.attrib['href'] if url.startswith('#'): continue element.attrib['target'] = '_parent' element.attrib['href'] = str(base_url.update_query(url=url))", "import webstruct.sequence_encoding import webstruct.webannotator webstruct_demo = Flask(__name__, instance_relative_config=True) webstruct_demo.config.from_pyfile('config.py') def", "render_template, request import joblib from lxml.html import html5parser import lxml.html", "flask import Flask, render_template, request import joblib from lxml.html import", "def parent_links(tree, base_url): base_url = yarl.URL(base_url) for _, element in", "absolute_links(tree, url): _LINK_SOURCES = ['src', 'href'] try: base_url = yarl.URL(url)", "True) if not is_splash: response = requests.get(url) return response.content, response.url", "element.tag = element.tag[len(_NS):] return tree _TOKENS_PER_PART = 2000 def run_model(tree,", "'groups']} values = {'url': url, 'title': title, 'entities': entities, 'sampled_entities':", "'/render.html', json=load, auth=requests.auth.HTTPBasicAuth(splash_user, splash_pass)) return response.content, url def extract_ner(response_content, response_url,", "webstruct.sequence_encoding.IobEncoder.group(zip(tokens, tags)) entities = webstruct.model._drop_empty( (model.build_entity(tokens), tag) for (tokens, tag)", "from lxml.html import html5parser import lxml.html import requests import yarl", "= [] groups = [] _TEMPLATE_MAPPING = {'html': 'main.html', 'entities':", ") return content, title, entities, groups def sample_entities(entities): unique =", "extract_ner(response_content, 
response_url, request.url) except: logging.exception('Got exception') content = None title", "import html5parser import lxml.html import requests import yarl import webstruct.model", "= unique[:5] sampled = sorted(sampled, key=lambda e:(e[1], e[0])) return sampled", "lxml.html.etree.iterwalk(tree, events=('start', )): if not isinstance(element.tag, str): continue if element.tag", "html_tokens: return tree, list(), list() tree = html_tokens[0].elem.getroottree().getroot() tags =", "sampled_groups, 'routing': routing, 'srcdoc': content, 'groups': groups, 'output': output} return", "template = _TEMPLATE_MAPPING.get(output, _TEMPLATE_MAPPING['html']) sampled_entities = sample_entities(entities) sampled_groups = sample_groups(groups)", "base_url = yarl.URL(url) except: return tree for _, element in", "import requests import yarl import webstruct.model import webstruct.sequence_encoding import webstruct.webannotator", "None title = 'Error during obtaining %s' % (url, )", "return response.content, response.url load = {'url': url, 'images': 0, 'base_url':", "= {'html': 'main.html', 'entities': 'entities.html', 'groups': 'groups.html'} template = _TEMPLATE_MAPPING.get(output,", "in element.attrib: continue element.attrib[attr] = absolutize_link(element.attrib[attr], base_url) return tree def", "in lxml.html.etree.iterwalk(tree, events=('start', )): if not isinstance(element.tag, str): continue if", "remove_namespace(tree): _NS=\"{http://www.w3.org/1999/xhtml}\" for _, element in lxml.html.etree.iterwalk(tree, events=('start', )): if", "element.tag.startswith(_NS): continue element.tag = element.tag[len(_NS):] return tree _TOKENS_PER_PART = 2000", "tags = run_model(tree, model) tree = model.html_tokenizer.detokenize_single(tokens, tags) tree =", "= base_url.join(target_url) except: return link return str(target_url) def absolute_links(tree, url):", "return link try: target_url = yarl.URL(link) except: return link if", "response.url load = {'url': url, 'images': 0, 
'base_url': url} response", "unique = list(set(entities)) random.shuffle(unique) sampled = unique[:5] sampled = sorted(sampled,", "html_tokens, _ = model.html_tokenizer.tokenize_single(tree) if not html_tokens: return tree, list(),", "= element.attrib['href'] if url.startswith('#'): continue element.attrib['target'] = '_parent' element.attrib['href'] =", "_TEMPLATE_MAPPING['html']) sampled_entities = sample_entities(entities) sampled_groups = sample_groups(groups) base_url = yarl.URL(request.url)", "functools.reduce(lambda x,y: x and y is not None, [splash_url, splash_user,", "tree for _, element in lxml.html.etree.iterwalk(tree, events=('start', )): if not", "= webstruct_demo.config.get('SPLASH_URL', None) splash_user = webstruct_demo.config.get('SPLASH_USER', None) splash_pass = webstruct_demo.config.get('SPLASH_PASS',", "link.startswith('#'): return link try: target_url = yarl.URL(link) except: return link", "element.attrib[attr] = absolutize_link(element.attrib[attr], base_url) return tree def parent_links(tree, base_url): base_url", "tree, entity_colors=model.entity_colors, url=url ) content = lxml.html.tostring(tree, encoding='utf-8').decode('utf-8') entities =", "'entities.html', 'groups': 'groups.html'} template = _TEMPLATE_MAPPING.get(output, _TEMPLATE_MAPPING['html']) sampled_entities = sample_entities(entities)", "webstruct_demo.config.from_pyfile('config.py') def absolutize_link(link, base_url): if link.startswith('#'): return link try: target_url", "output = request.args.get('output', 'html') try: response_content, response_url = download(url) content,", "url = response_url tree = html5parser.document_fromstring(response_content) tree = remove_namespace(tree) tree", "sampled = sorted(list(set(groups)), key=lambda g:-len(g)) return sampled[:2] @webstruct_demo.route('/') def index():", "= model.model.predict([html_tokens[i:i+_TOKENS_PER_PART] for i in range(0, len(html_tokens), _TOKENS_PER_PART)]) tags =", "not in element.attrib: continue url = 
element.attrib['href'] if url.startswith('#'): continue", "'href'] try: base_url = yarl.URL(url) except: return tree for _,", "title, 'entities': entities, 'sampled_entities': sampled_entities, 'sampled_groups': sampled_groups, 'routing': routing, 'srcdoc':", "groups def sample_entities(entities): unique = list(set(entities)) random.shuffle(unique) sampled = unique[:5]", "'entities': entities, 'sampled_entities': sampled_entities, 'sampled_groups': sampled_groups, 'routing': routing, 'srcdoc': content,", "parent_links(tree, base_url): base_url = yarl.URL(base_url) for _, element in lxml.html.etree.iterwalk(tree,", "{t: str(base_url.update_query(output=t)) for t in ['html', 'entities', 'groups']} values =", "(url, ) entities = [] groups = [] _TEMPLATE_MAPPING =", "['html', 'entities', 'groups']} values = {'url': url, 'title': title, 'entities':", "'href' not in element.attrib: continue url = element.attrib['href'] if url.startswith('#'):", "= webstruct_demo.config.get('SPLASH_USER', None) splash_pass = webstruct_demo.config.get('SPLASH_PASS', None) is_splash = functools.reduce(lambda", "in _LINK_SOURCES: if attr not in element.attrib: continue element.attrib[attr] =", "download(url): splash_url = webstruct_demo.config.get('SPLASH_URL', None) splash_user = webstruct_demo.config.get('SPLASH_USER', None) splash_pass", "model = joblib.load(webstruct_demo.config['MODEL_PATH']) tree, tokens, tags = run_model(tree, model) tree", "if not is_splash: response = requests.get(url) return response.content, response.url load", "yarl.URL(url) except: return tree for _, element in lxml.html.etree.iterwalk(tree, events=('start',", "yarl import webstruct.model import webstruct.sequence_encoding import webstruct.webannotator webstruct_demo = Flask(__name__,", "list() tree = html_tokens[0].elem.getroottree().getroot() tags = model.model.predict([html_tokens[i:i+_TOKENS_PER_PART] for i in", "title = tree.xpath('//title')[0].text model = joblib.load(webstruct_demo.config['MODEL_PATH']) 
tree, tokens, tags =", "range(0, len(html_tokens), _TOKENS_PER_PART)]) tags = [i for t in tags", "return tree, html_tokens, tags def download(url): splash_url = webstruct_demo.config.get('SPLASH_URL', None)", "if not isinstance(element.tag, str): continue for attr in _LINK_SOURCES: if", "model) tree = model.html_tokenizer.detokenize_single(tokens, tags) tree = webstruct.webannotator.to_webannotator( tree, entity_colors=model.entity_colors,", "webstruct.model._drop_empty( (model.build_entity(tokens), tag) for (tokens, tag) in entities if tag", "_TEMPLATE_MAPPING.get(output, _TEMPLATE_MAPPING['html']) sampled_entities = sample_entities(entities) sampled_groups = sample_groups(groups) base_url =", "entities, 'sampled_entities': sampled_entities, 'sampled_groups': sampled_groups, 'routing': routing, 'srcdoc': content, 'groups':", "base_url.join(target_url) except: return link return str(target_url) def absolute_links(tree, url): _LINK_SOURCES", "= requests.get(url) return response.content, response.url load = {'url': url, 'images':", "sample_entities(entities) sampled_groups = sample_groups(groups) base_url = yarl.URL(request.url) routing = {t:", "entities, groups = extract_ner(response_content, response_url, request.url) except: logging.exception('Got exception') content", "return str(target_url) try: target_url = base_url.join(target_url) except: return link return", "in tags for i in t] return tree, html_tokens, tags", "unique[:5] sampled = sorted(sampled, key=lambda e:(e[1], e[0])) return sampled def", "request.args.get('output', 'html') try: response_content, response_url = download(url) content, title, entities,", "tree def remove_namespace(tree): _NS=\"{http://www.w3.org/1999/xhtml}\" for _, element in lxml.html.etree.iterwalk(tree, events=('start',", "url, 'title': title, 'entities': entities, 'sampled_entities': sampled_entities, 'sampled_groups': sampled_groups, 'routing':", "continue element.attrib[attr] = absolutize_link(element.attrib[attr], base_url) 
return tree def parent_links(tree, base_url):", "request.args.get('url', 'http://en.wikipedia.org/') output = request.args.get('output', 'html') try: response_content, response_url =", "return link if target_url.is_absolute() and not target_url.scheme: target_url = target_url.with_scheme(base_url.scheme)", ") content = lxml.html.tostring(tree, encoding='utf-8').decode('utf-8') entities = webstruct.sequence_encoding.IobEncoder.group(zip(tokens, tags)) entities", "'sampled_groups': sampled_groups, 'routing': routing, 'srcdoc': content, 'groups': groups, 'output': output}", "not isinstance(element.tag, str): continue if element.tag != 'a': continue if", "requests.post(splash_url + '/render.html', json=load, auth=requests.auth.HTTPBasicAuth(splash_user, splash_pass)) return response.content, url def", "tree = webstruct.webannotator.to_webannotator( tree, entity_colors=model.entity_colors, url=url ) content = lxml.html.tostring(tree,", "= remove_namespace(tree) tree = absolute_links(tree, url) tree = parent_links(tree, base_url)", "webstruct_demo.config.get('SPLASH_USER', None) splash_pass = webstruct_demo.config.get('SPLASH_PASS', None) is_splash = functools.reduce(lambda x,y:", "model.html_tokenizer.detokenize_single(tokens, tags) tree = webstruct.webannotator.to_webannotator( tree, entity_colors=model.entity_colors, url=url ) content", "link return str(target_url) def absolute_links(tree, url): _LINK_SOURCES = ['src', 'href']", "url} response = requests.post(splash_url + '/render.html', json=load, auth=requests.auth.HTTPBasicAuth(splash_user, splash_pass)) return", "random.shuffle(unique) sampled = unique[:5] sampled = sorted(sampled, key=lambda e:(e[1], e[0]))", "sampled def sample_groups(groups): groups = [tuple(sorted(g)) for g in groups]", "tree = model.html_tokenizer.detokenize_single(tokens, tags) tree = webstruct.webannotator.to_webannotator( tree, entity_colors=model.entity_colors, url=url", "link if target_url.is_absolute() and target_url.scheme: return link 
if target_url.is_absolute() and", "tag) for (tokens, tag) in entities if tag != 'O'", "Flask, render_template, request import joblib from lxml.html import html5parser import", "'_parent' element.attrib['href'] = str(base_url.update_query(url=url)) return tree def remove_namespace(tree): _NS=\"{http://www.w3.org/1999/xhtml}\" for", "webstruct_demo.config.get('SPLASH_PASS', None) is_splash = functools.reduce(lambda x,y: x and y is", "str(base_url.update_query(url=url)) return tree def remove_namespace(tree): _NS=\"{http://www.w3.org/1999/xhtml}\" for _, element in", "base_url): url = response_url tree = html5parser.document_fromstring(response_content) tree = remove_namespace(tree)", "requests.get(url) return response.content, response.url load = {'url': url, 'images': 0,", "= webstruct.model._drop_empty( (model.build_entity(tokens), tag) for (tokens, tag) in entities if", ")): if not isinstance(element.tag, str): continue if element.tag != 'a':", "return sampled def sample_groups(groups): groups = [tuple(sorted(g)) for g in", "x and y is not None, [splash_url, splash_user, splash_pass], True)", "parent_links(tree, base_url) title = tree.xpath('//title')[0].text model = joblib.load(webstruct_demo.config['MODEL_PATH']) tree, tokens,", "url = request.args.get('url', 'http://en.wikipedia.org/') output = request.args.get('output', 'html') try: response_content,", "model.model.predict([html_tokens[i:i+_TOKENS_PER_PART] for i in range(0, len(html_tokens), _TOKENS_PER_PART)]) tags = [i", "tag) in entities if tag != 'O' ) groups =", "sample_groups(groups) base_url = yarl.URL(request.url) routing = {t: str(base_url.update_query(output=t)) for t", "= download(url) content, title, entities, groups = extract_ner(response_content, response_url, request.url)", "base_url) return tree def parent_links(tree, base_url): base_url = yarl.URL(base_url) for", "sorted(sampled, key=lambda e:(e[1], e[0])) return sampled def sample_groups(groups): groups =", "run_model(tree, model): 
html_tokens, _ = model.html_tokenizer.tokenize_single(tree) if not html_tokens: return", "def sample_entities(entities): unique = list(set(entities)) random.shuffle(unique) sampled = unique[:5] sampled", "lxml.html.tostring(tree, encoding='utf-8').decode('utf-8') entities = webstruct.sequence_encoding.IobEncoder.group(zip(tokens, tags)) entities = webstruct.model._drop_empty( (model.build_entity(tokens),", "webstruct.model.extract_entitiy_groups( tokens, tags, dont_penalize=None, join_tokens=model.build_entity ) return content, title, entities,", "events=('start', )): if not isinstance(element.tag, str): continue if element.tag !=", "not None, [splash_url, splash_user, splash_pass], True) if not is_splash: response", "entities, groups def sample_entities(entities): unique = list(set(entities)) random.shuffle(unique) sampled =", "tree = html_tokens[0].elem.getroottree().getroot() tags = model.model.predict([html_tokens[i:i+_TOKENS_PER_PART] for i in range(0,", "'html') try: response_content, response_url = download(url) content, title, entities, groups", "continue for attr in _LINK_SOURCES: if attr not in element.attrib:", "title, entities, groups = extract_ner(response_content, response_url, request.url) except: logging.exception('Got exception')", "if not element.tag.startswith(_NS): continue element.tag = element.tag[len(_NS):] return tree _TOKENS_PER_PART", "values = {'url': url, 'title': title, 'entities': entities, 'sampled_entities': sampled_entities,", "= request.args.get('output', 'html') try: response_content, response_url = download(url) content, title,", "sorted(list(set(groups)), key=lambda g:-len(g)) return sampled[:2] @webstruct_demo.route('/') def index(): url =", "i in t] return tree, html_tokens, tags def download(url): splash_url", "None) splash_pass = webstruct_demo.config.get('SPLASH_PASS', None) is_splash = functools.reduce(lambda x,y: x", "'images': 0, 'base_url': url} response = requests.post(splash_url + '/render.html', json=load,", "in 
['html', 'entities', 'groups']} values = {'url': url, 'title': title,", "= html_tokens[0].elem.getroottree().getroot() tags = model.model.predict([html_tokens[i:i+_TOKENS_PER_PART] for i in range(0, len(html_tokens),", "for attr in _LINK_SOURCES: if attr not in element.attrib: continue", "not html_tokens: return tree, list(), list() tree = html_tokens[0].elem.getroottree().getroot() tags", ") groups = webstruct.model.extract_entitiy_groups( tokens, tags, dont_penalize=None, join_tokens=model.build_entity ) return", "html5parser.document_fromstring(response_content) tree = remove_namespace(tree) tree = absolute_links(tree, url) tree =", "request import joblib from lxml.html import html5parser import lxml.html import", "x,y: x and y is not None, [splash_url, splash_user, splash_pass],", "'entities', 'groups']} values = {'url': url, 'title': title, 'entities': entities,", "continue if 'href' not in element.attrib: continue url = element.attrib['href']", "= webstruct_demo.config.get('SPLASH_PASS', None) is_splash = functools.reduce(lambda x,y: x and y", "yarl.URL(link) except: return link if target_url.is_absolute() and target_url.scheme: return link", "tree, html_tokens, tags def download(url): splash_url = webstruct_demo.config.get('SPLASH_URL', None) splash_user", "not target_url.scheme: target_url = target_url.with_scheme(base_url.scheme) return str(target_url) try: target_url =", "target_url.scheme: target_url = target_url.with_scheme(base_url.scheme) return str(target_url) try: target_url = base_url.join(target_url)", "url) tree = parent_links(tree, base_url) title = tree.xpath('//title')[0].text model =", "in groups] sampled = sorted(list(set(groups)), key=lambda g:-len(g)) return sampled[:2] @webstruct_demo.route('/')", "tags)) entities = webstruct.model._drop_empty( (model.build_entity(tokens), tag) for (tokens, tag) in", "sampled_entities, 'sampled_groups': sampled_groups, 'routing': routing, 'srcdoc': content, 'groups': groups, 'output':", "import yarl 
import webstruct.model import webstruct.sequence_encoding import webstruct.webannotator webstruct_demo =", "response_url, base_url): url = response_url tree = html5parser.document_fromstring(response_content) tree =", "entities = webstruct.sequence_encoding.IobEncoder.group(zip(tokens, tags)) entities = webstruct.model._drop_empty( (model.build_entity(tokens), tag) for", "_, element in lxml.html.etree.iterwalk(tree, events=('start', )): if not isinstance(element.tag, str):", "= {'url': url, 'images': 0, 'base_url': url} response = requests.post(splash_url", "= [tuple(sorted(g)) for g in groups] sampled = sorted(list(set(groups)), key=lambda", "def sample_groups(groups): groups = [tuple(sorted(g)) for g in groups] sampled", "except: return link if target_url.is_absolute() and target_url.scheme: return link if", "def download(url): splash_url = webstruct_demo.config.get('SPLASH_URL', None) splash_user = webstruct_demo.config.get('SPLASH_USER', None)", "'base_url': url} response = requests.post(splash_url + '/render.html', json=load, auth=requests.auth.HTTPBasicAuth(splash_user, splash_pass))", "def remove_namespace(tree): _NS=\"{http://www.w3.org/1999/xhtml}\" for _, element in lxml.html.etree.iterwalk(tree, events=('start', )):", "base_url = yarl.URL(base_url) for _, element in lxml.html.etree.iterwalk(tree, events=('start', )):", "model): html_tokens, _ = model.html_tokenizer.tokenize_single(tree) if not html_tokens: return tree,", "sample_entities(entities): unique = list(set(entities)) random.shuffle(unique) sampled = unique[:5] sampled =", "lxml.html import html5parser import lxml.html import requests import yarl import", "import joblib from lxml.html import html5parser import lxml.html import requests", "= webstruct.sequence_encoding.IobEncoder.group(zip(tokens, tags)) entities = webstruct.model._drop_empty( (model.build_entity(tokens), tag) for (tokens," ]
[ "__author__ = '<NAME>' from setuptools import setup setup( name=\"einops\", version='0.3.2',", "deep learning operations\", long_description=open('README.md', encoding='utf-8').read(), long_description_content_type='text/markdown', url='https://github.com/arogozhnikov/einops', author='<NAME>', packages=['einops', 'einops.layers'],", "keywords='deep learning, neural networks, tensor manipulation, machine learning, ' 'scientific", "name=\"einops\", version='0.3.2', description=\"A new flavour of deep learning operations\", long_description=open('README.md',", ":: Science/Research', 'Programming Language :: Python :: 3 ', ],", "manipulation, machine learning, ' 'scientific computations, einops', install_requires=[ # no", "learning, neural networks, tensor manipulation, machine learning, ' 'scientific computations,", "packages=['einops', 'einops.layers'], classifiers=[ 'Intended Audience :: Science/Research', 'Programming Language ::", "classifiers=[ 'Intended Audience :: Science/Research', 'Programming Language :: Python ::", "encoding='utf-8').read(), long_description_content_type='text/markdown', url='https://github.com/arogozhnikov/einops', author='<NAME>', packages=['einops', 'einops.layers'], classifiers=[ 'Intended Audience ::", "setuptools import setup setup( name=\"einops\", version='0.3.2', description=\"A new flavour of", ":: Python :: 3 ', ], keywords='deep learning, neural networks,", "new flavour of deep learning operations\", long_description=open('README.md', encoding='utf-8').read(), long_description_content_type='text/markdown', url='https://github.com/arogozhnikov/einops',", "import setup setup( name=\"einops\", version='0.3.2', description=\"A new flavour of deep", "operations\", long_description=open('README.md', encoding='utf-8').read(), long_description_content_type='text/markdown', url='https://github.com/arogozhnikov/einops', author='<NAME>', packages=['einops', 'einops.layers'], classifiers=[ 'Intended", "machine learning, ' 'scientific 
computations, einops', install_requires=[ # no run-time", "long_description_content_type='text/markdown', url='https://github.com/arogozhnikov/einops', author='<NAME>', packages=['einops', 'einops.layers'], classifiers=[ 'Intended Audience :: Science/Research',", "version='0.3.2', description=\"A new flavour of deep learning operations\", long_description=open('README.md', encoding='utf-8').read(),", "'Programming Language :: Python :: 3 ', ], keywords='deep learning,", "' 'scientific computations, einops', install_requires=[ # no run-time or installation-time", "neural networks, tensor manipulation, machine learning, ' 'scientific computations, einops',", "computations, einops', install_requires=[ # no run-time or installation-time dependencies ],", "from setuptools import setup setup( name=\"einops\", version='0.3.2', description=\"A new flavour", "description=\"A new flavour of deep learning operations\", long_description=open('README.md', encoding='utf-8').read(), long_description_content_type='text/markdown',", "<gh_stars>1000+ __author__ = '<NAME>' from setuptools import setup setup( name=\"einops\",", "url='https://github.com/arogozhnikov/einops', author='<NAME>', packages=['einops', 'einops.layers'], classifiers=[ 'Intended Audience :: Science/Research', 'Programming", "of deep learning operations\", long_description=open('README.md', encoding='utf-8').read(), long_description_content_type='text/markdown', url='https://github.com/arogozhnikov/einops', author='<NAME>', packages=['einops',", "', ], keywords='deep learning, neural networks, tensor manipulation, machine learning,", "'Intended Audience :: Science/Research', 'Programming Language :: Python :: 3", "'einops.layers'], classifiers=[ 'Intended Audience :: Science/Research', 'Programming Language :: Python", "setup setup( name=\"einops\", version='0.3.2', description=\"A new flavour of deep learning", "flavour of deep learning operations\", long_description=open('README.md', encoding='utf-8').read(), 
long_description_content_type='text/markdown', url='https://github.com/arogozhnikov/einops', author='<NAME>',", "Python :: 3 ', ], keywords='deep learning, neural networks, tensor", "Language :: Python :: 3 ', ], keywords='deep learning, neural", "networks, tensor manipulation, machine learning, ' 'scientific computations, einops', install_requires=[", "einops', install_requires=[ # no run-time or installation-time dependencies ], )", "tensor manipulation, machine learning, ' 'scientific computations, einops', install_requires=[ #", "author='<NAME>', packages=['einops', 'einops.layers'], classifiers=[ 'Intended Audience :: Science/Research', 'Programming Language", "setup( name=\"einops\", version='0.3.2', description=\"A new flavour of deep learning operations\",", ":: 3 ', ], keywords='deep learning, neural networks, tensor manipulation,", "= '<NAME>' from setuptools import setup setup( name=\"einops\", version='0.3.2', description=\"A", "Science/Research', 'Programming Language :: Python :: 3 ', ], keywords='deep", "], keywords='deep learning, neural networks, tensor manipulation, machine learning, '", "3 ', ], keywords='deep learning, neural networks, tensor manipulation, machine", "Audience :: Science/Research', 'Programming Language :: Python :: 3 ',", "learning operations\", long_description=open('README.md', encoding='utf-8').read(), long_description_content_type='text/markdown', url='https://github.com/arogozhnikov/einops', author='<NAME>', packages=['einops', 'einops.layers'], classifiers=[", "'scientific computations, einops', install_requires=[ # no run-time or installation-time dependencies", "learning, ' 'scientific computations, einops', install_requires=[ # no run-time or", "'<NAME>' from setuptools import setup setup( name=\"einops\", version='0.3.2', description=\"A new", "long_description=open('README.md', encoding='utf-8').read(), long_description_content_type='text/markdown', url='https://github.com/arogozhnikov/einops', author='<NAME>', 
packages=['einops', 'einops.layers'], classifiers=[ 'Intended Audience" ]
[ "migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('website',", "django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('website', '0083_remove_aisubmission_code'), ] operations", "], ), migrations.AddField( model_name='exam', name='exampair', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='exams', to='website.exampair'),", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies", "= [ ('website', '0083_remove_aisubmission_code'), ] operations = [ migrations.AddField( model_name='exam',", "2021-02-15 19:01 from django.db import migrations, models import django.db.models.deletion class", "dependencies = [ ('website', '0083_remove_aisubmission_code'), ] operations = [ migrations.AddField(", "migrations.AddField( model_name='exam', name='division', field=models.IntegerField(default=1), preserve_default=False, ), migrations.CreateModel( name='ExamPair', fields=[ ('id',", "models.CharField(max_length=100, unique=True)), ('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='exampairs', to='website.contest')), ], ), migrations.AddField( model_name='exam',", "Migration(migrations.Migration): dependencies = [ ('website', '0083_remove_aisubmission_code'), ] operations = [", "verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='exampairs', to='website.contest')), ], ),", "class Migration(migrations.Migration): dependencies = [ ('website', '0083_remove_aisubmission_code'), ] operations =", "field=models.IntegerField(default=1), preserve_default=False, ), migrations.CreateModel( name='ExamPair', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "import django.db.models.deletion 
class Migration(migrations.Migration): dependencies = [ ('website', '0083_remove_aisubmission_code'), ]", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='exampairs', to='website.contest')), ], ), migrations.AddField( model_name='exam', name='exampair', field=models.ForeignKey(blank=True, null=True,", "to='website.contest')), ], ), migrations.AddField( model_name='exam', name='exampair', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='exams',", "on 2021-02-15 19:01 from django.db import migrations, models import django.db.models.deletion", "name='division', field=models.IntegerField(default=1), preserve_default=False, ), migrations.CreateModel( name='ExamPair', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "related_name='exampairs', to='website.contest')), ], ), migrations.AddField( model_name='exam', name='exampair', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,", "by Django 3.1.6 on 2021-02-15 19:01 from django.db import migrations,", "[ ('website', '0083_remove_aisubmission_code'), ] operations = [ migrations.AddField( model_name='exam', name='division',", "unique=True)), ('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='exampairs', to='website.contest')), ], ), migrations.AddField( model_name='exam', name='exampair',", "'0083_remove_aisubmission_code'), ] operations = [ migrations.AddField( model_name='exam', name='division', field=models.IntegerField(default=1), preserve_default=False,", "serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ('contest', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='exampairs', to='website.contest')), ],", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ('contest',", "('name', models.CharField(max_length=100, unique=True)), ('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='exampairs', to='website.contest')), ], ), migrations.AddField(", "name='ExamPair', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)),", "primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='exampairs', to='website.contest')),", "('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='exampairs', to='website.contest')), ], ), migrations.AddField( model_name='exam', name='exampair', field=models.ForeignKey(blank=True,", "Generated by Django 3.1.6 on 2021-02-15 19:01 from django.db import", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies =", "operations = [ migrations.AddField( model_name='exam', name='division', field=models.IntegerField(default=1), preserve_default=False, ), migrations.CreateModel(", "), migrations.AddField( model_name='exam', name='exampair', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='exams', to='website.exampair'), ),", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [", "] operations = [ migrations.AddField( model_name='exam', name='division', field=models.IntegerField(default=1), preserve_default=False, ),", "= [ 
migrations.AddField( model_name='exam', name='division', field=models.IntegerField(default=1), preserve_default=False, ), migrations.CreateModel( name='ExamPair',", "('website', '0083_remove_aisubmission_code'), ] operations = [ migrations.AddField( model_name='exam', name='division', field=models.IntegerField(default=1),", "migrations.AddField( model_name='exam', name='exampair', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='exams', to='website.exampair'), ), ]", "# Generated by Django 3.1.6 on 2021-02-15 19:01 from django.db", "[ migrations.AddField( model_name='exam', name='division', field=models.IntegerField(default=1), preserve_default=False, ), migrations.CreateModel( name='ExamPair', fields=[", "), migrations.CreateModel( name='ExamPair', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name',", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='exampairs',", "3.1.6 on 2021-02-15 19:01 from django.db import migrations, models import", "models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('website', '0083_remove_aisubmission_code'),", "migrations.CreateModel( name='ExamPair', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100,", "preserve_default=False, ), migrations.CreateModel( name='ExamPair', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "model_name='exam', name='division', field=models.IntegerField(default=1), preserve_default=False, ), migrations.CreateModel( name='ExamPair', fields=[ ('id', models.AutoField(auto_created=True,", "Django 3.1.6 on 
2021-02-15 19:01 from django.db import migrations, models", "19:01 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):" ]
[ "\"\"\"Measure whether the given probe is axis aligned. Args: probe", "sample.relations labels = torch.empty(len(heads), len(heads), dtype=torch.long) labels.fill_(self.indexer[self.unk]) for word, (head,", "computed from the list of samples. \"\"\" if dist is", "from ldp.parse import ptb from ldp.parse import representations as reps", "rels = self.indexer(annotations) # Find all pairs of words sharing", "pairs. dist (Optional[Union[numpy.ndarray, Sequence[float]]], optional): A distribution to use when", "ntags = train_dataset.count_unique_features() assert ntags is not None, 'no label", "(Sequence[ptb.Samples]): The samples from which to pull possible word pairs.", "to determine possible relations. unk (str): Label to use when", "tags per word type. By default, is computed from the", "each dependency label to an index. The kwargs are forwarded", "a probe on dependency label prediction. Args: train_dataset (TaskDataset): Training", "probes with the same projection. E.g. if the probe is", "(Px)A(Qy) for distinct projections P, Q. Defaults to NOT shared.", "= dep words = (sample.sentence[dep], sample.sentence[head]) labels[dep, head] = self.rels.get(words,", "many epochs, then stop training. Defaults to 4. lr (float,", "(v, w) is the index of the label describing the", "do this for pairs of words which have a head-dependent", "everything before returning it. bigrams = torch.stack([ torch.stack((representations[i], representations[j])) for", "rels[i, j]] assert pairs and len(pairs) == len(representations), 'missing edges?'", "passes through the training dataset. Defaults to 25. patience (int,", "labeling task has %d tags', ntags) if project_to is None", "scalars, there is no such shape! 
\"\"\" return () def", "len(annotations): raise ValueError(f'got {len(representations)} representations ' f'but {len(annotations)} annotations') self.representations", "range(len(self)): yield self[index] def __len__(self) -> int: \"\"\"Return the number", "to the \"unk\" label, even if there is no relationship", "has %d tags', ntags) if project_to is None or ndims", "of the label describing the relationship between word v and", "DLPIndexer: \"\"\"Map pairs of words to their syntactic relationship, if", "dependency label to an index. The kwargs are forwarded to", "%d, test accuracy %f', best_axis, accuracy) if also_log_to_wandb: wandb.log({ 'axis':", "best_accuracy = probe, -1, -1. for axis in axes: model", "(W, W) matrix where element (v, w) is the index", "is axis aligned. Args: probe (Probe): The probe to evaluate.", "= learning.test(model, dev_dataset, device=device) if accuracy > best_accuracy: best_model =", "projections from ldp.parse import ptb from ldp.parse import representations as", "(sentence_length, representation_dimension) containing word representations, and second is shape (sentence_length,)", "The probe to evaluate. dev_dataset (datasets.TaskDataset): Data used to determine", "= logging.getLogger(__name__) projection = probe.project assert projection is not None,", "from the list of samples. \"\"\" if dist is None:", "paired with optimal probe accuracy after that axis is zeroed.", "relationship, if any.\"\"\" def __init__(self, samples: Sequence[ptb.Sample], unk: str =", "len( annotations.sentence), 'diff sentence lengths?' rels = self.indexer(annotations) # Find", "to use when sampling tags per word type. By default,", "self.representations.dataset.length(index) for index in range(len(self.representations))) def count_unique_features(self) -> int: \"\"\"Return", "with the same projection. E.g. 
if the probe is bilinear", "Type[Probe] = probes.Linear, project_to: Optional[int] = None, share_projection: bool =", "test accuracy %f', best_axis, accuracy) if also_log_to_wandb: wandb.log({ 'axis': best_axis,", "pairwise probes with the same projection. E.g. if the probe", "= 1e-3, device: Optional[Device] = None, also_log_to_wandb: bool = False)", "Args: train_dataset (TaskDataset): Training data for probe. dev_dataset (TaskDataset): Validation", "First tensor is shape (sentence_length, representation_dimension) containing word representations, and", "in the dataset.\"\"\" return len(self.annotations) @property def sample_representations_shape(self) -> Sequence[int]:", "probe_t: Type[Probe] = probes.Linear, project_to: Optional[int] = None, share_projection: bool", "datasets.TaskDataset, probe_t: Type[Probe] = probes.Linear, project_to: Optional[int] = None, share_projection:", "word, (head, rel) in enumerate(zip(heads, relations)): if head == -1:", "Union[probes.Linear, probes.MLP] def train(train_dataset: datasets.TaskDataset, dev_dataset: datasets.TaskDataset, test_dataset: datasets.TaskDataset, probe_t:", "relations = sample.heads, sample.relations labels = torch.empty(len(heads), len(heads), dtype=torch.long) labels.fill_(self.indexer[self.unk])", "if len(representations) != len(annotations): raise ValueError(f'got {len(representations)} representations ' f'but", "4. lr (float, optional): Learning rate for optimizer. Defaults to", "tags', ntags) if project_to is None or ndims == project_to:", "have a head-dependent relationship in the original dataset. 
Args: samples", "[(i, j) for i in indexes for j in indexes", "def __len__(self) -> int: \"\"\"Return the number of sentences (batches)", "of unique labels for this task.\"\"\" return len(self.indexer) class ControlDLPIndexer:", "import Device import numpy import torch import wandb UNK =", "returns shape (W, W) matrix where element (v, w) is", "index in range(len(self.representations))) def count_unique_features(self) -> int: \"\"\"Return number of", "1 self.rels[words] = rel def __call__(self, sample: ptb.Sample) -> torch.Tensor:", "trained probe and its test accuracy. \"\"\" log = logging.getLogger(__name__)", "kwargs are forwarded to indexer when it is instantiated. Args:", "to 1e-3. device (Optional[Device], optional): Torch device on which to", "i, j in pairs ]) labels = torch.stack([rels[i, j] for", "= (sample.sentence[dep], sample.sentence[head]) labels[dep, head] = self.rels.get(words, 0) return labels", "set, log training data to wandb. By default, wandb is", "import logging from typing import (Any, Dict, Iterator, Optional, Sequence,", "__len__(self) -> int: \"\"\"Return the number of unique labels for", "pairs of words which have a head-dependent relationship in the", "sorted(ablated | {axis})] = 0 accuracy = learning.test(model, dev_dataset, device=device)", "on dependency label prediction. Args: train_dataset (TaskDataset): Training data for", "in pairs ]) labels = torch.stack([rels[i, j] for i, j", "POS tags. \"\"\" representations = self.representations[index] annotations = self.annotations[index] assert", "len(representations) != len(annotations): raise ValueError(f'got {len(representations)} representations ' f'but {len(annotations)}", "# Find all pairs of words sharing an edge. indexes", "probe is axis aligned. Args: probe (Probe): The probe to", "unique POS seen in data.\"\"\" return len(self.indexer) # Define the", "'unk' class DLPIndexer: \"\"\"Map pairs of words to their syntactic", "Tuple[Probe, float]: The trained probe and its test accuracy. 
\"\"\"", "tags are integral scalars, there is no such shape! \"\"\"", "Args: samples (Sequence[ptb.Samples]): The samples from which to pull possible", "for this many epochs, then stop training. Defaults to 4.", "range(len(self.representations))) def count_unique_features(self) -> int: \"\"\"Return number of unique POS", "type to train. Defaults to probes.Linear. project_to (Optional[int], optional): Project", "the probe is bilinear of the form xAy, we will", "== len( annotations.sentence), 'diff sentence lengths?' rels = self.indexer(annotations) #", "i in indexes for j in indexes if rels[i, j]]", "POS. def axis_alignment( probe: Probe, dev_dataset: datasets.TaskDataset, test_dataset: datasets.TaskDataset, device:", "sample.relations} self.indexer = {unk: 0} for label in sorted(labels): self.indexer[label]", "import representations as reps from ldp.utils.typing import Device import numpy", "encountered. \"\"\" labels = {rel for sample in samples for", "\"\"\"Return the shape of each individual POS tag. Since POS", "representations self.annotations = annotations kwargs = kwargs.copy() kwargs.setdefault('samples', annotations) self.indexer", "samples from which to pull possible word pairs. dist (Optional[Union[numpy.ndarray,", "representations (representations.RepresentationsLayerDataset): Word representations corresponding to the words to be", "ntags is not None, 'no label count, is dataset for", "dimensionality of the representation pairs.\"\"\" return (2, self.representations.dataset.dimension) @property def", "the sentence in the dataset. Returns: Tuple[torch.Tensor, torch.Tensor]: First tensor", "return sum( self.representations.dataset.length(index) for index in range(len(self.representations))) def count_unique_features(self) ->", "samples (Sequence[ptb.Samples]): The samples from which to pull possible word", "head == -1: head = dep words = (sentence[dep], sentence[head])", "relation label to an arbitrary (integer) label. 
We only do", "str], int] = {} for sample in samples: sentence =", "TODO(evandez): May as well commonize this, since it's shared with", "= train_dataset.sample_representations_shape[-1] log.info('representations have dimension %d', ndims) ntags = train_dataset.count_unique_features()", "return () def count_samples(self) -> int: \"\"\"Return the number of", "== -1: labels[word, word] = self.indexer[rel] else: label = self.indexer.get(rel,", "(bool, optional): If set, log results to wandb. Returns: Sequence[Tuple[int,", "and w, if any. Defaults to the \"unk\" label, even", "determine the effect of cutting an axis. device (Optional[Device], optional):", "original dataset. Args: samples (Sequence[ptb.Samples]): The samples from which to", "Defaults to CPU. also_log_to_wandb (Optional[pathlib.Path], optional): If set, log training", "labels = torch.empty(len(heads), len(heads), dtype=torch.long) labels.fill_(self.indexer[self.unk]) for word, (head, rel)", "projection = projections.Projection(ndims, project_to) else: projection = projections.Projection(2 * ndims,", "also_log_to_wandb: wandb.log({ 'axis': best_axis, 'dev accuracy': best_accuracy, 'test accuracy': accuracy,", "projecting') projection = None elif share_projection: projection = projections.Projection(ndims, project_to)", "an integer. Args: samples (Sequence[ptb.Sample]): The samples from which to", "optional): Maximum passes through the training dataset. Defaults to 25.", "ndims), ntags, project=projection) learning.train(probe, train_dataset, dev_dataset=dev_dataset, stopper=learning.EarlyStopping(patience=patience), epochs=epochs, lr=lr, device=device,", "logging from typing import (Any, Dict, Iterator, Optional, Sequence, Set,", "dependency label annotations to integer tensors. 
Instantiated with given annotations", "self.indexer[rel] else: label = self.indexer.get(rel, self.indexer[self.unk]) labels[word, head] = label", "int = 4, lr: float = 1e-3, device: Optional[Device] =", "# Add one so that 0 is reserved for \"no", "in range(len(self)): yield self[index] def __len__(self) -> int: \"\"\"Return the", "project_to: Optional[int] = None, share_projection: bool = False, epochs: int", "import copy import logging from typing import (Any, Dict, Iterator,", "this task. Probe = Union[probes.Linear, probes.MLP] def train(train_dataset: datasets.TaskDataset, dev_dataset:", "j]] assert pairs and len(pairs) == len(representations), 'missing edges?' #", "to an arbitrary (integer) label. We only do this for", "including the null one.\"\"\" return len(self.dist) + 1 class DLPTaskDataset(datasets.TaskDataset):", "accuracy %f', best_axis, accuracy) if also_log_to_wandb: wandb.log({ 'axis': best_axis, 'dev", "indexes if rels[i, j]] assert pairs and len(pairs) == len(representations),", "distribution to use when sampling tags per word type. By", "annotations) self.indexer = indexer(**kwargs) def __getitem__(self, index: int) -> Tuple[torch.Tensor,", "to use when un-indexed dependency label is encountered. \"\"\" labels", "labels. Args: sample (ptb.Sample): The sample to label. Returns: torch.Tensor:", "the valid probe types for this task. Probe = Union[probes.Linear,", "torch.Tensor]]: \"\"\"Yield all (sentence representations, sentence POS tags) samples.\"\"\" for", "shape of each individual POS tag. Since POS tags are", "projection?' axes = set(range(projection.project.in_features)) ablated: Set[int] = set() accuracies =", "bigrams, labels def __iter__(self) -> Iterator[Tuple[torch.Tensor, torch.Tensor]]: \"\"\"Yield all (sentence", "__len__(self) -> int: \"\"\"Return the number of relationships, including the", "when it is instantiated. 
Args: representations (representations.RepresentationsLayerDataset): Word representations corresponding", "= representations self.annotations = annotations kwargs = kwargs.copy() kwargs.setdefault('samples', annotations)", "projection?' model.project.project.weight.data[:, sorted(ablated | {axis})] = 0 accuracy = learning.test(model,", "given annotations unless the samples keyword is set in kwargs.", "heads, relations = sample.heads, sample.relations labels = torch.empty(len(heads), len(heads), dtype=torch.long)", "assert ntags is not None, 'no label count, is dataset", "there is no relationship between v and w. \"\"\" heads,", "Learning rate for optimizer. Defaults to 1e-3. device (Optional[Device], optional):", "length W sentence, returns shape (W, W) matrix where element", "keyword is set in kwargs. Raises: ValueError: If number of", "probe. Defaults to CPU. also_log_to_wandb (Optional[pathlib.Path], optional): If set, log", "= probe.project assert projection is not None, 'no projection?' axes", "each relation label to an integer. Args: samples (Sequence[ptb.Sample]): The", "numpy.array([float(count) for count in counts.values()]) dist /= numpy.sum(dist) assert dist", "between v and w. \"\"\" heads = sample.heads labels =", "{} for sample in samples: sentence = sample.sentence heads =", "v and w. \"\"\" heads = sample.heads labels = torch.zeros(len(heads),", "number of sentences (batches) in the dataset.\"\"\" return len(self.annotations) @property", "bool = False, epochs: int = 25, patience: int =", "project_to (Optional[int], optional): Project representations to this dimensionality. Defaults to", "annotations kwargs = kwargs.copy() kwargs.setdefault('samples', annotations) self.indexer = indexer(**kwargs) def", "False, epochs: int = 25, patience: int = 4, lr:", "the index of the label describing the relationship between word", "Args: probe (Probe): The probe to evaluate. 
dev_dataset (datasets.TaskDataset): Data", "DLPTaskDataset(datasets.TaskDataset): \"\"\"Iterate over (word representation pair, dependency label) pairs.\"\"\" def", "a head-dependent relationship in the original dataset. Args: samples (Sequence[ptb.Samples]):", "null one.\"\"\" return len(self.dist) + 1 class DLPTaskDataset(datasets.TaskDataset): \"\"\"Iterate over", "+ 1 class DLPTaskDataset(datasets.TaskDataset): \"\"\"Iterate over (word representation pair, dependency", "and its test accuracy. \"\"\" log = logging.getLogger(__name__) device =", "learning.test(model, dev_dataset, device=device) if accuracy > best_accuracy: best_model = model", "May as well commonize this, since it's shared with POS.", "def count_unique_features(self) -> int: \"\"\"Return number of unique POS seen", "ndims, 2 * project_to) probe = probe_t(2 * (project_to or", "return len(self.annotations) @property def sample_representations_shape(self) -> Sequence[int]: \"\"\"Return the dimensionality", "Word representations corresponding to the words to be paired and", "j] for i, j in pairs]) return bigrams, labels def", "to an integer. Args: samples (Sequence[ptb.Sample]): The samples from which", "describing the relationship between word v and w, if any.", "= (sentence[dep], sentence[head]) if words not in self.rels: # Add", "'uninitialized distribution?' self.dist = dist self.rels: Dict[Tuple[str, str], int] =", "@property def sample_features_shape(self) -> Sequence[int]: \"\"\"Return the shape of each", "probe and its test accuracy. 
\"\"\" log = logging.getLogger(__name__) device", "): \"\"\"Initialize dataset by mapping each dependency label to an", "ControlDLPIndexer: \"\"\"Map pairs of words to arbitrary syntactic relationships.\"\"\" def", "in sorted(labels): self.indexer[label] = len(self.indexer) self.unk = unk def __call__(self,", "reps.RepresentationLayerDataset, annotations: Sequence[ptb.Sample], indexer: Type[Union[DLPIndexer, ControlDLPIndexer]] = DLPIndexer, **kwargs: Any,", "to wandb. Returns: Sequence[Tuple[int, float]]: The ablated axes paired with", "= rel def __call__(self, sample: ptb.Sample) -> torch.Tensor: \"\"\"Map all", "early stopping. test_dataset (TaskDataset): Test data for probe, used to", "Torch device on which to train probe. Defaults to CPU.", "optional): Torch device on which to train probe. Defaults to", "def __init__(self, samples: Sequence[ptb.Sample], dist: Optional[Union[numpy.ndarray, Sequence[float]]] = None): \"\"\"Map", "is set in kwargs. Raises: ValueError: If number of representations/annotations", "words not in self.rels: # Add one so that 0", "dep words = (sample.sentence[dep], sample.sentence[head]) labels[dep, head] = self.rels.get(words, 0)", "Validation data for probe, used for early stopping. test_dataset (TaskDataset):", "pair, dependency label) pairs.\"\"\" def __init__( self, representations: reps.RepresentationLayerDataset, annotations:", "in the dataset.\"\"\" return sum( self.representations.dataset.length(index) for index in range(len(self.representations)))", "there is no such shape! \"\"\" return () def count_samples(self)", "dataset. Args: samples (Sequence[ptb.Samples]): The samples from which to pull", "__len__(self) -> int: \"\"\"Return the number of sentences (batches) in", "sentences (batches) in the dataset.\"\"\" return len(self.annotations) @property def sample_representations_shape(self)", "to train. Defaults to probes.Linear. 
project_to (Optional[int], optional): Project representations", "best_model = model best_axis = axis best_accuracy = accuracy accuracy", "j in pairs]) return bigrams, labels def __iter__(self) -> Iterator[Tuple[torch.Tensor,", "train_dataset, dev_dataset=dev_dataset, stopper=learning.EarlyStopping(patience=patience), epochs=epochs, lr=lr, device=device, also_log_to_wandb=also_log_to_wandb) accuracy = learning.test(probe,", "dev loss to not improve for this many epochs, then", "Sequence[float]]] = None): \"\"\"Map each relation label to an arbitrary", "If number of representations/annotations do not match. \"\"\" if len(representations)", "= self.indexer(annotations) # Find all pairs of words sharing an", "lengths?' rels = self.indexer(annotations) # Find all pairs of words", "share_projection: bool = False, epochs: int = 25, patience: int", "train_dataset.sample_representations_shape[-1] log.info('representations have dimension %d', ndims) ntags = train_dataset.count_unique_features() assert", "aligned. Args: probe (Probe): The probe to evaluate. dev_dataset (datasets.TaskDataset):", "axes to cut. test_dataset (datasets.TaskDataset): Data used to determine the", "probe, used to compute final accuracy after training. probe_t (Type[Probe],", "sample.heads, sample.relations labels = torch.empty(len(heads), len(heads), dtype=torch.long) labels.fill_(self.indexer[self.unk]) for word,", "patience: int = 4, lr: float = 1e-3, device: Optional[Device]", "best_axis, accuracy) if also_log_to_wandb: wandb.log({ 'axis': best_axis, 'dev accuracy': best_accuracy,", "to cut. test_dataset (datasets.TaskDataset): Data used to determine the effect", "opposed to (Px)A(Qy) for distinct projections P, Q. Defaults to", "float]: The trained probe and its test accuracy. \"\"\" log", "Sequence[ptb.Sample], unk: str = UNK): \"\"\"Map each relation label to", "Defaults to 4. 
lr (float, optional): Learning rate for optimizer.", "def axis_alignment( probe: Probe, dev_dataset: datasets.TaskDataset, test_dataset: datasets.TaskDataset, device: Optional[Device]", "]) labels = torch.stack([rels[i, j] for i, j in pairs])", "count_samples(self) -> int: \"\"\"Return the number of words in the", "ndims == project_to: logging.info('projection dim = reps dim, not projecting')", "learning from ldp.models import probes, projections from ldp.parse import ptb", "number of representations/annotations do not match. \"\"\" if len(representations) !=", "annotations (Sequence[ptb.PTBSample]): The PTB annotations from which to pull dependency", "seen in data.\"\"\" return len(self.indexer) # Define the valid probe", "accuracies = [] while axes: best_model, best_axis, best_accuracy = probe,", "torch.Tensor: For length W sentence, returns shape (W, W) matrix", "Union) from ldp import datasets, learning from ldp.models import probes,", "and w. \"\"\" heads = sample.heads labels = torch.zeros(len(heads), len(heads),", "not None, 'no label count, is dataset for different task?'", "test_dataset, device=device) log.info('ablating axis %d, test accuracy %f', best_axis, accuracy)", "of words to their syntactic relationship, if any.\"\"\" def __init__(self,", "integer tensors. Instantiated with given annotations unless the samples keyword", "for label in sorted(labels): self.indexer[label] = len(self.indexer) self.unk = unk", "sample.sentence[head]) labels[dep, head] = self.rels.get(words, 0) return labels def __len__(self)", "(word, word) pairs to labels. 
Args: sample (ptb.Sample): The sample", "sentence[head]) if words not in self.rels: # Add one so", "representation pair, dependency label) pairs.\"\"\" def __init__( self, representations: reps.RepresentationLayerDataset,", "set, project the left and right components of pairwise probes", "probes.MLP] def train(train_dataset: datasets.TaskDataset, dev_dataset: datasets.TaskDataset, test_dataset: datasets.TaskDataset, probe_t: Type[Probe]", "for sample in samples: sentence = sample.sentence heads = sample.heads", "numpy import torch import wandb UNK = 'unk' class DLPIndexer:", "for index'th sentence. Args: index (int): Index of the sentence", "to labels. Args: sample (ptb.Sample): The sample to label. Returns:", "also_log_to_wandb: bool = False) -> Tuple[Probe, float]: \"\"\"Train a probe", "not used. Returns: Tuple[Probe, float]: The trained probe and its", "use when un-indexed dependency label is encountered. \"\"\" labels =", "self.annotations = annotations kwargs = kwargs.copy() kwargs.setdefault('samples', annotations) self.indexer =", "head] = label return labels def __len__(self) -> int: \"\"\"Return", "def __init__( self, representations: reps.RepresentationLayerDataset, annotations: Sequence[ptb.Sample], indexer: Type[Union[DLPIndexer, ControlDLPIndexer]]", "pairs = [(i, j) for i in indexes for j", "paired and labeled. annotations (Sequence[ptb.PTBSample]): The PTB annotations from which", "By default, wandb is not used. Returns: Tuple[Probe, float]: The", "to be paired and labeled. annotations (Sequence[ptb.PTBSample]): The PTB annotations", "= {rel for sample in samples for rel in sample.relations}", "0 is reserved for \"no relationship\" tag. rel = numpy.random.choice(len(dist),", "v and w. \"\"\" heads, relations = sample.heads, sample.relations labels", "to CPU. also_log_to_wandb (Optional[pathlib.Path], optional): If set, log training data", "an edge. 
indexes = set(range(len(representations))) pairs = [(i, j) for", "Set[int] = set() accuracies = [] while axes: best_model, best_axis,", "is dataset for different task?' log.info('dependency labeling task has %d", "before returning it. bigrams = torch.stack([ torch.stack((representations[i], representations[j])) for i,", "logging.getLogger(__name__) device = device or 'cpu' ndims = train_dataset.sample_representations_shape[-1] log.info('representations", "data.\"\"\" return len(self.indexer) # Define the valid probe types for", "\"\"\"Map pairs of words to arbitrary syntactic relationships.\"\"\" def __init__(self,", "representations, sentence POS tags) samples.\"\"\" for index in range(len(self)): yield", "final accuracy after training. probe_t (Type[Probe], optional): Probe type to", "since it's shared with POS. def axis_alignment( probe: Probe, dev_dataset:", "counts.values()]) dist /= numpy.sum(dist) assert dist is not None, 'uninitialized", "device or 'cpu' ndims = train_dataset.sample_representations_shape[-1] log.info('representations have dimension %d',", "log results to wandb. Returns: Sequence[Tuple[int, float]]: The ablated axes", "that 0 is reserved for \"no relationship\" tag. rel =", "words sharing an edge. indexes = set(range(len(representations))) pairs = [(i,", "is shape (sentence_length, representation_dimension) containing word representations, and second is", "ldp.parse import representations as reps from ldp.utils.typing import Device import", "to this dimensionality. Defaults to no projection. share_projection (bool): If", "all pairs of words sharing an edge. indexes = set(range(len(representations)))", "the number of words in the dataset.\"\"\" return sum( self.representations.dataset.length(index)", "in samples: sentence = sample.sentence heads = sample.heads for dep,", "kwargs. 
Raises: ValueError: If number of representations/annotations do not match.", "-1: labels[word, word] = self.indexer[rel] else: label = self.indexer.get(rel, self.indexer[self.unk])", "sentence. Args: index (int): Index of the sentence in the", "\"\"\"Core experiments for the dependency label prediction task.\"\"\" import collections", "in range(len(self.representations))) def count_unique_features(self) -> int: \"\"\"Return number of unique", "Dict[str, int] = collections.defaultdict(lambda: 0) for sample in samples: for", "index of the label describing the relationship between word v", "\"\"\"Return the number of words in the dataset.\"\"\" return sum(", "use for mapping PTB dependency label annotations to integer tensors.", "if accuracy > best_accuracy: best_model = model best_axis = axis", "axis best_accuracy = accuracy accuracy = learning.test(best_model, test_dataset, device=device) log.info('ablating", "the given probe is axis aligned. Args: probe (Probe): The", "Test data for probe, used to compute final accuracy after", "wandb. By default, wandb is not used. Returns: Tuple[Probe, float]:", "self.indexer = {unk: 0} for label in sorted(labels): self.indexer[label] =", "of words which have a head-dependent relationship in the original", "word) pairs to labels. Args: sample (ptb.Sample): The sample to", "dataset. Defaults to 25. patience (int, optional): Allow dev loss", "for word, (head, rel) in enumerate(zip(heads, relations)): if head ==", "it. bigrams = torch.stack([ torch.stack((representations[i], representations[j])) for i, j in", "CPU. 
also_log_to_wandb (bool, optional): If set, log results to wandb.", "in sample.relations: counts[relation] += 1 dist = numpy.array([float(count) for count", "datasets.TaskDataset, test_dataset: datasets.TaskDataset, probe_t: Type[Probe] = probes.Linear, project_to: Optional[int] =", "dim = reps dim, not projecting') projection = None elif", "axes paired with optimal probe accuracy after that axis is", "raise ValueError(f'got {len(representations)} representations ' f'but {len(annotations)} annotations') self.representations =", "(datasets.TaskDataset): Data used to determine the effect of cutting an", "task. Probe = Union[probes.Linear, probes.MLP] def train(train_dataset: datasets.TaskDataset, dev_dataset: datasets.TaskDataset,", "sample_representations_shape(self) -> Sequence[int]: \"\"\"Return the dimensionality of the representation pairs.\"\"\"", "1 dist = numpy.array([float(count) for count in counts.values()]) dist /=", "the dependency label prediction task.\"\"\" import collections import copy import", "for sample in samples for rel in sample.relations} self.indexer =", "%f', best_axis, accuracy) if also_log_to_wandb: wandb.log({ 'axis': best_axis, 'dev accuracy':", "[] while axes: best_model, best_axis, best_accuracy = probe, -1, -1.", "1 class DLPTaskDataset(datasets.TaskDataset): \"\"\"Iterate over (word representation pair, dependency label)", "1e-3. device (Optional[Device], optional): Torch device on which to train", "(datasets.TaskDataset): Data used to determine which axes to cut. test_dataset", "sample.relations: counts[relation] += 1 dist = numpy.array([float(count) for count in", "from which to determine possible relations. unk (str): Label to", "for rel in sample.relations} self.indexer = {unk: 0} for label", "in indexes for j in indexes if rels[i, j]] assert", "\"\"\" if dist is None: counts: Dict[str, int] = collections.defaultdict(lambda:", "len(representations) == len( annotations.sentence), 'diff sentence lengths?' 
rels = self.indexer(annotations)", "torch.Tensor: \"\"\"Map all possible (word, word) pairs to labels. Args:", "training. probe_t (Type[Probe], optional): Probe type to train. Defaults to", "Defaults to CPU. also_log_to_wandb (bool, optional): If set, log results", "for probe, used to compute final accuracy after training. probe_t", "If set, project the left and right components of pairwise", "in the dataset. Returns: Tuple[torch.Tensor, torch.Tensor]: First tensor is shape", "head] = self.rels.get(words, 0) return labels def __len__(self) -> int:", "(float, optional): Learning rate for optimizer. Defaults to 1e-3. device", "sample.heads for dep, head in enumerate(heads): if head == -1:", "ntags) if project_to is None or ndims == project_to: logging.info('projection", "sentence POS tags) samples.\"\"\" for index in range(len(self)): yield self[index]", "will always compute (Px)A(Py) as opposed to (Px)A(Qy) for distinct", "integer. Args: samples (Sequence[ptb.Sample]): The samples from which to determine", "as opposed to (Px)A(Qy) for distinct projections P, Q. Defaults", "data to wandb. By default, wandb is not used. Returns:", "\"\"\"Return the number of sentences (batches) in the dataset.\"\"\" return", "self.annotations[index] assert len(representations) == len( annotations.sentence), 'diff sentence lengths?' rels", "to arbitrary syntactic relationships.\"\"\" def __init__(self, samples: Sequence[ptb.Sample], dist: Optional[Union[numpy.ndarray,", "= kwargs.copy() kwargs.setdefault('samples', annotations) self.indexer = indexer(**kwargs) def __getitem__(self, index:", "relationship in the original dataset. Args: samples (Sequence[ptb.Samples]): The samples", "PTB dependency label annotations to integer tensors. Instantiated with given", "containing word representations, and second is shape (sentence_length,) containing integral", "Sequence[int]: \"\"\"Return the shape of each individual POS tag. Since", "(representations, integral POS tags) for index'th sentence. 
Args: index (int):", "Defaults to no projection. share_projection (bool): If set, project the", "training data to wandb. By default, wandb is not used.", "= 4, lr: float = 1e-3, device: Optional[Device] = None,", "= numpy.random.choice(len(dist), p=dist) + 1 self.rels[words] = rel def __call__(self,", "'cpu' ndims = train_dataset.sample_representations_shape[-1] log.info('representations have dimension %d', ndims) ntags", "to their syntactic relationship, if any.\"\"\" def __init__(self, samples: Sequence[ptb.Sample],", "The sample to label. Returns: torch.Tensor: For length W sentence,", "Dict[Tuple[str, str], int] = {} for sample in samples: sentence", "it's shared with POS. def axis_alignment( probe: Probe, dev_dataset: datasets.TaskDataset,", "is no relationship between v and w. \"\"\" heads =", "pairs.\"\"\" return (2, self.representations.dataset.dimension) @property def sample_features_shape(self) -> Sequence[int]: \"\"\"Return", "'diff sentence lengths?' rels = self.indexer(annotations) # Find all pairs", "datasets.TaskDataset, dev_dataset: datasets.TaskDataset, test_dataset: datasets.TaskDataset, probe_t: Type[Probe] = probes.Linear, project_to:", "j) for i in indexes for j in indexes if", "determine possible relations. unk (str): Label to use when un-indexed", "best_accuracy = accuracy accuracy = learning.test(best_model, test_dataset, device=device) log.info('ablating axis", "len(self.indexer) self.unk = unk def __call__(self, sample: ptb.Sample) -> torch.Tensor:", "= False, epochs: int = 25, patience: int = 4,", "sample in samples: for relation in sample.relations: counts[relation] += 1", "representations[j])) for i, j in pairs ]) labels = torch.stack([rels[i,", "W) matrix where element (v, w) is the index of", "match. \"\"\" if len(representations) != len(annotations): raise ValueError(f'got {len(representations)} representations", "head == -1: labels[word, word] = self.indexer[rel] else: label =", "to train probe. Defaults to CPU. 
also_log_to_wandb (bool, optional): If", "for i, j in pairs]) return bigrams, labels def __iter__(self)", "in counts.values()]) dist /= numpy.sum(dist) assert dist is not None,", "sampling tags per word type. By default, is computed from", "labels def __len__(self) -> int: \"\"\"Return the number of unique", "word] = self.indexer[rel] else: label = self.indexer.get(rel, self.indexer[self.unk]) labels[word, head]", "any.\"\"\" def __init__(self, samples: Sequence[ptb.Sample], unk: str = UNK): \"\"\"Map", "labels = {rel for sample in samples for rel in", "head = dep words = (sentence[dep], sentence[head]) if words not", "= self.representations[index] annotations = self.annotations[index] assert len(representations) == len( annotations.sentence),", "of the form xAy, we will always compute (Px)A(Py) as", "is not None, 'no label count, is dataset for different", "when un-indexed dependency label is encountered. \"\"\" labels = {rel", "word v and w, if any. Defaults to the \"unk\"", "indexes for j in indexes if rels[i, j]] assert pairs", "axes = set(range(projection.project.in_features)) ablated: Set[int] = set() accuracies = []", "= dist self.rels: Dict[Tuple[str, str], int] = {} for sample", "dependency label) pairs.\"\"\" def __init__( self, representations: reps.RepresentationLayerDataset, annotations: Sequence[ptb.Sample],", "relationship\" tag. rel = numpy.random.choice(len(dist), p=dist) + 1 self.rels[words] =", "UNK): \"\"\"Map each relation label to an integer. Args: samples", "relationship between v and w. \"\"\" heads, relations = sample.heads,", "and labeled. annotations (Sequence[ptb.PTBSample]): The PTB annotations from which to", "Sequence[ptb.Sample], dist: Optional[Union[numpy.ndarray, Sequence[float]]] = None): \"\"\"Map each relation label", "Args: representations (representations.RepresentationsLayerDataset): Word representations corresponding to the words to", "distribution?' 
self.dist = dist self.rels: Dict[Tuple[str, str], int] = {}", "W sentence, returns shape (W, W) matrix where element (v,", "(Optional[int], optional): Project representations to this dimensionality. Defaults to no", "if any. Defaults to the \"unk\" label, even if there", "-> Tuple[torch.Tensor, torch.Tensor]: \"\"\"Return (representations, integral POS tags) for index'th", "is not None, 'no projection?' axes = set(range(projection.project.in_features)) ablated: Set[int]", "E.g. if the probe is bilinear of the form xAy,", "= UNK): \"\"\"Map each relation label to an integer. Args:", "head = dep words = (sample.sentence[dep], sample.sentence[head]) labels[dep, head] =", "word representations, and second is shape (sentence_length,) containing integral POS", "collections.defaultdict(lambda: 0) for sample in samples: for relation in sample.relations:", "improve for this many epochs, then stop training. Defaults to", "set, log results to wandb. Returns: Sequence[Tuple[int, float]]: The ablated", "whether the given probe is axis aligned. Args: probe (Probe):", "evaluate. dev_dataset (datasets.TaskDataset): Data used to determine which axes to", "= None, share_projection: bool = False, epochs: int = 25,", "its test accuracy. \"\"\" log = logging.getLogger(__name__) device = device", "int] = collections.defaultdict(lambda: 0) for sample in samples: for relation", "all (sentence representations, sentence POS tags) samples.\"\"\" for index in", "for i, j in pairs ]) labels = torch.stack([rels[i, j]", "is not used. Returns: Tuple[Probe, float]: The trained probe and", "axes: model = copy.deepcopy(best_model).eval() assert model.project is not None, 'no", "Device import numpy import torch import wandb UNK = 'unk'", "train. Defaults to probes.Linear. project_to (Optional[int], optional): Project representations to", "instantiated. 
Args: representations (representations.RepresentationsLayerDataset): Word representations corresponding to the words", "(str): Label to use when un-indexed dependency label is encountered.", "ptb from ldp.parse import representations as reps from ldp.utils.typing import", "sample.heads labels = torch.zeros(len(heads), len(heads), dtype=torch.long) for dep, head in", "= numpy.array([float(count) for count in counts.values()]) dist /= numpy.sum(dist) assert", "kwargs.copy() kwargs.setdefault('samples', annotations) self.indexer = indexer(**kwargs) def __getitem__(self, index: int)", "= probe, -1, -1. for axis in axes: model =", "share_projection: projection = projections.Projection(ndims, project_to) else: projection = projections.Projection(2 *", "ldp.models import probes, projections from ldp.parse import ptb from ldp.parse", "assert pairs and len(pairs) == len(representations), 'missing edges?' # Stack", "the left and right components of pairwise probes with the", "no such shape! \"\"\" return () def count_samples(self) -> int:", "Project representations to this dimensionality. Defaults to no projection. share_projection", "%d', ndims) ntags = train_dataset.count_unique_features() assert ntags is not None,", "numpy.random.choice(len(dist), p=dist) + 1 self.rels[words] = rel def __call__(self, sample:", "integral scalars, there is no such shape! \"\"\" return ()", "over (word representation pair, dependency label) pairs.\"\"\" def __init__( self,", "task?' log.info('dependency labeling task has %d tags', ntags) if project_to", "for mapping PTB dependency label annotations to integer tensors. Instantiated", "(integer) label. We only do this for pairs of words", "0) return labels def __len__(self) -> int: \"\"\"Return the number", "= probe_t(2 * (project_to or ndims), ntags, project=projection) learning.train(probe, train_dataset,", "which axes to cut. 
test_dataset (datasets.TaskDataset): Data used to determine", "compute (Px)A(Py) as opposed to (Px)A(Qy) for distinct projections P,", "-1: head = dep words = (sample.sentence[dep], sample.sentence[head]) labels[dep, head]", "sample (ptb.Sample): The sample to label. Returns: torch.Tensor: For length", "label) pairs.\"\"\" def __init__( self, representations: reps.RepresentationLayerDataset, annotations: Sequence[ptb.Sample], indexer:", "probe = probe_t(2 * (project_to or ndims), ntags, project=projection) learning.train(probe,", "= [] while axes: best_model, best_axis, best_accuracy = probe, -1,", "len(representations), 'missing edges?' # Stack everything before returning it. bigrams", "of relationships, including the null one.\"\"\" return len(self.dist) + 1", "\"\"\"Map all possible (word, word) pairs to labels. Args: sample", "The ablated axes paired with optimal probe accuracy after that", "per word type. By default, is computed from the list", "Probe = Union[probes.Linear, probes.MLP] def train(train_dataset: datasets.TaskDataset, dev_dataset: datasets.TaskDataset, test_dataset:", "not in self.rels: # Add one so that 0 is", "dataset. Returns: Tuple[torch.Tensor, torch.Tensor]: First tensor is shape (sentence_length, representation_dimension)", "-1: head = dep words = (sentence[dep], sentence[head]) if words", "from ldp.utils.typing import Device import numpy import torch import wandb", "device=device, also_log_to_wandb=also_log_to_wandb) accuracy = learning.test(probe, test_dataset, device=device) return probe, accuracy", "(sentence_length,) containing integral POS tags. \"\"\" representations = self.representations[index] annotations", "sum( self.representations.dataset.length(index) for index in range(len(self.representations))) def count_unique_features(self) -> int:", "accuracy > best_accuracy: best_model = model best_axis = axis best_accuracy", "accuracy after training. 
probe_t (Type[Probe], optional): Probe type to train.", "un-indexed dependency label is encountered. \"\"\" labels = {rel for", "= set(range(projection.project.in_features)) ablated: Set[int] = set() accuracies = [] while", "(Sequence[ptb.PTBSample]): The PTB annotations from which to pull dependency labels.", "None, 'no label count, is dataset for different task?' log.info('dependency", "to an index. The kwargs are forwarded to indexer when", "an axis. device (Optional[Device], optional): Torch device on which to", "def __call__(self, sample: ptb.Sample) -> torch.Tensor: \"\"\"Map all possible (word,", "possible (word, word) pairs to labels. Args: sample (ptb.Sample): The", "relationships, including the null one.\"\"\" return len(self.dist) + 1 class", "= reps dim, not projecting') projection = None elif share_projection:", "kwargs = kwargs.copy() kwargs.setdefault('samples', annotations) self.indexer = indexer(**kwargs) def __getitem__(self,", "components of pairwise probes with the same projection. E.g. if", "The kwargs are forwarded to indexer when it is instantiated.", "of samples. \"\"\" if dist is None: counts: Dict[str, int]", "{len(annotations)} annotations') self.representations = representations self.annotations = annotations kwargs =", "the training dataset. Defaults to 25. patience (int, optional): Allow", "rel in sample.relations} self.indexer = {unk: 0} for label in", "probe, used for early stopping. test_dataset (TaskDataset): Test data for", "Args: sample (ptb.Sample): The sample to label. Returns: torch.Tensor: For", "this, since it's shared with POS. def axis_alignment( probe: Probe,", "indexer when it is instantiated. Args: representations (representations.RepresentationsLayerDataset): Word representations", "probe.project assert projection is not None, 'no projection?' axes =", "-> int: \"\"\"Return the number of unique labels for this", "pull dependency labels. 
indexer (Union[DLPIndexer, ControlDLPIndexer]): Type of the indexer", "projection = probe.project assert projection is not None, 'no projection?'", "annotations unless the samples keyword is set in kwargs. Raises:", "that axis is zeroed. \"\"\" log = logging.getLogger(__name__) projection =", "counts: Dict[str, int] = collections.defaultdict(lambda: 0) for sample in samples:", "valid probe types for this task. Probe = Union[probes.Linear, probes.MLP]", "label prediction task.\"\"\" import collections import copy import logging from", "Dict, Iterator, Optional, Sequence, Set, Tuple, Type, Union) from ldp", "accuracy = learning.test(best_model, test_dataset, device=device) log.info('ablating axis %d, test accuracy", "when sampling tags per word type. By default, is computed", "(Optional[Union[numpy.ndarray, Sequence[float]]], optional): A distribution to use when sampling tags", "each relation label to an arbitrary (integer) label. We only", "copy import logging from typing import (Any, Dict, Iterator, Optional,", "POS tags) samples.\"\"\" for index in range(len(self)): yield self[index] def", "index in range(len(self)): yield self[index] def __len__(self) -> int: \"\"\"Return", "wandb. Returns: Sequence[Tuple[int, float]]: The ablated axes paired with optimal", "head in enumerate(heads): if head == -1: head = dep", "model = copy.deepcopy(best_model).eval() assert model.project is not None, 'no projection?'", "this for pairs of words which have a head-dependent relationship", "probe (Probe): The probe to evaluate. dev_dataset (datasets.TaskDataset): Data used", "to 25. patience (int, optional): Allow dev loss to not", "and second is shape (sentence_length,) containing integral POS tags. \"\"\"", "representations as reps from ldp.utils.typing import Device import numpy import", "not None, 'no projection?' 
model.project.project.weight.data[:, sorted(ablated | {axis})] = 0", "unk def __call__(self, sample: ptb.Sample) -> torch.Tensor: \"\"\"Map all possible", "len(heads), dtype=torch.long) for dep, head in enumerate(heads): if head ==", "to indexer when it is instantiated. Args: representations (representations.RepresentationsLayerDataset): Word", "dependency label prediction task.\"\"\" import collections import copy import logging", "for index in range(len(self.representations))) def count_unique_features(self) -> int: \"\"\"Return number", "return len(self.indexer) # Define the valid probe types for this", "yield self[index] def __len__(self) -> int: \"\"\"Return the number of", "samples from which to determine possible relations. unk (str): Label", "forwarded to indexer when it is instantiated. Args: representations (representations.RepresentationsLayerDataset):", "() def count_samples(self) -> int: \"\"\"Return the number of words", "datasets.TaskDataset, device: Optional[Device] = None, also_log_to_wandb: bool = False) ->", "for early stopping. test_dataset (TaskDataset): Test data for probe, used", "integral POS tags. \"\"\" representations = self.representations[index] annotations = self.annotations[index]", "to CPU. also_log_to_wandb (bool, optional): If set, log results to", "counts[relation] += 1 dist = numpy.array([float(count) for count in counts.values()])", "elif share_projection: projection = projections.Projection(ndims, project_to) else: projection = projections.Projection(2", "(batches) in the dataset.\"\"\" return len(self.annotations) @property def sample_representations_shape(self) ->", "for distinct projections P, Q. Defaults to NOT shared. epochs", "Tuple, Type, Union) from ldp import datasets, learning from ldp.models", "annotations from which to pull dependency labels. 
indexer (Union[DLPIndexer, ControlDLPIndexer]):", "= probes.Linear, project_to: Optional[int] = None, share_projection: bool = False,", "for relation in sample.relations: counts[relation] += 1 dist = numpy.array([float(count)", "relation label to an integer. Args: samples (Sequence[ptb.Sample]): The samples", "it is instantiated. Args: representations (representations.RepresentationsLayerDataset): Word representations corresponding to", "optional): Probe type to train. Defaults to probes.Linear. project_to (Optional[int],", "words to their syntactic relationship, if any.\"\"\" def __init__(self, samples:", "probes.Linear, project_to: Optional[int] = None, share_projection: bool = False, epochs:", "learning.test(best_model, test_dataset, device=device) log.info('ablating axis %d, test accuracy %f', best_axis,", "from ldp import datasets, learning from ldp.models import probes, projections", "ControlDLPIndexer]] = DLPIndexer, **kwargs: Any, ): \"\"\"Initialize dataset by mapping", "to wandb. By default, wandb is not used. Returns: Tuple[Probe,", "on which to train probe. Defaults to CPU. also_log_to_wandb (Optional[pathlib.Path],", "bilinear of the form xAy, we will always compute (Px)A(Py)", "pairs and len(pairs) == len(representations), 'missing edges?' # Stack everything", "tags) samples.\"\"\" for index in range(len(self)): yield self[index] def __len__(self)", "which to train probe. Defaults to CPU. also_log_to_wandb (Optional[pathlib.Path], optional):", "log = logging.getLogger(__name__) projection = probe.project assert projection is not", "label return labels def __len__(self) -> int: \"\"\"Return the number", "Since POS tags are integral scalars, there is no such", "Tuple[torch.Tensor, torch.Tensor]: \"\"\"Return (representations, integral POS tags) for index'th sentence.", "of pairwise probes with the same projection. E.g. if the", "reserved for \"no relationship\" tag. rel = numpy.random.choice(len(dist), p=dist) +", "probe on dependency label prediction. 
Args: train_dataset (TaskDataset): Training data", "ldp import datasets, learning from ldp.models import probes, projections from", "set(range(len(representations))) pairs = [(i, j) for i in indexes for", "is zeroed. \"\"\" log = logging.getLogger(__name__) projection = probe.project assert", "Returns: Sequence[Tuple[int, float]]: The ablated axes paired with optimal probe", "POS tags) for index'th sentence. Args: index (int): Index of", "indexer: Type[Union[DLPIndexer, ControlDLPIndexer]] = DLPIndexer, **kwargs: Any, ): \"\"\"Initialize dataset", "= accuracy accuracy = learning.test(best_model, test_dataset, device=device) log.info('ablating axis %d,", "w. \"\"\" heads, relations = sample.heads, sample.relations labels = torch.empty(len(heads),", "indexer to use for mapping PTB dependency label annotations to", "number of unique POS seen in data.\"\"\" return len(self.indexer) #", "integral POS tags) for index'th sentence. Args: index (int): Index", "cut. test_dataset (datasets.TaskDataset): Data used to determine the effect of", "CPU. also_log_to_wandb (Optional[pathlib.Path], optional): If set, log training data to", "'dev accuracy': best_accuracy, 'test accuracy': accuracy, }) axes.remove(best_axis) ablated.add(best_axis) accuracies.append((best_axis,", "# Define the valid probe types for this task. Probe", "import collections import copy import logging from typing import (Any,", "project=projection) learning.train(probe, train_dataset, dev_dataset=dev_dataset, stopper=learning.EarlyStopping(patience=patience), epochs=epochs, lr=lr, device=device, also_log_to_wandb=also_log_to_wandb) accuracy", "label annotations to integer tensors. Instantiated with given annotations unless", "def sample_features_shape(self) -> Sequence[int]: \"\"\"Return the shape of each individual", "log training data to wandb. 
By default, wandb is not", "have dimension %d', ndims) ntags = train_dataset.count_unique_features() assert ntags is", "None elif share_projection: projection = projections.Projection(ndims, project_to) else: projection =", "Iterator[Tuple[torch.Tensor, torch.Tensor]]: \"\"\"Yield all (sentence representations, sentence POS tags) samples.\"\"\"", "heads = sample.heads labels = torch.zeros(len(heads), len(heads), dtype=torch.long) for dep,", "The samples from which to determine possible relations. unk (str):", "if head == -1: head = dep words = (sample.sentence[dep],", "not None, 'no projection?' axes = set(range(projection.project.in_features)) ablated: Set[int] =", "possible word pairs. dist (Optional[Union[numpy.ndarray, Sequence[float]]], optional): A distribution to", "relations. unk (str): Label to use when un-indexed dependency label", "{len(representations)} representations ' f'but {len(annotations)} annotations') self.representations = representations self.annotations", "(2, self.representations.dataset.dimension) @property def sample_features_shape(self) -> Sequence[int]: \"\"\"Return the shape", "the samples keyword is set in kwargs. Raises: ValueError: If", "model.project is not None, 'no projection?' model.project.project.weight.data[:, sorted(ablated | {axis})]", "is no such shape! \"\"\" return () def count_samples(self) ->", "= 0 accuracy = learning.test(model, dev_dataset, device=device) if accuracy >", "Training data for probe. dev_dataset (TaskDataset): Validation data for probe,", "from ldp.parse import representations as reps from ldp.utils.typing import Device", "-> Sequence[Tuple[int, float]]: \"\"\"Measure whether the given probe is axis", "(sentence[dep], sentence[head]) if words not in self.rels: # Add one", "-> Sequence[int]: \"\"\"Return the dimensionality of the representation pairs.\"\"\" return", "project_to) probe = probe_t(2 * (project_to or ndims), ntags, project=projection)", "dimensionality. Defaults to no projection. 
share_projection (bool): If set, project", "samples. \"\"\" if dist is None: counts: Dict[str, int] =", "accuracy = learning.test(probe, test_dataset, device=device) return probe, accuracy # TODO(evandez):", "probes, projections from ldp.parse import ptb from ldp.parse import representations", "PTB annotations from which to pull dependency labels. indexer (Union[DLPIndexer,", "test_dataset: datasets.TaskDataset, device: Optional[Device] = None, also_log_to_wandb: bool = False)", "dataset by mapping each dependency label to an index. The", "torch.stack([rels[i, j] for i, j in pairs]) return bigrams, labels", "projection. share_projection (bool): If set, project the left and right", "'no projection?' axes = set(range(projection.project.in_features)) ablated: Set[int] = set() accuracies", "= unk def __call__(self, sample: ptb.Sample) -> torch.Tensor: \"\"\"Map all", "and right components of pairwise probes with the same projection.", "patience (int, optional): Allow dev loss to not improve for", "is reserved for \"no relationship\" tag. rel = numpy.random.choice(len(dist), p=dist)", "def __init__(self, samples: Sequence[ptb.Sample], unk: str = UNK): \"\"\"Map each", "**kwargs: Any, ): \"\"\"Initialize dataset by mapping each dependency label", "__iter__(self) -> Iterator[Tuple[torch.Tensor, torch.Tensor]]: \"\"\"Yield all (sentence representations, sentence POS", "return len(self.indexer) class ControlDLPIndexer: \"\"\"Map pairs of words to arbitrary", "-1. for axis in axes: model = copy.deepcopy(best_model).eval() assert model.project", "tags. \"\"\" representations = self.representations[index] annotations = self.annotations[index] assert len(representations)", "is shape (sentence_length,) containing integral POS tags. \"\"\" representations =", "from which to pull dependency labels. 
indexer (Union[DLPIndexer, ControlDLPIndexer]): Type", "one.\"\"\" return len(self.dist) + 1 class DLPTaskDataset(datasets.TaskDataset): \"\"\"Iterate over (word", "(TaskDataset): Training data for probe. dev_dataset (TaskDataset): Validation data for", "rel) in enumerate(zip(heads, relations)): if head == -1: labels[word, word]", "w, if any. Defaults to the \"unk\" label, even if", "rel = numpy.random.choice(len(dist), p=dist) + 1 self.rels[words] = rel def", "The samples from which to pull possible word pairs. dist", "ControlDLPIndexer]): Type of the indexer to use for mapping PTB", "probe. dev_dataset (TaskDataset): Validation data for probe, used for early", "Raises: ValueError: If number of representations/annotations do not match. \"\"\"", "shape! \"\"\" return () def count_samples(self) -> int: \"\"\"Return the", "projection = None elif share_projection: projection = projections.Projection(ndims, project_to) else:", "representations to this dimensionality. Defaults to no projection. share_projection (bool):", "xAy, we will always compute (Px)A(Py) as opposed to (Px)A(Qy)", "class DLPTaskDataset(datasets.TaskDataset): \"\"\"Iterate over (word representation pair, dependency label) pairs.\"\"\"", "for i in indexes for j in indexes if rels[i,", "project the left and right components of pairwise probes with", "second is shape (sentence_length,) containing integral POS tags. 
\"\"\" representations", "of sentences (batches) in the dataset.\"\"\" return len(self.annotations) @property def", "dev_dataset=dev_dataset, stopper=learning.EarlyStopping(patience=patience), epochs=epochs, lr=lr, device=device, also_log_to_wandb=also_log_to_wandb) accuracy = learning.test(probe, test_dataset,", "the \"unk\" label, even if there is no relationship between", "in enumerate(heads): if head == -1: head = dep words", "Add one so that 0 is reserved for \"no relationship\"", "device = device or 'cpu' ndims = train_dataset.sample_representations_shape[-1] log.info('representations have", "len(heads), dtype=torch.long) labels.fill_(self.indexer[self.unk]) for word, (head, rel) in enumerate(zip(heads, relations)):", "well commonize this, since it's shared with POS. def axis_alignment(", "self.indexer[label] = len(self.indexer) self.unk = unk def __call__(self, sample: ptb.Sample)", "the effect of cutting an axis. device (Optional[Device], optional): Torch", "label in sorted(labels): self.indexer[label] = len(self.indexer) self.unk = unk def", "Sequence, Set, Tuple, Type, Union) from ldp import datasets, learning", "label. We only do this for pairs of words which", "-> int: \"\"\"Return the number of words in the dataset.\"\"\"", "from ldp.models import probes, projections from ldp.parse import ptb from", "enumerate(heads): if head == -1: head = dep words =", "(int): Index of the sentence in the dataset. Returns: Tuple[torch.Tensor,", "with optimal probe accuracy after that axis is zeroed. \"\"\"", "def train(train_dataset: datasets.TaskDataset, dev_dataset: datasets.TaskDataset, test_dataset: datasets.TaskDataset, probe_t: Type[Probe] =", "1e-3, device: Optional[Device] = None, also_log_to_wandb: bool = False) ->", "to NOT shared. epochs (int, optional): Maximum passes through the", "not improve for this many epochs, then stop training. 
Defaults", "None or ndims == project_to: logging.info('projection dim = reps dim,", "(TaskDataset): Test data for probe, used to compute final accuracy", "best_model, best_axis, best_accuracy = probe, -1, -1. for axis in", "len(self.dist) + 1 class DLPTaskDataset(datasets.TaskDataset): \"\"\"Iterate over (word representation pair,", "Optional[Device] = None, also_log_to_wandb: bool = False) -> Tuple[Probe, float]:", "= annotations kwargs = kwargs.copy() kwargs.setdefault('samples', annotations) self.indexer = indexer(**kwargs)", "for probe, used for early stopping. test_dataset (TaskDataset): Test data", "4, lr: float = 1e-3, device: Optional[Device] = None, also_log_to_wandb:", "__getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]: \"\"\"Return (representations, integral POS", "in axes: model = copy.deepcopy(best_model).eval() assert model.project is not None,", "given probe is axis aligned. Args: probe (Probe): The probe", "task.\"\"\" return len(self.indexer) class ControlDLPIndexer: \"\"\"Map pairs of words to", "Type of the indexer to use for mapping PTB dependency", "to compute final accuracy after training. probe_t (Type[Probe], optional): Probe", "no relationship between v and w. \"\"\" heads, relations =", "is bilinear of the form xAy, we will always compute", "which to pull possible word pairs. dist (Optional[Union[numpy.ndarray, Sequence[float]]], optional):", "best_axis, best_accuracy = probe, -1, -1. for axis in axes:", "axis is zeroed. \"\"\" log = logging.getLogger(__name__) projection = probe.project", "used to determine which axes to cut. test_dataset (datasets.TaskDataset): Data", "to evaluate. dev_dataset (datasets.TaskDataset): Data used to determine which axes", "int) -> Tuple[torch.Tensor, torch.Tensor]: \"\"\"Return (representations, integral POS tags) for", "Defaults to 1e-3. 
device (Optional[Device], optional): Torch device on which", "Optional[Device] = None, also_log_to_wandb: bool = False) -> Sequence[Tuple[int, float]]:", "Args: samples (Sequence[ptb.Sample]): The samples from which to determine possible", "for different task?' log.info('dependency labeling task has %d tags', ntags)", "2 * project_to) probe = probe_t(2 * (project_to or ndims),", "class ControlDLPIndexer: \"\"\"Map pairs of words to arbitrary syntactic relationships.\"\"\"", "-> int: \"\"\"Return number of unique POS seen in data.\"\"\"", "By default, is computed from the list of samples. \"\"\"", "-> int: \"\"\"Return the number of relationships, including the null", "same projection. E.g. if the probe is bilinear of the", "then stop training. Defaults to 4. lr (float, optional): Learning", "probe, -1, -1. for axis in axes: model = copy.deepcopy(best_model).eval()", "containing integral POS tags. \"\"\" representations = self.representations[index] annotations =", "for index in range(len(self)): yield self[index] def __len__(self) -> int:", "in data.\"\"\" return len(self.indexer) # Define the valid probe types", "edge. indexes = set(range(len(representations))) pairs = [(i, j) for i", "__init__( self, representations: reps.RepresentationLayerDataset, annotations: Sequence[ptb.Sample], indexer: Type[Union[DLPIndexer, ControlDLPIndexer]] =", "Instantiated with given annotations unless the samples keyword is set", "the words to be paired and labeled. annotations (Sequence[ptb.PTBSample]): The", "device on which to train probe. Defaults to CPU. also_log_to_wandb", "= torch.stack([rels[i, j] for i, j in pairs]) return bigrams,", "arbitrary (integer) label. We only do this for pairs of", "this task.\"\"\" return len(self.indexer) class ControlDLPIndexer: \"\"\"Map pairs of words", "/= numpy.sum(dist) assert dist is not None, 'uninitialized distribution?' self.dist", "to pull possible word pairs. 
dist (Optional[Union[numpy.ndarray, Sequence[float]]], optional): A", "\"\"\"Return number of unique POS seen in data.\"\"\" return len(self.indexer)", "return (2, self.representations.dataset.dimension) @property def sample_features_shape(self) -> Sequence[int]: \"\"\"Return the", "samples: for relation in sample.relations: counts[relation] += 1 dist =", "device=device) log.info('ablating axis %d, test accuracy %f', best_axis, accuracy) if", "used to compute final accuracy after training. probe_t (Type[Probe], optional):", "Sequence[int]: \"\"\"Return the dimensionality of the representation pairs.\"\"\" return (2,", "25, patience: int = 4, lr: float = 1e-3, device:", "ValueError: If number of representations/annotations do not match. \"\"\" if", "torch.zeros(len(heads), len(heads), dtype=torch.long) for dep, head in enumerate(heads): if head", "from typing import (Any, Dict, Iterator, Optional, Sequence, Set, Tuple,", "samples keyword is set in kwargs. Raises: ValueError: If number", "Optional[Union[numpy.ndarray, Sequence[float]]] = None): \"\"\"Map each relation label to an", "def count_samples(self) -> int: \"\"\"Return the number of words in", "in self.rels: # Add one so that 0 is reserved", "be paired and labeled. annotations (Sequence[ptb.PTBSample]): The PTB annotations from", "Allow dev loss to not improve for this many epochs,", "return len(self.dist) + 1 class DLPTaskDataset(datasets.TaskDataset): \"\"\"Iterate over (word representation", "= axis best_accuracy = accuracy accuracy = learning.test(best_model, test_dataset, device=device)", "optional): If set, log results to wandb. Returns: Sequence[Tuple[int, float]]:", "dist (Optional[Union[numpy.ndarray, Sequence[float]]], optional): A distribution to use when sampling", "Tuple[Probe, float]: \"\"\"Train a probe on dependency label prediction. Args:", "dependency label is encountered. \"\"\" labels = {rel for sample", "to determine which axes to cut. 
test_dataset (datasets.TaskDataset): Data used", "# Stack everything before returning it. bigrams = torch.stack([ torch.stack((representations[i],", "If set, log training data to wandb. By default, wandb", "representations = self.representations[index] annotations = self.annotations[index] assert len(representations) == len(", "* (project_to or ndims), ntags, project=projection) learning.train(probe, train_dataset, dev_dataset=dev_dataset, stopper=learning.EarlyStopping(patience=patience),", "ablated: Set[int] = set() accuracies = [] while axes: best_model,", "ValueError(f'got {len(representations)} representations ' f'but {len(annotations)} annotations') self.representations = representations", "is computed from the list of samples. \"\"\" if dist", "accuracy # TODO(evandez): May as well commonize this, since it's", "25. patience (int, optional): Allow dev loss to not improve", "0) for sample in samples: for relation in sample.relations: counts[relation]", "probe is bilinear of the form xAy, we will always", "= DLPIndexer, **kwargs: Any, ): \"\"\"Initialize dataset by mapping each", "syntactic relationship, if any.\"\"\" def __init__(self, samples: Sequence[ptb.Sample], unk: str", "default, is computed from the list of samples. \"\"\" if", "int: \"\"\"Return the number of unique labels for this task.\"\"\"", "of each individual POS tag. Since POS tags are integral", "index'th sentence. Args: index (int): Index of the sentence in", "labels = torch.stack([rels[i, j] for i, j in pairs]) return", "indexer (Union[DLPIndexer, ControlDLPIndexer]): Type of the indexer to use for", "of representations/annotations do not match. 
\"\"\" if len(representations) != len(annotations):", "' f'but {len(annotations)} annotations') self.representations = representations self.annotations = annotations", "also_log_to_wandb=also_log_to_wandb) accuracy = learning.test(probe, test_dataset, device=device) return probe, accuracy #", "* project_to) probe = probe_t(2 * (project_to or ndims), ntags,", "if project_to is None or ndims == project_to: logging.info('projection dim", "not match. \"\"\" if len(representations) != len(annotations): raise ValueError(f'got {len(representations)}", "optimizer. Defaults to 1e-3. device (Optional[Device], optional): Torch device on", "= [(i, j) for i in indexes for j in", "the representation pairs.\"\"\" return (2, self.representations.dataset.dimension) @property def sample_features_shape(self) ->", "= Union[probes.Linear, probes.MLP] def train(train_dataset: datasets.TaskDataset, dev_dataset: datasets.TaskDataset, test_dataset: datasets.TaskDataset,", "= None): \"\"\"Map each relation label to an arbitrary (integer)", "label to an integer. Args: samples (Sequence[ptb.Sample]): The samples from", "to no projection. share_projection (bool): If set, project the left", "loss to not improve for this many epochs, then stop", "returning it. bigrams = torch.stack([ torch.stack((representations[i], representations[j])) for i, j", "self.dist = dist self.rels: Dict[Tuple[str, str], int] = {} for", "'missing edges?' # Stack everything before returning it. bigrams =", "logging.info('projection dim = reps dim, not projecting') projection = None", "(ptb.Sample): The sample to label. Returns: torch.Tensor: For length W", "wandb UNK = 'unk' class DLPIndexer: \"\"\"Map pairs of words", "= {unk: 0} for label in sorted(labels): self.indexer[label] = len(self.indexer)", "optional): Allow dev loss to not improve for this many", "shape (W, W) matrix where element (v, w) is the", "learning.test(probe, test_dataset, device=device) return probe, accuracy # TODO(evandez): May as", "in kwargs. 
Raises: ValueError: If number of representations/annotations do not", "prediction. Args: train_dataset (TaskDataset): Training data for probe. dev_dataset (TaskDataset):", "ndims = train_dataset.sample_representations_shape[-1] log.info('representations have dimension %d', ndims) ntags =", "words = (sentence[dep], sentence[head]) if words not in self.rels: #", "for the dependency label prediction task.\"\"\" import collections import copy", "sample to label. Returns: torch.Tensor: For length W sentence, returns", "samples (Sequence[ptb.Sample]): The samples from which to determine possible relations.", "self.rels.get(words, 0) return labels def __len__(self) -> int: \"\"\"Return the", "= model best_axis = axis best_accuracy = accuracy accuracy =", "(Union[DLPIndexer, ControlDLPIndexer]): Type of the indexer to use for mapping", "used for early stopping. test_dataset (TaskDataset): Test data for probe,", "dist /= numpy.sum(dist) assert dist is not None, 'uninitialized distribution?'", "projection = projections.Projection(2 * ndims, 2 * project_to) probe =", "on which to train probe. Defaults to CPU. also_log_to_wandb (bool,", "\"\"\"Return the dimensionality of the representation pairs.\"\"\" return (2, self.representations.dataset.dimension)", "axes: best_model, best_axis, best_accuracy = probe, -1, -1. for axis", "(bool): If set, project the left and right components of", "samples: sentence = sample.sentence heads = sample.heads for dep, head", "axis. device (Optional[Device], optional): Torch device on which to train", "\"\"\" return () def count_samples(self) -> int: \"\"\"Return the number", "where element (v, w) is the index of the label", "\"no relationship\" tag. 
rel = numpy.random.choice(len(dist), p=dist) + 1 self.rels[words]", "Iterator, Optional, Sequence, Set, Tuple, Type, Union) from ldp import", "relation in sample.relations: counts[relation] += 1 dist = numpy.array([float(count) for", "head == -1: head = dep words = (sample.sentence[dep], sample.sentence[head])", "device: Optional[Device] = None, also_log_to_wandb: bool = False) -> Tuple[Probe,", "self.rels[words] = rel def __call__(self, sample: ptb.Sample) -> torch.Tensor: \"\"\"Map", "\"\"\" representations = self.representations[index] annotations = self.annotations[index] assert len(representations) ==", "f'but {len(annotations)} annotations') self.representations = representations self.annotations = annotations kwargs", "number of relationships, including the null one.\"\"\" return len(self.dist) +", "= indexer(**kwargs) def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]: \"\"\"Return", "tensor is shape (sentence_length, representation_dimension) containing word representations, and second", "edges?' # Stack everything before returning it. bigrams = torch.stack([", "best_accuracy: best_model = model best_axis = axis best_accuracy = accuracy", "== len(representations), 'missing edges?' # Stack everything before returning it.", "device: Optional[Device] = None, also_log_to_wandb: bool = False) -> Sequence[Tuple[int,", "dependency labels. indexer (Union[DLPIndexer, ControlDLPIndexer]): Type of the indexer to", "determine which axes to cut. 
test_dataset (datasets.TaskDataset): Data used to", "self.representations[index] annotations = self.annotations[index] assert len(representations) == len( annotations.sentence), 'diff", "\"\"\" heads, relations = sample.heads, sample.relations labels = torch.empty(len(heads), len(heads),", "{rel for sample in samples for rel in sample.relations} self.indexer", "the number of relationships, including the null one.\"\"\" return len(self.dist)", "(Px)A(Py) as opposed to (Px)A(Qy) for distinct projections P, Q.", "representation_dimension) containing word representations, and second is shape (sentence_length,) containing", "words which have a head-dependent relationship in the original dataset.", "from which to pull possible word pairs. dist (Optional[Union[numpy.ndarray, Sequence[float]]],", "also_log_to_wandb (bool, optional): If set, log results to wandb. Returns:", "optional): A distribution to use when sampling tags per word", "j in indexes if rels[i, j]] assert pairs and len(pairs)", "(sample.sentence[dep], sample.sentence[head]) labels[dep, head] = self.rels.get(words, 0) return labels def", "sample.sentence heads = sample.heads for dep, head in enumerate(heads): if", "to not improve for this many epochs, then stop training.", "* ndims, 2 * project_to) probe = probe_t(2 * (project_to", "tag. rel = numpy.random.choice(len(dist), p=dist) + 1 self.rels[words] = rel", "shape (sentence_length,) containing integral POS tags. \"\"\" representations = self.representations[index]", "(TaskDataset): Validation data for probe, used for early stopping. test_dataset", "heads = sample.heads for dep, head in enumerate(heads): if head", "\"\"\"Yield all (sentence representations, sentence POS tags) samples.\"\"\" for index", "sentence = sample.sentence heads = sample.heads for dep, head in", "= copy.deepcopy(best_model).eval() assert model.project is not None, 'no projection?' 
model.project.project.weight.data[:,", "= device or 'cpu' ndims = train_dataset.sample_representations_shape[-1] log.info('representations have dimension", "\"\"\" labels = {rel for sample in samples for rel", "the dataset.\"\"\" return len(self.annotations) @property def sample_representations_shape(self) -> Sequence[int]: \"\"\"Return", "labels def __iter__(self) -> Iterator[Tuple[torch.Tensor, torch.Tensor]]: \"\"\"Yield all (sentence representations,", "no relationship between v and w. \"\"\" heads = sample.heads", "an index. The kwargs are forwarded to indexer when it", "task has %d tags', ntags) if project_to is None or", "= dep words = (sentence[dep], sentence[head]) if words not in", "projections P, Q. Defaults to NOT shared. epochs (int, optional):", "count_unique_features(self) -> int: \"\"\"Return number of unique POS seen in", "self.indexer[self.unk]) labels[word, head] = label return labels def __len__(self) ->", "the original dataset. Args: samples (Sequence[ptb.Samples]): The samples from which", "lr: float = 1e-3, device: Optional[Device] = None, also_log_to_wandb: bool", "epochs=epochs, lr=lr, device=device, also_log_to_wandb=also_log_to_wandb) accuracy = learning.test(probe, test_dataset, device=device) return", "log = logging.getLogger(__name__) device = device or 'cpu' ndims =", "{unk: 0} for label in sorted(labels): self.indexer[label] = len(self.indexer) self.unk", "the label describing the relationship between word v and w,", "assert len(representations) == len( annotations.sentence), 'diff sentence lengths?' rels =", "__init__(self, samples: Sequence[ptb.Sample], unk: str = UNK): \"\"\"Map each relation", "Data used to determine which axes to cut. test_dataset (datasets.TaskDataset):", "'no projection?' model.project.project.weight.data[:, sorted(ablated | {axis})] = 0 accuracy =", "are integral scalars, there is no such shape! \"\"\" return", "mapping PTB dependency label annotations to integer tensors. 
Instantiated with", "Returns: Tuple[torch.Tensor, torch.Tensor]: First tensor is shape (sentence_length, representation_dimension) containing", "index. The kwargs are forwarded to indexer when it is", "is None: counts: Dict[str, int] = collections.defaultdict(lambda: 0) for sample", "Set, Tuple, Type, Union) from ldp import datasets, learning from", "the same projection. E.g. if the probe is bilinear of", "log.info('representations have dimension %d', ndims) ntags = train_dataset.count_unique_features() assert ntags", "any. Defaults to the \"unk\" label, even if there is", "possible relations. unk (str): Label to use when un-indexed dependency", "torch.Tensor]: \"\"\"Return (representations, integral POS tags) for index'th sentence. Args:", "dim, not projecting') projection = None elif share_projection: projection =", "the number of sentences (batches) in the dataset.\"\"\" return len(self.annotations)", "assert dist is not None, 'uninitialized distribution?' self.dist = dist", "dist is None: counts: Dict[str, int] = collections.defaultdict(lambda: 0) for", "are forwarded to indexer when it is instantiated. Args: representations", "to train probe. Defaults to CPU. 
also_log_to_wandb (Optional[pathlib.Path], optional): If", "def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]: \"\"\"Return (representations, integral", "relations)): if head == -1: labels[word, word] = self.indexer[rel] else:", "the dataset.\"\"\" return sum( self.representations.dataset.length(index) for index in range(len(self.representations))) def", "self.unk = unk def __call__(self, sample: ptb.Sample) -> torch.Tensor: \"\"\"Map", "only do this for pairs of words which have a", "labels[dep, head] = self.rels.get(words, 0) return labels def __len__(self) ->", "accuracy) if also_log_to_wandb: wandb.log({ 'axis': best_axis, 'dev accuracy': best_accuracy, 'test", "if the probe is bilinear of the form xAy, we", "dev_dataset: datasets.TaskDataset, test_dataset: datasets.TaskDataset, device: Optional[Device] = None, also_log_to_wandb: bool", "the relationship between word v and w, if any. Defaults", "mapping each dependency label to an index. The kwargs are", "words = (sample.sentence[dep], sample.sentence[head]) labels[dep, head] = self.rels.get(words, 0) return", "if words not in self.rels: # Add one so that", "as reps from ldp.utils.typing import Device import numpy import torch", "POS tags are integral scalars, there is no such shape!", "-> Tuple[Probe, float]: \"\"\"Train a probe on dependency label prediction.", "= torch.zeros(len(heads), len(heads), dtype=torch.long) for dep, head in enumerate(heads): if", "best_axis = axis best_accuracy = accuracy accuracy = learning.test(best_model, test_dataset,", "int: \"\"\"Return the number of relationships, including the null one.\"\"\"", "\"\"\"Return the number of unique labels for this task.\"\"\" return", "v and w, if any. 
Defaults to the \"unk\" label,", "(word representation pair, dependency label) pairs.\"\"\" def __init__( self, representations:", "dtype=torch.long) labels.fill_(self.indexer[self.unk]) for word, (head, rel) in enumerate(zip(heads, relations)): if", "= learning.test(best_model, test_dataset, device=device) log.info('ablating axis %d, test accuracy %f',", "for optimizer. Defaults to 1e-3. device (Optional[Device], optional): Torch device", "\"\"\"Train a probe on dependency label prediction. Args: train_dataset (TaskDataset):", "so that 0 is reserved for \"no relationship\" tag. rel", "tensors. Instantiated with given annotations unless the samples keyword is", "dtype=torch.long) for dep, head in enumerate(heads): if head == -1:", "default, wandb is not used. Returns: Tuple[Probe, float]: The trained", "labels = torch.zeros(len(heads), len(heads), dtype=torch.long) for dep, head in enumerate(heads):", "and len(pairs) == len(representations), 'missing edges?' # Stack everything before", "(Optional[pathlib.Path], optional): If set, log training data to wandb. By", "pairs of words to arbitrary syntactic relationships.\"\"\" def __init__(self, samples:", "= None, also_log_to_wandb: bool = False) -> Tuple[Probe, float]: \"\"\"Train", "label, even if there is no relationship between v and", "count in counts.values()]) dist /= numpy.sum(dist) assert dist is not", "Defaults to probes.Linear. project_to (Optional[int], optional): Project representations to this", "sample in samples: sentence = sample.sentence heads = sample.heads for", "0} for label in sorted(labels): self.indexer[label] = len(self.indexer) self.unk =", "epochs: int = 25, patience: int = 4, lr: float", "in enumerate(zip(heads, relations)): if head == -1: labels[word, word] =", "after training. probe_t (Type[Probe], optional): Probe type to train. Defaults", "label is encountered. 
\"\"\" labels = {rel for sample in", "sample in samples for rel in sample.relations} self.indexer = {unk:", "Args: index (int): Index of the sentence in the dataset.", "label describing the relationship between word v and w, if", "for dep, head in enumerate(heads): if head == -1: head", "= label return labels def __len__(self) -> int: \"\"\"Return the", "probe: Probe, dev_dataset: datasets.TaskDataset, test_dataset: datasets.TaskDataset, device: Optional[Device] = None,", "shared with POS. def axis_alignment( probe: Probe, dev_dataset: datasets.TaskDataset, test_dataset:", "(representations.RepresentationsLayerDataset): Word representations corresponding to the words to be paired", "the dataset. Returns: Tuple[torch.Tensor, torch.Tensor]: First tensor is shape (sentence_length,", "there is no relationship between v and w. \"\"\" heads", "probe to evaluate. dev_dataset (datasets.TaskDataset): Data used to determine which", "unless the samples keyword is set in kwargs. Raises: ValueError:", "kwargs.setdefault('samples', annotations) self.indexer = indexer(**kwargs) def __getitem__(self, index: int) ->", "labels.fill_(self.indexer[self.unk]) for word, (head, rel) in enumerate(zip(heads, relations)): if head", "torch.stack([ torch.stack((representations[i], representations[j])) for i, j in pairs ]) labels", "False) -> Tuple[Probe, float]: \"\"\"Train a probe on dependency label", "assert projection is not None, 'no projection?' axes = set(range(projection.project.in_features))", "sentence, returns shape (W, W) matrix where element (v, w)", "dataset.\"\"\" return len(self.annotations) @property def sample_representations_shape(self) -> Sequence[int]: \"\"\"Return the", "== project_to: logging.info('projection dim = reps dim, not projecting') projection", "index (int): Index of the sentence in the dataset. Returns:", "this dimensionality. Defaults to no projection. 
share_projection (bool): If set,", "test_dataset (TaskDataset): Test data for probe, used to compute final", "> best_accuracy: best_model = model best_axis = axis best_accuracy =", "unk (str): Label to use when un-indexed dependency label is", "sentence in the dataset. Returns: Tuple[torch.Tensor, torch.Tensor]: First tensor is", "(Any, Dict, Iterator, Optional, Sequence, Set, Tuple, Type, Union) from", "word type. By default, is computed from the list of", "torch.stack((representations[i], representations[j])) for i, j in pairs ]) labels =", "logging.getLogger(__name__) projection = probe.project assert projection is not None, 'no", "for axis in axes: model = copy.deepcopy(best_model).eval() assert model.project is", "representations ' f'but {len(annotations)} annotations') self.representations = representations self.annotations =", "not projecting') projection = None elif share_projection: projection = projections.Projection(ndims,", "samples.\"\"\" for index in range(len(self)): yield self[index] def __len__(self) ->", "probe, accuracy # TODO(evandez): May as well commonize this, since", "between v and w. \"\"\" heads, relations = sample.heads, sample.relations", "dependency label prediction. Args: train_dataset (TaskDataset): Training data for probe.", "(int, optional): Allow dev loss to not improve for this", "None, 'uninitialized distribution?' self.dist = dist self.rels: Dict[Tuple[str, str], int]", "the shape of each individual POS tag. Since POS tags", "dist: Optional[Union[numpy.ndarray, Sequence[float]]] = None): \"\"\"Map each relation label to", "word pairs. 
dist (Optional[Union[numpy.ndarray, Sequence[float]]], optional): A distribution to use", "projections.Projection(ndims, project_to) else: projection = projections.Projection(2 * ndims, 2 *", "= torch.empty(len(heads), len(heads), dtype=torch.long) labels.fill_(self.indexer[self.unk]) for word, (head, rel) in", "= len(self.indexer) self.unk = unk def __call__(self, sample: ptb.Sample) ->", "(int, optional): Maximum passes through the training dataset. Defaults to", "torch.empty(len(heads), len(heads), dtype=torch.long) labels.fill_(self.indexer[self.unk]) for word, (head, rel) in enumerate(zip(heads,", "words to arbitrary syntactic relationships.\"\"\" def __init__(self, samples: Sequence[ptb.Sample], dist:", "experiments for the dependency label prediction task.\"\"\" import collections import", "samples: Sequence[ptb.Sample], unk: str = UNK): \"\"\"Map each relation label", "data for probe. dev_dataset (TaskDataset): Validation data for probe, used", "Index of the sentence in the dataset. Returns: Tuple[torch.Tensor, torch.Tensor]:", "is encountered. \"\"\" labels = {rel for sample in samples", "compute final accuracy after training. probe_t (Type[Probe], optional): Probe type", "== -1: head = dep words = (sentence[dep], sentence[head]) if", "rate for optimizer. Defaults to 1e-3. device (Optional[Device], optional): Torch", "commonize this, since it's shared with POS. def axis_alignment( probe:", "the form xAy, we will always compute (Px)A(Py) as opposed", "optional): Project representations to this dimensionality. Defaults to no projection.", "best_axis, 'dev accuracy': best_accuracy, 'test accuracy': accuracy, }) axes.remove(best_axis) ablated.add(best_axis)", "POS tag. Since POS tags are integral scalars, there is", "optimal probe accuracy after that axis is zeroed. 
\"\"\" log", "label = self.indexer.get(rel, self.indexer[self.unk]) labels[word, head] = label return labels", "typing import (Any, Dict, Iterator, Optional, Sequence, Set, Tuple, Type,", "distinct projections P, Q. Defaults to NOT shared. epochs (int,", "-1, -1. for axis in axes: model = copy.deepcopy(best_model).eval() assert", "labels. indexer (Union[DLPIndexer, ControlDLPIndexer]): Type of the indexer to use", "of the indexer to use for mapping PTB dependency label", "train_dataset.count_unique_features() assert ntags is not None, 'no label count, is", "shape (sentence_length, representation_dimension) containing word representations, and second is shape", "= sample.sentence heads = sample.heads for dep, head in enumerate(heads):", "zeroed. \"\"\" log = logging.getLogger(__name__) projection = probe.project assert projection", "dev_dataset (TaskDataset): Validation data for probe, used for early stopping.", "copy.deepcopy(best_model).eval() assert model.project is not None, 'no projection?' model.project.project.weight.data[:, sorted(ablated", "all possible (word, word) pairs to labels. Args: sample (ptb.Sample):", "reps dim, not projecting') projection = None elif share_projection: projection", "even if there is no relationship between v and w.", "{axis})] = 0 accuracy = learning.test(model, dev_dataset, device=device) if accuracy", "after that axis is zeroed. \"\"\" log = logging.getLogger(__name__) projection", "Sequence[Tuple[int, float]]: \"\"\"Measure whether the given probe is axis aligned.", "A distribution to use when sampling tags per word type.", "%d tags', ntags) if project_to is None or ndims ==", "model.project.project.weight.data[:, sorted(ablated | {axis})] = 0 accuracy = learning.test(model, dev_dataset,", "results to wandb. Returns: Sequence[Tuple[int, float]]: The ablated axes paired", "types for this task. 
Probe = Union[probes.Linear, probes.MLP] def train(train_dataset:", "task.\"\"\" import collections import copy import logging from typing import", "int] = {} for sample in samples: sentence = sample.sentence", "(Type[Probe], optional): Probe type to train. Defaults to probes.Linear. project_to", "\"\"\" if len(representations) != len(annotations): raise ValueError(f'got {len(representations)} representations '", "representations/annotations do not match. \"\"\" if len(representations) != len(annotations): raise", "self.rels: # Add one so that 0 is reserved for", "model best_axis = axis best_accuracy = accuracy accuracy = learning.test(best_model,", "ldp.utils.typing import Device import numpy import torch import wandb UNK", "lr=lr, device=device, also_log_to_wandb=also_log_to_wandb) accuracy = learning.test(probe, test_dataset, device=device) return probe,", "label. Returns: torch.Tensor: For length W sentence, returns shape (W,", "is the index of the label describing the relationship between", "(sentence representations, sentence POS tags) samples.\"\"\" for index in range(len(self)):", "ptb.Sample) -> torch.Tensor: \"\"\"Map all possible (word, word) pairs to", "DLPIndexer, **kwargs: Any, ): \"\"\"Initialize dataset by mapping each dependency", "\"unk\" label, even if there is no relationship between v", "of words in the dataset.\"\"\" return sum( self.representations.dataset.length(index) for index", "import ptb from ldp.parse import representations as reps from ldp.utils.typing", "import probes, projections from ldp.parse import ptb from ldp.parse import", "Find all pairs of words sharing an edge. 
indexes =", "= self.rels.get(words, 0) return labels def __len__(self) -> int: \"\"\"Return", "torch import wandb UNK = 'unk' class DLPIndexer: \"\"\"Map pairs", "log.info('dependency labeling task has %d tags', ntags) if project_to is", "datasets, learning from ldp.models import probes, projections from ldp.parse import", "return probe, accuracy # TODO(evandez): May as well commonize this,", "= {} for sample in samples: sentence = sample.sentence heads", "accuracy after that axis is zeroed. \"\"\" log = logging.getLogger(__name__)", "# TODO(evandez): May as well commonize this, since it's shared", "w) is the index of the label describing the relationship", "pairs]) return bigrams, labels def __iter__(self) -> Iterator[Tuple[torch.Tensor, torch.Tensor]]: \"\"\"Yield", "pairs to labels. Args: sample (ptb.Sample): The sample to label.", "(head, rel) in enumerate(zip(heads, relations)): if head == -1: labels[word,", "\"\"\"Return the number of relationships, including the null one.\"\"\" return", "None, 'no projection?' model.project.project.weight.data[:, sorted(ablated | {axis})] = 0 accuracy", "train(train_dataset: datasets.TaskDataset, dev_dataset: datasets.TaskDataset, test_dataset: datasets.TaskDataset, probe_t: Type[Probe] = probes.Linear,", "of the representation pairs.\"\"\" return (2, self.representations.dataset.dimension) @property def sample_features_shape(self)", "pairs.\"\"\" def __init__( self, representations: reps.RepresentationLayerDataset, annotations: Sequence[ptb.Sample], indexer: Type[Union[DLPIndexer,", "through the training dataset. Defaults to 25. patience (int, optional):", "float]]: The ablated axes paired with optimal probe accuracy after", "and w. \"\"\" heads, relations = sample.heads, sample.relations labels =", "labeled. 
annotations (Sequence[ptb.PTBSample]): The PTB annotations from which to pull", "Sequence[Tuple[int, float]]: The ablated axes paired with optimal probe accuracy", "ablated axes paired with optimal probe accuracy after that axis", "__init__(self, samples: Sequence[ptb.Sample], dist: Optional[Union[numpy.ndarray, Sequence[float]]] = None): \"\"\"Map each", "dataset for different task?' log.info('dependency labeling task has %d tags',", "projection is not None, 'no projection?' axes = set(range(projection.project.in_features)) ablated:", "\"\"\"Map each relation label to an arbitrary (integer) label. We", "-> Sequence[int]: \"\"\"Return the shape of each individual POS tag.", "Sequence[ptb.Sample], indexer: Type[Union[DLPIndexer, ControlDLPIndexer]] = DLPIndexer, **kwargs: Any, ): \"\"\"Initialize", "set() accuracies = [] while axes: best_model, best_axis, best_accuracy =", "representation pairs.\"\"\" return (2, self.representations.dataset.dimension) @property def sample_features_shape(self) -> Sequence[int]:", "dev_dataset: datasets.TaskDataset, test_dataset: datasets.TaskDataset, probe_t: Type[Probe] = probes.Linear, project_to: Optional[int]", "'no label count, is dataset for different task?' log.info('dependency labeling", "as well commonize this, since it's shared with POS. def", "The PTB annotations from which to pull dependency labels. indexer", "= collections.defaultdict(lambda: 0) for sample in samples: for relation in", "effect of cutting an axis. device (Optional[Device], optional): Torch device", "\"\"\"Initialize dataset by mapping each dependency label to an index.", "if rels[i, j]] assert pairs and len(pairs) == len(representations), 'missing", "individual POS tag. Since POS tags are integral scalars, there", "different task?' 
log.info('dependency labeling task has %d tags', ntags) if", "axis_alignment( probe: Probe, dev_dataset: datasets.TaskDataset, test_dataset: datasets.TaskDataset, device: Optional[Device] =", "int = 25, patience: int = 4, lr: float =", "or 'cpu' ndims = train_dataset.sample_representations_shape[-1] log.info('representations have dimension %d', ndims)", "in samples: for relation in sample.relations: counts[relation] += 1 dist", "data for probe, used for early stopping. test_dataset (TaskDataset): Test", "Type[Union[DLPIndexer, ControlDLPIndexer]] = DLPIndexer, **kwargs: Any, ): \"\"\"Initialize dataset by", "syntactic relationships.\"\"\" def __init__(self, samples: Sequence[ptb.Sample], dist: Optional[Union[numpy.ndarray, Sequence[float]]] =", "j in pairs ]) labels = torch.stack([rels[i, j] for i,", "annotations to integer tensors. Instantiated with given annotations unless the", "number of words in the dataset.\"\"\" return sum( self.representations.dataset.length(index) for", "Returns: Tuple[Probe, float]: The trained probe and its test accuracy.", "\"\"\" heads = sample.heads labels = torch.zeros(len(heads), len(heads), dtype=torch.long) for", "Probe type to train. Defaults to probes.Linear. project_to (Optional[int], optional):", "return labels def __len__(self) -> int: \"\"\"Return the number of", "Tuple[torch.Tensor, torch.Tensor]: First tensor is shape (sentence_length, representation_dimension) containing word", "words in the dataset.\"\"\" return sum( self.representations.dataset.length(index) for index in", "reps from ldp.utils.typing import Device import numpy import torch import", "to determine the effect of cutting an axis. device (Optional[Device],", "import datasets, learning from ldp.models import probes, projections from ldp.parse", "len(self.indexer) class ControlDLPIndexer: \"\"\"Map pairs of words to arbitrary syntactic", "None, 'no projection?' 
axes = set(range(projection.project.in_features)) ablated: Set[int] = set()", "probe types for this task. Probe = Union[probes.Linear, probes.MLP] def", "unk: str = UNK): \"\"\"Map each relation label to an", "Optional[int] = None, share_projection: bool = False, epochs: int =", "sample: ptb.Sample) -> torch.Tensor: \"\"\"Map all possible (word, word) pairs", "stopper=learning.EarlyStopping(patience=patience), epochs=epochs, lr=lr, device=device, also_log_to_wandb=also_log_to_wandb) accuracy = learning.test(probe, test_dataset, device=device)", "float = 1e-3, device: Optional[Device] = None, also_log_to_wandb: bool =", "tag. Since POS tags are integral scalars, there is no", "if any.\"\"\" def __init__(self, samples: Sequence[ptb.Sample], unk: str = UNK):", "(Sequence[ptb.Sample]): The samples from which to determine possible relations. unk", "== -1: head = dep words = (sample.sentence[dep], sample.sentence[head]) labels[dep,", "is instantiated. Args: representations (representations.RepresentationsLayerDataset): Word representations corresponding to the", "enumerate(zip(heads, relations)): if head == -1: labels[word, word] = self.indexer[rel]", "label count, is dataset for different task?' log.info('dependency labeling task", "training. Defaults to 4. lr (float, optional): Learning rate for", "accuracy. \"\"\" log = logging.getLogger(__name__) device = device or 'cpu'", "rel def __call__(self, sample: ptb.Sample) -> torch.Tensor: \"\"\"Map all possible", "always compute (Px)A(Py) as opposed to (Px)A(Qy) for distinct projections", "annotations.sentence), 'diff sentence lengths?' rels = self.indexer(annotations) # Find all", "-> Iterator[Tuple[torch.Tensor, torch.Tensor]]: \"\"\"Yield all (sentence representations, sentence POS tags)", "to probes.Linear. 
project_to (Optional[int], optional): Project representations to this dimensionality.", "dimension %d', ndims) ntags = train_dataset.count_unique_features() assert ntags is not", "Maximum passes through the training dataset. Defaults to 25. patience", "pairs ]) labels = torch.stack([rels[i, j] for i, j in", "Q. Defaults to NOT shared. epochs (int, optional): Maximum passes", "NOT shared. epochs (int, optional): Maximum passes through the training", "self.indexer = indexer(**kwargs) def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:", "indexes = set(range(len(representations))) pairs = [(i, j) for i in", "None, also_log_to_wandb: bool = False) -> Sequence[Tuple[int, float]]: \"\"\"Measure whether", "test accuracy. \"\"\" log = logging.getLogger(__name__) device = device or", "prediction task.\"\"\" import collections import copy import logging from typing", "such shape! \"\"\" return () def count_samples(self) -> int: \"\"\"Return", "dist is not None, 'uninitialized distribution?' self.dist = dist self.rels:", "= None, also_log_to_wandb: bool = False) -> Sequence[Tuple[int, float]]: \"\"\"Measure", "\"\"\" log = logging.getLogger(__name__) projection = probe.project assert projection is", "train_dataset (TaskDataset): Training data for probe. dev_dataset (TaskDataset): Validation data", "\"\"\"Map each relation label to an integer. Args: samples (Sequence[ptb.Sample]):", "with given annotations unless the samples keyword is set in", "use when sampling tags per word type. By default, is", "probes.Linear. project_to (Optional[int], optional): Project representations to this dimensionality. Defaults", "int: \"\"\"Return number of unique POS seen in data.\"\"\" return", "to integer tensors. Instantiated with given annotations unless the samples", "stop training. Defaults to 4. lr (float, optional): Learning rate", "is not None, 'no projection?' 
model.project.project.weight.data[:, sorted(ablated | {axis})] =", "import wandb UNK = 'unk' class DLPIndexer: \"\"\"Map pairs of", "labels def __len__(self) -> int: \"\"\"Return the number of relationships,", "len(self.annotations) @property def sample_representations_shape(self) -> Sequence[int]: \"\"\"Return the dimensionality of", "= sample.heads, sample.relations labels = torch.empty(len(heads), len(heads), dtype=torch.long) labels.fill_(self.indexer[self.unk]) for", "accuracy = learning.test(model, dev_dataset, device=device) if accuracy > best_accuracy: best_model", "with POS. def axis_alignment( probe: Probe, dev_dataset: datasets.TaskDataset, test_dataset: datasets.TaskDataset,", "dataset.\"\"\" return sum( self.representations.dataset.length(index) for index in range(len(self.representations))) def count_unique_features(self)", "to (Px)A(Qy) for distinct projections P, Q. Defaults to NOT", "samples: Sequence[ptb.Sample], dist: Optional[Union[numpy.ndarray, Sequence[float]]] = None): \"\"\"Map each relation", "for sample in samples: for relation in sample.relations: counts[relation] +=", "torch.Tensor]: First tensor is shape (sentence_length, representation_dimension) containing word representations,", "used to determine the effect of cutting an axis. device", "import (Any, Dict, Iterator, Optional, Sequence, Set, Tuple, Type, Union)", "for this task. Probe = Union[probes.Linear, probes.MLP] def train(train_dataset: datasets.TaskDataset,", "form xAy, we will always compute (Px)A(Py) as opposed to", "dep, head in enumerate(heads): if head == -1: head =", "False) -> Sequence[Tuple[int, float]]: \"\"\"Measure whether the given probe is", "in the original dataset. Args: samples (Sequence[ptb.Samples]): The samples from", "= set(range(len(representations))) pairs = [(i, j) for i in indexes", "= 25, patience: int = 4, lr: float = 1e-3,", "not None, 'uninitialized distribution?' 
self.dist = dist self.rels: Dict[Tuple[str, str],", "is not None, 'uninitialized distribution?' self.dist = dist self.rels: Dict[Tuple[str,", "-> int: \"\"\"Return the number of sentences (batches) in the", "= train_dataset.count_unique_features() assert ntags is not None, 'no label count,", "the indexer to use for mapping PTB dependency label annotations", "is no relationship between v and w. \"\"\" heads, relations", "+= 1 dist = numpy.array([float(count) for count in counts.values()]) dist", "self[index] def __len__(self) -> int: \"\"\"Return the number of sentences", "stopping. test_dataset (TaskDataset): Test data for probe, used to compute", "arbitrary syntactic relationships.\"\"\" def __init__(self, samples: Sequence[ptb.Sample], dist: Optional[Union[numpy.ndarray, Sequence[float]]]", "device=device) return probe, accuracy # TODO(evandez): May as well commonize", "labels for this task.\"\"\" return len(self.indexer) class ControlDLPIndexer: \"\"\"Map pairs", "this many epochs, then stop training. Defaults to 4. lr", "data for probe, used to compute final accuracy after training.", "None: counts: Dict[str, int] = collections.defaultdict(lambda: 0) for sample in", "P, Q. Defaults to NOT shared. epochs (int, optional): Maximum", "dep words = (sentence[dep], sentence[head]) if words not in self.rels:", "pairs of words sharing an edge. indexes = set(range(len(representations))) pairs", "while axes: best_model, best_axis, best_accuracy = probe, -1, -1. for", "test_dataset, device=device) return probe, accuracy # TODO(evandez): May as well", "!= len(annotations): raise ValueError(f'got {len(representations)} representations ' f'but {len(annotations)} annotations')", "None, also_log_to_wandb: bool = False) -> Tuple[Probe, float]: \"\"\"Train a", "for pairs of words which have a head-dependent relationship in", "which to determine possible relations. 
unk (str): Label to use", "accuracy': best_accuracy, 'test accuracy': accuracy, }) axes.remove(best_axis) ablated.add(best_axis) accuracies.append((best_axis, accuracy))", "training dataset. Defaults to 25. patience (int, optional): Allow dev", "ntags, project=projection) learning.train(probe, train_dataset, dev_dataset=dev_dataset, stopper=learning.EarlyStopping(patience=patience), epochs=epochs, lr=lr, device=device, also_log_to_wandb=also_log_to_wandb)", "the list of samples. \"\"\" if dist is None: counts:", "if head == -1: labels[word, word] = self.indexer[rel] else: label", "Data used to determine the effect of cutting an axis.", "to label. Returns: torch.Tensor: For length W sentence, returns shape", "count, is dataset for different task?' log.info('dependency labeling task has", "POS seen in data.\"\"\" return len(self.indexer) # Define the valid", "ndims) ntags = train_dataset.count_unique_features() assert ntags is not None, 'no", "else: label = self.indexer.get(rel, self.indexer[self.unk]) labels[word, head] = label return", "import numpy import torch import wandb UNK = 'unk' class", "= logging.getLogger(__name__) device = device or 'cpu' ndims = train_dataset.sample_representations_shape[-1]", "representations: reps.RepresentationLayerDataset, annotations: Sequence[ptb.Sample], indexer: Type[Union[DLPIndexer, ControlDLPIndexer]] = DLPIndexer, **kwargs:", "of unique POS seen in data.\"\"\" return len(self.indexer) # Define", "= torch.stack([ torch.stack((representations[i], representations[j])) for i, j in pairs ])", "dist self.rels: Dict[Tuple[str, str], int] = {} for sample in", "or ndims), ntags, project=projection) learning.train(probe, train_dataset, dev_dataset=dev_dataset, stopper=learning.EarlyStopping(patience=patience), epochs=epochs, lr=lr,", "probe_t (Type[Probe], optional): Probe type to train. Defaults to probes.Linear.", "assert model.project is not None, 'no projection?' 
model.project.project.weight.data[:, sorted(ablated |", "axis aligned. Args: probe (Probe): The probe to evaluate. dev_dataset", "'axis': best_axis, 'dev accuracy': best_accuracy, 'test accuracy': accuracy, }) axes.remove(best_axis)", "between word v and w, if any. Defaults to the", "bigrams = torch.stack([ torch.stack((representations[i], representations[j])) for i, j in pairs", "the null one.\"\"\" return len(self.dist) + 1 class DLPTaskDataset(datasets.TaskDataset): \"\"\"Iterate", "(project_to or ndims), ntags, project=projection) learning.train(probe, train_dataset, dev_dataset=dev_dataset, stopper=learning.EarlyStopping(patience=patience), epochs=epochs,", "words to be paired and labeled. annotations (Sequence[ptb.PTBSample]): The PTB", "train probe. Defaults to CPU. also_log_to_wandb (bool, optional): If set,", "\"\"\"Iterate over (word representation pair, dependency label) pairs.\"\"\" def __init__(", "sorted(labels): self.indexer[label] = len(self.indexer) self.unk = unk def __call__(self, sample:", "-> torch.Tensor: \"\"\"Map all possible (word, word) pairs to labels.", "= projections.Projection(ndims, project_to) else: projection = projections.Projection(2 * ndims, 2", "Stack everything before returning it. bigrams = torch.stack([ torch.stack((representations[i], representations[j]))", "= None elif share_projection: projection = projections.Projection(ndims, project_to) else: projection", "in sample.relations} self.indexer = {unk: 0} for label in sorted(labels):", "else: projection = projections.Projection(2 * ndims, 2 * project_to) probe", "if also_log_to_wandb: wandb.log({ 'axis': best_axis, 'dev accuracy': best_accuracy, 'test accuracy':", "annotations') self.representations = representations self.annotations = annotations kwargs = kwargs.copy()", "label to an index. The kwargs are forwarded to indexer", "probe accuracy after that axis is zeroed. 
\"\"\" log =", "sample_features_shape(self) -> Sequence[int]: \"\"\"Return the shape of each individual POS", "class DLPIndexer: \"\"\"Map pairs of words to their syntactic relationship,", "'test accuracy': accuracy, }) axes.remove(best_axis) ablated.add(best_axis) accuracies.append((best_axis, accuracy)) return tuple(accuracies)", "one so that 0 is reserved for \"no relationship\" tag.", "matrix where element (v, w) is the index of the", "i, j in pairs]) return bigrams, labels def __iter__(self) ->", "project_to) else: projection = projections.Projection(2 * ndims, 2 * project_to)", "| {axis})] = 0 accuracy = learning.test(model, dev_dataset, device=device) if", "ldp.parse import ptb from ldp.parse import representations as reps from", "UNK = 'unk' class DLPIndexer: \"\"\"Map pairs of words to", "corresponding to the words to be paired and labeled. annotations", "self.representations.dataset.dimension) @property def sample_features_shape(self) -> Sequence[int]: \"\"\"Return the shape of", "to the words to be paired and labeled. annotations (Sequence[ptb.PTBSample]):", "Any, ): \"\"\"Initialize dataset by mapping each dependency label to", "optional): Learning rate for optimizer. Defaults to 1e-3. device (Optional[Device],", "p=dist) + 1 self.rels[words] = rel def __call__(self, sample: ptb.Sample)", "their syntactic relationship, if any.\"\"\" def __init__(self, samples: Sequence[ptb.Sample], unk:", "Defaults to NOT shared. epochs (int, optional): Maximum passes through", "Returns: torch.Tensor: For length W sentence, returns shape (W, W)", "Type, Union) from ldp import datasets, learning from ldp.models import", "= False) -> Sequence[Tuple[int, float]]: \"\"\"Measure whether the given probe", "train probe. Defaults to CPU. also_log_to_wandb (Optional[pathlib.Path], optional): If set,", "set(range(projection.project.in_features)) ablated: Set[int] = set() accuracies = [] while axes:", "type. 
By default, is computed from the list of samples.", "which to pull dependency labels. indexer (Union[DLPIndexer, ControlDLPIndexer]): Type of", "collections import copy import logging from typing import (Any, Dict,", "w. \"\"\" heads = sample.heads labels = torch.zeros(len(heads), len(heads), dtype=torch.long)", "label prediction. Args: train_dataset (TaskDataset): Training data for probe. dev_dataset", "for \"no relationship\" tag. rel = numpy.random.choice(len(dist), p=dist) + 1", "= self.indexer[rel] else: label = self.indexer.get(rel, self.indexer[self.unk]) labels[word, head] =", "best_accuracy, 'test accuracy': accuracy, }) axes.remove(best_axis) ablated.add(best_axis) accuracies.append((best_axis, accuracy)) return", "str = UNK): \"\"\"Map each relation label to an integer.", "labels[word, word] = self.indexer[rel] else: label = self.indexer.get(rel, self.indexer[self.unk]) labels[word,", "self.representations = representations self.annotations = annotations kwargs = kwargs.copy() kwargs.setdefault('samples',", "pull possible word pairs. dist (Optional[Union[numpy.ndarray, Sequence[float]]], optional): A distribution", "= self.indexer.get(rel, self.indexer[self.unk]) labels[word, head] = label return labels def", "also_log_to_wandb (Optional[pathlib.Path], optional): If set, log training data to wandb.", "def __len__(self) -> int: \"\"\"Return the number of relationships, including", "\"\"\" log = logging.getLogger(__name__) device = device or 'cpu' ndims", "axis in axes: model = copy.deepcopy(best_model).eval() assert model.project is not", "indexer(**kwargs) def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]: \"\"\"Return (representations,", "number of unique labels for this task.\"\"\" return len(self.indexer) class", "len(self.indexer) # Define the valid probe types for this task.", "head-dependent relationship in the original dataset. 
Args: samples (Sequence[ptb.Samples]): The", "+ 1 self.rels[words] = rel def __call__(self, sample: ptb.Sample) ->", "label to an arbitrary (integer) label. We only do this", "def __len__(self) -> int: \"\"\"Return the number of unique labels", "For length W sentence, returns shape (W, W) matrix where", "relationship between v and w. \"\"\" heads = sample.heads labels", "relationships.\"\"\" def __init__(self, samples: Sequence[ptb.Sample], dist: Optional[Union[numpy.ndarray, Sequence[float]]] = None):", "cutting an axis. device (Optional[Device], optional): Torch device on which", "of words sharing an edge. indexes = set(range(len(representations))) pairs =", "Defaults to 25. patience (int, optional): Allow dev loss to", "We only do this for pairs of words which have", "lr (float, optional): Learning rate for optimizer. Defaults to 1e-3.", "if dist is None: counts: Dict[str, int] = collections.defaultdict(lambda: 0)", "to 4. lr (float, optional): Learning rate for optimizer. Defaults", "relationship between word v and w, if any. Defaults to", "@property def sample_representations_shape(self) -> Sequence[int]: \"\"\"Return the dimensionality of the", "dist = numpy.array([float(count) for count in counts.values()]) dist /= numpy.sum(dist)", "epochs (int, optional): Maximum passes through the training dataset. Defaults", "def sample_representations_shape(self) -> Sequence[int]: \"\"\"Return the dimensionality of the representation", "len(pairs) == len(representations), 'missing edges?' # Stack everything before returning", "(Probe): The probe to evaluate. dev_dataset (datasets.TaskDataset): Data used to", "in samples for rel in sample.relations} self.indexer = {unk: 0}", "by mapping each dependency label to an index. The kwargs", "probe. Defaults to CPU. also_log_to_wandb (bool, optional): If set, log", "= self.annotations[index] assert len(representations) == len( annotations.sentence), 'diff sentence lengths?'", "set in kwargs. 
Raises: ValueError: If number of representations/annotations do", "project_to is None or ndims == project_to: logging.info('projection dim =", "def __iter__(self) -> Iterator[Tuple[torch.Tensor, torch.Tensor]]: \"\"\"Yield all (sentence representations, sentence", "(Optional[Device], optional): Torch device on which to train probe. Defaults", "do not match. \"\"\" if len(representations) != len(annotations): raise ValueError(f'got", "dev_dataset, device=device) if accuracy > best_accuracy: best_model = model best_axis", "or ndims == project_to: logging.info('projection dim = reps dim, not", "self.indexer(annotations) # Find all pairs of words sharing an edge.", "annotations: Sequence[ptb.Sample], indexer: Type[Union[DLPIndexer, ControlDLPIndexer]] = DLPIndexer, **kwargs: Any, ):", "of cutting an axis. device (Optional[Device], optional): Torch device on", "= sample.heads for dep, head in enumerate(heads): if head ==", "labels[word, head] = label return labels def __len__(self) -> int:", "which have a head-dependent relationship in the original dataset. Args:", "of words to arbitrary syntactic relationships.\"\"\" def __init__(self, samples: Sequence[ptb.Sample],", "element (v, w) is the index of the label describing", "bool = False) -> Sequence[Tuple[int, float]]: \"\"\"Measure whether the given", "sentence lengths?' rels = self.indexer(annotations) # Find all pairs of", "for probe. dev_dataset (TaskDataset): Validation data for probe, used for", "wandb.log({ 'axis': best_axis, 'dev accuracy': best_accuracy, 'test accuracy': accuracy, })", "float]]: \"\"\"Measure whether the given probe is axis aligned. Args:", "the number of unique labels for this task.\"\"\" return len(self.indexer)", "index: int) -> Tuple[torch.Tensor, torch.Tensor]: \"\"\"Return (representations, integral POS tags)", "int: \"\"\"Return the number of words in the dataset.\"\"\" return", "optional): If set, log training data to wandb. By default,", "Define the valid probe types for this task. 
Probe =", "float]: \"\"\"Train a probe on dependency label prediction. Args: train_dataset", "no projection. share_projection (bool): If set, project the left and", "device=device) if accuracy > best_accuracy: best_model = model best_axis =", "in pairs]) return bigrams, labels def __iter__(self) -> Iterator[Tuple[torch.Tensor, torch.Tensor]]:", "= sample.heads labels = torch.zeros(len(heads), len(heads), dtype=torch.long) for dep, head", "of the sentence in the dataset. Returns: Tuple[torch.Tensor, torch.Tensor]: First", "sharing an edge. indexes = set(range(len(representations))) pairs = [(i, j)", "device (Optional[Device], optional): Torch device on which to train probe.", "for j in indexes if rels[i, j]] assert pairs and", "for count in counts.values()]) dist /= numpy.sum(dist) assert dist is", "used. Returns: Tuple[Probe, float]: The trained probe and its test", "int: \"\"\"Return the number of sentences (batches) in the dataset.\"\"\"", "samples for rel in sample.relations} self.indexer = {unk: 0} for", "= set() accuracies = [] while axes: best_model, best_axis, best_accuracy", "representations, and second is shape (sentence_length,) containing integral POS tags.", "Defaults to the \"unk\" label, even if there is no", "epochs, then stop training. Defaults to 4. lr (float, optional):", "annotations = self.annotations[index] assert len(representations) == len( annotations.sentence), 'diff sentence", "pairs of words to their syntactic relationship, if any.\"\"\" def", "is None or ndims == project_to: logging.info('projection dim = reps", "0 accuracy = learning.test(model, dev_dataset, device=device) if accuracy > best_accuracy:", "which to train probe. Defaults to CPU. 
also_log_to_wandb (bool, optional):", "None): \"\"\"Map each relation label to an arbitrary (integer) label.", "import torch import wandb UNK = 'unk' class DLPIndexer: \"\"\"Map", "we will always compute (Px)A(Py) as opposed to (Px)A(Qy) for", "Probe, dev_dataset: datasets.TaskDataset, test_dataset: datasets.TaskDataset, device: Optional[Device] = None, also_log_to_wandb:", "test_dataset (datasets.TaskDataset): Data used to determine the effect of cutting", "list of samples. \"\"\" if dist is None: counts: Dict[str,", "if there is no relationship between v and w. \"\"\"", "tags) for index'th sentence. Args: index (int): Index of the", "The trained probe and its test accuracy. \"\"\" log =", "for this task.\"\"\" return len(self.indexer) class ControlDLPIndexer: \"\"\"Map pairs of", "right components of pairwise probes with the same projection. E.g.", "Sequence[float]]], optional): A distribution to use when sampling tags per", "to use for mapping PTB dependency label annotations to integer", "probe_t(2 * (project_to or ndims), ntags, project=projection) learning.train(probe, train_dataset, dev_dataset=dev_dataset,", "project_to: logging.info('projection dim = reps dim, not projecting') projection =", "= projections.Projection(2 * ndims, 2 * project_to) probe = probe_t(2", "= 'unk' class DLPIndexer: \"\"\"Map pairs of words to their", "to pull dependency labels. 
indexer (Union[DLPIndexer, ControlDLPIndexer]): Type of the", "= False) -> Tuple[Probe, float]: \"\"\"Train a probe on dependency", "accuracy accuracy = learning.test(best_model, test_dataset, device=device) log.info('ablating axis %d, test", "if head == -1: head = dep words = (sentence[dep],", "self.rels: Dict[Tuple[str, str], int] = {} for sample in samples:", "test_dataset: datasets.TaskDataset, probe_t: Type[Probe] = probes.Linear, project_to: Optional[int] = None,", "__call__(self, sample: ptb.Sample) -> torch.Tensor: \"\"\"Map all possible (word, word)", "self, representations: reps.RepresentationLayerDataset, annotations: Sequence[ptb.Sample], indexer: Type[Union[DLPIndexer, ControlDLPIndexer]] = DLPIndexer,", "Optional, Sequence, Set, Tuple, Type, Union) from ldp import datasets,", "in indexes if rels[i, j]] assert pairs and len(pairs) ==", "projections.Projection(2 * ndims, 2 * project_to) probe = probe_t(2 *", "Label to use when un-indexed dependency label is encountered. \"\"\"", "unique labels for this task.\"\"\" return len(self.indexer) class ControlDLPIndexer: \"\"\"Map", "each individual POS tag. Since POS tags are integral scalars,", "the dimensionality of the representation pairs.\"\"\" return (2, self.representations.dataset.dimension) @property", "return bigrams, labels def __iter__(self) -> Iterator[Tuple[torch.Tensor, torch.Tensor]]: \"\"\"Yield all", "None, share_projection: bool = False, epochs: int = 25, patience:", "share_projection (bool): If set, project the left and right components", "an arbitrary (integer) label. We only do this for pairs", "\"\"\"Map pairs of words to their syntactic relationship, if any.\"\"\"", "bool = False) -> Tuple[Probe, float]: \"\"\"Train a probe on", "shared. epochs (int, optional): Maximum passes through the training dataset.", "representations corresponding to the words to be paired and labeled.", "\"\"\"Return (representations, integral POS tags) for index'th sentence. 
Args: index", "left and right components of pairwise probes with the same", "datasets.TaskDataset, test_dataset: datasets.TaskDataset, device: Optional[Device] = None, also_log_to_wandb: bool =", "projection. E.g. if the probe is bilinear of the form", "numpy.sum(dist) assert dist is not None, 'uninitialized distribution?' self.dist =", "learning.train(probe, train_dataset, dev_dataset=dev_dataset, stopper=learning.EarlyStopping(patience=patience), epochs=epochs, lr=lr, device=device, also_log_to_wandb=also_log_to_wandb) accuracy =", "axis %d, test accuracy %f', best_axis, accuracy) if also_log_to_wandb: wandb.log({", "log.info('ablating axis %d, test accuracy %f', best_axis, accuracy) if also_log_to_wandb:", "self.indexer.get(rel, self.indexer[self.unk]) labels[word, head] = label return labels def __len__(self)", "wandb is not used. Returns: Tuple[Probe, float]: The trained probe", "If set, log results to wandb. Returns: Sequence[Tuple[int, float]]: The", "dev_dataset (datasets.TaskDataset): Data used to determine which axes to cut.", "= learning.test(probe, test_dataset, device=device) return probe, accuracy # TODO(evandez): May", "also_log_to_wandb: bool = False) -> Sequence[Tuple[int, float]]: \"\"\"Measure whether the" ]
[ "byteorder='big', signed=False) t += self.renew_till.to_bytes(4, byteorder='big', signed=False) return t class", "generated. directory_path: str the directory to write the kirbi files", "= int.from_bytes(reader.read(4), byteorder='big', signed=False) return t def to_bytes(self): t =", "CCACHEOctetString() o.length = len(data) if isinstance(data,str): o.data = data.encode() else:", "[] @staticmethod def from_asn1(principal, realm): p = CCACHEPrincipal() p.name_type =", "-1: tgt = [cred.to_tgt(), cred.time] tgts.append(tgt) return tgts def get_all_tgs(self):", "for ad in self.authdata: t += ad.to_bytes() t += self.ticket.to_bytes()", "True: self.primary_principal = c.client c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm']) c.time =", "asn1 encoded TGS_REP data when the user requests a tgs", "None self.starttime = None self.endtime = None self.renew_till = None", "else 'N/A', datetime.datetime.fromtimestamp(self.time.endtime).isoformat() if self.time.endtime != 0 else 'N/A', datetime.datetime.fromtimestamp(self.time.renew_till).isoformat()", "self.time.to_bytes() t += self.is_skey.to_bytes(1, byteorder='big', signed=False) t += self.tktflags.to_bytes(4, byteorder='little',", "cred.server.to_string(separator = '/').lower().find('krbtgt') == -1: tgss.append(cred.to_tgs()) return tgss def get_hashes(self,", "in self.credentials: if cred.server.to_string(separator = '/').lower().find('krbtgt') == -1: tgss.append(cred.to_tgs()) return", "all credential object in the CCACHE object to the kirbi", "@staticmethod def dummy_time(start= datetime.datetime.now(datetime.timezone.utc)): t = Times() t.authtime = dt_to_kerbtime(start)", "enc_part.native t = EncryptionKey(self.key.to_asn1()).native return tgt_rep, t def to_tgs(self): \"\"\"", "kirbi = KRBCRED.load(kirbidata).native cc.add_kirbi(kirbi) return cc def to_kirbidir(self, directory_path): \"\"\"", "self.server.realm.to_string() krbcredinfo['sname'] = self.server.to_asn1()[0] 
enc_krbcred = {} enc_krbcred['ticket-info'] = [KrbCredInfo(krbcredinfo)]", "crash miserably :( if len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() ==", "= data return o @staticmethod def parse(reader): o = CCACHEOctetString()", "0 t.starttime = dt_to_kerbtime(enc_as_rep_part['starttime']) \\ if 'starttime' in enc_as_rep_part and", "self.keylen.to_bytes(2, byteorder='big', signed=False) t += self.keyvalue return t class Times:", "c.time = Times.from_asn1(ticket_info) c.key = Keyblock.from_asn1(ticket_info['key']) c.is_skey = 0 #not", "to \"\"\" kf_abs = os.path.abspath(directory_path) for cred in self.credentials: kirbi,", "tgt = [cred.to_tgt(), cred.time] tgts.append(tgt) return tgts def get_all_tgs(self): tgss", "tgt_rep = {} tgt_rep['pvno'] = krb5_pvno tgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value tgt_rep['crealm']", "= int.from_bytes(reader.read(4), byteorder='big', signed=False) p.num_components = int.from_bytes(reader.read(4), byteorder='big', signed=False) p.realm", "1, 'cipher': b''}) tgt_rep = {} tgt_rep['pvno'] = krb5_pvno tgt_rep['msg-type']", "self.ticket = None self.second_ticket = None def to_hash(self): res =", "decryption of the encrypted part of the tgs_rep object, it", "the header is rarely used -mostly static- you'd need to", "= CCACHE(True) c.file_format_version = int.from_bytes(reader.read(2), byteorder='big', signed=False) hdr_size = int.from_bytes(reader.read(2),", "in self.addrs: t += addr.to_bytes() t += self.num_authdata.to_bytes(4, byteorder='big', signed=False)", "self.num_authdata = None self.authdata = [] self.ticket = None self.second_ticket", "trimming it') t = ticket_info['sname'] t['name-string'] = t['name-string'][:-1] c.server =", "return c def to_bytes(self): t = self.file_format_version.to_bytes(2, byteorder='big', signed=False) t_hdr", "pycquery_krb.common.utils import dt_to_kerbtime, TGSTicket2hashcat from pycquery_krb.protocol.constants import EncryptionType, MESSAGE_TYPE 
from", "self.data.decode() @staticmethod def from_string(data): o = CCACHEOctetString() o.data = data.encode()", "if int(res['enc-part']['etype']) == 23 or all_hashes == True: hashes.append(cred.to_hash()) return", "signed=False) for _ in range(c.num_address): c.addrs.append(Address.parse(reader)) c.num_authdata = int.from_bytes(reader.read(4), byteorder='big',", "a = Address() a.addrtype = int.from_bytes(reader.read(2), byteorder='big', signed=False) a.addrdata =", "Address() a.addrtype = int.from_bytes(reader.read(2), byteorder='big', signed=False) a.addrdata = CCACHEOctetString.parse(reader) return", "None @staticmethod def parse(reader): a = Authdata() a.authtype = int.from_bytes(reader.read(2),", "byteorder='big', signed=False) t += len(self.components).to_bytes(4, byteorder='big', signed=False) t += self.realm.to_bytes()", "signed=False) t_hdr = b'' for header in self.headers: t_hdr +=", "native representation of the asn1 encoded AS_REP data that the", "= c.client c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm']) c.time = Times.from_asn1(enc_as_rep_part) c.key", "int.from_bytes(reader.read(4), byteorder='big', signed=False) return t def to_bytes(self): t = self.authtime.to_bytes(4,", "c.ticket = CCACHEOctetString.parse(reader) c.second_ticket = CCACHEOctetString.parse(reader) return c @staticmethod def", "h = Header() h.tag = int.from_bytes(reader.read(2), byteorder='big', signed=False) h.taglen =", "in range(c.num_authdata): c.authdata.append(Authdata.parse(reader)) c.ticket = CCACHEOctetString.parse(reader) c.second_ticket = CCACHEOctetString.parse(reader) return", "+= self.num_address.to_bytes(4, byteorder='big', signed=False) for addr in self.addrs: t +=", "encoded AS_REP data that the AD sends upon a succsessful", "CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm']) if override_pp == True: self.primary_principal = c.client c.server", "return [ '%s@%s' % (self.client.to_string(separator='/'), 
self.client.realm.to_string()), '%s@%s' % (self.server.to_string(separator='/'), self.server.realm.to_string()),", "expected that the decrypted XXX is supplied in enc_as_rep_part override_pp:", "sname name-string contains a realm as well htne impacket will", "ticket are AP_REP we check for the server principal to", "of hashes in hashcat-firendly format for tickets with encryption type", "= '%s@%s_%s' % (self.client.to_string() , self.server.to_string(), hashlib.sha1(self.ticket.to_asn1()).hexdigest()[:8]) krbcredinfo = {}", "dummy_time(start= datetime.datetime.now(datetime.timezone.utc)): t = Times() t.authtime = dt_to_kerbtime(start) t.starttime =", "glob.glob(dir_path): with open(filename, 'rb') as f: kirbidata = f.read() kirbi", "io import datetime import glob import hashlib from pycquery_krb.protocol.asn1_structs import", "add_tgt(self, as_rep, enc_as_rep_part, override_pp = True): #from AS_REP \"\"\" Creates", "self.tag t += 'taglen: %s\\n' % self.taglen t += 'tagdata:", "b'' return o def to_asn1(self): return self.data def to_string(self): return", "basically the native representation of the asn1 encoded AS_REP data", "def to_asn1(self): return self.data def to_string(self): return self.data.decode() @staticmethod def", "the native representation of the asn1 encoded TGS_REP data when", "t += self.second_ticket.to_bytes() return t class Keyblock: def __init__(self): self.keytype", "reader.tell() < eof: cred = Credential.parse(reader) if not (len(cred.server.components) >", "None @staticmethod def from_asn1(enc_as_rep_part): t = Times() t.authtime = dt_to_kerbtime(enc_as_rep_part['authtime'])", "enc_part = EncryptedData({'etype': 1, 'cipher': b''}) tgt_rep = {} tgt_rep['pvno']", "@staticmethod def parse(data): \"\"\" returns a list of header tags", "doesn't do decryption of the encrypted part of the tgs_rep", "empty == False: self.__setup() def __setup(self): self.file_format_version = 0x0504 header", "c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], 
ticket_info['prealm']) if override_pp == True: self.primary_principal =", "= Keyblock.parse(reader) c.time = Times.parse(reader) c.is_skey = int.from_bytes(reader.read(1), byteorder='big', signed=False)", "for a lot of files being generated. directory_path: str the", "= {} krbcredinfo['key'] = EncryptionKey(self.key.to_asn1()) krbcredinfo['prealm'] = self.client.realm.to_string() krbcredinfo['pname'] =", "reader.tell() reader.seek(-1,2) eof = reader.tell() reader.seek(pos,0) while reader.tell() < eof:", "file \"\"\" c = Credential() c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm']) if", "= None @staticmethod def parse(reader): d = DateTime() d.time_offset =", "None self.etype = None self.keylen = None self.keyvalue = None", "krbcredinfo['flags'] = core.IntegerBitString(self.tktflags).cast(TicketFlags) if self.time.authtime != 0: #this parameter is", "Writes the contents of the CCACHE object to a file", "and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper(): logger.debug('SNAME contains the realm as well,", "header already present \"\"\" def __init__(self, empty = False): self.file_format_version", "all_hashes: overrides the encryption type filtering and returns hash for", "@staticmethod def parse(reader): a = Authdata() a.authtype = int.from_bytes(reader.read(2), byteorder='big',", "miserably :( if len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper():", "tgs_encryption_type = int(res['enc-part']['etype']) t = len(res['sname']['name-string']) if t == 1:", "byteorder='big', signed=False) t += self.etype.to_bytes(2, byteorder='big', signed=False) t += self.keylen.to_bytes(2,", "self.authtime.to_bytes(4, byteorder='big', signed=False) t += self.starttime.to_bytes(4, byteorder='big', signed=False) t +=", "k.keylen = len(data['keyvalue']) k.keyvalue = data['keyvalue'] return k def to_asn1(self):", "self.time.renew_till != 0 else 'N/A', ] def 
to_bytes(self): t =", "a CCACHE object \"\"\" with open(filename, 'rb') as f: return", "c.ticket = CCACHEOctetString.from_asn1(ticket['enc-part']['cipher']) c.second_ticket = CCACHEOctetString.empty() return c @staticmethod def", "= int.from_bytes(reader.read(2), byteorder='big', signed=False) k.keyvalue = reader.read(k.keylen) return k def", "which ticket are AP_REP we check for the server principal", "f.read() return CCACHE.from_kirbi(kirbidata) @staticmethod def from_kirbidir(directory_path): \"\"\" Iterates trough all", "= None self.num_address = None self.addrs = [] self.num_authdata =", "o = CCACHEOctetString() o.length = 0 o.data = b'' return", "signed=False) o.data = reader.read(o.length) return o def to_bytes(self): if isinstance(self.data,str):", "sure k.keylen = len(data['keyvalue']) k.keyvalue = data['keyvalue'] return k def", "self.taglen = None self.tagdata = None @staticmethod def parse(data): \"\"\"", "reader.read(h.taglen) headers.append(h) return headers def to_bytes(self): t = self.tag.to_bytes(2, byteorder='big',", "%s\\n' % self.taglen t += 'tagdata: %s\\n' % self.tagdata return", "p def to_bytes(self): t = self.name_type.to_bytes(4, byteorder='big', signed=False) t +=", "len(self.data) t = len(self.data).to_bytes(4, byteorder='big', signed=False) t += self.data return", "signed=False) t.starttime = int.from_bytes(reader.read(4), byteorder='big', signed=False) t.endtime = int.from_bytes(reader.read(4), byteorder='big',", "return tgt_rep, t def to_tgs(self): \"\"\" Returns the native format", "0 c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump()) c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c) def add_tgs(self,", "<NAME> (@skelsec) # import os import io import datetime import", "t += self.num_address.to_bytes(4, byteorder='big', signed=False) for addr in self.addrs: t", "from_kirbidir(directory_path): \"\"\" Iterates trough all .kirbi files in a given", "+= header.to_bytes() #self.headerlen = 1 #size of 
the entire header", "Keyblock: def __init__(self): self.keytype = None self.etype = None self.keylen", "if cred.server.to_string(separator='/').lower().find('krbtgt') != -1: tgt = [cred.to_tgt(), cred.time] tgts.append(tgt) return", "o.length = int.from_bytes(reader.read(4), byteorder='big', signed=False) o.data = reader.read(o.length) return o", "return t class Address: def __init__(self): self.addrtype = None self.addrdata", "or all_hashes == True: hashes.append(cred.to_hash()) return hashes @staticmethod def parse(reader):", "tgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native tgt_rep['enc-part'] = enc_part.native t = EncryptionKey(self.key.to_asn1()).native return", "byteorder='big', signed=False) a.authdata = CCACHEOctetString.parse(reader) return a def to_bytes(self): t", "len(principal['name-string']) p.realm = CCACHEOctetString.from_string(realm) for comp in principal['name-string']: p.components.append(CCACHEOctetString.from_asn1(comp)) return", "returns hash for all tickets \"\"\" hashes = [] for", "return '$krb5tgs$%s$%s$%s$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() ) else: tgs_checksum =", "Header.parse(reader.read(hdr_size)) #c.headerlen = #for i in range(c.headerlen): # c.headers.append(Header.parse(reader)) c.primary_principal", "= KrbCredInfo ### c = Credential() c.client = CCACHEPrincipal.from_asn1(data['pname'], data['prealm'])", "files in a given directory and converts all of them", "sure! 
c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata = 0", "the encrypted part of the tgs_rep object, it is expected", "@staticmethod def from_kirbi(kirbidata): kirbi = KRBCRED.load(kirbidata).native cc = CCACHE() cc.add_kirbi(kirbi)", "c.authdata.append(Authdata.parse(reader)) c.ticket = CCACHEOctetString.parse(reader) c.second_ticket = CCACHEOctetString.parse(reader) return c @staticmethod", "t @staticmethod def dummy_time(start= datetime.datetime.now(datetime.timezone.utc)): t = Times() t.authtime =", "cred in self.credentials: kirbi, filename = cred.to_kirbi() filename = '%s.kirbi'", "native format (dict). To determine which ticket are AP_REP we", "t = len(self.data).to_bytes(4, byteorder='big', signed=False) t += self.data return t", "byteorder='big', signed=False) k.etype = int.from_bytes(reader.read(2), byteorder='big', signed=False) k.keylen = int.from_bytes(reader.read(2),", "[] self.num_authdata = None self.authdata = [] self.ticket = None", "to_bytes(self): t = self.name_type.to_bytes(4, byteorder='big', signed=False) t += len(self.components).to_bytes(4, byteorder='big',", "0 t.endtime = dt_to_kerbtime(enc_as_rep_part['endtime']) \\ if 'endtime' in enc_as_rep_part and", "if t == 1: tgs_name_string = res['sname']['name-string'][0] else: tgs_name_string =", "with open(filepath, 'wb') as o: o.write(kirbi.dump()) @staticmethod def from_file(filename): \"\"\"", "CCACHEPrincipal.parse(reader) c.server = CCACHEPrincipal.parse(reader) c.key = Keyblock.parse(reader) c.time = Times.parse(reader)", "self.endtime.to_bytes(4, byteorder='big', signed=False) t += self.renew_till.to_bytes(4, byteorder='big', signed=False) return t", "type 23 (which is RC4) all_hashes: overrides the encryption type", "sure! 
c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata = 0", "self.headers.append(header) #t_hdr = b'' #for header in self.headers: # t_hdr", "signed=False) return t class Address: def __init__(self): self.addrtype = None", "== True: self.primary_principal = c.client c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm']) c.time", "of the entire header in bytes, encoded in 2 byte", "sends upon a succsessful TGT request. This function doesn't do", "header in self.headers: t+= '%s\\n' % header t+= 'primary_principal :", "= Times.from_asn1(enc_as_rep_part) c.key = Keyblock.from_asn1(enc_as_rep_part['key']) c.is_skey = 0 #not sure!", "return p def to_string(self, separator='-'): return separator.join([c.to_string() for c in", "= [] if empty == False: self.__setup() def __setup(self): self.file_format_version", "self.time.starttime != 0: krbcredinfo['starttime'] = datetime.datetime.fromtimestamp(self.time.starttime, datetime.timezone.utc) if self.time.endtime !=", "Ticket.load(self.ticket.to_asn1()).native tgt_rep['enc-part'] = enc_part.native t = EncryptionKey(self.key.to_asn1()).native return tgt_rep, t", "part of the as_rep object, it is expected that the", "int.from_bytes(reader.read(4), byteorder='big', signed=False) o.data = reader.read(o.length) return o def to_bytes(self):", "return t def to_bytes(self): t = self.authtime.to_bytes(4, byteorder='big', signed=False) t", "= int.from_bytes(reader.read(2), byteorder='big', signed=False) a.addrdata = CCACHEOctetString.parse(reader) return a def", "byteorder='big', signed=False) for _ in range(c.num_authdata): c.authdata.append(Authdata.parse(reader)) c.ticket = CCACHEOctetString.parse(reader)", "self.addrdata.to_bytes() return t class Authdata: def __init__(self): self.authtype = None", "-mostly static- you'd need to init this object with empty", "True: hashes.append(cred.to_hash()) return hashes @staticmethod def 
parse(reader): c = CCACHE(True)", "c.file_format_version = int.from_bytes(reader.read(2), byteorder='big', signed=False) hdr_size = int.from_bytes(reader.read(2), byteorder='big', signed=False)", "+= 'taglen: %s\\n' % self.taglen t += 'tagdata: %s\\n' %", "object \"\"\" with open(filename, 'rb') as f: return CCACHE.parse(f) def", "'renew_till' in enc_as_rep_part and enc_as_rep_part['renew_till'] else 0 return t @staticmethod", "t = EncryptionKey(self.key.to_asn1()).native return tgt_rep, t def to_kirbi(self): filename =", "%s\\n' % self.tag t += 'taglen: %s\\n' % self.taglen t", "CCACHEOctetString.empty() return c @staticmethod def parse(reader): c = Credential() c.client", "= reader.read(k.keylen) return k def to_bytes(self): t = self.keytype.to_bytes(2, byteorder='big',", "signed=False) c.num_address = int.from_bytes(reader.read(4), byteorder='big', signed=False) for _ in range(c.num_address):", "native representation of the asn1 encoded TGS_REP data when the", "# http://repo.or.cz/w/krb5dissect.git/blob_plain/HEAD:/ccache.txt class Header: def __init__(self): self.tag = None self.taglen", "= reader.tell() reader.seek(-1,2) eof = reader.tell() reader.seek(pos,0) while reader.tell() <", "self.credentials: kirbi, filename = cred.to_kirbi() filename = '%s.kirbi' % filename.replace('..','!')", "EncryptionKey native format \"\"\" enc_part = EncryptedData({'etype': 1, 'cipher': b''})", "from_kirbi(kirbidata): kirbi = KRBCRED.load(kirbidata).native cc = CCACHE() cc.add_kirbi(kirbi) return cc", "byteorder='big', signed=False) t += self.tagdata return t def __str__(self): t", "### c = Credential() c.client = CCACHEPrincipal.from_asn1(data['pname'], data['prealm']) c.server =", "byteorder='big', signed=False) t += self.keyvalue return t class Times: def", "[ '%s@%s' % (self.client.to_string(separator='/'), self.client.realm.to_string()), '%s@%s' % (self.server.to_string(separator='/'), self.server.realm.to_string()), 
datetime.datetime.fromtimestamp(self.time.starttime).isoformat()", "len(t_hdr).to_bytes(2, byteorder='big', signed=False) t += t_hdr t += self.primary_principal.to_bytes() for", "None self.taglen = None self.tagdata = None @staticmethod def parse(data):", "parse(data): \"\"\" returns a list of header tags \"\"\" reader", "+ datetime.timedelta(days=1)) t.renew_till = dt_to_kerbtime(start + datetime.timedelta(days=2)) return t @staticmethod", "signed=False) hdr_size = int.from_bytes(reader.read(2), byteorder='big', signed=False) c.headers = Header.parse(reader.read(hdr_size)) #c.headerlen", "krbcredinfo['authtime'] = datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc) if self.time.starttime != 0: krbcredinfo['starttime'] =", "datetime.timezone.utc) krbcredinfo['srealm'] = self.server.realm.to_string() krbcredinfo['sname'] = self.server.to_asn1()[0] enc_krbcred = {}", "data = KrbCredInfo ### c = Credential() c.client = CCACHEPrincipal.from_asn1(data['pname'],", "if 'endtime' in enc_as_rep_part and enc_as_rep_part['endtime'] else 0 t.renew_till =", "self.headers: # t_hdr += header.to_bytes() #self.headerlen = 1 #size of", "t += self.ticket.to_bytes() t += self.second_ticket.to_bytes() return t class Keyblock:", "signed=False) t += self.etype.to_bytes(2, byteorder='big', signed=False) t += self.keylen.to_bytes(2, byteorder='big',", "cred.to_bytes() return t @staticmethod def from_kirbifile(kirbi_filename): kf_abs = os.path.abspath(kirbi_filename) kirbidata", "c = Credential() c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm']) if override_pp ==", "int.from_bytes(reader.read(4), byteorder='big', signed=False) for _ in range(c.num_address): c.addrs.append(Address.parse(reader)) c.num_authdata =", "[cred.to_tgt(), cred.time] tgts.append(tgt) return tgts def get_all_tgs(self): tgss = []", "int.from_bytes(reader.read(4), byteorder='big', signed=False) return d def to_bytes(self): t = self.time_offset.to_bytes(4,", "the CCACHE 
object to a file \"\"\" with open(filename, 'wb')", "0 else 'N/A', ] def to_bytes(self): t = self.client.to_bytes() t", "%s\\n' % self.file_format_version for header in self.headers: t+= '%s\\n' %", "self.data = self.data.encode() self.length = len(self.data) t = len(self.data).to_bytes(4, byteorder='big',", "c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata = 0 c.ticket", "k.etype = int.from_bytes(reader.read(2), byteorder='big', signed=False) k.keylen = int.from_bytes(reader.read(2), byteorder='big', signed=False)", "all_hashes = False): \"\"\" Returns a list of hashes in", "o.data = reader.read(o.length) return o def to_bytes(self): if isinstance(self.data,str): self.data", "def dummy(): p = CCACHEPrincipal() p.name_type = 1 p.num_components =", "__init__(self): self.authtype = None self.authdata = None @staticmethod def parse(reader):", "hashes.append(cred.to_hash()) return hashes @staticmethod def parse(reader): c = CCACHE(True) c.file_format_version", "enc_as_rep_part['starttime'] else 0 t.endtime = dt_to_kerbtime(enc_as_rep_part['endtime']) \\ if 'endtime' in", "= CCACHEOctetString() o.length = len(data) if isinstance(data,str): o.data = data.encode()", "= [] for cred in self.credentials: if cred.server.to_string(separator='/').lower().find('krbtgt') != -1:", "credential object in the CCACHE object to the kirbi file", "k def to_bytes(self): t = self.keytype.to_bytes(2, byteorder='big', signed=False) t +=", "TGT This function doesn't do decryption of the encrypted part", "= None self.usec_offset = None @staticmethod def parse(reader): d =", "= EncKrbCredPart.load(krbcred['enc-part']['cipher']).native ticket_info = enc_credinfo['ticket-info'][0] \"\"\" if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc):", "byteorder='big', signed=False) return d def to_bytes(self): t = self.time_offset.to_bytes(4, byteorder='big',", "adds to the ccache file The TGT is basically the", 
"else 'N/A', datetime.datetime.fromtimestamp(self.time.renew_till).isoformat() if self.time.renew_till != 0 else 'N/A', ]", "CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump()) c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c) def add_kirbi(self, krbcred, override_pp =", "self.time.endtime != 0 else 'N/A', datetime.datetime.fromtimestamp(self.time.renew_till).isoformat() if self.time.renew_till != 0", "#from AS_REP \"\"\" Creates credential object from the TGT and", "dt_to_kerbtime(enc_as_rep_part['renew_till']) \\ if 'renew_till' in enc_as_rep_part and enc_as_rep_part['renew_till'] else 0", "include_expired is forcing me to add it to cache! This", "% (self.client.to_string(separator='/'), self.client.realm.to_string()), '%s@%s' % (self.server.to_string(separator='/'), self.server.realm.to_string()), datetime.datetime.fromtimestamp(self.time.starttime).isoformat() if self.time.starttime", "TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump())", "= int.from_bytes(reader.read(2), byteorder='big', signed=False) k.etype = int.from_bytes(reader.read(2), byteorder='big', signed=False) k.keylen", "enc_as_rep_part['renew_till'] else 0 return t @staticmethod def dummy_time(start= datetime.datetime.now(datetime.timezone.utc)): t", "expired, skipping') return \"\"\" c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm']) if override_pp", "self.num_components = None self.realm = None self.components = [] @staticmethod", "4 additional weirdness!!!! 
#if sname name-string contains a realm as", "tgts.append(tgt) return tgts def get_all_tgs(self): tgss = [] for cred", "= Credential() c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm']) if override_pp == True:", "Returns a list of hashes in hashcat-firendly format for tickets", "native format of an AS_REP message and the sessionkey in", "0x0504 header = Header() header.tag = 1 header.taglen = 8", "c.client = CCACHEPrincipal.parse(reader) c.server = CCACHEPrincipal.parse(reader) c.key = Keyblock.parse(reader) c.time", "'name-string': [name.to_string() for name in self.components]} return t, self.realm.to_string() @staticmethod", "= CCACHEPrincipal.from_asn1(t, ticket_info['srealm']) else: c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm']) c.time =", "datetime.datetime.fromtimestamp(self.time.starttime).isoformat() if self.time.starttime != 0 else 'N/A', datetime.datetime.fromtimestamp(self.time.endtime).isoformat() if self.time.endtime", "include_expired == True: logging.debug('This ticket has most likely expired, but", "present \"\"\" def __init__(self, empty = False): self.file_format_version = None", "parameter is not mandatory, and sometimes it's not present krbcredinfo['renew-till']", "= None self.num_components = None self.realm = None self.components =", "+= self.keyvalue return t class Times: def __init__(self): self.authtime =", "os import io import datetime import glob import hashlib from", "signed=False) t.endtime = int.from_bytes(reader.read(4), byteorder='big', signed=False) t.renew_till = int.from_bytes(reader.read(4), byteorder='big',", "def parse(reader): c = Credential() c.client = CCACHEPrincipal.parse(reader) c.server =", "CCACHE object to the kirbi file format used by mimikatz.", "big-endian unsigned int self.primary_principal = CCACHEPrincipal.dummy() def __str__(self): t =", "@staticmethod def parse(reader): a = Address() a.addrtype = int.from_bytes(reader.read(2), byteorder='big',", "def 
to_file(self, filename): \"\"\" Writes the contents of the CCACHE", "c in self.components]) def to_asn1(self): t = {'name-type': self.name_type, 'name-string':", "\"\"\" c = Credential() c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm']) if override_pp", "reader = io.BytesIO(data) headers = [] while reader.tell() < len(data):", "realm): p = CCACHEPrincipal() p.name_type = principal['name-type'] p.num_components = len(principal['name-string'])", "self.primary_principal = CCACHEPrincipal.dummy() def __str__(self): t = '== CCACHE ==\\n'", "return self.data.decode() @staticmethod def from_string(data): o = CCACHEOctetString() o.data =", "all .kirbi files in a given directory and converts all", "@staticmethod def parse(reader): c = Credential() c.client = CCACHEPrincipal.parse(reader) c.server", "self.client.to_asn1()[0] tgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native tgt_rep['enc-part'] = enc_part.native t = EncryptionKey(self.key.to_asn1()).native", "c.credentials.append(cred) return c def to_bytes(self): t = self.file_format_version.to_bytes(2, byteorder='big', signed=False)", "cred in self.credentials: res = Ticket.load(cred.ticket.to_asn1()).native if int(res['enc-part']['etype']) == 23", "self.keyvalue return t @staticmethod def parse(reader): k = Keyblock() k.keytype", "= EncryptionKey(self.key.to_asn1()) krbcredinfo['prealm'] = self.client.realm.to_string() krbcredinfo['pname'] = self.client.to_asn1()[0] krbcredinfo['flags'] =", "for addr in self.addrs: t += addr.to_bytes() t += self.num_authdata.to_bytes(4,", "to_kirbidir(self, directory_path): \"\"\" Converts all credential object in the CCACHE", "empty = False): self.file_format_version = None #0x0504 self.headers = []", "self.renew_till = None @staticmethod def from_asn1(enc_as_rep_part): t = Times() t.authtime", "self.credentials: if cred.server.to_string(separator='/').lower().find('krbtgt') != -1: tgt = [cred.to_tgt(), cred.time] tgts.append(tgt)", "@staticmethod def 
parse(reader): t = Times() t.authtime = int.from_bytes(reader.read(4), byteorder='big',", "self.num_authdata.to_bytes(4, byteorder='big', signed=False) for ad in self.authdata: t += ad.to_bytes()", "o def to_bytes(self): if isinstance(self.data,str): self.data = self.data.encode() self.length =", "for cred in self.credentials: t += cred.to_bytes() return t @staticmethod", "= None self.taglen = None self.tagdata = None @staticmethod def", "TGS_REP data when the user requests a tgs to a", "= None self.key = None self.time = None self.is_skey =", "signed=False) t += len(self.components).to_bytes(4, byteorder='big', signed=False) t += self.realm.to_bytes() for", "% (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() ) def to_tgt(self): \"\"\" Returns the", "t += self.keylen.to_bytes(2, byteorder='big', signed=False) t += self.keyvalue return t", "self.client.realm.to_string() krbcredinfo['pname'] = self.client.to_asn1()[0] krbcredinfo['flags'] = core.IntegerBitString(self.tktflags).cast(TicketFlags) if self.time.authtime !=", "ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc): if include_expired == True: logging.debug('This ticket has", "return t @staticmethod def parse(reader): t = Times() t.authtime =", "t class CCACHE: \"\"\" As the header is rarely used", "= CCACHEOctetString.parse(reader) return a def to_bytes(self): t = self.addrtype.to_bytes(2, byteorder='big',", "t class Credential: def __init__(self): self.client = None self.server =", "service \"\"\" tgts = [] for cred in self.credentials: if", "= CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm']) if override_pp == True: self.primary_principal = c.client", "0: krbcredinfo['endtime'] = datetime.datetime.fromtimestamp(self.time.endtime, datetime.timezone.utc) if self.time.renew_till != 0: #this", "krb5_pvno tgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value tgt_rep['crealm'] = self.server.realm.to_string() tgt_rep['cname'] = 
self.client.to_asn1()[0]", "t += self.time.to_bytes() t += self.is_skey.to_bytes(1, byteorder='big', signed=False) t +=", "Keyblock() k.keytype = int.from_bytes(reader.read(2), byteorder='big', signed=False) k.etype = int.from_bytes(reader.read(2), byteorder='big',", "type filtering and returns hash for all tickets \"\"\" hashes", "None self.credentials = [] if empty == False: self.__setup() def", "def __setup(self): self.file_format_version = 0x0504 header = Header() header.tag =", "= enc_part.native t = EncryptionKey(self.key.to_asn1()).native return tgt_rep, t def to_tgs(self):", "Keyblock.parse(reader) c.time = Times.parse(reader) c.is_skey = int.from_bytes(reader.read(1), byteorder='big', signed=False) c.tktflags", "signed=False) t += self.num_address.to_bytes(4, byteorder='big', signed=False) for addr in self.addrs:", "+= len(self.tagdata).to_bytes(2, byteorder='big', signed=False) t += self.tagdata return t def", "class Authdata: def __init__(self): self.authtype = None self.authdata = None", "tgs_checksum.hex(), tgs_encrypted_data2.hex() ) def to_tgt(self): \"\"\" Returns the native format", "override_pp == True: self.primary_principal = c.client #yaaaaay 4 additional weirdness!!!!", "in self.credentials: if cred.server.to_string(separator='/').lower().find('krbtgt') != -1: tgt = [cred.to_tgt(), cred.time]", "mimikatz. 
The kirbi file format supports one credential per file,", "t = self.authtype.to_bytes(2, byteorder='big', signed=False) t += self.authdata.to_bytes() return t", "= int.from_bytes(reader.read(2), byteorder='big', signed=False) k.keylen = int.from_bytes(reader.read(2), byteorder='big', signed=False) k.keyvalue", "self.server = None self.key = None self.time = None self.is_skey", "\\ if 'renew_till' in enc_as_rep_part and enc_as_rep_part['renew_till'] else 0 return", "c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one ticket per file", "hashlib from pycquery_krb.protocol.asn1_structs import Ticket, EncryptedData, \\ krb5_pvno, KrbCredInfo, EncryptionKey,", "CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm']) c.time = Times.from_asn1(ticket_info) c.key = Keyblock.from_asn1(ticket_info['key']) c.is_skey =", "t_hdr += header.to_bytes() #self.headerlen = 1 #size of the entire", "to be the kerberos service \"\"\" tgts = [] for", "o.data = b'' return o def to_asn1(self): return self.data def", "of header tags \"\"\" reader = io.BytesIO(data) headers = []", "datetime.timedelta(days=1)) t.renew_till = dt_to_kerbtime(start + datetime.timedelta(days=2)) return t @staticmethod def", "AS_REP tickets in native format (dict). 
To determine which ticket", "c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm']) c.time = Times.from_asn1(enc_tgs_rep_part) c.key = Keyblock.from_asn1(enc_tgs_rep_part['key'])", "do decryption of the encrypted part of the as_rep object,", "o.data = data return o @staticmethod def parse(reader): o =", "self.keytype = None self.etype = None self.keylen = None self.keyvalue", ": %s\\n' % self.primary_principal return t def add_tgt(self, as_rep, enc_as_rep_part,", "kirbi = KRBCRED.load(kirbidata).native cc = CCACHE() cc.add_kirbi(kirbi) return cc def", "you'd need to init this object with empty = True", "tgs_name_string = res['sname']['name-string'][0] else: tgs_name_string = res['sname']['name-string'][1] tgs_realm = res['realm']", "= Credential() c.client = CCACHEPrincipal.from_asn1(data['pname'], data['prealm']) c.server = CCACHEPrincipal.from_asn1(data['sname'], data['srealm'])", "= CCACHEPrincipal.parse(reader) c.key = Keyblock.parse(reader) c.time = Times.parse(reader) c.is_skey =", "self.authtime = None self.starttime = None self.endtime = None self.renew_till", "filename.replace('..','!') filepath = os.path.join(kf_abs, filename) with open(filepath, 'wb') as o:", "else: logging.debug('This ticket has most likely expired, skipping') return \"\"\"", "signed=False) for ad in self.authdata: t += ad.to_bytes() t +=", "as f: kirbidata = f.read() return CCACHE.from_kirbi(kirbidata) @staticmethod def from_kirbidir(directory_path):", "dt_to_kerbtime(start + datetime.timedelta(days=2)) return t @staticmethod def parse(reader): t =", "= c.client #yaaaaay 4 additional weirdness!!!! 
#if sname name-string contains", "with encryption type 23 (which is RC4) all_hashes: overrides the", "cred.time] tgts.append(tgt) return tgts def get_all_tgs(self): tgss = [] for", "= Authdata() a.authtype = int.from_bytes(reader.read(2), byteorder='big', signed=False) a.authdata = CCACHEOctetString.parse(reader)", "@staticmethod def from_kirbifile(kirbi_filename): kf_abs = os.path.abspath(kirbi_filename) kirbidata = None with", "of the asn1 encoded TGS_REP data when the user requests", "c.num_address = 0 c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump()) c.second_ticket", "= Header.parse(reader.read(hdr_size)) #c.headerlen = #for i in range(c.headerlen): # c.headers.append(Header.parse(reader))", "class Keyblock: def __init__(self): self.keytype = None self.etype = None", "ticket has most likely expired, but include_expired is forcing me", "== 'krb5_ccache_conf_data' and cred.server.realm.to_string() == 'X-CACHECONF:'): c.credentials.append(cred) return c def", "def from_asn1(ticket, data): ### # data = KrbCredInfo ### c", "requests a tgs to a specific service principal with a", "logging.debug('This ticket has most likely expired, but include_expired is forcing", "= b'' for header in self.headers: t_hdr += header.to_bytes() t", "o = CCACHEOctetString() o.length = int.from_bytes(reader.read(4), byteorder='big', signed=False) o.data =", "= Credential() enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native ticket_info = enc_credinfo['ticket-info'][0] \"\"\" if", "supplied in enc_as_rep_part override_pp: bool to determine if client principal", "\"\"\" Parses the ccache file and returns a CCACHE object", "c.key = Keyblock.from_asn1(enc_tgs_rep_part['key']) c.is_skey = 0 #not sure! c.tktflags =", "= CCACHEPrincipal.from_asn1(data['pname'], data['prealm']) c.server = CCACHEPrincipal.from_asn1(data['sname'], data['srealm']) c.key = Keyblock.from_asn1(data['key'])", "0 #not sure! 
c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata", "23 (which is RC4) all_hashes: overrides the encryption type filtering", "the sessionkey in EncryptionKey native format \"\"\" enc_part = EncryptedData({'etype':", "self.client.to_asn1()[0] krbcredinfo['flags'] = core.IntegerBitString(self.tktflags).cast(TicketFlags) if self.time.authtime != 0: #this parameter", "ticket has most likely expired, skipping') return \"\"\" c.client =", "o.data = data.encode() o.length = len(o.data) return o @staticmethod def", "pos = reader.tell() reader.seek(-1,2) eof = reader.tell() reader.seek(pos,0) while reader.tell()", "= {} t['keytype'] = self.keytype t['keyvalue'] = self.keyvalue return t", "in range(1): p.components.append(CCACHEOctetString.from_string('kerbi')) return p def to_string(self, separator='-'): return separator.join([c.to_string()", "= data.encode() else: o.data = data return o @staticmethod def", "if self.time.authtime != 0: #this parameter is not mandatory, and", "__init__(self): self.name_type = None self.num_components = None self.realm = None", "TicketFlags(data['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(ticket['enc-part']['cipher'])", "in 2 byte big-endian unsigned int self.primary_principal = CCACHEPrincipal.dummy() def", "__init__(self): self.addrtype = None self.addrdata = None @staticmethod def parse(reader):", "0 else 'N/A', datetime.datetime.fromtimestamp(self.time.renew_till).isoformat() if self.time.renew_till != 0 else 'N/A',", "parse(reader): t = Times() t.authtime = int.from_bytes(reader.read(4), byteorder='big', signed=False) t.starttime", "c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump()) c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c) def add_tgs(self, tgs_rep,", "not (len(cred.server.components) > 0 and cred.server.components[0].to_string() == 
'krb5_ccache_conf_data' and cred.server.realm.to_string()", "and enc_as_rep_part['endtime'] else 0 t.renew_till = dt_to_kerbtime(enc_as_rep_part['renew_till']) \\ if 'renew_till'", "enc_as_rep_part['endtime'] else 0 t.renew_till = dt_to_kerbtime(enc_as_rep_part['renew_till']) \\ if 'renew_till' in", "= None self.authdata = [] self.ticket = None self.second_ticket =", "% self.tag t += 'taglen: %s\\n' % self.taglen t +=", "EncryptionType.NULL.value, 'cipher': EncKrbCredPart(enc_krbcred).dump()}) kirbi = KRBCRED(krbcred) return kirbi, filename @staticmethod", "h.tag = int.from_bytes(reader.read(2), byteorder='big', signed=False) h.taglen = int.from_bytes(reader.read(2), byteorder='big', signed=False)", "def empty(): o = CCACHEOctetString() o.length = 0 o.data =", "return \"\"\" c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm']) if override_pp == True:", "to_string(self): return self.data.decode() @staticmethod def from_string(data): o = CCACHEOctetString() o.data", "self.tag.to_bytes(2, byteorder='big', signed=False) t += len(self.tagdata).to_bytes(2, byteorder='big', signed=False) t +=", "with empty = True to get an object without header", "@staticmethod def parse(reader): o = CCACHEOctetString() o.length = int.from_bytes(reader.read(4), byteorder='big',", "self.name_type, 'name-string': [name.to_string() for name in self.components]} return t, self.realm.to_string()", "import hashlib from pycquery_krb.protocol.asn1_structs import Ticket, EncryptedData, \\ krb5_pvno, KrbCredInfo,", "= b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' self.headers.append(header) #t_hdr = b'' #for header in self.headers:", "#header.tagdata = b'\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x00' header.tagdata = b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' self.headers.append(header) #t_hdr = b''", "t += ad.to_bytes() t += self.ticket.to_bytes() t += self.second_ticket.to_bytes() return", "tickets \"\"\" hashes = [] for cred in self.credentials: res", "= 
int.from_bytes(reader.read(2), byteorder='big', signed=False) h.taglen = int.from_bytes(reader.read(2), byteorder='big', signed=False) h.tagdata", "t class Keyblock: def __init__(self): self.keytype = None self.etype =", "else 0 return t @staticmethod def dummy_time(start= datetime.datetime.now(datetime.timezone.utc)): t =", "{} krbcred['pvno'] = krb5_pvno krbcred['msg-type'] = MESSAGE_TYPE.KRB_CRED.value krbcred['tickets'] = [Ticket.load(self.ticket.to_asn1())]", "return t, self.realm.to_string() @staticmethod def parse(reader): p = CCACHEPrincipal() p.name_type", "= Header() h.tag = int.from_bytes(reader.read(2), byteorder='big', signed=False) h.taglen = int.from_bytes(reader.read(2),", "override_pp: bool to determine if client principal should be used", "self.keylen = None self.keyvalue = None @staticmethod def from_asn1(data): k", "datetime import glob import hashlib from pycquery_krb.protocol.asn1_structs import Ticket, EncryptedData,", "decrypted XXX is supplied in enc_as_rep_part override_pp: bool to determine", "headers def to_bytes(self): t = self.tag.to_bytes(2, byteorder='big', signed=False) t +=", "for _ in range(1): p.components.append(CCACHEOctetString.from_string('kerbi')) return p def to_string(self, separator='-'):", "o = CCACHEOctetString() o.length = len(data) if isinstance(data,str): o.data =", "and enc_as_rep_part['renew_till'] else 0 return t @staticmethod def dummy_time(start= datetime.datetime.now(datetime.timezone.utc)):", "= reader.read(o.length) return o def to_bytes(self): if isinstance(self.data,str): self.data =", "self.realm.to_string() @staticmethod def parse(reader): p = CCACHEPrincipal() p.name_type = int.from_bytes(reader.read(4),", "@staticmethod def from_asn1(principal, realm): p = CCACHEPrincipal() p.name_type = principal['name-type']", "as f: kirbidata = f.read() kirbi = KRBCRED.load(kirbidata).native cc.add_kirbi(kirbi) return", "CCACHEOctetString.parse(reader) return a def to_bytes(self): t = self.addrtype.to_bytes(2, 
byteorder='big', signed=False)", "CCACHEOctetString: def __init__(self): self.length = None self.data = None @staticmethod", "t = Times() t.authtime = dt_to_kerbtime(start) t.starttime = dt_to_kerbtime(start )", "+= ad.to_bytes() t += self.ticket.to_bytes() t += self.second_ticket.to_bytes() return t", "= self.authtime.to_bytes(4, byteorder='big', signed=False) t += self.starttime.to_bytes(4, byteorder='big', signed=False) t", "\"\"\" c = Credential() c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm']) if override_pp", "krbcred = {} krbcred['pvno'] = krb5_pvno krbcred['msg-type'] = MESSAGE_TYPE.KRB_CRED.value krbcred['tickets']", "@staticmethod def from_file(filename): \"\"\" Parses the ccache file and returns", "a def to_bytes(self): t = self.authtype.to_bytes(2, byteorder='big', signed=False) t +=", "t.endtime = int.from_bytes(reader.read(4), byteorder='big', signed=False) t.renew_till = int.from_bytes(reader.read(4), byteorder='big', signed=False)", "CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump()) c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c) def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp", "cred.server.to_string(separator='/').lower().find('krbtgt') != -1: tgt = [cred.to_tgt(), cred.time] tgts.append(tgt) return tgts", "= core.IntegerBitString(self.tktflags).cast(TicketFlags) if self.time.authtime != 0: #this parameter is not", "tgss.append(cred.to_tgs()) return tgss def get_hashes(self, all_hashes = False): \"\"\" Returns", "== True: logging.debug('This ticket has most likely expired, but include_expired", "self.time_offset.to_bytes(4, byteorder='big', signed=False) t += self.usec_offset.to_bytes(4, byteorder='big', signed=False) return t", "c.num_address = 0 c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump()) c.second_ticket", "None self.second_ticket = None def to_hash(self): res = Ticket.load(self.ticket.to_asn1()).native tgs_encryption_type", 
"pycquery_krb import logger from asn1crypto import core # http://repo.or.cz/w/krb5dissect.git/blob_plain/HEAD:/ccache.txt class", "return self.data def to_string(self): return self.data.decode() @staticmethod def from_string(data): o", "t += com.to_bytes() return t class CCACHEOctetString: def __init__(self): self.length", "for the server principal to be the kerberos service \"\"\"", "reader.tell() reader.seek(pos,0) while reader.tell() < eof: cred = Credential.parse(reader) if", "MESSAGE_TYPE.KRB_AS_REP.value tgt_rep['crealm'] = self.server.realm.to_string() tgt_rep['cname'] = self.client.to_asn1()[0] tgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native", "!= 0: krbcredinfo['endtime'] = datetime.datetime.fromtimestamp(self.time.endtime, datetime.timezone.utc) if self.time.renew_till != 0:", "res['enc-part']['cipher'][-12:] tgs_encrypted_data2 = res['enc-part']['cipher'][:-12] return '$krb5tgs$%s$%s$%s$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex()", "the time not present krbcredinfo['authtime'] = datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc) if self.time.starttime", "1 #size of the entire header in bytes, encoded in", "trough all .kirbi files in a given directory and converts", "= {} tgt_rep['pvno'] = krb5_pvno tgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value tgt_rep['crealm'] =", "= int.from_bytes(reader.read(4), byteorder='big', signed=False) t.starttime = int.from_bytes(reader.read(4), byteorder='big', signed=False) t.endtime", "comp in principal['name-string']: p.components.append(CCACHEOctetString.from_asn1(comp)) return p @staticmethod def dummy(): p", "list of hashes in hashcat-firendly format for tickets with encryption", "t.endtime = dt_to_kerbtime(start + datetime.timedelta(days=1)) t.renew_till = dt_to_kerbtime(start + datetime.timedelta(days=2))", "int.from_bytes(reader.read(4), byteorder='big', signed=False) t.endtime = int.from_bytes(reader.read(4), 
byteorder='big', signed=False) t.renew_till =", "entire header in bytes, encoded in 2 byte big-endian unsigned", "{} krbcredinfo['key'] = EncryptionKey(self.key.to_asn1()) krbcredinfo['prealm'] = self.client.realm.to_string() krbcredinfo['pname'] = self.client.to_asn1()[0]", "tags \"\"\" reader = io.BytesIO(data) headers = [] while reader.tell()", "len(res['sname']['name-string']) if t == 1: tgs_name_string = res['sname']['name-string'][0] else: tgs_name_string", "Credential() c.client = CCACHEPrincipal.parse(reader) c.server = CCACHEPrincipal.parse(reader) c.key = Keyblock.parse(reader)", "filepath = os.path.join(kf_abs, filename) with open(filepath, 'wb') as o: o.write(kirbi.dump())", "self.time.starttime != 0 else 'N/A', datetime.datetime.fromtimestamp(self.time.endtime).isoformat() if self.time.endtime != 0", "datetime.datetime.fromtimestamp(self.time.endtime).isoformat() if self.time.endtime != 0 else 'N/A', datetime.datetime.fromtimestamp(self.time.renew_till).isoformat() if self.time.renew_till", "krbcred, override_pp = True, include_expired = False): c = Credential()", "CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm']) if override_pp == True: self.primary_principal = c.client #yaaaaay", "self.time_offset = None self.usec_offset = None @staticmethod def parse(reader): d", "f.read() kirbi = KRBCRED.load(kirbidata).native cc.add_kirbi(kirbi) return cc def to_kirbidir(self, directory_path):", "kerberos service \"\"\" tgts = [] for cred in self.credentials:", "res['sname']['name-string'][0] else: tgs_name_string = res['sname']['name-string'][1] tgs_realm = res['realm'] if tgs_encryption_type", "the tgs_rep object, it is expected that the decrypted XXX", "= int.from_bytes(reader.read(2), byteorder='big', signed=False) hdr_size = int.from_bytes(reader.read(2), byteorder='big', signed=False) c.headers", "headers.append(h) return headers def to_bytes(self): t = self.tag.to_bytes(2, byteorder='big', signed=False)", "None self.num_address = None 
self.addrs = [] self.num_authdata = None", "CCACHEOctetString.empty() self.credentials.append(c) @staticmethod def from_kirbi(kirbidata): kirbi = KRBCRED.load(kirbidata).native cc =", "datetime.datetime.now(datetime.timezone.utc): if include_expired == True: logging.debug('This ticket has most likely", "summary(self): return [ '%s@%s' % (self.client.to_string(separator='/'), self.client.realm.to_string()), '%s@%s' % (self.server.to_string(separator='/'),", "= None self.renew_till = None @staticmethod def from_asn1(enc_as_rep_part): t =", "glob import hashlib from pycquery_krb.protocol.asn1_structs import Ticket, EncryptedData, \\ krb5_pvno,", "def parse(reader): o = CCACHEOctetString() o.length = int.from_bytes(reader.read(4), byteorder='big', signed=False)", "c.second_ticket = CCACHEOctetString.empty() return c @staticmethod def parse(reader): c =", "Keyblock.from_asn1(enc_tgs_rep_part['key']) c.is_skey = 0 #not sure! c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native c.num_address", "= os.path.join(os.path.abspath(directory_path), '*.kirbi') for filename in glob.glob(dir_path): with open(filename, 'rb')", "tgt_rep, t def to_tgs(self): \"\"\" Returns the native format of", "a realm as well htne impacket will crash miserably :(", "c.primary_principal = CCACHEPrincipal.parse(reader) pos = reader.tell() reader.seek(-1,2) eof = reader.tell()", "0 return t @staticmethod def dummy_time(start= datetime.datetime.now(datetime.timezone.utc)): t = Times()", "a def to_bytes(self): t = self.addrtype.to_bytes(2, byteorder='big', signed=False) t +=", "parse(reader): c = Credential() c.client = CCACHEPrincipal.parse(reader) c.server = CCACHEPrincipal.parse(reader)", "+= self.tagdata return t def __str__(self): t = 'tag: %s\\n'", "all of them into one CCACHE object \"\"\" cc =", "return ['client','server','starttime','endtime','renew-till'] def summary(self): return [ '%s@%s' % (self.client.to_string(separator='/'), 
self.client.realm.to_string()),", "of the asn1 encoded AS_REP data that the AD sends", "Keyblock.from_asn1(data['key']) c.is_skey = 0 #not sure! c.tktflags = TicketFlags(data['flags']).cast(core.IntegerBitString).native c.num_address", "signed=False) for _ in range(c.num_authdata): c.authdata.append(Authdata.parse(reader)) c.ticket = CCACHEOctetString.parse(reader) c.second_ticket", "= dt_to_kerbtime(start + datetime.timedelta(days=1)) t.renew_till = dt_to_kerbtime(start + datetime.timedelta(days=2)) return", "self.keytype.to_bytes(2, byteorder='big', signed=False) t += self.etype.to_bytes(2, byteorder='big', signed=False) t +=", "c = CCACHE(True) c.file_format_version = int.from_bytes(reader.read(2), byteorder='big', signed=False) hdr_size =", "None def to_hash(self): res = Ticket.load(self.ticket.to_asn1()).native tgs_encryption_type = int(res['enc-part']['etype']) t", "of the as_rep object, it is expected that the decrypted", "sessionkey in EncryptionKey native format \"\"\" enc_part = EncryptedData({'etype': 1,", "None self.usec_offset = None @staticmethod def parse(reader): d = DateTime()", "os.path.join(kf_abs, filename) with open(filepath, 'wb') as o: o.write(kirbi.dump()) @staticmethod def", "a valid TGT This function doesn't do decryption of the", "#this parameter is not mandatory, and sometimes it's not present", "t += cred.to_bytes() return t @staticmethod def from_kirbifile(kirbi_filename): kf_abs =", "tgs_encryption_type == EncryptionType.AES256_CTS_HMAC_SHA1_96.value: tgs_checksum = res['enc-part']['cipher'][-12:] tgs_encrypted_data2 = res['enc-part']['cipher'][:-12] return", "c.client c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm']) c.time = Times.from_asn1(enc_tgs_rep_part) c.key =", "CCACHEOctetString.from_asn1(ticket['enc-part']['cipher']) c.second_ticket = CCACHEOctetString.empty() return c @staticmethod def parse(reader): c", "used -mostly static- you'd need to init this object with", "#size of the 
entire header in bytes, encoded in 2", "format of an AS_REP message and the sessionkey in EncryptionKey", "for comp in principal['name-string']: p.components.append(CCACHEOctetString.from_asn1(comp)) return p @staticmethod def dummy():", "hashes @staticmethod def parse(reader): c = CCACHE(True) c.file_format_version = int.from_bytes(reader.read(2),", "1 header.taglen = 8 #header.tagdata = b'\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x00' header.tagdata = b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'", "credential object from the TGS and adds to the ccache", "class CCACHE: \"\"\" As the header is rarely used -mostly", "format for tickets with encryption type 23 (which is RC4)", "= Times.from_asn1(enc_tgs_rep_part) c.key = Keyblock.from_asn1(enc_tgs_rep_part['key']) c.is_skey = 0 #not sure!", "determine which ticket are AP_REP we check for the server", "of the encrypted part of the tgs_rep object, it is", "'wb') as o: o.write(kirbi.dump()) @staticmethod def from_file(filename): \"\"\" Parses the", "TGSTicket2hashcat from pycquery_krb.protocol.constants import EncryptionType, MESSAGE_TYPE from pycquery_krb import logger", "int.from_bytes(reader.read(4), byteorder='little', signed=False) c.num_address = int.from_bytes(reader.read(4), byteorder='big', signed=False) for _", "enc_krbcred['ticket-info'] = [KrbCredInfo(krbcredinfo)] krbcred = {} krbcred['pvno'] = krb5_pvno krbcred['msg-type']", "None self.server = None self.key = None self.time = None", "EncKrbCredPart(enc_krbcred).dump()}) kirbi = KRBCRED(krbcred) return kirbi, filename @staticmethod def from_asn1(ticket,", "= Keyblock() k.keytype = int.from_bytes(reader.read(2), byteorder='big', signed=False) k.etype = int.from_bytes(reader.read(2),", "and the sessionkey in EncryptionKey native format \"\"\" enc_part =", "c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one", "% self.taglen t += 'tagdata: %s\\n' % self.tagdata return t", "o = CCACHEOctetString() 
o.data = data.encode() o.length = len(o.data) return", "byteorder='big', signed=False) t.starttime = int.from_bytes(reader.read(4), byteorder='big', signed=False) t.endtime = int.from_bytes(reader.read(4),", "== 23 or all_hashes == True: hashes.append(cred.to_hash()) return hashes @staticmethod", "!= 0 else 'N/A', datetime.datetime.fromtimestamp(self.time.endtime).isoformat() if self.time.endtime != 0 else", "a list of header tags \"\"\" reader = io.BytesIO(data) headers", "for header in self.headers: t_hdr += header.to_bytes() t += len(t_hdr).to_bytes(2,", "format (dict). To determine which ticket are AP_REP we check", "part of the tgs_rep object, it is expected that the", "= None self.is_skey = None self.tktflags = None self.num_address =", "name in self.components]} return t, self.realm.to_string() @staticmethod def parse(reader): p", "header.taglen = 8 #header.tagdata = b'\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x00' header.tagdata = b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' self.headers.append(header)", "= None self.realm = None self.components = [] @staticmethod def", "int.from_bytes(reader.read(4), byteorder='big', signed=False) p.realm = CCACHEOctetString.parse(reader) for _ in range(p.num_components):", "EncKrbCredPart.load(krbcred['enc-part']['cipher']).native ticket_info = enc_credinfo['ticket-info'][0] \"\"\" if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc): if", "CCACHEOctetString() o.data = data.encode() o.length = len(o.data) return o @staticmethod", "by mimikatz. 
The kirbi file format supports one credential per", "== 'X-CACHECONF:'): c.credentials.append(cred) return c def to_bytes(self): t = self.file_format_version.to_bytes(2,", "t.renew_till = dt_to_kerbtime(enc_as_rep_part['renew_till']) \\ if 'renew_till' in enc_as_rep_part and enc_as_rep_part['renew_till']", "def __str__(self): t = '== CCACHE ==\\n' t+= 'file_format_version :", "To determine which ticket are AP_REP we check for the", "if override_pp == True: self.primary_principal = c.client #yaaaaay 4 additional", "already present \"\"\" def __init__(self, empty = False): self.file_format_version =", "if tgs_encryption_type == EncryptionType.AES256_CTS_HMAC_SHA1_96.value: tgs_checksum = res['enc-part']['cipher'][-12:] tgs_encrypted_data2 = res['enc-part']['cipher'][:-12]", "2 byte big-endian unsigned int self.primary_principal = CCACHEPrincipal.dummy() def __str__(self):", "= None self.etype = None self.keylen = None self.keyvalue =", "= CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm']) if override_pp == True: self.primary_principal = c.client", "o def to_asn1(self): return self.data def to_string(self): return self.data.decode() @staticmethod", "p = CCACHEPrincipal() p.name_type = int.from_bytes(reader.read(4), byteorder='big', signed=False) p.num_components =", "c.key = Keyblock.from_asn1(enc_as_rep_part['key']) c.is_skey = 0 #not sure! c.tktflags =", "in self.components]) def to_asn1(self): t = {'name-type': self.name_type, 'name-string': [name.to_string()", "def __str__(self): t = 'tag: %s\\n' % self.tag t +=", "with open(filename, 'rb') as f: kirbidata = f.read() kirbi =", "a.addrtype = int.from_bytes(reader.read(2), byteorder='big', signed=False) a.addrdata = CCACHEOctetString.parse(reader) return a", "Creates credential object from the TGS and adds to the", "c.is_skey = 0 #not sure! 
c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native c.num_address =", "in self.headers: t_hdr += header.to_bytes() t += len(t_hdr).to_bytes(2, byteorder='big', signed=False)", "= self.file_format_version.to_bytes(2, byteorder='big', signed=False) t_hdr = b'' for header in", "kf_abs = os.path.abspath(directory_path) for cred in self.credentials: kirbi, filename =", "b'' for header in self.headers: t_hdr += header.to_bytes() t +=", "return t class DateTime: def __init__(self): self.time_offset = None self.usec_offset", "self.authtype = None self.authdata = None @staticmethod def parse(reader): a", "cc def to_kirbidir(self, directory_path): \"\"\" Converts all credential object in", "c.is_skey = 0 #not sure! c.tktflags = TicketFlags(data['flags']).cast(core.IntegerBitString).native c.num_address =", "byteorder='big', signed=False) t += self.usec_offset.to_bytes(4, byteorder='big', signed=False) return t class", "signed=False) return t class Credential: def __init__(self): self.client = None", "None self.addrdata = None @staticmethod def parse(reader): a = Address()", "c @staticmethod def summary_header(): return ['client','server','starttime','endtime','renew-till'] def summary(self): return [", "enc_as_rep_part and enc_as_rep_part['authtime'] else 0 t.starttime = dt_to_kerbtime(enc_as_rep_part['starttime']) \\ if", "enc_tgs_rep_part['srealm']) c.time = Times.from_asn1(enc_tgs_rep_part) c.key = Keyblock.from_asn1(enc_tgs_rep_part['key']) c.is_skey = 0", "skipping') return \"\"\" c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm']) if override_pp ==", "= [] self.ticket = None self.second_ticket = None def to_hash(self):", "None self.num_components = None self.realm = None self.components = []", "is the native representation of the asn1 encoded TGS_REP data", "t.renew_till = int.from_bytes(reader.read(4), byteorder='big', signed=False) return t def to_bytes(self): t", "pycquery_krb.protocol.constants import 
EncryptionType, MESSAGE_TYPE from pycquery_krb import logger from asn1crypto", "krbcred['enc-part'] = EncryptedData({'etype': EncryptionType.NULL.value, 'cipher': EncKrbCredPart(enc_krbcred).dump()}) kirbi = KRBCRED(krbcred) return", "self.renew_till.to_bytes(4, byteorder='big', signed=False) return t class Address: def __init__(self): self.addrtype", "{} enc_krbcred['ticket-info'] = [KrbCredInfo(krbcredinfo)] krbcred = {} krbcred['pvno'] = krb5_pvno", "t += self.etype.to_bytes(2, byteorder='big', signed=False) t += self.keylen.to_bytes(2, byteorder='big', signed=False)", "'file_format_version : %s\\n' % self.file_format_version for header in self.headers: t+=", "% filename.replace('..','!') filepath = os.path.join(kf_abs, filename) with open(filepath, 'wb') as", "= Times.parse(reader) c.is_skey = int.from_bytes(reader.read(1), byteorder='big', signed=False) c.tktflags = int.from_bytes(reader.read(4),", "self.ticket.to_bytes() t += self.second_ticket.to_bytes() return t class Keyblock: def __init__(self):", "t += self.data return t class CCACHE: \"\"\" As the", "t += self.is_skey.to_bytes(1, byteorder='big', signed=False) t += self.tktflags.to_bytes(4, byteorder='little', signed=False)", "signed=False) t += t_hdr t += self.primary_principal.to_bytes() for cred in", "== True: hashes.append(cred.to_hash()) return hashes @staticmethod def parse(reader): c =", "!= 0: krbcredinfo['starttime'] = datetime.datetime.fromtimestamp(self.time.starttime, datetime.timezone.utc) if self.time.endtime != 0:", "if client principal should be used as the primary principal", "@staticmethod def from_asn1(data): o = CCACHEOctetString() o.length = len(data) if", "= KRBCRED.load(kirbidata).native cc.add_kirbi(kirbi) return cc def to_kirbidir(self, directory_path): \"\"\" Converts", "\"\"\" reader = io.BytesIO(data) headers = [] while reader.tell() <", "c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump()) c.second_ticket = CCACHEOctetString.empty() 
self.credentials.append(c)", "def get_all_tgs(self): tgss = [] for cred in self.credentials: if", "adds to the ccache file The TGS is the native", "t += self.tagdata return t def __str__(self): t = 'tag:", "t += self.starttime.to_bytes(4, byteorder='big', signed=False) t += self.endtime.to_bytes(4, byteorder='big', signed=False)", "data['keyvalue'] return k def to_asn1(self): t = {} t['keytype'] =", "return tgts def get_all_tgs(self): tgss = [] for cred in", "return t class Authdata: def __init__(self): self.authtype = None self.authdata", "= CCACHEOctetString.from_string(realm) for comp in principal['name-string']: p.components.append(CCACHEOctetString.from_asn1(comp)) return p @staticmethod", "Header() header.tag = 1 header.taglen = 8 #header.tagdata = b'\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x00'", "def __init__(self, empty = False): self.file_format_version = None #0x0504 self.headers", "= len(self.data) t = len(self.data).to_bytes(4, byteorder='big', signed=False) t += self.data", "that the decrypted XXX is supplied in enc_as_rep_part override_pp: bool", "to add it to cache! 
This can cause problems!') else:", "if not (len(cred.server.components) > 0 and cred.server.components[0].to_string() == 'krb5_ccache_conf_data' and", "= Keyblock() k.keytype = data['keytype'] k.etype = 0 # not", "t = EncryptionKey(self.key.to_asn1()).native return tgt_rep, t def to_tgs(self): \"\"\" Returns", "@staticmethod def from_asn1(enc_as_rep_part): t = Times() t.authtime = dt_to_kerbtime(enc_as_rep_part['authtime']) \\", "signed=False) t += self.usec_offset.to_bytes(4, byteorder='big', signed=False) return t class Credential:", "+= self.endtime.to_bytes(4, byteorder='big', signed=False) t += self.renew_till.to_bytes(4, byteorder='big', signed=False) return", "to_bytes(self): t = self.authtime.to_bytes(4, byteorder='big', signed=False) t += self.starttime.to_bytes(4, byteorder='big',", "signed=False) t += self.tktflags.to_bytes(4, byteorder='little', signed=False) t += self.num_address.to_bytes(4, byteorder='big',", "= None self.addrdata = None @staticmethod def parse(reader): a =", "from the TGT and adds to the ccache file The", "reader.seek(pos,0) while reader.tell() < eof: cred = Credential.parse(reader) if not", "Iterates trough all .kirbi files in a given directory and", "None self.renew_till = None @staticmethod def from_asn1(enc_as_rep_part): t = Times()", "of the tgs_rep object, it is expected that the decrypted", "< eof: cred = Credential.parse(reader) if not (len(cred.server.components) > 0", "to_bytes(self): t = self.authtype.to_bytes(2, byteorder='big', signed=False) t += self.authdata.to_bytes() return", "= KRBCRED.load(kirbidata).native cc = CCACHE() cc.add_kirbi(kirbi) return cc def get_all_tgt(self):", "self.time = None self.is_skey = None self.tktflags = None self.num_address", "= True to get an object without header already present", "if 'renew_till' in enc_as_rep_part and enc_as_rep_part['renew_till'] else 0 return t", "self.authtype.to_bytes(2, byteorder='big', signed=False) t += self.authdata.to_bytes() return t class CCACHEPrincipal:", 
"int.from_bytes(reader.read(4), byteorder='big', signed=False) p.num_components = int.from_bytes(reader.read(4), byteorder='big', signed=False) p.realm =", "self.client.to_bytes() t += self.server.to_bytes() t += self.key.to_bytes() t += self.time.to_bytes()", "self.primary_principal = None self.credentials = [] if empty == False:", "pycquery_krb.protocol.asn1_structs import Ticket, EncryptedData, \\ krb5_pvno, KrbCredInfo, EncryptionKey, KRBCRED, TicketFlags,", "def dummy_time(start= datetime.datetime.now(datetime.timezone.utc)): t = Times() t.authtime = dt_to_kerbtime(start) t.starttime", "principal with a valid TGT This function doesn't do decryption", "ccache file \"\"\" c = Credential() c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm'])", "= EncryptionKey(self.key.to_asn1()).native return tgt_rep, t def to_kirbi(self): filename = '%s@%s_%s'", "for header in self.headers: t+= '%s\\n' % header t+= 'primary_principal", "CCACHEOctetString.from_string(realm) for comp in principal['name-string']: p.components.append(CCACHEOctetString.from_asn1(comp)) return p @staticmethod def", "in self.headers: t+= '%s\\n' % header t+= 'primary_principal : %s\\n'", "if override_pp == True: self.primary_principal = c.client c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'],", "CCACHEOctetString.parse(reader) c.second_ticket = CCACHEOctetString.parse(reader) return c @staticmethod def summary_header(): return", "t += self.realm.to_bytes() for com in self.components: t += com.to_bytes()", "cred in self.credentials: t += cred.to_bytes() return t @staticmethod def", "data): ### # data = KrbCredInfo ### c = Credential()", "static- you'd need to init this object with empty =", "p.components.append(CCACHEOctetString.from_asn1(comp)) return p @staticmethod def dummy(): p = CCACHEPrincipal() p.name_type", "__init__(self): self.length = None self.data = None @staticmethod def empty():", "the native representation of the asn1 encoded AS_REP data that", "AS_REP 
message and the sessionkey in EncryptionKey native format \"\"\"", "from_asn1(principal, realm): p = CCACHEPrincipal() p.name_type = principal['name-type'] p.num_components =", "byteorder='big', signed=False) t += self.authdata.to_bytes() return t class CCACHEPrincipal: def", "def from_kirbidir(directory_path): \"\"\" Iterates trough all .kirbi files in a", "a given directory and converts all of them into one", "from pycquery_krb import logger from asn1crypto import core # http://repo.or.cz/w/krb5dissect.git/blob_plain/HEAD:/ccache.txt", "open(filename, 'wb') as f: f.write(self.to_bytes()) @staticmethod def from_bytes(data): return CCACHE.parse(io.BytesIO(data))", "datetime.timezone.utc) if self.time.endtime != 0: krbcredinfo['endtime'] = datetime.datetime.fromtimestamp(self.time.endtime, datetime.timezone.utc) if", "c.server = CCACHEPrincipal.parse(reader) c.key = Keyblock.parse(reader) c.time = Times.parse(reader) c.is_skey", "t.starttime = dt_to_kerbtime(enc_as_rep_part['starttime']) \\ if 'starttime' in enc_as_rep_part and enc_as_rep_part['starttime']", "p.num_components = len(principal['name-string']) p.realm = CCACHEOctetString.from_string(realm) for comp in principal['name-string']:", "byteorder='big', signed=False) d.usec_offset = int.from_bytes(reader.read(4), byteorder='big', signed=False) return d def", "CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one ticket per file c.second_ticket =", "kirbi, filename = cred.to_kirbi() filename = '%s.kirbi' % filename.replace('..','!') filepath", "#not sure! c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata =", "return t def __str__(self): t = 'tag: %s\\n' % self.tag", "if self.time.renew_till != 0: #this parameter is not mandatory, and", "add it to cache! 
This can cause problems!') else: logging.debug('This", "= CCACHE() cc.add_kirbi(kirbi) return cc def get_all_tgt(self): \"\"\" Returns a", "k.keylen = int.from_bytes(reader.read(2), byteorder='big', signed=False) k.keyvalue = reader.read(k.keylen) return k", "TGS is the native representation of the asn1 encoded TGS_REP", "= b'\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x00' header.tagdata = b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' self.headers.append(header) #t_hdr = b'' #for", "the ccache file \"\"\" c = Credential() c.client = CCACHEPrincipal.from_asn1(as_rep['cname'],", "kirbi files to \"\"\" kf_abs = os.path.abspath(directory_path) for cred in", "self.addrdata = None @staticmethod def parse(reader): a = Address() a.addrtype", "= datetime.datetime.fromtimestamp(self.time.endtime, datetime.timezone.utc) if self.time.renew_till != 0: #this parameter is", "byteorder='big', signed=False) t += self.tktflags.to_bytes(4, byteorder='little', signed=False) t += self.num_address.to_bytes(4,", "return o def to_bytes(self): if isinstance(self.data,str): self.data = self.data.encode() self.length", "request. 
This function doesn't do decryption of the encrypted part", "tgss def get_hashes(self, all_hashes = False): \"\"\" Returns a list", "= cred.to_kirbi() filename = '%s.kirbi' % filename.replace('..','!') filepath = os.path.join(kf_abs,", "with open(filename, 'rb') as f: return CCACHE.parse(f) def to_file(self, filename):", "int.from_bytes(reader.read(2), byteorder='big', signed=False) k.keyvalue = reader.read(k.keylen) return k def to_bytes(self):", "= '/').lower().find('krbtgt') == -1: tgss.append(cred.to_tgs()) return tgss def get_hashes(self, all_hashes", "principal['name-type'] p.num_components = len(principal['name-string']) p.realm = CCACHEOctetString.from_string(realm) for comp in", "@staticmethod def summary_header(): return ['client','server','starttime','endtime','renew-till'] def summary(self): return [ '%s@%s'", "self.file_format_version.to_bytes(2, byteorder='big', signed=False) t_hdr = b'' for header in self.headers:", "native format \"\"\" enc_part = EncryptedData({'etype': 1, 'cipher': b''}) tgt_rep", "include_expired = False): c = Credential() enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native ticket_info", "= Header() header.tag = 1 header.taglen = 8 #header.tagdata =", "enc_as_rep_part, override_pp = True): #from AS_REP \"\"\" Creates credential object", "# # Author: # <NAME> (@skelsec) # import os import", "if self.time.starttime != 0 else 'N/A', datetime.datetime.fromtimestamp(self.time.endtime).isoformat() if self.time.endtime !=", "in bytes, encoded in 2 byte big-endian unsigned int self.primary_principal", "dummy(): p = CCACHEPrincipal() p.name_type = 1 p.num_components = 1", "t = self.keytype.to_bytes(2, byteorder='big', signed=False) t += self.etype.to_bytes(2, byteorder='big', signed=False)", "t+= '%s\\n' % header t+= 'primary_principal : %s\\n' % self.primary_principal", "None self.authdata = None @staticmethod def parse(reader): a = Authdata()", "header in self.headers: t_hdr += header.to_bytes() t += 
len(t_hdr).to_bytes(2, byteorder='big',", "enc_as_rep_part and enc_as_rep_part['starttime'] else 0 t.endtime = dt_to_kerbtime(enc_as_rep_part['endtime']) \\ if", "KrbCredInfo ### c = Credential() c.client = CCACHEPrincipal.from_asn1(data['pname'], data['prealm']) c.server", "the ccache file \"\"\" c = Credential() c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'],", "to cache! This can cause problems!') else: logging.debug('This ticket has", "range(c.num_address): c.addrs.append(Address.parse(reader)) c.num_authdata = int.from_bytes(reader.read(4), byteorder='big', signed=False) for _ in", "import os import io import datetime import glob import hashlib", "krbcred['pvno'] = krb5_pvno krbcred['msg-type'] = MESSAGE_TYPE.KRB_CRED.value krbcred['tickets'] = [Ticket.load(self.ticket.to_asn1())] krbcred['enc-part']", "signed=False) h.tagdata = reader.read(h.taglen) headers.append(h) return headers def to_bytes(self): t", "c = Credential() enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native ticket_info = enc_credinfo['ticket-info'][0] \"\"\"", "as_rep['crealm']) if override_pp == True: self.primary_principal = c.client c.server =", "has most likely expired, but include_expired is forcing me to", "= CCACHEOctetString.empty() self.credentials.append(c) def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False):", "\"\"\" Returns a list of AS_REP tickets in native format", "signed=False) t += self.tagdata return t def __str__(self): t =", "return c @staticmethod def summary_header(): return ['client','server','starttime','endtime','renew-till'] def summary(self): return", "= CCACHEPrincipal.from_asn1(data['sname'], data['srealm']) c.key = Keyblock.from_asn1(data['key']) c.is_skey = 0 #not", "byteorder='big', signed=False) h.tagdata = reader.read(h.taglen) headers.append(h) return headers def to_bytes(self):", "byteorder='big', signed=False) p.realm = CCACHEOctetString.parse(reader) for _ in range(p.num_components): 
p.components.append(CCACHEOctetString.parse(reader))", "of the time not present krbcredinfo['authtime'] = datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc) if", "\"\"\" Returns the native format of an AS_REP message and", "= self.keyvalue return t @staticmethod def parse(reader): k = Keyblock()", "= CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm']) c.time = Times.from_asn1(enc_tgs_rep_part) c.key = Keyblock.from_asn1(enc_tgs_rep_part['key']) c.is_skey", "Credential() c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm']) if override_pp == True: self.primary_principal", "b''}) tgt_rep = {} tgt_rep['pvno'] = krb5_pvno tgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value", "most likely expired, but include_expired is forcing me to add", "tgt_rep['crealm'] = self.server.realm.to_string() tgt_rep['cname'] = self.client.to_asn1()[0] tgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native tgt_rep['enc-part']", "t = self.tag.to_bytes(2, byteorder='big', signed=False) t += len(self.tagdata).to_bytes(2, byteorder='big', signed=False)", "None self.tagdata = None @staticmethod def parse(data): \"\"\" returns a", "= self.authtype.to_bytes(2, byteorder='big', signed=False) t += self.authdata.to_bytes() return t class", "header t+= 'primary_principal : %s\\n' % self.primary_principal return t def", "in native format (dict). 
To determine which ticket are AP_REP", "decryption of the encrypted part of the as_rep object, it", "= int.from_bytes(reader.read(2), byteorder='big', signed=False) a.authdata = CCACHEOctetString.parse(reader) return a def", "def __init__(self): self.length = None self.data = None @staticmethod def", "in self.credentials: t += cred.to_bytes() return t @staticmethod def from_kirbifile(kirbi_filename):", "logger from asn1crypto import core # http://repo.or.cz/w/krb5dissect.git/blob_plain/HEAD:/ccache.txt class Header: def", "sometimes it's not present krbcredinfo['renew-till'] = datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc) krbcredinfo['srealm'] =", "= Keyblock.from_asn1(enc_tgs_rep_part['key']) c.is_skey = 0 #not sure! c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native", "'cipher': EncKrbCredPart(enc_krbcred).dump()}) kirbi = KRBCRED(krbcred) return kirbi, filename @staticmethod def", "int self.primary_principal = CCACHEPrincipal.dummy() def __str__(self): t = '== CCACHE", "override_pp = True, include_expired = False): c = Credential() enc_credinfo", "kirbi file format supports one credential per file, so prepare", "== True: self.primary_principal = c.client #yaaaaay 4 additional weirdness!!!! #if", "Credential() enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native ticket_info = enc_credinfo['ticket-info'][0] \"\"\" if ticket_info['endtime']", "self.client = None self.server = None self.key = None self.time", "it to cache! 
This can cause problems!') else: logging.debug('This ticket", "CCACHEPrincipal.parse(reader) pos = reader.tell() reader.seek(-1,2) eof = reader.tell() reader.seek(pos,0) while", "self.data = None @staticmethod def empty(): o = CCACHEOctetString() o.length", "enc_as_rep_part and enc_as_rep_part['endtime'] else 0 t.renew_till = dt_to_kerbtime(enc_as_rep_part['renew_till']) \\ if", "== False: self.__setup() def __setup(self): self.file_format_version = 0x0504 header =", "stores one ticket per file c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c) @staticmethod", "= b'' #for header in self.headers: # t_hdr += header.to_bytes()", "[Ticket.load(self.ticket.to_asn1())] krbcred['enc-part'] = EncryptedData({'etype': EncryptionType.NULL.value, 'cipher': EncKrbCredPart(enc_krbcred).dump()}) kirbi = KRBCRED(krbcred)", "\"\"\" hashes = [] for cred in self.credentials: res =", "'krb5_ccache_conf_data' and cred.server.realm.to_string() == 'X-CACHECONF:'): c.credentials.append(cred) return c def to_bytes(self):", "t += self.keyvalue return t class Times: def __init__(self): self.authtime", "t = len(res['sname']['name-string']) if t == 1: tgs_name_string = res['sname']['name-string'][0]", "TGT and adds to the ccache file The TGT is", "!= 0: #this parameter is not mandatory, and most of", "o.length = len(data) if isinstance(data,str): o.data = data.encode() else: o.data", "<filename>pycquery_krb/common/ccache.py #!/usr/bin/env python3 # # Author: # <NAME> (@skelsec) #", "to_kirbi(self): filename = '%s@%s_%s' % (self.client.to_string() , self.server.to_string(), hashlib.sha1(self.ticket.to_asn1()).hexdigest()[:8]) krbcredinfo", "c = Credential() c.client = CCACHEPrincipal.parse(reader) c.server = CCACHEPrincipal.parse(reader) c.key", "res['enc-part']['cipher'][16:] return '$krb5tgs$%s$*%s$%s$spn*$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() ) def to_tgt(self):", "def from_asn1(enc_as_rep_part): t = Times() 
t.authtime = dt_to_kerbtime(enc_as_rep_part['authtime']) \\ if", "None @staticmethod def parse(reader): a = Address() a.addrtype = int.from_bytes(reader.read(2),", "= KRBCRED(krbcred) return kirbi, filename @staticmethod def from_asn1(ticket, data): ###", "KrbCredInfo, EncryptionKey, KRBCRED, TicketFlags, EncKrbCredPart from pycquery_krb.common.utils import dt_to_kerbtime, TGSTicket2hashcat", "a specific service principal with a valid TGT This function", "if self.time.starttime != 0: krbcredinfo['starttime'] = datetime.datetime.fromtimestamp(self.time.starttime, datetime.timezone.utc) if self.time.endtime", "+= self.usec_offset.to_bytes(4, byteorder='big', signed=False) return t class Credential: def __init__(self):", "dt_to_kerbtime, TGSTicket2hashcat from pycquery_krb.protocol.constants import EncryptionType, MESSAGE_TYPE from pycquery_krb import", "return t def add_tgt(self, as_rep, enc_as_rep_part, override_pp = True): #from", "= 1 p.realm = CCACHEOctetString.from_string('kerbi.corp') for _ in range(1): p.components.append(CCACHEOctetString.from_string('kerbi'))", "return k def to_bytes(self): t = self.keytype.to_bytes(2, byteorder='big', signed=False) t", "likely expired, skipping') return \"\"\" c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm']) if", "% self.file_format_version for header in self.headers: t+= '%s\\n' % header", "len(self.tagdata).to_bytes(2, byteorder='big', signed=False) t += self.tagdata return t def __str__(self):", "the encrypted part of the as_rep object, it is expected", "empty(): o = CCACHEOctetString() o.length = 0 o.data = b''", ") else: tgs_checksum = res['enc-part']['cipher'][:16] tgs_encrypted_data2 = res['enc-part']['cipher'][16:] return '$krb5tgs$%s$*%s$%s$spn*$%s$%s'", "= krb5_pvno krbcred['msg-type'] = MESSAGE_TYPE.KRB_CRED.value krbcred['tickets'] = [Ticket.load(self.ticket.to_asn1())] krbcred['enc-part'] =", "#kirbi only stores one ticket per file c.second_ticket = CCACHEOctetString.empty()", 
"\"\"\" with open(filename, 'wb') as f: f.write(self.to_bytes()) @staticmethod def from_bytes(data):", "+= len(self.components).to_bytes(4, byteorder='big', signed=False) t += self.realm.to_bytes() for com in", "expired, but include_expired is forcing me to add it to", "self.server.realm.to_string()), datetime.datetime.fromtimestamp(self.time.starttime).isoformat() if self.time.starttime != 0 else 'N/A', datetime.datetime.fromtimestamp(self.time.endtime).isoformat() if", "the server principal to be the kerberos service \"\"\" tgts", "format \"\"\" enc_part = EncryptedData({'etype': 1, 'cipher': b''}) tgt_rep =", "# data = KrbCredInfo ### c = Credential() c.client =", "signed=False) k.etype = int.from_bytes(reader.read(2), byteorder='big', signed=False) k.keylen = int.from_bytes(reader.read(2), byteorder='big',", "Times.from_asn1(enc_as_rep_part) c.key = Keyblock.from_asn1(enc_as_rep_part['key']) c.is_skey = 0 #not sure! c.tktflags", "#yaaaaay 4 additional weirdness!!!! #if sname name-string contains a realm", "return '$krb5tgs$%s$*%s$%s$spn*$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() ) def to_tgt(self): \"\"\"", "= Credential() c.client = CCACHEPrincipal.parse(reader) c.server = CCACHEPrincipal.parse(reader) c.key =", "self.second_ticket.to_bytes() return t class Keyblock: def __init__(self): self.keytype = None", "c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c) def add_kirbi(self, krbcred, override_pp = True,", "int.from_bytes(reader.read(1), byteorder='big', signed=False) c.tktflags = int.from_bytes(reader.read(4), byteorder='little', signed=False) c.num_address =", "signed=False) return d def to_bytes(self): t = self.time_offset.to_bytes(4, byteorder='big', signed=False)", "filename = '%s.kirbi' % filename.replace('..','!') filepath = os.path.join(kf_abs, filename) with", "= DateTime() d.time_offset = int.from_bytes(reader.read(4), byteorder='big', signed=False) d.usec_offset = 
int.from_bytes(reader.read(4),", "in enc_as_rep_part override_pp: bool to determine if client principal should", "+= self.ticket.to_bytes() t += self.second_ticket.to_bytes() return t class Keyblock: def", "# not sure k.keylen = len(data['keyvalue']) k.keyvalue = data['keyvalue'] return", "return cc def to_kirbidir(self, directory_path): \"\"\" Converts all credential object", "the TGT and adds to the ccache file The TGT", "not sure k.keylen = len(data['keyvalue']) k.keyvalue = data['keyvalue'] return k", "= self.client.realm.to_string() krbcredinfo['pname'] = self.client.to_asn1()[0] krbcredinfo['flags'] = core.IntegerBitString(self.tktflags).cast(TicketFlags) if self.time.authtime", "one credential per file, so prepare for a lot of", "As the header is rarely used -mostly static- you'd need", "a file \"\"\" with open(filename, 'wb') as f: f.write(self.to_bytes()) @staticmethod", "else: c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm']) c.time = Times.from_asn1(ticket_info) c.key =", "specific service principal with a valid TGT This function doesn't", "KRBCRED, TicketFlags, EncKrbCredPart from pycquery_krb.common.utils import dt_to_kerbtime, TGSTicket2hashcat from pycquery_krb.protocol.constants", "from_file(filename): \"\"\" Parses the ccache file and returns a CCACHE", "return o @staticmethod def parse(reader): o = CCACHEOctetString() o.length =", "b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' self.headers.append(header) #t_hdr = b'' #for header in self.headers: #", "upon a succsessful TGT request. 
This function doesn't do decryption", "= '== CCACHE ==\\n' t+= 'file_format_version : %s\\n' % self.file_format_version", "in self.components: t += com.to_bytes() return t class CCACHEOctetString: def", "TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump())", "CCACHEOctetString.empty() self.credentials.append(c) def add_kirbi(self, krbcred, override_pp = True, include_expired =", "+= self.starttime.to_bytes(4, byteorder='big', signed=False) t += self.endtime.to_bytes(4, byteorder='big', signed=False) t", "directory_path): \"\"\" Converts all credential object in the CCACHE object", "= b'' return o def to_asn1(self): return self.data def to_string(self):", "isinstance(data,str): o.data = data.encode() else: o.data = data return o", "get an object without header already present \"\"\" def __init__(self,", "= int.from_bytes(reader.read(4), byteorder='big', signed=False) return d def to_bytes(self): t =", "else 'N/A', ] def to_bytes(self): t = self.client.to_bytes() t +=", "signed=False) t += self.renew_till.to_bytes(4, byteorder='big', signed=False) return t class Address:", "signed=False) t += self.data return t class CCACHE: \"\"\" As", "{} tgt_rep['pvno'] = krb5_pvno tgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value tgt_rep['crealm'] = self.server.realm.to_string()", "= 1 #size of the entire header in bytes, encoded", "Returns a list of AS_REP tickets in native format (dict).", "datetime.timezone.utc) if self.time.renew_till != 0: #this parameter is not mandatory,", "com in self.components: t += com.to_bytes() return t class CCACHEOctetString:", "self.keyvalue return t class Times: def __init__(self): self.authtime = None", "CCACHEOctetString() o.length = int.from_bytes(reader.read(4), byteorder='big', signed=False) o.data = reader.read(o.length) return", "'rb') as f: kirbidata = f.read() kirbi = KRBCRED.load(kirbidata).native 
cc.add_kirbi(kirbi)", "(which is RC4) all_hashes: overrides the encryption type filtering and", "rarely used -mostly static- you'd need to init this object", "tgs_checksum.hex(), tgs_encrypted_data2.hex() ) else: tgs_checksum = res['enc-part']['cipher'][:16] tgs_encrypted_data2 = res['enc-part']['cipher'][16:]", "it's not present krbcredinfo['renew-till'] = datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc) krbcredinfo['srealm'] = self.server.realm.to_string()", "c.num_address = int.from_bytes(reader.read(4), byteorder='big', signed=False) for _ in range(c.num_address): c.addrs.append(Address.parse(reader))", "__str__(self): t = 'tag: %s\\n' % self.tag t += 'taglen:", "them into one CCACHE object \"\"\" cc = CCACHE() dir_path", "in enc_as_rep_part and enc_as_rep_part['endtime'] else 0 t.renew_till = dt_to_kerbtime(enc_as_rep_part['renew_till']) \\", "byteorder='big', signed=False) for addr in self.addrs: t += addr.to_bytes() t", "== True: self.primary_principal = c.client c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm']) c.time", "self.credentials: t += cred.to_bytes() return t @staticmethod def from_kirbifile(kirbi_filename): kf_abs", "def add_tgt(self, as_rep, enc_as_rep_part, override_pp = True): #from AS_REP \"\"\"", "= datetime.datetime.fromtimestamp(self.time.starttime, datetime.timezone.utc) if self.time.endtime != 0: krbcredinfo['endtime'] = datetime.datetime.fromtimestamp(self.time.endtime,", "into one CCACHE object \"\"\" cc = CCACHE() dir_path =", "without header already present \"\"\" def __init__(self, empty = False):", "ticket_info['prealm']) if override_pp == True: self.primary_principal = c.client #yaaaaay 4", "= CCACHEOctetString.empty() self.credentials.append(c) @staticmethod def from_kirbi(kirbidata): kirbi = KRBCRED.load(kirbidata).native cc", "== 1: tgs_name_string = res['sname']['name-string'][0] else: tgs_name_string = res['sname']['name-string'][1] tgs_realm", "contents of 
the CCACHE object to a file \"\"\" with", "'/').lower().find('krbtgt') == -1: tgss.append(cred.to_tgs()) return tgss def get_hashes(self, all_hashes =", "0: #this parameter is not mandatory, and sometimes it's not", "if self.time.endtime != 0: krbcredinfo['endtime'] = datetime.datetime.fromtimestamp(self.time.endtime, datetime.timezone.utc) if self.time.renew_till", "CCACHEOctetString.parse(reader) return c @staticmethod def summary_header(): return ['client','server','starttime','endtime','renew-till'] def summary(self):", "def parse(reader): c = CCACHE(True) c.file_format_version = int.from_bytes(reader.read(2), byteorder='big', signed=False)", "reader.read(o.length) return o def to_bytes(self): if isinstance(self.data,str): self.data = self.data.encode()", "c.client #yaaaaay 4 additional weirdness!!!! #if sname name-string contains a", "from_asn1(data): k = Keyblock() k.keytype = data['keytype'] k.etype = 0", "= None self.credentials = [] if empty == False: self.__setup()", "c @staticmethod def parse(reader): c = Credential() c.client = CCACHEPrincipal.parse(reader)", "ticket_info['srealm']) else: c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm']) c.time = Times.from_asn1(ticket_info) c.key", "representation of the asn1 encoded TGS_REP data when the user", "self.addrtype.to_bytes(2, byteorder='big', signed=False) t += self.addrdata.to_bytes() return t class Authdata:", "AS_REP data that the AD sends upon a succsessful TGT", "t def __str__(self): t = 'tag: %s\\n' % self.tag t", "o.length = len(o.data) return o @staticmethod def from_asn1(data): o =", "is RC4) all_hashes: overrides the encryption type filtering and returns", "p.components.append(CCACHEOctetString.from_string('kerbi')) return p def to_string(self, separator='-'): return separator.join([c.to_string() for c", "t class CCACHEPrincipal: def __init__(self): self.name_type = None self.num_components =", "c.tktflags = int.from_bytes(reader.read(4), byteorder='little', 
signed=False) c.num_address = int.from_bytes(reader.read(4), byteorder='big', signed=False)", "krbcredinfo['prealm'] = self.client.realm.to_string() krbcredinfo['pname'] = self.client.to_asn1()[0] krbcredinfo['flags'] = core.IntegerBitString(self.tktflags).cast(TicketFlags) if", "MESSAGE_TYPE from pycquery_krb import logger from asn1crypto import core #", "= True, include_expired = False): c = Credential() enc_credinfo =", "CCACHE() cc.add_kirbi(kirbi) return cc def get_all_tgt(self): \"\"\" Returns a list", "self.authdata: t += ad.to_bytes() t += self.ticket.to_bytes() t += self.second_ticket.to_bytes()", "= None self.data = None @staticmethod def empty(): o =", "data return o @staticmethod def parse(reader): o = CCACHEOctetString() o.length", "byteorder='big', signed=False) t += t_hdr t += self.primary_principal.to_bytes() for cred", "c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata = 0 c.ticket", "object, it is expected that the decrypted XXX is supplied", "signed=False) t += self.endtime.to_bytes(4, byteorder='big', signed=False) t += self.renew_till.to_bytes(4, byteorder='big',", "as well, trimming it') t = ticket_info['sname'] t['name-string'] = t['name-string'][:-1]", "krbcredinfo['starttime'] = datetime.datetime.fromtimestamp(self.time.starttime, datetime.timezone.utc) if self.time.endtime != 0: krbcredinfo['endtime'] =", "for the ccache file \"\"\" c = Credential() c.client =", "byteorder='big', signed=False) h.taglen = int.from_bytes(reader.read(2), byteorder='big', signed=False) h.tagdata = reader.read(h.taglen)", "signed=False) t += self.keyvalue return t class Times: def __init__(self):", "enc_krbcred = {} enc_krbcred['ticket-info'] = [KrbCredInfo(krbcredinfo)] krbcred = {} krbcred['pvno']", "TicketFlags, EncKrbCredPart from pycquery_krb.common.utils import dt_to_kerbtime, TGSTicket2hashcat from pycquery_krb.protocol.constants import", "return hashes @staticmethod def parse(reader): c = 
CCACHE(True) c.file_format_version =", "t = Times() t.authtime = int.from_bytes(reader.read(4), byteorder='big', signed=False) t.starttime =", "python3 # # Author: # <NAME> (@skelsec) # import os", "% self.tagdata return t class DateTime: def __init__(self): self.time_offset =", "byteorder='little', signed=False) t += self.num_address.to_bytes(4, byteorder='big', signed=False) for addr in", "is rarely used -mostly static- you'd need to init this", "int.from_bytes(reader.read(2), byteorder='big', signed=False) h.tagdata = reader.read(h.taglen) headers.append(h) return headers def", "= CCACHEOctetString.empty() return c @staticmethod def parse(reader): c = Credential()", "self.components]) def to_asn1(self): t = {'name-type': self.name_type, 'name-string': [name.to_string() for", "@staticmethod def from_string(data): o = CCACHEOctetString() o.data = data.encode() o.length", "= self.data.encode() self.length = len(self.data) t = len(self.data).to_bytes(4, byteorder='big', signed=False)", "Address: def __init__(self): self.addrtype = None self.addrdata = None @staticmethod", "byteorder='big', signed=False) t += self.data return t class CCACHE: \"\"\"", "encoded in 2 byte big-endian unsigned int self.primary_principal = CCACHEPrincipal.dummy()", "0 c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one ticket per", "to the ccache file The TGS is the native representation", "c def to_bytes(self): t = self.file_format_version.to_bytes(2, byteorder='big', signed=False) t_hdr =", "else 0 t.endtime = dt_to_kerbtime(enc_as_rep_part['endtime']) \\ if 'endtime' in enc_as_rep_part", "int(res['enc-part']['etype']) t = len(res['sname']['name-string']) if t == 1: tgs_name_string =", "c = Credential() c.client = CCACHEPrincipal.from_asn1(data['pname'], data['prealm']) c.server = CCACHEPrincipal.from_asn1(data['sname'],", "o.data = data.encode() else: o.data = data return o @staticmethod", "#for i in range(c.headerlen): # 
c.headers.append(Header.parse(reader)) c.primary_principal = CCACHEPrincipal.parse(reader) pos", "= None def to_hash(self): res = Ticket.load(self.ticket.to_asn1()).native tgs_encryption_type = int(res['enc-part']['etype'])", "t += len(self.components).to_bytes(4, byteorder='big', signed=False) t += self.realm.to_bytes() for com", "the directory to write the kirbi files to \"\"\" kf_abs", "dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi') for filename in glob.glob(dir_path): with open(filename,", "= int.from_bytes(reader.read(2), byteorder='big', signed=False) h.tagdata = reader.read(h.taglen) headers.append(h) return headers", "self.addrs: t += addr.to_bytes() t += self.num_authdata.to_bytes(4, byteorder='big', signed=False) for", "t += 'taglen: %s\\n' % self.taglen t += 'tagdata: %s\\n'", "not mandatory, and sometimes it's not present krbcredinfo['renew-till'] = datetime.datetime.fromtimestamp(self.time.authtime,", "if len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper(): logger.debug('SNAME contains", "return t class Credential: def __init__(self): self.client = None self.server", "t += self.primary_principal.to_bytes() for cred in self.credentials: t += cred.to_bytes()", "'starttime' in enc_as_rep_part and enc_as_rep_part['starttime'] else 0 t.endtime = dt_to_kerbtime(enc_as_rep_part['endtime'])", "__init__(self): self.tag = None self.taglen = None self.tagdata = None", "hashcat-firendly format for tickets with encryption type 23 (which is", "for c in self.components]) def to_asn1(self): t = {'name-type': self.name_type,", "should be used as the primary principal for the ccache", "self.primary_principal return t def add_tgt(self, as_rep, enc_as_rep_part, override_pp = True):", "impacket will crash miserably :( if len(ticket_info['sname']['name-string']) > 2 and", "c.key = Keyblock.from_asn1(ticket_info['key']) c.is_skey = 0 #not sure! 
c.tktflags =", "= os.path.abspath(directory_path) for cred in self.credentials: kirbi, filename = cred.to_kirbi()", "object from the TGS and adds to the ccache file", "class Credential: def __init__(self): self.client = None self.server = None", "tgs_realm = res['realm'] if tgs_encryption_type == EncryptionType.AES256_CTS_HMAC_SHA1_96.value: tgs_checksum = res['enc-part']['cipher'][-12:]", "tgs_rep object, it is expected that the decrypted XXX is", "return p def to_bytes(self): t = self.name_type.to_bytes(4, byteorder='big', signed=False) t", "object to the kirbi file format used by mimikatz. The", "= None self.authdata = None @staticmethod def parse(reader): a =", "from_asn1(enc_as_rep_part): t = Times() t.authtime = dt_to_kerbtime(enc_as_rep_part['authtime']) \\ if 'authtime'", "the ccache file The TGT is basically the native representation", "int.from_bytes(reader.read(4), byteorder='big', signed=False) for _ in range(c.num_authdata): c.authdata.append(Authdata.parse(reader)) c.ticket =", "converts all of them into one CCACHE object \"\"\" cc", "encryption type filtering and returns hash for all tickets \"\"\"", "= [KrbCredInfo(krbcredinfo)] krbcred = {} krbcred['pvno'] = krb5_pvno krbcred['msg-type'] =", "= None @staticmethod def parse(data): \"\"\" returns a list of", "(len(cred.server.components) > 0 and cred.server.components[0].to_string() == 'krb5_ccache_conf_data' and cred.server.realm.to_string() ==", "from pycquery_krb.common.utils import dt_to_kerbtime, TGSTicket2hashcat from pycquery_krb.protocol.constants import EncryptionType, MESSAGE_TYPE", "__init__(self): self.keytype = None self.etype = None self.keylen = None", "return tgss def get_hashes(self, all_hashes = False): \"\"\" Returns a", "tgs_rep['crealm']) if override_pp == True: self.primary_principal = c.client c.server =", "return CCACHE.from_kirbi(kirbidata) @staticmethod def from_kirbidir(directory_path): \"\"\" Iterates trough all .kirbi", "class CCACHEOctetString: def __init__(self): 
self.length = None self.data = None", "< len(data): h = Header() h.tag = int.from_bytes(reader.read(2), byteorder='big', signed=False)", "service principal with a valid TGT This function doesn't do", "def parse(reader): p = CCACHEPrincipal() p.name_type = int.from_bytes(reader.read(4), byteorder='big', signed=False)", "import datetime import glob import hashlib from pycquery_krb.protocol.asn1_structs import Ticket,", "and sometimes it's not present krbcredinfo['renew-till'] = datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc) krbcredinfo['srealm']", "signed=False) t += self.addrdata.to_bytes() return t class Authdata: def __init__(self):", "to get an object without header already present \"\"\" def", "file c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c) @staticmethod def from_kirbi(kirbidata): kirbi =", "check for the server principal to be the kerberos service", "return c @staticmethod def parse(reader): c = Credential() c.client =", "k = Keyblock() k.keytype = data['keytype'] k.etype = 0 #", "= self.keytype.to_bytes(2, byteorder='big', signed=False) t += self.etype.to_bytes(2, byteorder='big', signed=False) t", "in range(p.num_components): p.components.append(CCACHEOctetString.parse(reader)) return p def to_bytes(self): t = self.name_type.to_bytes(4,", "overrides the encryption type filtering and returns hash for all", "None self.authdata = [] self.ticket = None self.second_ticket = None", "0 c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(ticket['enc-part']['cipher']) c.second_ticket = CCACHEOctetString.empty()", "+= 'tagdata: %s\\n' % self.tagdata return t class DateTime: def", "def from_kirbifile(kirbi_filename): kf_abs = os.path.abspath(kirbi_filename) kirbidata = None with open(kf_abs,", "c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm']) if override_pp == True: self.primary_principal =", "an object without header already present \"\"\" def __init__(self, empty", "None self.realm 
= None self.components = [] @staticmethod def from_asn1(principal,", "datetime.datetime.fromtimestamp(self.time.starttime, datetime.timezone.utc) if self.time.endtime != 0: krbcredinfo['endtime'] = datetime.datetime.fromtimestamp(self.time.endtime, datetime.timezone.utc)", "in enc_as_rep_part and enc_as_rep_part['renew_till'] else 0 return t @staticmethod def", "= None @staticmethod def parse(reader): a = Authdata() a.authtype =", "self.authdata = None @staticmethod def parse(reader): a = Authdata() a.authtype", "== ticket_info['srealm'].upper(): logger.debug('SNAME contains the realm as well, trimming it')", "signed=False) p.num_components = int.from_bytes(reader.read(4), byteorder='big', signed=False) p.realm = CCACHEOctetString.parse(reader) for", "+= len(t_hdr).to_bytes(2, byteorder='big', signed=False) t += t_hdr t += self.primary_principal.to_bytes()", "data['keytype'] k.etype = 0 # not sure k.keylen = len(data['keyvalue'])", "sure! c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata = 0", "= {'name-type': self.name_type, 'name-string': [name.to_string() for name in self.components]} return", "as_rep, enc_as_rep_part, override_pp = True): #from AS_REP \"\"\" Creates credential", "p def to_string(self, separator='-'): return separator.join([c.to_string() for c in self.components])", "= datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc) krbcredinfo['srealm'] = self.server.realm.to_string() krbcredinfo['sname'] = self.server.to_asn1()[0] enc_krbcred", "\"\"\" Iterates trough all .kirbi files in a given directory", "= int.from_bytes(reader.read(4), byteorder='big', signed=False) t.endtime = int.from_bytes(reader.read(4), byteorder='big', signed=False) t.renew_till", "c.is_skey = int.from_bytes(reader.read(1), byteorder='big', signed=False) c.tktflags = int.from_bytes(reader.read(4), byteorder='little', signed=False)", "server principal to be the kerberos service \"\"\" 
tgts =", "c.num_authdata = int.from_bytes(reader.read(4), byteorder='big', signed=False) for _ in range(c.num_authdata): c.authdata.append(Authdata.parse(reader))", "= data['keyvalue'] return k def to_asn1(self): t = {} t['keytype']", "one CCACHE object \"\"\" cc = CCACHE() dir_path = os.path.join(os.path.abspath(directory_path),", "CCACHE object \"\"\" with open(filename, 'rb') as f: return CCACHE.parse(f)", "open(filepath, 'wb') as o: o.write(kirbi.dump()) @staticmethod def from_file(filename): \"\"\" Parses", "self.addrs = [] self.num_authdata = None self.authdata = [] self.ticket", "int.from_bytes(reader.read(2), byteorder='big', signed=False) k.keylen = int.from_bytes(reader.read(2), byteorder='big', signed=False) k.keyvalue =", "len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper(): logger.debug('SNAME contains the", "= int(res['enc-part']['etype']) t = len(res['sname']['name-string']) if t == 1: tgs_name_string", "while reader.tell() < len(data): h = Header() h.tag = int.from_bytes(reader.read(2),", "t = self.file_format_version.to_bytes(2, byteorder='big', signed=False) t_hdr = b'' for header", "to_hash(self): res = Ticket.load(self.ticket.to_asn1()).native tgs_encryption_type = int(res['enc-part']['etype']) t = len(res['sname']['name-string'])", "t = {'name-type': self.name_type, 'name-string': [name.to_string() for name in self.components]}", "# t_hdr += header.to_bytes() #self.headerlen = 1 #size of the", "= None self.tktflags = None self.num_address = None self.addrs =", "CCACHEOctetString() o.length = 0 o.data = b'' return o def", "byteorder='big', signed=False) t += self.endtime.to_bytes(4, byteorder='big', signed=False) t += self.renew_till.to_bytes(4,", "if isinstance(data,str): o.data = data.encode() else: o.data = data return", "forcing me to add it to cache! 
This can cause", "= None self.time = None self.is_skey = None self.tktflags =", "byteorder='big', signed=False) a.addrdata = CCACHEOctetString.parse(reader) return a def to_bytes(self): t", "c = Credential() c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm']) if override_pp ==", "t = 'tag: %s\\n' % self.tag t += 'taglen: %s\\n'", "def parse(reader): d = DateTime() d.time_offset = int.from_bytes(reader.read(4), byteorder='big', signed=False)", "directory to write the kirbi files to \"\"\" kf_abs =", "principal['name-string']: p.components.append(CCACHEOctetString.from_asn1(comp)) return p @staticmethod def dummy(): p = CCACHEPrincipal()", "format supports one credential per file, so prepare for a", "EncryptedData({'etype': 1, 'cipher': b''}) tgt_rep = {} tgt_rep['pvno'] = krb5_pvno", "= datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc) if self.time.starttime != 0: krbcredinfo['starttime'] = datetime.datetime.fromtimestamp(self.time.starttime,", "t.endtime = dt_to_kerbtime(enc_as_rep_part['endtime']) \\ if 'endtime' in enc_as_rep_part and enc_as_rep_part['endtime']", "is not mandatory, and sometimes it's not present krbcredinfo['renew-till'] =", "#if sname name-string contains a realm as well htne impacket", "import glob import hashlib from pycquery_krb.protocol.asn1_structs import Ticket, EncryptedData, \\", "all tickets \"\"\" hashes = [] for cred in self.credentials:", "signed=False) d.usec_offset = int.from_bytes(reader.read(4), byteorder='big', signed=False) return d def to_bytes(self):", "the entire header in bytes, encoded in 2 byte big-endian", "o.length = 0 o.data = b'' return o def to_asn1(self):", "Times.from_asn1(enc_tgs_rep_part) c.key = Keyblock.from_asn1(enc_tgs_rep_part['key']) c.is_skey = 0 #not sure! 
c.tktflags", "datetime.timezone.utc) if self.time.starttime != 0: krbcredinfo['starttime'] = datetime.datetime.fromtimestamp(self.time.starttime, datetime.timezone.utc) if", "can cause problems!') else: logging.debug('This ticket has most likely expired,", "= None self.second_ticket = None def to_hash(self): res = Ticket.load(self.ticket.to_asn1()).native", "= CCACHEPrincipal() p.name_type = 1 p.num_components = 1 p.realm =", "__str__(self): t = '== CCACHE ==\\n' t+= 'file_format_version : %s\\n'", "= CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm']) if override_pp == True: self.primary_principal = c.client", "object in the CCACHE object to the kirbi file format", "o.write(kirbi.dump()) @staticmethod def from_file(filename): \"\"\" Parses the ccache file and", "p.num_components = 1 p.realm = CCACHEOctetString.from_string('kerbi.corp') for _ in range(1):", "principal for the ccache file \"\"\" c = Credential() c.client", "def to_string(self, separator='-'): return separator.join([c.to_string() for c in self.components]) def", "self.headers: t_hdr += header.to_bytes() t += len(t_hdr).to_bytes(2, byteorder='big', signed=False) t", "len(self.data).to_bytes(4, byteorder='big', signed=False) t += self.data return t class CCACHE:", "only stores one ticket per file c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c)", "def to_bytes(self): t = self.client.to_bytes() t += self.server.to_bytes() t +=", "def to_bytes(self): if isinstance(self.data,str): self.data = self.data.encode() self.length = len(self.data)", "dt_to_kerbtime(enc_as_rep_part['endtime']) \\ if 'endtime' in enc_as_rep_part and enc_as_rep_part['endtime'] else 0", "c.key = Keyblock.parse(reader) c.time = Times.parse(reader) c.is_skey = int.from_bytes(reader.read(1), byteorder='big',", "byteorder='big', signed=False) t += self.realm.to_bytes() for com in self.components: t", "= Times() t.authtime = int.from_bytes(reader.read(4), byteorder='big', signed=False) t.starttime = 
int.from_bytes(reader.read(4),", "int.from_bytes(reader.read(2), byteorder='big', signed=False) hdr_size = int.from_bytes(reader.read(2), byteorder='big', signed=False) c.headers =", "ccache file \"\"\" c = Credential() c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm'])", "byteorder='big', signed=False) c.tktflags = int.from_bytes(reader.read(4), byteorder='little', signed=False) c.num_address = int.from_bytes(reader.read(4),", "byteorder='big', signed=False) t += len(self.tagdata).to_bytes(2, byteorder='big', signed=False) t += self.tagdata", "__init__(self): self.authtime = None self.starttime = None self.endtime = None", "DateTime() d.time_offset = int.from_bytes(reader.read(4), byteorder='big', signed=False) d.usec_offset = int.from_bytes(reader.read(4), byteorder='big',", "os.path.join(os.path.abspath(directory_path), '*.kirbi') for filename in glob.glob(dir_path): with open(filename, 'rb') as", "ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper(): logger.debug('SNAME contains the realm as well, trimming", "= res['sname']['name-string'][0] else: tgs_name_string = res['sname']['name-string'][1] tgs_realm = res['realm'] if", "= [] for cred in self.credentials: if cred.server.to_string(separator = '/').lower().find('krbtgt')", "= CCACHEOctetString() o.length = 0 o.data = b'' return o", "while reader.tell() < eof: cred = Credential.parse(reader) if not (len(cred.server.components)", "Parses the ccache file and returns a CCACHE object \"\"\"", "'endtime' in enc_as_rep_part and enc_as_rep_part['endtime'] else 0 t.renew_till = dt_to_kerbtime(enc_as_rep_part['renew_till'])", "p = CCACHEPrincipal() p.name_type = principal['name-type'] p.num_components = len(principal['name-string']) p.realm", "problems!') else: logging.debug('This ticket has most likely expired, skipping') return", "cc.add_kirbi(kirbi) return cc def get_all_tgt(self): \"\"\" Returns a list of", "hashes in hashcat-firendly format for tickets with 
encryption type 23", "if include_expired == True: logging.debug('This ticket has most likely expired,", "tgt_rep, t def to_kirbi(self): filename = '%s@%s_%s' % (self.client.to_string() ,", "self.etype.to_bytes(2, byteorder='big', signed=False) t += self.keylen.to_bytes(2, byteorder='big', signed=False) t +=", "the as_rep object, it is expected that the decrypted XXX", "c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm']) else: c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm']) c.time", "self.usec_offset.to_bytes(4, byteorder='big', signed=False) return t class Credential: def __init__(self): self.client", "k = Keyblock() k.keytype = int.from_bytes(reader.read(2), byteorder='big', signed=False) k.etype =", "= int.from_bytes(reader.read(4), byteorder='big', signed=False) for _ in range(c.num_address): c.addrs.append(Address.parse(reader)) c.num_authdata", "enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native ticket_info = enc_credinfo['ticket-info'][0] \"\"\" if ticket_info['endtime'] <", "Times.parse(reader) c.is_skey = int.from_bytes(reader.read(1), byteorder='big', signed=False) c.tktflags = int.from_bytes(reader.read(4), byteorder='little',", "EncryptionKey, KRBCRED, TicketFlags, EncKrbCredPart from pycquery_krb.common.utils import dt_to_kerbtime, TGSTicket2hashcat from", "contains the realm as well, trimming it') t = ticket_info['sname']", "def __init__(self): self.addrtype = None self.addrdata = None @staticmethod def", "c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm']) c.time = Times.from_asn1(enc_as_rep_part) c.key = Keyblock.from_asn1(enc_as_rep_part['key'])", "+= self.addrdata.to_bytes() return t class Authdata: def __init__(self): self.authtype =", "cred.to_kirbi() filename = '%s.kirbi' % filename.replace('..','!') filepath = os.path.join(kf_abs, filename)", "return o @staticmethod def from_asn1(data): o = CCACHEOctetString() o.length =", "return t class 
Times: def __init__(self): self.authtime = None self.starttime", "AS_REP \"\"\" Creates credential object from the TGT and adds", "AD sends upon a succsessful TGT request. This function doesn't", "self.file_format_version = 0x0504 header = Header() header.tag = 1 header.taglen", "'authtime' in enc_as_rep_part and enc_as_rep_part['authtime'] else 0 t.starttime = dt_to_kerbtime(enc_as_rep_part['starttime'])", "krbcred['msg-type'] = MESSAGE_TYPE.KRB_CRED.value krbcred['tickets'] = [Ticket.load(self.ticket.to_asn1())] krbcred['enc-part'] = EncryptedData({'etype': EncryptionType.NULL.value,", "write the kirbi files to \"\"\" kf_abs = os.path.abspath(directory_path) for", "= CCACHEOctetString.parse(reader) return c @staticmethod def summary_header(): return ['client','server','starttime','endtime','renew-till'] def", "f: kirbidata = f.read() kirbi = KRBCRED.load(kirbidata).native cc.add_kirbi(kirbi) return cc", "cache! This can cause problems!') else: logging.debug('This ticket has most", "t += self.renew_till.to_bytes(4, byteorder='big', signed=False) return t class Address: def", "'taglen: %s\\n' % self.taglen t += 'tagdata: %s\\n' % self.tagdata", "cc.add_kirbi(kirbi) return cc def to_kirbidir(self, directory_path): \"\"\" Converts all credential", "eof = reader.tell() reader.seek(pos,0) while reader.tell() < eof: cred =", "as f: return CCACHE.parse(f) def to_file(self, filename): \"\"\" Writes the", "= [] self.num_authdata = None self.authdata = [] self.ticket =", "byteorder='big', signed=False) p.num_components = int.from_bytes(reader.read(4), byteorder='big', signed=False) p.realm = CCACHEOctetString.parse(reader)", "%s\\n' % self.tagdata return t class DateTime: def __init__(self): self.time_offset", "= dt_to_kerbtime(enc_as_rep_part['authtime']) \\ if 'authtime' in enc_as_rep_part and enc_as_rep_part['authtime'] else", "range(c.headerlen): # c.headers.append(Header.parse(reader)) c.primary_principal = CCACHEPrincipal.parse(reader) pos = reader.tell() 
reader.seek(-1,2)", "if self.time.endtime != 0 else 'N/A', datetime.datetime.fromtimestamp(self.time.renew_till).isoformat() if self.time.renew_till !=", "but include_expired is forcing me to add it to cache!", "= c.client c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm']) c.time = Times.from_asn1(enc_tgs_rep_part) c.key", "23 or all_hashes == True: hashes.append(cred.to_hash()) return hashes @staticmethod def", "class Header: def __init__(self): self.tag = None self.taglen = None", "\"\"\" Creates credential object from the TGS and adds to", "__init__(self): self.time_offset = None self.usec_offset = None @staticmethod def parse(reader):", "self.primary_principal = c.client c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm']) c.time = Times.from_asn1(enc_as_rep_part)", "to_tgt(self): \"\"\" Returns the native format of an AS_REP message", "succsessful TGT request. This function doesn't do decryption of the", "client principal should be used as the primary principal for", "c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump()) c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c) def add_kirbi(self, krbcred,", "self.name_type = None self.num_components = None self.realm = None self.components", "Returns the native format of an AS_REP message and the", "Credential: def __init__(self): self.client = None self.server = None self.key", "None self.endtime = None self.renew_till = None @staticmethod def from_asn1(enc_as_rep_part):", "and returns hash for all tickets \"\"\" hashes = []", "separator='-'): return separator.join([c.to_string() for c in self.components]) def to_asn1(self): t", "self.file_format_version = None #0x0504 self.headers = [] self.primary_principal = None", "0 and cred.server.components[0].to_string() == 'krb5_ccache_conf_data' and cred.server.realm.to_string() == 'X-CACHECONF:'): c.credentials.append(cred)", "with open(kf_abs, 'rb') as f: 
kirbidata = f.read() return CCACHE.from_kirbi(kirbidata)", "for name in self.components]} return t, self.realm.to_string() @staticmethod def parse(reader):", "'%s@%s' % (self.client.to_string(separator='/'), self.client.realm.to_string()), '%s@%s' % (self.server.to_string(separator='/'), self.server.realm.to_string()), datetime.datetime.fromtimestamp(self.time.starttime).isoformat() if", "= '%s.kirbi' % filename.replace('..','!') filepath = os.path.join(kf_abs, filename) with open(filepath,", "= res['enc-part']['cipher'][-12:] tgs_encrypted_data2 = res['enc-part']['cipher'][:-12] return '$krb5tgs$%s$%s$%s$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(),", "% (self.client.to_string() , self.server.to_string(), hashlib.sha1(self.ticket.to_asn1()).hexdigest()[:8]) krbcredinfo = {} krbcredinfo['key'] =", "tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP \"\"\" Creates credential", "== -1: tgss.append(cred.to_tgs()) return tgss def get_hashes(self, all_hashes = False):", "int(res['enc-part']['etype']) == 23 or all_hashes == True: hashes.append(cred.to_hash()) return hashes", "range(c.num_authdata): c.authdata.append(Authdata.parse(reader)) c.ticket = CCACHEOctetString.parse(reader) c.second_ticket = CCACHEOctetString.parse(reader) return c", "c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c) def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp =", "range(1): p.components.append(CCACHEOctetString.from_string('kerbi')) return p def to_string(self, separator='-'): return separator.join([c.to_string() for", "False): c = Credential() enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native ticket_info = enc_credinfo['ticket-info'][0]", "= TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata = 0 c.ticket =", "\"\"\" with open(filename, 'rb') as f: return CCACHE.parse(f) def to_file(self,", "all_hashes == True: hashes.append(cred.to_hash()) 
return hashes @staticmethod def parse(reader): c", "datetime.datetime.fromtimestamp(self.time.renew_till).isoformat() if self.time.renew_till != 0 else 'N/A', ] def to_bytes(self):", "byteorder='big', signed=False) t.endtime = int.from_bytes(reader.read(4), byteorder='big', signed=False) t.renew_till = int.from_bytes(reader.read(4),", "ccache file The TGT is basically the native representation of", "header tags \"\"\" reader = io.BytesIO(data) headers = [] while", "datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc) krbcredinfo['srealm'] = self.server.realm.to_string() krbcredinfo['sname'] = self.server.to_asn1()[0] enc_krbcred =", "signed=False) for addr in self.addrs: t += addr.to_bytes() t +=", "byteorder='big', signed=False) c.headers = Header.parse(reader.read(hdr_size)) #c.headerlen = #for i in", "Keyblock() k.keytype = data['keytype'] k.etype = 0 # not sure", "\\ if 'starttime' in enc_as_rep_part and enc_as_rep_part['starttime'] else 0 t.endtime", "c.is_skey = 0 #not sure! c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native c.num_address =", "return t @staticmethod def parse(reader): k = Keyblock() k.keytype =", "= None @staticmethod def from_asn1(enc_as_rep_part): t = Times() t.authtime =", "@staticmethod def parse(reader): p = CCACHEPrincipal() p.name_type = int.from_bytes(reader.read(4), byteorder='big',", "int.from_bytes(reader.read(2), byteorder='big', signed=False) a.addrdata = CCACHEOctetString.parse(reader) return a def to_bytes(self):", "signed=False) t += self.starttime.to_bytes(4, byteorder='big', signed=False) t += self.endtime.to_bytes(4, byteorder='big',", "= Keyblock.from_asn1(data['key']) c.is_skey = 0 #not sure! 
c.tktflags = TicketFlags(data['flags']).cast(core.IntegerBitString).native", "_ in range(p.num_components): p.components.append(CCACHEOctetString.parse(reader)) return p def to_bytes(self): t =", "\\ if 'authtime' in enc_as_rep_part and enc_as_rep_part['authtime'] else 0 t.starttime", "parse(reader): k = Keyblock() k.keytype = int.from_bytes(reader.read(2), byteorder='big', signed=False) k.etype", "= Address() a.addrtype = int.from_bytes(reader.read(2), byteorder='big', signed=False) a.addrdata = CCACHEOctetString.parse(reader)", "None self.keylen = None self.keyvalue = None @staticmethod def from_asn1(data):", "= dt_to_kerbtime(enc_as_rep_part['renew_till']) \\ if 'renew_till' in enc_as_rep_part and enc_as_rep_part['renew_till'] else", "byteorder='big', signed=False) return t class Credential: def __init__(self): self.client =", "# Author: # <NAME> (@skelsec) # import os import io", "byteorder='little', signed=False) c.num_address = int.from_bytes(reader.read(4), byteorder='big', signed=False) for _ in", "cc = CCACHE() cc.add_kirbi(kirbi) return cc def get_all_tgt(self): \"\"\" Returns", "t += self.tktflags.to_bytes(4, byteorder='little', signed=False) t += self.num_address.to_bytes(4, byteorder='big', signed=False)", "None self.addrs = [] self.num_authdata = None self.authdata = []", "tgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value tgt_rep['crealm'] = self.server.realm.to_string() tgt_rep['cname'] = self.client.to_asn1()[0] tgt_rep['ticket']", "= len(data) if isinstance(data,str): o.data = data.encode() else: o.data =", "= CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one ticket per file c.second_ticket", "== EncryptionType.AES256_CTS_HMAC_SHA1_96.value: tgs_checksum = res['enc-part']['cipher'][-12:] tgs_encrypted_data2 = res['enc-part']['cipher'][:-12] return '$krb5tgs$%s$%s$%s$%s$%s'", "in a given directory and converts all of them into", "\"\"\" returns a list of header tags \"\"\" reader =", "0 o.data = b'' return o 
def to_asn1(self): return self.data", "\"\"\" As the header is rarely used -mostly static- you'd", "it is expected that the decrypted XXX is supplied in", "tickets with encryption type 23 (which is RC4) all_hashes: overrides", "self.headers: t+= '%s\\n' % header t+= 'primary_principal : %s\\n' %", "% header t+= 'primary_principal : %s\\n' % self.primary_principal return t", "self.__setup() def __setup(self): self.file_format_version = 0x0504 header = Header() header.tag", "= 0 c.ticket = CCACHEOctetString.from_asn1(ticket['enc-part']['cipher']) c.second_ticket = CCACHEOctetString.empty() return c", "= None self.starttime = None self.endtime = None self.renew_till =", "res = Ticket.load(self.ticket.to_asn1()).native tgs_encryption_type = int(res['enc-part']['etype']) t = len(res['sname']['name-string']) if", "datetime.timedelta(days=2)) return t @staticmethod def parse(reader): t = Times() t.authtime", "class Address: def __init__(self): self.addrtype = None self.addrdata = None", "return a def to_bytes(self): t = self.addrtype.to_bytes(2, byteorder='big', signed=False) t", "def to_tgs(self): \"\"\" Returns the native format of an AS_REP", "= principal['name-type'] p.num_components = len(principal['name-string']) p.realm = CCACHEOctetString.from_string(realm) for comp", "for cred in self.credentials: if cred.server.to_string(separator='/').lower().find('krbtgt') != -1: tgt =", "self.file_format_version for header in self.headers: t+= '%s\\n' % header t+=", "= TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata = 0 c.ticket =", "filtering and returns hash for all tickets \"\"\" hashes =", "# <NAME> (@skelsec) # import os import io import datetime", "ad.to_bytes() t += self.ticket.to_bytes() t += self.second_ticket.to_bytes() return t class", "p.num_components = int.from_bytes(reader.read(4), byteorder='big', signed=False) p.realm = CCACHEOctetString.parse(reader) for _", "0 t.renew_till = 
dt_to_kerbtime(enc_as_rep_part['renew_till']) \\ if 'renew_till' in enc_as_rep_part and", "open(filename, 'rb') as f: return CCACHE.parse(f) def to_file(self, filename): \"\"\"", "ticket_info['srealm'].upper(): logger.debug('SNAME contains the realm as well, trimming it') t", "c.time = Times.from_asn1(enc_tgs_rep_part) c.key = Keyblock.from_asn1(enc_tgs_rep_part['key']) c.is_skey = 0 #not", "datetime.datetime.fromtimestamp(self.time.endtime, datetime.timezone.utc) if self.time.renew_till != 0: #this parameter is not", "in self.components]} return t, self.realm.to_string() @staticmethod def parse(reader): p =", "None self.is_skey = None self.tktflags = None self.num_address = None", "this object with empty = True to get an object", "def __init__(self): self.keytype = None self.etype = None self.keylen =", "Times: def __init__(self): self.authtime = None self.starttime = None self.endtime", "to_asn1(self): t = {'name-type': self.name_type, 'name-string': [name.to_string() for name in", "signed=False) a.addrdata = CCACHEOctetString.parse(reader) return a def to_bytes(self): t =", "t def to_bytes(self): t = self.authtime.to_bytes(4, byteorder='big', signed=False) t +=", "#t_hdr = b'' #for header in self.headers: # t_hdr +=", "= None #0x0504 self.headers = [] self.primary_principal = None self.credentials", "Ticket, EncryptedData, \\ krb5_pvno, KrbCredInfo, EncryptionKey, KRBCRED, TicketFlags, EncKrbCredPart from", "__init__(self): self.client = None self.server = None self.key = None", "parse(reader): p = CCACHEPrincipal() p.name_type = int.from_bytes(reader.read(4), byteorder='big', signed=False) p.num_components", "t+= 'file_format_version : %s\\n' % self.file_format_version for header in self.headers:", "@staticmethod def empty(): o = CCACHEOctetString() o.length = 0 o.data", "h.taglen = int.from_bytes(reader.read(2), byteorder='big', signed=False) h.tagdata = reader.read(h.taglen) headers.append(h) return", "==\\n' t+= 'file_format_version : %s\\n' % 
self.file_format_version for header in", "Authdata() a.authtype = int.from_bytes(reader.read(2), byteorder='big', signed=False) a.authdata = CCACHEOctetString.parse(reader) return", "len(o.data) return o @staticmethod def from_asn1(data): o = CCACHEOctetString() o.length", "'== CCACHE ==\\n' t+= 'file_format_version : %s\\n' % self.file_format_version for", "def to_bytes(self): t = self.keytype.to_bytes(2, byteorder='big', signed=False) t += self.etype.to_bytes(2,", "determine if client principal should be used as the primary", "= [cred.to_tgt(), cred.time] tgts.append(tgt) return tgts def get_all_tgs(self): tgss =", "header.to_bytes() t += len(t_hdr).to_bytes(2, byteorder='big', signed=False) t += t_hdr t", "core # http://repo.or.cz/w/krb5dissect.git/blob_plain/HEAD:/ccache.txt class Header: def __init__(self): self.tag = None", "+= cred.to_bytes() return t @staticmethod def from_kirbifile(kirbi_filename): kf_abs = os.path.abspath(kirbi_filename)", "= [Ticket.load(self.ticket.to_asn1())] krbcred['enc-part'] = EncryptedData({'etype': EncryptionType.NULL.value, 'cipher': EncKrbCredPart(enc_krbcred).dump()}) kirbi =", "t = self.client.to_bytes() t += self.server.to_bytes() t += self.key.to_bytes() t", "encrypted part of the as_rep object, it is expected that", "= Credential.parse(reader) if not (len(cred.server.components) > 0 and cred.server.components[0].to_string() ==", "override_pp == True: self.primary_principal = c.client c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm'])", "= f.read() return CCACHE.from_kirbi(kirbidata) @staticmethod def from_kirbidir(directory_path): \"\"\" Iterates trough", "encoded TGS_REP data when the user requests a tgs to", "self.components = [] @staticmethod def from_asn1(principal, realm): p = CCACHEPrincipal()", "= 0 c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump()) c.second_ticket =", ") def to_tgt(self): \"\"\" Returns the native format of an", 
"self.headers = [] self.primary_principal = None self.credentials = [] if", "logging.debug('This ticket has most likely expired, skipping') return \"\"\" c.client", "self.starttime.to_bytes(4, byteorder='big', signed=False) t += self.endtime.to_bytes(4, byteorder='big', signed=False) t +=", "[KrbCredInfo(krbcredinfo)] krbcred = {} krbcred['pvno'] = krb5_pvno krbcred['msg-type'] = MESSAGE_TYPE.KRB_CRED.value", "int.from_bytes(reader.read(4), byteorder='big', signed=False) t.renew_till = int.from_bytes(reader.read(4), byteorder='big', signed=False) return t", "file The TGT is basically the native representation of the", "to_bytes(self): t = self.file_format_version.to_bytes(2, byteorder='big', signed=False) t_hdr = b'' for", "enc_as_rep_part['authtime'] else 0 t.starttime = dt_to_kerbtime(enc_as_rep_part['starttime']) \\ if 'starttime' in", "None @staticmethod def empty(): o = CCACHEOctetString() o.length = 0", "byteorder='big', signed=False) for _ in range(c.num_address): c.addrs.append(Address.parse(reader)) c.num_authdata = int.from_bytes(reader.read(4),", "t.renew_till = dt_to_kerbtime(start + datetime.timedelta(days=2)) return t @staticmethod def parse(reader):", "= 1 header.taglen = 8 #header.tagdata = b'\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x00' header.tagdata =", "reader.tell() < len(data): h = Header() h.tag = int.from_bytes(reader.read(2), byteorder='big',", "ticket per file c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c) @staticmethod def from_kirbi(kirbidata):", "= res['enc-part']['cipher'][16:] return '$krb5tgs$%s$*%s$%s$spn*$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() ) def", "False): #from AS_REP \"\"\" Creates credential object from the TGS", "realm as well htne impacket will crash miserably :( if", "t @staticmethod def from_kirbifile(kirbi_filename): kf_abs = os.path.abspath(kirbi_filename) kirbidata = None", "None #0x0504 self.headers = [] self.primary_principal = None 
self.credentials =", "tgs to a specific service principal with a valid TGT", "for com in self.components: t += com.to_bytes() return t class", "kirbidata = None with open(kf_abs, 'rb') as f: kirbidata =", "data['srealm']) c.key = Keyblock.from_asn1(data['key']) c.is_skey = 0 #not sure! c.tktflags", "def summary(self): return [ '%s@%s' % (self.client.to_string(separator='/'), self.client.realm.to_string()), '%s@%s' %", "def __init__(self): self.client = None self.server = None self.key =", "def from_string(data): o = CCACHEOctetString() o.data = data.encode() o.length =", "additional weirdness!!!! #if sname name-string contains a realm as well", "used by mimikatz. The kirbi file format supports one credential", "'X-CACHECONF:'): c.credentials.append(cred) return c def to_bytes(self): t = self.file_format_version.to_bytes(2, byteorder='big',", "= CCACHEOctetString() o.length = int.from_bytes(reader.read(4), byteorder='big', signed=False) o.data = reader.read(o.length)", "data when the user requests a tgs to a specific", "= io.BytesIO(data) headers = [] while reader.tell() < len(data): h", "= int.from_bytes(reader.read(2), byteorder='big', signed=False) c.headers = Header.parse(reader.read(hdr_size)) #c.headerlen = #for", "as the primary principal for the ccache file \"\"\" c", "+= self.primary_principal.to_bytes() for cred in self.credentials: t += cred.to_bytes() return", "CCACHEOctetString.empty() self.credentials.append(c) def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from", "be the kerberos service \"\"\" tgts = [] for cred", "This can cause problems!') else: logging.debug('This ticket has most likely", "% self.primary_principal return t def add_tgt(self, as_rep, enc_as_rep_part, override_pp =", "def parse(reader): a = Authdata() a.authtype = int.from_bytes(reader.read(2), byteorder='big', signed=False)", "'$krb5tgs$%s$*%s$%s$spn*$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() ) def 
to_tgt(self): \"\"\" Returns", "return t class Keyblock: def __init__(self): self.keytype = None self.etype", "= dt_to_kerbtime(enc_as_rep_part['starttime']) \\ if 'starttime' in enc_as_rep_part and enc_as_rep_part['starttime'] else", "= None self.endtime = None self.renew_till = None @staticmethod def", "htne impacket will crash miserably :( if len(ticket_info['sname']['name-string']) > 2", "in self.credentials: kirbi, filename = cred.to_kirbi() filename = '%s.kirbi' %", "True: self.primary_principal = c.client c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm']) c.time =", "for all tickets \"\"\" hashes = [] for cred in", "in enc_as_rep_part and enc_as_rep_part['authtime'] else 0 t.starttime = dt_to_kerbtime(enc_as_rep_part['starttime']) \\", "#not sure! c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata =", "hashes = [] for cred in self.credentials: res = Ticket.load(cred.ticket.to_asn1()).native", "b'' #for header in self.headers: # t_hdr += header.to_bytes() #self.headerlen", "EncryptedData, \\ krb5_pvno, KrbCredInfo, EncryptionKey, KRBCRED, TicketFlags, EncKrbCredPart from pycquery_krb.common.utils", "ticket_info['sname'] t['name-string'] = t['name-string'][:-1] c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm']) else: c.server", "file \"\"\" with open(filename, 'wb') as f: f.write(self.to_bytes()) @staticmethod def", "most of the time not present krbcredinfo['authtime'] = datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc)", "principal should be used as the primary principal for the", "= 0 c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump()) c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c) def", "return d def to_bytes(self): t = self.time_offset.to_bytes(4, byteorder='big', signed=False) t", "else: o.data = data return o @staticmethod def parse(reader): o", "f: return 
CCACHE.parse(f) def to_file(self, filename): \"\"\" Writes the contents", "+= t_hdr t += self.primary_principal.to_bytes() for cred in self.credentials: t", "the TGS and adds to the ccache file The TGS", "and returns a CCACHE object \"\"\" with open(filename, 'rb') as", "return kirbi, filename @staticmethod def from_asn1(ticket, data): ### # data", "get_hashes(self, all_hashes = False): \"\"\" Returns a list of hashes", "datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc) if self.time.starttime != 0: krbcredinfo['starttime'] = datetime.datetime.fromtimestamp(self.time.starttime, datetime.timezone.utc)", "= [] @staticmethod def from_asn1(principal, realm): p = CCACHEPrincipal() p.name_type", "it') t = ticket_info['sname'] t['name-string'] = t['name-string'][:-1] c.server = CCACHEPrincipal.from_asn1(t,", "None self.tktflags = None self.num_address = None self.addrs = []", "t['name-string'][:-1] c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm']) else: c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm'])", "krbcredinfo['srealm'] = self.server.realm.to_string() krbcredinfo['sname'] = self.server.to_asn1()[0] enc_krbcred = {} enc_krbcred['ticket-info']", "= Ticket.load(self.ticket.to_asn1()).native tgs_encryption_type = int(res['enc-part']['etype']) t = len(res['sname']['name-string']) if t", "add_kirbi(self, krbcred, override_pp = True, include_expired = False): c =", "c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c) @staticmethod def from_kirbi(kirbidata): kirbi = KRBCRED.load(kirbidata).native", "the user requests a tgs to a specific service principal", "+= self.time.to_bytes() t += self.is_skey.to_bytes(1, byteorder='big', signed=False) t += self.tktflags.to_bytes(4,", "self.components: t += com.to_bytes() return t class CCACHEOctetString: def __init__(self):", "2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper(): logger.debug('SNAME contains the 
realm as", "core.IntegerBitString(self.tktflags).cast(TicketFlags) if self.time.authtime != 0: #this parameter is not mandatory,", "#not sure! c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata =", "= CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm']) c.time = Times.from_asn1(enc_as_rep_part) c.key = Keyblock.from_asn1(enc_as_rep_part['key']) c.is_skey", "= CCACHE() dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi') for filename in glob.glob(dir_path):", "8 #header.tagdata = b'\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x00' header.tagdata = b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' self.headers.append(header) #t_hdr =", "def to_tgt(self): \"\"\" Returns the native format of an AS_REP", "cred.server.components[0].to_string() == 'krb5_ccache_conf_data' and cred.server.realm.to_string() == 'X-CACHECONF:'): c.credentials.append(cred) return c", "EncryptionKey(self.key.to_asn1()).native return tgt_rep, t def to_tgs(self): \"\"\" Returns the native", "to the ccache file The TGT is basically the native", ": %s\\n' % self.file_format_version for header in self.headers: t+= '%s\\n'", "ticket_info = enc_credinfo['ticket-info'][0] \"\"\" if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc): if include_expired", "bytes, encoded in 2 byte big-endian unsigned int self.primary_principal =", "Keyblock.from_asn1(ticket_info['key']) c.is_skey = 0 #not sure! 
c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native c.num_address", "t == 1: tgs_name_string = res['sname']['name-string'][0] else: tgs_name_string = res['sname']['name-string'][1]", "header.to_bytes() #self.headerlen = 1 #size of the entire header in", "a = Authdata() a.authtype = int.from_bytes(reader.read(2), byteorder='big', signed=False) a.authdata =", "t += self.key.to_bytes() t += self.time.to_bytes() t += self.is_skey.to_bytes(1, byteorder='big',", "{'name-type': self.name_type, 'name-string': [name.to_string() for name in self.components]} return t,", ", self.server.to_string(), hashlib.sha1(self.ticket.to_asn1()).hexdigest()[:8]) krbcredinfo = {} krbcredinfo['key'] = EncryptionKey(self.key.to_asn1()) krbcredinfo['prealm']", "= EncryptedData({'etype': EncryptionType.NULL.value, 'cipher': EncKrbCredPart(enc_krbcred).dump()}) kirbi = KRBCRED(krbcred) return kirbi,", "signed=False) t += len(self.tagdata).to_bytes(2, byteorder='big', signed=False) t += self.tagdata return", "for cred in self.credentials: res = Ticket.load(cred.ticket.to_asn1()).native if int(res['enc-part']['etype']) ==", "file The TGS is the native representation of the asn1", "= self.client.to_asn1()[0] krbcredinfo['flags'] = core.IntegerBitString(self.tktflags).cast(TicketFlags) if self.time.authtime != 0: #this", "= 0 #not sure! 
c.tktflags = TicketFlags(data['flags']).cast(core.IntegerBitString).native c.num_address = 0", "= CCACHEPrincipal.parse(reader) pos = reader.tell() reader.seek(-1,2) eof = reader.tell() reader.seek(pos,0)", "def to_kirbi(self): filename = '%s@%s_%s' % (self.client.to_string() , self.server.to_string(), hashlib.sha1(self.ticket.to_asn1()).hexdigest()[:8])", "= None @staticmethod def parse(reader): a = Address() a.addrtype =", "data that the AD sends upon a succsessful TGT request.", "t['keytype'] = self.keytype t['keyvalue'] = self.keyvalue return t @staticmethod def", "+= addr.to_bytes() t += self.num_authdata.to_bytes(4, byteorder='big', signed=False) for ad in", "'*.kirbi') for filename in glob.glob(dir_path): with open(filename, 'rb') as f:", "self.etype = None self.keylen = None self.keyvalue = None @staticmethod", "of the encrypted part of the as_rep object, it is", "one ticket per file c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c) @staticmethod def", "header.tagdata = b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' self.headers.append(header) #t_hdr = b'' #for header in", "a list of AS_REP tickets in native format (dict). 
To", "c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm']) if override_pp == True: self.primary_principal =", "= self.name_type.to_bytes(4, byteorder='big', signed=False) t += len(self.components).to_bytes(4, byteorder='big', signed=False) t", "if isinstance(self.data,str): self.data = self.data.encode() self.length = len(self.data) t =", "as o: o.write(kirbi.dump()) @staticmethod def from_file(filename): \"\"\" Parses the ccache", "+= self.renew_till.to_bytes(4, byteorder='big', signed=False) return t class Address: def __init__(self):", "\"\"\" kf_abs = os.path.abspath(directory_path) for cred in self.credentials: kirbi, filename", "= len(self.data).to_bytes(4, byteorder='big', signed=False) t += self.data return t class", "self.key = None self.time = None self.is_skey = None self.tktflags", "from_asn1(ticket, data): ### # data = KrbCredInfo ### c =", "f: kirbidata = f.read() return CCACHE.from_kirbi(kirbidata) @staticmethod def from_kirbidir(directory_path): \"\"\"", "t = self.name_type.to_bytes(4, byteorder='big', signed=False) t += len(self.components).to_bytes(4, byteorder='big', signed=False)", "present krbcredinfo['renew-till'] = datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc) krbcredinfo['srealm'] = self.server.realm.to_string() krbcredinfo['sname'] =", "= res['sname']['name-string'][1] tgs_realm = res['realm'] if tgs_encryption_type == EncryptionType.AES256_CTS_HMAC_SHA1_96.value: tgs_checksum", "else 0 t.renew_till = dt_to_kerbtime(enc_as_rep_part['renew_till']) \\ if 'renew_till' in enc_as_rep_part", "Keyblock.from_asn1(enc_as_rep_part['key']) c.is_skey = 0 #not sure! 
c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native c.num_address", "byteorder='big', signed=False) hdr_size = int.from_bytes(reader.read(2), byteorder='big', signed=False) c.headers = Header.parse(reader.read(hdr_size))", "Header() h.tag = int.from_bytes(reader.read(2), byteorder='big', signed=False) h.taglen = int.from_bytes(reader.read(2), byteorder='big',", "= self.server.realm.to_string() krbcredinfo['sname'] = self.server.to_asn1()[0] enc_krbcred = {} enc_krbcred['ticket-info'] =", "p @staticmethod def dummy(): p = CCACHEPrincipal() p.name_type = 1", "not present krbcredinfo['renew-till'] = datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc) krbcredinfo['srealm'] = self.server.realm.to_string() krbcredinfo['sname']", "contains a realm as well htne impacket will crash miserably", "= EncryptedData({'etype': 1, 'cipher': b''}) tgt_rep = {} tgt_rep['pvno'] =", "signed=False) t += self.realm.to_bytes() for com in self.components: t +=", "data.encode() o.length = len(o.data) return o @staticmethod def from_asn1(data): o", "in range(c.headerlen): # c.headers.append(Header.parse(reader)) c.primary_principal = CCACHEPrincipal.parse(reader) pos = reader.tell()", "None self.keyvalue = None @staticmethod def from_asn1(data): k = Keyblock()", "+= self.keylen.to_bytes(2, byteorder='big', signed=False) t += self.keyvalue return t class", "CCACHE object \"\"\" cc = CCACHE() dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi')", "= dt_to_kerbtime(start ) t.endtime = dt_to_kerbtime(start + datetime.timedelta(days=1)) t.renew_till =", "returns a CCACHE object \"\"\" with open(filename, 'rb') as f:", "signed=False) p.realm = CCACHEOctetString.parse(reader) for _ in range(p.num_components): p.components.append(CCACHEOctetString.parse(reader)) return", "object without header already present \"\"\" def __init__(self, empty =", "ticket_info['srealm']) c.time = Times.from_asn1(ticket_info) c.key = 
Keyblock.from_asn1(ticket_info['key']) c.is_skey = 0", "override_pp = False): #from AS_REP \"\"\" Creates credential object from", "Creates credential object from the TGT and adds to the", "byte big-endian unsigned int self.primary_principal = CCACHEPrincipal.dummy() def __str__(self): t", "= int.from_bytes(reader.read(4), byteorder='big', signed=False) for _ in range(c.num_authdata): c.authdata.append(Authdata.parse(reader)) c.ticket", "int.from_bytes(reader.read(4), byteorder='big', signed=False) d.usec_offset = int.from_bytes(reader.read(4), byteorder='big', signed=False) return d", "self.authdata = [] self.ticket = None self.second_ticket = None def", "#c.headerlen = #for i in range(c.headerlen): # c.headers.append(Header.parse(reader)) c.primary_principal =", "file and returns a CCACHE object \"\"\" with open(filename, 'rb')", "file format supports one credential per file, so prepare for", "= 0 #not sure! c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native c.num_address = 0", "to_bytes(self): t = self.time_offset.to_bytes(4, byteorder='big', signed=False) t += self.usec_offset.to_bytes(4, byteorder='big',", "CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm']) if override_pp == True: self.primary_principal = c.client c.server", "t += len(self.tagdata).to_bytes(2, byteorder='big', signed=False) t += self.tagdata return t", "self.tktflags.to_bytes(4, byteorder='little', signed=False) t += self.num_address.to_bytes(4, byteorder='big', signed=False) for addr", "def get_all_tgt(self): \"\"\" Returns a list of AS_REP tickets in", "= self.server.to_asn1()[0] enc_krbcred = {} enc_krbcred['ticket-info'] = [KrbCredInfo(krbcredinfo)] krbcred =", "= None self.keyvalue = None @staticmethod def from_asn1(data): k =", "= False): self.file_format_version = None #0x0504 self.headers = [] self.primary_principal", "byteorder='big', signed=False) t_hdr = b'' for header in self.headers: t_hdr", "addr in self.addrs: t += addr.to_bytes() 
t += self.num_authdata.to_bytes(4, byteorder='big',", "c.tktflags = TicketFlags(data['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata = 0 c.ticket", "return tgt_rep, t def to_kirbi(self): filename = '%s@%s_%s' % (self.client.to_string()", "= reader.read(h.taglen) headers.append(h) return headers def to_bytes(self): t = self.tag.to_bytes(2,", "an AS_REP message and the sessionkey in EncryptionKey native format", "Credential() c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm']) if override_pp == True: self.primary_principal", "KRBCRED.load(kirbidata).native cc.add_kirbi(kirbi) return cc def to_kirbidir(self, directory_path): \"\"\" Converts all", "tgts = [] for cred in self.credentials: if cred.server.to_string(separator='/').lower().find('krbtgt') !=", "def to_bytes(self): t = self.tag.to_bytes(2, byteorder='big', signed=False) t += len(self.tagdata).to_bytes(2,", ".kirbi files in a given directory and converts all of", "= Ticket.load(self.ticket.to_asn1()).native tgt_rep['enc-part'] = enc_part.native t = EncryptionKey(self.key.to_asn1()).native return tgt_rep,", "tgt_rep['cname'] = self.client.to_asn1()[0] tgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native tgt_rep['enc-part'] = enc_part.native t", "parse(reader): a = Authdata() a.authtype = int.from_bytes(reader.read(2), byteorder='big', signed=False) a.authdata", "for cred in self.credentials: if cred.server.to_string(separator = '/').lower().find('krbtgt') == -1:", "CCACHEPrincipal() p.name_type = int.from_bytes(reader.read(4), byteorder='big', signed=False) p.num_components = int.from_bytes(reader.read(4), byteorder='big',", "< datetime.datetime.now(datetime.timezone.utc): if include_expired == True: logging.debug('This ticket has most", "0: krbcredinfo['starttime'] = datetime.datetime.fromtimestamp(self.time.starttime, datetime.timezone.utc) if self.time.endtime != 0: krbcredinfo['endtime']", "def parse(data): \"\"\" returns a list of header tags 
\"\"\"", "add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP \"\"\" Creates", "+= self.num_authdata.to_bytes(4, byteorder='big', signed=False) for ad in self.authdata: t +=", "and enc_as_rep_part['starttime'] else 0 t.endtime = dt_to_kerbtime(enc_as_rep_part['endtime']) \\ if 'endtime'", "CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm']) c.time = Times.from_asn1(enc_tgs_rep_part) c.key = Keyblock.from_asn1(enc_tgs_rep_part['key']) c.is_skey =", "None @staticmethod def parse(reader): d = DateTime() d.time_offset = int.from_bytes(reader.read(4),", "EncryptionKey(self.key.to_asn1()).native return tgt_rep, t def to_kirbi(self): filename = '%s@%s_%s' %", "parse(reader): c = CCACHE(True) c.file_format_version = int.from_bytes(reader.read(2), byteorder='big', signed=False) hdr_size", "tgs_checksum = res['enc-part']['cipher'][-12:] tgs_encrypted_data2 = res['enc-part']['cipher'][:-12] return '$krb5tgs$%s$%s$%s$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm,", "in self.headers: # t_hdr += header.to_bytes() #self.headerlen = 1 #size", "[] if empty == False: self.__setup() def __setup(self): self.file_format_version =", "signed=False) a.authdata = CCACHEOctetString.parse(reader) return a def to_bytes(self): t =", "to a file \"\"\" with open(filename, 'wb') as f: f.write(self.to_bytes())", "CCACHE ==\\n' t+= 'file_format_version : %s\\n' % self.file_format_version for header", "p.realm = CCACHEOctetString.parse(reader) for _ in range(p.num_components): p.components.append(CCACHEOctetString.parse(reader)) return p", "def from_kirbi(kirbidata): kirbi = KRBCRED.load(kirbidata).native cc = CCACHE() cc.add_kirbi(kirbi) return", "(dict). 
To determine which ticket are AP_REP we check for", "filename @staticmethod def from_asn1(ticket, data): ### # data = KrbCredInfo", "self.addrtype = None self.addrdata = None @staticmethod def parse(reader): a", "present krbcredinfo['authtime'] = datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc) if self.time.starttime != 0: krbcredinfo['starttime']", "res = Ticket.load(cred.ticket.to_asn1()).native if int(res['enc-part']['etype']) == 23 or all_hashes ==", "None self.data = None @staticmethod def empty(): o = CCACHEOctetString()", "def add_kirbi(self, krbcred, override_pp = True, include_expired = False): c", "TGT is basically the native representation of the asn1 encoded", "= CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm']) c.time = Times.from_asn1(ticket_info) c.key = Keyblock.from_asn1(ticket_info['key']) c.is_skey", "be used as the primary principal for the ccache file", "t += self.endtime.to_bytes(4, byteorder='big', signed=False) t += self.renew_till.to_bytes(4, byteorder='big', signed=False)", "prepare for a lot of files being generated. 
directory_path: str", "= os.path.join(kf_abs, filename) with open(filepath, 'wb') as o: o.write(kirbi.dump()) @staticmethod", "EncKrbCredPart from pycquery_krb.common.utils import dt_to_kerbtime, TGSTicket2hashcat from pycquery_krb.protocol.constants import EncryptionType,", ":( if len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper(): logger.debug('SNAME", "self.tktflags = None self.num_address = None self.addrs = [] self.num_authdata", "t def add_tgt(self, as_rep, enc_as_rep_part, override_pp = True): #from AS_REP", "krbcredinfo['renew-till'] = datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc) krbcredinfo['srealm'] = self.server.realm.to_string() krbcredinfo['sname'] = self.server.to_asn1()[0]", "in self.authdata: t += ad.to_bytes() t += self.ticket.to_bytes() t +=", "io.BytesIO(data) headers = [] while reader.tell() < len(data): h =", "\"\"\" def __init__(self, empty = False): self.file_format_version = None #0x0504", "with open(filename, 'wb') as f: f.write(self.to_bytes()) @staticmethod def from_bytes(data): return", "def __init__(self): self.authtime = None self.starttime = None self.endtime =", "TGS and adds to the ccache file The TGS is", "enc_credinfo['ticket-info'][0] \"\"\" if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc): if include_expired == True:", "supports one credential per file, so prepare for a lot", "self.starttime = None self.endtime = None self.renew_till = None @staticmethod", "self.credentials: res = Ticket.load(cred.ticket.to_asn1()).native if int(res['enc-part']['etype']) == 23 or all_hashes", "= res['enc-part']['cipher'][:-12] return '$krb5tgs$%s$%s$%s$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() ) else:", "for _ in range(c.num_authdata): c.authdata.append(Authdata.parse(reader)) c.ticket = CCACHEOctetString.parse(reader) c.second_ticket =", 
"len(self.components).to_bytes(4, byteorder='big', signed=False) t += self.realm.to_bytes() for com in self.components:", "get_all_tgs(self): tgss = [] for cred in self.credentials: if cred.server.to_string(separator", "t_hdr = b'' for header in self.headers: t_hdr += header.to_bytes()", "c.server = CCACHEPrincipal.from_asn1(data['sname'], data['srealm']) c.key = Keyblock.from_asn1(data['key']) c.is_skey = 0", "t += 'tagdata: %s\\n' % self.tagdata return t class DateTime:", "self.time.authtime != 0: #this parameter is not mandatory, and most", "def to_hash(self): res = Ticket.load(self.ticket.to_asn1()).native tgs_encryption_type = int(res['enc-part']['etype']) t =", "credential object from the TGT and adds to the ccache", "= len(res['sname']['name-string']) if t == 1: tgs_name_string = res['sname']['name-string'][0] else:", "format used by mimikatz. The kirbi file format supports one", "given directory and converts all of them into one CCACHE", "c.client c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm']) c.time = Times.from_asn1(enc_as_rep_part) c.key =", "krbcredinfo['key'] = EncryptionKey(self.key.to_asn1()) krbcredinfo['prealm'] = self.client.realm.to_string() krbcredinfo['pname'] = self.client.to_asn1()[0] krbcredinfo['flags']", "header in self.headers: # t_hdr += header.to_bytes() #self.headerlen = 1", "class CCACHEPrincipal: def __init__(self): self.name_type = None self.num_components = None", "open(kf_abs, 'rb') as f: kirbidata = f.read() return CCACHE.from_kirbi(kirbidata) @staticmethod", "signed=False) t += self.authdata.to_bytes() return t class CCACHEPrincipal: def __init__(self):", "= krb5_pvno tgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value tgt_rep['crealm'] = self.server.realm.to_string() tgt_rep['cname'] =", "def from_asn1(principal, realm): p = CCACHEPrincipal() p.name_type = principal['name-type'] p.num_components", "data.encode() else: o.data = data return o @staticmethod def parse(reader):", 
"CCACHEPrincipal() p.name_type = principal['name-type'] p.num_components = len(principal['name-string']) p.realm = CCACHEOctetString.from_string(realm)", "= 0x0504 header = Header() header.tag = 1 header.taglen =", "t.authtime = int.from_bytes(reader.read(4), byteorder='big', signed=False) t.starttime = int.from_bytes(reader.read(4), byteorder='big', signed=False)", "self.credentials.append(c) def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP", "\"\"\" Returns a list of hashes in hashcat-firendly format for", "!= 0: #this parameter is not mandatory, and sometimes it's", "a.authdata = CCACHEOctetString.parse(reader) return a def to_bytes(self): t = self.authtype.to_bytes(2,", "= 0 #not sure! c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native c.num_address = 0", "Ticket.load(self.ticket.to_asn1()).native tgs_encryption_type = int(res['enc-part']['etype']) t = len(res['sname']['name-string']) if t ==", "= int.from_bytes(reader.read(4), byteorder='big', signed=False) t.renew_till = int.from_bytes(reader.read(4), byteorder='big', signed=False) return", "int.from_bytes(reader.read(2), byteorder='big', signed=False) k.etype = int.from_bytes(reader.read(2), byteorder='big', signed=False) k.keylen =", "if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc): if include_expired == True: logging.debug('This ticket", "h.tagdata = reader.read(h.taglen) headers.append(h) return headers def to_bytes(self): t =", "and adds to the ccache file The TGS is the", "isinstance(self.data,str): self.data = self.data.encode() self.length = len(self.data) t = len(self.data).to_bytes(4,", "the kirbi files to \"\"\" kf_abs = os.path.abspath(directory_path) for cred", "= int.from_bytes(reader.read(4), byteorder='big', signed=False) p.realm = CCACHEOctetString.parse(reader) for _ in", "realm as well, trimming it') t = ticket_info['sname'] t['name-string'] =", "list of header tags \"\"\" reader = io.BytesIO(data) 
headers =", "c.headers = Header.parse(reader.read(hdr_size)) #c.headerlen = #for i in range(c.headerlen): #", "the kerberos service \"\"\" tgts = [] for cred in", "t += t_hdr t += self.primary_principal.to_bytes() for cred in self.credentials:", "def parse(reader): t = Times() t.authtime = int.from_bytes(reader.read(4), byteorder='big', signed=False)", "need to init this object with empty = True to", "\"\"\" Converts all credential object in the CCACHE object to", "# import os import io import datetime import glob import", "self.endtime = None self.renew_till = None @staticmethod def from_asn1(enc_as_rep_part): t", "'$krb5tgs$%s$%s$%s$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() ) else: tgs_checksum = res['enc-part']['cipher'][:16]", "True, include_expired = False): c = Credential() enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native", "def to_bytes(self): t = self.addrtype.to_bytes(2, byteorder='big', signed=False) t += self.addrdata.to_bytes()", "= reader.tell() reader.seek(pos,0) while reader.tell() < eof: cred = Credential.parse(reader)", "\"\"\" cc = CCACHE() dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi') for filename", "to write the kirbi files to \"\"\" kf_abs = os.path.abspath(directory_path)", "None @staticmethod def from_asn1(data): k = Keyblock() k.keytype = data['keytype']", "self.credentials = [] if empty == False: self.__setup() def __setup(self):", "dt_to_kerbtime(enc_as_rep_part['authtime']) \\ if 'authtime' in enc_as_rep_part and enc_as_rep_part['authtime'] else 0", "= Credential() c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm']) if override_pp == True:", "= CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump()) c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c) def add_tgs(self, tgs_rep, enc_tgs_rep_part,", "to_bytes(self): t = self.tag.to_bytes(2, byteorder='big', signed=False) t += 
len(self.tagdata).to_bytes(2, byteorder='big',", "self.authdata.to_bytes() return t class CCACHEPrincipal: def __init__(self): self.name_type = None", "+= self.tktflags.to_bytes(4, byteorder='little', signed=False) t += self.num_address.to_bytes(4, byteorder='big', signed=False) for", "of AS_REP tickets in native format (dict). To determine which", "to_bytes(self): t = self.client.to_bytes() t += self.server.to_bytes() t += self.key.to_bytes()", "self.server.to_bytes() t += self.key.to_bytes() t += self.time.to_bytes() t += self.is_skey.to_bytes(1,", "True to get an object without header already present \"\"\"", "!= 0 else 'N/A', datetime.datetime.fromtimestamp(self.time.renew_till).isoformat() if self.time.renew_till != 0 else", "byteorder='big', signed=False) return t def to_bytes(self): t = self.authtime.to_bytes(4, byteorder='big',", "0 #not sure! c.tktflags = TicketFlags(data['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata", "primary principal for the ccache file \"\"\" c = Credential()", "import Ticket, EncryptedData, \\ krb5_pvno, KrbCredInfo, EncryptionKey, KRBCRED, TicketFlags, EncKrbCredPart", "+= self.data return t class CCACHE: \"\"\" As the header", "def to_kirbidir(self, directory_path): \"\"\" Converts all credential object in the", "CCACHE object to a file \"\"\" with open(filename, 'wb') as", "CCACHEPrincipal.from_asn1(data['sname'], data['srealm']) c.key = Keyblock.from_asn1(data['key']) c.is_skey = 0 #not sure!", "EncryptionKey(self.key.to_asn1()) krbcredinfo['prealm'] = self.client.realm.to_string() krbcredinfo['pname'] = self.client.to_asn1()[0] krbcredinfo['flags'] = core.IntegerBitString(self.tktflags).cast(TicketFlags)", "object to a file \"\"\" with open(filename, 'wb') as f:", "object from the TGT and adds to the ccache file", "% (self.server.to_string(separator='/'), self.server.realm.to_string()), datetime.datetime.fromtimestamp(self.time.starttime).isoformat() if self.time.starttime != 0 else 'N/A',", 
"t.authtime = dt_to_kerbtime(enc_as_rep_part['authtime']) \\ if 'authtime' in enc_as_rep_part and enc_as_rep_part['authtime']", "if 'authtime' in enc_as_rep_part and enc_as_rep_part['authtime'] else 0 t.starttime =", "@staticmethod def from_asn1(data): k = Keyblock() k.keytype = data['keytype'] k.etype", "byteorder='big', signed=False) t += self.starttime.to_bytes(4, byteorder='big', signed=False) t += self.endtime.to_bytes(4,", "signed=False) h.taglen = int.from_bytes(reader.read(2), byteorder='big', signed=False) h.tagdata = reader.read(h.taglen) headers.append(h)", "= self.keytype t['keyvalue'] = self.keyvalue return t @staticmethod def parse(reader):", "return t class CCACHE: \"\"\" As the header is rarely", "return o def to_asn1(self): return self.data def to_string(self): return self.data.decode()", "kirbidata = f.read() kirbi = KRBCRED.load(kirbidata).native cc.add_kirbi(kirbi) return cc def", "= MESSAGE_TYPE.KRB_CRED.value krbcred['tickets'] = [Ticket.load(self.ticket.to_asn1())] krbcred['enc-part'] = EncryptedData({'etype': EncryptionType.NULL.value, 'cipher':", "dt_to_kerbtime(enc_as_rep_part['starttime']) \\ if 'starttime' in enc_as_rep_part and enc_as_rep_part['starttime'] else 0", "def summary_header(): return ['client','server','starttime','endtime','renew-till'] def summary(self): return [ '%s@%s' %", "the kirbi file format used by mimikatz. 
The kirbi file", "from_string(data): o = CCACHEOctetString() o.data = data.encode() o.length = len(o.data)", "= [] for cred in self.credentials: res = Ticket.load(cred.ticket.to_asn1()).native if", "MESSAGE_TYPE.KRB_CRED.value krbcred['tickets'] = [Ticket.load(self.ticket.to_asn1())] krbcred['enc-part'] = EncryptedData({'etype': EncryptionType.NULL.value, 'cipher': EncKrbCredPart(enc_krbcred).dump()})", "self.credentials.append(c) @staticmethod def from_kirbi(kirbidata): kirbi = KRBCRED.load(kirbidata).native cc = CCACHE()", "function doesn't do decryption of the encrypted part of the", "= int.from_bytes(reader.read(4), byteorder='big', signed=False) d.usec_offset = int.from_bytes(reader.read(4), byteorder='big', signed=False) return", "to init this object with empty = True to get", "per file, so prepare for a lot of files being", "enc_as_rep_part override_pp: bool to determine if client principal should be", "c.num_address = 0 c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi", "tgt_rep['enc-part'] = enc_part.native t = EncryptionKey(self.key.to_asn1()).native return tgt_rep, t def", "being generated. 
directory_path: str the directory to write the kirbi", "= int.from_bytes(reader.read(4), byteorder='little', signed=False) c.num_address = int.from_bytes(reader.read(4), byteorder='big', signed=False) for", "len(data): h = Header() h.tag = int.from_bytes(reader.read(2), byteorder='big', signed=False) h.taglen", "_ in range(c.num_authdata): c.authdata.append(Authdata.parse(reader)) c.ticket = CCACHEOctetString.parse(reader) c.second_ticket = CCACHEOctetString.parse(reader)", "= True): #from AS_REP \"\"\" Creates credential object from the", "self.taglen t += 'tagdata: %s\\n' % self.tagdata return t class", "signed=False) c.headers = Header.parse(reader.read(hdr_size)) #c.headerlen = #for i in range(c.headerlen):", "return headers def to_bytes(self): t = self.tag.to_bytes(2, byteorder='big', signed=False) t", "signed=False) c.tktflags = int.from_bytes(reader.read(4), byteorder='little', signed=False) c.num_address = int.from_bytes(reader.read(4), byteorder='big',", "def __init__(self): self.tag = None self.taglen = None self.tagdata =", "Credential.parse(reader) if not (len(cred.server.components) > 0 and cred.server.components[0].to_string() == 'krb5_ccache_conf_data'", "in hashcat-firendly format for tickets with encryption type 23 (which", "def to_bytes(self): t = self.authtype.to_bytes(2, byteorder='big', signed=False) t += self.authdata.to_bytes()", "is forcing me to add it to cache! 
This can", "= False): #from AS_REP \"\"\" Creates credential object from the", "'N/A', datetime.datetime.fromtimestamp(self.time.endtime).isoformat() if self.time.endtime != 0 else 'N/A', datetime.datetime.fromtimestamp(self.time.renew_till).isoformat() if", "The TGT is basically the native representation of the asn1", "c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata = 0 c.ticket", "= res['realm'] if tgs_encryption_type == EncryptionType.AES256_CTS_HMAC_SHA1_96.value: tgs_checksum = res['enc-part']['cipher'][-12:] tgs_encrypted_data2", "= t['name-string'][:-1] c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm']) else: c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'],", "True: self.primary_principal = c.client #yaaaaay 4 additional weirdness!!!! #if sname", "= [] self.primary_principal = None self.credentials = [] if empty", "= TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata = 0 c.ticket =", "self.data return t class CCACHE: \"\"\" As the header is", "0 c.ticket = CCACHEOctetString.from_asn1(ticket['enc-part']['cipher']) c.second_ticket = CCACHEOctetString.empty() return c @staticmethod", "t += addr.to_bytes() t += self.num_authdata.to_bytes(4, byteorder='big', signed=False) for ad", "of an AS_REP message and the sessionkey in EncryptionKey native", "t.authtime = dt_to_kerbtime(start) t.starttime = dt_to_kerbtime(start ) t.endtime = dt_to_kerbtime(start", "EncryptedData({'etype': EncryptionType.NULL.value, 'cipher': EncKrbCredPart(enc_krbcred).dump()}) kirbi = KRBCRED(krbcred) return kirbi, filename", "CCACHEOctetString.parse(reader) return a def to_bytes(self): t = self.authtype.to_bytes(2, byteorder='big', signed=False)", "0 c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump()) c.second_ticket = CCACHEOctetString.empty()", "int.from_bytes(reader.read(2), byteorder='big', 
signed=False) h.taglen = int.from_bytes(reader.read(2), byteorder='big', signed=False) h.tagdata =", "i in range(c.headerlen): # c.headers.append(Header.parse(reader)) c.primary_principal = CCACHEPrincipal.parse(reader) pos =", "+= self.realm.to_bytes() for com in self.components: t += com.to_bytes() return", "+= self.second_ticket.to_bytes() return t class Keyblock: def __init__(self): self.keytype =", "header = Header() header.tag = 1 header.taglen = 8 #header.tagdata", "= CCACHEPrincipal() p.name_type = int.from_bytes(reader.read(4), byteorder='big', signed=False) p.num_components = int.from_bytes(reader.read(4),", "parse(reader): o = CCACHEOctetString() o.length = int.from_bytes(reader.read(4), byteorder='big', signed=False) o.data", "__setup(self): self.file_format_version = 0x0504 header = Header() header.tag = 1", "def __init__(self): self.name_type = None self.num_components = None self.realm =", "files to \"\"\" kf_abs = os.path.abspath(directory_path) for cred in self.credentials:", "= res['enc-part']['cipher'][:16] tgs_encrypted_data2 = res['enc-part']['cipher'][16:] return '$krb5tgs$%s$*%s$%s$spn*$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(),", "reader.read(k.keylen) return k def to_bytes(self): t = self.keytype.to_bytes(2, byteorder='big', signed=False)", "enc_tgs_rep_part, override_pp = False): #from AS_REP \"\"\" Creates credential object", "enc_as_rep_part['srealm']) c.time = Times.from_asn1(enc_as_rep_part) c.key = Keyblock.from_asn1(enc_as_rep_part['key']) c.is_skey = 0", "self.num_address.to_bytes(4, byteorder='big', signed=False) for addr in self.addrs: t += addr.to_bytes()", "we check for the server principal to be the kerberos", "self.credentials.append(c) def add_kirbi(self, krbcred, override_pp = True, include_expired = False):", "else: tgs_name_string = res['sname']['name-string'][1] tgs_realm = res['realm'] if tgs_encryption_type ==", "the native format of an AS_REP message and the sessionkey", "self.keytype 
t['keyvalue'] = self.keyvalue return t @staticmethod def parse(reader): k", "CCACHEPrincipal.parse(reader) c.key = Keyblock.parse(reader) c.time = Times.parse(reader) c.is_skey = int.from_bytes(reader.read(1),", "dt_to_kerbtime(start) t.starttime = dt_to_kerbtime(start ) t.endtime = dt_to_kerbtime(start + datetime.timedelta(days=1))", "= enc_part.native t = EncryptionKey(self.key.to_asn1()).native return tgt_rep, t def to_kirbi(self):", "= 1 p.num_components = 1 p.realm = CCACHEOctetString.from_string('kerbi.corp') for _", "import io import datetime import glob import hashlib from pycquery_krb.protocol.asn1_structs", "+= self.etype.to_bytes(2, byteorder='big', signed=False) t += self.keylen.to_bytes(2, byteorder='big', signed=False) t", "asn1crypto import core # http://repo.or.cz/w/krb5dissect.git/blob_plain/HEAD:/ccache.txt class Header: def __init__(self): self.tag", "\"\"\" c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm']) if override_pp == True: self.primary_principal", "to_bytes(self): if isinstance(self.data,str): self.data = self.data.encode() self.length = len(self.data) t", "= data['keytype'] k.etype = 0 # not sure k.keylen =", "EncryptionType, MESSAGE_TYPE from pycquery_krb import logger from asn1crypto import core", "from the TGS and adds to the ccache file The", "tgs_encrypted_data2 = res['enc-part']['cipher'][:-12] return '$krb5tgs$%s$%s$%s$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() )", "AS_REP \"\"\" Creates credential object from the TGS and adds", "+= com.to_bytes() return t class CCACHEOctetString: def __init__(self): self.length =", "Authdata: def __init__(self): self.authtype = None self.authdata = None @staticmethod", "### # data = KrbCredInfo ### c = Credential() c.client", "list of AS_REP tickets in native format (dict). 
To determine", "directory_path: str the directory to write the kirbi files to", "1 p.realm = CCACHEOctetString.from_string('kerbi.corp') for _ in range(1): p.components.append(CCACHEOctetString.from_string('kerbi')) return", "def __init__(self): self.time_offset = None self.usec_offset = None @staticmethod def", "krbcredinfo['endtime'] = datetime.datetime.fromtimestamp(self.time.endtime, datetime.timezone.utc) if self.time.renew_till != 0: #this parameter", "self.length = None self.data = None @staticmethod def empty(): o", "os.path.abspath(directory_path) for cred in self.credentials: kirbi, filename = cred.to_kirbi() filename", "c.key = Keyblock.from_asn1(data['key']) c.is_skey = 0 #not sure! c.tktflags =", "self.realm.to_bytes() for com in self.components: t += com.to_bytes() return t", "if self.time.renew_till != 0 else 'N/A', ] def to_bytes(self): t", "Times() t.authtime = int.from_bytes(reader.read(4), byteorder='big', signed=False) t.starttime = int.from_bytes(reader.read(4), byteorder='big',", "TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump())", "byteorder='big', signed=False) for ad in self.authdata: t += ad.to_bytes() t", "\"\"\" tgts = [] for cred in self.credentials: if cred.server.to_string(separator='/').lower().find('krbtgt')", "kirbidata = f.read() return CCACHE.from_kirbi(kirbidata) @staticmethod def from_kirbidir(directory_path): \"\"\" Iterates", "will crash miserably :( if len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper()", "= CCACHEPrincipal.parse(reader) c.server = CCACHEPrincipal.parse(reader) c.key = Keyblock.parse(reader) c.time =", "signed=False) k.keyvalue = reader.read(k.keylen) return k def to_bytes(self): t =", "c.addrs.append(Address.parse(reader)) c.num_authdata = int.from_bytes(reader.read(4), byteorder='big', signed=False) for _ in range(c.num_authdata):", 
"override_pp = True): #from AS_REP \"\"\" Creates credential object from", "to a specific service principal with a valid TGT This", "The TGS is the native representation of the asn1 encoded", "override_pp == True: self.primary_principal = c.client c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm'])", "lot of files being generated. directory_path: str the directory to", "file, so prepare for a lot of files being generated.", "CCACHEPrincipal.from_asn1(data['pname'], data['prealm']) c.server = CCACHEPrincipal.from_asn1(data['sname'], data['srealm']) c.key = Keyblock.from_asn1(data['key']) c.is_skey", "0 #not sure! c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata", "import core # http://repo.or.cz/w/krb5dissect.git/blob_plain/HEAD:/ccache.txt class Header: def __init__(self): self.tag =", ") t.endtime = dt_to_kerbtime(start + datetime.timedelta(days=1)) t.renew_till = dt_to_kerbtime(start +", "kirbi file format used by mimikatz. 
The kirbi file format", "1 p.num_components = 1 p.realm = CCACHEOctetString.from_string('kerbi.corp') for _ in", "DateTime: def __init__(self): self.time_offset = None self.usec_offset = None @staticmethod", "= None with open(kf_abs, 'rb') as f: kirbidata = f.read()", "t += self.server.to_bytes() t += self.key.to_bytes() t += self.time.to_bytes() t", "object \"\"\" cc = CCACHE() dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi') for", "enc_part.native t = EncryptionKey(self.key.to_asn1()).native return tgt_rep, t def to_kirbi(self): filename", "a tgs to a specific service principal with a valid", "as_rep object, it is expected that the decrypted XXX is", "datetime.datetime.now(datetime.timezone.utc)): t = Times() t.authtime = dt_to_kerbtime(start) t.starttime = dt_to_kerbtime(start", "not present krbcredinfo['authtime'] = datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc) if self.time.starttime != 0:", "def to_bytes(self): t = self.authtime.to_bytes(4, byteorder='big', signed=False) t += self.starttime.to_bytes(4,", "c.headers.append(Header.parse(reader)) c.primary_principal = CCACHEPrincipal.parse(reader) pos = reader.tell() reader.seek(-1,2) eof =", "c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(ticket['enc-part']['cipher']) c.second_ticket = CCACHEOctetString.empty() return", "valid TGT This function doesn't do decryption of the encrypted", "signed=False) t += self.keylen.to_bytes(2, byteorder='big', signed=False) t += self.keyvalue return", "[name.to_string() for name in self.components]} return t, self.realm.to_string() @staticmethod def", "t_hdr t += self.primary_principal.to_bytes() for cred in self.credentials: t +=", "1: tgs_name_string = res['sname']['name-string'][0] else: tgs_name_string = res['sname']['name-string'][1] tgs_realm =", "krbcredinfo['sname'] = self.server.to_asn1()[0] enc_krbcred = {} enc_krbcred['ticket-info'] = [KrbCredInfo(krbcredinfo)] krbcred", "# 
c.headers.append(Header.parse(reader)) c.primary_principal = CCACHEPrincipal.parse(reader) pos = reader.tell() reader.seek(-1,2) eof", "the asn1 encoded AS_REP data that the AD sends upon", "get_all_tgt(self): \"\"\" Returns a list of AS_REP tickets in native", "def parse(reader): k = Keyblock() k.keytype = int.from_bytes(reader.read(2), byteorder='big', signed=False)", "to_asn1(self): return self.data def to_string(self): return self.data.decode() @staticmethod def from_string(data):", "bool to determine if client principal should be used as", "is supplied in enc_as_rep_part override_pp: bool to determine if client", "= enc_credinfo['ticket-info'][0] \"\"\" if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc): if include_expired ==", "def to_bytes(self): t = self.time_offset.to_bytes(4, byteorder='big', signed=False) t += self.usec_offset.to_bytes(4,", "t @staticmethod def parse(reader): t = Times() t.authtime = int.from_bytes(reader.read(4),", "= CCACHEOctetString.from_asn1(ticket['enc-part']['cipher']) c.second_ticket = CCACHEOctetString.empty() return c @staticmethod def parse(reader):", "likely expired, but include_expired is forcing me to add it", "c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm']) c.time = Times.from_asn1(ticket_info) c.key = Keyblock.from_asn1(ticket_info['key'])", "import dt_to_kerbtime, TGSTicket2hashcat from pycquery_krb.protocol.constants import EncryptionType, MESSAGE_TYPE from pycquery_krb", "mandatory, and most of the time not present krbcredinfo['authtime'] =", "'N/A', ] def to_bytes(self): t = self.client.to_bytes() t += self.server.to_bytes()", "= len(principal['name-string']) p.realm = CCACHEOctetString.from_string(realm) for comp in principal['name-string']: p.components.append(CCACHEOctetString.from_asn1(comp))", "= None self.tagdata = None @staticmethod def parse(data): \"\"\" returns", "= None @staticmethod def from_asn1(data): k = Keyblock() k.keytype =", "and 
cred.server.realm.to_string() == 'X-CACHECONF:'): c.credentials.append(cred) return c def to_bytes(self): t", "= self.addrtype.to_bytes(2, byteorder='big', signed=False) t += self.addrdata.to_bytes() return t class", "= CCACHEOctetString.parse(reader) return a def to_bytes(self): t = self.authtype.to_bytes(2, byteorder='big',", "class DateTime: def __init__(self): self.time_offset = None self.usec_offset = None", "filename = '%s@%s_%s' % (self.client.to_string() , self.server.to_string(), hashlib.sha1(self.ticket.to_asn1()).hexdigest()[:8]) krbcredinfo =", "returns a list of header tags \"\"\" reader = io.BytesIO(data)", "credential per file, so prepare for a lot of files", "krbcred['tickets'] = [Ticket.load(self.ticket.to_asn1())] krbcred['enc-part'] = EncryptedData({'etype': EncryptionType.NULL.value, 'cipher': EncKrbCredPart(enc_krbcred).dump()}) kirbi", "self.is_skey.to_bytes(1, byteorder='big', signed=False) t += self.tktflags.to_bytes(4, byteorder='little', signed=False) t +=", "from_kirbifile(kirbi_filename): kf_abs = os.path.abspath(kirbi_filename) kirbidata = None with open(kf_abs, 'rb')", "t def to_kirbi(self): filename = '%s@%s_%s' % (self.client.to_string() , self.server.to_string(),", "+= self.authdata.to_bytes() return t class CCACHEPrincipal: def __init__(self): self.name_type =", "def to_string(self): return self.data.decode() @staticmethod def from_string(data): o = CCACHEOctetString()", "byteorder='big', signed=False) return t class Address: def __init__(self): self.addrtype =", "= data.encode() o.length = len(o.data) return o @staticmethod def from_asn1(data):", "def to_bytes(self): t = self.name_type.to_bytes(4, byteorder='big', signed=False) t += len(self.components).to_bytes(4,", "= EncryptionKey(self.key.to_asn1()).native return tgt_rep, t def to_tgs(self): \"\"\" Returns the", "None self.components = [] @staticmethod def from_asn1(principal, realm): p =", "False): self.file_format_version = None #0x0504 self.headers = [] 
self.primary_principal =", "{} t['keytype'] = self.keytype t['keyvalue'] = self.keyvalue return t @staticmethod", "self.credentials: if cred.server.to_string(separator = '/').lower().find('krbtgt') == -1: tgss.append(cred.to_tgs()) return tgss", "c.client = CCACHEPrincipal.from_asn1(data['pname'], data['prealm']) c.server = CCACHEPrincipal.from_asn1(data['sname'], data['srealm']) c.key =", "= len(o.data) return o @staticmethod def from_asn1(data): o = CCACHEOctetString()", "self.key.to_bytes() t += self.time.to_bytes() t += self.is_skey.to_bytes(1, byteorder='big', signed=False) t", "CCACHEOctetString.parse(reader) for _ in range(p.num_components): p.components.append(CCACHEOctetString.parse(reader)) return p def to_bytes(self):", "None @staticmethod def parse(data): \"\"\" returns a list of header", "res['enc-part']['cipher'][:16] tgs_encrypted_data2 = res['enc-part']['cipher'][16:] return '$krb5tgs$%s$*%s$%s$spn*$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex()", "@staticmethod def from_kirbidir(directory_path): \"\"\" Iterates trough all .kirbi files in", "is basically the native representation of the asn1 encoded AS_REP", "self.components]} return t, self.realm.to_string() @staticmethod def parse(reader): p = CCACHEPrincipal()", "self.usec_offset = None @staticmethod def parse(reader): d = DateTime() d.time_offset", "None self.key = None self.time = None self.is_skey = None", "http://repo.or.cz/w/krb5dissect.git/blob_plain/HEAD:/ccache.txt class Header: def __init__(self): self.tag = None self.taglen =", "= 0 c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(ticket['enc-part']['cipher']) c.second_ticket =", "self.time.endtime != 0: krbcredinfo['endtime'] = datetime.datetime.fromtimestamp(self.time.endtime, datetime.timezone.utc) if self.time.renew_till !=", "= CCACHEOctetString.empty() self.credentials.append(c) def add_kirbi(self, krbcred, override_pp = True, include_expired", "p.realm = 
CCACHEOctetString.from_string('kerbi.corp') for _ in range(1): p.components.append(CCACHEOctetString.from_string('kerbi')) return p", "when the user requests a tgs to a specific service", "cause problems!') else: logging.debug('This ticket has most likely expired, skipping')", "return separator.join([c.to_string() for c in self.components]) def to_asn1(self): t =", "separator.join([c.to_string() for c in self.components]) def to_asn1(self): t = {'name-type':", "str the directory to write the kirbi files to \"\"\"", "(tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() ) def to_tgt(self): \"\"\" Returns the native", "ccache file The TGS is the native representation of the", "hash for all tickets \"\"\" hashes = [] for cred", "kirbi = KRBCRED(krbcred) return kirbi, filename @staticmethod def from_asn1(ticket, data):", "True: logging.debug('This ticket has most likely expired, but include_expired is", "as well htne impacket will crash miserably :( if len(ticket_info['sname']['name-string'])", "@staticmethod def parse(reader): k = Keyblock() k.keytype = int.from_bytes(reader.read(2), byteorder='big',", "-1: tgss.append(cred.to_tgs()) return tgss def get_hashes(self, all_hashes = False): \"\"\"", "p.name_type = int.from_bytes(reader.read(4), byteorder='big', signed=False) p.num_components = int.from_bytes(reader.read(4), byteorder='big', signed=False)", "\"\"\" if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc): if include_expired == True: logging.debug('This", "'primary_principal : %s\\n' % self.primary_principal return t def add_tgt(self, as_rep,", "@staticmethod def dummy(): p = CCACHEPrincipal() p.name_type = 1 p.num_components", "the ccache file The TGS is the native representation of", "self.is_skey = None self.tktflags = None self.num_address = None self.addrs", "self.num_address = None self.addrs = [] self.num_authdata = None self.authdata", "= None @staticmethod def empty(): o = CCACHEOctetString() 
o.length =", "to_asn1(self): t = {} t['keytype'] = self.keytype t['keyvalue'] = self.keyvalue", "__init__(self, empty = False): self.file_format_version = None #0x0504 self.headers =", "open(filename, 'rb') as f: kirbidata = f.read() kirbi = KRBCRED.load(kirbidata).native", "#!/usr/bin/env python3 # # Author: # <NAME> (@skelsec) # import", "def from_asn1(data): k = Keyblock() k.keytype = data['keytype'] k.etype =", "in the CCACHE object to the kirbi file format used", "= False): \"\"\" Returns a list of hashes in hashcat-firendly", "tgt_rep['pvno'] = krb5_pvno tgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value tgt_rep['crealm'] = self.server.realm.to_string() tgt_rep['cname']", "'rb') as f: return CCACHE.parse(f) def to_file(self, filename): \"\"\" Writes", "most likely expired, skipping') return \"\"\" c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm'])", "'rb') as f: kirbidata = f.read() return CCACHE.from_kirbi(kirbidata) @staticmethod def", "self.server.to_string(), hashlib.sha1(self.ticket.to_asn1()).hexdigest()[:8]) krbcredinfo = {} krbcredinfo['key'] = EncryptionKey(self.key.to_asn1()) krbcredinfo['prealm'] =", "addr.to_bytes() t += self.num_authdata.to_bytes(4, byteorder='big', signed=False) for ad in self.authdata:", "principal to be the kerberos service \"\"\" tgts = []", "self.tagdata return t class DateTime: def __init__(self): self.time_offset = None", "k.keytype = int.from_bytes(reader.read(2), byteorder='big', signed=False) k.etype = int.from_bytes(reader.read(2), byteorder='big', signed=False)", "'%s\\n' % header t+= 'primary_principal : %s\\n' % self.primary_principal return", "return p @staticmethod def dummy(): p = CCACHEPrincipal() p.name_type =", "to_file(self, filename): \"\"\" Writes the contents of the CCACHE object", "= #for i in range(c.headerlen): # c.headers.append(Header.parse(reader)) c.primary_principal = CCACHEPrincipal.parse(reader)", "in glob.glob(dir_path): with open(filename, 'rb') as f: kirbidata = 
f.read()", "krb5_pvno krbcred['msg-type'] = MESSAGE_TYPE.KRB_CRED.value krbcred['tickets'] = [Ticket.load(self.ticket.to_asn1())] krbcred['enc-part'] = EncryptedData({'etype':", "!= 0 else 'N/A', ] def to_bytes(self): t = self.client.to_bytes()", "This function doesn't do decryption of the encrypted part of", "o: o.write(kirbi.dump()) @staticmethod def from_file(filename): \"\"\" Parses the ccache file", "c.time = Times.parse(reader) c.is_skey = int.from_bytes(reader.read(1), byteorder='big', signed=False) c.tktflags =", "tgss = [] for cred in self.credentials: if cred.server.to_string(separator =", "] def to_bytes(self): t = self.client.to_bytes() t += self.server.to_bytes() t", "well htne impacket will crash miserably :( if len(ticket_info['sname']['name-string']) >", "and adds to the ccache file The TGT is basically", "tgs_name_string = res['sname']['name-string'][1] tgs_realm = res['realm'] if tgs_encryption_type == EncryptionType.AES256_CTS_HMAC_SHA1_96.value:", "0 c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump()) c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c) def add_kirbi(self,", "if empty == False: self.__setup() def __setup(self): self.file_format_version = 0x0504", "t += len(t_hdr).to_bytes(2, byteorder='big', signed=False) t += t_hdr t +=", "the primary principal for the ccache file \"\"\" c =", "byteorder='big', signed=False) t += self.addrdata.to_bytes() return t class Authdata: def", "the encryption type filtering and returns hash for all tickets", "of the CCACHE object to a file \"\"\" with open(filename,", "from asn1crypto import core # http://repo.or.cz/w/krb5dissect.git/blob_plain/HEAD:/ccache.txt class Header: def __init__(self):", "t += self.addrdata.to_bytes() return t class Authdata: def __init__(self): self.authtype", "(tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() ) else: tgs_checksum = res['enc-part']['cipher'][:16] tgs_encrypted_data2 =", 
"t['name-string'] = t['name-string'][:-1] c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm']) else: c.server =", "file \"\"\" c = Credential() c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm']) if", "Converts all credential object in the CCACHE object to the", "= Keyblock.from_asn1(ticket_info['key']) c.is_skey = 0 #not sure! c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native", "header in bytes, encoded in 2 byte big-endian unsigned int", "self.keyvalue = None @staticmethod def from_asn1(data): k = Keyblock() k.keytype", "= 0 # not sure k.keylen = len(data['keyvalue']) k.keyvalue =", "of them into one CCACHE object \"\"\" cc = CCACHE()", "'tag: %s\\n' % self.tag t += 'taglen: %s\\n' % self.taglen", "dt_to_kerbtime(start + datetime.timedelta(days=1)) t.renew_till = dt_to_kerbtime(start + datetime.timedelta(days=2)) return t", "to_string(self, separator='-'): return separator.join([c.to_string() for c in self.components]) def to_asn1(self):", "to_tgs(self): \"\"\" Returns the native format of an AS_REP message", "header is rarely used -mostly static- you'd need to init", "CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm']) c.time = Times.from_asn1(enc_as_rep_part) c.key = Keyblock.from_asn1(enc_as_rep_part['key']) c.is_skey =", "the realm as well, trimming it') t = ticket_info['sname'] t['name-string']", "tgs_encrypted_data2.hex() ) else: tgs_checksum = res['enc-part']['cipher'][:16] tgs_encrypted_data2 = res['enc-part']['cipher'][16:] return", "RC4) all_hashes: overrides the encryption type filtering and returns hash", "else 0 t.starttime = dt_to_kerbtime(enc_as_rep_part['starttime']) \\ if 'starttime' in enc_as_rep_part", "t = Times() t.authtime = dt_to_kerbtime(enc_as_rep_part['authtime']) \\ if 'authtime' in", "@staticmethod def parse(reader): d = DateTime() d.time_offset = int.from_bytes(reader.read(4), byteorder='big',", "%s\\n' % self.primary_principal return t def 
add_tgt(self, as_rep, enc_as_rep_part, override_pp", "a succsessful TGT request. This function doesn't do decryption of", "(self.client.to_string() , self.server.to_string(), hashlib.sha1(self.ticket.to_asn1()).hexdigest()[:8]) krbcredinfo = {} krbcredinfo['key'] = EncryptionKey(self.key.to_asn1())", "+ datetime.timedelta(days=2)) return t @staticmethod def parse(reader): t = Times()", "def __init__(self): self.authtype = None self.authdata = None @staticmethod def", "self.tagdata = None @staticmethod def parse(data): \"\"\" returns a list", "filename = cred.to_kirbi() filename = '%s.kirbi' % filename.replace('..','!') filepath =", "o @staticmethod def parse(reader): o = CCACHEOctetString() o.length = int.from_bytes(reader.read(4),", "c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump()) c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c)", "> 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper(): logger.debug('SNAME contains the realm", "[] for cred in self.credentials: if cred.server.to_string(separator = '/').lower().find('krbtgt') ==", "asn1 encoded AS_REP data that the AD sends upon a", "= MESSAGE_TYPE.KRB_AS_REP.value tgt_rep['crealm'] = self.server.realm.to_string() tgt_rep['cname'] = self.client.to_asn1()[0] tgt_rep['ticket'] =", "#for header in self.headers: # t_hdr += header.to_bytes() #self.headerlen =", "and converts all of them into one CCACHE object \"\"\"", "tgs_encrypted_data2.hex() ) def to_tgt(self): \"\"\" Returns the native format of", "= ticket_info['sname'] t['name-string'] = t['name-string'][:-1] c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm']) else:", "byteorder='big', signed=False) k.keyvalue = reader.read(k.keylen) return k def to_bytes(self): t", "return CCACHE.parse(f) def to_file(self, filename): \"\"\" Writes the contents of", "signed=False) t.renew_till = int.from_bytes(reader.read(4), byteorder='big', signed=False) return t def 
to_bytes(self):", "logger.debug('SNAME contains the realm as well, trimming it') t =", "user requests a tgs to a specific service principal with", "(self.server.to_string(separator='/'), self.server.realm.to_string()), datetime.datetime.fromtimestamp(self.time.starttime).isoformat() if self.time.starttime != 0 else 'N/A', datetime.datetime.fromtimestamp(self.time.endtime).isoformat()", "for filename in glob.glob(dir_path): with open(filename, 'rb') as f: kirbidata", "range(p.num_components): p.components.append(CCACHEOctetString.parse(reader)) return p def to_bytes(self): t = self.name_type.to_bytes(4, byteorder='big',", "used as the primary principal for the ccache file \"\"\"", "= 0 c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one ticket", "self.primary_principal.to_bytes() for cred in self.credentials: t += cred.to_bytes() return t", "t_hdr += header.to_bytes() t += len(t_hdr).to_bytes(2, byteorder='big', signed=False) t +=", "self.name_type.to_bytes(4, byteorder='big', signed=False) t += len(self.components).to_bytes(4, byteorder='big', signed=False) t +=", "= dt_to_kerbtime(start) t.starttime = dt_to_kerbtime(start ) t.endtime = dt_to_kerbtime(start +", "self.data.encode() self.length = len(self.data) t = len(self.data).to_bytes(4, byteorder='big', signed=False) t", "from pycquery_krb.protocol.asn1_structs import Ticket, EncryptedData, \\ krb5_pvno, KrbCredInfo, EncryptionKey, KRBCRED,", "True): #from AS_REP \"\"\" Creates credential object from the TGT", "filename) with open(filepath, 'wb') as o: o.write(kirbi.dump()) @staticmethod def from_file(filename):", "Header: def __init__(self): self.tag = None self.taglen = None self.tagdata", "= self.time_offset.to_bytes(4, byteorder='big', signed=False) t += self.usec_offset.to_bytes(4, byteorder='big', signed=False) return", "+= header.to_bytes() t += len(t_hdr).to_bytes(2, byteorder='big', signed=False) t += t_hdr", "t = ticket_info['sname'] t['name-string'] = 
t['name-string'][:-1] c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm'])", "and cred.server.components[0].to_string() == 'krb5_ccache_conf_data' and cred.server.realm.to_string() == 'X-CACHECONF:'): c.credentials.append(cred) return", "kf_abs = os.path.abspath(kirbi_filename) kirbidata = None with open(kf_abs, 'rb') as", "tgs_encrypted_data2 = res['enc-part']['cipher'][16:] return '$krb5tgs$%s$*%s$%s$spn*$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() )", "enc_as_rep_part and enc_as_rep_part['renew_till'] else 0 return t @staticmethod def dummy_time(start=", "hdr_size = int.from_bytes(reader.read(2), byteorder='big', signed=False) c.headers = Header.parse(reader.read(hdr_size)) #c.headerlen =", "= f.read() kirbi = KRBCRED.load(kirbidata).native cc.add_kirbi(kirbi) return cc def to_kirbidir(self,", "res['sname']['name-string'][1] tgs_realm = res['realm'] if tgs_encryption_type == EncryptionType.AES256_CTS_HMAC_SHA1_96.value: tgs_checksum =", "and enc_as_rep_part['authtime'] else 0 t.starttime = dt_to_kerbtime(enc_as_rep_part['starttime']) \\ if 'starttime'", "t.starttime = int.from_bytes(reader.read(4), byteorder='big', signed=False) t.endtime = int.from_bytes(reader.read(4), byteorder='big', signed=False)", "self.second_ticket = None def to_hash(self): res = Ticket.load(self.ticket.to_asn1()).native tgs_encryption_type =", "[] self.ticket = None self.second_ticket = None def to_hash(self): res", "t class Authdata: def __init__(self): self.authtype = None self.authdata =", "else: tgs_checksum = res['enc-part']['cipher'][:16] tgs_encrypted_data2 = res['enc-part']['cipher'][16:] return '$krb5tgs$%s$*%s$%s$spn*$%s$%s' %", "= len(data['keyvalue']) k.keyvalue = data['keyvalue'] return k def to_asn1(self): t", "def to_bytes(self): t = self.file_format_version.to_bytes(2, byteorder='big', signed=False) t_hdr = b''", "t def to_tgs(self): \"\"\" Returns the native format of an", "self.tag = None self.taglen 
= None self.tagdata = None @staticmethod", "Times.from_asn1(ticket_info) c.key = Keyblock.from_asn1(ticket_info['key']) c.is_skey = 0 #not sure! c.tktflags", "o @staticmethod def from_asn1(data): o = CCACHEOctetString() o.length = len(data)", "res['enc-part']['cipher'][:-12] return '$krb5tgs$%s$%s$%s$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() ) else: tgs_checksum", "p.realm = CCACHEOctetString.from_string(realm) for comp in principal['name-string']: p.components.append(CCACHEOctetString.from_asn1(comp)) return p", "return cc def get_all_tgt(self): \"\"\" Returns a list of AS_REP", "= dt_to_kerbtime(start + datetime.timedelta(days=2)) return t @staticmethod def parse(reader): t", "do decryption of the encrypted part of the tgs_rep object,", "#from AS_REP \"\"\" Creates credential object from the TGS and", "= Times() t.authtime = dt_to_kerbtime(enc_as_rep_part['authtime']) \\ if 'authtime' in enc_as_rep_part", "in enc_as_rep_part and enc_as_rep_part['starttime'] else 0 t.endtime = dt_to_kerbtime(enc_as_rep_part['endtime']) \\", "filename in glob.glob(dir_path): with open(filename, 'rb') as f: kirbidata =", "are AP_REP we check for the server principal to be", "= None self.server = None self.key = None self.time =", "if cred.server.to_string(separator = '/').lower().find('krbtgt') == -1: tgss.append(cred.to_tgs()) return tgss def", "Times() t.authtime = dt_to_kerbtime(enc_as_rep_part['authtime']) \\ if 'authtime' in enc_as_rep_part and", "CCACHEPrincipal.from_asn1(t, ticket_info['srealm']) else: c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm']) c.time = Times.from_asn1(ticket_info)", "'%s@%s_%s' % (self.client.to_string() , self.server.to_string(), hashlib.sha1(self.ticket.to_asn1()).hexdigest()[:8]) krbcredinfo = {} krbcredinfo['key']", "= Times() t.authtime = dt_to_kerbtime(start) t.starttime = dt_to_kerbtime(start ) t.endtime", "byteorder='big', signed=False) t += 
self.keylen.to_bytes(2, byteorder='big', signed=False) t += self.keyvalue", "= CCACHEOctetString.parse(reader) for _ in range(p.num_components): p.components.append(CCACHEOctetString.parse(reader)) return p def", "mandatory, and sometimes it's not present krbcredinfo['renew-till'] = datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc)", "well, trimming it') t = ticket_info['sname'] t['name-string'] = t['name-string'][:-1] c.server", "has most likely expired, skipping') return \"\"\" c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'],", "CCACHE: \"\"\" As the header is rarely used -mostly static-", "= Times.from_asn1(ticket_info) c.key = Keyblock.from_asn1(ticket_info['key']) c.is_skey = 0 #not sure!", "= {} enc_krbcred['ticket-info'] = [KrbCredInfo(krbcredinfo)] krbcred = {} krbcred['pvno'] =", "+= self.is_skey.to_bytes(1, byteorder='big', signed=False) t += self.tktflags.to_bytes(4, byteorder='little', signed=False) t", "parameter is not mandatory, and most of the time not", "header.tag = 1 header.taglen = 8 #header.tagdata = b'\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x00' header.tagdata", "return t class CCACHEOctetString: def __init__(self): self.length = None self.data", "[] while reader.tell() < len(data): h = Header() h.tag =", "summary_header(): return ['client','server','starttime','endtime','renew-till'] def summary(self): return [ '%s@%s' % (self.client.to_string(separator='/'),", "k def to_asn1(self): t = {} t['keytype'] = self.keytype t['keyvalue']", "t = self.time_offset.to_bytes(4, byteorder='big', signed=False) t += self.usec_offset.to_bytes(4, byteorder='big', signed=False)", "for cred in self.credentials: kirbi, filename = cred.to_kirbi() filename =", "t += self.authdata.to_bytes() return t class CCACHEPrincipal: def __init__(self): self.name_type", "= None self.keylen = None self.keyvalue = None @staticmethod def", "d def to_bytes(self): t = self.time_offset.to_bytes(4, byteorder='big', signed=False) t +=", "= None 
self.addrs = [] self.num_authdata = None self.authdata =", "krbcredinfo = {} krbcredinfo['key'] = EncryptionKey(self.key.to_asn1()) krbcredinfo['prealm'] = self.client.realm.to_string() krbcredinfo['pname']", "for _ in range(c.num_address): c.addrs.append(Address.parse(reader)) c.num_authdata = int.from_bytes(reader.read(4), byteorder='big', signed=False)", "def get_hashes(self, all_hashes = False): \"\"\" Returns a list of", "the AD sends upon a succsessful TGT request. This function", "t class Address: def __init__(self): self.addrtype = None self.addrdata =", "os.path.abspath(kirbi_filename) kirbidata = None with open(kf_abs, 'rb') as f: kirbidata", "t class CCACHEOctetString: def __init__(self): self.length = None self.data =", "EncryptionType.AES256_CTS_HMAC_SHA1_96.value: tgs_checksum = res['enc-part']['cipher'][-12:] tgs_encrypted_data2 = res['enc-part']['cipher'][:-12] return '$krb5tgs$%s$%s$%s$%s$%s' %", "byteorder='big', signed=False) o.data = reader.read(o.length) return o def to_bytes(self): if", "encryption type 23 (which is RC4) all_hashes: overrides the encryption", "return t class CCACHEPrincipal: def __init__(self): self.name_type = None self.num_components", "byteorder='big', signed=False) k.keylen = int.from_bytes(reader.read(2), byteorder='big', signed=False) k.keyvalue = reader.read(k.keylen)", "> 0 and cred.server.components[0].to_string() == 'krb5_ccache_conf_data' and cred.server.realm.to_string() == 'X-CACHECONF:'):", "is not mandatory, and most of the time not present", "+= self.server.to_bytes() t += self.key.to_bytes() t += self.time.to_bytes() t +=", "message and the sessionkey in EncryptionKey native format \"\"\" enc_part", "t = '== CCACHE ==\\n' t+= 'file_format_version : %s\\n' %", "[] for cred in self.credentials: if cred.server.to_string(separator='/').lower().find('krbtgt') != -1: tgt", "t = {} t['keytype'] = self.keytype t['keyvalue'] = self.keyvalue return", "hashlib.sha1(self.ticket.to_asn1()).hexdigest()[:8]) krbcredinfo = {} 
krbcredinfo['key'] = EncryptionKey(self.key.to_asn1()) krbcredinfo['prealm'] = self.client.realm.to_string()", "= CCACHEOctetString.from_string('kerbi.corp') for _ in range(1): p.components.append(CCACHEOctetString.from_string('kerbi')) return p def", "c.is_skey = 0 #not sure! c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native c.num_address =", "Author: # <NAME> (@skelsec) # import os import io import", "= Keyblock.from_asn1(enc_as_rep_part['key']) c.is_skey = 0 #not sure! c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native", "tickets in native format (dict). To determine which ticket are", "t['keyvalue'] = self.keyvalue return t @staticmethod def parse(reader): k =", "Times() t.authtime = dt_to_kerbtime(start) t.starttime = dt_to_kerbtime(start ) t.endtime =", "self.primary_principal = c.client #yaaaaay 4 additional weirdness!!!! #if sname name-string", "cred in self.credentials: if cred.server.to_string(separator='/').lower().find('krbtgt') != -1: tgt = [cred.to_tgt(),", "int.from_bytes(reader.read(2), byteorder='big', signed=False) c.headers = Header.parse(reader.read(hdr_size)) #c.headerlen = #for i", "#this parameter is not mandatory, and most of the time", "d.usec_offset = int.from_bytes(reader.read(4), byteorder='big', signed=False) return d def to_bytes(self): t", "filename): \"\"\" Writes the contents of the CCACHE object to", "from_asn1(data): o = CCACHEOctetString() o.length = len(data) if isinstance(data,str): o.data", "not mandatory, and most of the time not present krbcredinfo['authtime']", "= CCACHEOctetString.parse(reader) c.second_ticket = CCACHEOctetString.parse(reader) return c @staticmethod def summary_header():", "t += self.usec_offset.to_bytes(4, byteorder='big', signed=False) return t class Credential: def", "int.from_bytes(reader.read(4), byteorder='big', signed=False) t.starttime = int.from_bytes(reader.read(4), byteorder='big', signed=False) t.endtime =", "to the kirbi 
file format used by mimikatz. The kirbi", "cc = CCACHE() dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi') for filename in", "'tagdata: %s\\n' % self.tagdata return t class DateTime: def __init__(self):", "self.server.to_asn1()[0] enc_krbcred = {} enc_krbcred['ticket-info'] = [KrbCredInfo(krbcredinfo)] krbcred = {}", "= CCACHEOctetString() o.data = data.encode() o.length = len(o.data) return o", "cred = Credential.parse(reader) if not (len(cred.server.components) > 0 and cred.server.components[0].to_string()", "in principal['name-string']: p.components.append(CCACHEOctetString.from_asn1(comp)) return p @staticmethod def dummy(): p =", "sure! c.tktflags = TicketFlags(data['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata = 0", "with a valid TGT This function doesn't do decryption of", "krb5_pvno, KrbCredInfo, EncryptionKey, KRBCRED, TicketFlags, EncKrbCredPart from pycquery_krb.common.utils import dt_to_kerbtime,", "len(data) if isinstance(data,str): o.data = data.encode() else: o.data = data", "[] for cred in self.credentials: res = Ticket.load(cred.ticket.to_asn1()).native if int(res['enc-part']['etype'])", "to_bytes(self): t = self.addrtype.to_bytes(2, byteorder='big', signed=False) t += self.addrdata.to_bytes() return", "0 # not sure k.keylen = len(data['keyvalue']) k.keyvalue = data['keyvalue']", "tgts def get_all_tgs(self): tgss = [] for cred in self.credentials:", "self.time.renew_till != 0: #this parameter is not mandatory, and sometimes", "= 0 c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only", "c.second_ticket = CCACHEOctetString.parse(reader) return c @staticmethod def summary_header(): return ['client','server','starttime','endtime','renew-till']", "= self.client.to_bytes() t += self.server.to_bytes() t += self.key.to_bytes() t +=", "CCACHEPrincipal() p.name_type = 1 p.num_components = 1 p.realm = CCACHEOctetString.from_string('kerbi.corp')", "the 
ccache file and returns a CCACHE object \"\"\" with", "def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP \"\"\"", "None with open(kf_abs, 'rb') as f: kirbidata = f.read() return", "0 c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores", "0: #this parameter is not mandatory, and most of the", "#not sure! c.tktflags = TicketFlags(data['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata =", "eof: cred = Credential.parse(reader) if not (len(cred.server.components) > 0 and", "(self.client.to_string(separator='/'), self.client.realm.to_string()), '%s@%s' % (self.server.to_string(separator='/'), self.server.realm.to_string()), datetime.datetime.fromtimestamp(self.time.starttime).isoformat() if self.time.starttime !=", "CCACHEOctetString.from_string('kerbi.corp') for _ in range(1): p.components.append(CCACHEOctetString.from_string('kerbi')) return p def to_string(self,", "parse(reader): a = Address() a.addrtype = int.from_bytes(reader.read(2), byteorder='big', signed=False) a.addrdata", "\"\"\" Writes the contents of the CCACHE object to a", "\"\"\" Creates credential object from the TGT and adds to", "files being generated. 
directory_path: str the directory to write the", "t, self.realm.to_string() @staticmethod def parse(reader): p = CCACHEPrincipal() p.name_type =", "_ in range(1): p.components.append(CCACHEOctetString.from_string('kerbi')) return p def to_string(self, separator='-'): return", "cred in self.credentials: if cred.server.to_string(separator = '/').lower().find('krbtgt') == -1: tgss.append(cred.to_tgs())", "self.data def to_string(self): return self.data.decode() @staticmethod def from_string(data): o =", "def parse(reader): a = Address() a.addrtype = int.from_bytes(reader.read(2), byteorder='big', signed=False)", "b'\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x00' header.tagdata = b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' self.headers.append(header) #t_hdr = b'' #for header", "KRBCRED.load(kirbidata).native cc = CCACHE() cc.add_kirbi(kirbi) return cc def get_all_tgt(self): \"\"\"", "import logger from asn1crypto import core # http://repo.or.cz/w/krb5dissect.git/blob_plain/HEAD:/ccache.txt class Header:", "= CCACHEPrincipal.dummy() def __str__(self): t = '== CCACHE ==\\n' t+=", "reader.seek(-1,2) eof = reader.tell() reader.seek(pos,0) while reader.tell() < eof: cred", "headers = [] while reader.tell() < len(data): h = Header()", "import EncryptionType, MESSAGE_TYPE from pycquery_krb import logger from asn1crypto import", "res['realm'] if tgs_encryption_type == EncryptionType.AES256_CTS_HMAC_SHA1_96.value: tgs_checksum = res['enc-part']['cipher'][-12:] tgs_encrypted_data2 =", "TGT request. 
This function doesn't do decryption of the encrypted", "to determine if client principal should be used as the", "d.time_offset = int.from_bytes(reader.read(4), byteorder='big', signed=False) d.usec_offset = int.from_bytes(reader.read(4), byteorder='big', signed=False)", "= self.server.realm.to_string() tgt_rep['cname'] = self.client.to_asn1()[0] tgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native tgt_rep['enc-part'] =", "self.realm = None self.components = [] @staticmethod def from_asn1(principal, realm):", "None self.time = None self.is_skey = None self.tktflags = None", "unsigned int self.primary_principal = CCACHEPrincipal.dummy() def __str__(self): t = '==", "t += self.num_authdata.to_bytes(4, byteorder='big', signed=False) for ad in self.authdata: t", "signed=False) return t def to_bytes(self): t = self.authtime.to_bytes(4, byteorder='big', signed=False)", "self.length = len(self.data) t = len(self.data).to_bytes(4, byteorder='big', signed=False) t +=", "= self.client.to_asn1()[0] tgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native tgt_rep['enc-part'] = enc_part.native t =", "empty = True to get an object without header already", "t @staticmethod def parse(reader): k = Keyblock() k.keytype = int.from_bytes(reader.read(2),", "= CCACHEPrincipal() p.name_type = principal['name-type'] p.num_components = len(principal['name-string']) p.realm =", "!= -1: tgt = [cred.to_tgt(), cred.time] tgts.append(tgt) return tgts def", "CCACHE.from_kirbi(kirbidata) @staticmethod def from_kirbidir(directory_path): \"\"\" Iterates trough all .kirbi files", "len(data['keyvalue']) k.keyvalue = data['keyvalue'] return k def to_asn1(self): t =", "KRBCRED(krbcred) return kirbi, filename @staticmethod def from_asn1(ticket, data): ### #", "byteorder='big', signed=False) t.renew_till = int.from_bytes(reader.read(4), byteorder='big', signed=False) return t def", "class Times: def __init__(self): self.authtime = None self.starttime = None", "for tickets with encryption 
type 23 (which is RC4) all_hashes:", "#0x0504 self.headers = [] self.primary_principal = None self.credentials = []", "kirbi, filename @staticmethod def from_asn1(ticket, data): ### # data =", "is expected that the decrypted XXX is supplied in enc_as_rep_part", "'%s@%s' % (self.server.to_string(separator='/'), self.server.realm.to_string()), datetime.datetime.fromtimestamp(self.time.starttime).isoformat() if self.time.starttime != 0 else", "= {} krbcred['pvno'] = krb5_pvno krbcred['msg-type'] = MESSAGE_TYPE.KRB_CRED.value krbcred['tickets'] =", "return k def to_asn1(self): t = {} t['keytype'] = self.keytype", "self.primary_principal = c.client c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm']) c.time = Times.from_asn1(enc_tgs_rep_part)", "k.keytype = data['keytype'] k.etype = 0 # not sure k.keylen", "return a def to_bytes(self): t = self.authtype.to_bytes(2, byteorder='big', signed=False) t", "CCACHEPrincipal.dummy() def __str__(self): t = '== CCACHE ==\\n' t+= 'file_format_version", "self.client.realm.to_string()), '%s@%s' % (self.server.to_string(separator='/'), self.server.realm.to_string()), datetime.datetime.fromtimestamp(self.time.starttime).isoformat() if self.time.starttime != 0", "in EncryptionKey native format \"\"\" enc_part = EncryptedData({'etype': 1, 'cipher':", "p = CCACHEPrincipal() p.name_type = 1 p.num_components = 1 p.realm", "= int.from_bytes(reader.read(1), byteorder='big', signed=False) c.tktflags = int.from_bytes(reader.read(4), byteorder='little', signed=False) c.num_address", "0 else 'N/A', datetime.datetime.fromtimestamp(self.time.endtime).isoformat() if self.time.endtime != 0 else 'N/A',", "in self.credentials: res = Ticket.load(cred.ticket.to_asn1()).native if int(res['enc-part']['etype']) == 23 or", "= 8 #header.tagdata = b'\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x00' header.tagdata = b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' self.headers.append(header) #t_hdr", "= 'tag: %s\\n' % self.tag t += 
'taglen: %s\\n' %", "\\ krb5_pvno, KrbCredInfo, EncryptionKey, KRBCRED, TicketFlags, EncKrbCredPart from pycquery_krb.common.utils import", "+= self.key.to_bytes() t += self.time.to_bytes() t += self.is_skey.to_bytes(1, byteorder='big', signed=False)", "representation of the asn1 encoded AS_REP data that the AD", "= dt_to_kerbtime(enc_as_rep_part['endtime']) \\ if 'endtime' in enc_as_rep_part and enc_as_rep_part['endtime'] else", "directory and converts all of them into one CCACHE object", "a list of hashes in hashcat-firendly format for tickets with", "t class Times: def __init__(self): self.authtime = None self.starttime =", "= None self.components = [] @staticmethod def from_asn1(principal, realm): p", "_ in range(c.num_address): c.addrs.append(Address.parse(reader)) c.num_authdata = int.from_bytes(reader.read(4), byteorder='big', signed=False) for", "a.authtype = int.from_bytes(reader.read(2), byteorder='big', signed=False) a.authdata = CCACHEOctetString.parse(reader) return a", "weirdness!!!! 
#if sname name-string contains a realm as well htne", "the CCACHE object to the kirbi file format used by", "per file c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c) @staticmethod def from_kirbi(kirbidata): kirbi", "['client','server','starttime','endtime','renew-till'] def summary(self): return [ '%s@%s' % (self.client.to_string(separator='/'), self.client.realm.to_string()), '%s@%s'", "Ticket.load(cred.ticket.to_asn1()).native if int(res['enc-part']['etype']) == 23 or all_hashes == True: hashes.append(cred.to_hash())", "CCACHE.parse(f) def to_file(self, filename): \"\"\" Writes the contents of the", "encrypted part of the tgs_rep object, it is expected that", "= 0 c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump()) c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c) def", "= self.tag.to_bytes(2, byteorder='big', signed=False) t += len(self.tagdata).to_bytes(2, byteorder='big', signed=False) t", "'N/A', datetime.datetime.fromtimestamp(self.time.renew_till).isoformat() if self.time.renew_till != 0 else 'N/A', ] def", "(@skelsec) # import os import io import datetime import glob", "a.addrdata = CCACHEOctetString.parse(reader) return a def to_bytes(self): t = self.addrtype.to_bytes(2,", "signed=False) k.keylen = int.from_bytes(reader.read(2), byteorder='big', signed=False) k.keyvalue = reader.read(k.keylen) return", "if 'starttime' in enc_as_rep_part and enc_as_rep_part['starttime'] else 0 t.endtime =", "def from_asn1(data): o = CCACHEOctetString() o.length = len(data) if isinstance(data,str):", "doesn't do decryption of the encrypted part of the as_rep", "= False): c = Credential() enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native ticket_info =", "#self.headerlen = 1 #size of the entire header in bytes,", "= CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump()) c.second_ticket = CCACHEOctetString.empty() self.credentials.append(c) def add_kirbi(self, krbcred, override_pp", 
"from pycquery_krb.protocol.constants import EncryptionType, MESSAGE_TYPE from pycquery_krb import logger from", "for _ in range(p.num_components): p.components.append(CCACHEOctetString.parse(reader)) return p def to_bytes(self): t", "0 #not sure! c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata", "p.components.append(CCACHEOctetString.parse(reader)) return p def to_bytes(self): t = self.name_type.to_bytes(4, byteorder='big', signed=False)", "com.to_bytes() return t class CCACHEOctetString: def __init__(self): self.length = None", "'cipher': b''}) tgt_rep = {} tgt_rep['pvno'] = krb5_pvno tgt_rep['msg-type'] =", "dt_to_kerbtime(start ) t.endtime = dt_to_kerbtime(start + datetime.timedelta(days=1)) t.renew_till = dt_to_kerbtime(start", "\"\"\" enc_part = EncryptedData({'etype': 1, 'cipher': b''}) tgt_rep = {}", "self.tagdata return t def __str__(self): t = 'tag: %s\\n' %", "XXX is supplied in enc_as_rep_part override_pp: bool to determine if", "init this object with empty = True to get an", "return t @staticmethod def dummy_time(start= datetime.datetime.now(datetime.timezone.utc)): t = Times() t.authtime", "k.keyvalue = data['keyvalue'] return k def to_asn1(self): t = {}", "time not present krbcredinfo['authtime'] = datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc) if self.time.starttime !=", "CCACHEPrincipal: def __init__(self): self.name_type = None self.num_components = None self.realm", "= os.path.abspath(kirbi_filename) kirbidata = None with open(kf_abs, 'rb') as f:", "tgs_checksum = res['enc-part']['cipher'][:16] tgs_encrypted_data2 = res['enc-part']['cipher'][16:] return '$krb5tgs$%s$*%s$%s$spn*$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm,", "t class DateTime: def __init__(self): self.time_offset = None self.usec_offset =", "t.starttime = dt_to_kerbtime(start ) t.endtime = dt_to_kerbtime(start + datetime.timedelta(days=1)) t.renew_till", "ccache file and 
returns a CCACHE object \"\"\" with open(filename,", "data['prealm']) c.server = CCACHEPrincipal.from_asn1(data['sname'], data['srealm']) c.key = Keyblock.from_asn1(data['key']) c.is_skey =", "False): \"\"\" Returns a list of hashes in hashcat-firendly format", "to_bytes(self): t = self.keytype.to_bytes(2, byteorder='big', signed=False) t += self.etype.to_bytes(2, byteorder='big',", "= int.from_bytes(reader.read(4), byteorder='big', signed=False) o.data = reader.read(o.length) return o def", "of files being generated. directory_path: str the directory to write", "Credential() c.client = CCACHEPrincipal.from_asn1(data['pname'], data['prealm']) c.server = CCACHEPrincipal.from_asn1(data['sname'], data['srealm']) c.key", "False: self.__setup() def __setup(self): self.file_format_version = 0x0504 header = Header()", "c.num_address = 0 c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(ticket['enc-part']['cipher']) c.second_ticket", "= [] while reader.tell() < len(data): h = Header() h.tag", "[] self.primary_principal = None self.credentials = [] if empty ==", "CCACHE(True) c.file_format_version = int.from_bytes(reader.read(2), byteorder='big', signed=False) hdr_size = int.from_bytes(reader.read(2), byteorder='big',", "c.time = Times.from_asn1(enc_as_rep_part) c.key = Keyblock.from_asn1(enc_as_rep_part['key']) c.is_skey = 0 #not", "that the AD sends upon a succsessful TGT request. 
This", "t = self.authtime.to_bytes(4, byteorder='big', signed=False) t += self.starttime.to_bytes(4, byteorder='big', signed=False)", "int.from_bytes(reader.read(2), byteorder='big', signed=False) a.authdata = CCACHEOctetString.parse(reader) return a def to_bytes(self):", "self.server.realm.to_string() tgt_rep['cname'] = self.client.to_asn1()[0] tgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native tgt_rep['enc-part'] = enc_part.native", "p.name_type = 1 p.num_components = 1 p.realm = CCACHEOctetString.from_string('kerbi.corp') for", "object with empty = True to get an object without", "cc def get_all_tgt(self): \"\"\" Returns a list of AS_REP tickets", "= Ticket.load(cred.ticket.to_asn1()).native if int(res['enc-part']['etype']) == 23 or all_hashes == True:", "t+= 'primary_principal : %s\\n' % self.primary_principal return t def add_tgt(self,", "p.name_type = principal['name-type'] p.num_components = len(principal['name-string']) p.realm = CCACHEOctetString.from_string(realm) for", "parse(reader): d = DateTime() d.time_offset = int.from_bytes(reader.read(4), byteorder='big', signed=False) d.usec_offset", "= 0 #not sure! c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native c.num_address = 0", "\\ if 'endtime' in enc_as_rep_part and enc_as_rep_part['endtime'] else 0 t.renew_till", "= 0 o.data = b'' return o def to_asn1(self): return", "k.etype = 0 # not sure k.keylen = len(data['keyvalue']) k.keyvalue", "@staticmethod def parse(reader): c = CCACHE(True) c.file_format_version = int.from_bytes(reader.read(2), byteorder='big',", "@staticmethod def from_asn1(ticket, data): ### # data = KrbCredInfo ###", "def from_file(filename): \"\"\" Parses the ccache file and returns a", "the asn1 encoded TGS_REP data when the user requests a", "a lot of files being generated. 
directory_path: str the directory", "= 0 c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump()) c.second_ticket =", "return t @staticmethod def from_kirbifile(kirbi_filename): kf_abs = os.path.abspath(kirbi_filename) kirbidata =", "me to add it to cache! This can cause problems!')", "def to_asn1(self): t = {'name-type': self.name_type, 'name-string': [name.to_string() for name", "0 c.num_authdata = 0 c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump()) c.second_ticket = CCACHEOctetString.empty()", "so prepare for a lot of files being generated. directory_path:", "= TicketFlags(data['flags']).cast(core.IntegerBitString).native c.num_address = 0 c.num_authdata = 0 c.ticket =", "t = self.addrtype.to_bytes(2, byteorder='big', signed=False) t += self.addrdata.to_bytes() return t", "'%s.kirbi' % filename.replace('..','!') filepath = os.path.join(kf_abs, filename) with open(filepath, 'wb')", "if override_pp == True: self.primary_principal = c.client c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'],", "name-string contains a realm as well htne impacket will crash", "The kirbi file format supports one credential per file, so", "k.keyvalue = reader.read(k.keylen) return k def to_bytes(self): t = self.keytype.to_bytes(2,", "and most of the time not present krbcredinfo['authtime'] = datetime.datetime.fromtimestamp(self.time.authtime,", "the contents of the CCACHE object to a file \"\"\"", "AP_REP we check for the server principal to be the", "krbcredinfo['pname'] = self.client.to_asn1()[0] krbcredinfo['flags'] = core.IntegerBitString(self.tktflags).cast(TicketFlags) if self.time.authtime != 0:", "% (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() ) else: tgs_checksum = res['enc-part']['cipher'][:16] tgs_encrypted_data2", "ad in self.authdata: t += ad.to_bytes() t += self.ticket.to_bytes() t", "cred.server.realm.to_string() == 'X-CACHECONF:'): 
c.credentials.append(cred) return c def to_bytes(self): t =", "d = DateTime() d.time_offset = int.from_bytes(reader.read(4), byteorder='big', signed=False) d.usec_offset =", "file format used by mimikatz. The kirbi file format supports", "CCACHE() dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi') for filename in glob.glob(dir_path): with", "def to_asn1(self): t = {} t['keytype'] = self.keytype t['keyvalue'] =", "the decrypted XXX is supplied in enc_as_rep_part override_pp: bool to", "in range(c.num_address): c.addrs.append(Address.parse(reader)) c.num_authdata = int.from_bytes(reader.read(4), byteorder='big', signed=False) for _" ]
[ "of the syntax [smiles|molId,smiles|molId] def process_smiles(smiles): smiles = smiles.split('\\n') mol", "file is supplied outputString = \"*\".join(set(list_A)) #removes all doubles using", "len(line.rstrip())>0: line = line.split() smi = line[0] molId = \"\"", "output to index.html using javascript print \"\"\" <html> <head> <input", "form.getvalue(\"dataB\") #if two files are supplied if(extension==\"smi\"): list_B = process_smiles(dataB)", "\"\" if(operator==\"UNI\"): #if only one file is supplied outputString =", "a list of smiles of the syntax [smiles|molId,smiles|molId] def process_smiles(smiles):", "cgi #creates a list of smiles of the syntax [smiles|molId,smiles|molId]", "to index.html using javascript print \"\"\" <html> <head> <input type=\"text\"", "line.split() smi = line[0] molId = \"\" if len(line)>1: molId", "<input type=\"text\" id=\"data\" value=\"\"\" + outputString + \"\"\"> <script type=\"text/javascript\">", "doubles using the set() function else: dataB = form.getvalue(\"dataB\") #if", "smiles of the syntax [smiles|molId,smiles|molId] def read_sdf(sdf_data): ifs = oemolistream()", "= [] for mol in ifs.GetOEGraphMols(): smiles = OECreateSmiString(mol) smiles_list.append(smiles", "outputString = \"*\".join(set(list_A)) #removes all doubles using the set() function", "\"\" if len(line)>1: molId = line[1].replace(\" \",\"|\").rstrip() if(OEParseSmiles(mol,smi)): smi =", "the output to index.html using javascript print \"\"\" <html> <head>", "dataA = form.getvalue(\"dataA\") operator = form.getvalue(\"smiles_operator\") sdf_output = form.getvalue(\"sdf_output\") if(extension==\"smi\"):", "smiles and writes it as sdf using a memory buffer", "of smiles of the syntax [smiles|molId,smiles|molId] def read_sdf(sdf_data): ifs =", "[] for mol in ifs.GetOEGraphMols(): smiles = OECreateSmiString(mol) smiles_list.append(smiles +", "+ \"|\" + molId) #can't send spaces or new lines", "smiles_list.append(smiles + \"|\" + mol.GetTitle()) return smiles_list if 
__name__ ==", "form.getvalue(\"dataA\") operator = form.getvalue(\"smiles_operator\") sdf_output = form.getvalue(\"sdf_output\") if(extension==\"smi\"): list_A =", "elif(operator==\"NOT\"): outputString = \"*\".join(set(list_A) - set(list_B)) if(sdf_output==\"on\"): #if we want", "list of smiles and writes it as sdf using a", "= write_sdf(outputString.replace(\"|\",\" \").split(\"*\")) outputString = \"*\".join(sdfs) outputString = outputString.replace(\"\\n\",\"!\").replace(\" \",\"|\")", "if(sdf_output==\"on\"): #if we want the output as sdf sdfs =", "molId = line[1].replace(\" \",\"|\").rstrip() if(OEParseSmiles(mol,smi)): smi = OECreateSmiString(mol) mol.Clear() smiles_list.append(smi", "return smiles_list if __name__ == \"__main__\": print \"Content-Type: text/html\\r\\n\\r\\n\" form", "#can't send spaces or new lines return smiles_list #takes a", "sdf using a memory buffer def write_sdf(smiles_list): sdfs = []", "if(OEParseSmiles(mol,smi)): smi = OECreateSmiString(mol) mol.Clear() smiles_list.append(smi + \"|\" + molId)", "= outputString.replace(\"\\n\",\"!\").replace(\" \",\"|\") #sends the output to index.html using javascript", "for line in smiles: if len(line.rstrip())>0: line = line.split() smi", "= process_smiles(dataB) else: list_B = read_sdf(dataB) if(operator==\"AND\"): outputString = \"*\".join(set(list_A)", "javascript print \"\"\" <html> <head> <input type=\"text\" id=\"data\" value=\"\"\" +", "import cgi #creates a list of smiles of the syntax", "for smiles in smiles_list: if(OEParseSmiles(mol,smiles.replace(\"|\",\" \"))): OEWriteMolecule(ofs,mol) sdfs.append(ofs.GetString()) mol.Clear() ofs.SetString(\"\")", "smiles_list: if(OEParseSmiles(mol,smiles.replace(\"|\",\" \"))): OEWriteMolecule(ofs,mol) sdfs.append(ofs.GetString()) mol.Clear() ofs.SetString(\"\") return sdfs #creates", "\"*\".join(set(list_A) | set(list_B)) elif(operator==\"NOT\"): outputString = \"*\".join(set(list_A) - set(list_B)) if(sdf_output==\"on\"):", "len(line)>1: molId = 
line[1].replace(\" \",\"|\").rstrip() if(OEParseSmiles(mol,smi)): smi = OECreateSmiString(mol) mol.Clear()", "* import cgi #creates a list of smiles of the", "#sends the output to index.html using javascript print \"\"\" <html>", "= line.split() smi = line[0] molId = \"\" if len(line)>1:", "#takes a list of smiles and writes it as sdf", "def write_sdf(smiles_list): sdfs = [] ofs = oemolostream() ofs.SetFormat(OEFormat_SDF) ofs.openstring()", "list of smiles of the syntax [smiles|molId,smiles|molId] def read_sdf(sdf_data): ifs", "as sdf using a memory buffer def write_sdf(smiles_list): sdfs =", "is supplied outputString = \"*\".join(set(list_A)) #removes all doubles using the", "if(extension==\"smi\"): list_B = process_smiles(dataB) else: list_B = read_sdf(dataB) if(operator==\"AND\"): outputString", "smiles = OECreateSmiString(mol) smiles_list.append(smiles + \"|\" + mol.GetTitle()) return smiles_list", "+ mol.GetTitle()) return smiles_list if __name__ == \"__main__\": print \"Content-Type:", "= OECreateSmiString(mol) smiles_list.append(smiles + \"|\" + mol.GetTitle()) return smiles_list if", "+ outputString + \"\"\"> <script type=\"text/javascript\"> parent.postMessage(data.value,\"*\"); </script> </head> </html>", "line[1].replace(\" \",\"|\").rstrip() if(OEParseSmiles(mol,smi)): smi = OECreateSmiString(mol) mol.Clear() smiles_list.append(smi + \"|\"", "ofs.SetString(\"\") return sdfs #creates a list of smiles of the", "\"|\" + mol.GetTitle()) return smiles_list if __name__ == \"__main__\": print", "supplied outputString = \"*\".join(set(list_A)) #removes all doubles using the set()", "set(list_B)) elif(operator==\"NOT\"): outputString = \"*\".join(set(list_A) - set(list_B)) if(sdf_output==\"on\"): #if we", "a list of smiles of the syntax [smiles|molId,smiles|molId] def read_sdf(sdf_data):", "#if we want the output as sdf sdfs = write_sdf(outputString.replace(\"|\",\"", "write_sdf(outputString.replace(\"|\",\" \").split(\"*\")) outputString = \"*\".join(sdfs) 
outputString = outputString.replace(\"\\n\",\"!\").replace(\" \",\"|\") #sends", "OECreateSmiString(mol) mol.Clear() smiles_list.append(smi + \"|\" + molId) #can't send spaces", "mol in ifs.GetOEGraphMols(): smiles = OECreateSmiString(mol) smiles_list.append(smiles + \"|\" +", "smiles of the syntax [smiles|molId,smiles|molId] def process_smiles(smiles): smiles = smiles.split('\\n')", "smiles_list #takes a list of smiles and writes it as", "one file is supplied outputString = \"*\".join(set(list_A)) #removes all doubles", "= form.getvalue(\"dataB\") #if two files are supplied if(extension==\"smi\"): list_B =", "extension = form.getvalue(\"extension\") dataA = form.getvalue(\"dataA\") operator = form.getvalue(\"smiles_operator\") sdf_output", "list of smiles of the syntax [smiles|molId,smiles|molId] def process_smiles(smiles): smiles", "OECreateSmiString(mol) smiles_list.append(smiles + \"|\" + mol.GetTitle()) return smiles_list if __name__", "index.html using javascript print \"\"\" <html> <head> <input type=\"text\" id=\"data\"", "ifs = oemolistream() ifs.SetFormat(OEFormat_SDF) ifs.openstring(sdf_data) smiles_list = [] for mol", "line = line.split() smi = line[0] molId = \"\" if", "= OEGraphMol() for smiles in smiles_list: if(OEParseSmiles(mol,smiles.replace(\"|\",\" \"))): OEWriteMolecule(ofs,mol) sdfs.append(ofs.GetString())", "using javascript print \"\"\" <html> <head> <input type=\"text\" id=\"data\" value=\"\"\"", "read_sdf(dataB) if(operator==\"AND\"): outputString = \"*\".join(set(list_A) & set(list_B)) elif(operator==\"OR\"): outputString =", "ifs.openstring(sdf_data) smiles_list = [] for mol in ifs.GetOEGraphMols(): smiles =", "outputString + \"\"\"> <script type=\"text/javascript\"> parent.postMessage(data.value,\"*\"); </script> </head> </html> \"\"\"", "write_sdf(smiles_list): sdfs = [] ofs = oemolostream() ofs.SetFormat(OEFormat_SDF) ofs.openstring() mol", "OEWriteMolecule(ofs,mol) sdfs.append(ofs.GetString()) mol.Clear() ofs.SetString(\"\") return 
sdfs #creates a list of", "sdfs.append(ofs.GetString()) mol.Clear() ofs.SetString(\"\") return sdfs #creates a list of smiles", "<head> <input type=\"text\" id=\"data\" value=\"\"\" + outputString + \"\"\"> <script", "#!/opt/az/psf/python/2.7/bin/python from openeye.oechem import * import cgi #creates a list", "if(operator==\"AND\"): outputString = \"*\".join(set(list_A) & set(list_B)) elif(operator==\"OR\"): outputString = \"*\".join(set(list_A)", "#removes all doubles using the set() function else: dataB =", "#if two files are supplied if(extension==\"smi\"): list_B = process_smiles(dataB) else:", "- set(list_B)) if(sdf_output==\"on\"): #if we want the output as sdf", "smiles_list=[] for line in smiles: if len(line.rstrip())>0: line = line.split()", "sdfs = [] ofs = oemolostream() ofs.SetFormat(OEFormat_SDF) ofs.openstring() mol =", "[smiles|molId,smiles|molId] def process_smiles(smiles): smiles = smiles.split('\\n') mol = OEGraphMol() smiles_list=[]", "process_smiles(smiles): smiles = smiles.split('\\n') mol = OEGraphMol() smiles_list=[] for line", "outputString = \"*\".join(set(list_A) & set(list_B)) elif(operator==\"OR\"): outputString = \"*\".join(set(list_A) |", "sdfs #creates a list of smiles of the syntax [smiles|molId,smiles|molId]", "new lines return smiles_list #takes a list of smiles and", "= form.getvalue(\"extension\") dataA = form.getvalue(\"dataA\") operator = form.getvalue(\"smiles_operator\") sdf_output =", "def process_smiles(smiles): smiles = smiles.split('\\n') mol = OEGraphMol() smiles_list=[] for", "mol.Clear() ofs.SetString(\"\") return sdfs #creates a list of smiles of", "writes it as sdf using a memory buffer def write_sdf(smiles_list):", "read_sdf(sdf_data): ifs = oemolistream() ifs.SetFormat(OEFormat_SDF) ifs.openstring(sdf_data) smiles_list = [] for", "smiles.split('\\n') mol = OEGraphMol() smiles_list=[] for line in smiles: if", "print \"\"\" <html> <head> <input type=\"text\" id=\"data\" value=\"\"\" + outputString", 
"if(OEParseSmiles(mol,smiles.replace(\"|\",\" \"))): OEWriteMolecule(ofs,mol) sdfs.append(ofs.GetString()) mol.Clear() ofs.SetString(\"\") return sdfs #creates a", "syntax [smiles|molId,smiles|molId] def process_smiles(smiles): smiles = smiles.split('\\n') mol = OEGraphMol()", "of smiles and writes it as sdf using a memory", "buffer def write_sdf(smiles_list): sdfs = [] ofs = oemolostream() ofs.SetFormat(OEFormat_SDF)", "list_B = read_sdf(dataB) if(operator==\"AND\"): outputString = \"*\".join(set(list_A) & set(list_B)) elif(operator==\"OR\"):", "sdfs = write_sdf(outputString.replace(\"|\",\" \").split(\"*\")) outputString = \"*\".join(sdfs) outputString = outputString.replace(\"\\n\",\"!\").replace(\"", "or new lines return smiles_list #takes a list of smiles", "else: list_B = read_sdf(dataB) if(operator==\"AND\"): outputString = \"*\".join(set(list_A) & set(list_B))", "molId) #can't send spaces or new lines return smiles_list #takes", "text/html\\r\\n\\r\\n\" form = cgi.FieldStorage() extension = form.getvalue(\"extension\") dataA = form.getvalue(\"dataA\")", "form.getvalue(\"sdf_output\") if(extension==\"smi\"): list_A = process_smiles(dataA) else: list_A = read_sdf(dataA) outputString", "= \"\" if(operator==\"UNI\"): #if only one file is supplied outputString", "outputString.replace(\"\\n\",\"!\").replace(\" \",\"|\") #sends the output to index.html using javascript print", "smi = OECreateSmiString(mol) mol.Clear() smiles_list.append(smi + \"|\" + molId) #can't", "def read_sdf(sdf_data): ifs = oemolistream() ifs.SetFormat(OEFormat_SDF) ifs.openstring(sdf_data) smiles_list = []", "return smiles_list #takes a list of smiles and writes it", "list_B = process_smiles(dataB) else: list_B = read_sdf(dataB) if(operator==\"AND\"): outputString =", "= read_sdf(dataB) if(operator==\"AND\"): outputString = \"*\".join(set(list_A) & set(list_B)) elif(operator==\"OR\"): outputString", "the output as sdf sdfs = write_sdf(outputString.replace(\"|\",\" \").split(\"*\")) 
outputString =", "\",\"|\").rstrip() if(OEParseSmiles(mol,smi)): smi = OECreateSmiString(mol) mol.Clear() smiles_list.append(smi + \"|\" +", "\"\"\" <html> <head> <input type=\"text\" id=\"data\" value=\"\"\" + outputString +", "read_sdf(dataA) outputString = \"\" if(operator==\"UNI\"): #if only one file is", "= \"*\".join(set(list_A) & set(list_B)) elif(operator==\"OR\"): outputString = \"*\".join(set(list_A) | set(list_B))", "as sdf sdfs = write_sdf(outputString.replace(\"|\",\" \").split(\"*\")) outputString = \"*\".join(sdfs) outputString", "set() function else: dataB = form.getvalue(\"dataB\") #if two files are", "= \"*\".join(set(list_A) - set(list_B)) if(sdf_output==\"on\"): #if we want the output", "sdf_output = form.getvalue(\"sdf_output\") if(extension==\"smi\"): list_A = process_smiles(dataA) else: list_A =", "smiles in smiles_list: if(OEParseSmiles(mol,smiles.replace(\"|\",\" \"))): OEWriteMolecule(ofs,mol) sdfs.append(ofs.GetString()) mol.Clear() ofs.SetString(\"\") return", "return sdfs #creates a list of smiles of the syntax", "line in smiles: if len(line.rstrip())>0: line = line.split() smi =", "if(extension==\"smi\"): list_A = process_smiles(dataA) else: list_A = read_sdf(dataA) outputString =", "memory buffer def write_sdf(smiles_list): sdfs = [] ofs = oemolostream()", "list_A = read_sdf(dataA) outputString = \"\" if(operator==\"UNI\"): #if only one", "in smiles: if len(line.rstrip())>0: line = line.split() smi = line[0]", "dataB = form.getvalue(\"dataB\") #if two files are supplied if(extension==\"smi\"): list_B", "else: list_A = read_sdf(dataA) outputString = \"\" if(operator==\"UNI\"): #if only", "= read_sdf(dataA) outputString = \"\" if(operator==\"UNI\"): #if only one file", "outputString = \"*\".join(set(list_A) - set(list_B)) if(sdf_output==\"on\"): #if we want the", "= \"*\".join(set(list_A)) #removes all doubles using the set() function else:", "= \"*\".join(sdfs) outputString = outputString.replace(\"\\n\",\"!\").replace(\" \",\"|\") 
#sends the output to", "id=\"data\" value=\"\"\" + outputString + \"\"\"> <script type=\"text/javascript\"> parent.postMessage(data.value,\"*\"); </script>", "= cgi.FieldStorage() extension = form.getvalue(\"extension\") dataA = form.getvalue(\"dataA\") operator =", "in smiles_list: if(OEParseSmiles(mol,smiles.replace(\"|\",\" \"))): OEWriteMolecule(ofs,mol) sdfs.append(ofs.GetString()) mol.Clear() ofs.SetString(\"\") return sdfs", "a list of smiles and writes it as sdf using", "set(list_B)) if(sdf_output==\"on\"): #if we want the output as sdf sdfs", "ofs = oemolostream() ofs.SetFormat(OEFormat_SDF) ofs.openstring() mol = OEGraphMol() for smiles", "& set(list_B)) elif(operator==\"OR\"): outputString = \"*\".join(set(list_A) | set(list_B)) elif(operator==\"NOT\"): outputString", "= oemolostream() ofs.SetFormat(OEFormat_SDF) ofs.openstring() mol = OEGraphMol() for smiles in", "mol.Clear() smiles_list.append(smi + \"|\" + molId) #can't send spaces or", "else: dataB = form.getvalue(\"dataB\") #if two files are supplied if(extension==\"smi\"):", "the set() function else: dataB = form.getvalue(\"dataB\") #if two files", "process_smiles(dataA) else: list_A = read_sdf(dataA) outputString = \"\" if(operator==\"UNI\"): #if", "set(list_B)) elif(operator==\"OR\"): outputString = \"*\".join(set(list_A) | set(list_B)) elif(operator==\"NOT\"): outputString =", "+ molId) #can't send spaces or new lines return smiles_list", "print \"Content-Type: text/html\\r\\n\\r\\n\" form = cgi.FieldStorage() extension = form.getvalue(\"extension\") dataA", "[] ofs = oemolostream() ofs.SetFormat(OEFormat_SDF) ofs.openstring() mol = OEGraphMol() for", "OEGraphMol() for smiles in smiles_list: if(OEParseSmiles(mol,smiles.replace(\"|\",\" \"))): OEWriteMolecule(ofs,mol) sdfs.append(ofs.GetString()) mol.Clear()", "syntax [smiles|molId,smiles|molId] def read_sdf(sdf_data): ifs = oemolistream() ifs.SetFormat(OEFormat_SDF) ifs.openstring(sdf_data) smiles_list", "<html> <head> <input type=\"text\" 
id=\"data\" value=\"\"\" + outputString + \"\"\">", "are supplied if(extension==\"smi\"): list_B = process_smiles(dataB) else: list_B = read_sdf(dataB)", "in ifs.GetOEGraphMols(): smiles = OECreateSmiString(mol) smiles_list.append(smiles + \"|\" + mol.GetTitle())", "import * import cgi #creates a list of smiles of", "= OECreateSmiString(mol) mol.Clear() smiles_list.append(smi + \"|\" + molId) #can't send", "value=\"\"\" + outputString + \"\"\"> <script type=\"text/javascript\"> parent.postMessage(data.value,\"*\"); </script> </head>", "sdf sdfs = write_sdf(outputString.replace(\"|\",\" \").split(\"*\")) outputString = \"*\".join(sdfs) outputString =", "operator = form.getvalue(\"smiles_operator\") sdf_output = form.getvalue(\"sdf_output\") if(extension==\"smi\"): list_A = process_smiles(dataA)", "| set(list_B)) elif(operator==\"NOT\"): outputString = \"*\".join(set(list_A) - set(list_B)) if(sdf_output==\"on\"): #if", "= process_smiles(dataA) else: list_A = read_sdf(dataA) outputString = \"\" if(operator==\"UNI\"):", "the syntax [smiles|molId,smiles|molId] def process_smiles(smiles): smiles = smiles.split('\\n') mol =", "of the syntax [smiles|molId,smiles|molId] def read_sdf(sdf_data): ifs = oemolistream() ifs.SetFormat(OEFormat_SDF)", "from openeye.oechem import * import cgi #creates a list of", "smiles_list.append(smi + \"|\" + molId) #can't send spaces or new", "the syntax [smiles|molId,smiles|molId] def read_sdf(sdf_data): ifs = oemolistream() ifs.SetFormat(OEFormat_SDF) ifs.openstring(sdf_data)", "smiles_list if __name__ == \"__main__\": print \"Content-Type: text/html\\r\\n\\r\\n\" form =", "= form.getvalue(\"smiles_operator\") sdf_output = form.getvalue(\"sdf_output\") if(extension==\"smi\"): list_A = process_smiles(dataA) else:", "= [] ofs = oemolostream() ofs.SetFormat(OEFormat_SDF) ofs.openstring() mol = OEGraphMol()", "OEGraphMol() smiles_list=[] for line in smiles: if len(line.rstrip())>0: line =", "if len(line)>1: molId = line[1].replace(\" 
\",\"|\").rstrip() if(OEParseSmiles(mol,smi)): smi = OECreateSmiString(mol)", "smiles_list = [] for mol in ifs.GetOEGraphMols(): smiles = OECreateSmiString(mol)", "\"Content-Type: text/html\\r\\n\\r\\n\" form = cgi.FieldStorage() extension = form.getvalue(\"extension\") dataA =", "mol.GetTitle()) return smiles_list if __name__ == \"__main__\": print \"Content-Type: text/html\\r\\n\\r\\n\"", "#if only one file is supplied outputString = \"*\".join(set(list_A)) #removes", "type=\"text\" id=\"data\" value=\"\"\" + outputString + \"\"\"> <script type=\"text/javascript\"> parent.postMessage(data.value,\"*\");", "function else: dataB = form.getvalue(\"dataB\") #if two files are supplied", "[smiles|molId,smiles|molId] def read_sdf(sdf_data): ifs = oemolistream() ifs.SetFormat(OEFormat_SDF) ifs.openstring(sdf_data) smiles_list =", "openeye.oechem import * import cgi #creates a list of smiles", "\"|\" + molId) #can't send spaces or new lines return", "mol = OEGraphMol() for smiles in smiles_list: if(OEParseSmiles(mol,smiles.replace(\"|\",\" \"))): OEWriteMolecule(ofs,mol)", "using the set() function else: dataB = form.getvalue(\"dataB\") #if two", "= OEGraphMol() smiles_list=[] for line in smiles: if len(line.rstrip())>0: line", "= form.getvalue(\"sdf_output\") if(extension==\"smi\"): list_A = process_smiles(dataA) else: list_A = read_sdf(dataA)", "outputString = \"*\".join(set(list_A) | set(list_B)) elif(operator==\"NOT\"): outputString = \"*\".join(set(list_A) -", "form = cgi.FieldStorage() extension = form.getvalue(\"extension\") dataA = form.getvalue(\"dataA\") operator", "spaces or new lines return smiles_list #takes a list of", "ofs.openstring() mol = OEGraphMol() for smiles in smiles_list: if(OEParseSmiles(mol,smiles.replace(\"|\",\" \"))):", "= line[1].replace(\" \",\"|\").rstrip() if(OEParseSmiles(mol,smi)): smi = OECreateSmiString(mol) mol.Clear() smiles_list.append(smi +", "form.getvalue(\"smiles_operator\") sdf_output = form.getvalue(\"sdf_output\") 
if(extension==\"smi\"): list_A = process_smiles(dataA) else: list_A", "if(operator==\"UNI\"): #if only one file is supplied outputString = \"*\".join(set(list_A))", "process_smiles(dataB) else: list_B = read_sdf(dataB) if(operator==\"AND\"): outputString = \"*\".join(set(list_A) &", "want the output as sdf sdfs = write_sdf(outputString.replace(\"|\",\" \").split(\"*\")) outputString", "using a memory buffer def write_sdf(smiles_list): sdfs = [] ofs", "only one file is supplied outputString = \"*\".join(set(list_A)) #removes all", "= \"*\".join(set(list_A) | set(list_B)) elif(operator==\"NOT\"): outputString = \"*\".join(set(list_A) - set(list_B))", "for mol in ifs.GetOEGraphMols(): smiles = OECreateSmiString(mol) smiles_list.append(smiles + \"|\"", "smi = line[0] molId = \"\" if len(line)>1: molId =", "it as sdf using a memory buffer def write_sdf(smiles_list): sdfs", "smiles = smiles.split('\\n') mol = OEGraphMol() smiles_list=[] for line in", "= smiles.split('\\n') mol = OEGraphMol() smiles_list=[] for line in smiles:", "\").split(\"*\")) outputString = \"*\".join(sdfs) outputString = outputString.replace(\"\\n\",\"!\").replace(\" \",\"|\") #sends the", "cgi.FieldStorage() extension = form.getvalue(\"extension\") dataA = form.getvalue(\"dataA\") operator = form.getvalue(\"smiles_operator\")", "= \"\" if len(line)>1: molId = line[1].replace(\" \",\"|\").rstrip() if(OEParseSmiles(mol,smi)): smi", "#creates a list of smiles of the syntax [smiles|molId,smiles|molId] def", "list_A = process_smiles(dataA) else: list_A = read_sdf(dataA) outputString = \"\"", "molId = \"\" if len(line)>1: molId = line[1].replace(\" \",\"|\").rstrip() if(OEParseSmiles(mol,smi)):", "__name__ == \"__main__\": print \"Content-Type: text/html\\r\\n\\r\\n\" form = cgi.FieldStorage() extension", "two files are supplied if(extension==\"smi\"): list_B = process_smiles(dataB) else: list_B", "smiles: if len(line.rstrip())>0: line = line.split() smi = line[0] molId", "files are supplied 
if(extension==\"smi\"): list_B = process_smiles(dataB) else: list_B =", "elif(operator==\"OR\"): outputString = \"*\".join(set(list_A) | set(list_B)) elif(operator==\"NOT\"): outputString = \"*\".join(set(list_A)", "outputString = \"*\".join(sdfs) outputString = outputString.replace(\"\\n\",\"!\").replace(\" \",\"|\") #sends the output", "\"*\".join(set(list_A)) #removes all doubles using the set() function else: dataB", "\",\"|\") #sends the output to index.html using javascript print \"\"\"", "ofs.SetFormat(OEFormat_SDF) ofs.openstring() mol = OEGraphMol() for smiles in smiles_list: if(OEParseSmiles(mol,smiles.replace(\"|\",\"", "+ \"|\" + mol.GetTitle()) return smiles_list if __name__ == \"__main__\":", "\"))): OEWriteMolecule(ofs,mol) sdfs.append(ofs.GetString()) mol.Clear() ofs.SetString(\"\") return sdfs #creates a list", "mol = OEGraphMol() smiles_list=[] for line in smiles: if len(line.rstrip())>0:", "outputString = \"\" if(operator==\"UNI\"): #if only one file is supplied", "outputString = outputString.replace(\"\\n\",\"!\").replace(\" \",\"|\") #sends the output to index.html using", "all doubles using the set() function else: dataB = form.getvalue(\"dataB\")", "\"*\".join(set(list_A) & set(list_B)) elif(operator==\"OR\"): outputString = \"*\".join(set(list_A) | set(list_B)) elif(operator==\"NOT\"):", "\"*\".join(sdfs) outputString = outputString.replace(\"\\n\",\"!\").replace(\" \",\"|\") #sends the output to index.html", "\"__main__\": print \"Content-Type: text/html\\r\\n\\r\\n\" form = cgi.FieldStorage() extension = form.getvalue(\"extension\")", "of smiles of the syntax [smiles|molId,smiles|molId] def process_smiles(smiles): smiles =", "send spaces or new lines return smiles_list #takes a list", "supplied if(extension==\"smi\"): list_B = process_smiles(dataB) else: list_B = read_sdf(dataB) if(operator==\"AND\"):", "== \"__main__\": print \"Content-Type: text/html\\r\\n\\r\\n\" form = cgi.FieldStorage() extension =", "= oemolistream() 
ifs.SetFormat(OEFormat_SDF) ifs.openstring(sdf_data) smiles_list = [] for mol in", "= form.getvalue(\"dataA\") operator = form.getvalue(\"smiles_operator\") sdf_output = form.getvalue(\"sdf_output\") if(extension==\"smi\"): list_A", "ifs.GetOEGraphMols(): smiles = OECreateSmiString(mol) smiles_list.append(smiles + \"|\" + mol.GetTitle()) return", "\"*\".join(set(list_A) - set(list_B)) if(sdf_output==\"on\"): #if we want the output as", "a memory buffer def write_sdf(smiles_list): sdfs = [] ofs =", "ifs.SetFormat(OEFormat_SDF) ifs.openstring(sdf_data) smiles_list = [] for mol in ifs.GetOEGraphMols(): smiles", "if __name__ == \"__main__\": print \"Content-Type: text/html\\r\\n\\r\\n\" form = cgi.FieldStorage()", "form.getvalue(\"extension\") dataA = form.getvalue(\"dataA\") operator = form.getvalue(\"smiles_operator\") sdf_output = form.getvalue(\"sdf_output\")", "if len(line.rstrip())>0: line = line.split() smi = line[0] molId =", "line[0] molId = \"\" if len(line)>1: molId = line[1].replace(\" \",\"|\").rstrip()", "and writes it as sdf using a memory buffer def", "oemolistream() ifs.SetFormat(OEFormat_SDF) ifs.openstring(sdf_data) smiles_list = [] for mol in ifs.GetOEGraphMols():", "oemolostream() ofs.SetFormat(OEFormat_SDF) ofs.openstring() mol = OEGraphMol() for smiles in smiles_list:", "= line[0] molId = \"\" if len(line)>1: molId = line[1].replace(\"", "we want the output as sdf sdfs = write_sdf(outputString.replace(\"|\",\" \").split(\"*\"))", "lines return smiles_list #takes a list of smiles and writes", "output as sdf sdfs = write_sdf(outputString.replace(\"|\",\" \").split(\"*\")) outputString = \"*\".join(sdfs)" ]
[ "= _swig_repr def __init__(self, m, s, n_): r\"\"\"__init__(ExtrudeCoefficient self, Mesh", "return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy()) return wrapper class _SwigNonDynamicMeta(type): \"\"\"Meta class", "self, GridFunction vec_field, int comp=0)\"\"\" return _gridfunc.GridFunction_ProjectVectorFieldOn(self, vec_field, comp) ProjectVectorFieldOn", "-> double ComputeMaxError(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule const *[] irs=0)", "dofs) ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff, int attribute) ProjectCoefficient(GridFunction self, mfem::Coefficient", "_swig_new_instance_method(_gridfunc.GridFunction_Update) def FESpace(self, *args): r\"\"\" FESpace(GridFunction self) -> FiniteElementSpace FESpace(GridFunction", "const *[] irs=0) -> double ComputeMaxError(GridFunction self, mfem::Coefficient *[] exsol,", "import weakref import mfem._par.array import mfem._par.mem_manager import mfem._par.vector import mfem._par.coefficient", "== 1: vec = Vector() _gridfunc.GridFunction_GetNodalValues(self, vec, args[0]) vec.thisown =", "def GetVectorValue(self, *args): r\"\"\" GetVectorValue(GridFunction self, int i, IntegrationPoint ip,", "VectorCoefficient vcoeff, intArray bdr_attr)\"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficientNormal(self, vcoeff, bdr_attr) ProjectBdrCoefficientNormal =", "excurl, irs) ComputeCurlError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeCurlError) def ComputeDivError(self, exdiv, irs=0): r\"\"\"ComputeDivError(GridFunction", "Vector base, int offset, int size) MakeRef(GridFunction self, Vector base,", "orig) -> QuadratureFunction __init__(QuadratureFunction self, QuadratureSpace qspace_, int vdim_=1) ->", "MakeOwner(self, fec_): r\"\"\"MakeOwner(GridFunction self, FiniteElementCollection fec_)\"\"\" return _gridfunc.GridFunction_MakeOwner(self, fec_) MakeOwner", "file unless you know what you are doing--modify # the", "**kwargs)) def Eval(self, h, p): 
r\"\"\"Eval(JumpScaling self, double h, int", "vals, tr, vdim) GetFaceValues = _swig_new_instance_method(_gridfunc.GridFunction_GetFaceValues) def GetFaceVectorValues(self, i, side,", "def ComputeElementMaxErrors(self, *args): r\"\"\" ComputeElementMaxErrors(GridFunction self, Coefficient exsol, Vector error,", "double ComputeMaxError(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule const *[] irs=0) ->", "blfi, flux, wcoef, subdomain) ComputeFlux = _swig_new_instance_method(_gridfunc.GridFunction_ComputeFlux) def Assign(self, *args):", "_swig_new_instance_method(_gridfunc.GridFunction_GetValue) def GetVectorValue(self, *args): r\"\"\" GetVectorValue(GridFunction self, int i, IntegrationPoint", "_swig_new_instance_method(_gridfunc.GridFunction_ComputeL2Error) def ComputeGradError(self, exgrad, irs=0): r\"\"\"ComputeGradError(GridFunction self, VectorCoefficient exgrad, mfem::IntegrationRule", "wrapper(cls): return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy()) return wrapper class _SwigNonDynamicMeta(type): \"\"\"Meta", "def __idiv__(self, v): ret = _gridfunc.GridFunction_idiv(self, v) ret.thisown = 0", "error, mfem::IntegrationRule const *[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementL1Errors(self, *args) ComputeElementL1Errors", "with_coeff=False): r\"\"\"ZZErrorEstimator(BilinearFormIntegrator blfi, GridFunction u, GridFunction flux, Vector error_estimates, intArray", "return _gridfunc.GridFunction_isub(self, *args) isub = _swig_new_instance_method(_gridfunc.GridFunction_isub) def imul(self, c): r\"\"\"imul(GridFunction", "_gridfunc.GridFunction_SetFromTrueVector(self) SetFromTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueVector) def GetValue(self, *args): r\"\"\" GetValue(GridFunction self,", "metaclass to a SWIG wrapped class - a slimmed down", "def GetElementIntRule(self, idx): r\"\"\"GetElementIntRule(QuadratureFunction self, int idx) -> IntegrationRule\"\"\" return", "def Assign(self, 
*args): r\"\"\" Assign(GridFunction self, GridFunction rhs) -> GridFunction", "try: strthis = \"proxy of \" + self.this.__repr__() except __builtin__.Exception:", "= _swig_new_instance_method(_gridfunc.GridFunction_ComputeL1Error) def ComputeLpError(self, *args): r\"\"\" ComputeLpError(GridFunction self, double const", "idx): r\"\"\"GetElementIntRule(QuadratureFunction self, int idx) -> IntegrationRule\"\"\" return _gridfunc.QuadratureFunction_GetElementIntRule(self, idx)", "GridFunction.__iadd__ = __iadd__ GridFunction.__idiv__ = __idiv__ GridFunction.__isub__ = __isub__ GridFunction.__imul__", "_swig_python_version_info < (2, 7, 0): raise RuntimeError(\"Python 2.7 or later", "mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeCurlError(self, excurl, irs)", "the low-level C/C++ module if __package__ or \".\" in __name__:", "ProjectBdrCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficient) def ProjectBdrCoefficientNormal(self, vcoeff, bdr_attr): r\"\"\"ProjectBdrCoefficientNormal(GridFunction self, VectorCoefficient", "int i, IntegrationRule ir, Vector vals, int vdim=1) GetValues(GridFunction self,", "vdim=1) GetLaplacians(GridFunction self, int i, IntegrationRule ir, Vector laps, DenseMatrix", "self, Coefficient exdiv, mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return", "def __init__(self, *args): r\"\"\" __init__(QuadratureFunction self) -> QuadratureFunction __init__(QuadratureFunction self,", "ComputeL1Error(GridFunction self, Coefficient exsol, mfem::IntegrationRule const *[] irs=0) -> double", "self, int i, IntegrationRule ir, Vector vals, int vdim=1) GetValues(GridFunction", "vdim_=1) -> QuadratureFunction __init__(QuadratureFunction self, QuadratureSpace qspace_, double * qf_data,", "self, ElementTransformation tr, Vector curl)\"\"\" return _gridfunc.GridFunction_GetCurl(self, tr, curl) GetCurl", "double const p, VectorCoefficient exsol, Coefficient weight=None, 
VectorCoefficient v_weight=None, mfem::IntegrationRule", "*args): r\"\"\" SetSpace(QuadratureFunction self, QuadratureSpace qspace_, int vdim_=-1) SetSpace(QuadratureFunction self,", "GetElementValues = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementValues) def Save(self, *args): r\"\"\" Save(QuadratureFunction self, std::ostream", "const * fname, int precision=16) Save(GridFunction self, char const *", "ret.thisown = 0 return self def __idiv__(self, v): ret =", "VectorCoefficient vcoeff) ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff, intArray dofs) ProjectCoefficient(GridFunction self,", "_swig_new_instance_method(_gridfunc.GridFunction_Assign) def Update(self): r\"\"\"Update(GridFunction self)\"\"\" return _gridfunc.GridFunction_Update(self) Update = _swig_new_instance_method(_gridfunc.GridFunction_Update)", "i, side, ir, vals, tr, vdim) GetFaceValues = _swig_new_instance_method(_gridfunc.GridFunction_GetFaceValues) def", "GetVectorGradient = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorGradient) def GetElementAverages(self, avgs): r\"\"\"GetElementAverages(GridFunction self, GridFunction avgs)\"\"\"", "return _gridfunc.GridFunction_GetTrueDofs(self, tv) GetTrueDofs = _swig_new_instance_method(_gridfunc.GridFunction_GetTrueDofs) def SetTrueVector(self): r\"\"\"SetTrueVector(GridFunction self)\"\"\"", "Coefficient exsol, Coefficient ell_coeff, double Nu, mfem::IntegrationRule const *[] irs=0)", "_gridfunc.GridFunction_ComputeMaxError(self, *args) ComputeMaxError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeMaxError) def ComputeW11Error(self, exsol, exgrad, norm_type,", "tr, vdim=1): r\"\"\"GetFaceValues(GridFunction self, int i, int side, IntegrationRule ir,", "FiniteElementSpace f, Vector v, int v_offset) \"\"\" return _gridfunc.GridFunction_MakeRef(self, *args)", "ir, DenseMatrix vals, DenseMatrix tr) GetVectorValues(GridFunction self, ElementTransformation T, IntegrationRule", "*[] irs=0) \"\"\" 
return _gridfunc.GridFunction_ComputeElementL2Errors(self, *args) ComputeElementL2Errors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL2Errors) def", "_gridfunc.GridFunction_GetValue(self, *args) GetValue = _swig_new_instance_method(_gridfunc.GridFunction_GetValue) def GetVectorValue(self, *args): r\"\"\" GetVectorValue(GridFunction", "def ComputeElementL1Errors(self, *args): r\"\"\" ComputeElementL1Errors(GridFunction self, Coefficient exsol, Vector error,", "GetValuesFrom = _swig_new_instance_method(_gridfunc.GridFunction_GetValuesFrom) def GetBdrValuesFrom(self, orig_func): r\"\"\"GetBdrValuesFrom(GridFunction self, GridFunction orig_func)\"\"\"", "_gridfunc: _gridfunc.JumpScaling_swigregister(JumpScaling) class QuadratureFunction(mfem._par.vector.Vector): r\"\"\"Proxy of C++ mfem::QuadratureFunction class.\"\"\" thisown", "r\"\"\" GetHessians(GridFunction self, int i, IntegrationRule ir, DenseMatrix hess, int", "ret = _gridfunc.GridFunction_idiv(self, v) ret.thisown = 0 return self def", "return _gridfunc.GridFunction_ComputeLpError(self, *args) ComputeLpError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeLpError) def ComputeElementLpErrors(self, *args): r\"\"\"", "precision): r\"\"\"SaveToFile(GridFunction self, char const * gf_file, int const precision)\"\"\"", "SaveSTL(self, out, TimesToRefine=1): r\"\"\"SaveSTL(GridFunction self, std::ostream & out, int TimesToRefine=1)\"\"\"", "Eval(self, T, ip): r\"\"\"Eval(ExtrudeCoefficient self, ElementTransformation T, IntegrationPoint ip) ->", "def GetGradients(self, *args): r\"\"\" GetGradients(GridFunction self, ElementTransformation tr, IntegrationRule ir,", "self, VectorCoefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0) \"\"\"", "= 0 return self def __isub__(self, v): ret = _gridfunc.GridFunction_isub(self,", "IntegrationPoint ip, int comp=0, Vector tr=None) -> double \"\"\" return", "\"\"\" return _gridfunc.GridFunction_GetVectorValue(self, *args) 
GetVectorValue = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorValue) def GetValues(self, *args):", "precision=16): r\"\"\"SaveGZ(GridFunction self, char const * file, int precision=16)\"\"\" return", "double \"\"\" return _gridfunc.GridFunction_ComputeLpError(self, *args) ComputeLpError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeLpError) def ComputeElementLpErrors(self,", "-> Vector \"\"\" return _gridfunc.GridFunction_GetTrueVector(self, *args) GetTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_GetTrueVector) def", "r\"\"\" ImposeBounds(GridFunction self, int i, Vector weights, Vector lo_, Vector", "_gridfunc.QuadratureFunction_swiginit(self, _gridfunc.new_QuadratureFunction(*args)) __swig_destroy__ = _gridfunc.delete_QuadratureFunction def GetSpace(self): r\"\"\"GetSpace(QuadratureFunction self) ->", "ret.thisown = 0 return self def __isub__(self, v): ret =", "i, IntegrationRule ir, Vector vals, int vdim=1) GetValues(GridFunction self, int", "SetFromTrueDofs(self, tv): r\"\"\"SetFromTrueDofs(GridFunction self, Vector tv)\"\"\" return _gridfunc.GridFunction_SetFromTrueDofs(self, tv) SetFromTrueDofs", "ProjectBdrCoefficientTangent = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientTangent) def ComputeL2Error(self, *args): r\"\"\" ComputeL2Error(GridFunction self, Coefficient", "_gridfunc.Extrude1DGridFunction(mesh, mesh2d, sol, ny) Extrude1DGridFunction = _gridfunc.Extrude1DGridFunction def __iadd__(self, v):", "GridFunction Assign(GridFunction self, double value) -> GridFunction Assign(GridFunction self, Vector", "# Version 4.0.2 # # Do not make changes to", "self, int i, IntegrationRule ir, DenseMatrix hess, DenseMatrix tr, int", "not isinstance(getattr(cls, name), property): set(cls, name, value) else: raise AttributeError(\"You", "*args) GetGradients = _swig_new_instance_method(_gridfunc.GridFunction_GetGradients) def GetVectorGradient(self, tr, grad): r\"\"\"GetVectorGradient(GridFunction self,", 
"wcoef=True, subdomain=-1): r\"\"\"ComputeFlux(GridFunction self, BilinearFormIntegrator blfi, GridFunction flux, bool wcoef=True,", "of C++ mfem::JumpScaling class.\"\"\" thisown = property(lambda x: x.this.own(), lambda", "subdomain=-1): r\"\"\"ComputeFlux(GridFunction self, BilinearFormIntegrator blfi, GridFunction flux, bool wcoef=True, int", "set(cls, name, value) else: raise AttributeError(\"You cannot add class attributes", "_swig_new_instance_method(_gridfunc.GridFunction_ComputeH1Error) def ComputeHDivError(self, exsol, exdiv, irs=0): r\"\"\"ComputeHDivError(GridFunction self, VectorCoefficient exsol,", "0 return self def __imul__(self, v): ret = _gridfunc.GridFunction_imul(self, v)", "int ref)\"\"\" return _gridfunc.GridFunction_SaveVTK(self, out, field_name, ref) SaveVTK = _swig_new_instance_method(_gridfunc.GridFunction_SaveVTK)", "return _gridfunc.GridFunction_SetFromTrueDofs(self, tv) SetFromTrueDofs = _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueDofs) def SetFromTrueVector(self): r\"\"\"SetFromTrueVector(GridFunction self)\"\"\"", "DenseMatrix tr) -> int\"\"\" return _gridfunc.GridFunction_GetFaceVectorValues(self, i, side, ir, vals,", "_gridfunc.GridFunction_idiv(self, c) idiv = _swig_new_instance_method(_gridfunc.GridFunction_idiv) def Save(self, *args): r\"\"\" Save(GridFunction", "self, double c) -> GridFunction\"\"\" return _gridfunc.GridFunction_imul(self, c) imul =", "return _gridfunc.QuadratureFunction_GetVDim(self) GetVDim = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetVDim) def SetVDim(self, vdim_): r\"\"\"SetVDim(QuadratureFunction self,", "imul(self, c): r\"\"\"imul(GridFunction self, double c) -> GridFunction\"\"\" return _gridfunc.GridFunction_imul(self,", "int idx, Vector values) GetElementValues(QuadratureFunction self, int idx, int const", "= _swig_new_instance_method(_gridfunc.QuadratureFunction_SetSpace) def GetVDim(self): r\"\"\"GetVDim(QuadratureFunction self) -> int\"\"\" return 
_gridfunc.QuadratureFunction_GetVDim(self)", "def ComputeFlux(self, blfi, flux, wcoef=True, subdomain=-1): r\"\"\"ComputeFlux(GridFunction self, BilinearFormIntegrator blfi,", "ComputeElementL2Errors(self, *args): r\"\"\" ComputeElementL2Errors(GridFunction self, Coefficient exsol, Vector error, mfem::IntegrationRule", "int side, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr) -> int\"\"\"", "GridFunction flux, Vector error_estimates, intArray aniso_flags=None, int with_subdomains=1, bool with_coeff=False)", "QuadratureFunction qf) -> std::ostream & \"\"\" return _gridfunc.__lshift__(*args) __lshift__ =", "= 0 return vec.GetDataArray() else: return _gridfunc.GridFunction_GetNodalValues(self, *args) def GetVectorFieldNodalValues(self,", "nondynamic attributes (no new attributes) for a class\"\"\" __setattr__ =", "Vector val, Vector tr=None) \"\"\" return _gridfunc.GridFunction_GetVectorValue(self, *args) GetVectorValue =", "with_subdomains, with_coeff) ZZErrorEstimator = _gridfunc.ZZErrorEstimator def ComputeElementLpDistance(p, i, gf1, gf2):", "r\"\"\"GetBdrValuesFrom(GridFunction self, GridFunction orig_func)\"\"\" return _gridfunc.GridFunction_GetBdrValuesFrom(self, orig_func) GetBdrValuesFrom = _swig_new_instance_method(_gridfunc.GridFunction_GetBdrValuesFrom)", "value) else: raise AttributeError(\"You cannot add instance attributes to %s\"", "Register ExtrudeCoefficient in _gridfunc: _gridfunc.ExtrudeCoefficient_swigregister(ExtrudeCoefficient) def Extrude1DGridFunction(mesh, mesh2d, sol, ny):", "Vector vals, int comp=0, DenseMatrix tr=None) \"\"\" return _gridfunc.GridFunction_GetValues(self, *args)", "int vdim=1) \"\"\" return _gridfunc.GridFunction_GetHessians(self, *args) GetHessians = _swig_new_instance_method(_gridfunc.GridFunction_GetHessians) def", "vdim=1) -> double GetValue(GridFunction self, ElementTransformation T, IntegrationPoint ip, int", "0 return self def __isub__(self, v): ret = _gridfunc.GridFunction_isub(self, v)", 
"GetLaplacians(GridFunction self, int i, IntegrationRule ir, Vector laps, DenseMatrix tr,", "-> GridFunction Assign(GridFunction self, double value) -> GridFunction Assign(GridFunction self,", "self) -> FiniteElementCollection\"\"\" return _gridfunc.GridFunction_OwnFEC(self) OwnFEC = _swig_new_instance_method(_gridfunc.GridFunction_OwnFEC) def VectorDim(self):", "precision)\"\"\" return _gridfunc.GridFunction_SaveToFile(self, gf_file, precision) SaveToFile = _swig_new_instance_method(_gridfunc.GridFunction_SaveToFile) def WriteToStream(self,", "_swig_repr(self): try: strthis = \"proxy of \" + self.this.__repr__() except", "curl) GetCurl = _swig_new_instance_method(_gridfunc.GridFunction_GetCurl) def GetGradient(self, tr, grad): r\"\"\"GetGradient(GridFunction self,", "*args) ImposeBounds = _swig_new_instance_method(_gridfunc.GridFunction_ImposeBounds) def RestrictConforming(self): r\"\"\"RestrictConforming(GridFunction self)\"\"\" return _gridfunc.GridFunction_RestrictConforming(self)", "= _swig_new_instance_method(_gridfunc.GridFunction_ComputeDGFaceJumpError) def ComputeH1Error(self, *args): r\"\"\" ComputeH1Error(GridFunction self, Coefficient exsol,", "-> std::ostream __lshift__(std::ostream & out, QuadratureFunction qf) -> std::ostream &", "= Vector() _gridfunc.GridFunction_GetNodalValues(self, vec, args[0]) vec.thisown = 0 return vec.GetDataArray()", "*args) ComputeMaxError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeMaxError) def ComputeW11Error(self, exsol, exgrad, norm_type, elems=None,", "GridFunction rhs) -> GridFunction Assign(GridFunction self, double value) -> GridFunction", "ComputeElementMaxErrors(self, *args): r\"\"\" ComputeElementMaxErrors(GridFunction self, Coefficient exsol, Vector error, mfem::IntegrationRule", "r\"\"\" __lshift__(std::ostream & os, SparseMatrix mat) -> std::ostream __lshift__(std::ostream &", "_swig_new_instance_method(_gridfunc.GridFunction_ComputeElementMaxErrors) def ComputeFlux(self, blfi, flux, wcoef=True, 
subdomain=-1): r\"\"\"ComputeFlux(GridFunction self, BilinearFormIntegrator", "irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeDivError(self, exdiv, irs) ComputeDivError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeDivError)", "int n_) -> ExtrudeCoefficient\"\"\" _gridfunc.ExtrudeCoefficient_swiginit(self, _gridfunc.new_ExtrudeCoefficient(m, s, n_)) def Eval(self,", "*args): r\"\"\" GetHessians(GridFunction self, int i, IntegrationRule ir, DenseMatrix hess,", "char const * file, int precision=16) \"\"\" return _gridfunc.GridFunction_Save(self, *args)", "class _SwigNonDynamicMeta(type): \"\"\"Meta class to enforce nondynamic attributes (no new", "vec_field, int comp=0)\"\"\" return _gridfunc.GridFunction_ProjectVectorFieldOn(self, vec_field, comp) ProjectVectorFieldOn = _swig_new_instance_method(_gridfunc.GridFunction_ProjectVectorFieldOn)", "int i, int side, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr)", "__lshift__(*args): r\"\"\" __lshift__(std::ostream & os, SparseMatrix mat) -> std::ostream __lshift__(std::ostream", "ImposeBounds(self, *args): r\"\"\" ImposeBounds(GridFunction self, int i, Vector weights, Vector", "doc=\"The membership flag\") __repr__ = _swig_repr def MakeOwner(self, fec_): r\"\"\"MakeOwner(GridFunction", "\"\"\" return _gridfunc.GridFunction_ImposeBounds(self, *args) ImposeBounds = _swig_new_instance_method(_gridfunc.GridFunction_ImposeBounds) def RestrictConforming(self): r\"\"\"RestrictConforming(GridFunction", "DenseMatrix tr=None) \"\"\" return _gridfunc.GridFunction_GetValues(self, *args) GetValues = _swig_new_instance_method(_gridfunc.GridFunction_GetValues) def", "*args): r\"\"\" MakeRef(GridFunction self, Vector base, int offset, int size)", "double ComputeL1Error(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule const *[] irs=0) ->", "__imul__(self, v): ret = _gridfunc.GridFunction_imul(self, v) ret.thisown = 0 return", "int v_offset) \"\"\" return _gridfunc.GridFunction_MakeRef(self, 
*args) MakeRef = _swig_new_instance_method(_gridfunc.GridFunction_MakeRef) def", "self, GridFunction orig_func)\"\"\" return _gridfunc.GridFunction_GetBdrValuesFrom(self, orig_func) GetBdrValuesFrom = _swig_new_instance_method(_gridfunc.GridFunction_GetBdrValuesFrom) def", "tr) -> double\"\"\" return _gridfunc.GridFunction_GetDivergence(self, tr) GetDivergence = _swig_new_instance_method(_gridfunc.GridFunction_GetDivergence) def", "def ComputeL2Error(self, *args): r\"\"\" ComputeL2Error(GridFunction self, Coefficient exsol, mfem::IntegrationRule const", "*[] irs=0) -> double ComputeL1Error(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule const", "int tv_offset) \"\"\" return _gridfunc.GridFunction_MakeTRef(self, *args) MakeTRef = _swig_new_instance_method(_gridfunc.GridFunction_MakeTRef) def", "\"\"\" _gridfunc.QuadratureFunction_swiginit(self, _gridfunc.new_QuadratureFunction(*args)) __swig_destroy__ = _gridfunc.delete_QuadratureFunction def GetSpace(self): r\"\"\"GetSpace(QuadratureFunction self)", "__repr__ = _swig_repr def MakeOwner(self, fec_): r\"\"\"MakeOwner(GridFunction self, FiniteElementCollection fec_)\"\"\"", "ReorderByNodes(self): r\"\"\"ReorderByNodes(GridFunction self)\"\"\" return _gridfunc.GridFunction_ReorderByNodes(self) ReorderByNodes = _swig_new_instance_method(_gridfunc.GridFunction_ReorderByNodes) def GetNodalValues(self,", "self, int idx, DenseMatrix values) \"\"\" return _gridfunc.QuadratureFunction_GetElementValues(self, *args) GetElementValues", "_gridfunc.QuadratureFunction_swigregister(QuadratureFunction) def __lshift__(*args): r\"\"\" __lshift__(std::ostream & os, SparseMatrix mat) ->", "*args) ComputeElementL2Errors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL2Errors) def ComputeElementMaxErrors(self, *args): r\"\"\" ComputeElementMaxErrors(GridFunction self,", "-> double \"\"\" return _gridfunc.GridFunction_ComputeH1Error(self, *args) ComputeH1Error = 
_swig_new_instance_method(_gridfunc.GridFunction_ComputeH1Error) def", "_swig_new_instance_method(_gridfunc.GridFunction_GetFaceVectorValues) def GetLaplacians(self, *args): r\"\"\" GetLaplacians(GridFunction self, int i, IntegrationRule", "Vector lo_, Vector hi_) ImposeBounds(GridFunction self, int i, Vector weights,", "ProjectBdrCoefficientNormal(self, vcoeff, bdr_attr): r\"\"\"ProjectBdrCoefficientNormal(GridFunction self, VectorCoefficient vcoeff, intArray bdr_attr)\"\"\" return", "c): r\"\"\"iadd(GridFunction self, GridFunction c) -> GridFunction\"\"\" return _gridfunc.GridFunction_iadd(self, c)", "# Register QuadratureFunction in _gridfunc: _gridfunc.QuadratureFunction_swigregister(QuadratureFunction) def __lshift__(*args): r\"\"\" __lshift__(std::ostream", "return _gridfunc.GridFunction_GetElementAverages(self, avgs) GetElementAverages = _swig_new_instance_method(_gridfunc.GridFunction_GetElementAverages) def GetElementDofValues(self, el, dof_vals):", "VectorCoefficient exsol, mfem::IntegrationRule const *[] irs=0, intArray elems=None) -> double", "in _gridfunc: _gridfunc.QuadratureFunction_swigregister(QuadratureFunction) def __lshift__(*args): r\"\"\" __lshift__(std::ostream & os, SparseMatrix", "automatically generated by SWIG (http://www.swig.org). 
# Version 4.0.2 # #", "return _gridfunc.GridFunction_ProjectBdrCoefficientNormal(self, vcoeff, bdr_attr) ProjectBdrCoefficientNormal = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientNormal) def ProjectBdrCoefficientTangent(self, vcoeff,", "0): raise RuntimeError(\"Python 2.7 or later required\") # Import the", "import _gridfunc try: import builtins as __builtin__ except ImportError: import", "ip_num, Vector values) GetElementValues(QuadratureFunction self, int idx, int const ip_num,", "def GetNodalValues(self, *args): ''' GetNodalValues(i) -> GetNodalValues(vector, vdim) GetNodalValues(i, array<dobule>,", "return _gridfunc.GridFunction_FESpace(self, *args) FESpace = _swig_new_instance_method(_gridfunc.GridFunction_FESpace) def SetSpace(self, f): r\"\"\"SetSpace(GridFunction", "name, value): if hasattr(cls, name) and not isinstance(getattr(cls, name), property):", "\"\"\" return _gridfunc.QuadratureFunction_SetSpace(self, *args) SetSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetSpace) def GetVDim(self): r\"\"\"GetVDim(QuadratureFunction", "Vector vals, int vdim=1) GetValues(GridFunction self, int i, IntegrationRule ir,", "int offset, int size) MakeRef(GridFunction self, Vector base, int offset)", "ZZErrorEstimator(blfi, u, flux, error_estimates, aniso_flags=None, with_subdomains=1, with_coeff=False): r\"\"\"ZZErrorEstimator(BilinearFormIntegrator blfi, GridFunction", "val, int comp)\"\"\" return _gridfunc.GridFunction_GetVectorFieldNodalValues(self, val, comp) GetVectorFieldNodalValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldNodalValues)", "= _gridfunc.JumpScaling_ONE_OVER_H P_SQUARED_OVER_H = _gridfunc.JumpScaling_P_SQUARED_OVER_H def __init__(self, *args, **kwargs): r\"\"\"__init__(JumpScaling", "_swig_new_instance_method(_gridfunc.GridFunction_iadd) def isub(self, *args): r\"\"\" isub(GridFunction self, GridFunction c) ->", "_gridfunc.GridFunction_SaveVTK(self, out, field_name, ref) SaveVTK = 
_swig_new_instance_method(_gridfunc.GridFunction_SaveVTK) def SaveSTL(self, out,", "int idx, Vector values) GetElementValues(QuadratureFunction self, int idx, Vector values)", "gf1, gf2) ComputeElementLpDistance = _gridfunc.ComputeElementLpDistance class ExtrudeCoefficient(mfem._par.coefficient.Coefficient): r\"\"\"Proxy of C++", "ComputeHCurlError(self, exsol, excurl, irs=0): r\"\"\"ComputeHCurlError(GridFunction self, VectorCoefficient exsol, VectorCoefficient excurl,", "f)\"\"\" return _gridfunc.GridFunction_SetSpace(self, f) SetSpace = _swig_new_instance_method(_gridfunc.GridFunction_SetSpace) def MakeRef(self, *args):", "\"\"\" return _gridfunc.GridFunction_GetHessians(self, *args) GetHessians = _swig_new_instance_method(_gridfunc.GridFunction_GetHessians) def GetValuesFrom(self, orig_func):", "return _gridfunc.ZZErrorEstimator(blfi, u, flux, error_estimates, aniso_flags, with_subdomains, with_coeff) ZZErrorEstimator =", "= _swig_new_instance_method(_gridfunc.GridFunction_ProjectDiscCoefficient) def ProjectBdrCoefficient(self, *args): r\"\"\" ProjectBdrCoefficient(GridFunction self, Coefficient coeff,", "ir, Vector vals, int vdim=1) GetValues(GridFunction self, int i, IntegrationRule", "Vector tv, int tv_offset) \"\"\" return _gridfunc.GridFunction_MakeTRef(self, *args) MakeTRef =", "_gridfunc.GridFunction_GetTrueVector(self, *args) GetTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_GetTrueVector) def GetTrueDofs(self, tv): r\"\"\"GetTrueDofs(GridFunction self,", "__iadd__(self, v): ret = _gridfunc.GridFunction_iadd(self, v) ret.thisown = 0 return", "GridFunction __init__(GridFunction self, FiniteElementSpace f) -> GridFunction __init__(GridFunction self, FiniteElementSpace", "\"\"\" return _gridfunc.GridFunction_ComputeElementL1Errors(self, *args) ComputeElementL1Errors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL1Errors) def ComputeElementL2Errors(self, *args):", "aniso_flags, with_subdomains, with_coeff) ZZErrorEstimator 
= _gridfunc.ZZErrorEstimator def ComputeElementLpDistance(p, i, gf1,", "Extrude1DGridFunction = _gridfunc.Extrude1DGridFunction def __iadd__(self, v): ret = _gridfunc.GridFunction_iadd(self, v)", "MakeRef(GridFunction self, FiniteElementSpace f, double * v) MakeRef(GridFunction self, FiniteElementSpace", "dof_vals)\"\"\" return _gridfunc.GridFunction_GetElementDofValues(self, el, dof_vals) GetElementDofValues = _swig_new_instance_method(_gridfunc.GridFunction_GetElementDofValues) def ImposeBounds(self,", "int vdim=1) GetHessians(GridFunction self, int i, IntegrationRule ir, DenseMatrix hess,", "self, FiniteElementSpace f, Vector tv, int tv_offset) \"\"\" return _gridfunc.GridFunction_MakeTRef(self,", "r\"\"\"__init__(ExtrudeCoefficient self, Mesh m, Coefficient s, int n_) -> ExtrudeCoefficient\"\"\"", "exsol, mfem::IntegrationRule const *[] irs=0, intArray elems=None) -> double \"\"\"", "SetFromTrueVector(self): r\"\"\"SetFromTrueVector(GridFunction self)\"\"\" return _gridfunc.GridFunction_SetFromTrueVector(self) SetFromTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueVector) def GetValue(self,", "= _gridfunc.delete_QuadratureFunction def GetSpace(self): r\"\"\"GetSpace(QuadratureFunction self) -> QuadratureSpace\"\"\" return _gridfunc.QuadratureFunction_GetSpace(self)", "mfem._par.hash import mfem._par.vertex import mfem._par.fe_coll import mfem._par.lininteg import mfem._par.handle import", "_swig_new_instance_method(_gridfunc.GridFunction_ImposeBounds) def RestrictConforming(self): r\"\"\"RestrictConforming(GridFunction self)\"\"\" return _gridfunc.GridFunction_RestrictConforming(self) RestrictConforming = _swig_new_instance_method(_gridfunc.GridFunction_RestrictConforming)", "Save(QuadratureFunction self, char const * file, int precision=16) \"\"\" return", "return _gridfunc.GridFunction_GetVectorValue(self, *args) GetVectorValue = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorValue) def GetValues(self, *args): r\"\"\"", 
"GetVDim(self): r\"\"\"GetVDim(QuadratureFunction self) -> int\"\"\" return _gridfunc.QuadratureFunction_GetVDim(self) GetVDim = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetVDim)", "error, mfem::IntegrationRule const *[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementL2Errors(self, *args) ComputeElementL2Errors", "\"\"\" return _gridfunc.GridFunction_Assign(self, *args) Assign = _swig_new_instance_method(_gridfunc.GridFunction_Assign) def Update(self): r\"\"\"Update(GridFunction", "r\"\"\"Proxy of C++ mfem::QuadratureFunction class.\"\"\" thisown = property(lambda x: x.this.own(),", "_gridfunc.GridFunction_GetElementDofValues(self, el, dof_vals) GetElementDofValues = _swig_new_instance_method(_gridfunc.GridFunction_GetElementDofValues) def ImposeBounds(self, *args): r\"\"\"", "ComputeDGFaceJumpError(self, *args): r\"\"\" ComputeDGFaceJumpError(GridFunction self, Coefficient exsol, Coefficient ell_coeff, JumpScaling", "f, Vector tv, int tv_offset) \"\"\" return _gridfunc.GridFunction_MakeTRef(self, *args) MakeTRef", "exsol, exdiv, irs) ComputeHDivError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeHDivError) def ComputeHCurlError(self, exsol, excurl,", "vdim_) SetVDim = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetVDim) def OwnsSpace(self): r\"\"\"OwnsSpace(QuadratureFunction self) -> bool\"\"\"", "field_name, ref) SaveVTK = _swig_new_instance_method(_gridfunc.GridFunction_SaveVTK) def SaveSTL(self, out, TimesToRefine=1): r\"\"\"SaveSTL(GridFunction", "ProjectBdrCoefficientTangent(self, vcoeff, bdr_attr): r\"\"\"ProjectBdrCoefficientTangent(GridFunction self, VectorCoefficient vcoeff, intArray bdr_attr)\"\"\" return", "self, Vector base, int offset) MakeRef(GridFunction self, FiniteElementSpace f, double", "Vector curl)\"\"\" return _gridfunc.GridFunction_GetCurl(self, tr, curl) GetCurl = _swig_new_instance_method(_gridfunc.GridFunction_GetCurl) def", "class QuadratureFunction(mfem._par.vector.Vector): r\"\"\"Proxy 
of C++ mfem::QuadratureFunction class.\"\"\" thisown = property(lambda", "to %s\" % cls) return set_class_attr def _swig_add_metaclass(metaclass): \"\"\"Class decorator", "GridFunction(mfem._par.vector.Vector): r\"\"\"Proxy of C++ mfem::GridFunction class.\"\"\" thisown = property(lambda x:", "# Register GridFunction in _gridfunc: _gridfunc.GridFunction_swigregister(GridFunction) class JumpScaling(object): r\"\"\"Proxy of", "GetValues(GridFunction self, int i, IntegrationRule ir, Vector vals, DenseMatrix tr,", "thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc=\"The", "values) GetElementValues(QuadratureFunction self, int idx, DenseMatrix values) GetElementValues(QuadratureFunction self, int", "_swig_python_version_info if _swig_python_version_info < (2, 7, 0): raise RuntimeError(\"Python 2.7", "_gridfunc.new_JumpScaling(*args, **kwargs)) def Eval(self, h, p): r\"\"\"Eval(JumpScaling self, double h,", "_swig_new_instance_method(_gridfunc.GridFunction_ProjectCoefficient) ARITHMETIC = _gridfunc.GridFunction_ARITHMETIC HARMONIC = _gridfunc.GridFunction_HARMONIC def ProjectDiscCoefficient(self, *args):", "el, Vector dof_vals)\"\"\" return _gridfunc.GridFunction_GetElementDofValues(self, el, dof_vals) GetElementDofValues = _swig_new_instance_method(_gridfunc.GridFunction_GetElementDofValues)", "import _gridfunc else: import _gridfunc try: import builtins as __builtin__", "curl)\"\"\" return _gridfunc.GridFunction_GetCurl(self, tr, curl) GetCurl = _swig_new_instance_method(_gridfunc.GridFunction_GetCurl) def GetGradient(self,", "GetElementValues(QuadratureFunction self, int idx, DenseMatrix values) GetElementValues(QuadratureFunction self, int idx,", "p, VectorCoefficient exsol, Coefficient weight=None, VectorCoefficient v_weight=None, mfem::IntegrationRule const *[]", "f) -> GridFunction __init__(GridFunction self, FiniteElementSpace f, double * data)", "import mfem._par.element import mfem._par.table import mfem._par.hash import 
mfem._par.vertex import mfem._par.fe_coll", "ProjectVectorFieldOn(self, vec_field, comp=0): r\"\"\"ProjectVectorFieldOn(GridFunction self, GridFunction vec_field, int comp=0)\"\"\" return", "SetVDim(self, vdim_): r\"\"\"SetVDim(QuadratureFunction self, int vdim_)\"\"\" return _gridfunc.QuadratureFunction_SetVDim(self, vdim_) SetVDim", "mfem._par.densemat import mfem._par.eltrans import mfem._par.fe import mfem._par.geom import mfem._par.fespace import", "StringIO): r\"\"\"WriteToStream(GridFunction self, PyObject * StringIO) -> PyObject *\"\"\" return", "precision=16) \"\"\" return _gridfunc.GridFunction_Save(self, *args) Save = _swig_new_instance_method(_gridfunc.GridFunction_Save) def SaveGZ(self,", "def ReorderByNodes(self): r\"\"\"ReorderByNodes(GridFunction self)\"\"\" return _gridfunc.GridFunction_ReorderByNodes(self) ReorderByNodes = _swig_new_instance_method(_gridfunc.GridFunction_ReorderByNodes) def", "attributes to %s\" % self) return set_instance_attr def _swig_setattr_nondynamic_class_variable(set): def", "elems=None, irs=0): r\"\"\"ComputeW11Error(GridFunction self, Coefficient exsol, VectorCoefficient exgrad, int norm_type,", "vals, tr, comp=0): r\"\"\"GetVectorFieldValues(GridFunction self, int i, IntegrationRule ir, DenseMatrix", "double\"\"\" return _gridfunc.GridFunction_ComputeCurlError(self, excurl, irs) ComputeCurlError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeCurlError) def ComputeDivError(self,", "return _gridfunc.GridFunction_ReorderByNodes(self) ReorderByNodes = _swig_new_instance_method(_gridfunc.GridFunction_ReorderByNodes) def GetNodalValues(self, *args): ''' GetNodalValues(i)", "irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeW11Error(self, exsol, exgrad, norm_type, elems, irs)", "membership flag\") __repr__ = _swig_repr CONSTANT = _gridfunc.JumpScaling_CONSTANT ONE_OVER_H =", "*\"\"\" return _gridfunc.GridFunction_WriteToStream(self, StringIO) WriteToStream = 
_swig_new_instance_method(_gridfunc.GridFunction_WriteToStream) def iadd(self, c):", "self)\"\"\" return _gridfunc.GridFunction_SetFromTrueVector(self) SetFromTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueVector) def GetValue(self, *args): r\"\"\"", "Coefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0) ComputeElementMaxErrors(GridFunction self,", "cls.__bases__, cls.__dict__.copy()) return wrapper class _SwigNonDynamicMeta(type): \"\"\"Meta class to enforce", "= _swig_new_instance_method(_gridfunc.GridFunction_ImposeBounds) def RestrictConforming(self): r\"\"\"RestrictConforming(GridFunction self)\"\"\" return _gridfunc.GridFunction_RestrictConforming(self) RestrictConforming =", "Coefficient weight=None, mfem::IntegrationRule const *[] irs=0) -> double ComputeLpError(GridFunction self,", "Vector v) -> GridFunction \"\"\" return _gridfunc.GridFunction_Assign(self, *args) Assign =", "you know what you are doing--modify # the SWIG interface", "ComputeCurlError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeCurlError) def ComputeDivError(self, exdiv, irs=0): r\"\"\"ComputeDivError(GridFunction self, Coefficient", "except ImportError: import __builtin__ _swig_new_instance_method = _gridfunc.SWIG_PyInstanceMethod_New _swig_new_static_method = _gridfunc.SWIG_PyStaticMethod_New", "self, QuadratureFunction orig) -> QuadratureFunction __init__(QuadratureFunction self, QuadratureSpace qspace_, int", "enforce nondynamic attributes (no new attributes) for a class\"\"\" __setattr__", "OwnFEC = _swig_new_instance_method(_gridfunc.GridFunction_OwnFEC) def VectorDim(self): r\"\"\"VectorDim(GridFunction self) -> int\"\"\" return", "ComputeL1Error(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule const *[] irs=0) -> double", "_gridfunc.GridFunction_isub(self, v) ret.thisown = 0 return self def __idiv__(self, v):", "h, p) Eval = _swig_new_instance_method(_gridfunc.JumpScaling_Eval) __swig_destroy__ = 
_gridfunc.delete_JumpScaling # Register", "self, Coefficient coeff) ProjectCoefficient(GridFunction self, Coefficient coeff, intArray dofs, int", "_gridfunc.QuadratureFunction_SetOwnsSpace(self, own) SetOwnsSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetOwnsSpace) def GetElementIntRule(self, idx): r\"\"\"GetElementIntRule(QuadratureFunction self,", "return _gridfunc.GridFunction_GetDerivative(self, comp, der_comp, der) GetDerivative = _swig_new_instance_method(_gridfunc.GridFunction_GetDerivative) def GetDivergence(self,", "__repr__ = _swig_repr def __init__(self, m, s, n_): r\"\"\"__init__(ExtrudeCoefficient self,", "MakeTRef(self, *args): r\"\"\" MakeTRef(GridFunction self, FiniteElementSpace f, double * tv)", "FESpace(GridFunction self) -> FiniteElementSpace FESpace(GridFunction self) -> FiniteElementSpace \"\"\" return", "return _gridfunc.GridFunction_GetBdrValuesFrom(self, orig_func) GetBdrValuesFrom = _swig_new_instance_method(_gridfunc.GridFunction_GetBdrValuesFrom) def GetVectorFieldValues(self, i, ir,", "self, mfem::Coefficient *[] exsol, mfem::IntegrationRule const *[] irs=0) -> double", "_gridfunc.GridFunction_SaveSTL(self, out, TimesToRefine) SaveSTL = _swig_new_instance_method(_gridfunc.GridFunction_SaveSTL) __swig_destroy__ = _gridfunc.delete_GridFunction def", "self, FiniteElementSpace f, double * v) MakeRef(GridFunction self, FiniteElementSpace f,", "__idiv__(self, v): ret = _gridfunc.GridFunction_idiv(self, v) ret.thisown = 0 return", "self, mfem::Coefficient *[] coeff) \"\"\" return _gridfunc.GridFunction_ProjectCoefficient(self, *args) ProjectCoefficient =", "self, int idx, int const ip_num, Vector values) GetElementValues(QuadratureFunction self,", "file instead. 
from sys import version_info as _swig_python_version_info if _swig_python_version_info", "intArray attr) ProjectBdrCoefficient(GridFunction self, mfem::Coefficient *[] coeff, intArray attr) \"\"\"", "SetSpace(QuadratureFunction self, QuadratureSpace qspace_, double * qf_data, int vdim_=-1) \"\"\"", "ElementTransformation T, IntegrationPoint ip) -> double\"\"\" return _gridfunc.ExtrudeCoefficient_Eval(self, T, ip)", "-> double\"\"\" return _gridfunc.JumpScaling_Eval(self, h, p) Eval = _swig_new_instance_method(_gridfunc.JumpScaling_Eval) __swig_destroy__", "self, Vector tv)\"\"\" return _gridfunc.GridFunction_GetTrueDofs(self, tv) GetTrueDofs = _swig_new_instance_method(_gridfunc.GridFunction_GetTrueDofs) def", "out, Mesh mesh) -> std::ostream __lshift__(std::ostream & out, GridFunction sol)", "GetVectorFieldValues(self, i, ir, vals, tr, comp=0): r\"\"\"GetVectorFieldValues(GridFunction self, int i,", "try: import builtins as __builtin__ except ImportError: import __builtin__ _swig_new_instance_method", "VectorCoefficient vcoeff, intArray dofs) ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff, int attribute)", "return _gridfunc.QuadratureFunction_SetSpace(self, *args) SetSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetSpace) def GetVDim(self): r\"\"\"GetVDim(QuadratureFunction self)", "*args): r\"\"\" GetValue(GridFunction self, int i, IntegrationPoint ip, int vdim=1)", "_swig_new_instance_method(_gridfunc.GridFunction_GetBdrValuesFrom) def GetVectorFieldValues(self, i, ir, vals, tr, comp=0): r\"\"\"GetVectorFieldValues(GridFunction self,", "mfem::Coefficient *[] exsol, mfem::IntegrationRule const *[] irs=0) -> double ComputeL2Error(GridFunction", "__isub__(self, v): ret = _gridfunc.GridFunction_isub(self, v) ret.thisown = 0 return", "self, double const p, Coefficient exsol, Vector error, Coefficient weight=None,", "return _gridfunc.GridFunction_GetTrueVector(self, *args) GetTrueVector = 
_swig_new_instance_method(_gridfunc.GridFunction_GetTrueVector) def GetTrueDofs(self, tv): r\"\"\"GetTrueDofs(GridFunction", "i, IntegrationRule ir, DenseMatrix hess, int vdim=1) GetHessians(GridFunction self, int", "unless you know what you are doing--modify # the SWIG", "irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeH1Error(self, *args) ComputeH1Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeH1Error)", "int const precision)\"\"\" return _gridfunc.GridFunction_SaveToFile(self, gf_file, precision) SaveToFile = _swig_new_instance_method(_gridfunc.GridFunction_SaveToFile)", "ir, Vector vals, DenseMatrix tr, int vdim=1) GetValues(GridFunction self, ElementTransformation", "exsol, Coefficient weight=None, mfem::IntegrationRule const *[] irs=0) -> double ComputeLpError(GridFunction", "-> ExtrudeCoefficient\"\"\" _gridfunc.ExtrudeCoefficient_swiginit(self, _gridfunc.new_ExtrudeCoefficient(m, s, n_)) def Eval(self, T, ip):", "= _swig_new_instance_method(_gridfunc.GridFunction_ComputeLpError) def ComputeElementLpErrors(self, *args): r\"\"\" ComputeElementLpErrors(GridFunction self, double const", "Update(self): r\"\"\"Update(GridFunction self)\"\"\" return _gridfunc.GridFunction_Update(self) Update = _swig_new_instance_method(_gridfunc.GridFunction_Update) def FESpace(self,", "exsol, mfem::IntegrationRule const *[] irs=0) -> double ComputeL1Error(GridFunction self, VectorCoefficient", "r\"\"\" MakeRef(GridFunction self, Vector base, int offset, int size) MakeRef(GridFunction", "Vector laps, int vdim=1) GetLaplacians(GridFunction self, int i, IntegrationRule ir,", "self, GridFunction orig_func)\"\"\" return _gridfunc.GridFunction_GetValuesFrom(self, orig_func) GetValuesFrom = _swig_new_instance_method(_gridfunc.GridFunction_GetValuesFrom) def", "*[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeMaxError(self, *args) ComputeMaxError =", "r\"\"\"OwnFEC(GridFunction self) -> FiniteElementCollection\"\"\" return 
_gridfunc.GridFunction_OwnFEC(self) OwnFEC = _swig_new_instance_method(_gridfunc.GridFunction_OwnFEC) def", "int i, IntegrationPoint ip, int vdim=1) -> double GetValue(GridFunction self,", "T, IntegrationPoint ip) -> double\"\"\" return _gridfunc.ExtrudeCoefficient_Eval(self, T, ip) Eval", "& \"\"\" return _gridfunc.__lshift__(*args) __lshift__ = _gridfunc.__lshift__ def ZZErrorEstimator(blfi, u,", "_gridfunc.delete_ExtrudeCoefficient # Register ExtrudeCoefficient in _gridfunc: _gridfunc.ExtrudeCoefficient_swigregister(ExtrudeCoefficient) def Extrude1DGridFunction(mesh, mesh2d,", "Coefficient coeff, intArray attr) ProjectBdrCoefficient(GridFunction self, VectorCoefficient vcoeff, intArray attr)", "double const p, VectorCoefficient exsol, Vector error, Coefficient weight=None, VectorCoefficient", "r\"\"\"MakeOwner(GridFunction self, FiniteElementCollection fec_)\"\"\" return _gridfunc.GridFunction_MakeOwner(self, fec_) MakeOwner = _swig_new_instance_method(_gridfunc.GridFunction_MakeOwner)", "name), property): set(cls, name, value) else: raise AttributeError(\"You cannot add", "i, IntegrationRule ir, Vector laps, int vdim=1) GetLaplacians(GridFunction self, int", "blfi, GridFunction u, GridFunction flux, Vector error_estimates, intArray aniso_flags=None, int", "self, int const elem, IntegrationRule ir, DenseMatrix grad) \"\"\" return", "= _gridfunc.GridFunction_imul(self, v) ret.thisown = 0 return self GridFunction.__iadd__ =", "Coefficient exsol, Coefficient weight=None, mfem::IntegrationRule const *[] irs=0) -> double", "tr, IntegrationRule ir, DenseMatrix grad) GetGradients(GridFunction self, int const elem,", "decorator for adding a metaclass to a SWIG wrapped class", "*args): r\"\"\" ComputeMaxError(GridFunction self, Coefficient exsol, mfem::IntegrationRule const *[] irs=0)", "mfem::Coefficient *[] exsol, mfem::IntegrationRule const *[] irs=0) -> double ComputeMaxError(GridFunction", "exdiv, mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return 
_gridfunc.GridFunction_ComputeHDivError(self, exsol,", "sys import version_info as _swig_python_version_info if _swig_python_version_info < (2, 7,", "def GetValue(self, *args): r\"\"\" GetValue(GridFunction self, int i, IntegrationPoint ip,", "-> QuadratureFunction __init__(QuadratureFunction self, Mesh mesh, std::istream & _in) ->", "_swig_add_metaclass(metaclass): \"\"\"Class decorator for adding a metaclass to a SWIG", "GetTrueVector(GridFunction self) -> Vector GetTrueVector(GridFunction self) -> Vector \"\"\" return", "generated by SWIG (http://www.swig.org). # Version 4.0.2 # # Do", "file, precision) SaveGZ = _swig_new_instance_method(_gridfunc.GridFunction_SaveGZ) # Register GridFunction in _gridfunc:", "GridFunction\"\"\" return _gridfunc.GridFunction_imul(self, c) imul = _swig_new_instance_method(_gridfunc.GridFunction_imul) def idiv(self, c):", "self, char const * file, int precision=16)\"\"\" return _gridfunc.GridFunction_SaveGZ(self, file,", "T, IntegrationPoint ip, Vector val, Vector tr=None) \"\"\" return _gridfunc.GridFunction_GetVectorValue(self,", "r\"\"\"GetFaceValues(GridFunction self, int i, int side, IntegrationRule ir, Vector vals,", "self, int i, Vector weights, double min_=0.0, double max_=mfem::infinity()) \"\"\"", "= _swig_new_instance_method(_gridfunc.GridFunction_OwnFEC) def VectorDim(self): r\"\"\"VectorDim(GridFunction self) -> int\"\"\" return _gridfunc.GridFunction_VectorDim(self)", "IntegrationRule ir, Vector laps, int vdim=1) GetLaplacians(GridFunction self, int i,", "ny) -> GridFunction\"\"\" return _gridfunc.Extrude1DGridFunction(mesh, mesh2d, sol, ny) Extrude1DGridFunction =", "mfem::IntegrationRule const *[] irs=0) ComputeElementMaxErrors(GridFunction self, VectorCoefficient exsol, Vector error,", "*[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeHDivError(self, exsol, exdiv, irs) ComputeHDivError", "hess, int vdim=1) GetHessians(GridFunction self, int i, IntegrationRule ir, DenseMatrix", "IntegrationPoint ip) -> 
double\"\"\" return _gridfunc.ExtrudeCoefficient_Eval(self, T, ip) Eval =", "QuadratureSpace qspace_, int vdim_=-1) SetSpace(QuadratureFunction self, QuadratureSpace qspace_, double *", "_swig_repr def __init__(self, *args): r\"\"\" __init__(QuadratureFunction self) -> QuadratureFunction __init__(QuadratureFunction", "GetGradients(GridFunction self, ElementTransformation tr, IntegrationRule ir, DenseMatrix grad) GetGradients(GridFunction self,", "error, mfem::IntegrationRule const *[] irs=0) ComputeElementL2Errors(GridFunction self, VectorCoefficient exsol, Vector", "class.\"\"\" thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v),", "error_estimates, aniso_flags=None, with_subdomains=1, with_coeff=False): r\"\"\"ZZErrorEstimator(BilinearFormIntegrator blfi, GridFunction u, GridFunction flux,", "ell_coeff, double Nu, mfem::IntegrationRule const *[] irs=0) -> double \"\"\"", "base, int offset, int size) MakeRef(GridFunction self, Vector base, int", "_gridfunc.SWIG_PyInstanceMethod_New _swig_new_static_method = _gridfunc.SWIG_PyStaticMethod_New def _swig_repr(self): try: strthis = \"proxy", "*[] coeff, intArray attr) \"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficient(self, *args) ProjectBdrCoefficient =", "const p, Coefficient exsol, Coefficient weight=None, mfem::IntegrationRule const *[] irs=0)", "tv_offset) \"\"\" return _gridfunc.GridFunction_MakeTRef(self, *args) MakeTRef = _swig_new_instance_method(_gridfunc.GridFunction_MakeTRef) def SaveVTK(self,", "VectorCoefficient exgrad, Coefficient ell_coef, double Nu, int norm_type) -> double", "int precision=16) \"\"\" return _gridfunc.GridFunction_Save(self, *args) Save = _swig_new_instance_method(_gridfunc.GridFunction_Save) def", "*args): r\"\"\" __init__(GridFunction self) -> GridFunction __init__(GridFunction self, GridFunction orig)", "GridFunction\"\"\" return _gridfunc.Extrude1DGridFunction(mesh, mesh2d, sol, ny) Extrude1DGridFunction = _gridfunc.Extrude1DGridFunction def", "idx, 
DenseMatrix values) GetElementValues(QuadratureFunction self, int idx, DenseMatrix values) \"\"\"", "p, Coefficient exsol, Coefficient weight=None, mfem::IntegrationRule const *[] irs=0) ->", "return _gridfunc.GridFunction_GetValue(self, *args) GetValue = _swig_new_instance_method(_gridfunc.GridFunction_GetValue) def GetVectorValue(self, *args): r\"\"\"", "Vector error, mfem::IntegrationRule const *[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementL2Errors(self, *args)", "def GetTrueVector(self, *args): r\"\"\" GetTrueVector(GridFunction self) -> Vector GetTrueVector(GridFunction self)", "\"\"\" return _gridfunc.GridFunction_ProjectDiscCoefficient(self, *args) ProjectDiscCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectDiscCoefficient) def ProjectBdrCoefficient(self, *args):", "_gridfunc.GridFunction_GetElementAverages(self, avgs) GetElementAverages = _swig_new_instance_method(_gridfunc.GridFunction_GetElementAverages) def GetElementDofValues(self, el, dof_vals): r\"\"\"GetElementDofValues(GridFunction", "*[] exsol, mfem::IntegrationRule const *[] irs=0) -> double ComputeMaxError(GridFunction self,", "flux, error_estimates, aniso_flags=None, with_subdomains=1, with_coeff=False): r\"\"\"ZZErrorEstimator(BilinearFormIntegrator blfi, GridFunction u, GridFunction", "ref): r\"\"\"SaveVTK(GridFunction self, std::ostream & out, std::string const & field_name,", "= _swig_new_instance_method(_gridfunc.GridFunction_ComputeDivError) def ComputeDGFaceJumpError(self, *args): r\"\"\" ComputeDGFaceJumpError(GridFunction self, Coefficient exsol,", "_gridfunc.GridFunction_ComputeElementMaxErrors(self, *args) ComputeElementMaxErrors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementMaxErrors) def ComputeFlux(self, blfi, flux, wcoef=True,", "__init__(QuadratureFunction self, QuadratureSpace qspace_, int vdim_=1) -> QuadratureFunction __init__(QuadratureFunction self,", "def ComputeCurlError(self, excurl, irs=0): 
r\"\"\"ComputeCurlError(GridFunction self, VectorCoefficient excurl, mfem::IntegrationRule const", "-> std::ostream __lshift__(std::ostream & out, GridFunction sol) -> std::ostream __lshift__(std::ostream", "_gridfunc.GridFunction_ProjectBdrCoefficientTangent(self, vcoeff, bdr_attr) ProjectBdrCoefficientTangent = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientTangent) def ComputeL2Error(self, *args): r\"\"\"", "IntegrationRule ir, DenseMatrix hess, DenseMatrix tr, int vdim=1) \"\"\" return", "char const * file, int precision=16)\"\"\" return _gridfunc.QuadratureFunction_SaveGZ(self, file, precision)", "coeff) ProjectDiscCoefficient(GridFunction self, Coefficient coeff, mfem::GridFunction::AvgType type) ProjectDiscCoefficient(GridFunction self, VectorCoefficient", "r\"\"\"ComputeHDivError(GridFunction self, VectorCoefficient exsol, Coefficient exdiv, mfem::IntegrationRule const *[] irs=0)", "VectorCoefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0) \"\"\" return", "version_info as _swig_python_version_info if _swig_python_version_info < (2, 7, 0): raise", "exsol, Vector error, mfem::IntegrationRule const *[] irs=0) ComputeElementL2Errors(GridFunction self, VectorCoefficient", "side, ir, vals, tr): r\"\"\"GetFaceVectorValues(GridFunction self, int i, int side,", "mfem::IntegrationRule const *[] irs=0) -> double ComputeL1Error(GridFunction self, VectorCoefficient exsol,", "x.this.own(v), doc=\"The membership flag\") __repr__ = _swig_repr def MakeOwner(self, fec_):", "gf_file, precision): r\"\"\"SaveToFile(GridFunction self, char const * gf_file, int const", "self, ElementTransformation tr) -> double\"\"\" return _gridfunc.GridFunction_GetDivergence(self, tr) GetDivergence =", "-> GridFunction\"\"\" return _gridfunc.GridFunction_iadd(self, c) iadd = _swig_new_instance_method(_gridfunc.GridFunction_iadd) def isub(self,", "def isub(self, *args): r\"\"\" isub(GridFunction self, GridFunction c) -> GridFunction", "ip, int comp=0, 
Vector tr=None) -> double \"\"\" return _gridfunc.GridFunction_GetValue(self,", "self, Coefficient exsol, mfem::IntegrationRule const *[] irs=0) -> double ComputeL1Error(GridFunction", "mfem::IntegrationRule const *[] irs=0) -> double ComputeLpError(GridFunction self, double const", "values) GetElementValues(QuadratureFunction self, int idx, Vector values) GetElementValues(QuadratureFunction self, int", "x: x.this.own(), lambda x, v: x.this.own(v), doc=\"The membership flag\") __repr__", "coeff) ProjectCoefficient(GridFunction self, Coefficient coeff, intArray dofs, int vd=0) ProjectCoefficient(GridFunction", "self, int i, int side, IntegrationRule ir, Vector vals, DenseMatrix", "PyObject * StringIO) -> PyObject *\"\"\" return _gridfunc.GridFunction_WriteToStream(self, StringIO) WriteToStream", "exdiv, mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeDivError(self, exdiv,", "const p, VectorCoefficient exsol, Vector error, Coefficient weight=None, VectorCoefficient v_weight=None,", "= _swig_new_instance_method(_gridfunc.GridFunction_GetValues) def GetVectorValues(self, *args): r\"\"\" GetVectorValues(GridFunction self, int i,", "ElementTransformation T, IntegrationPoint ip, int comp=0, Vector tr=None) -> double", "self) -> Vector \"\"\" return _gridfunc.GridFunction_GetTrueVector(self, *args) GetTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_GetTrueVector)", "grad) \"\"\" return _gridfunc.GridFunction_GetGradients(self, *args) GetGradients = _swig_new_instance_method(_gridfunc.GridFunction_GetGradients) def GetVectorGradient(self,", "int i, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr) GetVectorValues(GridFunction self,", "_swig_new_instance_method(_gridfunc.QuadratureFunction_OwnsSpace) def SetOwnsSpace(self, own): r\"\"\"SetOwnsSpace(QuadratureFunction self, bool own)\"\"\" return _gridfunc.QuadratureFunction_SetOwnsSpace(self,", "== \"thisown\": self.this.own(value) elif name == \"this\": set(self, 
name, value)", "out, TimesToRefine=1): r\"\"\"SaveSTL(GridFunction self, std::ostream & out, int TimesToRefine=1)\"\"\" return", "_gridfunc.GridFunction_GetGradient(self, tr, grad) GetGradient = _swig_new_instance_method(_gridfunc.GridFunction_GetGradient) def GetGradients(self, *args): r\"\"\"", "_swig_new_instance_method(_gridfunc.GridFunction_GetLaplacians) def GetHessians(self, *args): r\"\"\" GetHessians(GridFunction self, int i, IntegrationRule", "args[0]) vec.thisown = 0 return vec.GetDataArray() else: return _gridfunc.GridFunction_GetNodalValues(self, *args)", "& out) Save(GridFunction self, char const * fname, int precision=16)", "const ip_num, Vector values) GetElementValues(QuadratureFunction self, int idx, DenseMatrix values)", "= _swig_new_instance_method(_gridfunc.GridFunction_MakeRef) def MakeTRef(self, *args): r\"\"\" MakeTRef(GridFunction self, FiniteElementSpace f,", "side, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr) -> int\"\"\" return", "GridFunction __init__(GridFunction self, Mesh m, std::istream & input) -> GridFunction", "*args): r\"\"\" ComputeElementL2Errors(GridFunction self, Coefficient exsol, Vector error, mfem::IntegrationRule const", "_gridfunc.QuadratureFunction_GetVDim(self) GetVDim = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetVDim) def SetVDim(self, vdim_): r\"\"\"SetVDim(QuadratureFunction self, int", "_gridfunc.ExtrudeCoefficient_swigregister(ExtrudeCoefficient) def Extrude1DGridFunction(mesh, mesh2d, sol, ny): r\"\"\"Extrude1DGridFunction(Mesh mesh, Mesh mesh2d,", "__name__: from . 
import _gridfunc else: import _gridfunc try: import", "Coefficient weight=None, VectorCoefficient v_weight=None, mfem::IntegrationRule const *[] irs=0) -> double", "r\"\"\" Save(QuadratureFunction self, std::ostream & out) Save(QuadratureFunction self, char const", "tr=None) \"\"\" return _gridfunc.GridFunction_GetVectorValues(self, *args) GetVectorValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorValues) def GetFaceValues(self,", "self, VectorCoefficient exsol, VectorCoefficient excurl, mfem::IntegrationRule const *[] irs=0) ->", "qspace_, double * qf_data, int vdim_=-1) \"\"\" return _gridfunc.QuadratureFunction_SetSpace(self, *args)", "int const ny) -> GridFunction\"\"\" return _gridfunc.Extrude1DGridFunction(mesh, mesh2d, sol, ny)", "irs=0): r\"\"\"ComputeHCurlError(GridFunction self, VectorCoefficient exsol, VectorCoefficient excurl, mfem::IntegrationRule const *[]", "int comp=0)\"\"\" return _gridfunc.GridFunction_ProjectVectorFieldOn(self, vec_field, comp) ProjectVectorFieldOn = _swig_new_instance_method(_gridfunc.GridFunction_ProjectVectorFieldOn) def", "self, Coefficient coeff, mfem::GridFunction::AvgType type) ProjectDiscCoefficient(GridFunction self, VectorCoefficient coeff, mfem::GridFunction::AvgType", "hasattr(self, name) and isinstance(getattr(type(self), name), property): set(self, name, value) else:", "idx, Vector values) GetElementValues(QuadratureFunction self, int idx, int const ip_num,", "isub(self, *args): r\"\"\" isub(GridFunction self, GridFunction c) -> GridFunction isub(GridFunction", "raise RuntimeError(\"Python 2.7 or later required\") # Import the low-level", "_gridfunc.GridFunction_RestrictConforming(self) RestrictConforming = _swig_new_instance_method(_gridfunc.GridFunction_RestrictConforming) def ProjectGridFunction(self, src): r\"\"\"ProjectGridFunction(GridFunction self, GridFunction", "__lshift__(std::ostream & out, GridFunction sol) -> std::ostream __lshift__(std::ostream & out,", "exsol, mfem::IntegrationRule 
const *[] irs=0) -> double ComputeMaxError(GridFunction self, VectorCoefficient", "return _gridfunc.GridFunction_RestrictConforming(self) RestrictConforming = _swig_new_instance_method(_gridfunc.GridFunction_RestrictConforming) def ProjectGridFunction(self, src): r\"\"\"ProjectGridFunction(GridFunction self,", "def ImposeBounds(self, *args): r\"\"\" ImposeBounds(GridFunction self, int i, Vector weights,", "Vector tv)\"\"\" return _gridfunc.GridFunction_GetTrueDofs(self, tv) GetTrueDofs = _swig_new_instance_method(_gridfunc.GridFunction_GetTrueDofs) def SetTrueVector(self):", "in _gridfunc: _gridfunc.JumpScaling_swigregister(JumpScaling) class QuadratureFunction(mfem._par.vector.Vector): r\"\"\"Proxy of C++ mfem::QuadratureFunction class.\"\"\"", "gf2): r\"\"\"ComputeElementLpDistance(double p, int i, GridFunction gf1, GridFunction gf2) ->", "else: raise AttributeError(\"You cannot add class attributes to %s\" %", "name, value) else: raise AttributeError(\"You cannot add instance attributes to", "AttributeError(\"You cannot add instance attributes to %s\" % self) return", "_swig_new_instance_method(_gridfunc.QuadratureFunction_SaveGZ) # Register QuadratureFunction in _gridfunc: _gridfunc.QuadratureFunction_swigregister(QuadratureFunction) def __lshift__(*args): r\"\"\"", "return _gridfunc.QuadratureFunction_GetSpace(self) GetSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetSpace) def SetSpace(self, *args): r\"\"\" SetSpace(QuadratureFunction", "ComputeDGFaceJumpError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeDGFaceJumpError) def ComputeH1Error(self, *args): r\"\"\" ComputeH1Error(GridFunction self, Coefficient", "GridFunction gf2) -> double\"\"\" return _gridfunc.ComputeElementLpDistance(p, i, gf1, gf2) ComputeElementLpDistance", "mfem::ExtrudeCoefficient class.\"\"\" thisown = property(lambda x: x.this.own(), lambda x, v:", "i, IntegrationRule ir, Vector vals, DenseMatrix tr, int vdim=1) GetValues(GridFunction", "the SWIG interface 
file instead. from sys import version_info as", "flag\") __repr__ = _swig_repr def __init__(self, m, s, n_): r\"\"\"__init__(ExtrudeCoefficient", "*args): r\"\"\" ComputeL2Error(GridFunction self, Coefficient exsol, mfem::IntegrationRule const *[] irs=0)", "x.this.own(), lambda x, v: x.this.own(v), doc=\"The membership flag\") __repr__ =", "val) GetVectorValue(GridFunction self, ElementTransformation T, IntegrationPoint ip, Vector val, Vector", "_gridfunc.GridFunction_ProjectDiscCoefficient(self, *args) ProjectDiscCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectDiscCoefficient) def ProjectBdrCoefficient(self, *args): r\"\"\" ProjectBdrCoefficient(GridFunction", "double * tv) MakeTRef(GridFunction self, FiniteElementSpace f, Vector tv, int", "r\"\"\" ProjectCoefficient(GridFunction self, Coefficient coeff) ProjectCoefficient(GridFunction self, Coefficient coeff, intArray", "r\"\"\" GetVectorValues(GridFunction self, int i, IntegrationRule ir, DenseMatrix vals, DenseMatrix", "v) ret.thisown = 0 return self def __idiv__(self, v): ret", "self) -> int\"\"\" return _gridfunc.GridFunction_VectorDim(self) VectorDim = _swig_new_instance_method(_gridfunc.GridFunction_VectorDim) def GetTrueVector(self,", "exsol, Coefficient ell_coeff, JumpScaling jump_scaling, mfem::IntegrationRule const *[] irs=0) ->", "SWIG interface file instead. 
from sys import version_info as _swig_python_version_info", "self, int vdim_)\"\"\" return _gridfunc.QuadratureFunction_SetVDim(self, vdim_) SetVDim = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetVDim) def", "self, FiniteElementSpace fes, Vector v, int offset) -> GridFunction \"\"\"", "*args) ComputeL2Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeL2Error) def ComputeGradError(self, exgrad, irs=0): r\"\"\"ComputeGradError(GridFunction self,", "if len(args) == 1: vec = Vector() _gridfunc.GridFunction_GetNodalValues(self, vec, args[0])", "mfem::IntegrationRule const *[] irs=0) -> double ComputeL2Error(GridFunction self, mfem::Coefficient *[]", "-> std::ostream __lshift__(std::ostream & out, Mesh mesh) -> std::ostream __lshift__(std::ostream", "return _gridfunc.GridFunction_ProjectGridFunction(self, src) ProjectGridFunction = _swig_new_instance_method(_gridfunc.GridFunction_ProjectGridFunction) def ProjectCoefficient(self, *args): r\"\"\"", "return _gridfunc.GridFunction_GetGradients(self, *args) GetGradients = _swig_new_instance_method(_gridfunc.GridFunction_GetGradients) def GetVectorGradient(self, tr, grad):", "GridFunction \"\"\" _gridfunc.GridFunction_swiginit(self, _gridfunc.new_GridFunction(*args)) def SaveToFile(self, gf_file, precision): r\"\"\"SaveToFile(GridFunction self,", "size) MakeRef(GridFunction self, Vector base, int offset) MakeRef(GridFunction self, FiniteElementSpace", "exdiv, irs) ComputeHDivError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeHDivError) def ComputeHCurlError(self, exsol, excurl, irs=0):", "= _gridfunc.Extrude1DGridFunction def __iadd__(self, v): ret = _gridfunc.GridFunction_iadd(self, v) ret.thisown", "r\"\"\"idiv(GridFunction self, double c) -> GridFunction\"\"\" return _gridfunc.GridFunction_idiv(self, c) idiv", "GetVectorFieldNodalValues(self, val, comp): r\"\"\"GetVectorFieldNodalValues(GridFunction self, Vector val, int comp)\"\"\" return", "input) -> GridFunction 
__init__(GridFunction self, Mesh m, mfem::GridFunction *[] gf_array,", "GridFunction \"\"\" return _gridfunc.GridFunction_Assign(self, *args) Assign = _swig_new_instance_method(_gridfunc.GridFunction_Assign) def Update(self):", "def GetSpace(self): r\"\"\"GetSpace(QuadratureFunction self) -> QuadratureSpace\"\"\" return _gridfunc.QuadratureFunction_GetSpace(self) GetSpace =", "* fname, int precision=16) Save(GridFunction self, char const * file,", "GetElementAverages(self, avgs): r\"\"\"GetElementAverages(GridFunction self, GridFunction avgs)\"\"\" return _gridfunc.GridFunction_GetElementAverages(self, avgs) GetElementAverages", "*args): r\"\"\" GetGradients(GridFunction self, ElementTransformation tr, IntegrationRule ir, DenseMatrix grad)", ".vector import Vector if len(args) == 1: vec = Vector()", "ImposeBounds(GridFunction self, int i, Vector weights, double min_=0.0, double max_=mfem::infinity())", "int i, int side, IntegrationRule ir, Vector vals, DenseMatrix tr,", "SaveToFile(self, gf_file, precision): r\"\"\"SaveToFile(GridFunction self, char const * gf_file, int", "ProjectGridFunction(self, src): r\"\"\"ProjectGridFunction(GridFunction self, GridFunction src)\"\"\" return _gridfunc.GridFunction_ProjectGridFunction(self, src) ProjectGridFunction", "r\"\"\" GetGradients(GridFunction self, ElementTransformation tr, IntegrationRule ir, DenseMatrix grad) GetGradients(GridFunction", "self, GridFunction avgs)\"\"\" return _gridfunc.GridFunction_GetElementAverages(self, avgs) GetElementAverages = _swig_new_instance_method(_gridfunc.GridFunction_GetElementAverages) def", "comp) ProjectVectorFieldOn = _swig_new_instance_method(_gridfunc.GridFunction_ProjectVectorFieldOn) def GetDerivative(self, comp, der_comp, der): r\"\"\"GetDerivative(GridFunction", "weight=None, VectorCoefficient v_weight=None, mfem::IntegrationRule const *[] irs=0) -> double \"\"\"", "a SWIG wrapped class - a slimmed down version of", "T, IntegrationPoint ip, int comp=0, Vector tr=None) -> double 
\"\"\"", "GetCurl(self, tr, curl): r\"\"\"GetCurl(GridFunction self, ElementTransformation tr, Vector curl)\"\"\" return", "tr, grad): r\"\"\"GetVectorGradient(GridFunction self, ElementTransformation tr, DenseMatrix grad)\"\"\" return _gridfunc.GridFunction_GetVectorGradient(self,", "IntegrationRule ir, Vector vals, DenseMatrix tr, int vdim=1) -> int\"\"\"", "ComputeMaxError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeMaxError) def ComputeW11Error(self, exsol, exgrad, norm_type, elems=None, irs=0):", "GetVectorValue(GridFunction self, int i, IntegrationPoint ip, Vector val) GetVectorValue(GridFunction self,", "ip, Vector val) GetVectorValue(GridFunction self, ElementTransformation T, IntegrationPoint ip, Vector", "import mfem._par.intrules import mfem._par.sparsemat import mfem._par.densemat import mfem._par.eltrans import mfem._par.fe", "def SetSpace(self, *args): r\"\"\" SetSpace(QuadratureFunction self, QuadratureSpace qspace_, int vdim_=-1)", "mfem._par.geom import mfem._par.fespace import mfem._par.mesh import mfem._par.sort_pairs import mfem._par.ncmesh import", "f): r\"\"\"SetSpace(GridFunction self, FiniteElementSpace f)\"\"\" return _gridfunc.GridFunction_SetSpace(self, f) SetSpace =", "* v) MakeRef(GridFunction self, FiniteElementSpace f, Vector v, int v_offset)", "*args): r\"\"\" GetVectorValues(GridFunction self, int i, IntegrationRule ir, DenseMatrix vals,", "self, int i, IntegrationRule ir, Vector laps, DenseMatrix tr, int", "ComputeElementL1Errors(self, *args): r\"\"\" ComputeElementL1Errors(GridFunction self, Coefficient exsol, Vector error, mfem::IntegrationRule", "self, Coefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0) ComputeElementMaxErrors(GridFunction", "self, int i, IntegrationPoint ip, int vdim=1) -> double GetValue(GridFunction", "VectorCoefficient coeff) ProjectDiscCoefficient(GridFunction self, Coefficient coeff, mfem::GridFunction::AvgType type) ProjectDiscCoefficient(GridFunction self,", "class - a 
slimmed down version of six.add_metaclass\"\"\" def wrapper(cls):", "DenseMatrix grad)\"\"\" return _gridfunc.GridFunction_GetVectorGradient(self, tr, grad) GetVectorGradient = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorGradient) def", "int\"\"\" return _gridfunc.GridFunction_GetFaceValues(self, i, side, ir, vals, tr, vdim) GetFaceValues", "= _swig_new_instance_method(_gridfunc.GridFunction_GetLaplacians) def GetHessians(self, *args): r\"\"\" GetHessians(GridFunction self, int i,", "_gridfunc.GridFunction_SaveToFile(self, gf_file, precision) SaveToFile = _swig_new_instance_method(_gridfunc.GridFunction_SaveToFile) def WriteToStream(self, StringIO): r\"\"\"WriteToStream(GridFunction", "def Save(self, *args): r\"\"\" Save(GridFunction self, std::ostream & out) Save(GridFunction", "self, Mesh m, Coefficient s, int n_) -> ExtrudeCoefficient\"\"\" _gridfunc.ExtrudeCoefficient_swiginit(self,", "self, char const * gf_file, int const precision)\"\"\" return _gridfunc.GridFunction_SaveToFile(self,", "exsol, Coefficient weight=None, VectorCoefficient v_weight=None, mfem::IntegrationRule const *[] irs=0) ->", "* tv) MakeTRef(GridFunction self, FiniteElementSpace f, Vector tv, int tv_offset)", "_swig_new_instance_method(_gridfunc.JumpScaling_Eval) __swig_destroy__ = _gridfunc.delete_JumpScaling # Register JumpScaling in _gridfunc: _gridfunc.JumpScaling_swigregister(JumpScaling)", "Vector laps, DenseMatrix tr, int vdim=1) \"\"\" return _gridfunc.GridFunction_GetLaplacians(self, *args)", "ReorderByNodes = _swig_new_instance_method(_gridfunc.GridFunction_ReorderByNodes) def GetNodalValues(self, *args): ''' GetNodalValues(i) -> GetNodalValues(vector,", "Nu, int norm_type) -> double ComputeH1Error(GridFunction self, Coefficient exsol, VectorCoefficient", "isinstance(getattr(type(self), name), property): set(self, name, value) else: raise AttributeError(\"You cannot", "r\"\"\" ComputeElementL2Errors(GridFunction self, Coefficient exsol, Vector error, 
mfem::IntegrationRule const *[]", "self, ElementTransformation T, IntegrationPoint ip) -> double\"\"\" return _gridfunc.ExtrudeCoefficient_Eval(self, T,", "_gridfunc.GridFunction_GetGradients(self, *args) GetGradients = _swig_new_instance_method(_gridfunc.GridFunction_GetGradients) def GetVectorGradient(self, tr, grad): r\"\"\"GetVectorGradient(GridFunction", "self) -> bool\"\"\" return _gridfunc.QuadratureFunction_OwnsSpace(self) OwnsSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_OwnsSpace) def SetOwnsSpace(self,", "ElementTransformation T, IntegrationPoint ip, Vector val, Vector tr=None) \"\"\" return", "excurl, mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeCurlError(self, excurl,", "irs=0) ComputeElementL2Errors(GridFunction self, VectorCoefficient exsol, Vector error, mfem::IntegrationRule const *[]", "ComputeLpError(GridFunction self, double const p, VectorCoefficient exsol, Coefficient weight=None, VectorCoefficient", "(2, 7, 0): raise RuntimeError(\"Python 2.7 or later required\") #", "exsol, Vector error, mfem::IntegrationRule const *[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementMaxErrors(self,", "std::istream & input) -> GridFunction __init__(GridFunction self, Mesh m, mfem::GridFunction", "self) -> FiniteElementSpace FESpace(GridFunction self) -> FiniteElementSpace \"\"\" return _gridfunc.GridFunction_FESpace(self,", "excurl, irs=0): r\"\"\"ComputeHCurlError(GridFunction self, VectorCoefficient exsol, VectorCoefficient excurl, mfem::IntegrationRule const", "ImportError: import __builtin__ _swig_new_instance_method = _gridfunc.SWIG_PyInstanceMethod_New _swig_new_static_method = _gridfunc.SWIG_PyStaticMethod_New def", "name, value) elif hasattr(self, name) and isinstance(getattr(type(self), name), property): set(self,", "-> GridFunction __init__(GridFunction self, Mesh m, mfem::GridFunction *[] gf_array, int", "GetLaplacians(self, *args): r\"\"\" GetLaplacians(GridFunction self, 
int i, IntegrationRule ir, Vector", "*[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeW11Error(self, exsol, exgrad, norm_type, elems,", "*args): ''' GetNodalValues(i) -> GetNodalValues(vector, vdim) GetNodalValues(i, array<dobule>, vdim) '''", "return _gridfunc.GridFunction_ComputeDivError(self, exdiv, irs) ComputeDivError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeDivError) def ComputeDGFaceJumpError(self, *args):", "v) ret.thisown = 0 return self def __isub__(self, v): ret", "_gridfunc.GridFunction_GetDerivative(self, comp, der_comp, der) GetDerivative = _swig_new_instance_method(_gridfunc.GridFunction_GetDerivative) def GetDivergence(self, tr):", "= 0 return self GridFunction.__iadd__ = __iadd__ GridFunction.__idiv__ = __idiv__", "*[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementLpErrors(self, *args) ComputeElementLpErrors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementLpErrors) def", "tr, int comp=0)\"\"\" return _gridfunc.GridFunction_GetVectorFieldValues(self, i, ir, vals, tr, comp)", "*args): r\"\"\" GetTrueVector(GridFunction self) -> Vector GetTrueVector(GridFunction self) -> Vector", "*[] irs=0, intArray elems=None) -> double \"\"\" return _gridfunc.GridFunction_ComputeL2Error(self, *args)", "= _swig_new_instance_method(_gridfunc.GridFunction_Update) def FESpace(self, *args): r\"\"\" FESpace(GridFunction self) -> FiniteElementSpace", "__init__(self, *args): r\"\"\" __init__(GridFunction self) -> GridFunction __init__(GridFunction self, GridFunction", "_gridfunc.GridFunction_GetBdrValuesFrom(self, orig_func) GetBdrValuesFrom = _swig_new_instance_method(_gridfunc.GridFunction_GetBdrValuesFrom) def GetVectorFieldValues(self, i, ir, vals,", "DenseMatrix grad) GetGradients(GridFunction self, int const elem, IntegrationRule ir, DenseMatrix", "irs=0): r\"\"\"ComputeGradError(GridFunction self, VectorCoefficient exgrad, mfem::IntegrationRule const *[] irs=0) ->", 
"_gridfunc.JumpScaling_swigregister(JumpScaling) class QuadratureFunction(mfem._par.vector.Vector): r\"\"\"Proxy of C++ mfem::QuadratureFunction class.\"\"\" thisown =", "QuadratureFunction __init__(QuadratureFunction self, QuadratureSpace qspace_, double * qf_data, int vdim_=1)", "precision) SaveToFile = _swig_new_instance_method(_gridfunc.GridFunction_SaveToFile) def WriteToStream(self, StringIO): r\"\"\"WriteToStream(GridFunction self, PyObject", "def SaveSTL(self, out, TimesToRefine=1): r\"\"\"SaveSTL(GridFunction self, std::ostream & out, int", "r\"\"\"SaveToFile(GridFunction self, char const * gf_file, int const precision)\"\"\" return", "IntegrationRule\"\"\" return _gridfunc.QuadratureFunction_GetElementIntRule(self, idx) GetElementIntRule = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementIntRule) def GetElementValues(self, *args):", "_gridfunc.GridFunction_isub(self, *args) isub = _swig_new_instance_method(_gridfunc.GridFunction_isub) def imul(self, c): r\"\"\"imul(GridFunction self,", "vals, tr, comp) GetVectorFieldValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldValues) def ReorderByNodes(self): r\"\"\"ReorderByNodes(GridFunction self)\"\"\"", "mfem._par.linearform import mfem._par.nonlininteg class GridFunction(mfem._par.vector.Vector): r\"\"\"Proxy of C++ mfem::GridFunction class.\"\"\"", "r\"\"\"GetDerivative(GridFunction self, int comp, int der_comp, GridFunction der)\"\"\" return _gridfunc.GridFunction_GetDerivative(self,", "tr, Vector grad)\"\"\" return _gridfunc.GridFunction_GetGradient(self, tr, grad) GetGradient = _swig_new_instance_method(_gridfunc.GridFunction_GetGradient)", "-> GridFunction \"\"\" return _gridfunc.GridFunction_Assign(self, *args) Assign = _swig_new_instance_method(_gridfunc.GridFunction_Assign) def", "import mfem._par.vtk import mfem._par.element import mfem._par.table import mfem._par.hash import mfem._par.vertex", "= _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueDofs) 
def SetFromTrueVector(self): r\"\"\"SetFromTrueVector(GridFunction self)\"\"\" return _gridfunc.GridFunction_SetFromTrueVector(self) SetFromTrueVector =", "def _swig_setattr_nondynamic_instance_variable(set): def set_instance_attr(self, name, value): if name == \"thisown\":", "mfem::IntegrationRule const *[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeLpError(self, *args)", "tr) GetVectorValues(GridFunction self, ElementTransformation T, IntegrationRule ir, DenseMatrix vals, DenseMatrix", "ProjectVectorFieldOn = _swig_new_instance_method(_gridfunc.GridFunction_ProjectVectorFieldOn) def GetDerivative(self, comp, der_comp, der): r\"\"\"GetDerivative(GridFunction self,", "\"\"\" return _gridfunc.GridFunction_FESpace(self, *args) FESpace = _swig_new_instance_method(_gridfunc.GridFunction_FESpace) def SetSpace(self, f):", "as __builtin__ except ImportError: import __builtin__ _swig_new_instance_method = _gridfunc.SWIG_PyInstanceMethod_New _swig_new_static_method", "GetGradients(self, *args): r\"\"\" GetGradients(GridFunction self, ElementTransformation tr, IntegrationRule ir, DenseMatrix", "exsol, exgrad, norm_type, elems, irs) ComputeW11Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeW11Error) def ComputeL1Error(self,", "s, n_)) def Eval(self, T, ip): r\"\"\"Eval(ExtrudeCoefficient self, ElementTransformation T,", "*[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeHCurlError(self, exsol, excurl, irs) ComputeHCurlError", "_gridfunc.GridFunction_GetVectorGradient(self, tr, grad) GetVectorGradient = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorGradient) def GetElementAverages(self, avgs): r\"\"\"GetElementAverages(GridFunction", "tr, comp) GetVectorFieldValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldValues) def ReorderByNodes(self): r\"\"\"ReorderByNodes(GridFunction self)\"\"\" return", "r\"\"\"Extrude1DGridFunction(Mesh mesh, Mesh mesh2d, GridFunction sol, int const ny) ->", "attributes 
(no new attributes) for a class\"\"\" __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)", "_gridfunc.GridFunction_SetTrueVector(self) SetTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_SetTrueVector) def SetFromTrueDofs(self, tv): r\"\"\"SetFromTrueDofs(GridFunction self, Vector", "__init__(GridFunction self, GridFunction orig) -> GridFunction __init__(GridFunction self, FiniteElementSpace f)", "GetElementIntRule = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementIntRule) def GetElementValues(self, *args): r\"\"\" GetElementValues(QuadratureFunction self, int", "ExtrudeCoefficient\"\"\" _gridfunc.ExtrudeCoefficient_swiginit(self, _gridfunc.new_ExtrudeCoefficient(m, s, n_)) def Eval(self, T, ip): r\"\"\"Eval(ExtrudeCoefficient", "i, gf1, gf2): r\"\"\"ComputeElementLpDistance(double p, int i, GridFunction gf1, GridFunction", "exsol, mfem::IntegrationRule const *[] irs=0) -> double ComputeMaxError(GridFunction self, mfem::Coefficient", "\".\" in __name__: from . 
import _gridfunc else: import _gridfunc", "self, QuadratureSpace qspace_, double * qf_data, int vdim_=-1) \"\"\" return", "self, bool own)\"\"\" return _gridfunc.QuadratureFunction_SetOwnsSpace(self, own) SetOwnsSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetOwnsSpace) def", "GetSpace(self): r\"\"\"GetSpace(QuadratureFunction self) -> QuadratureSpace\"\"\" return _gridfunc.QuadratureFunction_GetSpace(self) GetSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetSpace)", "const *[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeH1Error(self, *args) ComputeH1Error", "ElementTransformation T, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr=None) \"\"\" return", "double GetValue(GridFunction self, ElementTransformation T, IntegrationPoint ip, int comp=0, Vector", "self GridFunction.__iadd__ = __iadd__ GridFunction.__idiv__ = __idiv__ GridFunction.__isub__ = __isub__", "*args): r\"\"\" Assign(GridFunction self, GridFunction rhs) -> GridFunction Assign(GridFunction self,", "\"\"\" return _gridfunc.GridFunction_ComputeElementLpErrors(self, *args) ComputeElementLpErrors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementLpErrors) def ComputeElementL1Errors(self, *args):", "Update = _swig_new_instance_method(_gridfunc.GridFunction_Update) def FESpace(self, *args): r\"\"\" FESpace(GridFunction self) ->", "-> double ComputeLpError(GridFunction self, double const p, VectorCoefficient exsol, Coefficient", "*[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeLpError(self, *args) ComputeLpError =", "r\"\"\"SetTrueVector(GridFunction self)\"\"\" return _gridfunc.GridFunction_SetTrueVector(self) SetTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_SetTrueVector) def SetFromTrueDofs(self, tv):", "-> GridFunction \"\"\" return _gridfunc.GridFunction_isub(self, *args) isub = _swig_new_instance_method(_gridfunc.GridFunction_isub) def", "r\"\"\" ComputeLpError(GridFunction self, double const p, 
Coefficient exsol, Coefficient weight=None,", "ip) -> double\"\"\" return _gridfunc.ExtrudeCoefficient_Eval(self, T, ip) Eval = _swig_new_instance_method(_gridfunc.ExtrudeCoefficient_Eval)", "GetVectorValue(self, *args): r\"\"\" GetVectorValue(GridFunction self, int i, IntegrationPoint ip, Vector", "self, VectorCoefficient exsol, Coefficient exdiv, mfem::IntegrationRule const *[] irs=0) ->", "import mfem._par.table import mfem._par.hash import mfem._par.vertex import mfem._par.fe_coll import mfem._par.lininteg", "_swig_new_instance_method(_gridfunc.QuadratureFunction_GetSpace) def SetSpace(self, *args): r\"\"\" SetSpace(QuadratureFunction self, QuadratureSpace qspace_, int", "qspace_, int vdim_=-1) SetSpace(QuadratureFunction self, QuadratureSpace qspace_, double * qf_data,", "= _swig_new_instance_method(_gridfunc.GridFunction_GetFaceValues) def GetFaceVectorValues(self, i, side, ir, vals, tr): r\"\"\"GetFaceVectorValues(GridFunction", "coeff, intArray dofs, int vd=0) ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff) ProjectCoefficient(GridFunction", "mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeHCurlError(self, exsol, excurl,", "DenseMatrix vals, DenseMatrix tr) GetVectorValues(GridFunction self, ElementTransformation T, IntegrationRule ir,", "_swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueVector) def GetValue(self, *args): r\"\"\" GetValue(GridFunction self, int i, IntegrationPoint", "-> double\"\"\" return _gridfunc.GridFunction_ComputeHDivError(self, exsol, exdiv, irs) ComputeHDivError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeHDivError)", "val, comp) GetVectorFieldNodalValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldNodalValues) def ProjectVectorFieldOn(self, vec_field, comp=0): r\"\"\"ProjectVectorFieldOn(GridFunction", "vcoeff, bdr_attr): r\"\"\"ProjectBdrCoefficientTangent(GridFunction self, VectorCoefficient vcoeff, intArray bdr_attr)\"\"\" 
return _gridfunc.GridFunction_ProjectBdrCoefficientTangent(self,", "_swig_new_instance_method(_gridfunc.GridFunction_ComputeHCurlError) def ComputeMaxError(self, *args): r\"\"\" ComputeMaxError(GridFunction self, Coefficient exsol, mfem::IntegrationRule", "GridFunction avgs)\"\"\" return _gridfunc.GridFunction_GetElementAverages(self, avgs) GetElementAverages = _swig_new_instance_method(_gridfunc.GridFunction_GetElementAverages) def GetElementDofValues(self,", "def _swig_add_metaclass(metaclass): \"\"\"Class decorator for adding a metaclass to a", "tr, curl) GetCurl = _swig_new_instance_method(_gridfunc.GridFunction_GetCurl) def GetGradient(self, tr, grad): r\"\"\"GetGradient(GridFunction", "mfem::Coefficient *[] coeff) \"\"\" return _gridfunc.GridFunction_ProjectCoefficient(self, *args) ProjectCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectCoefficient)", "orig_func) GetValuesFrom = _swig_new_instance_method(_gridfunc.GridFunction_GetValuesFrom) def GetBdrValuesFrom(self, orig_func): r\"\"\"GetBdrValuesFrom(GridFunction self, GridFunction", "const *[] irs=0) ComputeElementL1Errors(GridFunction self, VectorCoefficient exsol, Vector error, mfem::IntegrationRule", "= _swig_new_instance_method(_gridfunc.GridFunction_GetVectorGradient) def GetElementAverages(self, avgs): r\"\"\"GetElementAverages(GridFunction self, GridFunction avgs)\"\"\" return", "self)\"\"\" return _gridfunc.GridFunction_RestrictConforming(self) RestrictConforming = _swig_new_instance_method(_gridfunc.GridFunction_RestrictConforming) def ProjectGridFunction(self, src): r\"\"\"ProjectGridFunction(GridFunction", "coeff, intArray attr) \"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficient(self, *args) ProjectBdrCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficient)", "self, int i, int side, IntegrationRule ir, DenseMatrix vals, DenseMatrix", "norm_type, elems, irs) ComputeW11Error = 
_swig_new_instance_method(_gridfunc.GridFunction_ComputeW11Error) def ComputeL1Error(self, *args): r\"\"\"", "return _gridfunc.GridFunction_SaveVTK(self, out, field_name, ref) SaveVTK = _swig_new_instance_method(_gridfunc.GridFunction_SaveVTK) def SaveSTL(self,", "precision=16) Save(GridFunction self, char const * file, int precision=16) \"\"\"", "GetVectorFieldNodalValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldNodalValues) def ProjectVectorFieldOn(self, vec_field, comp=0): r\"\"\"ProjectVectorFieldOn(GridFunction self, GridFunction", "__swig_destroy__ = _gridfunc.delete_GridFunction def __init__(self, *args): r\"\"\" __init__(GridFunction self) ->", "self, Coefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0) ComputeElementL1Errors(GridFunction", "offset, int size) MakeRef(GridFunction self, Vector base, int offset) MakeRef(GridFunction", "Coefficient exsol, VectorCoefficient exgrad, mfem::IntegrationRule const *[] irs=0) -> double", "ir, vals, tr, vdim=1): r\"\"\"GetFaceValues(GridFunction self, int i, int side,", "*[] exsol, mfem::IntegrationRule const *[] irs=0) -> double ComputeL2Error(GridFunction self,", "def OwnsSpace(self): r\"\"\"OwnsSpace(QuadratureFunction self) -> bool\"\"\" return _gridfunc.QuadratureFunction_OwnsSpace(self) OwnsSpace =", "self)\"\"\" return _gridfunc.GridFunction_SetTrueVector(self) SetTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_SetTrueVector) def SetFromTrueDofs(self, tv): r\"\"\"SetFromTrueDofs(GridFunction", "r\"\"\" ProjectDiscCoefficient(GridFunction self, VectorCoefficient coeff) ProjectDiscCoefficient(GridFunction self, Coefficient coeff, mfem::GridFunction::AvgType", "_swig_new_instance_method(_gridfunc.ExtrudeCoefficient_Eval) __swig_destroy__ = _gridfunc.delete_ExtrudeCoefficient # Register ExtrudeCoefficient in _gridfunc: _gridfunc.ExtrudeCoefficient_swigregister(ExtrudeCoefficient)", "_gridfunc.GridFunction_GetTrueDofs(self, tv) GetTrueDofs = 
_swig_new_instance_method(_gridfunc.GridFunction_GetTrueDofs) def SetTrueVector(self): r\"\"\"SetTrueVector(GridFunction self)\"\"\" return", "def MakeTRef(self, *args): r\"\"\" MakeTRef(GridFunction self, FiniteElementSpace f, double *", "p, int i, GridFunction gf1, GridFunction gf2) -> double\"\"\" return", "_swig_new_instance_method(_gridfunc.GridFunction_GetDerivative) def GetDivergence(self, tr): r\"\"\"GetDivergence(GridFunction self, ElementTransformation tr) -> double\"\"\"", "_gridfunc.GridFunction_GetDivergence(self, tr) GetDivergence = _swig_new_instance_method(_gridfunc.GridFunction_GetDivergence) def GetCurl(self, tr, curl): r\"\"\"GetCurl(GridFunction", "*[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeCurlError(self, excurl, irs) ComputeCurlError =", "s, int n_) -> ExtrudeCoefficient\"\"\" _gridfunc.ExtrudeCoefficient_swiginit(self, _gridfunc.new_ExtrudeCoefficient(m, s, n_)) def", "return _gridfunc.GridFunction_MakeTRef(self, *args) MakeTRef = _swig_new_instance_method(_gridfunc.GridFunction_MakeTRef) def SaveVTK(self, out, field_name,", "_swig_new_instance_method(_gridfunc.GridFunction_ProjectDiscCoefficient) def ProjectBdrCoefficient(self, *args): r\"\"\" ProjectBdrCoefficient(GridFunction self, Coefficient coeff, intArray", "*[] irs=0) ComputeElementL1Errors(GridFunction self, VectorCoefficient exsol, Vector error, mfem::IntegrationRule const", "fes, Vector v, int offset) -> GridFunction \"\"\" _gridfunc.GridFunction_swiginit(self, _gridfunc.new_GridFunction(*args))", "name) and not isinstance(getattr(cls, name), property): set(cls, name, value) else:", "comp, der_comp, der): r\"\"\"GetDerivative(GridFunction self, int comp, int der_comp, GridFunction", "return _gridfunc.GridFunction_ImposeBounds(self, *args) ImposeBounds = _swig_new_instance_method(_gridfunc.GridFunction_ImposeBounds) def RestrictConforming(self): r\"\"\"RestrictConforming(GridFunction self)\"\"\"", "double\"\"\" return _gridfunc.GridFunction_ComputeDivError(self, 
exdiv, irs) ComputeDivError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeDivError) def ComputeDGFaceJumpError(self,", "return _gridfunc.QuadratureFunction_SaveGZ(self, file, precision) SaveGZ = _swig_new_instance_method(_gridfunc.QuadratureFunction_SaveGZ) # Register QuadratureFunction", "val, comp): r\"\"\"GetVectorFieldNodalValues(GridFunction self, Vector val, int comp)\"\"\" return _gridfunc.GridFunction_GetVectorFieldNodalValues(self,", "vdim_)\"\"\" return _gridfunc.QuadratureFunction_SetVDim(self, vdim_) SetVDim = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetVDim) def OwnsSpace(self): r\"\"\"OwnsSpace(QuadratureFunction", "-> int\"\"\" return _gridfunc.GridFunction_GetFaceVectorValues(self, i, side, ir, vals, tr) GetFaceVectorValues", "def GetDerivative(self, comp, der_comp, der): r\"\"\"GetDerivative(GridFunction self, int comp, int", "membership flag\") __repr__ = _swig_repr def __init__(self, m, s, n_):", "IntegrationRule ir, DenseMatrix hess, int vdim=1) GetHessians(GridFunction self, int i,", "GetHessians(self, *args): r\"\"\" GetHessians(GridFunction self, int i, IntegrationRule ir, DenseMatrix", "GridFunction \"\"\" return _gridfunc.GridFunction_isub(self, *args) isub = _swig_new_instance_method(_gridfunc.GridFunction_isub) def imul(self,", "= _swig_new_instance_method(_gridfunc.GridFunction_idiv) def Save(self, *args): r\"\"\" Save(GridFunction self, std::ostream &", "IntegrationRule ir, Vector vals, int comp=0, DenseMatrix tr=None) \"\"\" return", "int norm_type, intArray elems=None, mfem::IntegrationRule const *[] irs=0) -> double\"\"\"", "& out, QuadratureFunction qf) -> std::ostream & \"\"\" return _gridfunc.__lshift__(*args)", "GridFunction sol, int const ny) -> GridFunction\"\"\" return _gridfunc.Extrude1DGridFunction(mesh, mesh2d,", "intArray dofs) ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff, int attribute) ProjectCoefficient(GridFunction self,", "__setattr__ = 
_swig_setattr_nondynamic_class_variable(type.__setattr__) import weakref import mfem._par.array import mfem._par.mem_manager import", "bool\"\"\" return _gridfunc.QuadratureFunction_OwnsSpace(self) OwnsSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_OwnsSpace) def SetOwnsSpace(self, own): r\"\"\"SetOwnsSpace(QuadratureFunction", "int i, IntegrationRule ir, Vector vals, DenseMatrix tr, int vdim=1)", "Mesh m, std::istream & input) -> GridFunction __init__(GridFunction self, Mesh", "const elem, IntegrationRule ir, DenseMatrix grad) \"\"\" return _gridfunc.GridFunction_GetGradients(self, *args)", "curl): r\"\"\"GetCurl(GridFunction self, ElementTransformation tr, Vector curl)\"\"\" return _gridfunc.GridFunction_GetCurl(self, tr,", "*[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeDGFaceJumpError(self, *args) ComputeDGFaceJumpError =", "VectorCoefficient vcoeff, intArray bdr_attr)\"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficientTangent(self, vcoeff, bdr_attr) ProjectBdrCoefficientTangent =", "return _gridfunc.GridFunction_GetHessians(self, *args) GetHessians = _swig_new_instance_method(_gridfunc.GridFunction_GetHessians) def GetValuesFrom(self, orig_func): r\"\"\"GetValuesFrom(GridFunction", "__init__(GridFunction self) -> GridFunction __init__(GridFunction self, GridFunction orig) -> GridFunction", "of C++ mfem::GridFunction class.\"\"\" thisown = property(lambda x: x.this.own(), lambda", "VectorCoefficient exsol, mfem::IntegrationRule const *[] irs=0) -> double \"\"\" return", "return _gridfunc.GridFunction_ComputeHDivError(self, exsol, exdiv, irs) ComputeHDivError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeHDivError) def ComputeHCurlError(self,", "self) -> Vector GetTrueVector(GridFunction self) -> Vector \"\"\" return _gridfunc.GridFunction_GetTrueVector(self,", "double \"\"\" return _gridfunc.GridFunction_ComputeL1Error(self, *args) ComputeL1Error = 
_swig_new_instance_method(_gridfunc.GridFunction_ComputeL1Error) def ComputeLpError(self,", "ComputeDivError(self, exdiv, irs=0): r\"\"\"ComputeDivError(GridFunction self, Coefficient exdiv, mfem::IntegrationRule const *[]", "int vdim_=-1) \"\"\" return _gridfunc.QuadratureFunction_SetSpace(self, *args) SetSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetSpace) def", "self) -> int\"\"\" return _gridfunc.QuadratureFunction_GetVDim(self) GetVDim = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetVDim) def SetVDim(self,", "ir, vals, tr): r\"\"\"GetFaceVectorValues(GridFunction self, int i, int side, IntegrationRule", "Save(self, *args): r\"\"\" Save(QuadratureFunction self, std::ostream & out) Save(QuadratureFunction self,", "self, char const * fname, int precision=16) Save(GridFunction self, char", "int num_pieces) -> GridFunction __init__(GridFunction self, FiniteElementSpace fes, Vector v,", "\"\"\" return _gridfunc.GridFunction_GetTrueVector(self, *args) GetTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_GetTrueVector) def GetTrueDofs(self, tv):", "return _gridfunc.GridFunction_GetCurl(self, tr, curl) GetCurl = _swig_new_instance_method(_gridfunc.GridFunction_GetCurl) def GetGradient(self, tr,", "self, int i, IntegrationRule ir, Vector laps, int vdim=1) GetLaplacians(GridFunction", "\"\"\" return _gridfunc.GridFunction_ComputeL1Error(self, *args) ComputeL1Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeL1Error) def ComputeLpError(self, *args):", "_swig_new_instance_method(_gridfunc.GridFunction_GetFaceValues) def GetFaceVectorValues(self, i, side, ir, vals, tr): r\"\"\"GetFaceVectorValues(GridFunction self,", "*args): r\"\"\" ComputeElementMaxErrors(GridFunction self, Coefficient exsol, Vector error, mfem::IntegrationRule const", "value) elif hasattr(self, name) and isinstance(getattr(type(self), name), property): set(self, name,", "f) SetSpace = _swig_new_instance_method(_gridfunc.GridFunction_SetSpace) def 
MakeRef(self, *args): r\"\"\" MakeRef(GridFunction self,", "SaveVTK = _swig_new_instance_method(_gridfunc.GridFunction_SaveVTK) def SaveSTL(self, out, TimesToRefine=1): r\"\"\"SaveSTL(GridFunction self, std::ostream", "GetVectorValues(GridFunction self, int i, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr)", "= _swig_new_instance_method(_gridfunc.GridFunction_ProjectVectorFieldOn) def GetDerivative(self, comp, der_comp, der): r\"\"\"GetDerivative(GridFunction self, int", "mfem::IntegrationRule const *[] irs=0) ComputeElementLpErrors(GridFunction self, double const p, VectorCoefficient", "to a SWIG wrapped class - a slimmed down version", "Coefficient exsol, Vector error, Coefficient weight=None, mfem::IntegrationRule const *[] irs=0)", "ComputeLpError(self, *args): r\"\"\" ComputeLpError(GridFunction self, double const p, Coefficient exsol,", "not make changes to this file unless you know what", "mfem::IntegrationRule const *[] irs=0) ComputeElementL1Errors(GridFunction self, VectorCoefficient exsol, Vector error,", "std::ostream __lshift__(std::ostream & out, QuadratureFunction qf) -> std::ostream & \"\"\"", "GetFaceVectorValues(self, i, side, ir, vals, tr): r\"\"\"GetFaceVectorValues(GridFunction self, int i,", "T, IntegrationRule ir, Vector vals, int comp=0, DenseMatrix tr=None) \"\"\"", "= _swig_new_instance_method(_gridfunc.GridFunction_ComputeHDivError) def ComputeHCurlError(self, exsol, excurl, irs=0): r\"\"\"ComputeHCurlError(GridFunction self, VectorCoefficient", "Import the low-level C/C++ module if __package__ or \".\" in", "int norm_type) -> double ComputeH1Error(GridFunction self, Coefficient exsol, VectorCoefficient exgrad,", "def ComputeHDivError(self, exsol, exdiv, irs=0): r\"\"\"ComputeHDivError(GridFunction self, VectorCoefficient exsol, Coefficient", "= _swig_new_instance_method(_gridfunc.GridFunction_SaveVTK) def SaveSTL(self, out, TimesToRefine=1): r\"\"\"SaveSTL(GridFunction self, std::ostream &", "double ComputeH1Error(GridFunction 
self, Coefficient exsol, VectorCoefficient exgrad, mfem::IntegrationRule const *[]", "* StringIO) -> PyObject *\"\"\" return _gridfunc.GridFunction_WriteToStream(self, StringIO) WriteToStream =", "const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeCurlError(self, excurl, irs) ComputeCurlError", "RestrictConforming(self): r\"\"\"RestrictConforming(GridFunction self)\"\"\" return _gridfunc.GridFunction_RestrictConforming(self) RestrictConforming = _swig_new_instance_method(_gridfunc.GridFunction_RestrictConforming) def ProjectGridFunction(self,", "def ProjectBdrCoefficient(self, *args): r\"\"\" ProjectBdrCoefficient(GridFunction self, Coefficient coeff, intArray attr)", "= _swig_new_instance_method(_gridfunc.GridFunction_GetElementDofValues) def ImposeBounds(self, *args): r\"\"\" ImposeBounds(GridFunction self, int i,", "self, double h, int p) -> double\"\"\" return _gridfunc.JumpScaling_Eval(self, h,", "ComputeL2Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeL2Error) def ComputeGradError(self, exgrad, irs=0): r\"\"\"ComputeGradError(GridFunction self, VectorCoefficient", "\"\"\" _gridfunc.GridFunction_swiginit(self, _gridfunc.new_GridFunction(*args)) def SaveToFile(self, gf_file, precision): r\"\"\"SaveToFile(GridFunction self, char", "r\"\"\" GetVectorValue(GridFunction self, int i, IntegrationPoint ip, Vector val) GetVectorValue(GridFunction", "-> double\"\"\" return _gridfunc.GridFunction_ComputeDivError(self, exdiv, irs) ComputeDivError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeDivError) def", "import mfem._par.matrix import mfem._par.operators import mfem._par.intrules import mfem._par.sparsemat import mfem._par.densemat", "irs=0): r\"\"\"ComputeCurlError(GridFunction self, VectorCoefficient excurl, mfem::IntegrationRule const *[] irs=0) ->", "exsol, VectorCoefficient excurl, mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return", "ProjectGridFunction = 
_swig_new_instance_method(_gridfunc.GridFunction_ProjectGridFunction) def ProjectCoefficient(self, *args): r\"\"\" ProjectCoefficient(GridFunction self, Coefficient", "_swig_setattr_nondynamic_instance_variable(set): def set_instance_attr(self, name, value): if name == \"thisown\": self.this.own(value)", "\"\"\" return _gridfunc.GridFunction_ComputeL2Error(self, *args) ComputeL2Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeL2Error) def ComputeGradError(self, exgrad,", "double const p, Coefficient exsol, Vector error, Coefficient weight=None, mfem::IntegrationRule", "*args) ComputeL1Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeL1Error) def ComputeLpError(self, *args): r\"\"\" ComputeLpError(GridFunction self,", "new attributes) for a class\"\"\" __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__) import weakref", "mfem._par.restriction import mfem._par.bilininteg import mfem._par.linearform import mfem._par.nonlininteg class GridFunction(mfem._par.vector.Vector): r\"\"\"Proxy", "i, IntegrationPoint ip, Vector val) GetVectorValue(GridFunction self, ElementTransformation T, IntegrationPoint", "mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeGradError(self, exgrad, irs)", "*args) GetHessians = _swig_new_instance_method(_gridfunc.GridFunction_GetHessians) def GetValuesFrom(self, orig_func): r\"\"\"GetValuesFrom(GridFunction self, GridFunction", "-> double ComputeMaxError(GridFunction self, mfem::Coefficient *[] exsol, mfem::IntegrationRule const *[]", "mfem::Coefficient *[] coeff, intArray attr) \"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficient(self, *args) ProjectBdrCoefficient", "SWIG wrapped class - a slimmed down version of six.add_metaclass\"\"\"", "return _gridfunc.GridFunction_SetFromTrueVector(self) SetFromTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueVector) def GetValue(self, *args): r\"\"\" GetValue(GridFunction", "const *[] 
irs=0) -> double ComputeL2Error(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule", "type) \"\"\" return _gridfunc.GridFunction_ProjectDiscCoefficient(self, *args) ProjectDiscCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectDiscCoefficient) def ProjectBdrCoefficient(self,", "x, v: x.this.own(v), doc=\"The membership flag\") __repr__ = _swig_repr def", "Coefficient ell_coef, double Nu, int norm_type) -> double ComputeH1Error(GridFunction self,", "exsol, mfem::IntegrationRule const *[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeMaxError(self,", "*args) Save = _swig_new_instance_method(_gridfunc.QuadratureFunction_Save) def SaveGZ(self, file, precision=16): r\"\"\"SaveGZ(QuadratureFunction self,", "int precision=16)\"\"\" return _gridfunc.QuadratureFunction_SaveGZ(self, file, precision) SaveGZ = _swig_new_instance_method(_gridfunc.QuadratureFunction_SaveGZ) #", "const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeGradError(self, exgrad, irs) ComputeGradError", "double Nu, int norm_type) -> double ComputeH1Error(GridFunction self, Coefficient exsol,", "return self def __imul__(self, v): ret = _gridfunc.GridFunction_imul(self, v) ret.thisown", "ComputeL2Error(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule const *[] irs=0, intArray elems=None)", "import mfem._par.coefficient import mfem._par.globals import mfem._par.matrix import mfem._par.operators import mfem._par.intrules", "return _gridfunc.GridFunction_WriteToStream(self, StringIO) WriteToStream = _swig_new_instance_method(_gridfunc.GridFunction_WriteToStream) def iadd(self, c): r\"\"\"iadd(GridFunction", "T, ip) Eval = _swig_new_instance_method(_gridfunc.ExtrudeCoefficient_Eval) __swig_destroy__ = _gridfunc.delete_ExtrudeCoefficient # Register", "*args) ComputeH1Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeH1Error) def ComputeHDivError(self, exsol, exdiv, irs=0): r\"\"\"ComputeHDivError(GridFunction", 
"_gridfunc.GridFunction_GetHessians(self, *args) GetHessians = _swig_new_instance_method(_gridfunc.GridFunction_GetHessians) def GetValuesFrom(self, orig_func): r\"\"\"GetValuesFrom(GridFunction self,", "* file, int precision=16)\"\"\" return _gridfunc.QuadratureFunction_SaveGZ(self, file, precision) SaveGZ =", "*args): r\"\"\" ProjectBdrCoefficient(GridFunction self, Coefficient coeff, intArray attr) ProjectBdrCoefficient(GridFunction self,", "wcoef=True, int subdomain=-1)\"\"\" return _gridfunc.GridFunction_ComputeFlux(self, blfi, flux, wcoef, subdomain) ComputeFlux", "= _gridfunc.delete_ExtrudeCoefficient # Register ExtrudeCoefficient in _gridfunc: _gridfunc.ExtrudeCoefficient_swigregister(ExtrudeCoefficient) def Extrude1DGridFunction(mesh,", "mfem::IntegrationRule const *[] irs=0, intArray elems=None) -> double \"\"\" return", "= _swig_new_instance_method(_gridfunc.GridFunction_isub) def imul(self, c): r\"\"\"imul(GridFunction self, double c) ->", "GridFunction u, GridFunction flux, Vector error_estimates, intArray aniso_flags=None, int with_subdomains=1,", "*args) ProjectBdrCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficient) def ProjectBdrCoefficientNormal(self, vcoeff, bdr_attr): r\"\"\"ProjectBdrCoefficientNormal(GridFunction self,", "GetVDim = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetVDim) def SetVDim(self, vdim_): r\"\"\"SetVDim(QuadratureFunction self, int vdim_)\"\"\"", "vec.GetDataArray() else: return _gridfunc.GridFunction_GetNodalValues(self, *args) def GetVectorFieldNodalValues(self, val, comp): r\"\"\"GetVectorFieldNodalValues(GridFunction", "_gridfunc.GridFunction_ProjectCoefficient(self, *args) ProjectCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectCoefficient) ARITHMETIC = _gridfunc.GridFunction_ARITHMETIC HARMONIC =", "was automatically generated by SWIG (http://www.swig.org). 
# Version 4.0.2 #", "r\"\"\"VectorDim(GridFunction self) -> int\"\"\" return _gridfunc.GridFunction_VectorDim(self) VectorDim = _swig_new_instance_method(_gridfunc.GridFunction_VectorDim) def", "= _gridfunc.ZZErrorEstimator def ComputeElementLpDistance(p, i, gf1, gf2): r\"\"\"ComputeElementLpDistance(double p, int", "x, v: x.this.own(v), doc=\"The membership flag\") __repr__ = _swig_repr CONSTANT", "tr, int vdim=1) \"\"\" return _gridfunc.GridFunction_GetHessians(self, *args) GetHessians = _swig_new_instance_method(_gridfunc.GridFunction_GetHessians)", "-> double\"\"\" return _gridfunc.ExtrudeCoefficient_Eval(self, T, ip) Eval = _swig_new_instance_method(_gridfunc.ExtrudeCoefficient_Eval) __swig_destroy__", "return _gridfunc.QuadratureFunction_SetOwnsSpace(self, own) SetOwnsSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetOwnsSpace) def GetElementIntRule(self, idx): r\"\"\"GetElementIntRule(QuadratureFunction", "GetGradient = _swig_new_instance_method(_gridfunc.GridFunction_GetGradient) def GetGradients(self, *args): r\"\"\" GetGradients(GridFunction self, ElementTransformation", "mfem._par.sort_pairs import mfem._par.ncmesh import mfem._par.vtk import mfem._par.element import mfem._par.table import", "DenseMatrix vals, DenseMatrix tr=None) \"\"\" return _gridfunc.GridFunction_GetVectorValues(self, *args) GetVectorValues =", "mfem::IntegrationRule const *[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeMaxError(self, *args)", "int with_subdomains=1, bool with_coeff=False) -> double\"\"\" return _gridfunc.ZZErrorEstimator(blfi, u, flux,", "comp, int der_comp, GridFunction der)\"\"\" return _gridfunc.GridFunction_GetDerivative(self, comp, der_comp, der)", "mfem._par.nonlininteg class GridFunction(mfem._par.vector.Vector): r\"\"\"Proxy of C++ mfem::GridFunction class.\"\"\" thisown =", "return _gridfunc.GridFunction_ComputeH1Error(self, *args) ComputeH1Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeH1Error) def 
ComputeHDivError(self, exsol, exdiv,", "orig_func): r\"\"\"GetBdrValuesFrom(GridFunction self, GridFunction orig_func)\"\"\" return _gridfunc.GridFunction_GetBdrValuesFrom(self, orig_func) GetBdrValuesFrom =", "c) -> GridFunction \"\"\" return _gridfunc.GridFunction_isub(self, *args) isub = _swig_new_instance_method(_gridfunc.GridFunction_isub)", "interface file instead. from sys import version_info as _swig_python_version_info if", "ComputeLpError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeLpError) def ComputeElementLpErrors(self, *args): r\"\"\" ComputeElementLpErrors(GridFunction self, double", "excurl, irs=0): r\"\"\"ComputeCurlError(GridFunction self, VectorCoefficient excurl, mfem::IntegrationRule const *[] irs=0)", "to enforce nondynamic attributes (no new attributes) for a class\"\"\"", "r\"\"\" ComputeH1Error(GridFunction self, Coefficient exsol, VectorCoefficient exgrad, Coefficient ell_coef, double", "return _gridfunc.GridFunction_ComputeDGFaceJumpError(self, *args) ComputeDGFaceJumpError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeDGFaceJumpError) def ComputeH1Error(self, *args): r\"\"\"", "weight=None, VectorCoefficient v_weight=None, mfem::IntegrationRule const *[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementLpErrors(self,", "int vdim=1) GetLaplacians(GridFunction self, int i, IntegrationRule ir, Vector laps,", "int i, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr, int comp=0)\"\"\"", "x.this.own(v), doc=\"The membership flag\") __repr__ = _swig_repr def __init__(self, m,", "vdim=1) \"\"\" return _gridfunc.GridFunction_GetHessians(self, *args) GetHessians = _swig_new_instance_method(_gridfunc.GridFunction_GetHessians) def GetValuesFrom(self,", "__repr__ = _swig_repr CONSTANT = _gridfunc.JumpScaling_CONSTANT ONE_OVER_H = _gridfunc.JumpScaling_ONE_OVER_H P_SQUARED_OVER_H", "*[] gf_array, int num_pieces) -> GridFunction __init__(GridFunction self, FiniteElementSpace fes,", "Coefficient ell_coeff, 
JumpScaling jump_scaling, mfem::IntegrationRule const *[] irs=0) -> double", "import mfem._par.ncmesh import mfem._par.vtk import mfem._par.element import mfem._par.table import mfem._par.hash", "self.this.__repr__() except __builtin__.Exception: strthis = \"\" return \"<%s.%s; %s >\"", "tv): r\"\"\"SetFromTrueDofs(GridFunction self, Vector tv)\"\"\" return _gridfunc.GridFunction_SetFromTrueDofs(self, tv) SetFromTrueDofs =", "to this file unless you know what you are doing--modify", "= _swig_new_instance_method(_gridfunc.GridFunction_WriteToStream) def iadd(self, c): r\"\"\"iadd(GridFunction self, GridFunction c) ->", "GridFunction\"\"\" return _gridfunc.GridFunction_iadd(self, c) iadd = _swig_new_instance_method(_gridfunc.GridFunction_iadd) def isub(self, *args):", "vcoeff, bdr_attr) ProjectBdrCoefficientNormal = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientNormal) def ProjectBdrCoefficientTangent(self, vcoeff, bdr_attr): r\"\"\"ProjectBdrCoefficientTangent(GridFunction", "_gridfunc.QuadratureFunction_SetVDim(self, vdim_) SetVDim = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetVDim) def OwnsSpace(self): r\"\"\"OwnsSpace(QuadratureFunction self) ->", "ComputeL2Error(GridFunction self, mfem::Coefficient *[] exsol, mfem::IntegrationRule const *[] irs=0) ->", "i, IntegrationPoint ip, int vdim=1) -> double GetValue(GridFunction self, ElementTransformation", "flag\") __repr__ = _swig_repr CONSTANT = _gridfunc.JumpScaling_CONSTANT ONE_OVER_H = _gridfunc.JumpScaling_ONE_OVER_H", "*args) GetValue = _swig_new_instance_method(_gridfunc.GridFunction_GetValue) def GetVectorValue(self, *args): r\"\"\" GetVectorValue(GridFunction self,", "double\"\"\" return _gridfunc.GridFunction_ComputeHCurlError(self, exsol, excurl, irs) ComputeHCurlError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeHCurlError) def", "_gridfunc.delete_GridFunction def __init__(self, *args): r\"\"\" __init__(GridFunction self) -> GridFunction 
__init__(GridFunction", "if __package__ or \".\" in __name__: from . import _gridfunc", "exsol, VectorCoefficient exgrad, Coefficient ell_coef, double Nu, int norm_type) ->", "return _gridfunc.GridFunction_Save(self, *args) Save = _swig_new_instance_method(_gridfunc.GridFunction_Save) def SaveGZ(self, file, precision=16):", "def Update(self): r\"\"\"Update(GridFunction self)\"\"\" return _gridfunc.GridFunction_Update(self) Update = _swig_new_instance_method(_gridfunc.GridFunction_Update) def", "_swig_new_instance_method(_gridfunc.GridFunction_GetCurl) def GetGradient(self, tr, grad): r\"\"\"GetGradient(GridFunction self, ElementTransformation tr, Vector", "_swig_new_instance_method = _gridfunc.SWIG_PyInstanceMethod_New _swig_new_static_method = _gridfunc.SWIG_PyStaticMethod_New def _swig_repr(self): try: strthis", "out, int TimesToRefine=1)\"\"\" return _gridfunc.GridFunction_SaveSTL(self, out, TimesToRefine) SaveSTL = _swig_new_instance_method(_gridfunc.GridFunction_SaveSTL)", "ElementTransformation tr, Vector grad)\"\"\" return _gridfunc.GridFunction_GetGradient(self, tr, grad) GetGradient =", "*args): r\"\"\" ComputeElementLpErrors(GridFunction self, double const p, Coefficient exsol, Vector", "idx, Vector values) GetElementValues(QuadratureFunction self, int idx, Vector values) GetElementValues(QuadratureFunction", "bdr_attr)\"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficientTangent(self, vcoeff, bdr_attr) ProjectBdrCoefficientTangent = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientTangent) def ComputeL2Error(self,", "_swig_new_instance_method(_gridfunc.GridFunction_GetVectorValue) def GetValues(self, *args): r\"\"\" GetValues(GridFunction self, int i, IntegrationRule", "*args): r\"\"\" GetVectorValue(GridFunction self, int i, IntegrationPoint ip, Vector val)", "flux, Vector error_estimates, intArray aniso_flags=None, int with_subdomains=1, bool with_coeff=False) ->", "mfem._par.mesh import mfem._par.sort_pairs import 
mfem._par.ncmesh import mfem._par.vtk import mfem._par.element import", "= _swig_new_instance_method(_gridfunc.GridFunction_imul) def idiv(self, c): r\"\"\"idiv(GridFunction self, double c) ->", "c): r\"\"\"idiv(GridFunction self, double c) -> GridFunction\"\"\" return _gridfunc.GridFunction_idiv(self, c)", "Save(GridFunction self, char const * fname, int precision=16) Save(GridFunction self,", "VectorCoefficient exgrad, int norm_type, intArray elems=None, mfem::IntegrationRule const *[] irs=0)", "= _swig_new_instance_method(_gridfunc.GridFunction_GetCurl) def GetGradient(self, tr, grad): r\"\"\"GetGradient(GridFunction self, ElementTransformation tr,", "self def __isub__(self, v): ret = _gridfunc.GridFunction_isub(self, v) ret.thisown =", "exsol, Vector error, Coefficient weight=None, mfem::IntegrationRule const *[] irs=0) ComputeElementLpErrors(GridFunction", "def __imul__(self, v): ret = _gridfunc.GridFunction_imul(self, v) ret.thisown = 0", "_gridfunc.QuadratureFunction_GetElementIntRule(self, idx) GetElementIntRule = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementIntRule) def GetElementValues(self, *args): r\"\"\" GetElementValues(QuadratureFunction", "sol, ny) Extrude1DGridFunction = _gridfunc.Extrude1DGridFunction def __iadd__(self, v): ret =", "dofs, int vd=0) ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff) ProjectCoefficient(GridFunction self, VectorCoefficient", "# # Do not make changes to this file unless", "ir, DenseMatrix grad) \"\"\" return _gridfunc.GridFunction_GetGradients(self, *args) GetGradients = _swig_new_instance_method(_gridfunc.GridFunction_GetGradients)", "= _swig_new_instance_method(_gridfunc.GridFunction_iadd) def isub(self, *args): r\"\"\" isub(GridFunction self, GridFunction c)", "Mesh mesh, std::istream & _in) -> QuadratureFunction \"\"\" _gridfunc.QuadratureFunction_swiginit(self, _gridfunc.new_QuadratureFunction(*args))", "a metaclass to a SWIG wrapped class - a slimmed", "tr=None) \"\"\" return 
_gridfunc.GridFunction_GetValues(self, *args) GetValues = _swig_new_instance_method(_gridfunc.GridFunction_GetValues) def GetVectorValues(self,", "ret = _gridfunc.GridFunction_imul(self, v) ret.thisown = 0 return self GridFunction.__iadd__", "intArray elems=None) -> double \"\"\" return _gridfunc.GridFunction_ComputeL2Error(self, *args) ComputeL2Error =", "def ComputeGradError(self, exgrad, irs=0): r\"\"\"ComputeGradError(GridFunction self, VectorCoefficient exgrad, mfem::IntegrationRule const", "**kwargs): r\"\"\"__init__(JumpScaling self, double nu_=1.0, mfem::JumpScaling::JumpScalingType type_=CONSTANT) -> JumpScaling\"\"\" _gridfunc.JumpScaling_swiginit(self,", "double ComputeDGFaceJumpError(GridFunction self, Coefficient exsol, Coefficient ell_coeff, double Nu, mfem::IntegrationRule", "def ComputeDGFaceJumpError(self, *args): r\"\"\" ComputeDGFaceJumpError(GridFunction self, Coefficient exsol, Coefficient ell_coeff,", "mesh) -> std::ostream __lshift__(std::ostream & out, GridFunction sol) -> std::ostream", "r\"\"\"SetSpace(GridFunction self, FiniteElementSpace f)\"\"\" return _gridfunc.GridFunction_SetSpace(self, f) SetSpace = _swig_new_instance_method(_gridfunc.GridFunction_SetSpace)", "r\"\"\"Proxy of C++ mfem::ExtrudeCoefficient class.\"\"\" thisown = property(lambda x: x.this.own(),", "*args): r\"\"\" ProjectDiscCoefficient(GridFunction self, VectorCoefficient coeff) ProjectDiscCoefficient(GridFunction self, Coefficient coeff,", "= _gridfunc.GridFunction_ARITHMETIC HARMONIC = _gridfunc.GridFunction_HARMONIC def ProjectDiscCoefficient(self, *args): r\"\"\" ProjectDiscCoefficient(GridFunction", "= \"proxy of \" + self.this.__repr__() except __builtin__.Exception: strthis =", "def __init__(self, *args): r\"\"\" __init__(GridFunction self) -> GridFunction __init__(GridFunction self,", "min_=0.0, double max_=mfem::infinity()) \"\"\" return _gridfunc.GridFunction_ImposeBounds(self, *args) ImposeBounds = 
_swig_new_instance_method(_gridfunc.GridFunction_ImposeBounds)", "*[] irs=0) -> double ComputeMaxError(GridFunction self, mfem::Coefficient *[] exsol, mfem::IntegrationRule", "int idx, DenseMatrix values) GetElementValues(QuadratureFunction self, int idx, DenseMatrix values)", "QuadratureSpace qspace_, int vdim_=1) -> QuadratureFunction __init__(QuadratureFunction self, QuadratureSpace qspace_,", "def GetVectorGradient(self, tr, grad): r\"\"\"GetVectorGradient(GridFunction self, ElementTransformation tr, DenseMatrix grad)\"\"\"", "max_=mfem::infinity()) \"\"\" return _gridfunc.GridFunction_ImposeBounds(self, *args) ImposeBounds = _swig_new_instance_method(_gridfunc.GridFunction_ImposeBounds) def RestrictConforming(self):", "return _gridfunc.GridFunction_SetSpace(self, f) SetSpace = _swig_new_instance_method(_gridfunc.GridFunction_SetSpace) def MakeRef(self, *args): r\"\"\"", "c) idiv = _swig_new_instance_method(_gridfunc.GridFunction_idiv) def Save(self, *args): r\"\"\" Save(GridFunction self,", "*[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementMaxErrors(self, *args) ComputeElementMaxErrors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementMaxErrors) def", "der_comp, der): r\"\"\"GetDerivative(GridFunction self, int comp, int der_comp, GridFunction der)\"\"\"", "r\"\"\"SetFromTrueVector(GridFunction self)\"\"\" return _gridfunc.GridFunction_SetFromTrueVector(self) SetFromTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueVector) def GetValue(self, *args):", "VectorCoefficient exsol, Coefficient exdiv, mfem::IntegrationRule const *[] irs=0) -> double\"\"\"", "double\"\"\" return _gridfunc.GridFunction_ComputeGradError(self, exgrad, irs) ComputeGradError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeGradError) def ComputeCurlError(self,", "JumpScaling jump_scaling, mfem::IntegrationRule const *[] irs=0) -> double ComputeDGFaceJumpError(GridFunction self,", "vec.thisown = 0 return vec.GetDataArray() else: 
return _gridfunc.GridFunction_GetNodalValues(self, *args) def", "c) -> GridFunction\"\"\" return _gridfunc.GridFunction_idiv(self, c) idiv = _swig_new_instance_method(_gridfunc.GridFunction_idiv) def", "grad): r\"\"\"GetGradient(GridFunction self, ElementTransformation tr, Vector grad)\"\"\" return _gridfunc.GridFunction_GetGradient(self, tr,", "double value) -> GridFunction Assign(GridFunction self, Vector v) -> GridFunction", "return _gridfunc.__lshift__(*args) __lshift__ = _gridfunc.__lshift__ def ZZErrorEstimator(blfi, u, flux, error_estimates,", "return _gridfunc.GridFunction_ComputeL1Error(self, *args) ComputeL1Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeL1Error) def ComputeLpError(self, *args): r\"\"\"", "= _swig_new_instance_method(_gridfunc.QuadratureFunction_OwnsSpace) def SetOwnsSpace(self, own): r\"\"\"SetOwnsSpace(QuadratureFunction self, bool own)\"\"\" return", "IntegrationRule ir, DenseMatrix vals, DenseMatrix tr=None) \"\"\" return _gridfunc.GridFunction_GetVectorValues(self, *args)", "ComputeElementLpErrors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementLpErrors) def ComputeElementL1Errors(self, *args): r\"\"\" ComputeElementL1Errors(GridFunction self, Coefficient", "self, Coefficient exsol, VectorCoefficient exgrad, int norm_type, intArray elems=None, mfem::IntegrationRule", "exsol, VectorCoefficient exgrad, int norm_type, intArray elems=None, mfem::IntegrationRule const *[]", "_swig_setattr_nondynamic_class_variable(set): def set_class_attr(cls, name, value): if hasattr(cls, name) and not", "attribute) ProjectCoefficient(GridFunction self, mfem::Coefficient *[] coeff) \"\"\" return _gridfunc.GridFunction_ProjectCoefficient(self, *args)", "_gridfunc.GridFunction_GetCurl(self, tr, curl) GetCurl = _swig_new_instance_method(_gridfunc.GridFunction_GetCurl) def GetGradient(self, tr, grad):", "*args): r\"\"\" ComputeDGFaceJumpError(GridFunction self, Coefficient exsol, Coefficient ell_coeff, JumpScaling 
jump_scaling,", "DenseMatrix hess, DenseMatrix tr, int vdim=1) \"\"\" return _gridfunc.GridFunction_GetHessians(self, *args)", "tv, int tv_offset) \"\"\" return _gridfunc.GridFunction_MakeTRef(self, *args) MakeTRef = _swig_new_instance_method(_gridfunc.GridFunction_MakeTRef)", "mfem::JumpScaling class.\"\"\" thisown = property(lambda x: x.this.own(), lambda x, v:", "*[] irs=0) -> double ComputeDGFaceJumpError(GridFunction self, Coefficient exsol, Coefficient ell_coeff,", "const * file, int precision=16)\"\"\" return _gridfunc.QuadratureFunction_SaveGZ(self, file, precision) SaveGZ", "orig) -> GridFunction __init__(GridFunction self, FiniteElementSpace f) -> GridFunction __init__(GridFunction", "mfem._par.vector import mfem._par.coefficient import mfem._par.globals import mfem._par.matrix import mfem._par.operators import", "weights, double min_=0.0, double max_=mfem::infinity()) \"\"\" return _gridfunc.GridFunction_ImposeBounds(self, *args) ImposeBounds", "Vector error, mfem::IntegrationRule const *[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementMaxErrors(self, *args)", "__package__ or \".\" in __name__: from . 
import _gridfunc else:", "GetVectorValue = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorValue) def GetValues(self, *args): r\"\"\" GetValues(GridFunction self, int", "= _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldValues) def ReorderByNodes(self): r\"\"\"ReorderByNodes(GridFunction self)\"\"\" return _gridfunc.GridFunction_ReorderByNodes(self) ReorderByNodes =", "def GetValuesFrom(self, orig_func): r\"\"\"GetValuesFrom(GridFunction self, GridFunction orig_func)\"\"\" return _gridfunc.GridFunction_GetValuesFrom(self, orig_func)", "-> double\"\"\" return _gridfunc.GridFunction_ComputeHCurlError(self, exsol, excurl, irs) ComputeHCurlError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeHCurlError)", "\"\"\" return _gridfunc.GridFunction_ComputeElementL2Errors(self, *args) ComputeElementL2Errors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL2Errors) def ComputeElementMaxErrors(self, *args):", "Vector values) GetElementValues(QuadratureFunction self, int idx, DenseMatrix values) GetElementValues(QuadratureFunction self,", "sol, ny): r\"\"\"Extrude1DGridFunction(Mesh mesh, Mesh mesh2d, GridFunction sol, int const", "CONSTANT = _gridfunc.JumpScaling_CONSTANT ONE_OVER_H = _gridfunc.JumpScaling_ONE_OVER_H P_SQUARED_OVER_H = _gridfunc.JumpScaling_P_SQUARED_OVER_H def", "= _swig_new_instance_method(_gridfunc.GridFunction_GetVectorValues) def GetFaceValues(self, i, side, ir, vals, tr, vdim=1):", "__init__(GridFunction self, Mesh m, mfem::GridFunction *[] gf_array, int num_pieces) ->", "ComputeFlux(self, blfi, flux, wcoef=True, subdomain=-1): r\"\"\"ComputeFlux(GridFunction self, BilinearFormIntegrator blfi, GridFunction", "ir, Vector vals, int comp=0, DenseMatrix tr=None) \"\"\" return _gridfunc.GridFunction_GetValues(self,", "SaveGZ = _swig_new_instance_method(_gridfunc.QuadratureFunction_SaveGZ) # Register QuadratureFunction in _gridfunc: _gridfunc.QuadratureFunction_swigregister(QuadratureFunction) def", 
"ProjectCoefficient(GridFunction self, Coefficient coeff) ProjectCoefficient(GridFunction self, Coefficient coeff, intArray dofs,", "const *[] irs=0) -> double ComputeL2Error(GridFunction self, mfem::Coefficient *[] exsol,", "in __name__: from . import _gridfunc else: import _gridfunc try:", "r\"\"\"SetOwnsSpace(QuadratureFunction self, bool own)\"\"\" return _gridfunc.QuadratureFunction_SetOwnsSpace(self, own) SetOwnsSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetOwnsSpace)", "Vector error, mfem::IntegrationRule const *[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementL1Errors(self, *args)", "GetVectorGradient(self, tr, grad): r\"\"\"GetVectorGradient(GridFunction self, ElementTransformation tr, DenseMatrix grad)\"\"\" return", "def Save(self, *args): r\"\"\" Save(QuadratureFunction self, std::ostream & out) Save(QuadratureFunction", "= _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementLpErrors) def ComputeElementL1Errors(self, *args): r\"\"\" ComputeElementL1Errors(GridFunction self, Coefficient exsol,", "tr): r\"\"\"GetFaceVectorValues(GridFunction self, int i, int side, IntegrationRule ir, DenseMatrix", "p, Coefficient exsol, Vector error, Coefficient weight=None, mfem::IntegrationRule const *[]", "self, Coefficient exsol, VectorCoefficient exgrad, Coefficient ell_coef, double Nu, int", "& os, SparseMatrix mat) -> std::ostream __lshift__(std::ostream & out, Mesh", "GridFunction __init__(GridFunction self, FiniteElementSpace f, double * data) -> GridFunction", "-> IntegrationRule\"\"\" return _gridfunc.QuadratureFunction_GetElementIntRule(self, idx) GetElementIntRule = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementIntRule) def GetElementValues(self,", "property): set(cls, name, value) else: raise AttributeError(\"You cannot add class", "*args) GetTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_GetTrueVector) def GetTrueDofs(self, tv): r\"\"\"GetTrueDofs(GridFunction self, Vector", 
"GridFunction c) -> GridFunction isub(GridFunction self, double c) -> GridFunction", "irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementL1Errors(self, *args) ComputeElementL1Errors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL1Errors) def ComputeElementL2Errors(self,", "v_weight=None, mfem::IntegrationRule const *[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementLpErrors(self, *args) ComputeElementLpErrors", "r\"\"\"imul(GridFunction self, double c) -> GridFunction\"\"\" return _gridfunc.GridFunction_imul(self, c) imul", "def GetTrueDofs(self, tv): r\"\"\"GetTrueDofs(GridFunction self, Vector tv)\"\"\" return _gridfunc.GridFunction_GetTrueDofs(self, tv)", "elems=None, mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeW11Error(self, exsol,", "r\"\"\" Assign(GridFunction self, GridFunction rhs) -> GridFunction Assign(GridFunction self, double", "Assign(GridFunction self, Vector v) -> GridFunction \"\"\" return _gridfunc.GridFunction_Assign(self, *args)", "ret = _gridfunc.GridFunction_isub(self, v) ret.thisown = 0 return self def", "ip) Eval = _swig_new_instance_method(_gridfunc.ExtrudeCoefficient_Eval) __swig_destroy__ = _gridfunc.delete_ExtrudeCoefficient # Register ExtrudeCoefficient", "DenseMatrix tr, int vdim=1) GetValues(GridFunction self, ElementTransformation T, IntegrationRule ir,", "SaveGZ(self, file, precision=16): r\"\"\"SaveGZ(GridFunction self, char const * file, int", "r\"\"\"WriteToStream(GridFunction self, PyObject * StringIO) -> PyObject *\"\"\" return _gridfunc.GridFunction_WriteToStream(self,", "StringIO) WriteToStream = _swig_new_instance_method(_gridfunc.GridFunction_WriteToStream) def iadd(self, c): r\"\"\"iadd(GridFunction self, GridFunction", "error_estimates, aniso_flags, with_subdomains, with_coeff) ZZErrorEstimator = _gridfunc.ZZErrorEstimator def ComputeElementLpDistance(p, i,", "der): r\"\"\"GetDerivative(GridFunction self, int comp, int der_comp, 
GridFunction der)\"\"\" return", "_gridfunc.QuadratureFunction_SetSpace(self, *args) SetSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetSpace) def GetVDim(self): r\"\"\"GetVDim(QuadratureFunction self) ->", "_gridfunc.ZZErrorEstimator(blfi, u, flux, error_estimates, aniso_flags, with_subdomains, with_coeff) ZZErrorEstimator = _gridfunc.ZZErrorEstimator", "r\"\"\" FESpace(GridFunction self) -> FiniteElementSpace FESpace(GridFunction self) -> FiniteElementSpace \"\"\"", "def SaveGZ(self, file, precision=16): r\"\"\"SaveGZ(QuadratureFunction self, char const * file,", "ZZErrorEstimator = _gridfunc.ZZErrorEstimator def ComputeElementLpDistance(p, i, gf1, gf2): r\"\"\"ComputeElementLpDistance(double p,", "s, n_): r\"\"\"__init__(ExtrudeCoefficient self, Mesh m, Coefficient s, int n_)", "= _swig_new_instance_method(_gridfunc.QuadratureFunction_GetVDim) def SetVDim(self, vdim_): r\"\"\"SetVDim(QuadratureFunction self, int vdim_)\"\"\" return", "(no new attributes) for a class\"\"\" __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__) import", "if name == \"thisown\": self.this.own(value) elif name == \"this\": set(self,", "GetSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetSpace) def SetSpace(self, *args): r\"\"\" SetSpace(QuadratureFunction self, QuadratureSpace", "return _gridfunc.GridFunction_Assign(self, *args) Assign = _swig_new_instance_method(_gridfunc.GridFunction_Assign) def Update(self): r\"\"\"Update(GridFunction self)\"\"\"", "r\"\"\"SaveGZ(QuadratureFunction self, char const * file, int precision=16)\"\"\" return _gridfunc.QuadratureFunction_SaveGZ(self,", "class GridFunction(mfem._par.vector.Vector): r\"\"\"Proxy of C++ mfem::GridFunction class.\"\"\" thisown = property(lambda", "JumpScaling\"\"\" _gridfunc.JumpScaling_swiginit(self, _gridfunc.new_JumpScaling(*args, **kwargs)) def Eval(self, h, p): r\"\"\"Eval(JumpScaling self,", "VectorCoefficient v_weight=None, mfem::IntegrationRule const *[] irs=0) 
\"\"\" return _gridfunc.GridFunction_ComputeElementLpErrors(self, *args)", "SetVDim = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetVDim) def OwnsSpace(self): r\"\"\"OwnsSpace(QuadratureFunction self) -> bool\"\"\" return", "*args): r\"\"\" __init__(QuadratureFunction self) -> QuadratureFunction __init__(QuadratureFunction self, QuadratureFunction orig)", "u, flux, error_estimates, aniso_flags, with_subdomains, with_coeff) ZZErrorEstimator = _gridfunc.ZZErrorEstimator def", "0 return self def __idiv__(self, v): ret = _gridfunc.GridFunction_idiv(self, v)", "file, precision=16): r\"\"\"SaveGZ(GridFunction self, char const * file, int precision=16)\"\"\"", "% (self.__class__.__module__, self.__class__.__name__, strthis,) def _swig_setattr_nondynamic_instance_variable(set): def set_instance_attr(self, name, value):", "VectorCoefficient vcoeff, intArray attr) ProjectBdrCoefficient(GridFunction self, mfem::Coefficient *[] coeff, intArray", "mfem::IntegrationRule const *[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeH1Error(self, *args)", "vals, DenseMatrix tr) GetVectorValues(GridFunction self, ElementTransformation T, IntegrationRule ir, DenseMatrix", "r\"\"\" __init__(GridFunction self) -> GridFunction __init__(GridFunction self, GridFunction orig) ->", "return _gridfunc.QuadratureFunction_GetElementIntRule(self, idx) GetElementIntRule = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementIntRule) def GetElementValues(self, *args): r\"\"\"", "= _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL1Errors) def ComputeElementL2Errors(self, *args): r\"\"\" ComputeElementL2Errors(GridFunction self, Coefficient exsol,", "exgrad, mfem::IntegrationRule const *[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeH1Error(self,", "Register QuadratureFunction in _gridfunc: _gridfunc.QuadratureFunction_swigregister(QuadratureFunction) def __lshift__(*args): r\"\"\" __lshift__(std::ostream &", "comp=0): 
r\"\"\"GetVectorFieldValues(GridFunction self, int i, IntegrationRule ir, DenseMatrix vals, DenseMatrix", "r\"\"\" ProjectBdrCoefficient(GridFunction self, Coefficient coeff, intArray attr) ProjectBdrCoefficient(GridFunction self, VectorCoefficient", "ComputeL1Error(self, *args): r\"\"\" ComputeL1Error(GridFunction self, Coefficient exsol, mfem::IntegrationRule const *[]", "= _swig_new_instance_method(_gridfunc.GridFunction_GetTrueVector) def GetTrueDofs(self, tv): r\"\"\"GetTrueDofs(GridFunction self, Vector tv)\"\"\" return", "* file, int precision=16)\"\"\" return _gridfunc.GridFunction_SaveGZ(self, file, precision) SaveGZ =", "cls.__dict__.copy()) return wrapper class _SwigNonDynamicMeta(type): \"\"\"Meta class to enforce nondynamic", "i, side, ir, vals, tr) GetFaceVectorValues = _swig_new_instance_method(_gridfunc.GridFunction_GetFaceVectorValues) def GetLaplacians(self,", "qf_data, int vdim_=1) -> QuadratureFunction __init__(QuadratureFunction self, Mesh mesh, std::istream", "return _gridfunc.GridFunction_SetTrueVector(self) SetTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_SetTrueVector) def SetFromTrueDofs(self, tv): r\"\"\"SetFromTrueDofs(GridFunction self,", "gf_array, int num_pieces) -> GridFunction __init__(GridFunction self, FiniteElementSpace fes, Vector", "import builtins as __builtin__ except ImportError: import __builtin__ _swig_new_instance_method =", "self) -> GridFunction __init__(GridFunction self, GridFunction orig) -> GridFunction __init__(GridFunction", "= _swig_new_instance_method(_gridfunc.GridFunction_GetElementAverages) def GetElementDofValues(self, el, dof_vals): r\"\"\"GetElementDofValues(GridFunction self, int el,", "%s\" % self) return set_instance_attr def _swig_setattr_nondynamic_class_variable(set): def set_class_attr(cls, name,", "return _gridfunc.GridFunction_GetElementDofValues(self, el, dof_vals) GetElementDofValues = _swig_new_instance_method(_gridfunc.GridFunction_GetElementDofValues) def ImposeBounds(self, 
*args):", "exsol, exgrad, norm_type, elems=None, irs=0): r\"\"\"ComputeW11Error(GridFunction self, Coefficient exsol, VectorCoefficient", "vcoeff, intArray bdr_attr)\"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficientTangent(self, vcoeff, bdr_attr) ProjectBdrCoefficientTangent = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientTangent)", "def OwnFEC(self): r\"\"\"OwnFEC(GridFunction self) -> FiniteElementCollection\"\"\" return _gridfunc.GridFunction_OwnFEC(self) OwnFEC =", "Coefficient coeff, intArray dofs, int vd=0) ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff)", "GetVectorValue(GridFunction self, ElementTransformation T, IntegrationPoint ip, Vector val, Vector tr=None)", "VectorCoefficient coeff, mfem::GridFunction::AvgType type) \"\"\" return _gridfunc.GridFunction_ProjectDiscCoefficient(self, *args) ProjectDiscCoefficient =", "IntegrationRule ir, Vector vals, int vdim=1) GetValues(GridFunction self, int i,", "VectorCoefficient exgrad, mfem::IntegrationRule const *[] irs=0) -> double \"\"\" return", "def GetHessians(self, *args): r\"\"\" GetHessians(GridFunction self, int i, IntegrationRule ir,", "r\"\"\"GetDivergence(GridFunction self, ElementTransformation tr) -> double\"\"\" return _gridfunc.GridFunction_GetDivergence(self, tr) GetDivergence", "def GetVectorValues(self, *args): r\"\"\" GetVectorValues(GridFunction self, int i, IntegrationRule ir,", "def ProjectGridFunction(self, src): r\"\"\"ProjectGridFunction(GridFunction self, GridFunction src)\"\"\" return _gridfunc.GridFunction_ProjectGridFunction(self, src)", "= _swig_new_instance_method(_gridfunc.GridFunction_VectorDim) def GetTrueVector(self, *args): r\"\"\" GetTrueVector(GridFunction self) -> Vector", "irs=0) -> double ComputeDGFaceJumpError(GridFunction self, Coefficient exsol, Coefficient ell_coeff, double", "*args) def GetVectorFieldNodalValues(self, val, comp): r\"\"\"GetVectorFieldNodalValues(GridFunction self, Vector val, int", 
"r\"\"\"ComputeElementLpDistance(double p, int i, GridFunction gf1, GridFunction gf2) -> double\"\"\"", "self, FiniteElementSpace f)\"\"\" return _gridfunc.GridFunction_SetSpace(self, f) SetSpace = _swig_new_instance_method(_gridfunc.GridFunction_SetSpace) def", "GridFunction isub(GridFunction self, double c) -> GridFunction \"\"\" return _gridfunc.GridFunction_isub(self,", "-> double\"\"\" return _gridfunc.GridFunction_ComputeCurlError(self, excurl, irs) ComputeCurlError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeCurlError) def", "__init__(GridFunction self, FiniteElementSpace fes, Vector v, int offset) -> GridFunction", "Assign(GridFunction self, GridFunction rhs) -> GridFunction Assign(GridFunction self, double value)", "Vector if len(args) == 1: vec = Vector() _gridfunc.GridFunction_GetNodalValues(self, vec,", "FESpace(GridFunction self) -> FiniteElementSpace \"\"\" return _gridfunc.GridFunction_FESpace(self, *args) FESpace =", "wrapped class - a slimmed down version of six.add_metaclass\"\"\" def", "v): ret = _gridfunc.GridFunction_imul(self, v) ret.thisown = 0 return self", "_swig_new_instance_method(_gridfunc.GridFunction_ComputeHDivError) def ComputeHCurlError(self, exsol, excurl, irs=0): r\"\"\"ComputeHCurlError(GridFunction self, VectorCoefficient exsol,", "__init__(QuadratureFunction self, Mesh mesh, std::istream & _in) -> QuadratureFunction \"\"\"", "import version_info as _swig_python_version_info if _swig_python_version_info < (2, 7, 0):", "def _swig_repr(self): try: strthis = \"proxy of \" + self.this.__repr__()", "GridFunction\"\"\" return _gridfunc.GridFunction_idiv(self, c) idiv = _swig_new_instance_method(_gridfunc.GridFunction_idiv) def Save(self, *args):", "mfem._par.vtk import mfem._par.element import mfem._par.table import mfem._par.hash import mfem._par.vertex import", "Vector base, int offset) MakeRef(GridFunction self, FiniteElementSpace f, double *", "GridFunction __init__(GridFunction self, Mesh m, mfem::GridFunction *[] 
gf_array, int num_pieces)", "return _gridfunc.GridFunction_OwnFEC(self) OwnFEC = _swig_new_instance_method(_gridfunc.GridFunction_OwnFEC) def VectorDim(self): r\"\"\"VectorDim(GridFunction self) ->", "GetValue(GridFunction self, int i, IntegrationPoint ip, int vdim=1) -> double", "i, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr) GetVectorValues(GridFunction self, ElementTransformation", "*args) GetLaplacians = _swig_new_instance_method(_gridfunc.GridFunction_GetLaplacians) def GetHessians(self, *args): r\"\"\" GetHessians(GridFunction self,", "bool own)\"\"\" return _gridfunc.QuadratureFunction_SetOwnsSpace(self, own) SetOwnsSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetOwnsSpace) def GetElementIntRule(self,", "int vdim=1) -> double GetValue(GridFunction self, ElementTransformation T, IntegrationPoint ip,", "GetHessians = _swig_new_instance_method(_gridfunc.GridFunction_GetHessians) def GetValuesFrom(self, orig_func): r\"\"\"GetValuesFrom(GridFunction self, GridFunction orig_func)\"\"\"", "r\"\"\" GetValues(GridFunction self, int i, IntegrationRule ir, Vector vals, int", "_swig_new_instance_method(_gridfunc.GridFunction_Save) def SaveGZ(self, file, precision=16): r\"\"\"SaveGZ(GridFunction self, char const *", "exgrad, Coefficient ell_coef, double Nu, int norm_type) -> double ComputeH1Error(GridFunction", "def wrapper(cls): return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy()) return wrapper class _SwigNonDynamicMeta(type):", "0 return vec.GetDataArray() else: return _gridfunc.GridFunction_GetNodalValues(self, *args) def GetVectorFieldNodalValues(self, val,", "exsol, exdiv, irs=0): r\"\"\"ComputeHDivError(GridFunction self, VectorCoefficient exsol, Coefficient exdiv, mfem::IntegrationRule", "C++ mfem::JumpScaling class.\"\"\" thisown = property(lambda x: x.this.own(), lambda x,", "ret = _gridfunc.GridFunction_iadd(self, v) ret.thisown = 0 return self def", "class JumpScaling(object): r\"\"\"Proxy of C++ mfem::JumpScaling 
class.\"\"\" thisown = property(lambda", "_gridfunc.JumpScaling_CONSTANT ONE_OVER_H = _gridfunc.JumpScaling_ONE_OVER_H P_SQUARED_OVER_H = _gridfunc.JumpScaling_P_SQUARED_OVER_H def __init__(self, *args,", "mfem._par.fespace import mfem._par.mesh import mfem._par.sort_pairs import mfem._par.ncmesh import mfem._par.vtk import", "import mfem._par.nonlininteg class GridFunction(mfem._par.vector.Vector): r\"\"\"Proxy of C++ mfem::GridFunction class.\"\"\" thisown", "def __iadd__(self, v): ret = _gridfunc.GridFunction_iadd(self, v) ret.thisown = 0", "tr) GetFaceVectorValues = _swig_new_instance_method(_gridfunc.GridFunction_GetFaceVectorValues) def GetLaplacians(self, *args): r\"\"\" GetLaplacians(GridFunction self,", "with_subdomains=1, with_coeff=False): r\"\"\"ZZErrorEstimator(BilinearFormIntegrator blfi, GridFunction u, GridFunction flux, Vector error_estimates,", "self, Vector tv)\"\"\" return _gridfunc.GridFunction_SetFromTrueDofs(self, tv) SetFromTrueDofs = _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueDofs) def", "SetOwnsSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetOwnsSpace) def GetElementIntRule(self, idx): r\"\"\"GetElementIntRule(QuadratureFunction self, int idx)", "flux, wcoef, subdomain) ComputeFlux = _swig_new_instance_method(_gridfunc.GridFunction_ComputeFlux) def Assign(self, *args): r\"\"\"", "v): ret = _gridfunc.GridFunction_isub(self, v) ret.thisown = 0 return self", "\"\"\" return _gridfunc.GridFunction_GetLaplacians(self, *args) GetLaplacians = _swig_new_instance_method(_gridfunc.GridFunction_GetLaplacians) def GetHessians(self, *args):", "ip, Vector val, Vector tr=None) \"\"\" return _gridfunc.GridFunction_GetVectorValue(self, *args) GetVectorValue", "_gridfunc.GridFunction_ComputeFlux(self, blfi, flux, wcoef, subdomain) ComputeFlux = _swig_new_instance_method(_gridfunc.GridFunction_ComputeFlux) def Assign(self,", "Eval(self, h, p): r\"\"\"Eval(JumpScaling self, double h, int p) ->", "bool wcoef=True, int 
subdomain=-1)\"\"\" return _gridfunc.GridFunction_ComputeFlux(self, blfi, flux, wcoef, subdomain)", "fec_): r\"\"\"MakeOwner(GridFunction self, FiniteElementCollection fec_)\"\"\" return _gridfunc.GridFunction_MakeOwner(self, fec_) MakeOwner =", "_gridfunc.GridFunction_SetFromTrueDofs(self, tv) SetFromTrueDofs = _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueDofs) def SetFromTrueVector(self): r\"\"\"SetFromTrueVector(GridFunction self)\"\"\" return", "import mfem._par.sparsemat import mfem._par.densemat import mfem._par.eltrans import mfem._par.fe import mfem._par.geom", "_gridfunc.GridFunction_GetFaceValues(self, i, side, ir, vals, tr, vdim) GetFaceValues = _swig_new_instance_method(_gridfunc.GridFunction_GetFaceValues)", "const *[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeLpError(self, *args) ComputeLpError", "Eval = _swig_new_instance_method(_gridfunc.JumpScaling_Eval) __swig_destroy__ = _gridfunc.delete_JumpScaling # Register JumpScaling in", "IntegrationRule ir, DenseMatrix grad) GetGradients(GridFunction self, int const elem, IntegrationRule", "by SWIG (http://www.swig.org). 
# Version 4.0.2 # # Do not", "GetValues = _swig_new_instance_method(_gridfunc.GridFunction_GetValues) def GetVectorValues(self, *args): r\"\"\" GetVectorValues(GridFunction self, int", "= _swig_new_instance_method(_gridfunc.GridFunction_ComputeL2Error) def ComputeGradError(self, exgrad, irs=0): r\"\"\"ComputeGradError(GridFunction self, VectorCoefficient exgrad,", "side, ir, vals, tr) GetFaceVectorValues = _swig_new_instance_method(_gridfunc.GridFunction_GetFaceVectorValues) def GetLaplacians(self, *args):", "offset) MakeRef(GridFunction self, FiniteElementSpace f, double * v) MakeRef(GridFunction self,", "type) ProjectDiscCoefficient(GridFunction self, VectorCoefficient coeff, mfem::GridFunction::AvgType type) \"\"\" return _gridfunc.GridFunction_ProjectDiscCoefficient(self,", "Vector values) GetElementValues(QuadratureFunction self, int idx, int const ip_num, Vector", "flag\") __repr__ = _swig_repr def MakeOwner(self, fec_): r\"\"\"MakeOwner(GridFunction self, FiniteElementCollection", "set(self, name, value) else: raise AttributeError(\"You cannot add instance attributes", "= _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementMaxErrors) def ComputeFlux(self, blfi, flux, wcoef=True, subdomain=-1): r\"\"\"ComputeFlux(GridFunction self,", "_swig_new_instance_method(_gridfunc.GridFunction_ComputeElementLpErrors) def ComputeElementL1Errors(self, *args): r\"\"\" ComputeElementL1Errors(GridFunction self, Coefficient exsol, Vector", "= _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueVector) def GetValue(self, *args): r\"\"\" GetValue(GridFunction self, int i,", "from .vector import Vector if len(args) == 1: vec =", "vals, DenseMatrix tr=None) \"\"\" return _gridfunc.GridFunction_GetVectorValues(self, *args) GetVectorValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorValues)", "irs=0) -> double ComputeL2Error(GridFunction self, mfem::Coefficient *[] exsol, mfem::IntegrationRule const", "intArray attr) \"\"\" return 
_gridfunc.GridFunction_ProjectBdrCoefficient(self, *args) ProjectBdrCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficient) def", "= 0 return self def __imul__(self, v): ret = _gridfunc.GridFunction_imul(self,", "fec_)\"\"\" return _gridfunc.GridFunction_MakeOwner(self, fec_) MakeOwner = _swig_new_instance_method(_gridfunc.GridFunction_MakeOwner) def OwnFEC(self): r\"\"\"OwnFEC(GridFunction", "Version 4.0.2 # # Do not make changes to this", "cannot add class attributes to %s\" % cls) return set_class_attr", "_swig_new_instance_method(_gridfunc.GridFunction_ComputeCurlError) def ComputeDivError(self, exdiv, irs=0): r\"\"\"ComputeDivError(GridFunction self, Coefficient exdiv, mfem::IntegrationRule", "_gridfunc.GridFunction_OwnFEC(self) OwnFEC = _swig_new_instance_method(_gridfunc.GridFunction_OwnFEC) def VectorDim(self): r\"\"\"VectorDim(GridFunction self) -> int\"\"\"", "-> GridFunction __init__(GridFunction self, FiniteElementSpace f, double * data) ->", "wrapper class _SwigNonDynamicMeta(type): \"\"\"Meta class to enforce nondynamic attributes (no", "SetSpace(self, f): r\"\"\"SetSpace(GridFunction self, FiniteElementSpace f)\"\"\" return _gridfunc.GridFunction_SetSpace(self, f) SetSpace", "vals, int vdim=1) GetValues(GridFunction self, int i, IntegrationRule ir, Vector", "irs) ComputeCurlError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeCurlError) def ComputeDivError(self, exdiv, irs=0): r\"\"\"ComputeDivError(GridFunction self,", "_gridfunc.__lshift__(*args) __lshift__ = _gridfunc.__lshift__ def ZZErrorEstimator(blfi, u, flux, error_estimates, aniso_flags=None,", "self, GridFunction rhs) -> GridFunction Assign(GridFunction self, double value) ->", "vals, DenseMatrix tr) -> int\"\"\" return _gridfunc.GridFunction_GetFaceVectorValues(self, i, side, ir,", "_gridfunc.GridFunction_GetFaceVectorValues(self, i, side, ir, vals, tr) GetFaceVectorValues = _swig_new_instance_method(_gridfunc.GridFunction_GetFaceVectorValues) def", 
"orig_func): r\"\"\"GetValuesFrom(GridFunction self, GridFunction orig_func)\"\"\" return _gridfunc.GridFunction_GetValuesFrom(self, orig_func) GetValuesFrom =", "_swig_new_instance_method(_gridfunc.GridFunction_ProjectVectorFieldOn) def GetDerivative(self, comp, der_comp, der): r\"\"\"GetDerivative(GridFunction self, int comp,", "-> GridFunction\"\"\" return _gridfunc.GridFunction_imul(self, c) imul = _swig_new_instance_method(_gridfunc.GridFunction_imul) def idiv(self,", "QuadratureSpace qspace_, double * qf_data, int vdim_=-1) \"\"\" return _gridfunc.QuadratureFunction_SetSpace(self,", "i, ir, vals, tr, comp) GetVectorFieldValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldValues) def ReorderByNodes(self):", "= _gridfunc.SWIG_PyInstanceMethod_New _swig_new_static_method = _gridfunc.SWIG_PyStaticMethod_New def _swig_repr(self): try: strthis =", "r\"\"\"ProjectGridFunction(GridFunction self, GridFunction src)\"\"\" return _gridfunc.GridFunction_ProjectGridFunction(self, src) ProjectGridFunction = _swig_new_instance_method(_gridfunc.GridFunction_ProjectGridFunction)", "attributes to %s\" % cls) return set_class_attr def _swig_add_metaclass(metaclass): \"\"\"Class", "< (2, 7, 0): raise RuntimeError(\"Python 2.7 or later required\")", "norm_type, elems=None, irs=0): r\"\"\"ComputeW11Error(GridFunction self, Coefficient exsol, VectorCoefficient exgrad, int", "\"\" return \"<%s.%s; %s >\" % (self.__class__.__module__, self.__class__.__name__, strthis,) def", "doc=\"The membership flag\") __repr__ = _swig_repr def __init__(self, m, s,", "ComputeDGFaceJumpError(GridFunction self, Coefficient exsol, Coefficient ell_coeff, double Nu, mfem::IntegrationRule const", "r\"\"\"SetVDim(QuadratureFunction self, int vdim_)\"\"\" return _gridfunc.QuadratureFunction_SetVDim(self, vdim_) SetVDim = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetVDim)", "GetNodalValues(i, array<dobule>, vdim) ''' from .vector import Vector if len(args)", "= 
_swig_new_instance_method(_gridfunc.GridFunction_ComputeW11Error) def ComputeL1Error(self, *args): r\"\"\" ComputeL1Error(GridFunction self, Coefficient exsol,", "doc=\"The membership flag\") __repr__ = _swig_repr def __init__(self, *args): r\"\"\"", "_swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficient) def ProjectBdrCoefficientNormal(self, vcoeff, bdr_attr): r\"\"\"ProjectBdrCoefficientNormal(GridFunction self, VectorCoefficient vcoeff, intArray", "_swig_new_instance_method(_gridfunc.GridFunction_MakeTRef) def SaveVTK(self, out, field_name, ref): r\"\"\"SaveVTK(GridFunction self, std::ostream &", "Coefficient weight=None, VectorCoefficient v_weight=None, mfem::IntegrationRule const *[] irs=0) \"\"\" return", "_gridfunc.GridFunction_Update(self) Update = _swig_new_instance_method(_gridfunc.GridFunction_Update) def FESpace(self, *args): r\"\"\" FESpace(GridFunction self)", "VectorDim = _swig_new_instance_method(_gridfunc.GridFunction_VectorDim) def GetTrueVector(self, *args): r\"\"\" GetTrueVector(GridFunction self) ->", "_gridfunc.SWIG_PyStaticMethod_New def _swig_repr(self): try: strthis = \"proxy of \" +", "SetSpace(QuadratureFunction self, QuadratureSpace qspace_, int vdim_=-1) SetSpace(QuadratureFunction self, QuadratureSpace qspace_,", "irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeCurlError(self, excurl, irs) ComputeCurlError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeCurlError)", "-> double\"\"\" return _gridfunc.GridFunction_GetDivergence(self, tr) GetDivergence = _swig_new_instance_method(_gridfunc.GridFunction_GetDivergence) def GetCurl(self,", "const *[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementMaxErrors(self, *args) ComputeElementMaxErrors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementMaxErrors)", "\"\"\" return _gridfunc.GridFunction_ProjectCoefficient(self, *args) ProjectCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectCoefficient) ARITHMETIC = 
_gridfunc.GridFunction_ARITHMETIC", "= _swig_new_instance_method(_gridfunc.QuadratureFunction_SetVDim) def OwnsSpace(self): r\"\"\"OwnsSpace(QuadratureFunction self) -> bool\"\"\" return _gridfunc.QuadratureFunction_OwnsSpace(self)", "self, VectorCoefficient exsol, mfem::IntegrationRule const *[] irs=0) -> double \"\"\"", "import mfem._par.densemat import mfem._par.eltrans import mfem._par.fe import mfem._par.geom import mfem._par.fespace", "a slimmed down version of six.add_metaclass\"\"\" def wrapper(cls): return metaclass(cls.__name__,", "double c) -> GridFunction\"\"\" return _gridfunc.GridFunction_idiv(self, c) idiv = _swig_new_instance_method(_gridfunc.GridFunction_idiv)", "self, std::ostream & out) Save(GridFunction self, char const * fname,", "attributes) for a class\"\"\" __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__) import weakref import", "isub(GridFunction self, double c) -> GridFunction \"\"\" return _gridfunc.GridFunction_isub(self, *args)", "-> GridFunction isub(GridFunction self, double c) -> GridFunction \"\"\" return", "vcoeff, bdr_attr) ProjectBdrCoefficientTangent = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientTangent) def ComputeL2Error(self, *args): r\"\"\" ComputeL2Error(GridFunction", "-> double \"\"\" return _gridfunc.GridFunction_ComputeDGFaceJumpError(self, *args) ComputeDGFaceJumpError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeDGFaceJumpError) def", "self, double c) -> GridFunction\"\"\" return _gridfunc.GridFunction_idiv(self, c) idiv =", "import mfem._par.mesh import mfem._par.sort_pairs import mfem._par.ncmesh import mfem._par.vtk import mfem._par.element", "elems=None) -> double \"\"\" return _gridfunc.GridFunction_ComputeL2Error(self, *args) ComputeL2Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeL2Error)", "self, VectorCoefficient vcoeff, intArray bdr_attr)\"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficientTangent(self, vcoeff, bdr_attr) 
ProjectBdrCoefficientTangent", "DenseMatrix tr, int vdim=1) \"\"\" return _gridfunc.GridFunction_GetHessians(self, *args) GetHessians =", "_gridfunc.GridFunction_iadd(self, c) iadd = _swig_new_instance_method(_gridfunc.GridFunction_iadd) def isub(self, *args): r\"\"\" isub(GridFunction", "mfem::GridFunction::AvgType type) \"\"\" return _gridfunc.GridFunction_ProjectDiscCoefficient(self, *args) ProjectDiscCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectDiscCoefficient) def", "norm_type, intArray elems=None, mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return", "_swig_new_instance_method(_gridfunc.GridFunction_GetGradient) def GetGradients(self, *args): r\"\"\" GetGradients(GridFunction self, ElementTransformation tr, IntegrationRule", "side, ir, vals, tr, vdim=1): r\"\"\"GetFaceValues(GridFunction self, int i, int", "FiniteElementSpace f, double * tv) MakeTRef(GridFunction self, FiniteElementSpace f, Vector", "mfem::GridFunction::AvgType type) ProjectDiscCoefficient(GridFunction self, VectorCoefficient coeff, mfem::GridFunction::AvgType type) \"\"\" return", "const *[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeL1Error(self, *args) ComputeL1Error", "r\"\"\" Save(GridFunction self, std::ostream & out) Save(GridFunction self, char const", "_swig_repr def __init__(self, m, s, n_): r\"\"\"__init__(ExtrudeCoefficient self, Mesh m,", "Save(GridFunction self, std::ostream & out) Save(GridFunction self, char const *", "_gridfunc.GridFunction_GetVectorFieldValues(self, i, ir, vals, tr, comp) GetVectorFieldValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldValues) def", "GetGradients(GridFunction self, int const elem, IntegrationRule ir, DenseMatrix grad) \"\"\"", "r\"\"\"GetElementIntRule(QuadratureFunction self, int idx) -> IntegrationRule\"\"\" return _gridfunc.QuadratureFunction_GetElementIntRule(self, idx) GetElementIntRule", "irs) ComputeHDivError = 
_swig_new_instance_method(_gridfunc.GridFunction_ComputeHDivError) def ComputeHCurlError(self, exsol, excurl, irs=0): r\"\"\"ComputeHCurlError(GridFunction", "vcoeff, int attribute) ProjectCoefficient(GridFunction self, mfem::Coefficient *[] coeff) \"\"\" return", "exgrad, norm_type, elems, irs) ComputeW11Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeW11Error) def ComputeL1Error(self, *args):", "GetVectorValues(GridFunction self, ElementTransformation T, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr=None)", "Vector error, Coefficient weight=None, mfem::IntegrationRule const *[] irs=0) ComputeElementLpErrors(GridFunction self,", "_in) -> QuadratureFunction \"\"\" _gridfunc.QuadratureFunction_swiginit(self, _gridfunc.new_QuadratureFunction(*args)) __swig_destroy__ = _gridfunc.delete_QuadratureFunction def", "QuadratureFunction orig) -> QuadratureFunction __init__(QuadratureFunction self, QuadratureSpace qspace_, int vdim_=1)", "raise AttributeError(\"You cannot add instance attributes to %s\" % self)", "= _swig_new_instance_method(_gridfunc.GridFunction_Assign) def Update(self): r\"\"\"Update(GridFunction self)\"\"\" return _gridfunc.GridFunction_Update(self) Update =", "\"\"\" return _gridfunc.GridFunction_isub(self, *args) isub = _swig_new_instance_method(_gridfunc.GridFunction_isub) def imul(self, c):", "const *[] irs=0, intArray elems=None) -> double \"\"\" return _gridfunc.GridFunction_ComputeL2Error(self,", "r\"\"\"ZZErrorEstimator(BilinearFormIntegrator blfi, GridFunction u, GridFunction flux, Vector error_estimates, intArray aniso_flags=None,", "aniso_flags=None, with_subdomains=1, with_coeff=False): r\"\"\"ZZErrorEstimator(BilinearFormIntegrator blfi, GridFunction u, GridFunction flux, Vector", "return _gridfunc.GridFunction_GetNodalValues(self, *args) def GetVectorFieldNodalValues(self, val, comp): r\"\"\"GetVectorFieldNodalValues(GridFunction self, Vector", "GetNodalValues(vector, vdim) GetNodalValues(i, array<dobule>, vdim) ''' 
from .vector import Vector", "os, SparseMatrix mat) -> std::ostream __lshift__(std::ostream & out, Mesh mesh)", "mfem::IntegrationRule const *[] irs=0) -> double ComputeL2Error(GridFunction self, VectorCoefficient exsol,", "Vector values) GetElementValues(QuadratureFunction self, int idx, Vector values) GetElementValues(QuadratureFunction self,", "*[] coeff) \"\"\" return _gridfunc.GridFunction_ProjectCoefficient(self, *args) ProjectCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectCoefficient) ARITHMETIC", "doc=\"The membership flag\") __repr__ = _swig_repr CONSTANT = _gridfunc.JumpScaling_CONSTANT ONE_OVER_H", "self, ElementTransformation tr, IntegrationRule ir, DenseMatrix grad) GetGradients(GridFunction self, int", "_gridfunc.GridFunction_ComputeCurlError(self, excurl, irs) ComputeCurlError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeCurlError) def ComputeDivError(self, exdiv, irs=0):", "= _gridfunc.delete_JumpScaling # Register JumpScaling in _gridfunc: _gridfunc.JumpScaling_swigregister(JumpScaling) class QuadratureFunction(mfem._par.vector.Vector):", "return _gridfunc.GridFunction_ProjectBdrCoefficientTangent(self, vcoeff, bdr_attr) ProjectBdrCoefficientTangent = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientTangent) def ComputeL2Error(self, *args):", "SaveGZ(self, file, precision=16): r\"\"\"SaveGZ(QuadratureFunction self, char const * file, int", "HARMONIC = _gridfunc.GridFunction_HARMONIC def ProjectDiscCoefficient(self, *args): r\"\"\" ProjectDiscCoefficient(GridFunction self, VectorCoefficient", "-> double \"\"\" return _gridfunc.GridFunction_ComputeLpError(self, *args) ComputeLpError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeLpError) def", "= property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc=\"The membership", "= _swig_new_instance_method(_gridfunc.GridFunction_GetGradient) def GetGradients(self, *args): r\"\"\" GetGradients(GridFunction self, ElementTransformation 
tr,", "bdr_attr)\"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficientNormal(self, vcoeff, bdr_attr) ProjectBdrCoefficientNormal = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientNormal) def ProjectBdrCoefficientTangent(self,", "std::ostream & \"\"\" return _gridfunc.__lshift__(*args) __lshift__ = _gridfunc.__lshift__ def ZZErrorEstimator(blfi,", "_swig_new_instance_method(_gridfunc.GridFunction_ComputeDGFaceJumpError) def ComputeH1Error(self, *args): r\"\"\" ComputeH1Error(GridFunction self, Coefficient exsol, VectorCoefficient", "in _gridfunc: _gridfunc.ExtrudeCoefficient_swigregister(ExtrudeCoefficient) def Extrude1DGridFunction(mesh, mesh2d, sol, ny): r\"\"\"Extrude1DGridFunction(Mesh mesh,", "SetTrueVector(self): r\"\"\"SetTrueVector(GridFunction self)\"\"\" return _gridfunc.GridFunction_SetTrueVector(self) SetTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_SetTrueVector) def SetFromTrueDofs(self,", "double \"\"\" return _gridfunc.GridFunction_ComputeL2Error(self, *args) ComputeL2Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeL2Error) def ComputeGradError(self,", "= _swig_new_instance_method(_gridfunc.GridFunction_FESpace) def SetSpace(self, f): r\"\"\"SetSpace(GridFunction self, FiniteElementSpace f)\"\"\" return", "def GetVDim(self): r\"\"\"GetVDim(QuadratureFunction self) -> int\"\"\" return _gridfunc.QuadratureFunction_GetVDim(self) GetVDim =", "self, GridFunction orig) -> GridFunction __init__(GridFunction self, FiniteElementSpace f) ->", "adding a metaclass to a SWIG wrapped class - a", "idx, DenseMatrix values) \"\"\" return _gridfunc.QuadratureFunction_GetElementValues(self, *args) GetElementValues = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementValues)", "exsol, Vector error, mfem::IntegrationRule const *[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementL1Errors(self,", "= _swig_repr CONSTANT = _gridfunc.JumpScaling_CONSTANT ONE_OVER_H = 
_gridfunc.JumpScaling_ONE_OVER_H P_SQUARED_OVER_H =", "RestrictConforming = _swig_new_instance_method(_gridfunc.GridFunction_RestrictConforming) def ProjectGridFunction(self, src): r\"\"\"ProjectGridFunction(GridFunction self, GridFunction src)\"\"\"", "vcoeff, intArray bdr_attr)\"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficientNormal(self, vcoeff, bdr_attr) ProjectBdrCoefficientNormal = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientNormal)", "_gridfunc.ComputeElementLpDistance class ExtrudeCoefficient(mfem._par.coefficient.Coefficient): r\"\"\"Proxy of C++ mfem::ExtrudeCoefficient class.\"\"\" thisown =", "_gridfunc.GridFunction_swigregister(GridFunction) class JumpScaling(object): r\"\"\"Proxy of C++ mfem::JumpScaling class.\"\"\" thisown =", "n_)) def Eval(self, T, ip): r\"\"\"Eval(ExtrudeCoefficient self, ElementTransformation T, IntegrationPoint", "= _swig_new_instance_method(_gridfunc.GridFunction_ComputeHCurlError) def ComputeMaxError(self, *args): r\"\"\" ComputeMaxError(GridFunction self, Coefficient exsol,", "r\"\"\"GetVectorGradient(GridFunction self, ElementTransformation tr, DenseMatrix grad)\"\"\" return _gridfunc.GridFunction_GetVectorGradient(self, tr, grad)", "self, FiniteElementSpace f, Vector v, int v_offset) \"\"\" return _gridfunc.GridFunction_MakeRef(self,", "r\"\"\"ComputeDivError(GridFunction self, Coefficient exdiv, mfem::IntegrationRule const *[] irs=0) -> double\"\"\"", "Vector vals, DenseMatrix tr, int vdim=1) GetValues(GridFunction self, ElementTransformation T,", "precision) SaveGZ = _swig_new_instance_method(_gridfunc.GridFunction_SaveGZ) # Register GridFunction in _gridfunc: _gridfunc.GridFunction_swigregister(GridFunction)", "r\"\"\"GetSpace(QuadratureFunction self) -> QuadratureSpace\"\"\" return _gridfunc.QuadratureFunction_GetSpace(self) GetSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetSpace) def", "__init__(GridFunction self, FiniteElementSpace f) -> GridFunction 
__init__(GridFunction self, FiniteElementSpace f,", "weight=None, mfem::IntegrationRule const *[] irs=0) ComputeElementLpErrors(GridFunction self, double const p,", "this file unless you know what you are doing--modify #", "GetDerivative = _swig_new_instance_method(_gridfunc.GridFunction_GetDerivative) def GetDivergence(self, tr): r\"\"\"GetDivergence(GridFunction self, ElementTransformation tr)", "ElementTransformation tr, DenseMatrix grad)\"\"\" return _gridfunc.GridFunction_GetVectorGradient(self, tr, grad) GetVectorGradient =", "r\"\"\"GetFaceVectorValues(GridFunction self, int i, int side, IntegrationRule ir, DenseMatrix vals,", "ComputeElementMaxErrors(GridFunction self, Coefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0)", "_gridfunc.JumpScaling_swiginit(self, _gridfunc.new_JumpScaling(*args, **kwargs)) def Eval(self, h, p): r\"\"\"Eval(JumpScaling self, double", "r\"\"\"Eval(JumpScaling self, double h, int p) -> double\"\"\" return _gridfunc.JumpScaling_Eval(self,", "-> Vector GetTrueVector(GridFunction self) -> Vector \"\"\" return _gridfunc.GridFunction_GetTrueVector(self, *args)", "r\"\"\"ComputeW11Error(GridFunction self, Coefficient exsol, VectorCoefficient exgrad, int norm_type, intArray elems=None,", "name), property): set(self, name, value) else: raise AttributeError(\"You cannot add", "const *[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeDGFaceJumpError(self, *args) ComputeDGFaceJumpError", "exgrad, int norm_type, intArray elems=None, mfem::IntegrationRule const *[] irs=0) ->", "base, int offset) MakeRef(GridFunction self, FiniteElementSpace f, double * v)", "irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeDGFaceJumpError(self, *args) ComputeDGFaceJumpError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeDGFaceJumpError)", "num_pieces) -> GridFunction __init__(GridFunction self, FiniteElementSpace fes, Vector v, int", "IntegrationRule ir, DenseMatrix grad) \"\"\" return 
_gridfunc.GridFunction_GetGradients(self, *args) GetGradients =", "VectorCoefficient excurl, mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeCurlError(self,", "else: return _gridfunc.GridFunction_GetNodalValues(self, *args) def GetVectorFieldNodalValues(self, val, comp): r\"\"\"GetVectorFieldNodalValues(GridFunction self,", "= _swig_new_instance_method(_gridfunc.GridFunction_SaveToFile) def WriteToStream(self, StringIO): r\"\"\"WriteToStream(GridFunction self, PyObject * StringIO)", "v: x.this.own(v), doc=\"The membership flag\") __repr__ = _swig_repr CONSTANT =", "irs) ComputeW11Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeW11Error) def ComputeL1Error(self, *args): r\"\"\" ComputeL1Error(GridFunction self,", "GetTrueDofs = _swig_new_instance_method(_gridfunc.GridFunction_GetTrueDofs) def SetTrueVector(self): r\"\"\"SetTrueVector(GridFunction self)\"\"\" return _gridfunc.GridFunction_SetTrueVector(self) SetTrueVector", "tr, curl): r\"\"\"GetCurl(GridFunction self, ElementTransformation tr, Vector curl)\"\"\" return _gridfunc.GridFunction_GetCurl(self,", "ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff, intArray dofs) ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff,", "der) GetDerivative = _swig_new_instance_method(_gridfunc.GridFunction_GetDerivative) def GetDivergence(self, tr): r\"\"\"GetDivergence(GridFunction self, ElementTransformation", "def MakeRef(self, *args): r\"\"\" MakeRef(GridFunction self, Vector base, int offset,", "_swig_new_instance_method(_gridfunc.GridFunction_MakeOwner) def OwnFEC(self): r\"\"\"OwnFEC(GridFunction self) -> FiniteElementCollection\"\"\" return _gridfunc.GridFunction_OwnFEC(self) OwnFEC", "int el, Vector dof_vals)\"\"\" return _gridfunc.GridFunction_GetElementDofValues(self, el, dof_vals) GetElementDofValues =", "int p) -> double\"\"\" return _gridfunc.JumpScaling_Eval(self, h, p) Eval =", "int vdim_=-1) SetSpace(QuadratureFunction self, 
QuadratureSpace qspace_, double * qf_data, int", "irs=0) -> double ComputeL1Error(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule const *[]", "-> double GetValue(GridFunction self, ElementTransformation T, IntegrationPoint ip, int comp=0,", "ir, DenseMatrix hess, DenseMatrix tr, int vdim=1) \"\"\" return _gridfunc.GridFunction_GetHessians(self,", "= _swig_new_instance_method(_gridfunc.ExtrudeCoefficient_Eval) __swig_destroy__ = _gridfunc.delete_ExtrudeCoefficient # Register ExtrudeCoefficient in _gridfunc:", "r\"\"\" ComputeL1Error(GridFunction self, Coefficient exsol, mfem::IntegrationRule const *[] irs=0) ->", "isub(GridFunction self, GridFunction c) -> GridFunction isub(GridFunction self, double c)", "p) -> double\"\"\" return _gridfunc.JumpScaling_Eval(self, h, p) Eval = _swig_new_instance_method(_gridfunc.JumpScaling_Eval)", "double nu_=1.0, mfem::JumpScaling::JumpScalingType type_=CONSTANT) -> JumpScaling\"\"\" _gridfunc.JumpScaling_swiginit(self, _gridfunc.new_JumpScaling(*args, **kwargs)) def", "values) \"\"\" return _gridfunc.QuadratureFunction_GetElementValues(self, *args) GetElementValues = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementValues) def Save(self,", "def __init__(self, m, s, n_): r\"\"\"__init__(ExtrudeCoefficient self, Mesh m, Coefficient", "of \" + self.this.__repr__() except __builtin__.Exception: strthis = \"\" return", "def idiv(self, c): r\"\"\"idiv(GridFunction self, double c) -> GridFunction\"\"\" return", "double * data) -> GridFunction __init__(GridFunction self, Mesh m, std::istream", "doing--modify # the SWIG interface file instead. 
from sys import", "_gridfunc.GridFunction_ReorderByNodes(self) ReorderByNodes = _swig_new_instance_method(_gridfunc.GridFunction_ReorderByNodes) def GetNodalValues(self, *args): ''' GetNodalValues(i) ->", "C++ mfem::ExtrudeCoefficient class.\"\"\" thisown = property(lambda x: x.this.own(), lambda x,", "iadd(self, c): r\"\"\"iadd(GridFunction self, GridFunction c) -> GridFunction\"\"\" return _gridfunc.GridFunction_iadd(self,", "% self) return set_instance_attr def _swig_setattr_nondynamic_class_variable(set): def set_class_attr(cls, name, value):", "int comp)\"\"\" return _gridfunc.GridFunction_GetVectorFieldNodalValues(self, val, comp) GetVectorFieldNodalValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldNodalValues) def", "vals, tr, vdim=1): r\"\"\"GetFaceValues(GridFunction self, int i, int side, IntegrationRule", "_swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementValues) def Save(self, *args): r\"\"\" Save(QuadratureFunction self, std::ostream & out)", "_swig_new_instance_method(_gridfunc.GridFunction_GetHessians) def GetValuesFrom(self, orig_func): r\"\"\"GetValuesFrom(GridFunction self, GridFunction orig_func)\"\"\" return _gridfunc.GridFunction_GetValuesFrom(self,", "r\"\"\"SaveGZ(GridFunction self, char const * file, int precision=16)\"\"\" return _gridfunc.GridFunction_SaveGZ(self,", "add instance attributes to %s\" % self) return set_instance_attr def", "ProjectCoefficient(self, *args): r\"\"\" ProjectCoefficient(GridFunction self, Coefficient coeff) ProjectCoefficient(GridFunction self, Coefficient", "MakeTRef(GridFunction self, FiniteElementSpace f, double * tv) MakeTRef(GridFunction self, FiniteElementSpace", "= _swig_new_instance_method(_gridfunc.GridFunction_GetDivergence) def GetCurl(self, tr, curl): r\"\"\"GetCurl(GridFunction self, ElementTransformation tr,", "GetElementValues(self, *args): r\"\"\" GetElementValues(QuadratureFunction self, int idx, Vector values) GetElementValues(QuadratureFunction", 
"precision=16)\"\"\" return _gridfunc.QuadratureFunction_SaveGZ(self, file, precision) SaveGZ = _swig_new_instance_method(_gridfunc.QuadratureFunction_SaveGZ) # Register", "const *[] irs=0) -> double ComputeL1Error(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule", "import mfem._par.vertex import mfem._par.fe_coll import mfem._par.lininteg import mfem._par.handle import mfem._par.hypre", "return _gridfunc.GridFunction_ComputeL2Error(self, *args) ComputeL2Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeL2Error) def ComputeGradError(self, exgrad, irs=0):", "r\"\"\"ComputeCurlError(GridFunction self, VectorCoefficient excurl, mfem::IntegrationRule const *[] irs=0) -> double\"\"\"", "mfem::GridFunction class.\"\"\" thisown = property(lambda x: x.this.own(), lambda x, v:", "flux, error_estimates, aniso_flags, with_subdomains, with_coeff) ZZErrorEstimator = _gridfunc.ZZErrorEstimator def ComputeElementLpDistance(p,", "_gridfunc.QuadratureFunction_GetSpace(self) GetSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetSpace) def SetSpace(self, *args): r\"\"\" SetSpace(QuadratureFunction self,", "iadd = _swig_new_instance_method(_gridfunc.GridFunction_iadd) def isub(self, *args): r\"\"\" isub(GridFunction self, GridFunction", "Coefficient exsol, mfem::IntegrationRule const *[] irs=0) -> double ComputeMaxError(GridFunction self,", "exsol, Coefficient ell_coeff, double Nu, mfem::IntegrationRule const *[] irs=0) ->", "-> GridFunction\"\"\" return _gridfunc.Extrude1DGridFunction(mesh, mesh2d, sol, ny) Extrude1DGridFunction = _gridfunc.Extrude1DGridFunction", "int const elem, IntegrationRule ir, DenseMatrix grad) \"\"\" return _gridfunc.GridFunction_GetGradients(self,", "mfem._par.lininteg import mfem._par.handle import mfem._par.hypre import mfem._par.restriction import mfem._par.bilininteg import", "*args): r\"\"\" ComputeH1Error(GridFunction self, Coefficient exsol, VectorCoefficient exgrad, Coefficient ell_coef,", "irs=0) -> 
double \"\"\" return _gridfunc.GridFunction_ComputeLpError(self, *args) ComputeLpError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeLpError)", "_gridfunc.GridFunction_imul(self, c) imul = _swig_new_instance_method(_gridfunc.GridFunction_imul) def idiv(self, c): r\"\"\"idiv(GridFunction self,", "def GetVectorFieldValues(self, i, ir, vals, tr, comp=0): r\"\"\"GetVectorFieldValues(GridFunction self, int", "self, VectorCoefficient vcoeff, intArray bdr_attr)\"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficientNormal(self, vcoeff, bdr_attr) ProjectBdrCoefficientNormal", "def SetSpace(self, f): r\"\"\"SetSpace(GridFunction self, FiniteElementSpace f)\"\"\" return _gridfunc.GridFunction_SetSpace(self, f)", "self, Vector v) -> GridFunction \"\"\" return _gridfunc.GridFunction_Assign(self, *args) Assign", "strthis = \"proxy of \" + self.this.__repr__() except __builtin__.Exception: strthis", "Register JumpScaling in _gridfunc: _gridfunc.JumpScaling_swigregister(JumpScaling) class QuadratureFunction(mfem._par.vector.Vector): r\"\"\"Proxy of C++", "array<dobule>, vdim) ''' from .vector import Vector if len(args) ==", "int vdim_)\"\"\" return _gridfunc.QuadratureFunction_SetVDim(self, vdim_) SetVDim = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetVDim) def OwnsSpace(self):", "C++ mfem::QuadratureFunction class.\"\"\" thisown = property(lambda x: x.this.own(), lambda x,", "DenseMatrix values) \"\"\" return _gridfunc.QuadratureFunction_GetElementValues(self, *args) GetElementValues = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementValues) def", "mesh2d, sol, ny): r\"\"\"Extrude1DGridFunction(Mesh mesh, Mesh mesh2d, GridFunction sol, int", "r\"\"\"GetCurl(GridFunction self, ElementTransformation tr, Vector curl)\"\"\" return _gridfunc.GridFunction_GetCurl(self, tr, curl)", "MakeRef = _swig_new_instance_method(_gridfunc.GridFunction_MakeRef) def MakeTRef(self, *args): r\"\"\" MakeTRef(GridFunction self, FiniteElementSpace", 
"mfem._par.vertex import mfem._par.fe_coll import mfem._par.lininteg import mfem._par.handle import mfem._par.hypre import", "_swig_new_static_method = _gridfunc.SWIG_PyStaticMethod_New def _swig_repr(self): try: strthis = \"proxy of", "class\"\"\" __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__) import weakref import mfem._par.array import mfem._par.mem_manager", "VectorDim(self): r\"\"\"VectorDim(GridFunction self) -> int\"\"\" return _gridfunc.GridFunction_VectorDim(self) VectorDim = _swig_new_instance_method(_gridfunc.GridFunction_VectorDim)", "import mfem._par.fe_coll import mfem._par.lininteg import mfem._par.handle import mfem._par.hypre import mfem._par.restriction", "c) -> GridFunction isub(GridFunction self, double c) -> GridFunction \"\"\"", "self, std::ostream & out, int TimesToRefine=1)\"\"\" return _gridfunc.GridFunction_SaveSTL(self, out, TimesToRefine)", "what you are doing--modify # the SWIG interface file instead.", "error, Coefficient weight=None, VectorCoefficient v_weight=None, mfem::IntegrationRule const *[] irs=0) \"\"\"", "r\"\"\"RestrictConforming(GridFunction self)\"\"\" return _gridfunc.GridFunction_RestrictConforming(self) RestrictConforming = _swig_new_instance_method(_gridfunc.GridFunction_RestrictConforming) def ProjectGridFunction(self, src):", "_gridfunc.GridFunction_FESpace(self, *args) FESpace = _swig_new_instance_method(_gridfunc.GridFunction_FESpace) def SetSpace(self, f): r\"\"\"SetSpace(GridFunction self,", "r\"\"\"ProjectBdrCoefficientNormal(GridFunction self, VectorCoefficient vcoeff, intArray bdr_attr)\"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficientNormal(self, vcoeff, bdr_attr)", "_SwigNonDynamicMeta(type): \"\"\"Meta class to enforce nondynamic attributes (no new attributes)", "vdim=1) GetValues(GridFunction self, ElementTransformation T, IntegrationRule ir, Vector vals, int", "RuntimeError(\"Python 2.7 or later required\") # Import the low-level C/C++", "def Eval(self, T, ip): 
r\"\"\"Eval(ExtrudeCoefficient self, ElementTransformation T, IntegrationPoint ip)", "value): if hasattr(cls, name) and not isinstance(getattr(cls, name), property): set(cls,", "r\"\"\" ComputeElementMaxErrors(GridFunction self, Coefficient exsol, Vector error, mfem::IntegrationRule const *[]", "import mfem._par.operators import mfem._par.intrules import mfem._par.sparsemat import mfem._par.densemat import mfem._par.eltrans", "_swig_new_instance_method(_gridfunc.GridFunction_SaveSTL) __swig_destroy__ = _gridfunc.delete_GridFunction def __init__(self, *args): r\"\"\" __init__(GridFunction self)", "self, int i, Vector weights, Vector lo_, Vector hi_) ImposeBounds(GridFunction", "bdr_attr) ProjectBdrCoefficientTangent = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientTangent) def ComputeL2Error(self, *args): r\"\"\" ComputeL2Error(GridFunction self,", "IntegrationRule ir, DenseMatrix vals, DenseMatrix tr) GetVectorValues(GridFunction self, ElementTransformation T,", "ProjectBdrCoefficient(GridFunction self, mfem::Coefficient *[] coeff, intArray attr) \"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficient(self,", "_gridfunc.GridFunction_GetLaplacians(self, *args) GetLaplacians = _swig_new_instance_method(_gridfunc.GridFunction_GetLaplacians) def GetHessians(self, *args): r\"\"\" GetHessians(GridFunction", "r\"\"\"ProjectBdrCoefficientTangent(GridFunction self, VectorCoefficient vcoeff, intArray bdr_attr)\"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficientTangent(self, vcoeff, bdr_attr)", "_gridfunc.GridFunction_GetValues(self, *args) GetValues = _swig_new_instance_method(_gridfunc.GridFunction_GetValues) def GetVectorValues(self, *args): r\"\"\" GetVectorValues(GridFunction", "__builtin__ except ImportError: import __builtin__ _swig_new_instance_method = _gridfunc.SWIG_PyInstanceMethod_New _swig_new_static_method =", "Vector tr=None) \"\"\" return _gridfunc.GridFunction_GetVectorValue(self, *args) GetVectorValue = 
_swig_new_instance_method(_gridfunc.GridFunction_GetVectorValue) def", "= _swig_new_instance_method(_gridfunc.GridFunction_GetFaceVectorValues) def GetLaplacians(self, *args): r\"\"\" GetLaplacians(GridFunction self, int i,", "exdiv, irs=0): r\"\"\"ComputeHDivError(GridFunction self, VectorCoefficient exsol, Coefficient exdiv, mfem::IntegrationRule const", "= _swig_new_instance_method(_gridfunc.GridFunction_ReorderByNodes) def GetNodalValues(self, *args): ''' GetNodalValues(i) -> GetNodalValues(vector, vdim)", "self, char const * file, int precision=16) \"\"\" return _gridfunc.QuadratureFunction_Save(self,", "v) ret.thisown = 0 return self GridFunction.__iadd__ = __iadd__ GridFunction.__idiv__", "*args): r\"\"\" GetValues(GridFunction self, int i, IntegrationRule ir, Vector vals,", "import __builtin__ _swig_new_instance_method = _gridfunc.SWIG_PyInstanceMethod_New _swig_new_static_method = _gridfunc.SWIG_PyStaticMethod_New def _swig_repr(self):", "ComputeW11Error(self, exsol, exgrad, norm_type, elems=None, irs=0): r\"\"\"ComputeW11Error(GridFunction self, Coefficient exsol,", "Extrude1DGridFunction(mesh, mesh2d, sol, ny): r\"\"\"Extrude1DGridFunction(Mesh mesh, Mesh mesh2d, GridFunction sol,", "i, IntegrationRule ir, DenseMatrix hess, DenseMatrix tr, int vdim=1) \"\"\"", "return _gridfunc.JumpScaling_Eval(self, h, p) Eval = _swig_new_instance_method(_gridfunc.JumpScaling_Eval) __swig_destroy__ = _gridfunc.delete_JumpScaling", "r\"\"\" GetTrueVector(GridFunction self) -> Vector GetTrueVector(GridFunction self) -> Vector \"\"\"", "mfem._par.array import mfem._par.mem_manager import mfem._par.vector import mfem._par.coefficient import mfem._par.globals import", "_swig_new_instance_method(_gridfunc.GridFunction_ReorderByNodes) def GetNodalValues(self, *args): ''' GetNodalValues(i) -> GetNodalValues(vector, vdim) GetNodalValues(i,", "set_instance_attr(self, name, value): if name == \"thisown\": self.this.own(value) elif name", "GridFunction Assign(GridFunction self, 
Vector v) -> GridFunction \"\"\" return _gridfunc.GridFunction_Assign(self,", "= _gridfunc.SWIG_PyStaticMethod_New def _swig_repr(self): try: strthis = \"proxy of \"", "GetValues(GridFunction self, int i, IntegrationRule ir, Vector vals, int vdim=1)", "import mfem._par.eltrans import mfem._par.fe import mfem._par.geom import mfem._par.fespace import mfem._par.mesh", "def ProjectVectorFieldOn(self, vec_field, comp=0): r\"\"\"ProjectVectorFieldOn(GridFunction self, GridFunction vec_field, int comp=0)\"\"\"", "r\"\"\" ComputeMaxError(GridFunction self, Coefficient exsol, mfem::IntegrationRule const *[] irs=0) ->", "v, int offset) -> GridFunction \"\"\" _gridfunc.GridFunction_swiginit(self, _gridfunc.new_GridFunction(*args)) def SaveToFile(self,", "ImposeBounds = _swig_new_instance_method(_gridfunc.GridFunction_ImposeBounds) def RestrictConforming(self): r\"\"\"RestrictConforming(GridFunction self)\"\"\" return _gridfunc.GridFunction_RestrictConforming(self) RestrictConforming", "GetGradient(self, tr, grad): r\"\"\"GetGradient(GridFunction self, ElementTransformation tr, Vector grad)\"\"\" return", "value): if name == \"thisown\": self.this.own(value) elif name == \"this\":", "Coefficient exsol, Coefficient ell_coeff, JumpScaling jump_scaling, mfem::IntegrationRule const *[] irs=0)", "*args): r\"\"\" ComputeL1Error(GridFunction self, Coefficient exsol, mfem::IntegrationRule const *[] irs=0)", "def FESpace(self, *args): r\"\"\" FESpace(GridFunction self) -> FiniteElementSpace FESpace(GridFunction self)", "Coefficient ell_coeff, double Nu, mfem::IntegrationRule const *[] irs=0) -> double", "-> GridFunction\"\"\" return _gridfunc.GridFunction_idiv(self, c) idiv = _swig_new_instance_method(_gridfunc.GridFunction_idiv) def Save(self,", "vdim_=1) -> QuadratureFunction __init__(QuadratureFunction self, Mesh mesh, std::istream & _in)", "sol, int const ny) -> GridFunction\"\"\" return _gridfunc.Extrude1DGridFunction(mesh, mesh2d, sol,", "ProjectCoefficient(GridFunction self, 
VectorCoefficient vcoeff, int attribute) ProjectCoefficient(GridFunction self, mfem::Coefficient *[]", "self, int i, IntegrationRule ir, Vector vals, DenseMatrix tr, int", "def GetLaplacians(self, *args): r\"\"\" GetLaplacians(GridFunction self, int i, IntegrationRule ir,", "self, double const p, VectorCoefficient exsol, Coefficient weight=None, VectorCoefficient v_weight=None,", "int comp=0)\"\"\" return _gridfunc.GridFunction_GetVectorFieldValues(self, i, ir, vals, tr, comp) GetVectorFieldValues", "own): r\"\"\"SetOwnsSpace(QuadratureFunction self, bool own)\"\"\" return _gridfunc.QuadratureFunction_SetOwnsSpace(self, own) SetOwnsSpace =", "self, double c) -> GridFunction \"\"\" return _gridfunc.GridFunction_isub(self, *args) isub", "self, Coefficient exsol, Coefficient ell_coeff, JumpScaling jump_scaling, mfem::IntegrationRule const *[]", "*args) Assign = _swig_new_instance_method(_gridfunc.GridFunction_Assign) def Update(self): r\"\"\"Update(GridFunction self)\"\"\" return _gridfunc.GridFunction_Update(self)", "make changes to this file unless you know what you", "Coefficient coeff) ProjectCoefficient(GridFunction self, Coefficient coeff, intArray dofs, int vd=0)", "file, int precision=16)\"\"\" return _gridfunc.GridFunction_SaveGZ(self, file, precision) SaveGZ = _swig_new_instance_method(_gridfunc.GridFunction_SaveGZ)", "qf) -> std::ostream & \"\"\" return _gridfunc.__lshift__(*args) __lshift__ = _gridfunc.__lshift__", "self, ElementTransformation T, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr=None) \"\"\"", "_swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL2Errors) def ComputeElementMaxErrors(self, *args): r\"\"\" ComputeElementMaxErrors(GridFunction self, Coefficient exsol, Vector", "_swig_new_instance_method(_gridfunc.GridFunction_GetVectorValues) def GetFaceValues(self, i, side, ir, vals, tr, vdim=1): r\"\"\"GetFaceValues(GridFunction", "membership flag\") __repr__ = _swig_repr def __init__(self, *args): r\"\"\" 
__init__(QuadratureFunction", "T, ip): r\"\"\"Eval(ExtrudeCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double\"\"\"", "WriteToStream = _swig_new_instance_method(_gridfunc.GridFunction_WriteToStream) def iadd(self, c): r\"\"\"iadd(GridFunction self, GridFunction c)", "GetLaplacians(GridFunction self, int i, IntegrationRule ir, Vector laps, int vdim=1)", "-> FiniteElementSpace FESpace(GridFunction self) -> FiniteElementSpace \"\"\" return _gridfunc.GridFunction_FESpace(self, *args)", "PyObject *\"\"\" return _gridfunc.GridFunction_WriteToStream(self, StringIO) WriteToStream = _swig_new_instance_method(_gridfunc.GridFunction_WriteToStream) def iadd(self,", "def __lshift__(*args): r\"\"\" __lshift__(std::ostream & os, SparseMatrix mat) -> std::ostream", "* gf_file, int const precision)\"\"\" return _gridfunc.GridFunction_SaveToFile(self, gf_file, precision) SaveToFile", "return _gridfunc.GridFunction_ComputeGradError(self, exgrad, irs) ComputeGradError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeGradError) def ComputeCurlError(self, excurl,", "excurl, mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeHCurlError(self, exsol,", "double\"\"\" return _gridfunc.ExtrudeCoefficient_Eval(self, T, ip) Eval = _swig_new_instance_method(_gridfunc.ExtrudeCoefficient_Eval) __swig_destroy__ =", "r\"\"\"GetVectorFieldNodalValues(GridFunction self, Vector val, int comp)\"\"\" return _gridfunc.GridFunction_GetVectorFieldNodalValues(self, val, comp)", "ir, Vector vals, DenseMatrix tr, int vdim=1) -> int\"\"\" return", "tv)\"\"\" return _gridfunc.GridFunction_GetTrueDofs(self, tv) GetTrueDofs = _swig_new_instance_method(_gridfunc.GridFunction_GetTrueDofs) def SetTrueVector(self): r\"\"\"SetTrueVector(GridFunction", "_swig_new_instance_method(_gridfunc.GridFunction_imul) def idiv(self, c): r\"\"\"idiv(GridFunction self, double c) -> GridFunction\"\"\"", "TimesToRefine) SaveSTL = 
_swig_new_instance_method(_gridfunc.GridFunction_SaveSTL) __swig_destroy__ = _gridfunc.delete_GridFunction def __init__(self, *args):", "to %s\" % self) return set_instance_attr def _swig_setattr_nondynamic_class_variable(set): def set_class_attr(cls,", "IntegrationPoint ip, Vector val, Vector tr=None) \"\"\" return _gridfunc.GridFunction_GetVectorValue(self, *args)", "_gridfunc.new_ExtrudeCoefficient(m, s, n_)) def Eval(self, T, ip): r\"\"\"Eval(ExtrudeCoefficient self, ElementTransformation", "self, VectorCoefficient vcoeff, int attribute) ProjectCoefficient(GridFunction self, mfem::Coefficient *[] coeff)", "std::ostream __lshift__(std::ostream & out, GridFunction sol) -> std::ostream __lshift__(std::ostream &", "Nu, mfem::IntegrationRule const *[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeDGFaceJumpError(self,", "tv): r\"\"\"GetTrueDofs(GridFunction self, Vector tv)\"\"\" return _gridfunc.GridFunction_GetTrueDofs(self, tv) GetTrueDofs =", "r\"\"\"SaveSTL(GridFunction self, std::ostream & out, int TimesToRefine=1)\"\"\" return _gridfunc.GridFunction_SaveSTL(self, out,", "f, double * tv) MakeTRef(GridFunction self, FiniteElementSpace f, Vector tv,", "r\"\"\"Proxy of C++ mfem::GridFunction class.\"\"\" thisown = property(lambda x: x.this.own(),", "return wrapper class _SwigNonDynamicMeta(type): \"\"\"Meta class to enforce nondynamic attributes", "-> int\"\"\" return _gridfunc.GridFunction_VectorDim(self) VectorDim = _swig_new_instance_method(_gridfunc.GridFunction_VectorDim) def GetTrueVector(self, *args):", "int i, IntegrationRule ir, DenseMatrix hess, DenseMatrix tr, int vdim=1)", "int vdim=1) GetValues(GridFunction self, ElementTransformation T, IntegrationRule ir, Vector vals,", "_gridfunc.delete_QuadratureFunction def GetSpace(self): r\"\"\"GetSpace(QuadratureFunction self) -> QuadratureSpace\"\"\" return _gridfunc.QuadratureFunction_GetSpace(self) GetSpace", "= __iadd__ GridFunction.__idiv__ = __idiv__ GridFunction.__isub__ = __isub__ 
GridFunction.__imul__ =", "GridFunction in _gridfunc: _gridfunc.GridFunction_swigregister(GridFunction) class JumpScaling(object): r\"\"\"Proxy of C++ mfem::JumpScaling", "2.7 or later required\") # Import the low-level C/C++ module", "Vector \"\"\" return _gridfunc.GridFunction_GetTrueVector(self, *args) GetTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_GetTrueVector) def GetTrueDofs(self,", "int attribute) ProjectCoefficient(GridFunction self, mfem::Coefficient *[] coeff) \"\"\" return _gridfunc.GridFunction_ProjectCoefficient(self,", "irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeL1Error(self, *args) ComputeL1Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeL1Error)", "exsol, excurl, irs=0): r\"\"\"ComputeHCurlError(GridFunction self, VectorCoefficient exsol, VectorCoefficient excurl, mfem::IntegrationRule", "own)\"\"\" return _gridfunc.QuadratureFunction_SetOwnsSpace(self, own) SetOwnsSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetOwnsSpace) def GetElementIntRule(self, idx):", "int vdim_=1) -> QuadratureFunction __init__(QuadratureFunction self, Mesh mesh, std::istream &", "*[] irs=0) ComputeElementLpErrors(GridFunction self, double const p, VectorCoefficient exsol, Vector", "\"proxy of \" + self.this.__repr__() except __builtin__.Exception: strthis = \"\"", "def ComputeHCurlError(self, exsol, excurl, irs=0): r\"\"\"ComputeHCurlError(GridFunction self, VectorCoefficient exsol, VectorCoefficient", "_gridfunc.JumpScaling_Eval(self, h, p) Eval = _swig_new_instance_method(_gridfunc.JumpScaling_Eval) __swig_destroy__ = _gridfunc.delete_JumpScaling #", "_gridfunc.ExtrudeCoefficient_Eval(self, T, ip) Eval = _swig_new_instance_method(_gridfunc.ExtrudeCoefficient_Eval) __swig_destroy__ = _gridfunc.delete_ExtrudeCoefficient #", "int der_comp, GridFunction der)\"\"\" return _gridfunc.GridFunction_GetDerivative(self, comp, der_comp, der) GetDerivative", "ImposeBounds(GridFunction self, int i, Vector weights, 
Vector lo_, Vector hi_)", "_swig_new_instance_method(_gridfunc.GridFunction_ComputeGradError) def ComputeCurlError(self, excurl, irs=0): r\"\"\"ComputeCurlError(GridFunction self, VectorCoefficient excurl, mfem::IntegrationRule", "i, side, ir, vals, tr): r\"\"\"GetFaceVectorValues(GridFunction self, int i, int", "IntegrationRule ir, DenseMatrix vals, DenseMatrix tr) -> int\"\"\" return _gridfunc.GridFunction_GetFaceVectorValues(self,", "mfem::IntegrationRule const *[] irs=0) -> double ComputeDGFaceJumpError(GridFunction self, Coefficient exsol,", "= _swig_new_instance_method(_gridfunc.GridFunction_SaveSTL) __swig_destroy__ = _gridfunc.delete_GridFunction def __init__(self, *args): r\"\"\" __init__(GridFunction", "self, std::ostream & out, std::string const & field_name, int ref)\"\"\"", "ny): r\"\"\"Extrude1DGridFunction(Mesh mesh, Mesh mesh2d, GridFunction sol, int const ny)", "mfem._par.globals import mfem._par.matrix import mfem._par.operators import mfem._par.intrules import mfem._par.sparsemat import", "import mfem._par.linearform import mfem._par.nonlininteg class GridFunction(mfem._par.vector.Vector): r\"\"\"Proxy of C++ mfem::GridFunction", "and isinstance(getattr(type(self), name), property): set(self, name, value) else: raise AttributeError(\"You", "ComputeL2Error(GridFunction self, Coefficient exsol, mfem::IntegrationRule const *[] irs=0) -> double", "*args) MakeTRef = _swig_new_instance_method(_gridfunc.GridFunction_MakeTRef) def SaveVTK(self, out, field_name, ref): r\"\"\"SaveVTK(GridFunction", "import mfem._par.handle import mfem._par.hypre import mfem._par.restriction import mfem._par.bilininteg import mfem._par.linearform", "irs) ComputeHCurlError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeHCurlError) def ComputeMaxError(self, *args): r\"\"\" ComputeMaxError(GridFunction self,", "vals, DenseMatrix tr, int comp=0)\"\"\" return _gridfunc.GridFunction_GetVectorFieldValues(self, i, ir, vals,", "gf2) ComputeElementLpDistance = 
_gridfunc.ComputeElementLpDistance class ExtrudeCoefficient(mfem._par.coefficient.Coefficient): r\"\"\"Proxy of C++ mfem::ExtrudeCoefficient", "out, TimesToRefine) SaveSTL = _swig_new_instance_method(_gridfunc.GridFunction_SaveSTL) __swig_destroy__ = _gridfunc.delete_GridFunction def __init__(self,", "orig_func)\"\"\" return _gridfunc.GridFunction_GetBdrValuesFrom(self, orig_func) GetBdrValuesFrom = _swig_new_instance_method(_gridfunc.GridFunction_GetBdrValuesFrom) def GetVectorFieldValues(self, i,", "orig_func) GetBdrValuesFrom = _swig_new_instance_method(_gridfunc.GridFunction_GetBdrValuesFrom) def GetVectorFieldValues(self, i, ir, vals, tr,", "_swig_new_instance_method(_gridfunc.GridFunction_VectorDim) def GetTrueVector(self, *args): r\"\"\" GetTrueVector(GridFunction self) -> Vector GetTrueVector(GridFunction", "tr, int vdim=1) GetValues(GridFunction self, ElementTransformation T, IntegrationRule ir, Vector", "-> double \"\"\" return _gridfunc.GridFunction_ComputeL2Error(self, *args) ComputeL2Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeL2Error) def", "_swig_new_instance_method(_gridfunc.GridFunction_ComputeL1Error) def ComputeLpError(self, *args): r\"\"\" ComputeLpError(GridFunction self, double const p,", "Coefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0) ComputeElementL2Errors(GridFunction self,", "FiniteElementSpace f)\"\"\" return _gridfunc.GridFunction_SetSpace(self, f) SetSpace = _swig_new_instance_method(_gridfunc.GridFunction_SetSpace) def MakeRef(self,", "-> int\"\"\" return _gridfunc.QuadratureFunction_GetVDim(self) GetVDim = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetVDim) def SetVDim(self, vdim_):", "\"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficient(self, *args) ProjectBdrCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficient) def ProjectBdrCoefficientNormal(self, vcoeff,", "i, int side, IntegrationRule ir, Vector vals, DenseMatrix tr, int", "def 
GetElementAverages(self, avgs): r\"\"\"GetElementAverages(GridFunction self, GridFunction avgs)\"\"\" return _gridfunc.GridFunction_GetElementAverages(self, avgs)", "VectorCoefficient excurl, mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeHCurlError(self,", "const *[] irs=0) -> double ComputeLpError(GridFunction self, double const p,", "avgs)\"\"\" return _gridfunc.GridFunction_GetElementAverages(self, avgs) GetElementAverages = _swig_new_instance_method(_gridfunc.GridFunction_GetElementAverages) def GetElementDofValues(self, el,", "int const ip_num, Vector values) GetElementValues(QuadratureFunction self, int idx, int", "double\"\"\" return _gridfunc.ZZErrorEstimator(blfi, u, flux, error_estimates, aniso_flags, with_subdomains, with_coeff) ZZErrorEstimator", "ComputeMaxError(GridFunction self, mfem::Coefficient *[] exsol, mfem::IntegrationRule const *[] irs=0) ->", "const p, VectorCoefficient exsol, Coefficient weight=None, VectorCoefficient v_weight=None, mfem::IntegrationRule const", "_gridfunc.GridFunction_ComputeW11Error(self, exsol, exgrad, norm_type, elems, irs) ComputeW11Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeW11Error) def", "GetValue(GridFunction self, ElementTransformation T, IntegrationPoint ip, int comp=0, Vector tr=None)", "AttributeError(\"You cannot add class attributes to %s\" % cls) return", "\"\"\" return _gridfunc.GridFunction_MakeRef(self, *args) MakeRef = _swig_new_instance_method(_gridfunc.GridFunction_MakeRef) def MakeTRef(self, *args):", "VectorCoefficient exsol, VectorCoefficient excurl, mfem::IntegrationRule const *[] irs=0) -> double\"\"\"", "field_name, ref): r\"\"\"SaveVTK(GridFunction self, std::ostream & out, std::string const &", "+ self.this.__repr__() except __builtin__.Exception: strthis = \"\" return \"<%s.%s; %s", "mfem::IntegrationRule const *[] irs=0) -> double ComputeMaxError(GridFunction self, mfem::Coefficient *[]", 
"_swig_new_instance_method(_gridfunc.GridFunction_SetSpace) def MakeRef(self, *args): r\"\"\" MakeRef(GridFunction self, Vector base, int", "m, std::istream & input) -> GridFunction __init__(GridFunction self, Mesh m,", "mfem._par.matrix import mfem._par.operators import mfem._par.intrules import mfem._par.sparsemat import mfem._par.densemat import", "aniso_flags=None, int with_subdomains=1, bool with_coeff=False) -> double\"\"\" return _gridfunc.ZZErrorEstimator(blfi, u,", "ref) SaveVTK = _swig_new_instance_method(_gridfunc.GridFunction_SaveVTK) def SaveSTL(self, out, TimesToRefine=1): r\"\"\"SaveSTL(GridFunction self,", "= _swig_new_instance_method(_gridfunc.GridFunction_ComputeMaxError) def ComputeW11Error(self, exsol, exgrad, norm_type, elems=None, irs=0): r\"\"\"ComputeW11Error(GridFunction", "mfem::IntegrationRule const *[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementLpErrors(self, *args) ComputeElementLpErrors =", "int comp, int der_comp, GridFunction der)\"\"\" return _gridfunc.GridFunction_GetDerivative(self, comp, der_comp,", "import mfem._par.lininteg import mfem._par.handle import mfem._par.hypre import mfem._par.restriction import mfem._par.bilininteg", "ProjectDiscCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectDiscCoefficient) def ProjectBdrCoefficient(self, *args): r\"\"\" ProjectBdrCoefficient(GridFunction self, Coefficient", "m, s, n_): r\"\"\"__init__(ExtrudeCoefficient self, Mesh m, Coefficient s, int", "_gridfunc.GridFunction_GetNodalValues(self, vec, args[0]) vec.thisown = 0 return vec.GetDataArray() else: return", "return _gridfunc.GridFunction_ProjectCoefficient(self, *args) ProjectCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectCoefficient) ARITHMETIC = _gridfunc.GridFunction_ARITHMETIC HARMONIC", "_swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementIntRule) def GetElementValues(self, *args): r\"\"\" GetElementValues(QuadratureFunction self, int idx, Vector", 
"GetDerivative(self, comp, der_comp, der): r\"\"\"GetDerivative(GridFunction self, int comp, int der_comp,", "*args): r\"\"\" ProjectCoefficient(GridFunction self, Coefficient coeff) ProjectCoefficient(GridFunction self, Coefficient coeff,", "tr, Vector curl)\"\"\" return _gridfunc.GridFunction_GetCurl(self, tr, curl) GetCurl = _swig_new_instance_method(_gridfunc.GridFunction_GetCurl)", "side, IntegrationRule ir, Vector vals, DenseMatrix tr, int vdim=1) ->", "self, double const p, Coefficient exsol, Coefficient weight=None, mfem::IntegrationRule const", "*args) GetVectorValue = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorValue) def GetValues(self, *args): r\"\"\" GetValues(GridFunction self,", "ComputeH1Error(GridFunction self, Coefficient exsol, VectorCoefficient exgrad, Coefficient ell_coef, double Nu,", "v) MakeRef(GridFunction self, FiniteElementSpace f, Vector v, int v_offset) \"\"\"", "_gridfunc.GridFunction_GetVectorFieldNodalValues(self, val, comp) GetVectorFieldNodalValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldNodalValues) def ProjectVectorFieldOn(self, vec_field, comp=0):", "_gridfunc.GridFunction_ComputeL1Error(self, *args) ComputeL1Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeL1Error) def ComputeLpError(self, *args): r\"\"\" ComputeLpError(GridFunction", "class ExtrudeCoefficient(mfem._par.coefficient.Coefficient): r\"\"\"Proxy of C++ mfem::ExtrudeCoefficient class.\"\"\" thisown = property(lambda", "def ComputeLpError(self, *args): r\"\"\" ComputeLpError(GridFunction self, double const p, Coefficient", "tr, DenseMatrix grad)\"\"\" return _gridfunc.GridFunction_GetVectorGradient(self, tr, grad) GetVectorGradient = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorGradient)", "GridFunction src)\"\"\" return _gridfunc.GridFunction_ProjectGridFunction(self, src) ProjectGridFunction = _swig_new_instance_method(_gridfunc.GridFunction_ProjectGridFunction) def ProjectCoefficient(self,", 
"OwnsSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_OwnsSpace) def SetOwnsSpace(self, own): r\"\"\"SetOwnsSpace(QuadratureFunction self, bool own)\"\"\"", "r\"\"\"ReorderByNodes(GridFunction self)\"\"\" return _gridfunc.GridFunction_ReorderByNodes(self) ReorderByNodes = _swig_new_instance_method(_gridfunc.GridFunction_ReorderByNodes) def GetNodalValues(self, *args):", "Coefficient exdiv, mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeDivError(self,", "irs=0) -> double ComputeLpError(GridFunction self, double const p, VectorCoefficient exsol,", "_swig_new_instance_method(_gridfunc.GridFunction_GetGradients) def GetVectorGradient(self, tr, grad): r\"\"\"GetVectorGradient(GridFunction self, ElementTransformation tr, DenseMatrix", "qspace_, int vdim_=1) -> QuadratureFunction __init__(QuadratureFunction self, QuadratureSpace qspace_, double", "char const * fname, int precision=16) Save(GridFunction self, char const", "int i, GridFunction gf1, GridFunction gf2) -> double\"\"\" return _gridfunc.ComputeElementLpDistance(p,", "def ProjectCoefficient(self, *args): r\"\"\" ProjectCoefficient(GridFunction self, Coefficient coeff) ProjectCoefficient(GridFunction self,", "Save(GridFunction self, char const * file, int precision=16) \"\"\" return", "c) imul = _swig_new_instance_method(_gridfunc.GridFunction_imul) def idiv(self, c): r\"\"\"idiv(GridFunction self, double", "self, VectorCoefficient coeff) ProjectDiscCoefficient(GridFunction self, Coefficient coeff, mfem::GridFunction::AvgType type) ProjectDiscCoefficient(GridFunction", "__lshift__(std::ostream & os, SparseMatrix mat) -> std::ostream __lshift__(std::ostream & out,", "= _gridfunc.GridFunction_idiv(self, v) ret.thisown = 0 return self def __imul__(self,", "_gridfunc.GridFunction_ComputeHCurlError(self, exsol, excurl, irs) ComputeHCurlError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeHCurlError) def ComputeMaxError(self, *args):", "module if 
__package__ or \".\" in __name__: from . import", "_swig_new_instance_method(_gridfunc.GridFunction_ComputeFlux) def Assign(self, *args): r\"\"\" Assign(GridFunction self, GridFunction rhs) ->", "Vector dof_vals)\"\"\" return _gridfunc.GridFunction_GetElementDofValues(self, el, dof_vals) GetElementDofValues = _swig_new_instance_method(_gridfunc.GridFunction_GetElementDofValues) def", "exsol, Vector error, Coefficient weight=None, VectorCoefficient v_weight=None, mfem::IntegrationRule const *[]", "double\"\"\" return _gridfunc.JumpScaling_Eval(self, h, p) Eval = _swig_new_instance_method(_gridfunc.JumpScaling_Eval) __swig_destroy__ =", "out) Save(QuadratureFunction self, char const * file, int precision=16) \"\"\"", "*[] irs=0) ComputeElementMaxErrors(GridFunction self, VectorCoefficient exsol, Vector error, mfem::IntegrationRule const", "*[] irs=0) -> double ComputeLpError(GridFunction self, double const p, VectorCoefficient", "GetNodalValues(i) -> GetNodalValues(vector, vdim) GetNodalValues(i, array<dobule>, vdim) ''' from .vector", "mesh2d, sol, ny) Extrude1DGridFunction = _gridfunc.Extrude1DGridFunction def __iadd__(self, v): ret", "vdim) GetNodalValues(i, array<dobule>, vdim) ''' from .vector import Vector if", "ElementTransformation tr) -> double\"\"\" return _gridfunc.GridFunction_GetDivergence(self, tr) GetDivergence = _swig_new_instance_method(_gridfunc.GridFunction_GetDivergence)", "irs=0) -> double ComputeL2Error(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule const *[]", "import mfem._par.vector import mfem._par.coefficient import mfem._par.globals import mfem._par.matrix import mfem._par.operators", "precision=16)\"\"\" return _gridfunc.GridFunction_SaveGZ(self, file, precision) SaveGZ = _swig_new_instance_method(_gridfunc.GridFunction_SaveGZ) # Register", "avgs): r\"\"\"GetElementAverages(GridFunction self, GridFunction avgs)\"\"\" return _gridfunc.GridFunction_GetElementAverages(self, avgs) GetElementAverages =", 
"_swig_new_instance_method(_gridfunc.GridFunction_ProjectGridFunction) def ProjectCoefficient(self, *args): r\"\"\" ProjectCoefficient(GridFunction self, Coefficient coeff) ProjectCoefficient(GridFunction", "_swig_new_instance_method(_gridfunc.GridFunction_ComputeW11Error) def ComputeL1Error(self, *args): r\"\"\" ComputeL1Error(GridFunction self, Coefficient exsol, mfem::IntegrationRule", "SetFromTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueVector) def GetValue(self, *args): r\"\"\" GetValue(GridFunction self, int", "tr, vdim) GetFaceValues = _swig_new_instance_method(_gridfunc.GridFunction_GetFaceValues) def GetFaceVectorValues(self, i, side, ir,", "_gridfunc.GridFunction_iadd(self, v) ret.thisown = 0 return self def __isub__(self, v):", "-> double\"\"\" return _gridfunc.ZZErrorEstimator(blfi, u, flux, error_estimates, aniso_flags, with_subdomains, with_coeff)", "_swig_new_instance_method(_gridfunc.GridFunction_SaveVTK) def SaveSTL(self, out, TimesToRefine=1): r\"\"\"SaveSTL(GridFunction self, std::ostream & out,", "hasattr(cls, name) and not isinstance(getattr(cls, name), property): set(cls, name, value)", "self, ElementTransformation tr, DenseMatrix grad)\"\"\" return _gridfunc.GridFunction_GetVectorGradient(self, tr, grad) GetVectorGradient", "DenseMatrix tr, int comp=0)\"\"\" return _gridfunc.GridFunction_GetVectorFieldValues(self, i, ir, vals, tr,", "_swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientNormal) def ProjectBdrCoefficientTangent(self, vcoeff, bdr_attr): r\"\"\"ProjectBdrCoefficientTangent(GridFunction self, VectorCoefficient vcoeff, intArray", "GetElementValues(QuadratureFunction self, int idx, int const ip_num, Vector values) GetElementValues(QuadratureFunction", "Mesh m, Coefficient s, int n_) -> ExtrudeCoefficient\"\"\" _gridfunc.ExtrudeCoefficient_swiginit(self, _gridfunc.new_ExtrudeCoefficient(m,", "el, dof_vals): r\"\"\"GetElementDofValues(GridFunction self, int el, Vector dof_vals)\"\"\" return 
_gridfunc.GridFunction_GetElementDofValues(self,", "r\"\"\"GetTrueDofs(GridFunction self, Vector tv)\"\"\" return _gridfunc.GridFunction_GetTrueDofs(self, tv) GetTrueDofs = _swig_new_instance_method(_gridfunc.GridFunction_GetTrueDofs)", "QuadratureFunction(mfem._par.vector.Vector): r\"\"\"Proxy of C++ mfem::QuadratureFunction class.\"\"\" thisown = property(lambda x:", "GetTrueVector(GridFunction self) -> Vector \"\"\" return _gridfunc.GridFunction_GetTrueVector(self, *args) GetTrueVector =", "ElementTransformation tr, IntegrationRule ir, DenseMatrix grad) GetGradients(GridFunction self, int const", "return _gridfunc.ExtrudeCoefficient_Eval(self, T, ip) Eval = _swig_new_instance_method(_gridfunc.ExtrudeCoefficient_Eval) __swig_destroy__ = _gridfunc.delete_ExtrudeCoefficient", "ProjectDiscCoefficient(GridFunction self, Coefficient coeff, mfem::GridFunction::AvgType type) ProjectDiscCoefficient(GridFunction self, VectorCoefficient coeff,", "ret.thisown = 0 return self GridFunction.__iadd__ = __iadd__ GridFunction.__idiv__ =", "import mfem._par.bilininteg import mfem._par.linearform import mfem._par.nonlininteg class GridFunction(mfem._par.vector.Vector): r\"\"\"Proxy of", "grad) GetGradients(GridFunction self, int const elem, IntegrationRule ir, DenseMatrix grad)", "return _gridfunc.GridFunction_ProjectDiscCoefficient(self, *args) ProjectDiscCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectDiscCoefficient) def ProjectBdrCoefficient(self, *args): r\"\"\"", "gf_file, precision) SaveToFile = _swig_new_instance_method(_gridfunc.GridFunction_SaveToFile) def WriteToStream(self, StringIO): r\"\"\"WriteToStream(GridFunction self,", "changes to this file unless you know what you are", "-> FiniteElementSpace \"\"\" return _gridfunc.GridFunction_FESpace(self, *args) FESpace = _swig_new_instance_method(_gridfunc.GridFunction_FESpace) def", "_swig_new_instance_method(_gridfunc.GridFunction_WriteToStream) def iadd(self, c): r\"\"\"iadd(GridFunction self, 
GridFunction c) -> GridFunction\"\"\"", "int precision=16)\"\"\" return _gridfunc.GridFunction_SaveGZ(self, file, precision) SaveGZ = _swig_new_instance_method(_gridfunc.GridFunction_SaveGZ) #", "def GetDivergence(self, tr): r\"\"\"GetDivergence(GridFunction self, ElementTransformation tr) -> double\"\"\" return", "mfem._par.hypre import mfem._par.restriction import mfem._par.bilininteg import mfem._par.linearform import mfem._par.nonlininteg class", "self, ElementTransformation tr, Vector grad)\"\"\" return _gridfunc.GridFunction_GetGradient(self, tr, grad) GetGradient", "irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeGradError(self, exgrad, irs) ComputeGradError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeGradError)", "SaveSTL = _swig_new_instance_method(_gridfunc.GridFunction_SaveSTL) __swig_destroy__ = _gridfunc.delete_GridFunction def __init__(self, *args): r\"\"\"", "irs) ComputeGradError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeGradError) def ComputeCurlError(self, excurl, irs=0): r\"\"\"ComputeCurlError(GridFunction self,", "c): r\"\"\"imul(GridFunction self, double c) -> GridFunction\"\"\" return _gridfunc.GridFunction_imul(self, c)", "ir, vals, tr) GetFaceVectorValues = _swig_new_instance_method(_gridfunc.GridFunction_GetFaceVectorValues) def GetLaplacians(self, *args): r\"\"\"", "def RestrictConforming(self): r\"\"\"RestrictConforming(GridFunction self)\"\"\" return _gridfunc.GridFunction_RestrictConforming(self) RestrictConforming = _swig_new_instance_method(_gridfunc.GridFunction_RestrictConforming) def", "self, VectorCoefficient vcoeff, intArray dofs) ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff, int", "int i, Vector weights, Vector lo_, Vector hi_) ImposeBounds(GridFunction self,", "return _gridfunc.GridFunction_VectorDim(self) VectorDim = _swig_new_instance_method(_gridfunc.GridFunction_VectorDim) def GetTrueVector(self, *args): r\"\"\" GetTrueVector(GridFunction", "def 
SetFromTrueDofs(self, tv): r\"\"\"SetFromTrueDofs(GridFunction self, Vector tv)\"\"\" return _gridfunc.GridFunction_SetFromTrueDofs(self, tv)", "mesh, Mesh mesh2d, GridFunction sol, int const ny) -> GridFunction\"\"\"", "and not isinstance(getattr(cls, name), property): set(cls, name, value) else: raise", "_gridfunc.GridFunction_Assign(self, *args) Assign = _swig_new_instance_method(_gridfunc.GridFunction_Assign) def Update(self): r\"\"\"Update(GridFunction self)\"\"\" return", "__init__(GridFunction self, Mesh m, std::istream & input) -> GridFunction __init__(GridFunction", "exdiv, irs=0): r\"\"\"ComputeDivError(GridFunction self, Coefficient exdiv, mfem::IntegrationRule const *[] irs=0)", "def SetFromTrueVector(self): r\"\"\"SetFromTrueVector(GridFunction self)\"\"\" return _gridfunc.GridFunction_SetFromTrueVector(self) SetFromTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueVector) def", "GetValue = _swig_new_instance_method(_gridfunc.GridFunction_GetValue) def GetVectorValue(self, *args): r\"\"\" GetVectorValue(GridFunction self, int", "value) -> GridFunction Assign(GridFunction self, Vector v) -> GridFunction \"\"\"", "= _swig_new_instance_method(_gridfunc.QuadratureFunction_SetOwnsSpace) def GetElementIntRule(self, idx): r\"\"\"GetElementIntRule(QuadratureFunction self, int idx) ->", "tr=None) -> double \"\"\" return _gridfunc.GridFunction_GetValue(self, *args) GetValue = _swig_new_instance_method(_gridfunc.GridFunction_GetValue)", "GetVectorFieldValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldValues) def ReorderByNodes(self): r\"\"\"ReorderByNodes(GridFunction self)\"\"\" return _gridfunc.GridFunction_ReorderByNodes(self) ReorderByNodes", "c) -> GridFunction\"\"\" return _gridfunc.GridFunction_iadd(self, c) iadd = _swig_new_instance_method(_gridfunc.GridFunction_iadd) def", "self def __imul__(self, v): ret = _gridfunc.GridFunction_imul(self, v) ret.thisown =", "required\") # Import the low-level C/C++ module if 
__package__ or", "vdim) ''' from .vector import Vector if len(args) == 1:", "x.this.own(v), doc=\"The membership flag\") __repr__ = _swig_repr CONSTANT = _gridfunc.JumpScaling_CONSTANT", "Coefficient exsol, VectorCoefficient exgrad, int norm_type, intArray elems=None, mfem::IntegrationRule const", "*args) ComputeElementLpErrors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementLpErrors) def ComputeElementL1Errors(self, *args): r\"\"\" ComputeElementL1Errors(GridFunction self,", "FESpace = _swig_new_instance_method(_gridfunc.GridFunction_FESpace) def SetSpace(self, f): r\"\"\"SetSpace(GridFunction self, FiniteElementSpace f)\"\"\"", "ir, vals, tr, comp=0): r\"\"\"GetVectorFieldValues(GridFunction self, int i, IntegrationRule ir,", "ComputeCurlError(self, excurl, irs=0): r\"\"\"ComputeCurlError(GridFunction self, VectorCoefficient excurl, mfem::IntegrationRule const *[]", "-> double \"\"\" return _gridfunc.GridFunction_ComputeMaxError(self, *args) ComputeMaxError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeMaxError) def", "ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff) ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff, intArray dofs)", "i, ir, vals, tr, comp=0): r\"\"\"GetVectorFieldValues(GridFunction self, int i, IntegrationRule", "*args) ProjectCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectCoefficient) ARITHMETIC = _gridfunc.GridFunction_ARITHMETIC HARMONIC = _gridfunc.GridFunction_HARMONIC", "*args): r\"\"\" ComputeElementL1Errors(GridFunction self, Coefficient exsol, Vector error, mfem::IntegrationRule const", "error, mfem::IntegrationRule const *[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementMaxErrors(self, *args) ComputeElementMaxErrors", "self, PyObject * StringIO) -> PyObject *\"\"\" return _gridfunc.GridFunction_WriteToStream(self, StringIO)", "mfem._par.element import mfem._par.table import mfem._par.hash import mfem._par.vertex import mfem._par.fe_coll 
import", "VectorCoefficient vcoeff, int attribute) ProjectCoefficient(GridFunction self, mfem::Coefficient *[] coeff) \"\"\"", "flag\") __repr__ = _swig_repr def __init__(self, *args): r\"\"\" __init__(QuadratureFunction self)", "return set_instance_attr def _swig_setattr_nondynamic_class_variable(set): def set_class_attr(cls, name, value): if hasattr(cls,", "= _swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementIntRule) def GetElementValues(self, *args): r\"\"\" GetElementValues(QuadratureFunction self, int idx,", "\"\"\" return _gridfunc.GridFunction_Save(self, *args) Save = _swig_new_instance_method(_gridfunc.GridFunction_Save) def SaveGZ(self, file,", "down version of six.add_metaclass\"\"\" def wrapper(cls): return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())", "r\"\"\" ComputeElementLpErrors(GridFunction self, double const p, Coefficient exsol, Vector error,", "def ComputeElementLpErrors(self, *args): r\"\"\" ComputeElementLpErrors(GridFunction self, double const p, Coefficient", "of C++ mfem::QuadratureFunction class.\"\"\" thisown = property(lambda x: x.this.own(), lambda", "r\"\"\"GetGradient(GridFunction self, ElementTransformation tr, Vector grad)\"\"\" return _gridfunc.GridFunction_GetGradient(self, tr, grad)", "idx) -> IntegrationRule\"\"\" return _gridfunc.QuadratureFunction_GetElementIntRule(self, idx) GetElementIntRule = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementIntRule) def", "ComputeL1Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeL1Error) def ComputeLpError(self, *args): r\"\"\" ComputeLpError(GridFunction self, double", "mfem._par.bilininteg import mfem._par.linearform import mfem._par.nonlininteg class GridFunction(mfem._par.vector.Vector): r\"\"\"Proxy of C++", "class to enforce nondynamic attributes (no new attributes) for a", "GetTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_GetTrueVector) def GetTrueDofs(self, tv): r\"\"\"GetTrueDofs(GridFunction self, 
Vector tv)\"\"\"", "def ComputeMaxError(self, *args): r\"\"\" ComputeMaxError(GridFunction self, Coefficient exsol, mfem::IntegrationRule const", "GridFunction flux, bool wcoef=True, int subdomain=-1)\"\"\" return _gridfunc.GridFunction_ComputeFlux(self, blfi, flux,", "DenseMatrix grad) \"\"\" return _gridfunc.GridFunction_GetGradients(self, *args) GetGradients = _swig_new_instance_method(_gridfunc.GridFunction_GetGradients) def", "def ComputeDivError(self, exdiv, irs=0): r\"\"\"ComputeDivError(GridFunction self, Coefficient exdiv, mfem::IntegrationRule const", "-> GridFunction Assign(GridFunction self, Vector v) -> GridFunction \"\"\" return", "version of six.add_metaclass\"\"\" def wrapper(cls): return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy()) return", "ComputeGradError(self, exgrad, irs=0): r\"\"\"ComputeGradError(GridFunction self, VectorCoefficient exgrad, mfem::IntegrationRule const *[]", "bdr_attr): r\"\"\"ProjectBdrCoefficientNormal(GridFunction self, VectorCoefficient vcoeff, intArray bdr_attr)\"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficientNormal(self, vcoeff,", "-> bool\"\"\" return _gridfunc.QuadratureFunction_OwnsSpace(self) OwnsSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_OwnsSpace) def SetOwnsSpace(self, own):", "_gridfunc.ComputeElementLpDistance(p, i, gf1, gf2) ComputeElementLpDistance = _gridfunc.ComputeElementLpDistance class ExtrudeCoefficient(mfem._par.coefficient.Coefficient): r\"\"\"Proxy", "r\"\"\"iadd(GridFunction self, GridFunction c) -> GridFunction\"\"\" return _gridfunc.GridFunction_iadd(self, c) iadd", "\"\"\" return _gridfunc.QuadratureFunction_GetElementValues(self, *args) GetElementValues = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementValues) def Save(self, *args):", "tv) GetTrueDofs = _swig_new_instance_method(_gridfunc.GridFunction_GetTrueDofs) def SetTrueVector(self): r\"\"\"SetTrueVector(GridFunction self)\"\"\" return _gridfunc.GridFunction_SetTrueVector(self)", 
"int\"\"\" return _gridfunc.GridFunction_GetFaceVectorValues(self, i, side, ir, vals, tr) GetFaceVectorValues =", "hess, DenseMatrix tr, int vdim=1) \"\"\" return _gridfunc.GridFunction_GetHessians(self, *args) GetHessians", "double Nu, mfem::IntegrationRule const *[] irs=0) -> double \"\"\" return", "-> GridFunction __init__(GridFunction self, Mesh m, std::istream & input) ->", "= _gridfunc.__lshift__ def ZZErrorEstimator(blfi, u, flux, error_estimates, aniso_flags=None, with_subdomains=1, with_coeff=False):", "def iadd(self, c): r\"\"\"iadd(GridFunction self, GridFunction c) -> GridFunction\"\"\" return", "Coefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0) ComputeElementL1Errors(GridFunction self,", "return _gridfunc.GridFunction_MakeOwner(self, fec_) MakeOwner = _swig_new_instance_method(_gridfunc.GridFunction_MakeOwner) def OwnFEC(self): r\"\"\"OwnFEC(GridFunction self)", "orig_func)\"\"\" return _gridfunc.GridFunction_GetValuesFrom(self, orig_func) GetValuesFrom = _swig_new_instance_method(_gridfunc.GridFunction_GetValuesFrom) def GetBdrValuesFrom(self, orig_func):", "return _gridfunc.GridFunction_GetValuesFrom(self, orig_func) GetValuesFrom = _swig_new_instance_method(_gridfunc.GridFunction_GetValuesFrom) def GetBdrValuesFrom(self, orig_func): r\"\"\"GetBdrValuesFrom(GridFunction", "vals, tr): r\"\"\"GetFaceVectorValues(GridFunction self, int i, int side, IntegrationRule ir,", "v) -> GridFunction \"\"\" return _gridfunc.GridFunction_Assign(self, *args) Assign = _swig_new_instance_method(_gridfunc.GridFunction_Assign)", "i, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr, int comp=0)\"\"\" return", "\"\"\" return _gridfunc.__lshift__(*args) __lshift__ = _gridfunc.__lshift__ def ZZErrorEstimator(blfi, u, flux,", "_gridfunc.GridFunction_GetNodalValues(self, *args) def GetVectorFieldNodalValues(self, val, comp): r\"\"\"GetVectorFieldNodalValues(GridFunction self, Vector val,", "return _gridfunc.QuadratureFunction_SetVDim(self, vdim_) 
SetVDim = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetVDim) def OwnsSpace(self): r\"\"\"OwnsSpace(QuadratureFunction self)", "ExtrudeCoefficient(mfem._par.coefficient.Coefficient): r\"\"\"Proxy of C++ mfem::ExtrudeCoefficient class.\"\"\" thisown = property(lambda x:", "& out, GridFunction sol) -> std::ostream __lshift__(std::ostream & out, QuadratureFunction", "tr) GetDivergence = _swig_new_instance_method(_gridfunc.GridFunction_GetDivergence) def GetCurl(self, tr, curl): r\"\"\"GetCurl(GridFunction self,", "def MakeOwner(self, fec_): r\"\"\"MakeOwner(GridFunction self, FiniteElementCollection fec_)\"\"\" return _gridfunc.GridFunction_MakeOwner(self, fec_)", "GetHessians(GridFunction self, int i, IntegrationRule ir, DenseMatrix hess, int vdim=1)", "ONE_OVER_H = _gridfunc.JumpScaling_ONE_OVER_H P_SQUARED_OVER_H = _gridfunc.JumpScaling_P_SQUARED_OVER_H def __init__(self, *args, **kwargs):", ". import _gridfunc else: import _gridfunc try: import builtins as", "irs=0) -> double ComputeMaxError(GridFunction self, mfem::Coefficient *[] exsol, mfem::IntegrationRule const", "SparseMatrix mat) -> std::ostream __lshift__(std::ostream & out, Mesh mesh) ->", "norm_type) -> double ComputeH1Error(GridFunction self, Coefficient exsol, VectorCoefficient exgrad, mfem::IntegrationRule", "vdim) GetFaceValues = _swig_new_instance_method(_gridfunc.GridFunction_GetFaceValues) def GetFaceVectorValues(self, i, side, ir, vals,", "attr) \"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficient(self, *args) ProjectBdrCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficient) def ProjectBdrCoefficientNormal(self,", "_gridfunc.GridFunction_GetValuesFrom(self, orig_func) GetValuesFrom = _swig_new_instance_method(_gridfunc.GridFunction_GetValuesFrom) def GetBdrValuesFrom(self, orig_func): r\"\"\"GetBdrValuesFrom(GridFunction self,", "own) SetOwnsSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetOwnsSpace) def GetElementIntRule(self, 
idx): r\"\"\"GetElementIntRule(QuadratureFunction self, int", "self, GridFunction c) -> GridFunction\"\"\" return _gridfunc.GridFunction_iadd(self, c) iadd =", "return _gridfunc.GridFunction_GetDivergence(self, tr) GetDivergence = _swig_new_instance_method(_gridfunc.GridFunction_GetDivergence) def GetCurl(self, tr, curl):", "vcoeff, bdr_attr): r\"\"\"ProjectBdrCoefficientNormal(GridFunction self, VectorCoefficient vcoeff, intArray bdr_attr)\"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficientNormal(self,", "GridFunction der)\"\"\" return _gridfunc.GridFunction_GetDerivative(self, comp, der_comp, der) GetDerivative = _swig_new_instance_method(_gridfunc.GridFunction_GetDerivative)", "_swig_new_instance_method(_gridfunc.GridFunction_ComputeDivError) def ComputeDGFaceJumpError(self, *args): r\"\"\" ComputeDGFaceJumpError(GridFunction self, Coefficient exsol, Coefficient", "1: vec = Vector() _gridfunc.GridFunction_GetNodalValues(self, vec, args[0]) vec.thisown = 0", "= _swig_new_instance_method(_gridfunc.QuadratureFunction_Save) def SaveGZ(self, file, precision=16): r\"\"\"SaveGZ(QuadratureFunction self, char const", "return self def __idiv__(self, v): ret = _gridfunc.GridFunction_idiv(self, v) ret.thisown", "*args): r\"\"\" FESpace(GridFunction self) -> FiniteElementSpace FESpace(GridFunction self) -> FiniteElementSpace", "self, Vector val, int comp)\"\"\" return _gridfunc.GridFunction_GetVectorFieldNodalValues(self, val, comp) GetVectorFieldNodalValues", "\"this\": set(self, name, value) elif hasattr(self, name) and isinstance(getattr(type(self), name),", "comp) GetVectorFieldValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldValues) def ReorderByNodes(self): r\"\"\"ReorderByNodes(GridFunction self)\"\"\" return _gridfunc.GridFunction_ReorderByNodes(self)", "metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy()) return wrapper class _SwigNonDynamicMeta(type): \"\"\"Meta class to", "*args) GetVectorValues = 
_swig_new_instance_method(_gridfunc.GridFunction_GetVectorValues) def GetFaceValues(self, i, side, ir, vals,", "exsol, mfem::IntegrationRule const *[] irs=0) -> double ComputeL2Error(GridFunction self, VectorCoefficient", "self, Coefficient exsol, Coefficient ell_coeff, double Nu, mfem::IntegrationRule const *[]", "self, FiniteElementSpace f, double * data) -> GridFunction __init__(GridFunction self,", "= _swig_new_instance_method(_gridfunc.GridFunction_GetValue) def GetVectorValue(self, *args): r\"\"\" GetVectorValue(GridFunction self, int i,", "grad) GetGradient = _swig_new_instance_method(_gridfunc.GridFunction_GetGradient) def GetGradients(self, *args): r\"\"\" GetGradients(GridFunction self,", "_gridfunc.GridFunction_ARITHMETIC HARMONIC = _gridfunc.GridFunction_HARMONIC def ProjectDiscCoefficient(self, *args): r\"\"\" ProjectDiscCoefficient(GridFunction self,", "-> GridFunction __init__(GridFunction self, FiniteElementSpace fes, Vector v, int offset)", "exgrad, irs) ComputeGradError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeGradError) def ComputeCurlError(self, excurl, irs=0): r\"\"\"ComputeCurlError(GridFunction", "const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeDivError(self, exdiv, irs) ComputeDivError", "comp): r\"\"\"GetVectorFieldNodalValues(GridFunction self, Vector val, int comp)\"\"\" return _gridfunc.GridFunction_GetVectorFieldNodalValues(self, val,", "_gridfunc.GridFunction_GetVectorValues(self, *args) GetVectorValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorValues) def GetFaceValues(self, i, side, ir,", "if _swig_python_version_info < (2, 7, 0): raise RuntimeError(\"Python 2.7 or", "self, FiniteElementCollection fec_)\"\"\" return _gridfunc.GridFunction_MakeOwner(self, fec_) MakeOwner = _swig_new_instance_method(_gridfunc.GridFunction_MakeOwner) def", "ProjectBdrCoefficient(self, *args): r\"\"\" ProjectBdrCoefficient(GridFunction self, Coefficient coeff, intArray attr) 
ProjectBdrCoefficient(GridFunction", "i, GridFunction gf1, GridFunction gf2) -> double\"\"\" return _gridfunc.ComputeElementLpDistance(p, i,", "self, int i, IntegrationRule ir, DenseMatrix hess, int vdim=1) GetHessians(GridFunction", "self, int el, Vector dof_vals)\"\"\" return _gridfunc.GridFunction_GetElementDofValues(self, el, dof_vals) GetElementDofValues", "def ProjectBdrCoefficientNormal(self, vcoeff, bdr_attr): r\"\"\"ProjectBdrCoefficientNormal(GridFunction self, VectorCoefficient vcoeff, intArray bdr_attr)\"\"\"", "ProjectCoefficient(GridFunction self, mfem::Coefficient *[] coeff) \"\"\" return _gridfunc.GridFunction_ProjectCoefficient(self, *args) ProjectCoefficient", "c) -> GridFunction\"\"\" return _gridfunc.GridFunction_imul(self, c) imul = _swig_new_instance_method(_gridfunc.GridFunction_imul) def", "r\"\"\" ComputeL2Error(GridFunction self, Coefficient exsol, mfem::IntegrationRule const *[] irs=0) ->", "intArray aniso_flags=None, int with_subdomains=1, bool with_coeff=False) -> double\"\"\" return _gridfunc.ZZErrorEstimator(blfi,", "self, GridFunction src)\"\"\" return _gridfunc.GridFunction_ProjectGridFunction(self, src) ProjectGridFunction = _swig_new_instance_method(_gridfunc.GridFunction_ProjectGridFunction) def", "ProjectCoefficient(GridFunction self, Coefficient coeff, intArray dofs, int vd=0) ProjectCoefficient(GridFunction self,", "import Vector if len(args) == 1: vec = Vector() _gridfunc.GridFunction_GetNodalValues(self,", "= _gridfunc.ComputeElementLpDistance class ExtrudeCoefficient(mfem._par.coefficient.Coefficient): r\"\"\"Proxy of C++ mfem::ExtrudeCoefficient class.\"\"\" thisown", "blfi, flux, wcoef=True, subdomain=-1): r\"\"\"ComputeFlux(GridFunction self, BilinearFormIntegrator blfi, GridFunction flux,", "_swig_new_instance_method(_gridfunc.GridFunction_ComputeMaxError) def ComputeW11Error(self, exsol, exgrad, norm_type, elems=None, irs=0): r\"\"\"ComputeW11Error(GridFunction self,", "*args) GetElementValues = 
_swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementValues) def Save(self, *args): r\"\"\" Save(QuadratureFunction self,", "with_subdomains=1, bool with_coeff=False) -> double\"\"\" return _gridfunc.ZZErrorEstimator(blfi, u, flux, error_estimates,", "irs=0): r\"\"\"ComputeW11Error(GridFunction self, Coefficient exsol, VectorCoefficient exgrad, int norm_type, intArray", "FiniteElementSpace f, Vector tv, int tv_offset) \"\"\" return _gridfunc.GridFunction_MakeTRef(self, *args)", "ElementTransformation T, IntegrationRule ir, Vector vals, int comp=0, DenseMatrix tr=None)", "return _gridfunc.GridFunction_GetValues(self, *args) GetValues = _swig_new_instance_method(_gridfunc.GridFunction_GetValues) def GetVectorValues(self, *args): r\"\"\"", "return _gridfunc.QuadratureFunction_Save(self, *args) Save = _swig_new_instance_method(_gridfunc.QuadratureFunction_Save) def SaveGZ(self, file, precision=16):", "mfem::IntegrationRule const *[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementMaxErrors(self, *args) ComputeElementMaxErrors =", "import mfem._par.hash import mfem._par.vertex import mfem._par.fe_coll import mfem._par.lininteg import mfem._par.handle", "*args) isub = _swig_new_instance_method(_gridfunc.GridFunction_isub) def imul(self, c): r\"\"\"imul(GridFunction self, double", "*args) ComputeDGFaceJumpError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeDGFaceJumpError) def ComputeH1Error(self, *args): r\"\"\" ComputeH1Error(GridFunction self,", "or later required\") # Import the low-level C/C++ module if", "def SetVDim(self, vdim_): r\"\"\"SetVDim(QuadratureFunction self, int vdim_)\"\"\" return _gridfunc.QuadratureFunction_SetVDim(self, vdim_)", "# This file was automatically generated by SWIG (http://www.swig.org). 
#", "-> GetNodalValues(vector, vdim) GetNodalValues(i, array<dobule>, vdim) ''' from .vector import", "property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc=\"The membership flag\")", "_swig_new_instance_method(_gridfunc.GridFunction_SaveGZ) # Register GridFunction in _gridfunc: _gridfunc.GridFunction_swigregister(GridFunction) class JumpScaling(object): r\"\"\"Proxy", "len(args) == 1: vec = Vector() _gridfunc.GridFunction_GetNodalValues(self, vec, args[0]) vec.thisown", "= _swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementValues) def Save(self, *args): r\"\"\" Save(QuadratureFunction self, std::ostream &", "def VectorDim(self): r\"\"\"VectorDim(GridFunction self) -> int\"\"\" return _gridfunc.GridFunction_VectorDim(self) VectorDim =", "type_=CONSTANT) -> JumpScaling\"\"\" _gridfunc.JumpScaling_swiginit(self, _gridfunc.new_JumpScaling(*args, **kwargs)) def Eval(self, h, p):", "_gridfunc.GridFunction_GetVectorValue(self, *args) GetVectorValue = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorValue) def GetValues(self, *args): r\"\"\" GetValues(GridFunction", "const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeHCurlError(self, exsol, excurl, irs)", "idx, int const ip_num, Vector values) GetElementValues(QuadratureFunction self, int idx,", "*[] irs=0) ComputeElementL2Errors(GridFunction self, VectorCoefficient exsol, Vector error, mfem::IntegrationRule const", "int i, IntegrationRule ir, DenseMatrix hess, int vdim=1) GetHessians(GridFunction self,", "p, VectorCoefficient exsol, Vector error, Coefficient weight=None, VectorCoefficient v_weight=None, mfem::IntegrationRule", "ir, DenseMatrix vals, DenseMatrix tr, int comp=0)\"\"\" return _gridfunc.GridFunction_GetVectorFieldValues(self, i,", "file, precision=16): r\"\"\"SaveGZ(QuadratureFunction self, char const * file, int precision=16)\"\"\"", "self, VectorCoefficient vcoeff, intArray attr) ProjectBdrCoefficient(GridFunction self, mfem::Coefficient *[] coeff,", 
"r\"\"\" ComputeElementL1Errors(GridFunction self, Coefficient exsol, Vector error, mfem::IntegrationRule const *[]", "r\"\"\"GetVDim(QuadratureFunction self) -> int\"\"\" return _gridfunc.QuadratureFunction_GetVDim(self) GetVDim = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetVDim) def", "_gridfunc.ExtrudeCoefficient_swiginit(self, _gridfunc.new_ExtrudeCoefficient(m, s, n_)) def Eval(self, T, ip): r\"\"\"Eval(ExtrudeCoefficient self,", "out, field_name, ref): r\"\"\"SaveVTK(GridFunction self, std::ostream & out, std::string const", "MakeTRef = _swig_new_instance_method(_gridfunc.GridFunction_MakeTRef) def SaveVTK(self, out, field_name, ref): r\"\"\"SaveVTK(GridFunction self,", "*args): r\"\"\" isub(GridFunction self, GridFunction c) -> GridFunction isub(GridFunction self,", "double ComputeMaxError(GridFunction self, mfem::Coefficient *[] exsol, mfem::IntegrationRule const *[] irs=0)", "* file, int precision=16) \"\"\" return _gridfunc.QuadratureFunction_Save(self, *args) Save =", "int vd=0) ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff) ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff,", "const * file, int precision=16) \"\"\" return _gridfunc.GridFunction_Save(self, *args) Save", "vcoeff, intArray attr) ProjectBdrCoefficient(GridFunction self, mfem::Coefficient *[] coeff, intArray attr)", "-> std::ostream & \"\"\" return _gridfunc.__lshift__(*args) __lshift__ = _gridfunc.__lshift__ def", "__lshift__(std::ostream & out, QuadratureFunction qf) -> std::ostream & \"\"\" return", "*[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementL1Errors(self, *args) ComputeElementL1Errors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL1Errors) def", "GetValuesFrom(self, orig_func): r\"\"\"GetValuesFrom(GridFunction self, GridFunction orig_func)\"\"\" return _gridfunc.GridFunction_GetValuesFrom(self, orig_func) GetValuesFrom", "__lshift__ = _gridfunc.__lshift__ def ZZErrorEstimator(blfi, u, flux, 
error_estimates, aniso_flags=None, with_subdomains=1,", "r\"\"\" ComputeDGFaceJumpError(GridFunction self, Coefficient exsol, Coefficient ell_coeff, JumpScaling jump_scaling, mfem::IntegrationRule", "DenseMatrix tr, int vdim=1) \"\"\" return _gridfunc.GridFunction_GetLaplacians(self, *args) GetLaplacians =", "int TimesToRefine=1)\"\"\" return _gridfunc.GridFunction_SaveSTL(self, out, TimesToRefine) SaveSTL = _swig_new_instance_method(_gridfunc.GridFunction_SaveSTL) __swig_destroy__", "const precision)\"\"\" return _gridfunc.GridFunction_SaveToFile(self, gf_file, precision) SaveToFile = _swig_new_instance_method(_gridfunc.GridFunction_SaveToFile) def", "-> double ComputeL1Error(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule const *[] irs=0)", "ir, DenseMatrix hess, int vdim=1) GetHessians(GridFunction self, int i, IntegrationRule", "= _swig_new_instance_method(_gridfunc.GridFunction_GetGradients) def GetVectorGradient(self, tr, grad): r\"\"\"GetVectorGradient(GridFunction self, ElementTransformation tr,", "for adding a metaclass to a SWIG wrapped class -", "QuadratureFunction __init__(QuadratureFunction self, Mesh mesh, std::istream & _in) -> QuadratureFunction", "return _gridfunc.GridFunction_MakeRef(self, *args) MakeRef = _swig_new_instance_method(_gridfunc.GridFunction_MakeRef) def MakeTRef(self, *args): r\"\"\"", "= _gridfunc.JumpScaling_CONSTANT ONE_OVER_H = _gridfunc.JumpScaling_ONE_OVER_H P_SQUARED_OVER_H = _gridfunc.JumpScaling_P_SQUARED_OVER_H def __init__(self,", "Save(QuadratureFunction self, std::ostream & out) Save(QuadratureFunction self, char const *", "with_coeff) ZZErrorEstimator = _gridfunc.ZZErrorEstimator def ComputeElementLpDistance(p, i, gf1, gf2): r\"\"\"ComputeElementLpDistance(double", "vdim=1) GetValues(GridFunction self, int i, IntegrationRule ir, Vector vals, DenseMatrix", "def GetFaceVectorValues(self, i, side, ir, vals, tr): r\"\"\"GetFaceVectorValues(GridFunction self, int", "vd=0) ProjectCoefficient(GridFunction 
self, VectorCoefficient vcoeff) ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff, intArray", "bool with_coeff=False) -> double\"\"\" return _gridfunc.ZZErrorEstimator(blfi, u, flux, error_estimates, aniso_flags,", "strthis = \"\" return \"<%s.%s; %s >\" % (self.__class__.__module__, self.__class__.__name__,", "return _gridfunc.GridFunction_ComputeMaxError(self, *args) ComputeMaxError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeMaxError) def ComputeW11Error(self, exsol, exgrad,", "double ComputeL2Error(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule const *[] irs=0, intArray", "value) else: raise AttributeError(\"You cannot add class attributes to %s\"", "self) -> FiniteElementSpace \"\"\" return _gridfunc.GridFunction_FESpace(self, *args) FESpace = _swig_new_instance_method(_gridfunc.GridFunction_FESpace)", "Vector error, mfem::IntegrationRule const *[] irs=0) ComputeElementMaxErrors(GridFunction self, VectorCoefficient exsol,", "set_class_attr def _swig_add_metaclass(metaclass): \"\"\"Class decorator for adding a metaclass to", "self, int comp, int der_comp, GridFunction der)\"\"\" return _gridfunc.GridFunction_GetDerivative(self, comp,", "Assign(self, *args): r\"\"\" Assign(GridFunction self, GridFunction rhs) -> GridFunction Assign(GridFunction", "int vdim=1) \"\"\" return _gridfunc.GridFunction_GetLaplacians(self, *args) GetLaplacians = _swig_new_instance_method(_gridfunc.GridFunction_GetLaplacians) def", "return \"<%s.%s; %s >\" % (self.__class__.__module__, self.__class__.__name__, strthis,) def _swig_setattr_nondynamic_instance_variable(set):", "= _swig_new_instance_method(_gridfunc.GridFunction_ComputeFlux) def Assign(self, *args): r\"\"\" Assign(GridFunction self, GridFunction rhs)", "_gridfunc else: import _gridfunc try: import builtins as __builtin__ except", "mfem._par.coefficient import mfem._par.globals import mfem._par.matrix import mfem._par.operators import mfem._par.intrules import", 
"MakeRef(GridFunction self, Vector base, int offset, int size) MakeRef(GridFunction self,", "self, std::ostream & out) Save(QuadratureFunction self, char const * file,", "return _gridfunc.GridFunction_ProjectVectorFieldOn(self, vec_field, comp) ProjectVectorFieldOn = _swig_new_instance_method(_gridfunc.GridFunction_ProjectVectorFieldOn) def GetDerivative(self, comp,", "GridFunction c) -> GridFunction\"\"\" return _gridfunc.GridFunction_iadd(self, c) iadd = _swig_new_instance_method(_gridfunc.GridFunction_iadd)", "self, char const * file, int precision=16)\"\"\" return _gridfunc.QuadratureFunction_SaveGZ(self, file,", "TimesToRefine=1): r\"\"\"SaveSTL(GridFunction self, std::ostream & out, int TimesToRefine=1)\"\"\" return _gridfunc.GridFunction_SaveSTL(self,", "r\"\"\" GetElementValues(QuadratureFunction self, int idx, Vector values) GetElementValues(QuadratureFunction self, int", "return _gridfunc.GridFunction_ComputeW11Error(self, exsol, exgrad, norm_type, elems, irs) ComputeW11Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeW11Error)", "def WriteToStream(self, StringIO): r\"\"\"WriteToStream(GridFunction self, PyObject * StringIO) -> PyObject", "const * gf_file, int const precision)\"\"\" return _gridfunc.GridFunction_SaveToFile(self, gf_file, precision)", "_gridfunc: _gridfunc.GridFunction_swigregister(GridFunction) class JumpScaling(object): r\"\"\"Proxy of C++ mfem::JumpScaling class.\"\"\" thisown", "char const * file, int precision=16) \"\"\" return _gridfunc.QuadratureFunction_Save(self, *args)", "char const * file, int precision=16)\"\"\" return _gridfunc.GridFunction_SaveGZ(self, file, precision)", "tv) MakeTRef(GridFunction self, FiniteElementSpace f, Vector tv, int tv_offset) \"\"\"", "''' from .vector import Vector if len(args) == 1: vec", "_gridfunc.GridFunction_ComputeElementL1Errors(self, *args) ComputeElementL1Errors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL1Errors) def ComputeElementL2Errors(self, *args): 
r\"\"\" ComputeElementL2Errors(GridFunction", "file, precision) SaveGZ = _swig_new_instance_method(_gridfunc.QuadratureFunction_SaveGZ) # Register QuadratureFunction in _gridfunc:", "out, std::string const & field_name, int ref)\"\"\" return _gridfunc.GridFunction_SaveVTK(self, out,", "irs=0) -> double ComputeMaxError(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule const *[]", "h, int p) -> double\"\"\" return _gridfunc.JumpScaling_Eval(self, h, p) Eval", "_swig_new_instance_method(_gridfunc.GridFunction_GetElementAverages) def GetElementDofValues(self, el, dof_vals): r\"\"\"GetElementDofValues(GridFunction self, int el, Vector", "(http://www.swig.org). # Version 4.0.2 # # Do not make changes", "-> QuadratureFunction __init__(QuadratureFunction self, QuadratureFunction orig) -> QuadratureFunction __init__(QuadratureFunction self,", "double min_=0.0, double max_=mfem::infinity()) \"\"\" return _gridfunc.GridFunction_ImposeBounds(self, *args) ImposeBounds =", "Vector vals, DenseMatrix tr, int vdim=1) -> int\"\"\" return _gridfunc.GridFunction_GetFaceValues(self,", "import mfem._par.array import mfem._par.mem_manager import mfem._par.vector import mfem._par.coefficient import mfem._par.globals", "side, ir, vals, tr, vdim) GetFaceValues = _swig_new_instance_method(_gridfunc.GridFunction_GetFaceValues) def GetFaceVectorValues(self,", "return _gridfunc.GridFunction_GetGradient(self, tr, grad) GetGradient = _swig_new_instance_method(_gridfunc.GridFunction_GetGradient) def GetGradients(self, *args):", "vdim=1) \"\"\" return _gridfunc.GridFunction_GetLaplacians(self, *args) GetLaplacians = _swig_new_instance_method(_gridfunc.GridFunction_GetLaplacians) def GetHessians(self,", "elif hasattr(self, name) and isinstance(getattr(type(self), name), property): set(self, name, value)", "r\"\"\"ProjectVectorFieldOn(GridFunction self, GridFunction vec_field, int comp=0)\"\"\" return _gridfunc.GridFunction_ProjectVectorFieldOn(self, vec_field, comp)", "el, dof_vals) 
GetElementDofValues = _swig_new_instance_method(_gridfunc.GridFunction_GetElementDofValues) def ImposeBounds(self, *args): r\"\"\" ImposeBounds(GridFunction", "ComputeElementL2Errors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL2Errors) def ComputeElementMaxErrors(self, *args): r\"\"\" ComputeElementMaxErrors(GridFunction self, Coefficient", "GetElementValues(QuadratureFunction self, int idx, DenseMatrix values) \"\"\" return _gridfunc.QuadratureFunction_GetElementValues(self, *args)", "_gridfunc.GridFunction_ComputeGradError(self, exgrad, irs) ComputeGradError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeGradError) def ComputeCurlError(self, excurl, irs=0):", "Vector error_estimates, intArray aniso_flags=None, int with_subdomains=1, bool with_coeff=False) -> double\"\"\"", "*args) GetValues = _swig_new_instance_method(_gridfunc.GridFunction_GetValues) def GetVectorValues(self, *args): r\"\"\" GetVectorValues(GridFunction self,", "mfem._par.intrules import mfem._par.sparsemat import mfem._par.densemat import mfem._par.eltrans import mfem._par.fe import", "VectorCoefficient exgrad, mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeGradError(self,", "cls) return set_class_attr def _swig_add_metaclass(metaclass): \"\"\"Class decorator for adding a", "bdr_attr) ProjectBdrCoefficientNormal = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientNormal) def ProjectBdrCoefficientTangent(self, vcoeff, bdr_attr): r\"\"\"ProjectBdrCoefficientTangent(GridFunction self,", "ComputeElementL2Errors(GridFunction self, Coefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0)", "field_name, int ref)\"\"\" return _gridfunc.GridFunction_SaveVTK(self, out, field_name, ref) SaveVTK =", "-> QuadratureSpace\"\"\" return _gridfunc.QuadratureFunction_GetSpace(self) GetSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetSpace) def SetSpace(self, *args):", "double\"\"\" 
return _gridfunc.GridFunction_ComputeHDivError(self, exsol, exdiv, irs) ComputeHDivError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeHDivError) def", "This file was automatically generated by SWIG (http://www.swig.org). # Version", "ir, Vector laps, int vdim=1) GetLaplacians(GridFunction self, int i, IntegrationRule", "r\"\"\"GetValuesFrom(GridFunction self, GridFunction orig_func)\"\"\" return _gridfunc.GridFunction_GetValuesFrom(self, orig_func) GetValuesFrom = _swig_new_instance_method(_gridfunc.GridFunction_GetValuesFrom)", "_gridfunc.QuadratureFunction_GetElementValues(self, *args) GetElementValues = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementValues) def Save(self, *args): r\"\"\" Save(QuadratureFunction", "GetCurl = _swig_new_instance_method(_gridfunc.GridFunction_GetCurl) def GetGradient(self, tr, grad): r\"\"\"GetGradient(GridFunction self, ElementTransformation", "you are doing--modify # the SWIG interface file instead. from", "exsol, VectorCoefficient exgrad, mfem::IntegrationRule const *[] irs=0) -> double \"\"\"", "lo_, Vector hi_) ImposeBounds(GridFunction self, int i, Vector weights, double", "return _gridfunc.GridFunction_GetVectorFieldNodalValues(self, val, comp) GetVectorFieldNodalValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldNodalValues) def ProjectVectorFieldOn(self, vec_field,", "-> FiniteElementCollection\"\"\" return _gridfunc.GridFunction_OwnFEC(self) OwnFEC = _swig_new_instance_method(_gridfunc.GridFunction_OwnFEC) def VectorDim(self): r\"\"\"VectorDim(GridFunction", "= _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficient) def ProjectBdrCoefficientNormal(self, vcoeff, bdr_attr): r\"\"\"ProjectBdrCoefficientNormal(GridFunction self, VectorCoefficient vcoeff,", "= _swig_new_instance_method(_gridfunc.QuadratureFunction_SaveGZ) # Register QuadratureFunction in _gridfunc: _gridfunc.QuadratureFunction_swigregister(QuadratureFunction) def __lshift__(*args):", 
"weight=None, mfem::IntegrationRule const *[] irs=0) -> double ComputeLpError(GridFunction self, double", "intArray elems=None, mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeW11Error(self,", "exsol, excurl, irs) ComputeHCurlError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeHCurlError) def ComputeMaxError(self, *args): r\"\"\"", "idx) GetElementIntRule = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementIntRule) def GetElementValues(self, *args): r\"\"\" GetElementValues(QuadratureFunction self,", "IntegrationRule ir, Vector laps, DenseMatrix tr, int vdim=1) \"\"\" return", "self, int idx, DenseMatrix values) GetElementValues(QuadratureFunction self, int idx, DenseMatrix", "self, int i, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr, int", "__init__(QuadratureFunction self) -> QuadratureFunction __init__(QuadratureFunction self, QuadratureFunction orig) -> QuadratureFunction", "self, int idx, Vector values) GetElementValues(QuadratureFunction self, int idx, Vector", "vdim_=-1) \"\"\" return _gridfunc.QuadratureFunction_SetSpace(self, *args) SetSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetSpace) def GetVDim(self):", "ret.thisown = 0 return self def __imul__(self, v): ret =", "import mfem._par.mem_manager import mfem._par.vector import mfem._par.coefficient import mfem._par.globals import mfem._par.matrix", "irs=0) ComputeElementMaxErrors(GridFunction self, VectorCoefficient exsol, Vector error, mfem::IntegrationRule const *[]", "def GetGradient(self, tr, grad): r\"\"\"GetGradient(GridFunction self, ElementTransformation tr, Vector grad)\"\"\"", "# the SWIG interface file instead. 
from sys import version_info", "self, VectorCoefficient exgrad, mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return", "self, Mesh mesh, std::istream & _in) -> QuadratureFunction \"\"\" _gridfunc.QuadratureFunction_swiginit(self,", "_swig_setattr_nondynamic_class_variable(type.__setattr__) import weakref import mfem._par.array import mfem._par.mem_manager import mfem._par.vector import", "ell_coeff, JumpScaling jump_scaling, mfem::IntegrationRule const *[] irs=0) -> double ComputeDGFaceJumpError(GridFunction", "_gridfunc.QuadratureFunction_Save(self, *args) Save = _swig_new_instance_method(_gridfunc.QuadratureFunction_Save) def SaveGZ(self, file, precision=16): r\"\"\"SaveGZ(QuadratureFunction", "GetValues(GridFunction self, ElementTransformation T, IntegrationRule ir, Vector vals, int comp=0,", "from sys import version_info as _swig_python_version_info if _swig_python_version_info < (2,", "of six.add_metaclass\"\"\" def wrapper(cls): return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy()) return wrapper", "r\"\"\" MakeTRef(GridFunction self, FiniteElementSpace f, double * tv) MakeTRef(GridFunction self,", "_gridfunc.GridFunction_ComputeL2Error(self, *args) ComputeL2Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeL2Error) def ComputeGradError(self, exgrad, irs=0): r\"\"\"ComputeGradError(GridFunction", "jump_scaling, mfem::IntegrationRule const *[] irs=0) -> double ComputeDGFaceJumpError(GridFunction self, Coefficient", "_gridfunc.JumpScaling_P_SQUARED_OVER_H def __init__(self, *args, **kwargs): r\"\"\"__init__(JumpScaling self, double nu_=1.0, mfem::JumpScaling::JumpScalingType", "vdim=1): r\"\"\"GetFaceValues(GridFunction self, int i, int side, IntegrationRule ir, Vector", "intArray dofs, int vd=0) ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff) ProjectCoefficient(GridFunction self,", "mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeDivError(self, exdiv, irs)", "tr, 
comp=0): r\"\"\"GetVectorFieldValues(GridFunction self, int i, IntegrationRule ir, DenseMatrix vals,", "%s\" % cls) return set_class_attr def _swig_add_metaclass(metaclass): \"\"\"Class decorator for", "later required\") # Import the low-level C/C++ module if __package__", "sol) -> std::ostream __lshift__(std::ostream & out, QuadratureFunction qf) -> std::ostream", "_gridfunc.GridFunction_MakeOwner(self, fec_) MakeOwner = _swig_new_instance_method(_gridfunc.GridFunction_MakeOwner) def OwnFEC(self): r\"\"\"OwnFEC(GridFunction self) ->", "vals, DenseMatrix tr, int vdim=1) GetValues(GridFunction self, ElementTransformation T, IntegrationRule", "= _gridfunc.GridFunction_iadd(self, v) ret.thisown = 0 return self def __isub__(self,", "r\"\"\"SetFromTrueDofs(GridFunction self, Vector tv)\"\"\" return _gridfunc.GridFunction_SetFromTrueDofs(self, tv) SetFromTrueDofs = _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueDofs)", "= _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientTangent) def ComputeL2Error(self, *args): r\"\"\" ComputeL2Error(GridFunction self, Coefficient exsol,", "comp=0, DenseMatrix tr=None) \"\"\" return _gridfunc.GridFunction_GetValues(self, *args) GetValues = _swig_new_instance_method(_gridfunc.GridFunction_GetValues)", "SetSpace = _swig_new_instance_method(_gridfunc.GridFunction_SetSpace) def MakeRef(self, *args): r\"\"\" MakeRef(GridFunction self, Vector", "= _swig_new_instance_method(_gridfunc.GridFunction_SetSpace) def MakeRef(self, *args): r\"\"\" MakeRef(GridFunction self, Vector base,", "SetSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetSpace) def GetVDim(self): r\"\"\"GetVDim(QuadratureFunction self) -> int\"\"\" return", "%s >\" % (self.__class__.__module__, self.__class__.__name__, strthis,) def _swig_setattr_nondynamic_instance_variable(set): def set_instance_attr(self,", "int side, IntegrationRule ir, Vector vals, DenseMatrix tr, int vdim=1)", "Coefficient weight=None, mfem::IntegrationRule 
const *[] irs=0) ComputeElementLpErrors(GridFunction self, double const", "self, double const p, VectorCoefficient exsol, Vector error, Coefficient weight=None,", "mfem._par.ncmesh import mfem._par.vtk import mfem._par.element import mfem._par.table import mfem._par.hash import", "MakeRef(GridFunction self, FiniteElementSpace f, Vector v, int v_offset) \"\"\" return", "const *[] irs=0) ComputeElementMaxErrors(GridFunction self, VectorCoefficient exsol, Vector error, mfem::IntegrationRule", "QuadratureFunction \"\"\" _gridfunc.QuadratureFunction_swiginit(self, _gridfunc.new_QuadratureFunction(*args)) __swig_destroy__ = _gridfunc.delete_QuadratureFunction def GetSpace(self): r\"\"\"GetSpace(QuadratureFunction", "int vdim=1) GetValues(GridFunction self, int i, IntegrationRule ir, Vector vals,", "exgrad, norm_type, elems=None, irs=0): r\"\"\"ComputeW11Error(GridFunction self, Coefficient exsol, VectorCoefficient exgrad,", "const p, Coefficient exsol, Vector error, Coefficient weight=None, mfem::IntegrationRule const", "QuadratureFunction in _gridfunc: _gridfunc.QuadratureFunction_swigregister(QuadratureFunction) def __lshift__(*args): r\"\"\" __lshift__(std::ostream & os,", "import mfem._par.fespace import mfem._par.mesh import mfem._par.sort_pairs import mfem._par.ncmesh import mfem._par.vtk", "= _swig_new_instance_method(_gridfunc.GridFunction_Save) def SaveGZ(self, file, precision=16): r\"\"\"SaveGZ(GridFunction self, char const", "membership flag\") __repr__ = _swig_repr def MakeOwner(self, fec_): r\"\"\"MakeOwner(GridFunction self,", "= _swig_new_instance_method(_gridfunc.GridFunction_RestrictConforming) def ProjectGridFunction(self, src): r\"\"\"ProjectGridFunction(GridFunction self, GridFunction src)\"\"\" return", "instance attributes to %s\" % self) return set_instance_attr def _swig_setattr_nondynamic_class_variable(set):", "vcoeff) ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff, intArray dofs) ProjectCoefficient(GridFunction self, 
VectorCoefficient", "GridFunction orig) -> GridFunction __init__(GridFunction self, FiniteElementSpace f) -> GridFunction", "self, Coefficient exsol, mfem::IntegrationRule const *[] irs=0) -> double ComputeL2Error(GridFunction", "GridFunction gf1, GridFunction gf2) -> double\"\"\" return _gridfunc.ComputeElementLpDistance(p, i, gf1,", "from . import _gridfunc else: import _gridfunc try: import builtins", "mfem._par.handle import mfem._par.hypre import mfem._par.restriction import mfem._par.bilininteg import mfem._par.linearform import", "DenseMatrix tr, int vdim=1) -> int\"\"\" return _gridfunc.GridFunction_GetFaceValues(self, i, side,", "= \"\" return \"<%s.%s; %s >\" % (self.__class__.__module__, self.__class__.__name__, strthis,)", "_swig_new_instance_method(_gridfunc.GridFunction_GetElementDofValues) def ImposeBounds(self, *args): r\"\"\" ImposeBounds(GridFunction self, int i, Vector", "mfem._par.fe_coll import mfem._par.lininteg import mfem._par.handle import mfem._par.hypre import mfem._par.restriction import", "_gridfunc.__lshift__ def ZZErrorEstimator(blfi, u, flux, error_estimates, aniso_flags=None, with_subdomains=1, with_coeff=False): r\"\"\"ZZErrorEstimator(BilinearFormIntegrator", "f, double * v) MakeRef(GridFunction self, FiniteElementSpace f, Vector v,", "ProjectCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectCoefficient) ARITHMETIC = _gridfunc.GridFunction_ARITHMETIC HARMONIC = _gridfunc.GridFunction_HARMONIC def", "values) GetElementValues(QuadratureFunction self, int idx, DenseMatrix values) \"\"\" return _gridfunc.QuadratureFunction_GetElementValues(self,", "GetNodalValues(self, *args): ''' GetNodalValues(i) -> GetNodalValues(vector, vdim) GetNodalValues(i, array<dobule>, vdim)", "ProjectDiscCoefficient(GridFunction self, VectorCoefficient coeff, mfem::GridFunction::AvgType type) \"\"\" return _gridfunc.GridFunction_ProjectDiscCoefficient(self, *args)", "ell_coef, double Nu, int norm_type) -> double 
ComputeH1Error(GridFunction self, Coefficient", "_gridfunc.GridFunction_ImposeBounds(self, *args) ImposeBounds = _swig_new_instance_method(_gridfunc.GridFunction_ImposeBounds) def RestrictConforming(self): r\"\"\"RestrictConforming(GridFunction self)\"\"\" return", "-> GridFunction \"\"\" _gridfunc.GridFunction_swiginit(self, _gridfunc.new_GridFunction(*args)) def SaveToFile(self, gf_file, precision): r\"\"\"SaveToFile(GridFunction", "ComputeElementLpErrors(self, *args): r\"\"\" ComputeElementLpErrors(GridFunction self, double const p, Coefficient exsol,", "imul = _swig_new_instance_method(_gridfunc.GridFunction_imul) def idiv(self, c): r\"\"\"idiv(GridFunction self, double c)", "irs=0) ComputeElementLpErrors(GridFunction self, double const p, VectorCoefficient exsol, Vector error,", "int subdomain=-1)\"\"\" return _gridfunc.GridFunction_ComputeFlux(self, blfi, flux, wcoef, subdomain) ComputeFlux =", "vec = Vector() _gridfunc.GridFunction_GetNodalValues(self, vec, args[0]) vec.thisown = 0 return", "_gridfunc.GridFunction_ComputeLpError(self, *args) ComputeLpError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeLpError) def ComputeElementLpErrors(self, *args): r\"\"\" ComputeElementLpErrors(GridFunction", "_swig_new_instance_method(_gridfunc.GridFunction_SetTrueVector) def SetFromTrueDofs(self, tv): r\"\"\"SetFromTrueDofs(GridFunction self, Vector tv)\"\"\" return _gridfunc.GridFunction_SetFromTrueDofs(self,", "lambda x, v: x.this.own(v), doc=\"The membership flag\") __repr__ = _swig_repr", "ComputeHCurlError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeHCurlError) def ComputeMaxError(self, *args): r\"\"\" ComputeMaxError(GridFunction self, Coefficient", "self, BilinearFormIntegrator blfi, GridFunction flux, bool wcoef=True, int subdomain=-1)\"\"\" return", "_gridfunc.GridFunction_idiv(self, v) ret.thisown = 0 return self def __imul__(self, v):", "double * v) MakeRef(GridFunction self, FiniteElementSpace f, Vector v, int", "Eval = 
_swig_new_instance_method(_gridfunc.ExtrudeCoefficient_Eval) __swig_destroy__ = _gridfunc.delete_ExtrudeCoefficient # Register ExtrudeCoefficient in", "-> double \"\"\" return _gridfunc.GridFunction_GetValue(self, *args) GetValue = _swig_new_instance_method(_gridfunc.GridFunction_GetValue) def", "der_comp, der) GetDerivative = _swig_new_instance_method(_gridfunc.GridFunction_GetDerivative) def GetDivergence(self, tr): r\"\"\"GetDivergence(GridFunction self,", "r\"\"\" SetSpace(QuadratureFunction self, QuadratureSpace qspace_, int vdim_=-1) SetSpace(QuadratureFunction self, QuadratureSpace", "laps, DenseMatrix tr, int vdim=1) \"\"\" return _gridfunc.GridFunction_GetLaplacians(self, *args) GetLaplacians", "_gridfunc.GridFunction_ComputeDGFaceJumpError(self, *args) ComputeDGFaceJumpError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeDGFaceJumpError) def ComputeH1Error(self, *args): r\"\"\" ComputeH1Error(GridFunction", "tr, grad) GetVectorGradient = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorGradient) def GetElementAverages(self, avgs): r\"\"\"GetElementAverages(GridFunction self,", "OwnFEC(self): r\"\"\"OwnFEC(GridFunction self) -> FiniteElementCollection\"\"\" return _gridfunc.GridFunction_OwnFEC(self) OwnFEC = _swig_new_instance_method(_gridfunc.GridFunction_OwnFEC)", "\"\"\" return _gridfunc.GridFunction_GetValue(self, *args) GetValue = _swig_new_instance_method(_gridfunc.GridFunction_GetValue) def GetVectorValue(self, *args):", "error, mfem::IntegrationRule const *[] irs=0) ComputeElementMaxErrors(GridFunction self, VectorCoefficient exsol, Vector", "* qf_data, int vdim_=1) -> QuadratureFunction __init__(QuadratureFunction self, Mesh mesh,", "FiniteElementCollection fec_)\"\"\" return _gridfunc.GridFunction_MakeOwner(self, fec_) MakeOwner = _swig_new_instance_method(_gridfunc.GridFunction_MakeOwner) def OwnFEC(self):", "Register GridFunction in _gridfunc: _gridfunc.GridFunction_swigregister(GridFunction) class JumpScaling(object): 
r\"\"\"Proxy of C++", "exsol, Vector error, mfem::IntegrationRule const *[] irs=0) ComputeElementMaxErrors(GridFunction self, VectorCoefficient", "r\"\"\"ComputeFlux(GridFunction self, BilinearFormIntegrator blfi, GridFunction flux, bool wcoef=True, int subdomain=-1)\"\"\"", "return _gridfunc.GridFunction_Update(self) Update = _swig_new_instance_method(_gridfunc.GridFunction_Update) def FESpace(self, *args): r\"\"\" FESpace(GridFunction", "irs) ComputeDivError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeDivError) def ComputeDGFaceJumpError(self, *args): r\"\"\" ComputeDGFaceJumpError(GridFunction self,", "i, IntegrationRule ir, Vector laps, DenseMatrix tr, int vdim=1) \"\"\"", "comp) GetVectorFieldNodalValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldNodalValues) def ProjectVectorFieldOn(self, vec_field, comp=0): r\"\"\"ProjectVectorFieldOn(GridFunction self,", "_gridfunc.GridFunction_ProjectGridFunction(self, src) ProjectGridFunction = _swig_new_instance_method(_gridfunc.GridFunction_ProjectGridFunction) def ProjectCoefficient(self, *args): r\"\"\" ProjectCoefficient(GridFunction", "_gridfunc.GridFunction_ProjectBdrCoefficient(self, *args) ProjectBdrCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficient) def ProjectBdrCoefficientNormal(self, vcoeff, bdr_attr): r\"\"\"ProjectBdrCoefficientNormal(GridFunction", "& out, std::string const & field_name, int ref)\"\"\" return _gridfunc.GridFunction_SaveVTK(self,", "std::ostream & out) Save(QuadratureFunction self, char const * file, int", "Coefficient s, int n_) -> ExtrudeCoefficient\"\"\" _gridfunc.ExtrudeCoefficient_swiginit(self, _gridfunc.new_ExtrudeCoefficient(m, s, n_))", "_swig_new_instance_method(_gridfunc.QuadratureFunction_SetSpace) def GetVDim(self): r\"\"\"GetVDim(QuadratureFunction self) -> int\"\"\" return _gridfunc.QuadratureFunction_GetVDim(self) GetVDim", "= _swig_new_instance_method(_gridfunc.GridFunction_SetTrueVector) def 
SetFromTrueDofs(self, tv): r\"\"\"SetFromTrueDofs(GridFunction self, Vector tv)\"\"\" return", "coeff, intArray attr) ProjectBdrCoefficient(GridFunction self, VectorCoefficient vcoeff, intArray attr) ProjectBdrCoefficient(GridFunction", "self, QuadratureSpace qspace_, double * qf_data, int vdim_=1) -> QuadratureFunction", "SaveToFile = _swig_new_instance_method(_gridfunc.GridFunction_SaveToFile) def WriteToStream(self, StringIO): r\"\"\"WriteToStream(GridFunction self, PyObject *", "r\"\"\"GetElementAverages(GridFunction self, GridFunction avgs)\"\"\" return _gridfunc.GridFunction_GetElementAverages(self, avgs) GetElementAverages = _swig_new_instance_method(_gridfunc.GridFunction_GetElementAverages)", "\"\"\" return _gridfunc.GridFunction_GetGradients(self, *args) GetGradients = _swig_new_instance_method(_gridfunc.GridFunction_GetGradients) def GetVectorGradient(self, tr,", "-> GridFunction __init__(GridFunction self, FiniteElementSpace f) -> GridFunction __init__(GridFunction self,", "coeff, mfem::GridFunction::AvgType type) ProjectDiscCoefficient(GridFunction self, VectorCoefficient coeff, mfem::GridFunction::AvgType type) \"\"\"", "& out) Save(QuadratureFunction self, char const * file, int precision=16)", "_swig_new_instance_method(_gridfunc.GridFunction_GetDivergence) def GetCurl(self, tr, curl): r\"\"\"GetCurl(GridFunction self, ElementTransformation tr, Vector", "r\"\"\"GetElementDofValues(GridFunction self, int el, Vector dof_vals)\"\"\" return _gridfunc.GridFunction_GetElementDofValues(self, el, dof_vals)", "exsol, Coefficient exdiv, mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return", "SetFromTrueDofs = _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueDofs) def SetFromTrueVector(self): r\"\"\"SetFromTrueVector(GridFunction self)\"\"\" return _gridfunc.GridFunction_SetFromTrueVector(self) SetFromTrueVector", "ComputeDGFaceJumpError(GridFunction self, Coefficient exsol, Coefficient ell_coeff, JumpScaling jump_scaling, 
mfem::IntegrationRule const", "raise AttributeError(\"You cannot add class attributes to %s\" % cls)", "_swig_new_instance_method(_gridfunc.GridFunction_MakeRef) def MakeTRef(self, *args): r\"\"\" MakeTRef(GridFunction self, FiniteElementSpace f, double", "mfem::IntegrationRule const *[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementL2Errors(self, *args) ComputeElementL2Errors =", "Mesh mesh2d, GridFunction sol, int const ny) -> GridFunction\"\"\" return", "4.0.2 # # Do not make changes to this file", "mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeW11Error(self, exsol, exgrad,", "<filename>mfem/_par/gridfunc.py # This file was automatically generated by SWIG (http://www.swig.org).", "strthis,) def _swig_setattr_nondynamic_instance_variable(set): def set_instance_attr(self, name, value): if name ==", "(self.__class__.__module__, self.__class__.__name__, strthis,) def _swig_setattr_nondynamic_instance_variable(set): def set_instance_attr(self, name, value): if", "tr, int vdim=1) \"\"\" return _gridfunc.GridFunction_GetLaplacians(self, *args) GetLaplacians = _swig_new_instance_method(_gridfunc.GridFunction_GetLaplacians)", "_gridfunc.QuadratureFunction_SaveGZ(self, file, precision) SaveGZ = _swig_new_instance_method(_gridfunc.QuadratureFunction_SaveGZ) # Register QuadratureFunction in", "bdr_attr): r\"\"\"ProjectBdrCoefficientTangent(GridFunction self, VectorCoefficient vcoeff, intArray bdr_attr)\"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficientTangent(self, vcoeff,", "-> double\"\"\" return _gridfunc.GridFunction_ComputeW11Error(self, exsol, exgrad, norm_type, elems, irs) ComputeW11Error", "name) and isinstance(getattr(type(self), name), property): set(self, name, value) else: raise", "Vector weights, Vector lo_, Vector hi_) ImposeBounds(GridFunction self, int i,", "__swig_destroy__ = _gridfunc.delete_ExtrudeCoefficient # Register ExtrudeCoefficient in _gridfunc: 
_gridfunc.ExtrudeCoefficient_swigregister(ExtrudeCoefficient) def", "import mfem._par.globals import mfem._par.matrix import mfem._par.operators import mfem._par.intrules import mfem._par.sparsemat", "*args) MakeRef = _swig_new_instance_method(_gridfunc.GridFunction_MakeRef) def MakeTRef(self, *args): r\"\"\" MakeTRef(GridFunction self,", "self, Mesh m, std::istream & input) -> GridFunction __init__(GridFunction self,", "der_comp, GridFunction der)\"\"\" return _gridfunc.GridFunction_GetDerivative(self, comp, der_comp, der) GetDerivative =", "return _gridfunc.GridFunction_ComputeElementLpErrors(self, *args) ComputeElementLpErrors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementLpErrors) def ComputeElementL1Errors(self, *args): r\"\"\"", "import mfem._par.restriction import mfem._par.bilininteg import mfem._par.linearform import mfem._par.nonlininteg class GridFunction(mfem._par.vector.Vector):", "Coefficient coeff, mfem::GridFunction::AvgType type) ProjectDiscCoefficient(GridFunction self, VectorCoefficient coeff, mfem::GridFunction::AvgType type)", "excurl, irs) ComputeHCurlError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeHCurlError) def ComputeMaxError(self, *args): r\"\"\" ComputeMaxError(GridFunction", "rhs) -> GridFunction Assign(GridFunction self, double value) -> GridFunction Assign(GridFunction", "wcoef, subdomain) ComputeFlux = _swig_new_instance_method(_gridfunc.GridFunction_ComputeFlux) def Assign(self, *args): r\"\"\" Assign(GridFunction", "ir, DenseMatrix grad) GetGradients(GridFunction self, int const elem, IntegrationRule ir,", "attr) ProjectBdrCoefficient(GridFunction self, mfem::Coefficient *[] coeff, intArray attr) \"\"\" return", "return _gridfunc.GridFunction_SaveToFile(self, gf_file, precision) SaveToFile = _swig_new_instance_method(_gridfunc.GridFunction_SaveToFile) def WriteToStream(self, StringIO):", "self, Coefficient coeff, intArray dofs, int vd=0) ProjectCoefficient(GridFunction self, VectorCoefficient", 
"_swig_new_instance_method(_gridfunc.GridFunction_OwnFEC) def VectorDim(self): r\"\"\"VectorDim(GridFunction self) -> int\"\"\" return _gridfunc.GridFunction_VectorDim(self) VectorDim", "ip_num, Vector values) GetElementValues(QuadratureFunction self, int idx, DenseMatrix values) GetElementValues(QuadratureFunction", "r\"\"\"Proxy of C++ mfem::JumpScaling class.\"\"\" thisown = property(lambda x: x.this.own(),", "self, Coefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0) ComputeElementL2Errors(GridFunction", "qspace_, double * qf_data, int vdim_=1) -> QuadratureFunction __init__(QuadratureFunction self,", "Assign(GridFunction self, double value) -> GridFunction Assign(GridFunction self, Vector v)", "mesh, std::istream & _in) -> QuadratureFunction \"\"\" _gridfunc.QuadratureFunction_swiginit(self, _gridfunc.new_QuadratureFunction(*args)) __swig_destroy__", "flux, bool wcoef=True, int subdomain=-1)\"\"\" return _gridfunc.GridFunction_ComputeFlux(self, blfi, flux, wcoef,", "Coefficient exsol, mfem::IntegrationRule const *[] irs=0) -> double ComputeL2Error(GridFunction self,", "i, int side, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr) ->", "dof_vals): r\"\"\"GetElementDofValues(GridFunction self, int el, Vector dof_vals)\"\"\" return _gridfunc.GridFunction_GetElementDofValues(self, el,", "std::ostream & out, int TimesToRefine=1)\"\"\" return _gridfunc.GridFunction_SaveSTL(self, out, TimesToRefine) SaveSTL", "double \"\"\" return _gridfunc.GridFunction_ComputeMaxError(self, *args) ComputeMaxError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeMaxError) def ComputeW11Error(self,", "name, value): if name == \"thisown\": self.this.own(value) elif name ==", "double \"\"\" return _gridfunc.GridFunction_ComputeH1Error(self, *args) ComputeH1Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeH1Error) def ComputeHDivError(self,", "*args): r\"\"\" ImposeBounds(GridFunction self, int i, Vector weights, Vector lo_,", "def 
GetElementDofValues(self, el, dof_vals): r\"\"\"GetElementDofValues(GridFunction self, int el, Vector dof_vals)\"\"\"", "mfem::IntegrationRule const *[] irs=0) -> double ComputeMaxError(GridFunction self, VectorCoefficient exsol,", "def ComputeW11Error(self, exsol, exgrad, norm_type, elems=None, irs=0): r\"\"\"ComputeW11Error(GridFunction self, Coefficient", "# Import the low-level C/C++ module if __package__ or \".\"", "\"\"\" return _gridfunc.GridFunction_GetValues(self, *args) GetValues = _swig_new_instance_method(_gridfunc.GridFunction_GetValues) def GetVectorValues(self, *args):", "irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeMaxError(self, *args) ComputeMaxError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeMaxError)", "_swig_new_instance_method(_gridfunc.QuadratureFunction_SetOwnsSpace) def GetElementIntRule(self, idx): r\"\"\"GetElementIntRule(QuadratureFunction self, int idx) -> IntegrationRule\"\"\"", "\"\"\" return _gridfunc.GridFunction_ComputeDGFaceJumpError(self, *args) ComputeDGFaceJumpError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeDGFaceJumpError) def ComputeH1Error(self, *args):", "der)\"\"\" return _gridfunc.GridFunction_GetDerivative(self, comp, der_comp, der) GetDerivative = _swig_new_instance_method(_gridfunc.GridFunction_GetDerivative) def", "src)\"\"\" return _gridfunc.GridFunction_ProjectGridFunction(self, src) ProjectGridFunction = _swig_new_instance_method(_gridfunc.GridFunction_ProjectGridFunction) def ProjectCoefficient(self, *args):", "tr) -> int\"\"\" return _gridfunc.GridFunction_GetFaceVectorValues(self, i, side, ir, vals, tr)", "-> double ComputeH1Error(GridFunction self, Coefficient exsol, VectorCoefficient exgrad, mfem::IntegrationRule const", "= _swig_new_instance_method(_gridfunc.GridFunction_GetTrueDofs) def SetTrueVector(self): r\"\"\"SetTrueVector(GridFunction self)\"\"\" return _gridfunc.GridFunction_SetTrueVector(self) SetTrueVector =", "self, int idx, Vector values) 
GetElementValues(QuadratureFunction self, int idx, int", "__lshift__(std::ostream & out, Mesh mesh) -> std::ostream __lshift__(std::ostream & out,", "__init__(GridFunction self, FiniteElementSpace f, double * data) -> GridFunction __init__(GridFunction", "file, int precision=16) \"\"\" return _gridfunc.GridFunction_Save(self, *args) Save = _swig_new_instance_method(_gridfunc.GridFunction_Save)", "vdim_): r\"\"\"SetVDim(QuadratureFunction self, int vdim_)\"\"\" return _gridfunc.QuadratureFunction_SetVDim(self, vdim_) SetVDim =", "_swig_new_instance_method(_gridfunc.GridFunction_ComputeLpError) def ComputeElementLpErrors(self, *args): r\"\"\" ComputeElementLpErrors(GridFunction self, double const p,", "with_coeff=False) -> double\"\"\" return _gridfunc.ZZErrorEstimator(blfi, u, flux, error_estimates, aniso_flags, with_subdomains,", "GetTrueVector(self, *args): r\"\"\" GetTrueVector(GridFunction self) -> Vector GetTrueVector(GridFunction self) ->", "_gridfunc.GridFunction_imul(self, v) ret.thisown = 0 return self GridFunction.__iadd__ = __iadd__", "*args) ComputeElementL1Errors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL1Errors) def ComputeElementL2Errors(self, *args): r\"\"\" ComputeElementL2Errors(GridFunction self,", "ref)\"\"\" return _gridfunc.GridFunction_SaveVTK(self, out, field_name, ref) SaveVTK = _swig_new_instance_method(_gridfunc.GridFunction_SaveVTK) def", "ir, vals, tr, comp) GetVectorFieldValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldValues) def ReorderByNodes(self): r\"\"\"ReorderByNodes(GridFunction", "Vector hi_) ImposeBounds(GridFunction self, int i, Vector weights, double min_=0.0,", "GetBdrValuesFrom(self, orig_func): r\"\"\"GetBdrValuesFrom(GridFunction self, GridFunction orig_func)\"\"\" return _gridfunc.GridFunction_GetBdrValuesFrom(self, orig_func) GetBdrValuesFrom", "p) Eval = _swig_new_instance_method(_gridfunc.JumpScaling_Eval) __swig_destroy__ = _gridfunc.delete_JumpScaling # Register 
JumpScaling", "double ComputeL2Error(GridFunction self, mfem::Coefficient *[] exsol, mfem::IntegrationRule const *[] irs=0)", "ComputeElementLpErrors(GridFunction self, double const p, VectorCoefficient exsol, Vector error, Coefficient", "\"\"\" return _gridfunc.GridFunction_ComputeMaxError(self, *args) ComputeMaxError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeMaxError) def ComputeW11Error(self, exsol,", "= _gridfunc.GridFunction_HARMONIC def ProjectDiscCoefficient(self, *args): r\"\"\" ProjectDiscCoefficient(GridFunction self, VectorCoefficient coeff)", "QuadratureFunction __init__(QuadratureFunction self, QuadratureSpace qspace_, int vdim_=1) -> QuadratureFunction __init__(QuadratureFunction", "mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeHDivError(self, exsol, exdiv,", "attr) ProjectBdrCoefficient(GridFunction self, VectorCoefficient vcoeff, intArray attr) ProjectBdrCoefficient(GridFunction self, mfem::Coefficient", "\"<%s.%s; %s >\" % (self.__class__.__module__, self.__class__.__name__, strthis,) def _swig_setattr_nondynamic_instance_variable(set): def", "of C++ mfem::ExtrudeCoefficient class.\"\"\" thisown = property(lambda x: x.this.own(), lambda", "_gridfunc.new_QuadratureFunction(*args)) __swig_destroy__ = _gridfunc.delete_QuadratureFunction def GetSpace(self): r\"\"\"GetSpace(QuadratureFunction self) -> QuadratureSpace\"\"\"", "& out, Mesh mesh) -> std::ostream __lshift__(std::ostream & out, GridFunction", "v: x.this.own(v), doc=\"The membership flag\") __repr__ = _swig_repr def __init__(self,", "set_instance_attr def _swig_setattr_nondynamic_class_variable(set): def set_class_attr(cls, name, value): if hasattr(cls, name)", "% cls) return set_class_attr def _swig_add_metaclass(metaclass): \"\"\"Class decorator for adding", "int offset) MakeRef(GridFunction self, FiniteElementSpace f, double * v) MakeRef(GridFunction", "= _swig_new_instance_method(_gridfunc.GridFunction_GetVectorValue) def 
GetValues(self, *args): r\"\"\" GetValues(GridFunction self, int i,", "low-level C/C++ module if __package__ or \".\" in __name__: from", "\"\"\"Class decorator for adding a metaclass to a SWIG wrapped", "val, Vector tr=None) \"\"\" return _gridfunc.GridFunction_GetVectorValue(self, *args) GetVectorValue = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorValue)", "_gridfunc.GridFunction_ComputeH1Error(self, *args) ComputeH1Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeH1Error) def ComputeHDivError(self, exsol, exdiv, irs=0):", "GetVectorValues(self, *args): r\"\"\" GetVectorValues(GridFunction self, int i, IntegrationRule ir, DenseMatrix", "import mfem._par.hypre import mfem._par.restriction import mfem._par.bilininteg import mfem._par.linearform import mfem._par.nonlininteg", "Vector error, Coefficient weight=None, VectorCoefficient v_weight=None, mfem::IntegrationRule const *[] irs=0)", "exsol, Vector error, mfem::IntegrationRule const *[] irs=0) ComputeElementL1Errors(GridFunction self, VectorCoefficient", "grad)\"\"\" return _gridfunc.GridFunction_GetVectorGradient(self, tr, grad) GetVectorGradient = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorGradient) def GetElementAverages(self,", "Assign = _swig_new_instance_method(_gridfunc.GridFunction_Assign) def Update(self): r\"\"\"Update(GridFunction self)\"\"\" return _gridfunc.GridFunction_Update(self) Update", "*args) ProjectDiscCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectDiscCoefficient) def ProjectBdrCoefficient(self, *args): r\"\"\" ProjectBdrCoefficient(GridFunction self,", "ComputeMaxError(self, *args): r\"\"\" ComputeMaxError(GridFunction self, Coefficient exsol, mfem::IntegrationRule const *[]", "*args) Save = _swig_new_instance_method(_gridfunc.GridFunction_Save) def SaveGZ(self, file, precision=16): r\"\"\"SaveGZ(GridFunction self,", "_gridfunc: _gridfunc.QuadratureFunction_swigregister(QuadratureFunction) def __lshift__(*args): r\"\"\" 
__lshift__(std::ostream & os, SparseMatrix mat)", "-> double ComputeL2Error(GridFunction self, mfem::Coefficient *[] exsol, mfem::IntegrationRule const *[]", "else: raise AttributeError(\"You cannot add instance attributes to %s\" %", "QuadratureSpace qspace_, double * qf_data, int vdim_=1) -> QuadratureFunction __init__(QuadratureFunction", "-> GridFunction __init__(GridFunction self, GridFunction orig) -> GridFunction __init__(GridFunction self,", "tv)\"\"\" return _gridfunc.GridFunction_SetFromTrueDofs(self, tv) SetFromTrueDofs = _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueDofs) def SetFromTrueVector(self): r\"\"\"SetFromTrueVector(GridFunction", "= _swig_new_instance_method(_gridfunc.GridFunction_SaveGZ) # Register GridFunction in _gridfunc: _gridfunc.GridFunction_swigregister(GridFunction) class JumpScaling(object):", "slimmed down version of six.add_metaclass\"\"\" def wrapper(cls): return metaclass(cls.__name__, cls.__bases__,", "ComputeElementMaxErrors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementMaxErrors) def ComputeFlux(self, blfi, flux, wcoef=True, subdomain=-1): r\"\"\"ComputeFlux(GridFunction", "FiniteElementSpace fes, Vector v, int offset) -> GridFunction \"\"\" _gridfunc.GridFunction_swiginit(self,", "def GetFaceValues(self, i, side, ir, vals, tr, vdim=1): r\"\"\"GetFaceValues(GridFunction self,", "ir, DenseMatrix vals, DenseMatrix tr) -> int\"\"\" return _gridfunc.GridFunction_GetFaceVectorValues(self, i,", "SetTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_SetTrueVector) def SetFromTrueDofs(self, tv): r\"\"\"SetFromTrueDofs(GridFunction self, Vector tv)\"\"\"", "mfem._par.mem_manager import mfem._par.vector import mfem._par.coefficient import mfem._par.globals import mfem._par.matrix import", "GetElementIntRule(self, idx): r\"\"\"GetElementIntRule(QuadratureFunction self, int idx) -> IntegrationRule\"\"\" return _gridfunc.QuadratureFunction_GetElementIntRule(self,", 
"_gridfunc.GridFunction_SetSpace(self, f) SetSpace = _swig_new_instance_method(_gridfunc.GridFunction_SetSpace) def MakeRef(self, *args): r\"\"\" MakeRef(GridFunction", "\"\"\"Meta class to enforce nondynamic attributes (no new attributes) for", "elem, IntegrationRule ir, DenseMatrix grad) \"\"\" return _gridfunc.GridFunction_GetGradients(self, *args) GetGradients", "_gridfunc.GridFunction_ProjectVectorFieldOn(self, vec_field, comp) ProjectVectorFieldOn = _swig_new_instance_method(_gridfunc.GridFunction_ProjectVectorFieldOn) def GetDerivative(self, comp, der_comp,", "u, flux, error_estimates, aniso_flags=None, with_subdomains=1, with_coeff=False): r\"\"\"ZZErrorEstimator(BilinearFormIntegrator blfi, GridFunction u,", "return _gridfunc.GridFunction_GetFaceVectorValues(self, i, side, ir, vals, tr) GetFaceVectorValues = _swig_new_instance_method(_gridfunc.GridFunction_GetFaceVectorValues)", "OwnsSpace(self): r\"\"\"OwnsSpace(QuadratureFunction self) -> bool\"\"\" return _gridfunc.QuadratureFunction_OwnsSpace(self) OwnsSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_OwnsSpace)", "weakref import mfem._par.array import mfem._par.mem_manager import mfem._par.vector import mfem._par.coefficient import", "_gridfunc.GridFunction_MakeTRef(self, *args) MakeTRef = _swig_new_instance_method(_gridfunc.GridFunction_MakeTRef) def SaveVTK(self, out, field_name, ref):", "int vdim_=1) -> QuadratureFunction __init__(QuadratureFunction self, QuadratureSpace qspace_, double *", "GridFunction orig_func)\"\"\" return _gridfunc.GridFunction_GetBdrValuesFrom(self, orig_func) GetBdrValuesFrom = _swig_new_instance_method(_gridfunc.GridFunction_GetBdrValuesFrom) def GetVectorFieldValues(self,", "int precision=16) \"\"\" return _gridfunc.QuadratureFunction_Save(self, *args) Save = _swig_new_instance_method(_gridfunc.QuadratureFunction_Save) def", "DenseMatrix hess, int vdim=1) GetHessians(GridFunction self, int i, IntegrationRule ir,", "*args): r\"\"\" ComputeLpError(GridFunction 
self, double const p, Coefficient exsol, Coefficient", "* data) -> GridFunction __init__(GridFunction self, Mesh m, std::istream &", "\"\"\" return _gridfunc.GridFunction_ComputeElementMaxErrors(self, *args) ComputeElementMaxErrors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementMaxErrors) def ComputeFlux(self, blfi,", "QuadratureFunction __init__(QuadratureFunction self, QuadratureFunction orig) -> QuadratureFunction __init__(QuadratureFunction self, QuadratureSpace", "self) -> QuadratureSpace\"\"\" return _gridfunc.QuadratureFunction_GetSpace(self) GetSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetSpace) def SetSpace(self,", "fname, int precision=16) Save(GridFunction self, char const * file, int", "def ZZErrorEstimator(blfi, u, flux, error_estimates, aniso_flags=None, with_subdomains=1, with_coeff=False): r\"\"\"ZZErrorEstimator(BilinearFormIntegrator blfi,", "elems, irs) ComputeW11Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeW11Error) def ComputeL1Error(self, *args): r\"\"\" ComputeL1Error(GridFunction", "def SetTrueVector(self): r\"\"\"SetTrueVector(GridFunction self)\"\"\" return _gridfunc.GridFunction_SetTrueVector(self) SetTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_SetTrueVector) def", "vcoeff, intArray dofs) ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff, int attribute) ProjectCoefficient(GridFunction", "double const p, Coefficient exsol, Coefficient weight=None, mfem::IntegrationRule const *[]", "= _swig_new_instance_method(_gridfunc.GridFunction_MakeTRef) def SaveVTK(self, out, field_name, ref): r\"\"\"SaveVTK(GridFunction self, std::ostream", "r\"\"\"__init__(JumpScaling self, double nu_=1.0, mfem::JumpScaling::JumpScalingType type_=CONSTANT) -> JumpScaling\"\"\" _gridfunc.JumpScaling_swiginit(self, _gridfunc.new_JumpScaling(*args,", "ComputeDivError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeDivError) def ComputeDGFaceJumpError(self, *args): r\"\"\" 
ComputeDGFaceJumpError(GridFunction self, Coefficient", "n_) -> ExtrudeCoefficient\"\"\" _gridfunc.ExtrudeCoefficient_swiginit(self, _gridfunc.new_ExtrudeCoefficient(m, s, n_)) def Eval(self, T,", "ComputeLpError(GridFunction self, double const p, Coefficient exsol, Coefficient weight=None, mfem::IntegrationRule", "Vector error, mfem::IntegrationRule const *[] irs=0) ComputeElementL2Errors(GridFunction self, VectorCoefficient exsol,", "i, gf1, gf2) ComputeElementLpDistance = _gridfunc.ComputeElementLpDistance class ExtrudeCoefficient(mfem._par.coefficient.Coefficient): r\"\"\"Proxy of", "builtins as __builtin__ except ImportError: import __builtin__ _swig_new_instance_method = _gridfunc.SWIG_PyInstanceMethod_New", "def set_class_attr(cls, name, value): if hasattr(cls, name) and not isinstance(getattr(cls,", "-> double \"\"\" return _gridfunc.GridFunction_ComputeL1Error(self, *args) ComputeL1Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeL1Error) def", "self)\"\"\" return _gridfunc.GridFunction_Update(self) Update = _swig_new_instance_method(_gridfunc.GridFunction_Update) def FESpace(self, *args): r\"\"\"", "gf_file, int const precision)\"\"\" return _gridfunc.GridFunction_SaveToFile(self, gf_file, precision) SaveToFile =", "ExtrudeCoefficient in _gridfunc: _gridfunc.ExtrudeCoefficient_swigregister(ExtrudeCoefficient) def Extrude1DGridFunction(mesh, mesh2d, sol, ny): r\"\"\"Extrude1DGridFunction(Mesh", "std::string const & field_name, int ref)\"\"\" return _gridfunc.GridFunction_SaveVTK(self, out, field_name,", "& input) -> GridFunction __init__(GridFunction self, Mesh m, mfem::GridFunction *[]", "def _swig_setattr_nondynamic_class_variable(set): def set_class_attr(cls, name, value): if hasattr(cls, name) and", "Mesh mesh) -> std::ostream __lshift__(std::ostream & out, GridFunction sol) ->", "_swig_new_instance_method(_gridfunc.GridFunction_RestrictConforming) def ProjectGridFunction(self, src): r\"\"\"ProjectGridFunction(GridFunction self, 
GridFunction src)\"\"\" return _gridfunc.GridFunction_ProjectGridFunction(self,", "_gridfunc.QuadratureFunction_OwnsSpace(self) OwnsSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_OwnsSpace) def SetOwnsSpace(self, own): r\"\"\"SetOwnsSpace(QuadratureFunction self, bool", "self, VectorCoefficient exsol, mfem::IntegrationRule const *[] irs=0, intArray elems=None) ->", "m, mfem::GridFunction *[] gf_array, int num_pieces) -> GridFunction __init__(GridFunction self,", "= _swig_new_instance_method(_gridfunc.GridFunction_MakeOwner) def OwnFEC(self): r\"\"\"OwnFEC(GridFunction self) -> FiniteElementCollection\"\"\" return _gridfunc.GridFunction_OwnFEC(self)", "= _swig_setattr_nondynamic_class_variable(type.__setattr__) import weakref import mfem._par.array import mfem._par.mem_manager import mfem._par.vector", "a class\"\"\" __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__) import weakref import mfem._par.array import", "r\"\"\" isub(GridFunction self, GridFunction c) -> GridFunction isub(GridFunction self, double", "int idx) -> IntegrationRule\"\"\" return _gridfunc.QuadratureFunction_GetElementIntRule(self, idx) GetElementIntRule = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementIntRule)", "-> QuadratureFunction __init__(QuadratureFunction self, QuadratureSpace qspace_, int vdim_=1) -> QuadratureFunction", "self, Mesh m, mfem::GridFunction *[] gf_array, int num_pieces) -> GridFunction", "- a slimmed down version of six.add_metaclass\"\"\" def wrapper(cls): return", "ir, Vector laps, DenseMatrix tr, int vdim=1) \"\"\" return _gridfunc.GridFunction_GetLaplacians(self,", "int\"\"\" return _gridfunc.GridFunction_VectorDim(self) VectorDim = _swig_new_instance_method(_gridfunc.GridFunction_VectorDim) def GetTrueVector(self, *args): r\"\"\"", "r\"\"\"Eval(ExtrudeCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double\"\"\" return _gridfunc.ExtrudeCoefficient_Eval(self,", "vdim_=-1) 
SetSpace(QuadratureFunction self, QuadratureSpace qspace_, double * qf_data, int vdim_=-1)", "m, Coefficient s, int n_) -> ExtrudeCoefficient\"\"\" _gridfunc.ExtrudeCoefficient_swiginit(self, _gridfunc.new_ExtrudeCoefficient(m, s,", "SaveGZ = _swig_new_instance_method(_gridfunc.GridFunction_SaveGZ) # Register GridFunction in _gridfunc: _gridfunc.GridFunction_swigregister(GridFunction) class", "Vector grad)\"\"\" return _gridfunc.GridFunction_GetGradient(self, tr, grad) GetGradient = _swig_new_instance_method(_gridfunc.GridFunction_GetGradient) def", "const *[] irs=0) ComputeElementLpErrors(GridFunction self, double const p, VectorCoefficient exsol,", "def ComputeElementLpDistance(p, i, gf1, gf2): r\"\"\"ComputeElementLpDistance(double p, int i, GridFunction", "GetDivergence(self, tr): r\"\"\"GetDivergence(GridFunction self, ElementTransformation tr) -> double\"\"\" return _gridfunc.GridFunction_GetDivergence(self,", "# Register ExtrudeCoefficient in _gridfunc: _gridfunc.ExtrudeCoefficient_swigregister(ExtrudeCoefficient) def Extrude1DGridFunction(mesh, mesh2d, sol,", "# Do not make changes to this file unless you", "IntegrationPoint ip, int vdim=1) -> double GetValue(GridFunction self, ElementTransformation T,", "elif name == \"this\": set(self, name, value) elif hasattr(self, name)", "_gridfunc.GridFunction_swiginit(self, _gridfunc.new_GridFunction(*args)) def SaveToFile(self, gf_file, precision): r\"\"\"SaveToFile(GridFunction self, char const", "comp=0): r\"\"\"ProjectVectorFieldOn(GridFunction self, GridFunction vec_field, int comp=0)\"\"\" return _gridfunc.GridFunction_ProjectVectorFieldOn(self, vec_field,", "ComputeElementL1Errors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL1Errors) def ComputeElementL2Errors(self, *args): r\"\"\" ComputeElementL2Errors(GridFunction self, Coefficient", "return _gridfunc.QuadratureFunction_GetElementValues(self, *args) GetElementValues = 
_swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementValues) def Save(self, *args): r\"\"\"", "return _gridfunc.Extrude1DGridFunction(mesh, mesh2d, sol, ny) Extrude1DGridFunction = _gridfunc.Extrude1DGridFunction def __iadd__(self,", "ComputeElementLpDistance = _gridfunc.ComputeElementLpDistance class ExtrudeCoefficient(mfem._par.coefficient.Coefficient): r\"\"\"Proxy of C++ mfem::ExtrudeCoefficient class.\"\"\"", "gf1, gf2): r\"\"\"ComputeElementLpDistance(double p, int i, GridFunction gf1, GridFunction gf2)", "self, QuadratureSpace qspace_, int vdim_=-1) SetSpace(QuadratureFunction self, QuadratureSpace qspace_, double", "blfi, GridFunction flux, bool wcoef=True, int subdomain=-1)\"\"\" return _gridfunc.GridFunction_ComputeFlux(self, blfi,", "mfem::IntegrationRule const *[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeL1Error(self, *args)", "double \"\"\" return _gridfunc.GridFunction_GetValue(self, *args) GetValue = _swig_new_instance_method(_gridfunc.GridFunction_GetValue) def GetVectorValue(self,", "vals, DenseMatrix tr, int vdim=1) -> int\"\"\" return _gridfunc.GridFunction_GetFaceValues(self, i,", "def GetVectorFieldNodalValues(self, val, comp): r\"\"\"GetVectorFieldNodalValues(GridFunction self, Vector val, int comp)\"\"\"", "return _gridfunc.GridFunction_GetLaplacians(self, *args) GetLaplacians = _swig_new_instance_method(_gridfunc.GridFunction_GetLaplacians) def GetHessians(self, *args): r\"\"\"", "double max_=mfem::infinity()) \"\"\" return _gridfunc.GridFunction_ImposeBounds(self, *args) ImposeBounds = _swig_new_instance_method(_gridfunc.GridFunction_ImposeBounds) def", "self.__class__.__name__, strthis,) def _swig_setattr_nondynamic_instance_variable(set): def set_instance_attr(self, name, value): if name", "const *[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementLpErrors(self, *args) ComputeElementLpErrors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementLpErrors)", "const & field_name, int 
ref)\"\"\" return _gridfunc.GridFunction_SaveVTK(self, out, field_name, ref)", "ARITHMETIC = _gridfunc.GridFunction_ARITHMETIC HARMONIC = _gridfunc.GridFunction_HARMONIC def ProjectDiscCoefficient(self, *args): r\"\"\"", "class attributes to %s\" % cls) return set_class_attr def _swig_add_metaclass(metaclass):", "vdim=1) -> int\"\"\" return _gridfunc.GridFunction_GetFaceValues(self, i, side, ir, vals, tr,", "self, Coefficient coeff, intArray attr) ProjectBdrCoefficient(GridFunction self, VectorCoefficient vcoeff, intArray", "VectorCoefficient exsol, Coefficient weight=None, VectorCoefficient v_weight=None, mfem::IntegrationRule const *[] irs=0)", "comp)\"\"\" return _gridfunc.GridFunction_GetVectorFieldNodalValues(self, val, comp) GetVectorFieldNodalValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldNodalValues) def ProjectVectorFieldOn(self,", "ComputeH1Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeH1Error) def ComputeHDivError(self, exsol, exdiv, irs=0): r\"\"\"ComputeHDivError(GridFunction self,", "-> JumpScaling\"\"\" _gridfunc.JumpScaling_swiginit(self, _gridfunc.new_JumpScaling(*args, **kwargs)) def Eval(self, h, p): r\"\"\"Eval(JumpScaling", "ComputeHDivError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeHDivError) def ComputeHCurlError(self, exsol, excurl, irs=0): r\"\"\"ComputeHCurlError(GridFunction self,", "ComputeElementL1Errors(GridFunction self, Coefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0)", "_swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL1Errors) def ComputeElementL2Errors(self, *args): r\"\"\" ComputeElementL2Errors(GridFunction self, Coefficient exsol, Vector", "_swig_new_instance_method(_gridfunc.GridFunction_SaveToFile) def WriteToStream(self, StringIO): r\"\"\"WriteToStream(GridFunction self, PyObject * StringIO) ->", "_gridfunc.Extrude1DGridFunction def __iadd__(self, v): ret = _gridfunc.GridFunction_iadd(self, v) ret.thisown =", "subdomain) 
ComputeFlux = _swig_new_instance_method(_gridfunc.GridFunction_ComputeFlux) def Assign(self, *args): r\"\"\" Assign(GridFunction self,", "double * qf_data, int vdim_=-1) \"\"\" return _gridfunc.QuadratureFunction_SetSpace(self, *args) SetSpace", "tr=None) \"\"\" return _gridfunc.GridFunction_GetVectorValue(self, *args) GetVectorValue = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorValue) def GetValues(self,", "tv) SetFromTrueDofs = _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueDofs) def SetFromTrueVector(self): r\"\"\"SetFromTrueVector(GridFunction self)\"\"\" return _gridfunc.GridFunction_SetFromTrueVector(self)", "file, int precision=16) \"\"\" return _gridfunc.QuadratureFunction_Save(self, *args) Save = _swig_new_instance_method(_gridfunc.QuadratureFunction_Save)", "const *[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementL2Errors(self, *args) ComputeElementL2Errors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL2Errors)", "int comp=0, DenseMatrix tr=None) \"\"\" return _gridfunc.GridFunction_GetValues(self, *args) GetValues =", "*args): r\"\"\" Save(QuadratureFunction self, std::ostream & out) Save(QuadratureFunction self, char", "ProjectBdrCoefficientNormal = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientNormal) def ProjectBdrCoefficientTangent(self, vcoeff, bdr_attr): r\"\"\"ProjectBdrCoefficientTangent(GridFunction self, VectorCoefficient", "__init__(QuadratureFunction self, QuadratureSpace qspace_, double * qf_data, int vdim_=1) ->", "FiniteElementCollection\"\"\" return _gridfunc.GridFunction_OwnFEC(self) OwnFEC = _swig_new_instance_method(_gridfunc.GridFunction_OwnFEC) def VectorDim(self): r\"\"\"VectorDim(GridFunction self)", "self, mfem::Coefficient *[] coeff, intArray attr) \"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficient(self, *args)", "c) iadd = _swig_new_instance_method(_gridfunc.GridFunction_iadd) def isub(self, *args): r\"\"\" isub(GridFunction self,", 
"_gridfunc.delete_JumpScaling # Register JumpScaling in _gridfunc: _gridfunc.JumpScaling_swigregister(JumpScaling) class QuadratureFunction(mfem._par.vector.Vector): r\"\"\"Proxy", "Vector v, int v_offset) \"\"\" return _gridfunc.GridFunction_MakeRef(self, *args) MakeRef =", "_gridfunc.GridFunction_Save(self, *args) Save = _swig_new_instance_method(_gridfunc.GridFunction_Save) def SaveGZ(self, file, precision=16): r\"\"\"SaveGZ(GridFunction", "h, p): r\"\"\"Eval(JumpScaling self, double h, int p) -> double\"\"\"", "irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeHDivError(self, exsol, exdiv, irs) ComputeHDivError =", "def ProjectDiscCoefficient(self, *args): r\"\"\" ProjectDiscCoefficient(GridFunction self, VectorCoefficient coeff) ProjectDiscCoefficient(GridFunction self,", "out, GridFunction sol) -> std::ostream __lshift__(std::ostream & out, QuadratureFunction qf)", "self, ElementTransformation T, IntegrationPoint ip, Vector val, Vector tr=None) \"\"\"", "return _gridfunc.QuadratureFunction_OwnsSpace(self) OwnsSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_OwnsSpace) def SetOwnsSpace(self, own): r\"\"\"SetOwnsSpace(QuadratureFunction self,", "_gridfunc try: import builtins as __builtin__ except ImportError: import __builtin__", "_swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueDofs) def SetFromTrueVector(self): r\"\"\"SetFromTrueVector(GridFunction self)\"\"\" return _gridfunc.GridFunction_SetFromTrueVector(self) SetFromTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueVector)", "\"\"\" return _gridfunc.GridFunction_MakeTRef(self, *args) MakeTRef = _swig_new_instance_method(_gridfunc.GridFunction_MakeTRef) def SaveVTK(self, out,", "self) -> QuadratureFunction __init__(QuadratureFunction self, QuadratureFunction orig) -> QuadratureFunction __init__(QuadratureFunction", "Coefficient exsol, VectorCoefficient exgrad, Coefficient ell_coef, double Nu, int norm_type)", "dof_vals) GetElementDofValues = 
_swig_new_instance_method(_gridfunc.GridFunction_GetElementDofValues) def ImposeBounds(self, *args): r\"\"\" ImposeBounds(GridFunction self,", "def imul(self, c): r\"\"\"imul(GridFunction self, double c) -> GridFunction\"\"\" return", "def Eval(self, h, p): r\"\"\"Eval(JumpScaling self, double h, int p)", "GetFaceVectorValues = _swig_new_instance_method(_gridfunc.GridFunction_GetFaceVectorValues) def GetLaplacians(self, *args): r\"\"\" GetLaplacians(GridFunction self, int", "vec_field, comp) ProjectVectorFieldOn = _swig_new_instance_method(_gridfunc.GridFunction_ProjectVectorFieldOn) def GetDerivative(self, comp, der_comp, der):", "self, GridFunction c) -> GridFunction isub(GridFunction self, double c) ->", "MakeTRef(GridFunction self, FiniteElementSpace f, Vector tv, int tv_offset) \"\"\" return", "_gridfunc.GridFunction_ComputeElementL2Errors(self, *args) ComputeElementL2Errors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL2Errors) def ComputeElementMaxErrors(self, *args): r\"\"\" ComputeElementMaxErrors(GridFunction", "i, Vector weights, Vector lo_, Vector hi_) ImposeBounds(GridFunction self, int", "return _gridfunc.GridFunction_ComputeCurlError(self, excurl, irs) ComputeCurlError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeCurlError) def ComputeDivError(self, exdiv,", "DenseMatrix vals, DenseMatrix tr) -> int\"\"\" return _gridfunc.GridFunction_GetFaceVectorValues(self, i, side,", "_swig_new_instance_method(_gridfunc.QuadratureFunction_Save) def SaveGZ(self, file, precision=16): r\"\"\"SaveGZ(QuadratureFunction self, char const *", "T, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr=None) \"\"\" return _gridfunc.GridFunction_GetVectorValues(self,", "__repr__ = _swig_repr def __init__(self, *args): r\"\"\" __init__(QuadratureFunction self) ->", "ip): r\"\"\"Eval(ExtrudeCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double\"\"\" return", "_gridfunc.GridFunction_ComputeElementLpErrors(self, *args) 
ComputeElementLpErrors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementLpErrors) def ComputeElementL1Errors(self, *args): r\"\"\" ComputeElementL1Errors(GridFunction", "f, Vector v, int v_offset) \"\"\" return _gridfunc.GridFunction_MakeRef(self, *args) MakeRef", "__swig_destroy__ = _gridfunc.delete_JumpScaling # Register JumpScaling in _gridfunc: _gridfunc.JumpScaling_swigregister(JumpScaling) class", "mfem::GridFunction *[] gf_array, int num_pieces) -> GridFunction __init__(GridFunction self, FiniteElementSpace", "r\"\"\"ComputeHCurlError(GridFunction self, VectorCoefficient exsol, VectorCoefficient excurl, mfem::IntegrationRule const *[] irs=0)", "_swig_new_instance_method(_gridfunc.GridFunction_FESpace) def SetSpace(self, f): r\"\"\"SetSpace(GridFunction self, FiniteElementSpace f)\"\"\" return _gridfunc.GridFunction_SetSpace(self,", "error, mfem::IntegrationRule const *[] irs=0) ComputeElementL1Errors(GridFunction self, VectorCoefficient exsol, Vector", "DenseMatrix values) GetElementValues(QuadratureFunction self, int idx, DenseMatrix values) \"\"\" return", "_swig_new_instance_method(_gridfunc.GridFunction_GetValues) def GetVectorValues(self, *args): r\"\"\" GetVectorValues(GridFunction self, int i, IntegrationRule", "set(self, name, value) elif hasattr(self, name) and isinstance(getattr(type(self), name), property):", "Vector tr=None) -> double \"\"\" return _gridfunc.GridFunction_GetValue(self, *args) GetValue =", "*args) FESpace = _swig_new_instance_method(_gridfunc.GridFunction_FESpace) def SetSpace(self, f): r\"\"\"SetSpace(GridFunction self, FiniteElementSpace", "ComputeElementLpDistance(p, i, gf1, gf2): r\"\"\"ComputeElementLpDistance(double p, int i, GridFunction gf1,", "ProjectDiscCoefficient(GridFunction self, VectorCoefficient coeff) ProjectDiscCoefficient(GridFunction self, Coefficient coeff, mfem::GridFunction::AvgType type)", "*args): r\"\"\" GetElementValues(QuadratureFunction self, int idx, Vector values) 
GetElementValues(QuadratureFunction self,", "return self def __isub__(self, v): ret = _gridfunc.GridFunction_isub(self, v) ret.thisown", "ir, vals, tr, vdim) GetFaceValues = _swig_new_instance_method(_gridfunc.GridFunction_GetFaceValues) def GetFaceVectorValues(self, i,", "self, VectorCoefficient vcoeff) ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff, intArray dofs) ProjectCoefficient(GridFunction", "= _swig_repr def MakeOwner(self, fec_): r\"\"\"MakeOwner(GridFunction self, FiniteElementCollection fec_)\"\"\" return", "\"\"\" return _gridfunc.GridFunction_ComputeLpError(self, *args) ComputeLpError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeLpError) def ComputeElementLpErrors(self, *args):", "FiniteElementSpace f, double * v) MakeRef(GridFunction self, FiniteElementSpace f, Vector", "tr, int vdim=1) -> int\"\"\" return _gridfunc.GridFunction_GetFaceValues(self, i, side, ir,", "self, FiniteElementSpace f) -> GridFunction __init__(GridFunction self, FiniteElementSpace f, double", "return _gridfunc.GridFunction_GetFaceValues(self, i, side, ir, vals, tr, vdim) GetFaceValues =", "__builtin__ _swig_new_instance_method = _gridfunc.SWIG_PyInstanceMethod_New _swig_new_static_method = _gridfunc.SWIG_PyStaticMethod_New def _swig_repr(self): try:", "v): ret = _gridfunc.GridFunction_idiv(self, v) ret.thisown = 0 return self", "int const ip_num, Vector values) GetElementValues(QuadratureFunction self, int idx, DenseMatrix", "return _gridfunc.GridFunction_GetVectorValues(self, *args) GetVectorValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorValues) def GetFaceValues(self, i, side,", "qf_data, int vdim_=-1) \"\"\" return _gridfunc.QuadratureFunction_SetSpace(self, *args) SetSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetSpace)", "FiniteElementSpace f, double * data) -> GridFunction __init__(GridFunction self, Mesh", "int idx, int const ip_num, Vector values) GetElementValues(QuadratureFunction self, int", "int 
size) MakeRef(GridFunction self, Vector base, int offset) MakeRef(GridFunction self,", "*[] irs=0) -> double ComputeL2Error(GridFunction self, mfem::Coefficient *[] exsol, mfem::IntegrationRule", "return set_class_attr def _swig_add_metaclass(metaclass): \"\"\"Class decorator for adding a metaclass", "-> double ComputeDGFaceJumpError(GridFunction self, Coefficient exsol, Coefficient ell_coeff, double Nu,", "= _swig_new_instance_method(_gridfunc.GridFunction_GetHessians) def GetValuesFrom(self, orig_func): r\"\"\"GetValuesFrom(GridFunction self, GridFunction orig_func)\"\"\" return", "def Extrude1DGridFunction(mesh, mesh2d, sol, ny): r\"\"\"Extrude1DGridFunction(Mesh mesh, Mesh mesh2d, GridFunction", "= _gridfunc.JumpScaling_P_SQUARED_OVER_H def __init__(self, *args, **kwargs): r\"\"\"__init__(JumpScaling self, double nu_=1.0,", "self, double nu_=1.0, mfem::JumpScaling::JumpScalingType type_=CONSTANT) -> JumpScaling\"\"\" _gridfunc.JumpScaling_swiginit(self, _gridfunc.new_JumpScaling(*args, **kwargs))", "r\"\"\"Update(GridFunction self)\"\"\" return _gridfunc.GridFunction_Update(self) Update = _swig_new_instance_method(_gridfunc.GridFunction_Update) def FESpace(self, *args):", "MakeOwner = _swig_new_instance_method(_gridfunc.GridFunction_MakeOwner) def OwnFEC(self): r\"\"\"OwnFEC(GridFunction self) -> FiniteElementCollection\"\"\" return", "return _gridfunc.GridFunction_GetVectorFieldValues(self, i, ir, vals, tr, comp) GetVectorFieldValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldValues)", "else: import _gridfunc try: import builtins as __builtin__ except ImportError:", "file, int precision=16)\"\"\" return _gridfunc.QuadratureFunction_SaveGZ(self, file, precision) SaveGZ = _swig_new_instance_method(_gridfunc.QuadratureFunction_SaveGZ)", "ComputeH1Error(self, *args): r\"\"\" ComputeH1Error(GridFunction self, Coefficient exsol, VectorCoefficient exgrad, Coefficient", "def __init__(self, *args, **kwargs): r\"\"\"__init__(JumpScaling self, 
double nu_=1.0, mfem::JumpScaling::JumpScalingType type_=CONSTANT)", "precision=16): r\"\"\"SaveGZ(QuadratureFunction self, char const * file, int precision=16)\"\"\" return", "intArray bdr_attr)\"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficientTangent(self, vcoeff, bdr_attr) ProjectBdrCoefficientTangent = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientTangent) def", "GetValue(self, *args): r\"\"\" GetValue(GridFunction self, int i, IntegrationPoint ip, int", "double ComputeLpError(GridFunction self, double const p, VectorCoefficient exsol, Coefficient weight=None,", "if hasattr(cls, name) and not isinstance(getattr(cls, name), property): set(cls, name,", "coeff, mfem::GridFunction::AvgType type) \"\"\" return _gridfunc.GridFunction_ProjectDiscCoefficient(self, *args) ProjectDiscCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectDiscCoefficient)", "*[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeDivError(self, exdiv, irs) ComputeDivError =", "irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementL2Errors(self, *args) ComputeElementL2Errors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL2Errors) def ComputeElementMaxErrors(self,", "Save = _swig_new_instance_method(_gridfunc.QuadratureFunction_Save) def SaveGZ(self, file, precision=16): r\"\"\"SaveGZ(QuadratureFunction self, char", "v): ret = _gridfunc.GridFunction_iadd(self, v) ret.thisown = 0 return self", "r\"\"\"SaveVTK(GridFunction self, std::ostream & out, std::string const & field_name, int", "name == \"thisown\": self.this.own(value) elif name == \"this\": set(self, name,", "= _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL2Errors) def ComputeElementMaxErrors(self, *args): r\"\"\" ComputeElementMaxErrors(GridFunction self, Coefficient exsol,", "_gridfunc.GridFunction_HARMONIC def ProjectDiscCoefficient(self, *args): r\"\"\" ProjectDiscCoefficient(GridFunction self, VectorCoefficient coeff) 
ProjectDiscCoefficient(GridFunction", "irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeHCurlError(self, exsol, excurl, irs) ComputeHCurlError =", "def SaveVTK(self, out, field_name, ref): r\"\"\"SaveVTK(GridFunction self, std::ostream & out,", "def SetOwnsSpace(self, own): r\"\"\"SetOwnsSpace(QuadratureFunction self, bool own)\"\"\" return _gridfunc.QuadratureFunction_SetOwnsSpace(self, own)", "-> PyObject *\"\"\" return _gridfunc.GridFunction_WriteToStream(self, StringIO) WriteToStream = _swig_new_instance_method(_gridfunc.GridFunction_WriteToStream) def", "_swig_new_instance_method(_gridfunc.QuadratureFunction_SetVDim) def OwnsSpace(self): r\"\"\"OwnsSpace(QuadratureFunction self) -> bool\"\"\" return _gridfunc.QuadratureFunction_OwnsSpace(self) OwnsSpace", "comp=0)\"\"\" return _gridfunc.GridFunction_GetVectorFieldValues(self, i, ir, vals, tr, comp) GetVectorFieldValues =", "__init__(self, m, s, n_): r\"\"\"__init__(ExtrudeCoefficient self, Mesh m, Coefficient s,", "as _swig_python_version_info if _swig_python_version_info < (2, 7, 0): raise RuntimeError(\"Python", "DenseMatrix tr) GetVectorValues(GridFunction self, ElementTransformation T, IntegrationRule ir, DenseMatrix vals,", "self, char const * file, int precision=16) \"\"\" return _gridfunc.GridFunction_Save(self,", "_swig_new_instance_method(_gridfunc.GridFunction_GetTrueDofs) def SetTrueVector(self): r\"\"\"SetTrueVector(GridFunction self)\"\"\" return _gridfunc.GridFunction_SetTrueVector(self) SetTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_SetTrueVector)", "= _gridfunc.delete_GridFunction def __init__(self, *args): r\"\"\" __init__(GridFunction self) -> GridFunction", "JumpScaling in _gridfunc: _gridfunc.JumpScaling_swigregister(JumpScaling) class QuadratureFunction(mfem._par.vector.Vector): r\"\"\"Proxy of C++ mfem::QuadratureFunction", "ir, DenseMatrix vals, DenseMatrix tr=None) \"\"\" return _gridfunc.GridFunction_GetVectorValues(self, *args) GetVectorValues", "int 
precision=16) Save(GridFunction self, char const * file, int precision=16)", "_swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldNodalValues) def ProjectVectorFieldOn(self, vec_field, comp=0): r\"\"\"ProjectVectorFieldOn(GridFunction self, GridFunction vec_field, int", "std::ostream & out, std::string const & field_name, int ref)\"\"\" return", "0 return self GridFunction.__iadd__ = __iadd__ GridFunction.__idiv__ = __idiv__ GridFunction.__isub__", "double\"\"\" return _gridfunc.GridFunction_ComputeW11Error(self, exsol, exgrad, norm_type, elems, irs) ComputeW11Error =", "= _swig_new_instance_method(_gridfunc.GridFunction_ComputeGradError) def ComputeCurlError(self, excurl, irs=0): r\"\"\"ComputeCurlError(GridFunction self, VectorCoefficient excurl,", "& field_name, int ref)\"\"\" return _gridfunc.GridFunction_SaveVTK(self, out, field_name, ref) SaveVTK", "ProjectBdrCoefficient(GridFunction self, Coefficient coeff, intArray attr) ProjectBdrCoefficient(GridFunction self, VectorCoefficient vcoeff,", "IntegrationRule ir, Vector vals, DenseMatrix tr, int vdim=1) GetValues(GridFunction self,", "exsol, Vector error, mfem::IntegrationRule const *[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementL2Errors(self,", "C/C++ module if __package__ or \".\" in __name__: from .", "weights, Vector lo_, Vector hi_) ImposeBounds(GridFunction self, int i, Vector", ">\" % (self.__class__.__module__, self.__class__.__name__, strthis,) def _swig_setattr_nondynamic_instance_variable(set): def set_instance_attr(self, name,", "*args) ComputeLpError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeLpError) def ComputeElementLpErrors(self, *args): r\"\"\" ComputeElementLpErrors(GridFunction self,", "f, double * data) -> GridFunction __init__(GridFunction self, Mesh m,", "= _swig_new_instance_method(_gridfunc.GridFunction_ProjectCoefficient) ARITHMETIC = _gridfunc.GridFunction_ARITHMETIC HARMONIC = _gridfunc.GridFunction_HARMONIC def ProjectDiscCoefficient(self,", 
"ElementTransformation tr, Vector curl)\"\"\" return _gridfunc.GridFunction_GetCurl(self, tr, curl) GetCurl =", "= _swig_new_instance_method(_gridfunc.GridFunction_ComputeH1Error) def ComputeHDivError(self, exsol, exdiv, irs=0): r\"\"\"ComputeHDivError(GridFunction self, VectorCoefficient", "Vector error, mfem::IntegrationRule const *[] irs=0) ComputeElementL1Errors(GridFunction self, VectorCoefficient exsol,", "_gridfunc.GridFunction_ComputeDivError(self, exdiv, irs) ComputeDivError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeDivError) def ComputeDGFaceJumpError(self, *args): r\"\"\"", "return _gridfunc.GridFunction_ComputeFlux(self, blfi, flux, wcoef, subdomain) ComputeFlux = _swig_new_instance_method(_gridfunc.GridFunction_ComputeFlux) def", "coeff) \"\"\" return _gridfunc.GridFunction_ProjectCoefficient(self, *args) ProjectCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectCoefficient) ARITHMETIC =", "v) ret.thisown = 0 return self def __imul__(self, v): ret", "fec_) MakeOwner = _swig_new_instance_method(_gridfunc.GridFunction_MakeOwner) def OwnFEC(self): r\"\"\"OwnFEC(GridFunction self) -> FiniteElementCollection\"\"\"", "GetDivergence = _swig_new_instance_method(_gridfunc.GridFunction_GetDivergence) def GetCurl(self, tr, curl): r\"\"\"GetCurl(GridFunction self, ElementTransformation", "mfem._par.sparsemat import mfem._par.densemat import mfem._par.eltrans import mfem._par.fe import mfem._par.geom import", "GetElementAverages = _swig_new_instance_method(_gridfunc.GridFunction_GetElementAverages) def GetElementDofValues(self, el, dof_vals): r\"\"\"GetElementDofValues(GridFunction self, int", "std::istream & _in) -> QuadratureFunction \"\"\" _gridfunc.QuadratureFunction_swiginit(self, _gridfunc.new_QuadratureFunction(*args)) __swig_destroy__ =", "error, Coefficient weight=None, mfem::IntegrationRule const *[] irs=0) ComputeElementLpErrors(GridFunction self, double", "GridFunction sol) -> std::ostream __lshift__(std::ostream & 
out, QuadratureFunction qf) ->", "def ComputeH1Error(self, *args): r\"\"\" ComputeH1Error(GridFunction self, Coefficient exsol, VectorCoefficient exgrad,", "def SaveToFile(self, gf_file, precision): r\"\"\"SaveToFile(GridFunction self, char const * gf_file,", "def ProjectBdrCoefficientTangent(self, vcoeff, bdr_attr): r\"\"\"ProjectBdrCoefficientTangent(GridFunction self, VectorCoefficient vcoeff, intArray bdr_attr)\"\"\"", "ComputeElementMaxErrors(GridFunction self, VectorCoefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0)", "ip, int vdim=1) -> double GetValue(GridFunction self, ElementTransformation T, IntegrationPoint", "const *[] irs=0) -> double ComputeDGFaceJumpError(GridFunction self, Coefficient exsol, Coefficient", "\"\"\" return _gridfunc.GridFunction_ComputeH1Error(self, *args) ComputeH1Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeH1Error) def ComputeHDivError(self, exsol,", "VectorCoefficient v_weight=None, mfem::IntegrationRule const *[] irs=0) -> double \"\"\" return", "*args): r\"\"\" GetLaplacians(GridFunction self, int i, IntegrationRule ir, Vector laps,", "return _gridfunc.GridFunction_GetVectorGradient(self, tr, grad) GetVectorGradient = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorGradient) def GetElementAverages(self, avgs):", "const ip_num, Vector values) GetElementValues(QuadratureFunction self, int idx, int const", "double c) -> GridFunction\"\"\" return _gridfunc.GridFunction_imul(self, c) imul = _swig_new_instance_method(_gridfunc.GridFunction_imul)", "x.this.own(v), doc=\"The membership flag\") __repr__ = _swig_repr def __init__(self, *args):", "std::ostream __lshift__(std::ostream & out, Mesh mesh) -> std::ostream __lshift__(std::ostream &", "SaveVTK(self, out, field_name, ref): r\"\"\"SaveVTK(GridFunction self, std::ostream & out, std::string", "int\"\"\" return _gridfunc.QuadratureFunction_GetVDim(self) GetVDim = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetVDim) def 
SetVDim(self, vdim_): r\"\"\"SetVDim(QuadratureFunction", "mfem._par.eltrans import mfem._par.fe import mfem._par.geom import mfem._par.fespace import mfem._par.mesh import", "mfem::IntegrationRule const *[] irs=0) ComputeElementL2Errors(GridFunction self, VectorCoefficient exsol, Vector error,", "grad) GetVectorGradient = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorGradient) def GetElementAverages(self, avgs): r\"\"\"GetElementAverages(GridFunction self, GridFunction", "self, Vector base, int offset, int size) MakeRef(GridFunction self, Vector", "GetVectorValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorValues) def GetFaceValues(self, i, side, ir, vals, tr,", "mfem::IntegrationRule const *[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeDGFaceJumpError(self, *args)", "exgrad, irs=0): r\"\"\"ComputeGradError(GridFunction self, VectorCoefficient exgrad, mfem::IntegrationRule const *[] irs=0)", "return _gridfunc.GridFunction_SaveGZ(self, file, precision) SaveGZ = _swig_new_instance_method(_gridfunc.GridFunction_SaveGZ) # Register GridFunction", "GridFunction vec_field, int comp=0)\"\"\" return _gridfunc.GridFunction_ProjectVectorFieldOn(self, vec_field, comp) ProjectVectorFieldOn =", "Coefficient exdiv, mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeHDivError(self,", "mfem::JumpScaling::JumpScalingType type_=CONSTANT) -> JumpScaling\"\"\" _gridfunc.JumpScaling_swiginit(self, _gridfunc.new_JumpScaling(*args, **kwargs)) def Eval(self, h,", "Save = _swig_new_instance_method(_gridfunc.GridFunction_Save) def SaveGZ(self, file, precision=16): r\"\"\"SaveGZ(GridFunction self, char", "_swig_new_instance_method(_gridfunc.QuadratureFunction_GetVDim) def SetVDim(self, vdim_): r\"\"\"SetVDim(QuadratureFunction self, int vdim_)\"\"\" return _gridfunc.QuadratureFunction_SetVDim(self,", "SWIG (http://www.swig.org). 
# Version 4.0.2 # # Do not make", "const *[] irs=0) ComputeElementL2Errors(GridFunction self, VectorCoefficient exsol, Vector error, mfem::IntegrationRule", "exsol, mfem::IntegrationRule const *[] irs=0) -> double ComputeL2Error(GridFunction self, mfem::Coefficient", "precision) SaveGZ = _swig_new_instance_method(_gridfunc.QuadratureFunction_SaveGZ) # Register QuadratureFunction in _gridfunc: _gridfunc.QuadratureFunction_swigregister(QuadratureFunction)", "*args) SetSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetSpace) def GetVDim(self): r\"\"\"GetVDim(QuadratureFunction self) -> int\"\"\"", "ComputeElementL1Errors(GridFunction self, VectorCoefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0)", "const ny) -> GridFunction\"\"\" return _gridfunc.Extrude1DGridFunction(mesh, mesh2d, sol, ny) Extrude1DGridFunction", "_swig_repr CONSTANT = _gridfunc.JumpScaling_CONSTANT ONE_OVER_H = _gridfunc.JumpScaling_ONE_OVER_H P_SQUARED_OVER_H = _gridfunc.JumpScaling_P_SQUARED_OVER_H", "vdim=1) GetHessians(GridFunction self, int i, IntegrationRule ir, DenseMatrix hess, DenseMatrix", "in _gridfunc: _gridfunc.GridFunction_swigregister(GridFunction) class JumpScaling(object): r\"\"\"Proxy of C++ mfem::JumpScaling class.\"\"\"", "except __builtin__.Exception: strthis = \"\" return \"<%s.%s; %s >\" %", "set_class_attr(cls, name, value): if hasattr(cls, name) and not isinstance(getattr(cls, name),", "& out, int TimesToRefine=1)\"\"\" return _gridfunc.GridFunction_SaveSTL(self, out, TimesToRefine) SaveSTL =", "ny) Extrude1DGridFunction = _gridfunc.Extrude1DGridFunction def __iadd__(self, v): ret = _gridfunc.GridFunction_iadd(self,", "v_offset) \"\"\" return _gridfunc.GridFunction_MakeRef(self, *args) MakeRef = _swig_new_instance_method(_gridfunc.GridFunction_MakeRef) def MakeTRef(self,", "ComputeElementL2Errors(GridFunction self, VectorCoefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0)", "r\"\"\"ComputeGradError(GridFunction self, 
VectorCoefficient exgrad, mfem::IntegrationRule const *[] irs=0) -> double\"\"\"", "_swig_new_instance_method(_gridfunc.GridFunction_GetValuesFrom) def GetBdrValuesFrom(self, orig_func): r\"\"\"GetBdrValuesFrom(GridFunction self, GridFunction orig_func)\"\"\" return _gridfunc.GridFunction_GetBdrValuesFrom(self,", "self, double value) -> GridFunction Assign(GridFunction self, Vector v) ->", "ComputeL2Error(self, *args): r\"\"\" ComputeL2Error(GridFunction self, Coefficient exsol, mfem::IntegrationRule const *[]", "subdomain=-1)\"\"\" return _gridfunc.GridFunction_ComputeFlux(self, blfi, flux, wcoef, subdomain) ComputeFlux = _swig_new_instance_method(_gridfunc.GridFunction_ComputeFlux)", "name == \"this\": set(self, name, value) elif hasattr(self, name) and", "_swig_new_instance_method(_gridfunc.GridFunction_isub) def imul(self, c): r\"\"\"imul(GridFunction self, double c) -> GridFunction\"\"\"", "-> QuadratureFunction __init__(QuadratureFunction self, QuadratureSpace qspace_, double * qf_data, int", "& _in) -> QuadratureFunction \"\"\" _gridfunc.QuadratureFunction_swiginit(self, _gridfunc.new_QuadratureFunction(*args)) __swig_destroy__ = _gridfunc.delete_QuadratureFunction", "are doing--modify # the SWIG interface file instead. 
from sys", "FESpace(self, *args): r\"\"\" FESpace(GridFunction self) -> FiniteElementSpace FESpace(GridFunction self) ->", "return _gridfunc.GridFunction_imul(self, c) imul = _swig_new_instance_method(_gridfunc.GridFunction_imul) def idiv(self, c): r\"\"\"idiv(GridFunction", "import mfem._par.sort_pairs import mfem._par.ncmesh import mfem._par.vtk import mfem._par.element import mfem._par.table", "_swig_repr def MakeOwner(self, fec_): r\"\"\"MakeOwner(GridFunction self, FiniteElementCollection fec_)\"\"\" return _gridfunc.GridFunction_MakeOwner(self,", "vals, tr) GetFaceVectorValues = _swig_new_instance_method(_gridfunc.GridFunction_GetFaceVectorValues) def GetLaplacians(self, *args): r\"\"\" GetLaplacians(GridFunction", "precision=16) \"\"\" return _gridfunc.QuadratureFunction_Save(self, *args) Save = _swig_new_instance_method(_gridfunc.QuadratureFunction_Save) def SaveGZ(self,", "int comp=0, Vector tr=None) -> double \"\"\" return _gridfunc.GridFunction_GetValue(self, *args)", "*[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeGradError(self, exgrad, irs) ComputeGradError =", "_gridfunc.GridFunction_ComputeHDivError(self, exsol, exdiv, irs) ComputeHDivError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeHDivError) def ComputeHCurlError(self, exsol,", "int i, IntegrationRule ir, Vector laps, int vdim=1) GetLaplacians(GridFunction self,", "__iadd__ GridFunction.__idiv__ = __idiv__ GridFunction.__isub__ = __isub__ GridFunction.__imul__ = __imul__", "DenseMatrix vals, DenseMatrix tr, int comp=0)\"\"\" return _gridfunc.GridFunction_GetVectorFieldValues(self, i, ir,", "mfem::QuadratureFunction class.\"\"\" thisown = property(lambda x: x.this.own(), lambda x, v:", "r\"\"\"OwnsSpace(QuadratureFunction self) -> bool\"\"\" return _gridfunc.QuadratureFunction_OwnsSpace(self) OwnsSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_OwnsSpace) def", "__init__(self, *args): r\"\"\" __init__(QuadratureFunction self) -> QuadratureFunction 
__init__(QuadratureFunction self, QuadratureFunction", "== \"this\": set(self, name, value) elif hasattr(self, name) and isinstance(getattr(type(self),", "self, int i, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr) GetVectorValues(GridFunction", "VectorCoefficient exsol, Vector error, Coefficient weight=None, VectorCoefficient v_weight=None, mfem::IntegrationRule const", "= _swig_new_instance_method(_gridfunc.GridFunction_GetBdrValuesFrom) def GetVectorFieldValues(self, i, ir, vals, tr, comp=0): r\"\"\"GetVectorFieldValues(GridFunction", "__init__(self, *args, **kwargs): r\"\"\"__init__(JumpScaling self, double nu_=1.0, mfem::JumpScaling::JumpScalingType type_=CONSTANT) ->", "avgs) GetElementAverages = _swig_new_instance_method(_gridfunc.GridFunction_GetElementAverages) def GetElementDofValues(self, el, dof_vals): r\"\"\"GetElementDofValues(GridFunction self,", "exdiv, irs) ComputeDivError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeDivError) def ComputeDGFaceJumpError(self, *args): r\"\"\" ComputeDGFaceJumpError(GridFunction", "GetElementDofValues(self, el, dof_vals): r\"\"\"GetElementDofValues(GridFunction self, int el, Vector dof_vals)\"\"\" return", "mfem._par.operators import mfem._par.intrules import mfem._par.sparsemat import mfem._par.densemat import mfem._par.eltrans import", "know what you are doing--modify # the SWIG interface file", "file was automatically generated by SWIG (http://www.swig.org). 
# Version 4.0.2", "irs=0, intArray elems=None) -> double \"\"\" return _gridfunc.GridFunction_ComputeL2Error(self, *args) ComputeL2Error", "Vector v, int offset) -> GridFunction \"\"\" _gridfunc.GridFunction_swiginit(self, _gridfunc.new_GridFunction(*args)) def", "Do not make changes to this file unless you know", "const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeHDivError(self, exsol, exdiv, irs)", "return _gridfunc.GridFunction_iadd(self, c) iadd = _swig_new_instance_method(_gridfunc.GridFunction_iadd) def isub(self, *args): r\"\"\"", "_swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientTangent) def ComputeL2Error(self, *args): r\"\"\" ComputeL2Error(GridFunction self, Coefficient exsol, mfem::IntegrationRule", "mfem::IntegrationRule const *[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementL1Errors(self, *args) ComputeElementL1Errors =", "QuadratureSpace\"\"\" return _gridfunc.QuadratureFunction_GetSpace(self) GetSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetSpace) def SetSpace(self, *args): r\"\"\"", "intArray bdr_attr)\"\"\" return _gridfunc.GridFunction_ProjectBdrCoefficientNormal(self, vcoeff, bdr_attr) ProjectBdrCoefficientNormal = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientNormal) def", "def SaveGZ(self, file, precision=16): r\"\"\"SaveGZ(GridFunction self, char const * file,", "GetBdrValuesFrom = _swig_new_instance_method(_gridfunc.GridFunction_GetBdrValuesFrom) def GetVectorFieldValues(self, i, ir, vals, tr, comp=0):", "idiv(self, c): r\"\"\"idiv(GridFunction self, double c) -> GridFunction\"\"\" return _gridfunc.GridFunction_idiv(self,", "*args): r\"\"\" Save(GridFunction self, std::ostream & out) Save(GridFunction self, char", "self, Coefficient exsol, mfem::IntegrationRule const *[] irs=0) -> double ComputeMaxError(GridFunction", "for a class\"\"\" __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__) import weakref import 
mfem._par.array", "const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeW11Error(self, exsol, exgrad, norm_type,", "__swig_destroy__ = _gridfunc.delete_QuadratureFunction def GetSpace(self): r\"\"\"GetSpace(QuadratureFunction self) -> QuadratureSpace\"\"\" return", "return _gridfunc.GridFunction_ProjectBdrCoefficient(self, *args) ProjectBdrCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficient) def ProjectBdrCoefficientNormal(self, vcoeff, bdr_attr):", "Save(self, *args): r\"\"\" Save(GridFunction self, std::ostream & out) Save(GridFunction self,", "_gridfunc.GridFunction_VectorDim(self) VectorDim = _swig_new_instance_method(_gridfunc.GridFunction_VectorDim) def GetTrueVector(self, *args): r\"\"\" GetTrueVector(GridFunction self)", "_swig_new_instance_method(_gridfunc.GridFunction_GetTrueVector) def GetTrueDofs(self, tv): r\"\"\"GetTrueDofs(GridFunction self, Vector tv)\"\"\" return _gridfunc.GridFunction_GetTrueDofs(self,", "import mfem._par.fe import mfem._par.geom import mfem._par.fespace import mfem._par.mesh import mfem._par.sort_pairs", "GetHessians(GridFunction self, int i, IntegrationRule ir, DenseMatrix hess, DenseMatrix tr,", "P_SQUARED_OVER_H = _gridfunc.JumpScaling_P_SQUARED_OVER_H def __init__(self, *args, **kwargs): r\"\"\"__init__(JumpScaling self, double", "GetFaceValues = _swig_new_instance_method(_gridfunc.GridFunction_GetFaceValues) def GetFaceVectorValues(self, i, side, ir, vals, tr):", "comp, der_comp, der) GetDerivative = _swig_new_instance_method(_gridfunc.GridFunction_GetDerivative) def GetDivergence(self, tr): r\"\"\"GetDivergence(GridFunction", "_gridfunc.GridFunction_SaveGZ(self, file, precision) SaveGZ = _swig_new_instance_method(_gridfunc.GridFunction_SaveGZ) # Register GridFunction in", "GetLaplacians = _swig_new_instance_method(_gridfunc.GridFunction_GetLaplacians) def GetHessians(self, *args): r\"\"\" GetHessians(GridFunction self, int", "return 
_gridfunc.GridFunction_ComputeElementL1Errors(self, *args) ComputeElementL1Errors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL1Errors) def ComputeElementL2Errors(self, *args): r\"\"\"", "GridFunction __init__(GridFunction self, FiniteElementSpace fes, Vector v, int offset) ->", "FiniteElementSpace f) -> GridFunction __init__(GridFunction self, FiniteElementSpace f, double *", "-> double\"\"\" return _gridfunc.GridFunction_ComputeGradError(self, exgrad, irs) ComputeGradError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeGradError) def", "self, VectorCoefficient excurl, mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return", "self, int idx) -> IntegrationRule\"\"\" return _gridfunc.QuadratureFunction_GetElementIntRule(self, idx) GetElementIntRule =", "v, int v_offset) \"\"\" return _gridfunc.GridFunction_MakeRef(self, *args) MakeRef = _swig_new_instance_method(_gridfunc.GridFunction_MakeRef)", "exgrad, mfem::IntegrationRule const *[] irs=0) -> double\"\"\" return _gridfunc.GridFunction_ComputeGradError(self, exgrad,", "self) return set_instance_attr def _swig_setattr_nondynamic_class_variable(set): def set_class_attr(cls, name, value): if", "mat) -> std::ostream __lshift__(std::ostream & out, Mesh mesh) -> std::ostream", "i, Vector weights, double min_=0.0, double max_=mfem::infinity()) \"\"\" return _gridfunc.GridFunction_ImposeBounds(self,", "vec_field, comp=0): r\"\"\"ProjectVectorFieldOn(GridFunction self, GridFunction vec_field, int comp=0)\"\"\" return _gridfunc.GridFunction_ProjectVectorFieldOn(self,", "gf1, GridFunction gf2) -> double\"\"\" return _gridfunc.ComputeElementLpDistance(p, i, gf1, gf2)", "_gridfunc.GridFunction_WriteToStream(self, StringIO) WriteToStream = _swig_new_instance_method(_gridfunc.GridFunction_WriteToStream) def iadd(self, c): r\"\"\"iadd(GridFunction self,", "src) ProjectGridFunction = _swig_new_instance_method(_gridfunc.GridFunction_ProjectGridFunction) def ProjectCoefficient(self, *args): 
r\"\"\" ProjectCoefficient(GridFunction self,", "def GetValues(self, *args): r\"\"\" GetValues(GridFunction self, int i, IntegrationRule ir,", "MakeRef(self, *args): r\"\"\" MakeRef(GridFunction self, Vector base, int offset, int", "comp=0, Vector tr=None) -> double \"\"\" return _gridfunc.GridFunction_GetValue(self, *args) GetValue", "v_weight=None, mfem::IntegrationRule const *[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeLpError(self,", "src): r\"\"\"ProjectGridFunction(GridFunction self, GridFunction src)\"\"\" return _gridfunc.GridFunction_ProjectGridFunction(self, src) ProjectGridFunction =", "out) Save(GridFunction self, char const * fname, int precision=16) Save(GridFunction", "r\"\"\" __init__(QuadratureFunction self) -> QuadratureFunction __init__(QuadratureFunction self, QuadratureFunction orig) ->", "ComputeFlux = _swig_new_instance_method(_gridfunc.GridFunction_ComputeFlux) def Assign(self, *args): r\"\"\" Assign(GridFunction self, GridFunction", "isinstance(getattr(cls, name), property): set(cls, name, value) else: raise AttributeError(\"You cannot", "= _swig_new_instance_method(_gridfunc.JumpScaling_Eval) __swig_destroy__ = _gridfunc.delete_JumpScaling # Register JumpScaling in _gridfunc:", "# Register JumpScaling in _gridfunc: _gridfunc.JumpScaling_swigregister(JumpScaling) class QuadratureFunction(mfem._par.vector.Vector): r\"\"\"Proxy of", "hi_) ImposeBounds(GridFunction self, int i, Vector weights, double min_=0.0, double", "''' GetNodalValues(i) -> GetNodalValues(vector, vdim) GetNodalValues(i, array<dobule>, vdim) ''' from", "= _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldNodalValues) def ProjectVectorFieldOn(self, vec_field, comp=0): r\"\"\"ProjectVectorFieldOn(GridFunction self, GridFunction vec_field,", "grad)\"\"\" return _gridfunc.GridFunction_GetGradient(self, tr, grad) GetGradient = _swig_new_instance_method(_gridfunc.GridFunction_GetGradient) def GetGradients(self,", "MakeRef(GridFunction self, Vector 
base, int offset) MakeRef(GridFunction self, FiniteElementSpace f,", "def ComputeElementL2Errors(self, *args): r\"\"\" ComputeElementL2Errors(GridFunction self, Coefficient exsol, Vector error,", "DenseMatrix tr=None) \"\"\" return _gridfunc.GridFunction_GetVectorValues(self, *args) GetVectorValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorValues) def", "char const * gf_file, int const precision)\"\"\" return _gridfunc.GridFunction_SaveToFile(self, gf_file,", "intArray attr) ProjectBdrCoefficient(GridFunction self, VectorCoefficient vcoeff, intArray attr) ProjectBdrCoefficient(GridFunction self,", "self, Coefficient exsol, VectorCoefficient exgrad, mfem::IntegrationRule const *[] irs=0) ->", "= _gridfunc.GridFunction_isub(self, v) ret.thisown = 0 return self def __idiv__(self,", "* file, int precision=16) \"\"\" return _gridfunc.GridFunction_Save(self, *args) Save =", "double * qf_data, int vdim_=1) -> QuadratureFunction __init__(QuadratureFunction self, Mesh", "const *[] irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementL1Errors(self, *args) ComputeElementL1Errors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL1Errors)", "laps, int vdim=1) GetLaplacians(GridFunction self, int i, IntegrationRule ir, Vector", "TimesToRefine=1)\"\"\" return _gridfunc.GridFunction_SaveSTL(self, out, TimesToRefine) SaveSTL = _swig_new_instance_method(_gridfunc.GridFunction_SaveSTL) __swig_destroy__ =", "add class attributes to %s\" % cls) return set_class_attr def", "or \".\" in __name__: from . 
import _gridfunc else: import", "def GetBdrValuesFrom(self, orig_func): r\"\"\"GetBdrValuesFrom(GridFunction self, GridFunction orig_func)\"\"\" return _gridfunc.GridFunction_GetBdrValuesFrom(self, orig_func)", "def __isub__(self, v): ret = _gridfunc.GridFunction_isub(self, v) ret.thisown = 0", "mfem._par.table import mfem._par.hash import mfem._par.vertex import mfem._par.fe_coll import mfem._par.lininteg import", "Vector GetTrueVector(GridFunction self) -> Vector \"\"\" return _gridfunc.GridFunction_GetTrueVector(self, *args) GetTrueVector", "v: x.this.own(v), doc=\"The membership flag\") __repr__ = _swig_repr def MakeOwner(self,", "GetGradients = _swig_new_instance_method(_gridfunc.GridFunction_GetGradients) def GetVectorGradient(self, tr, grad): r\"\"\"GetVectorGradient(GridFunction self, ElementTransformation", "FiniteElementSpace \"\"\" return _gridfunc.GridFunction_FESpace(self, *args) FESpace = _swig_new_instance_method(_gridfunc.GridFunction_FESpace) def SetSpace(self,", "std::ostream & out) Save(GridFunction self, char const * fname, int", "\"\"\" return _gridfunc.QuadratureFunction_Save(self, *args) Save = _swig_new_instance_method(_gridfunc.QuadratureFunction_Save) def SaveGZ(self, file,", "__builtin__.Exception: strthis = \"\" return \"<%s.%s; %s >\" % (self.__class__.__module__,", "instead. 
from sys import version_info as _swig_python_version_info if _swig_python_version_info <", "r\"\"\"GetVectorFieldValues(GridFunction self, int i, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr,", "return _gridfunc.GridFunction_SaveSTL(self, out, TimesToRefine) SaveSTL = _swig_new_instance_method(_gridfunc.GridFunction_SaveSTL) __swig_destroy__ = _gridfunc.delete_GridFunction", "WriteToStream(self, StringIO): r\"\"\"WriteToStream(GridFunction self, PyObject * StringIO) -> PyObject *\"\"\"", "GetTrueDofs(self, tv): r\"\"\"GetTrueDofs(GridFunction self, Vector tv)\"\"\" return _gridfunc.GridFunction_GetTrueDofs(self, tv) GetTrueDofs", "nu_=1.0, mfem::JumpScaling::JumpScalingType type_=CONSTANT) -> JumpScaling\"\"\" _gridfunc.JumpScaling_swiginit(self, _gridfunc.new_JumpScaling(*args, **kwargs)) def Eval(self,", "_gridfunc.new_GridFunction(*args)) def SaveToFile(self, gf_file, precision): r\"\"\"SaveToFile(GridFunction self, char const *", "out, QuadratureFunction qf) -> std::ostream & \"\"\" return _gridfunc.__lshift__(*args) __lshift__", "* qf_data, int vdim_=-1) \"\"\" return _gridfunc.QuadratureFunction_SetSpace(self, *args) SetSpace =", "= _swig_new_instance_method(_gridfunc.GridFunction_GetValuesFrom) def GetBdrValuesFrom(self, orig_func): r\"\"\"GetBdrValuesFrom(GridFunction self, GridFunction orig_func)\"\"\" return", "int idx, DenseMatrix values) \"\"\" return _gridfunc.QuadratureFunction_GetElementValues(self, *args) GetElementValues =", "cannot add instance attributes to %s\" % self) return set_instance_attr", "comp=0)\"\"\" return _gridfunc.GridFunction_ProjectVectorFieldOn(self, vec_field, comp) ProjectVectorFieldOn = _swig_new_instance_method(_gridfunc.GridFunction_ProjectVectorFieldOn) def GetDerivative(self,", "const *[] irs=0) -> double ComputeMaxError(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule", "ComputeMaxError(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule const *[] irs=0) -> double", "return 
_gridfunc.GridFunction_ComputeHCurlError(self, exsol, excurl, irs) ComputeHCurlError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeHCurlError) def ComputeMaxError(self,", "vec, args[0]) vec.thisown = 0 return vec.GetDataArray() else: return _gridfunc.GridFunction_GetNodalValues(self,", "r\"\"\" GetLaplacians(GridFunction self, int i, IntegrationRule ir, Vector laps, int", "u, GridFunction flux, Vector error_estimates, intArray aniso_flags=None, int with_subdomains=1, bool", "double\"\"\" return _gridfunc.ComputeElementLpDistance(p, i, gf1, gf2) ComputeElementLpDistance = _gridfunc.ComputeElementLpDistance class", "gf2) -> double\"\"\" return _gridfunc.ComputeElementLpDistance(p, i, gf1, gf2) ComputeElementLpDistance =", "import mfem._par.geom import mfem._par.fespace import mfem._par.mesh import mfem._par.sort_pairs import mfem._par.ncmesh", "-> double\"\"\" return _gridfunc.ComputeElementLpDistance(p, i, gf1, gf2) ComputeElementLpDistance = _gridfunc.ComputeElementLpDistance", "ProjectDiscCoefficient(self, *args): r\"\"\" ProjectDiscCoefficient(GridFunction self, VectorCoefficient coeff) ProjectDiscCoefficient(GridFunction self, Coefficient", "_swig_new_instance_method(_gridfunc.GridFunction_idiv) def Save(self, *args): r\"\"\" Save(GridFunction self, std::ostream & out)", "p): r\"\"\"Eval(JumpScaling self, double h, int p) -> double\"\"\" return", "GetElementValues(QuadratureFunction self, int idx, Vector values) GetElementValues(QuadratureFunction self, int idx,", "ComputeMaxError(GridFunction self, Coefficient exsol, mfem::IntegrationRule const *[] irs=0) -> double", "irs=0): r\"\"\"ComputeHDivError(GridFunction self, VectorCoefficient exsol, Coefficient exdiv, mfem::IntegrationRule const *[]", "StringIO) -> PyObject *\"\"\" return _gridfunc.GridFunction_WriteToStream(self, StringIO) WriteToStream = _swig_new_instance_method(_gridfunc.GridFunction_WriteToStream)", "six.add_metaclass\"\"\" def wrapper(cls): return metaclass(cls.__name__, 
cls.__bases__, cls.__dict__.copy()) return wrapper class", "IntegrationPoint ip, Vector val) GetVectorValue(GridFunction self, ElementTransformation T, IntegrationPoint ip,", "= _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientNormal) def ProjectBdrCoefficientTangent(self, vcoeff, bdr_attr): r\"\"\"ProjectBdrCoefficientTangent(GridFunction self, VectorCoefficient vcoeff,", "Vector val, int comp)\"\"\" return _gridfunc.GridFunction_GetVectorFieldNodalValues(self, val, comp) GetVectorFieldNodalValues =", "*[] irs=0) -> double ComputeMaxError(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule const", "_gridfunc.GridFunction_MakeRef(self, *args) MakeRef = _swig_new_instance_method(_gridfunc.GridFunction_MakeRef) def MakeTRef(self, *args): r\"\"\" MakeTRef(GridFunction", "double \"\"\" return _gridfunc.GridFunction_ComputeDGFaceJumpError(self, *args) ComputeDGFaceJumpError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeDGFaceJumpError) def ComputeH1Error(self,", "idiv = _swig_new_instance_method(_gridfunc.GridFunction_idiv) def Save(self, *args): r\"\"\" Save(GridFunction self, std::ostream", "int vdim=1) -> int\"\"\" return _gridfunc.GridFunction_GetFaceValues(self, i, side, ir, vals,", "irs=0) ComputeElementL1Errors(GridFunction self, VectorCoefficient exsol, Vector error, mfem::IntegrationRule const *[]", "_swig_new_instance_method(_gridfunc.GridFunction_GetVectorGradient) def GetElementAverages(self, avgs): r\"\"\"GetElementAverages(GridFunction self, GridFunction avgs)\"\"\" return _gridfunc.GridFunction_GetElementAverages(self,", "irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementMaxErrors(self, *args) ComputeElementMaxErrors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementMaxErrors) def ComputeFlux(self,", "const * file, int precision=16)\"\"\" return _gridfunc.GridFunction_SaveGZ(self, file, precision) SaveGZ", "double h, int p) -> double\"\"\" return _gridfunc.JumpScaling_Eval(self, h, p)", 
"-> QuadratureFunction \"\"\" _gridfunc.QuadratureFunction_swiginit(self, _gridfunc.new_QuadratureFunction(*args)) __swig_destroy__ = _gridfunc.delete_QuadratureFunction def GetSpace(self):", "flux, wcoef=True, subdomain=-1): r\"\"\"ComputeFlux(GridFunction self, BilinearFormIntegrator blfi, GridFunction flux, bool", "Vector() _gridfunc.GridFunction_GetNodalValues(self, vec, args[0]) vec.thisown = 0 return vec.GetDataArray() else:", "GetValues(self, *args): r\"\"\" GetValues(GridFunction self, int i, IntegrationRule ir, Vector", "GetElementDofValues = _swig_new_instance_method(_gridfunc.GridFunction_GetElementDofValues) def ImposeBounds(self, *args): r\"\"\" ImposeBounds(GridFunction self, int", "*[] irs=0) -> double ComputeL2Error(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule const", "const *[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeMaxError(self, *args) ComputeMaxError", "\" + self.this.__repr__() except __builtin__.Exception: strthis = \"\" return \"<%s.%s;", "= _swig_repr def __init__(self, *args): r\"\"\" __init__(QuadratureFunction self) -> QuadratureFunction", "def GetElementValues(self, *args): r\"\"\" GetElementValues(QuadratureFunction self, int idx, Vector values)", "error_estimates, intArray aniso_flags=None, int with_subdomains=1, bool with_coeff=False) -> double\"\"\" return", "C++ mfem::GridFunction class.\"\"\" thisown = property(lambda x: x.this.own(), lambda x,", "Mesh m, mfem::GridFunction *[] gf_array, int num_pieces) -> GridFunction __init__(GridFunction", "isub = _swig_new_instance_method(_gridfunc.GridFunction_isub) def imul(self, c): r\"\"\"imul(GridFunction self, double c)", "*args, **kwargs): r\"\"\"__init__(JumpScaling self, double nu_=1.0, mfem::JumpScaling::JumpScalingType type_=CONSTANT) -> JumpScaling\"\"\"", "*args): r\"\"\" MakeTRef(GridFunction self, FiniteElementSpace f, double * tv) MakeTRef(GridFunction", "*[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeL1Error(self, 
*args) ComputeL1Error =", "mesh2d, GridFunction sol, int const ny) -> GridFunction\"\"\" return _gridfunc.Extrude1DGridFunction(mesh,", "-> int\"\"\" return _gridfunc.GridFunction_GetFaceValues(self, i, side, ir, vals, tr, vdim)", "self, ElementTransformation T, IntegrationPoint ip, int comp=0, Vector tr=None) ->", "Vector tv)\"\"\" return _gridfunc.GridFunction_SetFromTrueDofs(self, tv) SetFromTrueDofs = _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueDofs) def SetFromTrueVector(self):", "Vector val) GetVectorValue(GridFunction self, ElementTransformation T, IntegrationPoint ip, Vector val,", "self, FiniteElementSpace f, double * tv) MakeTRef(GridFunction self, FiniteElementSpace f,", "return self GridFunction.__iadd__ = __iadd__ GridFunction.__idiv__ = __idiv__ GridFunction.__isub__ =", "grad): r\"\"\"GetVectorGradient(GridFunction self, ElementTransformation tr, DenseMatrix grad)\"\"\" return _gridfunc.GridFunction_GetVectorGradient(self, tr,", "\"thisown\": self.this.own(value) elif name == \"this\": set(self, name, value) elif", "irs=0): r\"\"\"ComputeDivError(GridFunction self, Coefficient exdiv, mfem::IntegrationRule const *[] irs=0) ->", "= 0 return self def __idiv__(self, v): ret = _gridfunc.GridFunction_idiv(self,", "_gridfunc: _gridfunc.ExtrudeCoefficient_swigregister(ExtrudeCoefficient) def Extrude1DGridFunction(mesh, mesh2d, sol, ny): r\"\"\"Extrude1DGridFunction(Mesh mesh, Mesh", "BilinearFormIntegrator blfi, GridFunction flux, bool wcoef=True, int subdomain=-1)\"\"\" return _gridfunc.GridFunction_ComputeFlux(self,", "tr): r\"\"\"GetDivergence(GridFunction self, ElementTransformation tr) -> double\"\"\" return _gridfunc.GridFunction_GetDivergence(self, tr)", "_gridfunc.GridFunction_ProjectBdrCoefficientNormal(self, vcoeff, bdr_attr) ProjectBdrCoefficientNormal = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientNormal) def ProjectBdrCoefficientTangent(self, vcoeff, bdr_attr):", "n_): r\"\"\"__init__(ExtrudeCoefficient 
self, Mesh m, Coefficient s, int n_) ->", "int i, IntegrationRule ir, Vector laps, DenseMatrix tr, int vdim=1)", "Coefficient exsol, mfem::IntegrationRule const *[] irs=0) -> double ComputeL1Error(GridFunction self,", "const * file, int precision=16) \"\"\" return _gridfunc.QuadratureFunction_Save(self, *args) Save", "*args) ComputeElementMaxErrors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementMaxErrors) def ComputeFlux(self, blfi, flux, wcoef=True, subdomain=-1):", "tr, grad) GetGradient = _swig_new_instance_method(_gridfunc.GridFunction_GetGradient) def GetGradients(self, *args): r\"\"\" GetGradients(GridFunction", "double c) -> GridFunction \"\"\" return _gridfunc.GridFunction_isub(self, *args) isub =", "exsol, mfem::IntegrationRule const *[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeL1Error(self,", "int i, IntegrationPoint ip, Vector val) GetVectorValue(GridFunction self, ElementTransformation T,", "vals, int comp=0, DenseMatrix tr=None) \"\"\" return _gridfunc.GridFunction_GetValues(self, *args) GetValues", "irs=0) \"\"\" return _gridfunc.GridFunction_ComputeElementLpErrors(self, *args) ComputeElementLpErrors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementLpErrors) def ComputeElementL1Errors(self,", "-> double ComputeL2Error(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule const *[] irs=0,", "ProjectBdrCoefficient(GridFunction self, VectorCoefficient vcoeff, intArray attr) ProjectBdrCoefficient(GridFunction self, mfem::Coefficient *[]", "def ComputeL1Error(self, *args): r\"\"\" ComputeL1Error(GridFunction self, Coefficient exsol, mfem::IntegrationRule const", "__init__(QuadratureFunction self, QuadratureFunction orig) -> QuadratureFunction __init__(QuadratureFunction self, QuadratureSpace qspace_,", "self, ElementTransformation T, IntegrationRule ir, Vector vals, int comp=0, DenseMatrix", "ComputeGradError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeGradError) def 
ComputeCurlError(self, excurl, irs=0): r\"\"\"ComputeCurlError(GridFunction self, VectorCoefficient", "return _gridfunc.GridFunction_ComputeElementL2Errors(self, *args) ComputeElementL2Errors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL2Errors) def ComputeElementMaxErrors(self, *args): r\"\"\"", "_gridfunc.ZZErrorEstimator def ComputeElementLpDistance(p, i, gf1, gf2): r\"\"\"ComputeElementLpDistance(double p, int i,", "ComputeHDivError(self, exsol, exdiv, irs=0): r\"\"\"ComputeHDivError(GridFunction self, VectorCoefficient exsol, Coefficient exdiv,", "int offset) -> GridFunction \"\"\" _gridfunc.GridFunction_swiginit(self, _gridfunc.new_GridFunction(*args)) def SaveToFile(self, gf_file,", "= _swig_new_instance_method(_gridfunc.GridFunction_GetDerivative) def GetDivergence(self, tr): r\"\"\"GetDivergence(GridFunction self, ElementTransformation tr) ->", "ComputeElementLpErrors(GridFunction self, double const p, Coefficient exsol, Vector error, Coefficient", "self, QuadratureSpace qspace_, int vdim_=1) -> QuadratureFunction __init__(QuadratureFunction self, QuadratureSpace", "tr, grad): r\"\"\"GetGradient(GridFunction self, ElementTransformation tr, Vector grad)\"\"\" return _gridfunc.GridFunction_GetGradient(self,", "property): set(self, name, value) else: raise AttributeError(\"You cannot add instance", "7, 0): raise RuntimeError(\"Python 2.7 or later required\") # Import", "SetSpace(self, *args): r\"\"\" SetSpace(QuadratureFunction self, QuadratureSpace qspace_, int vdim_=-1) SetSpace(QuadratureFunction", "self, int i, IntegrationPoint ip, Vector val) GetVectorValue(GridFunction self, ElementTransformation", "name, value) else: raise AttributeError(\"You cannot add class attributes to", "int i, Vector weights, double min_=0.0, double max_=mfem::infinity()) \"\"\" return", "IntegrationRule ir, DenseMatrix vals, DenseMatrix tr, int comp=0)\"\"\" return _gridfunc.GridFunction_GetVectorFieldValues(self,", "= 
_swig_new_instance_method(_gridfunc.GridFunction_ProjectGridFunction) def ProjectCoefficient(self, *args): r\"\"\" ProjectCoefficient(GridFunction self, Coefficient coeff)", "values) GetElementValues(QuadratureFunction self, int idx, int const ip_num, Vector values)", "= _swig_new_instance_method(_gridfunc.QuadratureFunction_GetSpace) def SetSpace(self, *args): r\"\"\" SetSpace(QuadratureFunction self, QuadratureSpace qspace_,", "GridFunction __init__(GridFunction self, GridFunction orig) -> GridFunction __init__(GridFunction self, FiniteElementSpace", "_gridfunc.JumpScaling_ONE_OVER_H P_SQUARED_OVER_H = _gridfunc.JumpScaling_P_SQUARED_OVER_H def __init__(self, *args, **kwargs): r\"\"\"__init__(JumpScaling self,", "data) -> GridFunction __init__(GridFunction self, Mesh m, std::istream & input)", "return _gridfunc.GridFunction_ComputeElementMaxErrors(self, *args) ComputeElementMaxErrors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementMaxErrors) def ComputeFlux(self, blfi, flux,", "ComputeW11Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeW11Error) def ComputeL1Error(self, *args): r\"\"\" ComputeL1Error(GridFunction self, Coefficient", "r\"\"\" GetValue(GridFunction self, int i, IntegrationPoint ip, int vdim=1) ->", "SetOwnsSpace(self, own): r\"\"\"SetOwnsSpace(QuadratureFunction self, bool own)\"\"\" return _gridfunc.QuadratureFunction_SetOwnsSpace(self, own) SetOwnsSpace", "*[] irs=0) -> double \"\"\" return _gridfunc.GridFunction_ComputeH1Error(self, *args) ComputeH1Error =", "self.this.own(value) elif name == \"this\": set(self, name, value) elif hasattr(self,", "\"\"\" return _gridfunc.GridFunction_GetVectorValues(self, *args) GetVectorValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorValues) def GetFaceValues(self, i,", "self def __idiv__(self, v): ret = _gridfunc.GridFunction_idiv(self, v) ret.thisown =", "self, VectorCoefficient coeff, mfem::GridFunction::AvgType type) \"\"\" return 
_gridfunc.GridFunction_ProjectDiscCoefficient(self, *args) ProjectDiscCoefficient", "Vector weights, double min_=0.0, double max_=mfem::infinity()) \"\"\" return _gridfunc.GridFunction_ImposeBounds(self, *args)", "_swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldValues) def ReorderByNodes(self): r\"\"\"ReorderByNodes(GridFunction self)\"\"\" return _gridfunc.GridFunction_ReorderByNodes(self) ReorderByNodes = _swig_new_instance_method(_gridfunc.GridFunction_ReorderByNodes)", "double\"\"\" return _gridfunc.GridFunction_GetDivergence(self, tr) GetDivergence = _swig_new_instance_method(_gridfunc.GridFunction_GetDivergence) def GetCurl(self, tr,", "offset) -> GridFunction \"\"\" _gridfunc.GridFunction_swiginit(self, _gridfunc.new_GridFunction(*args)) def SaveToFile(self, gf_file, precision):", "out, field_name, ref) SaveVTK = _swig_new_instance_method(_gridfunc.GridFunction_SaveVTK) def SaveSTL(self, out, TimesToRefine=1):", "= _swig_new_instance_method(_gridfunc.GridFunction_ComputeCurlError) def ComputeDivError(self, exdiv, irs=0): r\"\"\"ComputeDivError(GridFunction self, Coefficient exdiv,", "def GetCurl(self, tr, curl): r\"\"\"GetCurl(GridFunction self, ElementTransformation tr, Vector curl)\"\"\"", "GetFaceValues(self, i, side, ir, vals, tr, vdim=1): r\"\"\"GetFaceValues(GridFunction self, int", "mfem._par.fe import mfem._par.geom import mfem._par.fespace import mfem._par.mesh import mfem._par.sort_pairs import", "i, side, ir, vals, tr, vdim=1): r\"\"\"GetFaceValues(GridFunction self, int i,", "GridFunction orig_func)\"\"\" return _gridfunc.GridFunction_GetValuesFrom(self, orig_func) GetValuesFrom = _swig_new_instance_method(_gridfunc.GridFunction_GetValuesFrom) def GetBdrValuesFrom(self,", "self)\"\"\" return _gridfunc.GridFunction_ReorderByNodes(self) ReorderByNodes = _swig_new_instance_method(_gridfunc.GridFunction_ReorderByNodes) def GetNodalValues(self, *args): '''", "def set_instance_attr(self, name, value): if name == \"thisown\": 
self.this.own(value) elif", "ComputeH1Error(GridFunction self, Coefficient exsol, VectorCoefficient exgrad, mfem::IntegrationRule const *[] irs=0)", "FiniteElementSpace FESpace(GridFunction self) -> FiniteElementSpace \"\"\" return _gridfunc.GridFunction_FESpace(self, *args) FESpace", "return _gridfunc.ComputeElementLpDistance(p, i, gf1, gf2) ComputeElementLpDistance = _gridfunc.ComputeElementLpDistance class ExtrudeCoefficient(mfem._par.coefficient.Coefficient):", "return _gridfunc.GridFunction_idiv(self, c) idiv = _swig_new_instance_method(_gridfunc.GridFunction_idiv) def Save(self, *args): r\"\"\"", "JumpScaling(object): r\"\"\"Proxy of C++ mfem::JumpScaling class.\"\"\" thisown = property(lambda x:", "return vec.GetDataArray() else: return _gridfunc.GridFunction_GetNodalValues(self, *args) def GetVectorFieldNodalValues(self, val, comp):" ]
[ "['console'], 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), }, 'django.request': { 'level': 'INFO', 'handlers':", "PWA_APP_SCOPE = '/' PWA_APP_ORIENTATION = 'portrait' PWA_APP_START_URL = '/' PWA_APP_ICONS", "= 'json' REST_FRAMEWORK = { # Use Django's standard `django.contrib.auth`", "STATICFILES_DIRS = [ # os.path.join(BASE_DIR, 'static'), # ] PWA_APP_NAME =", "If you wish to associate users to errors (assuming you", "paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR =", "LOGIN_REDIRECT_URL = 'home' LOGOUT_REDIRECT_URL = 'home' # no email for", "], 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.TokenAuthentication', 'rest_framework.authentication.SessionAuthentication', ), 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination', 'PAGE_SIZE': 10", "{ 'level': 'INFO', 'handlers': ['console'] } # 'celery': { #", "LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'console':", "}, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, {", "inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))", "\"type\": \"image/png\", \"purpose\": \"any maskable\" } ] PWA_APP_DIR = 'ltr'", "'routes', 'accounts', 'dashboard.apps.DashboardConfig', 'api.apps.ApiConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles',", "# or allow read-only access for unauthenticated users. 
'DEFAULT_PERMISSION_CLASSES': [", "= { \"default\": { \"ENGINE\": os.environ.get(\"SQL_ENGINE\", \"django.db.backends.sqlite3\"), \"NAME\": os.environ.get(\"SQL_DATABASE\", os.path.join(BASE_DIR,", "os.environ.get(\"EMAIL_PORT\") EMAIL_BACKEND = os.environ.get(\"EMAIL_BACKEND\") DEFAULT_FROM_EMAIL = '<EMAIL>' # CELERY #", "= os.environ.get(\"DJANGO_ALLOWED_HOSTS\", 'localhost').split(\" \") # Application definition INSTALLED_APPS = [", "# CELERY_RESULT_SERIALIZER = 'json' REST_FRAMEWORK = { # Use Django's", "os.environ.get(\"EMAIL_BACKEND\") DEFAULT_FROM_EMAIL = '<EMAIL>' # CELERY # CELERY_BROKER_URL = 'redis://redis:6379/0'", "ROOT_URLCONF = 'tracks.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS':", "= os.path.dirname(os.path.dirname(os.path.abspath(__file__))) PWA_SERVICE_WORKER_PATH = os.path.join( BASE_DIR, 'routes/static/routes/js', 'serviceworker.js') print(os.path.join( BASE_DIR,", "\"src\": \"/static/routes/favicon_io/android-chrome-512x512.png\", \"sizes\": \"512x512\", \"type\": \"image/png\", \"purpose\": \"any maskable\" }", "= os.environ.get(\"EMAIL_HOST_USER\") EMAIL_HOST_PASSWORD = os.environ.get(\"EMAIL_HOST_PASSWORD\") EMAIL_PORT = os.environ.get(\"EMAIL_PORT\") EMAIL_BACKEND =", "DEBUG = int(os.environ.get(\"DEBUG\", default=0)) SECRET_KEY = os.environ.get(\"SECRET_KEY\", '<KEY>') # 'DJANGO_ALLOWED_HOSTS'", "# 'celery': { # 'handlers': ['console'], # 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),", "'localhost').split(\" \") # Application definition INSTALLED_APPS = [ 'routes', 'accounts',", "} ] PWA_APP_DIR = 'ltr' PWA_APP_LANG = 'en-US' sentry_sdk.init( dsn=\"https://09ce3488b18c4db19820b873eecc30c4@sentry.io/1878812\",", "# Build paths inside the project like this: os.path.join(BASE_DIR, ...)", "'rest_framework.authentication.SessionAuthentication', ), 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination', 'PAGE_SIZE': 10 } LOGGING = {", "= 'home' # no email for localhost or 
staging EMAIL_USE_TLS", "] PWA_APP_NAME = 'ChalkTracks' PWA_APP_DESCRIPTION = \"Indoor Climbing Tracker\" PWA_APP_THEME_COLOR", "'handlers': ['console'], 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), }, 'django': { 'handlers': ['console'],", "STATIC_URL = '/static/' MEDIA_URL = '/media/' STATIC_ROOT = './static/' MEDIA_ROOT", "staging EMAIL_USE_TLS = os.environ.get(\"EMAIL_USE_TLS\") EMAIL_HOST = os.environ.get(\"EMAIL_HOST\") EMAIL_HOST_USER = os.environ.get(\"EMAIL_HOST_USER\")", "a single string of hosts with a space between each.", "maskable\" }, { \"src\": \"/static/routes/favicon_io/android-chrome-192x192.png\", \"sizes\": \"192x192\", \"type\": \"image/png\", \"purpose\":", "os.environ.get(\"SQL_HOST\", \"localhost\"), \"PORT\": os.environ.get(\"SQL_PORT\", \"5432\"), } } # Password validation", "{ 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME':", "(CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL = '/static/' MEDIA_URL =", "import sentry_sdk from sentry_sdk.integrations.django import DjangoIntegration # Build paths inside", "JavaScript, Images) # https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL = '/static/' MEDIA_URL = '/media/'", "= os.environ.get(\"SECRET_KEY\", '<KEY>') # 'DJANGO_ALLOWED_HOSTS' should be a single string", "single string of hosts with a space between each. #", "{ 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] #", "{ 'handlers': ['console'], 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), }, 'django': { 'handlers':", "'<KEY>') # 'DJANGO_ALLOWED_HOSTS' should be a single string of hosts", "using # django.contrib.auth) you may enable sending PII data. 
send_default_pii=True", "'console': { 'format': '%(levelname)s %(asctime)s %(module)s: %(message)s' }, }, 'handlers':", "= True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.2/howto/static-files/", "the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) PWA_SERVICE_WORKER_PATH", "= os.environ.get(\"EMAIL_BACKEND\") DEFAULT_FROM_EMAIL = '<EMAIL>' # CELERY # CELERY_BROKER_URL =", "\"512x512\", \"type\": \"image/png\", \"purpose\": \"any maskable\" } ] PWA_APP_DIR =", "'redis://redis:6379/0' # CELERY_RESULT_BACKEND = 'redis://redis:6379/0' # BROKER_URL = 'redis://localhost:6379/0' #", "PWA_APP_ICONS = [ { 'src': '/static/routes/favicon_io/favicon-32x32.png', 'sizes': '32x32', \"type\": \"image/png\",", "os.environ.get(\"EMAIL_HOST_PASSWORD\") EMAIL_PORT = os.environ.get(\"EMAIL_PORT\") EMAIL_BACKEND = os.environ.get(\"EMAIL_BACKEND\") DEFAULT_FROM_EMAIL = '<EMAIL>'", "os.path.dirname(os.path.dirname(os.path.abspath(__file__))) PWA_SERVICE_WORKER_PATH = os.path.join( BASE_DIR, 'routes/static/routes/js', 'serviceworker.js') print(os.path.join( BASE_DIR, 'routes/static/routes/js',", "INSTALLED_APPS = [ 'routes', 'accounts', 'dashboard.apps.DashboardConfig', 'api.apps.ApiConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes',", "\"image/png\", \"purpose\": \"any maskable\" }, { \"src\": \"/static/routes/favicon_io/android-chrome-512x512.png\", \"sizes\": \"512x512\",", "'#000000' PWA_APP_DISPLAY = 'standalone' PWA_APP_SCOPE = '/' PWA_APP_ORIENTATION = 'portrait'", "'/media/' STATIC_ROOT = './static/' MEDIA_ROOT = './media/' LOGIN_REDIRECT_URL = 'home'", "maskable\" } ] PWA_APP_DIR = 'ltr' PWA_APP_LANG = 'en-US' sentry_sdk.init(", "'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'tracks.wsgi.application'", 
"'django.contrib.messages', 'django.contrib.staticfiles', 'widget_tweaks', 'rest_framework', 'pwa', ] # 'celery', MIDDLEWARE =", "'PAGE_SIZE': 10 } LOGGING = { 'version': 1, 'disable_existing_loggers': False,", "LOGOUT_REDIRECT_URL = 'home' # no email for localhost or staging", "like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) PWA_SERVICE_WORKER_PATH = os.path.join(", "} # Password validation # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ {", "'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), }, 'django': { 'handlers': ['console'], 'level': os.getenv('DJANGO_LOG_LEVEL',", "\"image/png\", \"purpose\": \"any maskable\" } ] PWA_APP_DIR = 'ltr' PWA_APP_LANG", "os.environ.get(\"SQL_USER\", \"user\"), \"PASSWORD\": os.environ.get(\"SQL_PASSWORD\", \"password\"), \"HOST\": os.environ.get(\"SQL_HOST\", \"localhost\"), \"PORT\": os.environ.get(\"SQL_PORT\",", "TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ", "'./media/' LOGIN_REDIRECT_URL = 'home' LOGOUT_REDIRECT_URL = 'home' # no email", "EMAIL_USE_TLS = os.environ.get(\"EMAIL_USE_TLS\") EMAIL_HOST = os.environ.get(\"EMAIL_HOST\") EMAIL_HOST_USER = os.environ.get(\"EMAIL_HOST_USER\") EMAIL_HOST_PASSWORD", "= '/' PWA_APP_ORIENTATION = 'portrait' PWA_APP_START_URL = '/' PWA_APP_ICONS =", "\"ENGINE\": os.environ.get(\"SQL_ENGINE\", \"django.db.backends.sqlite3\"), \"NAME\": os.environ.get(\"SQL_DATABASE\", os.path.join(BASE_DIR, \"db.sqlite3\")), \"USER\": os.environ.get(\"SQL_USER\", \"user\"),", "integrations=[DjangoIntegration()], # If you wish to associate users to errors", "allow read-only access for unauthenticated users. 
'DEFAULT_PERMISSION_CLASSES': [ 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly' ],", "os.environ.get(\"SQL_PASSWORD\", \"password\"), \"HOST\": os.environ.get(\"SQL_HOST\", \"localhost\"), \"PORT\": os.environ.get(\"SQL_PORT\", \"5432\"), } }", "}, }, ] WSGI_APPLICATION = 'tracks.wsgi.application' # Database # https://docs.djangoproject.com/en/2.2/ref/settings/#databases", "= 'redis://redis:6379/0' # BROKER_URL = 'redis://localhost:6379/0' # CELERY_RESULT_BACKEND = 'redis://localhost:6379/'", "{ # Use Django's standard `django.contrib.auth` permissions, # or allow", "}, ] WSGI_APPLICATION = 'tracks.wsgi.application' # Database # https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES", "# }, }, } # STATICFILES_DIRS = [ # os.path.join(BASE_DIR,", "= '/' PWA_APP_ICONS = [ { 'src': '/static/routes/favicon_io/favicon-32x32.png', 'sizes': '32x32',", "Password validation # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',", "= 'home' LOGOUT_REDIRECT_URL = 'home' # no email for localhost", "{ # 'handlers': ['console'], # 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), # },", "sentry_sdk from sentry_sdk.integrations.django import DjangoIntegration # Build paths inside the", "# 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), # }, }, } # STATICFILES_DIRS", "BASE_DIR, 'routes/static/routes/js', 'serviceworker.js') print(os.path.join( BASE_DIR, 'routes/static/routes/js', 'serviceworker.js')) DEBUG = int(os.environ.get(\"DEBUG\",", "= [ 'routes', 'accounts', 'dashboard.apps.DashboardConfig', 'api.apps.ApiConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions',", "'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 
'django.contrib.auth.password_validation.NumericPasswordValidator', },", "'en-US' sentry_sdk.init( dsn=\"https://09ce3488b18c4db19820b873eecc30c4@sentry.io/1878812\", integrations=[DjangoIntegration()], # If you wish to associate", "\"NAME\": os.environ.get(\"SQL_DATABASE\", os.path.join(BASE_DIR, \"db.sqlite3\")), \"USER\": os.environ.get(\"SQL_USER\", \"user\"), \"PASSWORD\": os.environ.get(\"SQL_PASSWORD\", \"password\"),", "SECRET_KEY = os.environ.get(\"SECRET_KEY\", '<KEY>') # 'DJANGO_ALLOWED_HOSTS' should be a single", "# 'DJANGO_ALLOWED_HOSTS' should be a single string of hosts with", "hosts with a space between each. # For example: 'DJANGO_ALLOWED_HOSTS=localhost", "USE_I18N = True USE_L10N = True USE_TZ = True #", "[ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF", "} # 'celery': { # 'handlers': ['console'], # 'level': os.getenv('DJANGO_LOG_LEVEL',", "'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',", "} # STATICFILES_DIRS = [ # os.path.join(BASE_DIR, 'static'), # ]", "this: os.path.join(BASE_DIR, ...) 
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) PWA_SERVICE_WORKER_PATH = os.path.join( BASE_DIR,", "# ] PWA_APP_NAME = 'ChalkTracks' PWA_APP_DESCRIPTION = \"Indoor Climbing Tracker\"", "# CELERY # CELERY_BROKER_URL = 'redis://redis:6379/0' # CELERY_RESULT_BACKEND = 'redis://redis:6379/0'", "= '#000000' PWA_APP_DISPLAY = 'standalone' PWA_APP_SCOPE = '/' PWA_APP_ORIENTATION =", "'celery': { # 'handlers': ['console'], # 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), #", "[ { 'src': '/static/routes/favicon_io/favicon-32x32.png', 'sizes': '32x32', \"type\": \"image/png\", \"purpose\": \"any", "\"user\"), \"PASSWORD\": os.environ.get(\"SQL_PASSWORD\", \"password\"), \"HOST\": os.environ.get(\"SQL_HOST\", \"localhost\"), \"PORT\": os.environ.get(\"SQL_PORT\", \"5432\"),", "BROKER_URL = 'redis://localhost:6379/0' # CELERY_RESULT_BACKEND = 'redis://localhost:6379/' # CELERY_ACCEPT_CONTENT =", "PWA_APP_LANG = 'en-US' sentry_sdk.init( dsn=\"https://09ce3488b18c4db19820b873eecc30c4@sentry.io/1878812\", integrations=[DjangoIntegration()], # If you wish", "'ltr' PWA_APP_LANG = 'en-US' sentry_sdk.init( dsn=\"https://09ce3488b18c4db19820b873eecc30c4@sentry.io/1878812\", integrations=[DjangoIntegration()], # If you", "are using # django.contrib.auth) you may enable sending PII data.", "'django.contrib.staticfiles', 'widget_tweaks', 'rest_framework', 'pwa', ] # 'celery', MIDDLEWARE = [", "# CELERY_RESULT_BACKEND = 'redis://redis:6379/0' # BROKER_URL = 'redis://localhost:6379/0' # CELERY_RESULT_BACKEND", "a space between each. 
# For example: 'DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]'", "'/static/routes/favicon_io/favicon-32x32.png', 'sizes': '32x32', \"type\": \"image/png\", \"purpose\": \"any maskable\" }, {", "EMAIL_HOST_PASSWORD = os.environ.get(\"EMAIL_HOST_PASSWORD\") EMAIL_PORT = os.environ.get(\"EMAIL_PORT\") EMAIL_BACKEND = os.environ.get(\"EMAIL_BACKEND\") DEFAULT_FROM_EMAIL", "}, 'handlers': { 'console': { 'class': 'logging.StreamHandler', 'formatter': 'console' },", "read-only access for unauthenticated users. 'DEFAULT_PERMISSION_CLASSES': [ 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly' ], 'DEFAULT_AUTHENTICATION_CLASSES':", "{ 'handlers': ['console'], 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), }, 'django.request': { 'level':", "https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME':", "os.environ.get(\"SQL_ENGINE\", \"django.db.backends.sqlite3\"), \"NAME\": os.environ.get(\"SQL_DATABASE\", os.path.join(BASE_DIR, \"db.sqlite3\")), \"USER\": os.environ.get(\"SQL_USER\", \"user\"), \"PASSWORD\":", "'rest_framework', 'pwa', ] # 'celery', MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware',", "'handlers': { 'console': { 'class': 'logging.StreamHandler', 'formatter': 'console' }, },", "\"HOST\": os.environ.get(\"SQL_HOST\", \"localhost\"), \"PORT\": os.environ.get(\"SQL_PORT\", \"5432\"), } } # Password", "# https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL = '/static/' MEDIA_URL = '/media/' STATIC_ROOT =", "'INFO'), }, 'django': { 'handlers': ['console'], 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), },", "'standalone' PWA_APP_SCOPE = '/' PWA_APP_ORIENTATION = 'portrait' PWA_APP_START_URL = '/'", "space between each. 
# For example: 'DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]' ALLOWED_HOSTS", "'#000000' PWA_APP_BACKGROUND_COLOR = '#000000' PWA_APP_DISPLAY = 'standalone' PWA_APP_SCOPE = '/'", "'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization #", "# CELERY_ACCEPT_CONTENT = ['application/json'] # CELERY_TASK_SERIALIZER = 'json' # CELERY_RESULT_SERIALIZER", "Use Django's standard `django.contrib.auth` permissions, # or allow read-only access", "{ 'format': '%(levelname)s %(asctime)s %(module)s: %(message)s' }, }, 'handlers': {", "= os.environ.get(\"EMAIL_PORT\") EMAIL_BACKEND = os.environ.get(\"EMAIL_BACKEND\") DEFAULT_FROM_EMAIL = '<EMAIL>' # CELERY", "{ \"src\": \"/static/routes/favicon_io/android-chrome-512x512.png\", \"sizes\": \"512x512\", \"type\": \"image/png\", \"purpose\": \"any maskable\"", "CELERY_RESULT_SERIALIZER = 'json' REST_FRAMEWORK = { # Use Django's standard", "[ 'routes', 'accounts', 'dashboard.apps.DashboardConfig', 'api.apps.ApiConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages',", "'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination', 'PAGE_SIZE': 10 } LOGGING = { 'version': 1,", "os.environ.get(\"SECRET_KEY\", '<KEY>') # 'DJANGO_ALLOWED_HOSTS' should be a single string of", "}, ] # Internationalization # https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE", "# Database # https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES = { \"default\": { \"ENGINE\":", "'%(levelname)s %(asctime)s %(module)s: %(message)s' }, }, 'handlers': { 'console': {", "\"/static/routes/favicon_io/android-chrome-512x512.png\", \"sizes\": \"512x512\", \"type\": \"image/png\", \"purpose\": \"any maskable\" } ]", "access for unauthenticated users. 
'DEFAULT_PERMISSION_CLASSES': [ 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly' ], 'DEFAULT_AUTHENTICATION_CLASSES': (", "['application/json'] # CELERY_TASK_SERIALIZER = 'json' # CELERY_RESULT_SERIALIZER = 'json' REST_FRAMEWORK", "( 'rest_framework.authentication.TokenAuthentication', 'rest_framework.authentication.SessionAuthentication', ), 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination', 'PAGE_SIZE': 10 } LOGGING", "sentry_sdk.integrations.django import DjangoIntegration # Build paths inside the project like", "os.getenv('DJANGO_LOG_LEVEL', 'INFO'), # }, }, } # STATICFILES_DIRS = [", "Tracker\" PWA_APP_THEME_COLOR = '#000000' PWA_APP_BACKGROUND_COLOR = '#000000' PWA_APP_DISPLAY = 'standalone'", "default=0)) SECRET_KEY = os.environ.get(\"SECRET_KEY\", '<KEY>') # 'DJANGO_ALLOWED_HOSTS' should be a", "'<EMAIL>' # CELERY # CELERY_BROKER_URL = 'redis://redis:6379/0' # CELERY_RESULT_BACKEND =", "}, 'django': { 'handlers': ['console'], 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), }, 'django.request':", "'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request',", "}, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ]", "= 'tracks.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [],", "'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', },", "'rest_framework.authentication.TokenAuthentication', 'rest_framework.authentication.SessionAuthentication', ), 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination', 'PAGE_SIZE': 10 } LOGGING =", "\"image/png\", \"purpose\": \"any 
maskable\" }, { \"src\": \"/static/routes/favicon_io/android-chrome-192x192.png\", \"sizes\": \"192x192\",", "\"db.sqlite3\")), \"USER\": os.environ.get(\"SQL_USER\", \"user\"), \"PASSWORD\": os.environ.get(\"SQL_PASSWORD\", \"password\"), \"HOST\": os.environ.get(\"SQL_HOST\", \"localhost\"),", "'class': 'logging.StreamHandler', 'formatter': 'console' }, }, 'loggers': { '': {", "'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'tracks.wsgi.application' # Database", "= 'ChalkTracks' PWA_APP_DESCRIPTION = \"Indoor Climbing Tracker\" PWA_APP_THEME_COLOR = '#000000'", "no email for localhost or staging EMAIL_USE_TLS = os.environ.get(\"EMAIL_USE_TLS\") EMAIL_HOST", "[ # os.path.join(BASE_DIR, 'static'), # ] PWA_APP_NAME = 'ChalkTracks' PWA_APP_DESCRIPTION", "= 'json' # CELERY_RESULT_SERIALIZER = 'json' REST_FRAMEWORK = { #", "# STATICFILES_DIRS = [ # os.path.join(BASE_DIR, 'static'), # ] PWA_APP_NAME", "] # 'celery', MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware',", "'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly' ], 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.TokenAuthentication', 'rest_framework.authentication.SessionAuthentication', ), 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination', 'PAGE_SIZE':", "https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True", "Climbing Tracker\" PWA_APP_THEME_COLOR = '#000000' PWA_APP_BACKGROUND_COLOR = '#000000' PWA_APP_DISPLAY =", "Application definition INSTALLED_APPS = [ 'routes', 'accounts', 'dashboard.apps.DashboardConfig', 'api.apps.ApiConfig', 'django.contrib.admin',", "'DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]' ALLOWED_HOSTS = os.environ.get(\"DJANGO_ALLOWED_HOSTS\", 'localhost').split(\" \") # 
Application", "to associate users to errors (assuming you are using #", "['console'], 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), }, 'django': { 'handlers': ['console'], 'level':", "users. 'DEFAULT_PERMISSION_CLASSES': [ 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly' ], 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.TokenAuthentication', 'rest_framework.authentication.SessionAuthentication', ),", "PWA_APP_DESCRIPTION = \"Indoor Climbing Tracker\" PWA_APP_THEME_COLOR = '#000000' PWA_APP_BACKGROUND_COLOR =", "'static'), # ] PWA_APP_NAME = 'ChalkTracks' PWA_APP_DESCRIPTION = \"Indoor Climbing", "DATABASES = { \"default\": { \"ENGINE\": os.environ.get(\"SQL_ENGINE\", \"django.db.backends.sqlite3\"), \"NAME\": os.environ.get(\"SQL_DATABASE\",", "os.environ.get(\"EMAIL_HOST\") EMAIL_HOST_USER = os.environ.get(\"EMAIL_HOST_USER\") EMAIL_HOST_PASSWORD = os.environ.get(\"EMAIL_HOST_PASSWORD\") EMAIL_PORT = os.environ.get(\"EMAIL_PORT\")", "of hosts with a space between each. 
# For example:", "os.environ.get(\"SQL_DATABASE\", os.path.join(BASE_DIR, \"db.sqlite3\")), \"USER\": os.environ.get(\"SQL_USER\", \"user\"), \"PASSWORD\": os.environ.get(\"SQL_PASSWORD\", \"password\"), \"HOST\":", "AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',", "= '/media/' STATIC_ROOT = './static/' MEDIA_ROOT = './media/' LOGIN_REDIRECT_URL =", "'accounts', 'dashboard.apps.DashboardConfig', 'api.apps.ApiConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'widget_tweaks',", "PWA_APP_BACKGROUND_COLOR = '#000000' PWA_APP_DISPLAY = 'standalone' PWA_APP_SCOPE = '/' PWA_APP_ORIENTATION", "'INFO'), # }, }, } # STATICFILES_DIRS = [ #", "os.path.join(BASE_DIR, \"db.sqlite3\")), \"USER\": os.environ.get(\"SQL_USER\", \"user\"), \"PASSWORD\": os.environ.get(\"SQL_PASSWORD\", \"password\"), \"HOST\": os.environ.get(\"SQL_HOST\",", "= 'redis://localhost:6379/' # CELERY_ACCEPT_CONTENT = ['application/json'] # CELERY_TASK_SERIALIZER = 'json'", "'formatters': { 'console': { 'format': '%(levelname)s %(asctime)s %(module)s: %(message)s' },", "] PWA_APP_DIR = 'ltr' PWA_APP_LANG = 'en-US' sentry_sdk.init( dsn=\"https://09ce3488b18c4db19820b873eecc30c4@sentry.io/1878812\", integrations=[DjangoIntegration()],", "'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization", "localhost or staging EMAIL_USE_TLS = os.environ.get(\"EMAIL_USE_TLS\") EMAIL_HOST = os.environ.get(\"EMAIL_HOST\") EMAIL_HOST_USER", "}, } # STATICFILES_DIRS = [ # os.path.join(BASE_DIR, 'static'), #", "\"purpose\": \"any maskable\" }, { \"src\": \"/static/routes/favicon_io/android-chrome-512x512.png\", \"sizes\": \"512x512\", 
\"type\":", "CELERY_ACCEPT_CONTENT = ['application/json'] # CELERY_TASK_SERIALIZER = 'json' # CELERY_RESULT_SERIALIZER =", "'level': 'INFO', 'handlers': ['console'] } # 'celery': { # 'handlers':", "# no email for localhost or staging EMAIL_USE_TLS = os.environ.get(\"EMAIL_USE_TLS\")", "'routes/static/routes/js', 'serviceworker.js') print(os.path.join( BASE_DIR, 'routes/static/routes/js', 'serviceworker.js')) DEBUG = int(os.environ.get(\"DEBUG\", default=0))", "'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages',", "'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug',", "= 'ltr' PWA_APP_LANG = 'en-US' sentry_sdk.init( dsn=\"https://09ce3488b18c4db19820b873eecc30c4@sentry.io/1878812\", integrations=[DjangoIntegration()], # If", "= 'tracks.wsgi.application' # Database # https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES = { \"default\":", "unauthenticated users. 
'DEFAULT_PERMISSION_CLASSES': [ 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly' ], 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.TokenAuthentication', 'rest_framework.authentication.SessionAuthentication',", "'32x32', \"type\": \"image/png\", \"purpose\": \"any maskable\" }, { \"src\": \"/static/routes/favicon_io/android-chrome-192x192.png\",", "} LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': {", "'logging.StreamHandler', 'formatter': 'console' }, }, 'loggers': { '': { 'handlers':", "PWA_APP_THEME_COLOR = '#000000' PWA_APP_BACKGROUND_COLOR = '#000000' PWA_APP_DISPLAY = 'standalone' PWA_APP_SCOPE", "PWA_APP_DIR = 'ltr' PWA_APP_LANG = 'en-US' sentry_sdk.init( dsn=\"https://09ce3488b18c4db19820b873eecc30c4@sentry.io/1878812\", integrations=[DjangoIntegration()], #", "'json' # CELERY_RESULT_SERIALIZER = 'json' REST_FRAMEWORK = { # Use", "with a space between each. # For example: 'DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1", "\"PASSWORD\": os.environ.get(\"SQL_PASSWORD\", \"password\"), \"HOST\": os.environ.get(\"SQL_HOST\", \"localhost\"), \"PORT\": os.environ.get(\"SQL_PORT\", \"5432\"), }", "[::1]' ALLOWED_HOSTS = os.environ.get(\"DJANGO_ALLOWED_HOSTS\", 'localhost').split(\" \") # Application definition INSTALLED_APPS", "# Internationalization # https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC'", "os.environ.get(\"SQL_PORT\", \"5432\"), } } # Password validation # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS", "'INFO'), }, 'django.request': { 'level': 'INFO', 'handlers': ['console'] } #", "DjangoIntegration # Build paths inside the project like this: os.path.join(BASE_DIR,", "BASE_DIR, 'routes/static/routes/js', 'serviceworker.js')) DEBUG = int(os.environ.get(\"DEBUG\", default=0)) SECRET_KEY = os.environ.get(\"SECRET_KEY\",", "'json' REST_FRAMEWORK = { # Use Django's standard `django.contrib.auth` 
permissions,", "BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) PWA_SERVICE_WORKER_PATH = os.path.join( BASE_DIR, 'routes/static/routes/js', 'serviceworker.js') print(os.path.join(", "\"USER\": os.environ.get(\"SQL_USER\", \"user\"), \"PASSWORD\": os.environ.get(\"SQL_PASSWORD\", \"password\"), \"HOST\": os.environ.get(\"SQL_HOST\", \"localhost\"), \"PORT\":", "import os import sentry_sdk from sentry_sdk.integrations.django import DjangoIntegration # Build", "] WSGI_APPLICATION = 'tracks.wsgi.application' # Database # https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES =", "'redis://localhost:6379/0' # CELERY_RESULT_BACKEND = 'redis://localhost:6379/' # CELERY_ACCEPT_CONTENT = ['application/json'] #", "'home' LOGOUT_REDIRECT_URL = 'home' # no email for localhost or", "os.environ.get(\"EMAIL_USE_TLS\") EMAIL_HOST = os.environ.get(\"EMAIL_HOST\") EMAIL_HOST_USER = os.environ.get(\"EMAIL_HOST_USER\") EMAIL_HOST_PASSWORD = os.environ.get(\"EMAIL_HOST_PASSWORD\")", "}, { \"src\": \"/static/routes/favicon_io/android-chrome-512x512.png\", \"sizes\": \"512x512\", \"type\": \"image/png\", \"purpose\": \"any", "['console'], # 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), # }, }, } #", "\"django.db.backends.sqlite3\"), \"NAME\": os.environ.get(\"SQL_DATABASE\", os.path.join(BASE_DIR, \"db.sqlite3\")), \"USER\": os.environ.get(\"SQL_USER\", \"user\"), \"PASSWORD\": os.environ.get(\"SQL_PASSWORD\",", "# CELERY_BROKER_URL = 'redis://redis:6379/0' # CELERY_RESULT_BACKEND = 'redis://redis:6379/0' # BROKER_URL", "'console': { 'class': 'logging.StreamHandler', 'formatter': 'console' }, }, 'loggers': {", "= './static/' MEDIA_ROOT = './media/' LOGIN_REDIRECT_URL = 'home' LOGOUT_REDIRECT_URL =", "'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'widget_tweaks', 'rest_framework', 'pwa', ] # 'celery', MIDDLEWARE", "'rest_framework.pagination.LimitOffsetPagination', 'PAGE_SIZE': 10 } LOGGING = { 'version': 1, 
'disable_existing_loggers':", "\"type\": \"image/png\", \"purpose\": \"any maskable\" }, { \"src\": \"/static/routes/favicon_io/android-chrome-192x192.png\", \"sizes\":", "'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'tracks.urls' TEMPLATES = [", "] ROOT_URLCONF = 'tracks.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates',", "{ 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'console': { 'format':", "= os.environ.get(\"EMAIL_USE_TLS\") EMAIL_HOST = os.environ.get(\"EMAIL_HOST\") EMAIL_HOST_USER = os.environ.get(\"EMAIL_HOST_USER\") EMAIL_HOST_PASSWORD =", "] # Internationalization # https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE =", "}, }, 'handlers': { 'console': { 'class': 'logging.StreamHandler', 'formatter': 'console'", "users to errors (assuming you are using # django.contrib.auth) you", "USE_L10N = True USE_TZ = True # Static files (CSS,", "string of hosts with a space between each. 
# For", "EMAIL_HOST_USER = os.environ.get(\"EMAIL_HOST_USER\") EMAIL_HOST_PASSWORD = os.environ.get(\"EMAIL_HOST_PASSWORD\") EMAIL_PORT = os.environ.get(\"EMAIL_PORT\") EMAIL_BACKEND", "# BROKER_URL = 'redis://localhost:6379/0' # CELERY_RESULT_BACKEND = 'redis://localhost:6379/' # CELERY_ACCEPT_CONTENT", "127.0.0.1 [::1]' ALLOWED_HOSTS = os.environ.get(\"DJANGO_ALLOWED_HOSTS\", 'localhost').split(\" \") # Application definition", "True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ],", "{ 'src': '/static/routes/favicon_io/favicon-32x32.png', 'sizes': '32x32', \"type\": \"image/png\", \"purpose\": \"any maskable\"", "{ 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE", "CELERY_BROKER_URL = 'redis://redis:6379/0' # CELERY_RESULT_BACKEND = 'redis://redis:6379/0' # BROKER_URL =", "'': { 'handlers': ['console'], 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), }, 'django': {", "project like this: os.path.join(BASE_DIR, ...) 
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) PWA_SERVICE_WORKER_PATH =", "# CELERY_TASK_SERIALIZER = 'json' # CELERY_RESULT_SERIALIZER = 'json' REST_FRAMEWORK =", "Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL = '/static/'", "https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL = '/static/' MEDIA_URL = '/media/' STATIC_ROOT = './static/'", "= [ { 'src': '/static/routes/favicon_io/favicon-32x32.png', 'sizes': '32x32', \"type\": \"image/png\", \"purpose\":", "= '<EMAIL>' # CELERY # CELERY_BROKER_URL = 'redis://redis:6379/0' # CELERY_RESULT_BACKEND", "\"any maskable\" } ] PWA_APP_DIR = 'ltr' PWA_APP_LANG = 'en-US'", "'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'tracks.urls' TEMPLATES = [ {", "os.path.join(BASE_DIR, 'static'), # ] PWA_APP_NAME = 'ChalkTracks' PWA_APP_DESCRIPTION = \"Indoor", "CELERY_RESULT_BACKEND = 'redis://redis:6379/0' # BROKER_URL = 'redis://localhost:6379/0' # CELERY_RESULT_BACKEND =", "%(module)s: %(message)s' }, }, 'handlers': { 'console': { 'class': 'logging.StreamHandler',", "associate users to errors (assuming you are using # django.contrib.auth)", "For example: 'DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]' ALLOWED_HOSTS = os.environ.get(\"DJANGO_ALLOWED_HOSTS\", 'localhost').split(\" \")", "'loggers': { '': { 'handlers': ['console'], 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), },", "= ['application/json'] # CELERY_TASK_SERIALIZER = 'json' # CELERY_RESULT_SERIALIZER = 'json'", "ALLOWED_HOSTS = os.environ.get(\"DJANGO_ALLOWED_HOSTS\", 'localhost').split(\" \") # Application definition INSTALLED_APPS =", "PWA_SERVICE_WORKER_PATH = os.path.join( BASE_DIR, 'routes/static/routes/js', 'serviceworker.js') print(os.path.join( BASE_DIR, 'routes/static/routes/js', 'serviceworker.js'))", "= 'UTC' USE_I18N = True USE_L10N = True USE_TZ =", "for 
unauthenticated users. 'DEFAULT_PERMISSION_CLASSES': [ 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly' ], 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.TokenAuthentication',", "'serviceworker.js')) DEBUG = int(os.environ.get(\"DEBUG\", default=0)) SECRET_KEY = os.environ.get(\"SECRET_KEY\", '<KEY>') #", "Django's standard `django.contrib.auth` permissions, # or allow read-only access for", "'widget_tweaks', 'rest_framework', 'pwa', ] # 'celery', MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware',", "(assuming you are using # django.contrib.auth) you may enable sending", "os.getenv('DJANGO_LOG_LEVEL', 'INFO'), }, 'django.request': { 'level': 'INFO', 'handlers': ['console'] }", "1, 'disable_existing_loggers': False, 'formatters': { 'console': { 'format': '%(levelname)s %(asctime)s", "[ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': {", "from sentry_sdk.integrations.django import DjangoIntegration # Build paths inside the project", "os.environ.get(\"DJANGO_ALLOWED_HOSTS\", 'localhost').split(\" \") # Application definition INSTALLED_APPS = [ 'routes',", "# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, {", "'redis://localhost:6379/' # CELERY_ACCEPT_CONTENT = ['application/json'] # CELERY_TASK_SERIALIZER = 'json' #", "= os.path.join( BASE_DIR, 'routes/static/routes/js', 'serviceworker.js') print(os.path.join( BASE_DIR, 'routes/static/routes/js', 'serviceworker.js')) DEBUG", "files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL = '/static/' MEDIA_URL", "= [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 
'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ]", "True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL", "}, 'loggers': { '': { 'handlers': ['console'], 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),", "import DjangoIntegration # Build paths inside the project like this:", "standard `django.contrib.auth` permissions, # or allow read-only access for unauthenticated", "# Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL =", "} } # Password validation # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [", "# If you wish to associate users to errors (assuming", "= '/static/' MEDIA_URL = '/media/' STATIC_ROOT = './static/' MEDIA_ROOT =", "'api.apps.ApiConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'widget_tweaks', 'rest_framework', 'pwa',", "= 'standalone' PWA_APP_SCOPE = '/' PWA_APP_ORIENTATION = 'portrait' PWA_APP_START_URL =", "[], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth',", "{ 'console': { 'class': 'logging.StreamHandler', 'formatter': 'console' }, }, 'loggers':", "\"src\": \"/static/routes/favicon_io/android-chrome-192x192.png\", \"sizes\": \"192x192\", \"type\": \"image/png\", \"purpose\": \"any maskable\" },", "= \"Indoor Climbing Tracker\" PWA_APP_THEME_COLOR = '#000000' PWA_APP_BACKGROUND_COLOR = '#000000'", "= True USE_TZ = True # Static files (CSS, JavaScript,", "should be a single string of hosts with a space", "), 'DEFAULT_PAGINATION_CLASS': 
'rest_framework.pagination.LimitOffsetPagination', 'PAGE_SIZE': 10 } LOGGING = { 'version':", "'pwa', ] # 'celery', MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware',", "'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',", "or staging EMAIL_USE_TLS = os.environ.get(\"EMAIL_USE_TLS\") EMAIL_HOST = os.environ.get(\"EMAIL_HOST\") EMAIL_HOST_USER =", "'handlers': ['console'], # 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), # }, }, }", "\"5432\"), } } # Password validation # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS =", "# Use Django's standard `django.contrib.auth` permissions, # or allow read-only", "maskable\" }, { \"src\": \"/static/routes/favicon_io/android-chrome-512x512.png\", \"sizes\": \"512x512\", \"type\": \"image/png\", \"purpose\":", "= 'en-US' sentry_sdk.init( dsn=\"https://09ce3488b18c4db19820b873eecc30c4@sentry.io/1878812\", integrations=[DjangoIntegration()], # If you wish to", "between each. # For example: 'DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]' ALLOWED_HOSTS =", "[ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, {", "each. 
# For example: 'DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]' ALLOWED_HOSTS = os.environ.get(\"DJANGO_ALLOWED_HOSTS\",", "= [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS':", "# For example: 'DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]' ALLOWED_HOSTS = os.environ.get(\"DJANGO_ALLOWED_HOSTS\", 'localhost').split(\"", "\"sizes\": \"512x512\", \"type\": \"image/png\", \"purpose\": \"any maskable\" } ] PWA_APP_DIR", "validation # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', },", "USE_TZ = True # Static files (CSS, JavaScript, Images) #", "CELERY_RESULT_BACKEND = 'redis://localhost:6379/' # CELERY_ACCEPT_CONTENT = ['application/json'] # CELERY_TASK_SERIALIZER =", "{ \"ENGINE\": os.environ.get(\"SQL_ENGINE\", \"django.db.backends.sqlite3\"), \"NAME\": os.environ.get(\"SQL_DATABASE\", os.path.join(BASE_DIR, \"db.sqlite3\")), \"USER\": os.environ.get(\"SQL_USER\",", "= './media/' LOGIN_REDIRECT_URL = 'home' LOGOUT_REDIRECT_URL = 'home' # no", "EMAIL_PORT = os.environ.get(\"EMAIL_PORT\") EMAIL_BACKEND = os.environ.get(\"EMAIL_BACKEND\") DEFAULT_FROM_EMAIL = '<EMAIL>' #", "'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], },", "'sizes': '32x32', \"type\": \"image/png\", \"purpose\": \"any maskable\" }, { \"src\":", "{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors':", "# Application definition INSTALLED_APPS = [ 'routes', 'accounts', 'dashboard.apps.DashboardConfig', 'api.apps.ApiConfig',", "'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), }, 'django.request': { 'level': 'INFO', 'handlers': ['console']", "], }, }, 
] WSGI_APPLICATION = 'tracks.wsgi.application' # Database #", "os import sentry_sdk from sentry_sdk.integrations.django import DjangoIntegration # Build paths", "}, }, } # STATICFILES_DIRS = [ # os.path.join(BASE_DIR, 'static'),", "os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) PWA_SERVICE_WORKER_PATH = os.path.join( BASE_DIR, 'routes/static/routes/js',", "'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'widget_tweaks', 'rest_framework', 'pwa', ] #", "'home' # no email for localhost or staging EMAIL_USE_TLS =", "'handlers': ['console'], 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), }, 'django.request': { 'level': 'INFO',", "'formatter': 'console' }, }, 'loggers': { '': { 'handlers': ['console'],", "sentry_sdk.init( dsn=\"https://09ce3488b18c4db19820b873eecc30c4@sentry.io/1878812\", integrations=[DjangoIntegration()], # If you wish to associate users", "'handlers': ['console'] } # 'celery': { # 'handlers': ['console'], #", "'tracks.wsgi.application' # Database # https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES = { \"default\": {", "True USE_L10N = True USE_TZ = True # Static files", "https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES = { \"default\": { \"ENGINE\": os.environ.get(\"SQL_ENGINE\", \"django.db.backends.sqlite3\"), \"NAME\":", "\"default\": { \"ENGINE\": os.environ.get(\"SQL_ENGINE\", \"django.db.backends.sqlite3\"), \"NAME\": os.environ.get(\"SQL_DATABASE\", os.path.join(BASE_DIR, \"db.sqlite3\")), \"USER\":", "PWA_APP_DISPLAY = 'standalone' PWA_APP_SCOPE = '/' PWA_APP_ORIENTATION = 'portrait' PWA_APP_START_URL", "'ChalkTracks' PWA_APP_DESCRIPTION = \"Indoor Climbing Tracker\" PWA_APP_THEME_COLOR = '#000000' PWA_APP_BACKGROUND_COLOR", "definition INSTALLED_APPS = [ 'routes', 'accounts', 'dashboard.apps.DashboardConfig', 'api.apps.ApiConfig', 'django.contrib.admin', 
'django.contrib.auth',", "'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.TokenAuthentication', 'rest_framework.authentication.SessionAuthentication', ), 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination', 'PAGE_SIZE': 10 }", "= [ # os.path.join(BASE_DIR, 'static'), # ] PWA_APP_NAME = 'ChalkTracks'", "'console' }, }, 'loggers': { '': { 'handlers': ['console'], 'level':", "'dashboard.apps.DashboardConfig', 'api.apps.ApiConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'widget_tweaks', 'rest_framework',", "\"PORT\": os.environ.get(\"SQL_PORT\", \"5432\"), } } # Password validation # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators", "}, }, 'loggers': { '': { 'handlers': ['console'], 'level': os.getenv('DJANGO_LOG_LEVEL',", "CELERY_TASK_SERIALIZER = 'json' # CELERY_RESULT_SERIALIZER = 'json' REST_FRAMEWORK = {", "PWA_APP_NAME = 'ChalkTracks' PWA_APP_DESCRIPTION = \"Indoor Climbing Tracker\" PWA_APP_THEME_COLOR =", "= '#000000' PWA_APP_BACKGROUND_COLOR = '#000000' PWA_APP_DISPLAY = 'standalone' PWA_APP_SCOPE =", "'disable_existing_loggers': False, 'formatters': { 'console': { 'format': '%(levelname)s %(asctime)s %(module)s:", "'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = 'en-us'", "# 'handlers': ['console'], # 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), # }, },", "wish to associate users to errors (assuming you are using", "\"purpose\": \"any maskable\" }, { \"src\": \"/static/routes/favicon_io/android-chrome-192x192.png\", \"sizes\": \"192x192\", \"type\":", "...) 
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) PWA_SERVICE_WORKER_PATH = os.path.join( BASE_DIR, 'routes/static/routes/js', 'serviceworker.js')", "'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), # }, }, } # STATICFILES_DIRS =", "'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ]", "os.path.join( BASE_DIR, 'routes/static/routes/js', 'serviceworker.js') print(os.path.join( BASE_DIR, 'routes/static/routes/js', 'serviceworker.js')) DEBUG =", "}, { \"src\": \"/static/routes/favicon_io/android-chrome-192x192.png\", \"sizes\": \"192x192\", \"type\": \"image/png\", \"purpose\": \"any", "'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'tracks.urls' TEMPLATES =", "= os.environ.get(\"EMAIL_HOST\") EMAIL_HOST_USER = os.environ.get(\"EMAIL_HOST_USER\") EMAIL_HOST_PASSWORD = os.environ.get(\"EMAIL_HOST_PASSWORD\") EMAIL_PORT =", "{ 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, },", "'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'widget_tweaks', 'rest_framework', 'pwa', ]", "{ '': { 'handlers': ['console'], 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), }, 'django':", "= { # Use Django's standard `django.contrib.auth` permissions, # or", "'/' PWA_APP_ORIENTATION = 'portrait' PWA_APP_START_URL = '/' PWA_APP_ICONS = [", "\"any maskable\" }, { \"src\": \"/static/routes/favicon_io/android-chrome-192x192.png\", 
\"sizes\": \"192x192\", \"type\": \"image/png\",", "`django.contrib.auth` permissions, # or allow read-only access for unauthenticated users.", "= int(os.environ.get(\"DEBUG\", default=0)) SECRET_KEY = os.environ.get(\"SECRET_KEY\", '<KEY>') # 'DJANGO_ALLOWED_HOSTS' should", "MEDIA_URL = '/media/' STATIC_ROOT = './static/' MEDIA_ROOT = './media/' LOGIN_REDIRECT_URL", "}, 'django.request': { 'level': 'INFO', 'handlers': ['console'] } # 'celery':", "you wish to associate users to errors (assuming you are", "'format': '%(levelname)s %(asctime)s %(module)s: %(message)s' }, }, 'handlers': { 'console':", "'src': '/static/routes/favicon_io/favicon-32x32.png', 'sizes': '32x32', \"type\": \"image/png\", \"purpose\": \"any maskable\" },", "= os.environ.get(\"EMAIL_HOST_PASSWORD\") EMAIL_PORT = os.environ.get(\"EMAIL_PORT\") EMAIL_BACKEND = os.environ.get(\"EMAIL_BACKEND\") DEFAULT_FROM_EMAIL =", "'DJANGO_ALLOWED_HOSTS' should be a single string of hosts with a", "print(os.path.join( BASE_DIR, 'routes/static/routes/js', 'serviceworker.js')) DEBUG = int(os.environ.get(\"DEBUG\", default=0)) SECRET_KEY =", "'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF =", "'UTC' USE_I18N = True USE_L10N = True USE_TZ = True", "errors (assuming you are using # django.contrib.auth) you may enable", "\"192x192\", \"type\": \"image/png\", \"purpose\": \"any maskable\" }, { \"src\": \"/static/routes/favicon_io/android-chrome-512x512.png\",", "EMAIL_BACKEND = os.environ.get(\"EMAIL_BACKEND\") DEFAULT_FROM_EMAIL = '<EMAIL>' # CELERY # CELERY_BROKER_URL", "[ 'django.template.context_processors.debug', 'django.template.context_processors.request', 
'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION", "LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N", "'django.request': { 'level': 'INFO', 'handlers': ['console'] } # 'celery': {", "'./static/' MEDIA_ROOT = './media/' LOGIN_REDIRECT_URL = 'home' LOGOUT_REDIRECT_URL = 'home'", "WSGI_APPLICATION = 'tracks.wsgi.application' # Database # https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES = {", "= [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', },", "\"any maskable\" }, { \"src\": \"/static/routes/favicon_io/android-chrome-512x512.png\", \"sizes\": \"512x512\", \"type\": \"image/png\",", "10 } LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters':", "{ \"default\": { \"ENGINE\": os.environ.get(\"SQL_ENGINE\", \"django.db.backends.sqlite3\"), \"NAME\": os.environ.get(\"SQL_DATABASE\", os.path.join(BASE_DIR, \"db.sqlite3\")),", "STATIC_ROOT = './static/' MEDIA_ROOT = './media/' LOGIN_REDIRECT_URL = 'home' LOGOUT_REDIRECT_URL", "= 'portrait' PWA_APP_START_URL = '/' PWA_APP_ICONS = [ { 'src':", "\"type\": \"image/png\", \"purpose\": \"any maskable\" }, { \"src\": \"/static/routes/favicon_io/android-chrome-512x512.png\", \"sizes\":", "{ \"src\": \"/static/routes/favicon_io/android-chrome-192x192.png\", \"sizes\": \"192x192\", \"type\": \"image/png\", \"purpose\": \"any maskable\"", "{ 'class': 'logging.StreamHandler', 'formatter': 'console' }, }, 'loggers': { '':", "= { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'console': {", "\") # Application definition INSTALLED_APPS = [ 'routes', 'accounts', 'dashboard.apps.DashboardConfig',", "'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'tracks.wsgi.application' #", 
"%(message)s' }, }, 'handlers': { 'console': { 'class': 'logging.StreamHandler', 'formatter':", "int(os.environ.get(\"DEBUG\", default=0)) SECRET_KEY = os.environ.get(\"SECRET_KEY\", '<KEY>') # 'DJANGO_ALLOWED_HOSTS' should be", "'INFO', 'handlers': ['console'] } # 'celery': { # 'handlers': ['console'],", "Database # https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES = { \"default\": { \"ENGINE\": os.environ.get(\"SQL_ENGINE\",", "'routes/static/routes/js', 'serviceworker.js')) DEBUG = int(os.environ.get(\"DEBUG\", default=0)) SECRET_KEY = os.environ.get(\"SECRET_KEY\", '<KEY>')", "'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'tracks.urls'", "'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'widget_tweaks', 'rest_framework', 'pwa', ] # 'celery',", "you are using # django.contrib.auth) you may enable sending PII", "'serviceworker.js') print(os.path.join( BASE_DIR, 'routes/static/routes/js', 'serviceworker.js')) DEBUG = int(os.environ.get(\"DEBUG\", default=0)) SECRET_KEY", "'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION =", "MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware',", "Internationalization # 
https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N", "for localhost or staging EMAIL_USE_TLS = os.environ.get(\"EMAIL_USE_TLS\") EMAIL_HOST = os.environ.get(\"EMAIL_HOST\")", "'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'tracks.urls' TEMPLATES = [ { 'BACKEND':", "DEFAULT_FROM_EMAIL = '<EMAIL>' # CELERY # CELERY_BROKER_URL = 'redis://redis:6379/0' #", "PWA_APP_START_URL = '/' PWA_APP_ICONS = [ { 'src': '/static/routes/favicon_io/favicon-32x32.png', 'sizes':", "# https://docs.djangoproject.com/en/2.2/ref/settings/#databases DATABASES = { \"default\": { \"ENGINE\": os.environ.get(\"SQL_ENGINE\", \"django.db.backends.sqlite3\"),", "{ 'console': { 'format': '%(levelname)s %(asctime)s %(module)s: %(message)s' }, },", "= 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N =", "# os.path.join(BASE_DIR, 'static'), # ] PWA_APP_NAME = 'ChalkTracks' PWA_APP_DESCRIPTION =", "= 'redis://localhost:6379/0' # CELERY_RESULT_BACKEND = 'redis://localhost:6379/' # CELERY_ACCEPT_CONTENT = ['application/json']", "# CELERY_RESULT_BACKEND = 'redis://localhost:6379/' # CELERY_ACCEPT_CONTENT = ['application/json'] # CELERY_TASK_SERIALIZER", "'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True", "'/static/' MEDIA_URL = '/media/' STATIC_ROOT = './static/' MEDIA_ROOT = './media/'", "email for localhost or staging EMAIL_USE_TLS = os.environ.get(\"EMAIL_USE_TLS\") EMAIL_HOST =", "or allow read-only access for unauthenticated users. 
'DEFAULT_PERMISSION_CLASSES': [ 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'", "'version': 1, 'disable_existing_loggers': False, 'formatters': { 'console': { 'format': '%(levelname)s", "'redis://redis:6379/0' # BROKER_URL = 'redis://localhost:6379/0' # CELERY_RESULT_BACKEND = 'redis://localhost:6379/' #", "'/' PWA_APP_ICONS = [ { 'src': '/static/routes/favicon_io/favicon-32x32.png', 'sizes': '32x32', \"type\":", "be a single string of hosts with a space between", "permissions, # or allow read-only access for unauthenticated users. 'DEFAULT_PERMISSION_CLASSES':", "\"purpose\": \"any maskable\" } ] PWA_APP_DIR = 'ltr' PWA_APP_LANG =", "# https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N =", "# django.contrib.auth) you may enable sending PII data. send_default_pii=True )", "'portrait' PWA_APP_START_URL = '/' PWA_APP_ICONS = [ { 'src': '/static/routes/favicon_io/favicon-32x32.png',", "EMAIL_HOST = os.environ.get(\"EMAIL_HOST\") EMAIL_HOST_USER = os.environ.get(\"EMAIL_HOST_USER\") EMAIL_HOST_PASSWORD = os.environ.get(\"EMAIL_HOST_PASSWORD\") EMAIL_PORT", "CELERY # CELERY_BROKER_URL = 'redis://redis:6379/0' # CELERY_RESULT_BACKEND = 'redis://redis:6379/0' #", "'celery', MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware',", "'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [", "\"/static/routes/favicon_io/android-chrome-192x192.png\", \"sizes\": \"192x192\", \"type\": \"image/png\", \"purpose\": \"any maskable\" }, {", "to errors (assuming you are using # django.contrib.auth) you may", "'tracks.urls' TEMPLATES = [ { 'BACKEND': 
'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS':", "False, 'formatters': { 'console': { 'format': '%(levelname)s %(asctime)s %(module)s: %(message)s'", "Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR", "Images) # https://docs.djangoproject.com/en/2.2/howto/static-files/ STATIC_URL = '/static/' MEDIA_URL = '/media/' STATIC_ROOT", "REST_FRAMEWORK = { # Use Django's standard `django.contrib.auth` permissions, #", "example: 'DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]' ALLOWED_HOSTS = os.environ.get(\"DJANGO_ALLOWED_HOSTS\", 'localhost').split(\" \") #", "%(asctime)s %(module)s: %(message)s' }, }, 'handlers': { 'console': { 'class':", "os.environ.get(\"EMAIL_HOST_USER\") EMAIL_HOST_PASSWORD = os.environ.get(\"EMAIL_HOST_PASSWORD\") EMAIL_PORT = os.environ.get(\"EMAIL_PORT\") EMAIL_BACKEND = os.environ.get(\"EMAIL_BACKEND\")", "True USE_TZ = True # Static files (CSS, JavaScript, Images)", "[ 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly' ], 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.TokenAuthentication', 'rest_framework.authentication.SessionAuthentication', ), 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',", "'DEFAULT_PERMISSION_CLASSES': [ 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly' ], 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.TokenAuthentication', 'rest_framework.authentication.SessionAuthentication', ), 'DEFAULT_PAGINATION_CLASS':", "\"sizes\": \"192x192\", \"type\": \"image/png\", \"purpose\": \"any maskable\" }, { \"src\":", "MEDIA_ROOT = './media/' LOGIN_REDIRECT_URL = 'home' LOGOUT_REDIRECT_URL = 'home' #", "\"Indoor Climbing Tracker\" PWA_APP_THEME_COLOR = '#000000' PWA_APP_BACKGROUND_COLOR = '#000000' PWA_APP_DISPLAY", "# 'celery', MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 
'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware',", "{ 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME':", "'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'tracks.urls' TEMPLATES", "'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.2/topics/i18n/ LANGUAGE_CODE =", "dsn=\"https://09ce3488b18c4db19820b873eecc30c4@sentry.io/1878812\", integrations=[DjangoIntegration()], # If you wish to associate users to", "['console'] } # 'celery': { # 'handlers': ['console'], # 'level':", "\"localhost\"), \"PORT\": os.environ.get(\"SQL_PORT\", \"5432\"), } } # Password validation #", "TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True,", "'django': { 'handlers': ['console'], 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), }, 'django.request': {", "PWA_APP_ORIENTATION = 'portrait' PWA_APP_START_URL = '/' PWA_APP_ICONS = [ {", "\"password\"), \"HOST\": os.environ.get(\"SQL_HOST\", \"localhost\"), \"PORT\": os.environ.get(\"SQL_PORT\", \"5432\"), } } #", "# Password validation # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME':", "}, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.2/topics/i18n/", "= True USE_L10N = True USE_TZ = True # Static", "os.getenv('DJANGO_LOG_LEVEL', 'INFO'), }, 'django': { 'handlers': ['console'], 'level': 
os.getenv('DJANGO_LOG_LEVEL', 'INFO'),", "= 'redis://redis:6379/0' # CELERY_RESULT_BACKEND = 'redis://redis:6379/0' # BROKER_URL = 'redis://localhost:6379/0'" ]
[ "Lighting Systems ~~~~~~~~~~~~~~~~~~~~~~~~ The :class:`pyvista.Plotter` class comes with three options", "pv from pyvista import examples # default: light kit plotter", "plotter.add_mesh(mesh, color='white') plotter.show() ############################################################################### # Again we can check what", "# especially clear when exploring the figures interactively. plotter =", "lighting from scratch by disabling any lighting # on plotter", "color='white') plotter.show() ############################################################################### # Again we can check what kind", "no lighting. With meshes that don't have depth information encoded", "closer to the side. This becomes # especially clear when", "can check what type of lights this lighting comprises: \"\"\"", "a different character to the # figure, in this case", "import pyvista as pv from pyvista import examples # default:", "Light kit ========= The default ``lighting='light kit'`` option recreates a", "plotter = pv.Plotter() plotter.add_mesh(mesh, color='white') plotter.show() ############################################################################### # Three-lights illumination", "light kit plotter = pv.Plotter() light_types = [light.light_type for light", "The :class:`pyvista.Plotter` class comes with three options for the default", "exploring the figures interactively. plotter = pv.Plotter(lighting='three lights') plotter.add_mesh(mesh, color='white')", "by disabling any lighting # on plotter initialization. Adding a", "to objects having larger regions in # shadow: plotter =", "The default ``lighting='light kit'`` option recreates a lighting setup that", "Again we can check what kind of lights this setting", "regions in # shadow: plotter = pv.Plotter(lighting='none') plotter.add_mesh(mesh, color='white') light", "especially clear when exploring the figures interactively. 
plotter = pv.Plotter(lighting='three", "# ========================= # # Switching to three-lights illumination gives a", "# Switching to three-lights illumination gives a different character to", "3 import pyvista as pv from pyvista import examples #", "plotter.show() ############################################################################### # Again we can check what kind of", "three options for the default lighting system: * a light", "class comes with three options for the default lighting system:", "* a light kit consisting of a headlight and four", "becomes paramount for accurate visualization. Light kit ========= The default", "# # We can introduce our own lighting from scratch", "scene light to a scene will # often result in", "in plotter.renderer.lights] # Remove from plotters so output is not", "lighting. With meshes that don't have depth information encoded in", "when exploring the figures interactively. plotter = pv.Plotter(lighting='three lights') plotter.add_mesh(mesh,", "docs pv.plotting._ALL_PLOTTERS.clear() light_types ############################################################################### # Add a white terrain to", "recreates a lighting setup that corresponds to a ``vtk.vtkLightKit``. We", "information encoded in their color the importance of an appropriate", "initialization. Adding a single scene light to a scene will", "in docs pv.plotting._ALL_PLOTTERS.clear() light_types ############################################################################### # Custom lighting # ===============", "appropriate lighting setup becomes paramount for accurate visualization. Light kit", "gives a different character to the # figure, in this", "* no lighting. 
With meshes that don't have depth information", "kit'`` option recreates a lighting setup that corresponds to a", "to the scene: mesh = examples.download_st_helens().warp_by_scalar() plotter = pv.Plotter() plotter.add_mesh(mesh,", "# Remove from plotters so output is not produced in", "we can check what kind of lights this setting uses:", "this setting uses: plotter = pv.Plotter(lighting='three lights') light_types = [light.light_type", "light to a scene will # often result in ominous", "from plotters so output is not produced in docs pv.plotting._ALL_PLOTTERS.clear()", "larger regions in # shadow: plotter = pv.Plotter(lighting='none') plotter.add_mesh(mesh, color='white')", "kind of lights this setting uses: plotter = pv.Plotter(lighting='three lights')", "``lighting='light kit'`` option recreates a lighting setup that corresponds to", "pv.Plotter() plotter.add_mesh(mesh, color='white') plotter.show() ############################################################################### # Three-lights illumination # =========================", "four camera lights, * an illumination system containing three lights", "setup becomes paramount for accurate visualization. Light kit ========= The", "a single scene light to a scene will # often", "# shadow: plotter = pv.Plotter(lighting='none') plotter.add_mesh(mesh, color='white') light = pv.Light()", "kit ========= The default ``lighting='light kit'`` option recreates a lighting", "setup that corresponds to a ``vtk.vtkLightKit``. We can check what", "contrast when viewing the mountain from # the top, but", "often result in ominous visuals due to objects having larger", "top, but having more contrast with views closer to the", "on plotter initialization. 
Adding a single scene light to a", "= pv.Plotter(lighting='three lights') light_types = [light.light_type for light in plotter.renderer.lights]", "to the # figure, in this case showing less contrast", "# the top, but having more contrast with views closer", "accurate visualization. Light kit ========= The default ``lighting='light kit'`` option", "illumination gives a different character to the # figure, in", "# We can introduce our own lighting from scratch by", "figure, in this case showing less contrast when viewing the", "containing three lights arranged around the camera, * no lighting.", "the # figure, in this case showing less contrast when", "of an appropriate lighting setup becomes paramount for accurate visualization.", "# Three-lights illumination # ========================= # # Switching to three-lights", "lighting # =============== # # We can introduce our own", "Plotter Lighting Systems ~~~~~~~~~~~~~~~~~~~~~~~~ The :class:`pyvista.Plotter` class comes with three", "import examples # default: light kit plotter = pv.Plotter() light_types", "this case showing less contrast when viewing the mountain from", "to three-lights illumination gives a different character to the #", "examples # default: light kit plotter = pv.Plotter() light_types =", "the default lighting system: * a light kit consisting of", "headlight and four camera lights, * an illumination system containing", "pv.plotting._ALL_PLOTTERS.clear() light_types ############################################################################### # Add a white terrain to the", "With meshes that don't have depth information encoded in their", "plotters so output is not produced in docs pv.plotting._ALL_PLOTTERS.clear() light_types", "the side. This becomes # especially clear when exploring the", "as pv from pyvista import examples # default: light kit", "plotter initialization. 
Adding a single scene light to a scene", "kit plotter = pv.Plotter() light_types = [light.light_type for light in", "having larger regions in # shadow: plotter = pv.Plotter(lighting='none') plotter.add_mesh(mesh,", "is not produced in docs pv.plotting._ALL_PLOTTERS.clear() light_types ############################################################################### # Custom", "in # shadow: plotter = pv.Plotter(lighting='none') plotter.add_mesh(mesh, color='white') light =", "light in plotter.renderer.lights] # Remove from plotters so output is", "pv.Plotter(lighting='none') plotter.add_mesh(mesh, color='white') light = pv.Light() light.set_direction_angle(30, 0) plotter.add_light(light) plotter.show()", "check what type of lights this lighting comprises: \"\"\" #", "of lights this lighting comprises: \"\"\" # sphinx_gallery_thumbnail_number = 3", "becomes # especially clear when exploring the figures interactively. plotter", "with views closer to the side. This becomes # especially", "meshes that don't have depth information encoded in their color", "to a scene will # often result in ominous visuals", "``vtk.vtkLightKit``. We can check what type of lights this lighting", "Adding a single scene light to a scene will #", "pv.Plotter(lighting='three lights') light_types = [light.light_type for light in plotter.renderer.lights] #", "so output is not produced in docs pv.plotting._ALL_PLOTTERS.clear() light_types ###############################################################################", "a ``vtk.vtkLightKit``. We can check what type of lights this", "scene will # often result in ominous visuals due to", "light kit consisting of a headlight and four camera lights,", "interactively. plotter = pv.Plotter(lighting='three lights') plotter.add_mesh(mesh, color='white') plotter.show() ############################################################################### #", "scratch by disabling any lighting # on plotter initialization. 
Adding", "three-lights illumination gives a different character to the # figure,", "consisting of a headlight and four camera lights, * an", "plotter = pv.Plotter() light_types = [light.light_type for light in plotter.renderer.lights]", "our own lighting from scratch by disabling any lighting #", "~~~~~~~~~~~~~~~~~~~~~~~~ The :class:`pyvista.Plotter` class comes with three options for the", "kit consisting of a headlight and four camera lights, *", "shadow: plotter = pv.Plotter(lighting='none') plotter.add_mesh(mesh, color='white') light = pv.Light() light.set_direction_angle(30,", "the scene: mesh = examples.download_st_helens().warp_by_scalar() plotter = pv.Plotter() plotter.add_mesh(mesh, color='white')", "the top, but having more contrast with views closer to", "mesh = examples.download_st_helens().warp_by_scalar() plotter = pv.Plotter() plotter.add_mesh(mesh, color='white') plotter.show() ###############################################################################", "pyvista as pv from pyvista import examples # default: light", "for accurate visualization. Light kit ========= The default ``lighting='light kit'``", "comprises: \"\"\" # sphinx_gallery_thumbnail_number = 3 import pyvista as pv", "= examples.download_st_helens().warp_by_scalar() plotter = pv.Plotter() plotter.add_mesh(mesh, color='white') plotter.show() ############################################################################### #", "side. 
This becomes # especially clear when exploring the figures", "pyvista import examples # default: light kit plotter = pv.Plotter()", "========================= # # Switching to three-lights illumination gives a different", "this lighting comprises: \"\"\" # sphinx_gallery_thumbnail_number = 3 import pyvista", "option recreates a lighting setup that corresponds to a ``vtk.vtkLightKit``.", "light_types ############################################################################### # Custom lighting # =============== # # We", "importance of an appropriate lighting setup becomes paramount for accurate", "docs pv.plotting._ALL_PLOTTERS.clear() light_types ############################################################################### # Custom lighting # =============== #", "lighting comprises: \"\"\" # sphinx_gallery_thumbnail_number = 3 import pyvista as", ":class:`pyvista.Plotter` class comes with three options for the default lighting", "* an illumination system containing three lights arranged around the", "camera, * no lighting. With meshes that don't have depth", "that don't have depth information encoded in their color the", "encoded in their color the importance of an appropriate lighting", "what kind of lights this setting uses: plotter = pv.Plotter(lighting='three", "lights this setting uses: plotter = pv.Plotter(lighting='three lights') light_types =", "plotter = pv.Plotter(lighting='three lights') plotter.add_mesh(mesh, color='white') plotter.show() ############################################################################### # Again", "pv.Plotter() light_types = [light.light_type for light in plotter.renderer.lights] # Remove", "lights this lighting comprises: \"\"\" # sphinx_gallery_thumbnail_number = 3 import", "lighting setup that corresponds to a ``vtk.vtkLightKit``. 
We can check", "color='white') plotter.show() ############################################################################### # Three-lights illumination # ========================= # #", "lights') plotter.add_mesh(mesh, color='white') plotter.show() ############################################################################### # Again we can check", "system containing three lights arranged around the camera, * no", "in their color the importance of an appropriate lighting setup", "plotter = pv.Plotter(lighting='three lights') light_types = [light.light_type for light in", "# figure, in this case showing less contrast when viewing", "have depth information encoded in their color the importance of", "that corresponds to a ``vtk.vtkLightKit``. We can check what type", "for light in plotter.renderer.lights] # Remove from plotters so output", "plotter.add_mesh(mesh, color='white') plotter.show() ############################################################################### # Three-lights illumination # ========================= #", "Systems ~~~~~~~~~~~~~~~~~~~~~~~~ The :class:`pyvista.Plotter` class comes with three options for", "objects having larger regions in # shadow: plotter = pv.Plotter(lighting='none')", "not produced in docs pv.plotting._ALL_PLOTTERS.clear() light_types ############################################################################### # Add a", "clear when exploring the figures interactively. 
plotter = pv.Plotter(lighting='three lights')", "############################################################################### # Custom lighting # =============== # # We can", "due to objects having larger regions in # shadow: plotter", "from # the top, but having more contrast with views", "= pv.Plotter(lighting='none') plotter.add_mesh(mesh, color='white') light = pv.Light() light.set_direction_angle(30, 0) plotter.add_light(light)", "terrain to the scene: mesh = examples.download_st_helens().warp_by_scalar() plotter = pv.Plotter()", "default lighting system: * a light kit consisting of a", "own lighting from scratch by disabling any lighting # on", "scene: mesh = examples.download_st_helens().warp_by_scalar() plotter = pv.Plotter() plotter.add_mesh(mesh, color='white') plotter.show()", "not produced in docs pv.plotting._ALL_PLOTTERS.clear() light_types ############################################################################### # Custom lighting", "visuals due to objects having larger regions in # shadow:", "in this case showing less contrast when viewing the mountain", "setting uses: plotter = pv.Plotter(lighting='three lights') light_types = [light.light_type for", "= pv.Plotter() light_types = [light.light_type for light in plotter.renderer.lights] #", "the mountain from # the top, but having more contrast", "different character to the # figure, in this case showing", "less contrast when viewing the mountain from # the top,", "disabling any lighting # on plotter initialization. Adding a single", "the importance of an appropriate lighting setup becomes paramount for", "# sphinx_gallery_thumbnail_number = 3 import pyvista as pv from pyvista", "lighting setup becomes paramount for accurate visualization. 
Light kit =========", "output is not produced in docs pv.plotting._ALL_PLOTTERS.clear() light_types ############################################################################### #", "We can check what type of lights this lighting comprises:", "and four camera lights, * an illumination system containing three", "is not produced in docs pv.plotting._ALL_PLOTTERS.clear() light_types ############################################################################### # Add", "character to the # figure, in this case showing less", "pv.Plotter(lighting='three lights') plotter.add_mesh(mesh, color='white') plotter.show() ############################################################################### # Again we can", "plotter.show() ############################################################################### # Three-lights illumination # ========================= # # Switching", "light_types ############################################################################### # Add a white terrain to the scene:", "############################################################################### # Three-lights illumination # ========================= # # Switching to", "Three-lights illumination # ========================= # # Switching to three-lights illumination", "a headlight and four camera lights, * an illumination system", "figures interactively. 
plotter = pv.Plotter(lighting='three lights') plotter.add_mesh(mesh, color='white') plotter.show() ###############################################################################", "from pyvista import examples # default: light kit plotter =", "sphinx_gallery_thumbnail_number = 3 import pyvista as pv from pyvista import", "of a headlight and four camera lights, * an illumination", "Custom lighting # =============== # # We can introduce our", "examples.download_st_helens().warp_by_scalar() plotter = pv.Plotter() plotter.add_mesh(mesh, color='white') plotter.show() ############################################################################### # Three-lights", "more contrast with views closer to the side. This becomes", "ominous visuals due to objects having larger regions in #", "check what kind of lights this setting uses: plotter =", "# Again we can check what kind of lights this", "when viewing the mountain from # the top, but having", "case showing less contrast when viewing the mountain from #", "produced in docs pv.plotting._ALL_PLOTTERS.clear() light_types ############################################################################### # Add a white", "views closer to the side. This becomes # especially clear", "don't have depth information encoded in their color the importance", "illumination # ========================= # # Switching to three-lights illumination gives", "# on plotter initialization. Adding a single scene light to", "produced in docs pv.plotting._ALL_PLOTTERS.clear() light_types ############################################################################### # Custom lighting #", "contrast with views closer to the side. This becomes #", "lights arranged around the camera, * no lighting. 
With meshes", "showing less contrast when viewing the mountain from # the", "result in ominous visuals due to objects having larger regions", "camera lights, * an illumination system containing three lights arranged", "plotter = pv.Plotter(lighting='none') plotter.add_mesh(mesh, color='white') light = pv.Light() light.set_direction_angle(30, 0)", "light_types = [light.light_type for light in plotter.renderer.lights] # Remove from", "introduce our own lighting from scratch by disabling any lighting", "We can introduce our own lighting from scratch by disabling", "with three options for the default lighting system: * a", "to a ``vtk.vtkLightKit``. We can check what type of lights", "= pv.Plotter(lighting='three lights') plotter.add_mesh(mesh, color='white') plotter.show() ############################################################################### # Again we", "pv.plotting._ALL_PLOTTERS.clear() light_types ############################################################################### # Custom lighting # =============== # #", "can introduce our own lighting from scratch by disabling any", "comes with three options for the default lighting system: *", "having more contrast with views closer to the side. This", "Switching to three-lights illumination gives a different character to the", "lights') light_types = [light.light_type for light in plotter.renderer.lights] # Remove", "# # Switching to three-lights illumination gives a different character", "viewing the mountain from # the top, but having more", "three lights arranged around the camera, * no lighting. 
With", "This becomes # especially clear when exploring the figures interactively.", "[light.light_type for light in plotter.renderer.lights] # Remove from plotters so", "# =============== # # We can introduce our own lighting", "single scene light to a scene will # often result", "in ominous visuals due to objects having larger regions in", "Add a white terrain to the scene: mesh = examples.download_st_helens().warp_by_scalar()", "a lighting setup that corresponds to a ``vtk.vtkLightKit``. We can", "but having more contrast with views closer to the side.", "# Custom lighting # =============== # # We can introduce", "from scratch by disabling any lighting # on plotter initialization.", "default: light kit plotter = pv.Plotter() light_types = [light.light_type for", "<reponame>akeshavan/pyvista \"\"\" Plotter Lighting Systems ~~~~~~~~~~~~~~~~~~~~~~~~ The :class:`pyvista.Plotter` class comes", "# default: light kit plotter = pv.Plotter() light_types = [light.light_type", "a white terrain to the scene: mesh = examples.download_st_helens().warp_by_scalar() plotter", "can check what kind of lights this setting uses: plotter", "# often result in ominous visuals due to objects having", "the camera, * no lighting. With meshes that don't have", "visualization. Light kit ========= The default ``lighting='light kit'`` option recreates", "will # often result in ominous visuals due to objects", "white terrain to the scene: mesh = examples.download_st_helens().warp_by_scalar() plotter =", "############################################################################### # Again we can check what kind of lights", "illumination system containing three lights arranged around the camera, *", "plotter.renderer.lights] # Remove from plotters so output is not produced", "arranged around the camera, * no lighting. With meshes that", "any lighting # on plotter initialization. 
Adding a single scene", "############################################################################### # Add a white terrain to the scene: mesh", "system: * a light kit consisting of a headlight and", "an illumination system containing three lights arranged around the camera,", "paramount for accurate visualization. Light kit ========= The default ``lighting='light", "color the importance of an appropriate lighting setup becomes paramount", "\"\"\" # sphinx_gallery_thumbnail_number = 3 import pyvista as pv from", "options for the default lighting system: * a light kit", "# Add a white terrain to the scene: mesh =", "mountain from # the top, but having more contrast with", "uses: plotter = pv.Plotter(lighting='three lights') light_types = [light.light_type for light", "lighting # on plotter initialization. Adding a single scene light", "a scene will # often result in ominous visuals due", "= [light.light_type for light in plotter.renderer.lights] # Remove from plotters", "corresponds to a ``vtk.vtkLightKit``. We can check what type of", "type of lights this lighting comprises: \"\"\" # sphinx_gallery_thumbnail_number =", "= 3 import pyvista as pv from pyvista import examples", "their color the importance of an appropriate lighting setup becomes", "a light kit consisting of a headlight and four camera", "========= The default ``lighting='light kit'`` option recreates a lighting setup", "of lights this setting uses: plotter = pv.Plotter(lighting='three lights') light_types", "=============== # # We can introduce our own lighting from", "in docs pv.plotting._ALL_PLOTTERS.clear() light_types ############################################################################### # Add a white terrain", "lights, * an illumination system containing three lights arranged around", "around the camera, * no lighting. 
With meshes that don't", "lighting system: * a light kit consisting of a headlight", "what type of lights this lighting comprises: \"\"\" # sphinx_gallery_thumbnail_number", "for the default lighting system: * a light kit consisting", "Remove from plotters so output is not produced in docs", "an appropriate lighting setup becomes paramount for accurate visualization. Light", "= pv.Plotter() plotter.add_mesh(mesh, color='white') plotter.show() ############################################################################### # Three-lights illumination #", "to the side. This becomes # especially clear when exploring", "the figures interactively. plotter = pv.Plotter(lighting='three lights') plotter.add_mesh(mesh, color='white') plotter.show()", "depth information encoded in their color the importance of an", "\"\"\" Plotter Lighting Systems ~~~~~~~~~~~~~~~~~~~~~~~~ The :class:`pyvista.Plotter` class comes with", "default ``lighting='light kit'`` option recreates a lighting setup that corresponds" ]
[ "FileSource('src.h') swim = Swim('example') swim(pools.c_string) swim(pools.numpy_arrays(r\"../resources\", allow_char_arrays=True)) swim(pools.include(src)) assert swim(Function.Behaviour()(src))", "Swim('example') swim(pools.c_string) swim(pools.numpy_arrays(r\"../resources\", allow_char_arrays=True)) swim(pools.include(src)) assert swim(Function.Behaviour()(src)) > 0 swim.write('example.i')", "= Swim('example') swim(pools.c_string) swim(pools.numpy_arrays(r\"../resources\", allow_char_arrays=True)) swim(pools.include(src)) assert swim(Function.Behaviour()(src)) > 0", "from swimport.all import * src = FileSource('src.h') swim = Swim('example')", "* src = FileSource('src.h') swim = Swim('example') swim(pools.c_string) swim(pools.numpy_arrays(r\"../resources\", allow_char_arrays=True))", "swim(pools.c_string) swim(pools.numpy_arrays(r\"../resources\", allow_char_arrays=True)) swim(pools.include(src)) assert swim(Function.Behaviour()(src)) > 0 swim.write('example.i') print('ok!')", "swim = Swim('example') swim(pools.c_string) swim(pools.numpy_arrays(r\"../resources\", allow_char_arrays=True)) swim(pools.include(src)) assert swim(Function.Behaviour()(src)) >", "swimport.all import * src = FileSource('src.h') swim = Swim('example') swim(pools.c_string)", "src = FileSource('src.h') swim = Swim('example') swim(pools.c_string) swim(pools.numpy_arrays(r\"../resources\", allow_char_arrays=True)) swim(pools.include(src))", "import * src = FileSource('src.h') swim = Swim('example') swim(pools.c_string) swim(pools.numpy_arrays(r\"../resources\",", "= FileSource('src.h') swim = Swim('example') swim(pools.c_string) swim(pools.numpy_arrays(r\"../resources\", allow_char_arrays=True)) swim(pools.include(src)) assert", "<reponame>talos-gis/swimport<gh_stars>1-10 from swimport.all import * src = FileSource('src.h') swim =" ]
[ "for i in range(N_stars): a = np.linspace(0, 1, x.shape[0]) *", "np.repeat(orbit_x, N_stars).reshape((-1, N_stars)) y = np.repeat(orbit_y, N_stars).reshape((-1, N_stars)) z =", "**scatter_kwargs) with sprite.material.hold_sync(): sprite.material.blending = pythreejs.BlendingMode.CustomBlending sprite.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor sprite.material.blendDst", "y[:, i] += yo z[:, i] += zo sprite =", "+ zr**2) for i in range(N_stars): a = np.linspace(0, 1,", "return ipv.scatter(x, y, z, texture=radial_sprite((64, 64), color), marker='square_2d', grow_limits=False, size=radius*0.7/100)", "sprite.material.depthWrite = False sprite.material.alphaTest = 0.1 return sprite, line def", "z = rng.normal(size=(3, N)) r = np.sqrt(x**2 + y**2 +", "N_stars)) y = np.repeat(orbit_y, N_stars).reshape((-1, N_stars)) z = np.repeat(orbit_z, N_stars).reshape((-1,", "mw.material.blendDst = pythreejs.BlendFactors.OneFactor mw.material.blendEquation = 'AddEquation' mw.material.transparent = True mw.material.depthWrite", "= np.sqrt(x**2 + y**2 + z**2)/(radius + thickness * radius", "ipv.scatter(x, y, z, texture=radial_sprite((64, 64), color), marker='square_2d', grow_limits=False, size=radius*0.7/100) milkyway_url", "orbit_visible=False, orbit_line_interpolate=5, N_star_orbits=10, color=[255, 220, 200], size_star=1, scatter_kwargs={}): \"\"\"Create a", "yo = r[i] * np.cos(a) zo = a * 0", "N)) r = np.sqrt(x**2 + y**2 + z**2)/(radius + thickness", "False sprite.material.alphaTest = 0.1 return sprite, line def radial_sprite(shape, color):", "fake galaxy around the points orbit_x/y/z with N_stars around it\"\"\"", "import ipyvolume as ipv from .datasets import UrlCached def _randomSO3():", "import scipy.interpolate x = np.linspace(0, 1, len(orbit_x)) x_smooth = np.linspace(0,", "1, shape[0]) y = np.linspace(-1, 1, shape[1]) x, y =", "mw.material.transparent = True mw.material.depthWrite = False mw.material.alphaTest = 0.1 ipv.xyzlim(size)", "= orbit_y orbit_z_line = 
orbit_z line = ipv.plot(orbit_x_line, orbit_y_line, orbit_z_line,", "sprite, line def radial_sprite(shape, color): color = np.array(color) ara =", "N_stars=100, sigma_r=1, orbit_visible=False, orbit_line_interpolate=5, N_star_orbits=10, color=[255, 220, 200], size_star=1, scatter_kwargs={}):", "np.repeat(orbit_z, N_stars).reshape((-1, N_stars)) xr, yr, zr = np.random.normal(0, scale=sigma_r, size=(3,", "= np.array([[np.cos(2*np.pi*u1), np.sin(2*np.pi*u1), 0], [-np.sin(2*np.pi*u1), np.cos(2*np.pi*u1), 0], [0, 0, 1]])", "orbit_x orbit_y_line = orbit_y orbit_z_line = orbit_z line = ipv.plot(orbit_x_line,", "orbit_x, kind)(x_smooth) orbit_y_line = scipy.interpolate.interp1d(x, orbit_y, kind)(x_smooth) orbit_z_line = scipy.interpolate.interp1d(x,", "random rotatation matrix, algo by <NAME>\"\"\" u1 = np.random.random() u2", "texture=mw_image, wireframe=False) mw.material.blending = pythreejs.BlendingMode.CustomBlending mw.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor mw.material.blendDst =", "[0, 0, 1]]) v = np.array([np.cos(2*np.pi*u2)*np.sqrt(u3), np.sin(2*np.pi*u2)*np.sqrt(u3), np.sqrt(1-u3)]) H =", "= np.linspace(0, 1, x.shape[0]) * 2 * np.pi * N_star_orbits", "2 * np.pi * N_star_orbits xo = r[i] * np.sin(a)", "kind = 'quadratic' orbit_x_line = scipy.interpolate.interp1d(x, orbit_x, kind)(x_smooth) orbit_y_line =", "v = np.array([np.cos(2*np.pi*u2)*np.sqrt(u3), np.sin(2*np.pi*u2)*np.sqrt(u3), np.sqrt(1-u3)]) H = np.identity(3)-2*v*np.transpose([v]) return -", "= np.array(color) ara = np.zeros(shape[:2] + (4,), dtype=np.uint8) x =", "= 0.1 return sprite, line def radial_sprite(shape, color): color =", "with N_stars around it\"\"\" if orbit_line_interpolate > 1: import scipy.interpolate", "y, z, texture=radial_sprite((64, 64), color), marker='square_2d', size=size_star, **scatter_kwargs) with sprite.material.hold_sync():", "points orbit_x/y/z with N_stars around it\"\"\" if orbit_line_interpolate > 1:", "np.random.RandomState(seed) x, y, z = 
rng.normal(size=(3, N)) r = np.sqrt(x**2", "dtype=np.uint8) x = np.linspace(-1, 1, shape[0]) y = np.linspace(-1, 1,", "ymw = np.meshgrid(xmw, ymw) zmw = xmw * 0 +", "sprite.material.alphaTest = 0.1 return sprite, line def radial_sprite(shape, color): color", "0.5 radius = np.sqrt(x**2+y**2) amplitude = np.maximum(0, np.exp(-radius**2/s**2)).T ara[...,3] =", "np.repeat(orbit_y, N_stars).reshape((-1, N_stars)) z = np.repeat(orbit_z, N_stars).reshape((-1, N_stars)) xr, yr,", "= scipy.interpolate.interp1d(x, orbit_y, kind)(x_smooth) orbit_z_line = scipy.interpolate.interp1d(x, orbit_z, kind)(x_smooth) else:", "= PIL.Image.open(milkyway_image.fetch()) rescale = 40 t = np.linspace(0, 1, 100)", "mw.material.blendEquation = 'AddEquation' mw.material.transparent = True mw.material.depthWrite = False mw.material.alphaTest", "yo, zo]) #print(x.shape, xo.shape) x[:, i] += xo y[:, i]", "def stars(N=1000, radius=100000, thickness=3, seed=42, color=[255, 240, 240]): import ipyvolume", "in range(N_stars): a = np.linspace(0, 1, x.shape[0]) * 2 *", "= orbit_z line = ipv.plot(orbit_x_line, orbit_y_line, orbit_z_line, visible=orbit_visible) x =", "r[i] * np.sin(a) yo = r[i] * np.cos(a) zo =", "color * amplitude.reshape(shape + (1,)) im = PIL.Image.fromarray(ara, 'RGBA') return", "= np.random.random() u3 = np.random.random() R = np.array([[np.cos(2*np.pi*u1), np.sin(2*np.pi*u1), 0],", "N_star_orbits=10, color=[255, 220, 200], size_star=1, scatter_kwargs={}): \"\"\"Create a fake galaxy", "x = np.linspace(0, 1, len(orbit_x)) x_smooth = np.linspace(0, 1, len(orbit_x)*orbit_line_interpolate)", "orbit_y, orbit_z, N_stars=100, sigma_r=1, orbit_visible=False, orbit_line_interpolate=5, N_star_orbits=10, color=[255, 220, 200],", "t = np.linspace(0, 1, 100) xmw = np.linspace(0, 1, 10)", "yr**2 + zr**2) for i in range(N_stars): a = np.linspace(0,", "= np.dot(_randomSO3(), [xo, yo, zo]) #print(x.shape, xo.shape) x[:, i] +=", "10) ymw = np.linspace(0, 1, 10) xmw, ymw = np.meshgrid(xmw,", 
"orbit_y_line, orbit_z_line, visible=orbit_visible) x = np.repeat(orbit_x, N_stars).reshape((-1, N_stars)) y =", "= np.sqrt(xr**2 + yr**2 + zr**2) for i in range(N_stars):", "r[i] * np.cos(a) zo = a * 0 xo, yo,", "i] += zo sprite = ipv.scatter(x, y, z, texture=radial_sprite((64, 64),", "= np.linspace(-1, 1, shape[0]) y = np.linspace(-1, 1, shape[1]) x,", "x, y, z = rng.normal(size=(3, N)) r = np.sqrt(x**2 +", "orbit_z_line = scipy.interpolate.interp1d(x, orbit_z, kind)(x_smooth) else: orbit_x_line = orbit_x orbit_y_line", "as ipv rng = np.random.RandomState(seed) x, y, z = rng.normal(size=(3,", "= np.random.RandomState(seed) x, y, z = rng.normal(size=(3, N)) r =", "PIL.Image.fromarray(ara, 'RGBA') return im def stars(N=1000, radius=100000, thickness=3, seed=42, color=[255,", "N_stars).reshape((-1, N_stars)) y = np.repeat(orbit_y, N_stars).reshape((-1, N_stars)) z = np.repeat(orbit_z,", "= np.repeat(orbit_z, N_stars).reshape((-1, N_stars)) xr, yr, zr = np.random.normal(0, scale=sigma_r,", "= rng.normal(size=(3, N)) r = np.sqrt(x**2 + y**2 + z**2)/(radius", "240, 240]): import ipyvolume as ipv rng = np.random.RandomState(seed) x,", "ipv.plot_mesh((xmw-0.5)*rescale, (ymw-0.5)*rescale+R_sun, zmw, u=xmw, v=ymw, texture=mw_image, wireframe=False) mw.material.blending = pythreejs.BlendingMode.CustomBlending", "it\"\"\" if orbit_line_interpolate > 1: import scipy.interpolate x = np.linspace(0,", "mesh = ipv.plot_mesh((xmw-0.5)*rescale, (ymw-0.5)*rescale+R_sun, zmw, u=xmw, v=ymw, texture=mw_image, wireframe=False) mw.material.blending", "def radial_sprite(shape, color): color = np.array(color) ara = np.zeros(shape[:2] +", "* 2 * np.pi * N_star_orbits xo = r[i] *", "orbit_y orbit_z_line = orbit_z line = ipv.plot(orbit_x_line, orbit_y_line, orbit_z_line, visible=orbit_visible)", "yo, zo = np.dot(_randomSO3(), [xo, yo, zo]) #print(x.shape, xo.shape) x[:,", "H = np.identity(3)-2*v*np.transpose([v]) return - np.dot(H, R) def spherical_galaxy_orbit(orbit_x, orbit_y,", "by 
<NAME>\"\"\" u1 = np.random.random() u2 = np.random.random() u3 =", "= np.linspace(0, 1, 10) ymw = np.linspace(0, 1, 10) xmw,", "1, len(orbit_x)*orbit_line_interpolate) kind = 'quadratic' orbit_x_line = scipy.interpolate.interp1d(x, orbit_x, kind)(x_smooth)", "+ (1,)) im = PIL.Image.fromarray(ara, 'RGBA') return im def stars(N=1000,", "* np.random.random(N)) x /= r y /= r z /=", "= scipy.interpolate.interp1d(x, orbit_x, kind)(x_smooth) orbit_y_line = scipy.interpolate.interp1d(x, orbit_y, kind)(x_smooth) orbit_z_line", "texture=radial_sprite((64, 64), color), marker='square_2d', grow_limits=False, size=radius*0.7/100) milkyway_url = 'https://www.nasa.gov/sites/default/files/images/620057main_milkyway_full.jpg' milkyway_image", "PIL.Image import pythreejs import ipyvolume as ipv from .datasets import", "np.linspace(0, 1, len(orbit_x)) x_smooth = np.linspace(0, 1, len(orbit_x)*orbit_line_interpolate) kind =", "zo]) #print(x.shape, xo.shape) x[:, i] += xo y[:, i] +=", "= UrlCached(milkyway_url) def plot_milkyway(R_sun=8, size=100): mw_image = PIL.Image.open(milkyway_image.fetch()) rescale =", "kind)(x_smooth) orbit_y_line = scipy.interpolate.interp1d(x, orbit_y, kind)(x_smooth) orbit_z_line = scipy.interpolate.interp1d(x, orbit_z,", "zr**2) for i in range(N_stars): a = np.linspace(0, 1, x.shape[0])", "range(N_stars): a = np.linspace(0, 1, x.shape[0]) * 2 * np.pi", "scipy.interpolate.interp1d(x, orbit_x, kind)(x_smooth) orbit_y_line = scipy.interpolate.interp1d(x, orbit_y, kind)(x_smooth) orbit_z_line =", "return sprite, line def radial_sprite(shape, color): color = np.array(color) ara", "sprite.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor sprite.material.blendDst = pythreejs.BlendFactors.OneFactor sprite.material.blendEquation = 'AddEquation' sprite.material.transparent", "plot_milkyway(R_sun=8, size=100): mw_image = PIL.Image.open(milkyway_image.fetch()) rescale = 40 t =", "def plot_milkyway(R_sun=8, size=100): mw_image = 
PIL.Image.open(milkyway_image.fetch()) rescale = 40 t", "r = np.sqrt(xr**2 + yr**2 + zr**2) for i in", "np.pi * N_star_orbits xo = r[i] * np.sin(a) yo =", "np.sqrt(x**2+y**2) amplitude = np.maximum(0, np.exp(-radius**2/s**2)).T ara[...,3] = (amplitude * 255)", "= np.maximum(0, np.exp(-radius**2/s**2)).T ara[...,3] = (amplitude * 255) ara[...,:3] =", "= ipv.scatter(x, y, z, texture=radial_sprite((64, 64), color), marker='square_2d', size=size_star, **scatter_kwargs)", "64), color), marker='square_2d', grow_limits=False, size=radius*0.7/100) milkyway_url = 'https://www.nasa.gov/sites/default/files/images/620057main_milkyway_full.jpg' milkyway_image =", "np.sin(a) yo = r[i] * np.cos(a) zo = a *", "np.sqrt(xr**2 + yr**2 + zr**2) for i in range(N_stars): a", "ara[...,:3] = color * amplitude.reshape(shape + (1,)) im = PIL.Image.fromarray(ara,", "color), marker='square_2d', grow_limits=False, size=radius*0.7/100) milkyway_url = 'https://www.nasa.gov/sites/default/files/images/620057main_milkyway_full.jpg' milkyway_image = UrlCached(milkyway_url)", "np.linspace(0, 1, len(orbit_x)*orbit_line_interpolate) kind = 'quadratic' orbit_x_line = scipy.interpolate.interp1d(x, orbit_x,", "np.linspace(0, 1, x.shape[0]) * 2 * np.pi * N_star_orbits xo", "= np.linspace(-1, 1, shape[1]) x, y = np.meshgrid(x, y) s", "import pythreejs import ipyvolume as ipv from .datasets import UrlCached", "N_stars)) z = np.repeat(orbit_z, N_stars).reshape((-1, N_stars)) xr, yr, zr =", "z**2)/(radius + thickness * radius * np.random.random(N)) x /= r", "= np.array([np.cos(2*np.pi*u2)*np.sqrt(u3), np.sin(2*np.pi*u2)*np.sqrt(u3), np.sqrt(1-u3)]) H = np.identity(3)-2*v*np.transpose([v]) return - np.dot(H,", "* radius * np.random.random(N)) x /= r y /= r", "pythreejs.BlendFactors.OneFactor sprite.material.blendEquation = 'AddEquation' sprite.material.transparent = True sprite.material.depthWrite = False", "'AddEquation' mw.material.transparent = True mw.material.depthWrite = False mw.material.alphaTest = 
0.1", "= np.linspace(0, 1, len(orbit_x)*orbit_line_interpolate) kind = 'quadratic' orbit_x_line = scipy.interpolate.interp1d(x,", "sigma_r=1, orbit_visible=False, orbit_line_interpolate=5, N_star_orbits=10, color=[255, 220, 200], size_star=1, scatter_kwargs={}): \"\"\"Create", "N_stars).reshape((-1, N_stars)) z = np.repeat(orbit_z, N_stars).reshape((-1, N_stars)) xr, yr, zr", "rescale = 40 t = np.linspace(0, 1, 100) xmw =", "_randomSO3(): \"\"\"return random rotatation matrix, algo by <NAME>\"\"\" u1 =", "* 0 xo, yo, zo = np.dot(_randomSO3(), [xo, yo, zo])", "= 'quadratic' orbit_x_line = scipy.interpolate.interp1d(x, orbit_x, kind)(x_smooth) orbit_y_line = scipy.interpolate.interp1d(x,", "radius * np.random.random(N)) x /= r y /= r z", "r z /= r return ipv.scatter(x, y, z, texture=radial_sprite((64, 64),", "= a * 0 xo, yo, zo = np.dot(_randomSO3(), [xo,", "i in range(N_stars): a = np.linspace(0, 1, x.shape[0]) * 2", "ymw = np.linspace(0, 1, 10) xmw, ymw = np.meshgrid(xmw, ymw)", "(1,)) im = PIL.Image.fromarray(ara, 'RGBA') return im def stars(N=1000, radius=100000,", "= r[i] * np.sin(a) yo = r[i] * np.cos(a) zo", "pythreejs.BlendFactors.OneFactor mw.material.blendEquation = 'AddEquation' mw.material.transparent = True mw.material.depthWrite = False", "np.maximum(0, np.exp(-radius**2/s**2)).T ara[...,3] = (amplitude * 255) ara[...,:3] = color", "shape[0]) y = np.linspace(-1, 1, shape[1]) x, y = np.meshgrid(x,", "[xo, yo, zo]) #print(x.shape, xo.shape) x[:, i] += xo y[:,", "size=100): mw_image = PIL.Image.open(milkyway_image.fetch()) rescale = 40 t = np.linspace(0,", "0.01 mw = mesh = ipv.plot_mesh((xmw-0.5)*rescale, (ymw-0.5)*rescale+R_sun, zmw, u=xmw, v=ymw,", "y**2 + z**2)/(radius + thickness * radius * np.random.random(N)) x", "rotatation matrix, algo by <NAME>\"\"\" u1 = np.random.random() u2 =", "if orbit_line_interpolate > 1: import scipy.interpolate x = np.linspace(0, 1,", "np.linspace(0, 1, 100) xmw = np.linspace(0, 1, 10) ymw =", "return - np.dot(H, R) def 
spherical_galaxy_orbit(orbit_x, orbit_y, orbit_z, N_stars=100, sigma_r=1,", "np.array([[np.cos(2*np.pi*u1), np.sin(2*np.pi*u1), 0], [-np.sin(2*np.pi*u1), np.cos(2*np.pi*u1), 0], [0, 0, 1]]) v", "ipv from .datasets import UrlCached def _randomSO3(): \"\"\"return random rotatation", "np.cos(a) zo = a * 0 xo, yo, zo =", "= ipv.plot_mesh((xmw-0.5)*rescale, (ymw-0.5)*rescale+R_sun, zmw, u=xmw, v=ymw, texture=mw_image, wireframe=False) mw.material.blending =", "ipv.scatter(x, y, z, texture=radial_sprite((64, 64), color), marker='square_2d', size=size_star, **scatter_kwargs) with", "= pythreejs.BlendFactors.OneFactor sprite.material.blendEquation = 'AddEquation' sprite.material.transparent = True sprite.material.depthWrite =", "as np import PIL.Image import pythreejs import ipyvolume as ipv", "xo = r[i] * np.sin(a) yo = r[i] * np.cos(a)", "np.sqrt(x**2 + y**2 + z**2)/(radius + thickness * radius *", "scatter_kwargs={}): \"\"\"Create a fake galaxy around the points orbit_x/y/z with", "= True mw.material.depthWrite = False mw.material.alphaTest = 0.1 ipv.xyzlim(size) return", "color=[255, 220, 200], size_star=1, scatter_kwargs={}): \"\"\"Create a fake galaxy around", "200], size_star=1, scatter_kwargs={}): \"\"\"Create a fake galaxy around the points", "= np.meshgrid(x, y) s = 0.5 radius = np.sqrt(x**2+y**2) amplitude", "= 0.5 radius = np.sqrt(x**2+y**2) amplitude = np.maximum(0, np.exp(-radius**2/s**2)).T ara[...,3]", "np.meshgrid(xmw, ymw) zmw = xmw * 0 + 0.01 mw", "/= r y /= r z /= r return ipv.scatter(x,", "0.1 return sprite, line def radial_sprite(shape, color): color = np.array(color)", "1, shape[1]) x, y = np.meshgrid(x, y) s = 0.5", "size=(3, N_stars))# + r = np.sqrt(xr**2 + yr**2 + zr**2)", "x /= r y /= r z /= r return", "texture=radial_sprite((64, 64), color), marker='square_2d', size=size_star, **scatter_kwargs) with sprite.material.hold_sync(): sprite.material.blending =", "mw.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor mw.material.blendDst = 
pythreejs.BlendFactors.OneFactor mw.material.blendEquation = 'AddEquation' mw.material.transparent", "xmw * 0 + 0.01 mw = mesh = ipv.plot_mesh((xmw-0.5)*rescale,", "u3 = np.random.random() R = np.array([[np.cos(2*np.pi*u1), np.sin(2*np.pi*u1), 0], [-np.sin(2*np.pi*u1), np.cos(2*np.pi*u1),", "xmw = np.linspace(0, 1, 10) ymw = np.linspace(0, 1, 10)", "1, 10) ymw = np.linspace(0, 1, 10) xmw, ymw =", "import ipyvolume as ipv rng = np.random.RandomState(seed) x, y, z", "1, x.shape[0]) * 2 * np.pi * N_star_orbits xo =", "ipv.plot(orbit_x_line, orbit_y_line, orbit_z_line, visible=orbit_visible) x = np.repeat(orbit_x, N_stars).reshape((-1, N_stars)) y", "orbit_z_line, visible=orbit_visible) x = np.repeat(orbit_x, N_stars).reshape((-1, N_stars)) y = np.repeat(orbit_y,", "mw_image = PIL.Image.open(milkyway_image.fetch()) rescale = 40 t = np.linspace(0, 1,", "= True sprite.material.depthWrite = False sprite.material.alphaTest = 0.1 return sprite,", "= 'AddEquation' sprite.material.transparent = True sprite.material.depthWrite = False sprite.material.alphaTest =", "np.linspace(-1, 1, shape[0]) y = np.linspace(-1, 1, shape[1]) x, y", "a fake galaxy around the points orbit_x/y/z with N_stars around", "= np.meshgrid(xmw, ymw) zmw = xmw * 0 + 0.01", "= np.linspace(0, 1, 100) xmw = np.linspace(0, 1, 10) ymw", "(4,), dtype=np.uint8) x = np.linspace(-1, 1, shape[0]) y = np.linspace(-1,", "R) def spherical_galaxy_orbit(orbit_x, orbit_y, orbit_z, N_stars=100, sigma_r=1, orbit_visible=False, orbit_line_interpolate=5, N_star_orbits=10,", "= orbit_x orbit_y_line = orbit_y orbit_z_line = orbit_z line =", "1: import scipy.interpolate x = np.linspace(0, 1, len(orbit_x)) x_smooth =", "orbit_z line = ipv.plot(orbit_x_line, orbit_y_line, orbit_z_line, visible=orbit_visible) x = np.repeat(orbit_x,", "(amplitude * 255) ara[...,:3] = color * amplitude.reshape(shape + (1,))", "marker='square_2d', grow_limits=False, size=radius*0.7/100) milkyway_url = 
'https://www.nasa.gov/sites/default/files/images/620057main_milkyway_full.jpg' milkyway_image = UrlCached(milkyway_url) def", "amplitude.reshape(shape + (1,)) im = PIL.Image.fromarray(ara, 'RGBA') return im def", "scipy.interpolate.interp1d(x, orbit_y, kind)(x_smooth) orbit_z_line = scipy.interpolate.interp1d(x, orbit_z, kind)(x_smooth) else: orbit_x_line", "np.linspace(0, 1, 10) ymw = np.linspace(0, 1, 10) xmw, ymw", "size=radius*0.7/100) milkyway_url = 'https://www.nasa.gov/sites/default/files/images/620057main_milkyway_full.jpg' milkyway_image = UrlCached(milkyway_url) def plot_milkyway(R_sun=8, size=100):", "kind)(x_smooth) else: orbit_x_line = orbit_x orbit_y_line = orbit_y orbit_z_line =", "* 255) ara[...,:3] = color * amplitude.reshape(shape + (1,)) im", "pythreejs.BlendFactors.SrcColorFactor mw.material.blendDst = pythreejs.BlendFactors.OneFactor mw.material.blendEquation = 'AddEquation' mw.material.transparent = True", "y) s = 0.5 radius = np.sqrt(x**2+y**2) amplitude = np.maximum(0,", "import PIL.Image import pythreejs import ipyvolume as ipv from .datasets", "sprite.material.blending = pythreejs.BlendingMode.CustomBlending sprite.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor sprite.material.blendDst = pythreejs.BlendFactors.OneFactor sprite.material.blendEquation", "'quadratic' orbit_x_line = scipy.interpolate.interp1d(x, orbit_x, kind)(x_smooth) orbit_y_line = scipy.interpolate.interp1d(x, orbit_y,", "= np.random.random() R = np.array([[np.cos(2*np.pi*u1), np.sin(2*np.pi*u1), 0], [-np.sin(2*np.pi*u1), np.cos(2*np.pi*u1), 0],", "r = np.sqrt(x**2 + y**2 + z**2)/(radius + thickness *", "orbit_z, N_stars=100, sigma_r=1, orbit_visible=False, orbit_line_interpolate=5, N_star_orbits=10, color=[255, 220, 200], size_star=1,", "#print(x.shape, xo.shape) x[:, i] += xo y[:, i] += yo", "+= zo sprite = ipv.scatter(x, y, z, texture=radial_sprite((64, 64), color),", "* N_star_orbits xo = r[i] * np.sin(a) yo = r[i]", "= np.identity(3)-2*v*np.transpose([v]) 
return - np.dot(H, R) def spherical_galaxy_orbit(orbit_x, orbit_y, orbit_z,", "import UrlCached def _randomSO3(): \"\"\"return random rotatation matrix, algo by", "yr, zr = np.random.normal(0, scale=sigma_r, size=(3, N_stars))# + r =", "<gh_stars>1-10 import numpy as np import PIL.Image import pythreejs import", "ipv rng = np.random.RandomState(seed) x, y, z = rng.normal(size=(3, N))", "else: orbit_x_line = orbit_x orbit_y_line = orbit_y orbit_z_line = orbit_z", "sprite.material.blendDst = pythreejs.BlendFactors.OneFactor sprite.material.blendEquation = 'AddEquation' sprite.material.transparent = True sprite.material.depthWrite", "0, 1]]) v = np.array([np.cos(2*np.pi*u2)*np.sqrt(u3), np.sin(2*np.pi*u2)*np.sqrt(u3), np.sqrt(1-u3)]) H = np.identity(3)-2*v*np.transpose([v])", "x, y = np.meshgrid(x, y) s = 0.5 radius =", "= mesh = ipv.plot_mesh((xmw-0.5)*rescale, (ymw-0.5)*rescale+R_sun, zmw, u=xmw, v=ymw, texture=mw_image, wireframe=False)", "x = np.linspace(-1, 1, shape[0]) y = np.linspace(-1, 1, shape[1])", "y = np.meshgrid(x, y) s = 0.5 radius = np.sqrt(x**2+y**2)", "N_stars)) xr, yr, zr = np.random.normal(0, scale=sigma_r, size=(3, N_stars))# +", "* np.pi * N_star_orbits xo = r[i] * np.sin(a) yo", "'https://www.nasa.gov/sites/default/files/images/620057main_milkyway_full.jpg' milkyway_image = UrlCached(milkyway_url) def plot_milkyway(R_sun=8, size=100): mw_image = PIL.Image.open(milkyway_image.fetch())", "np.array([np.cos(2*np.pi*u2)*np.sqrt(u3), np.sin(2*np.pi*u2)*np.sqrt(u3), np.sqrt(1-u3)]) H = np.identity(3)-2*v*np.transpose([v]) return - np.dot(H, R)", "v=ymw, texture=mw_image, wireframe=False) mw.material.blending = pythreejs.BlendingMode.CustomBlending mw.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor mw.material.blendDst", "ara = np.zeros(shape[:2] + (4,), dtype=np.uint8) x = np.linspace(-1, 1,", "+= xo y[:, i] += yo z[:, i] += zo", "np.dot(_randomSO3(), [xo, yo, zo]) #print(x.shape, xo.shape) x[:, i] += xo", "np.zeros(shape[:2] + (4,), 
dtype=np.uint8) x = np.linspace(-1, 1, shape[0]) y", "thickness=3, seed=42, color=[255, 240, 240]): import ipyvolume as ipv rng", "np.sin(2*np.pi*u2)*np.sqrt(u3), np.sqrt(1-u3)]) H = np.identity(3)-2*v*np.transpose([v]) return - np.dot(H, R) def", "= r[i] * np.cos(a) zo = a * 0 xo,", "z = np.repeat(orbit_z, N_stars).reshape((-1, N_stars)) xr, yr, zr = np.random.normal(0,", "= np.linspace(0, 1, len(orbit_x)) x_smooth = np.linspace(0, 1, len(orbit_x)*orbit_line_interpolate) kind", "z[:, i] += zo sprite = ipv.scatter(x, y, z, texture=radial_sprite((64,", "(ymw-0.5)*rescale+R_sun, zmw, u=xmw, v=ymw, texture=mw_image, wireframe=False) mw.material.blending = pythreejs.BlendingMode.CustomBlending mw.material.blendSrc", "1]]) v = np.array([np.cos(2*np.pi*u2)*np.sqrt(u3), np.sin(2*np.pi*u2)*np.sqrt(u3), np.sqrt(1-u3)]) H = np.identity(3)-2*v*np.transpose([v]) return", "np.random.random() u2 = np.random.random() u3 = np.random.random() R = np.array([[np.cos(2*np.pi*u1),", "xo y[:, i] += yo z[:, i] += zo sprite", "orbit_x_line = scipy.interpolate.interp1d(x, orbit_x, kind)(x_smooth) orbit_y_line = scipy.interpolate.interp1d(x, orbit_y, kind)(x_smooth)", "from .datasets import UrlCached def _randomSO3(): \"\"\"return random rotatation matrix,", "scale=sigma_r, size=(3, N_stars))# + r = np.sqrt(xr**2 + yr**2 +", "sprite.material.hold_sync(): sprite.material.blending = pythreejs.BlendingMode.CustomBlending sprite.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor sprite.material.blendDst = pythreejs.BlendFactors.OneFactor", "scipy.interpolate x = np.linspace(0, 1, len(orbit_x)) x_smooth = np.linspace(0, 1,", "xo.shape) x[:, i] += xo y[:, i] += yo z[:,", "numpy as np import PIL.Image import pythreejs import ipyvolume as", "= np.zeros(shape[:2] + (4,), dtype=np.uint8) x = np.linspace(-1, 1, shape[0])", "im = PIL.Image.fromarray(ara, 'RGBA') return im def stars(N=1000, radius=100000, thickness=3,", "im def stars(N=1000, radius=100000, thickness=3, seed=42, color=[255, 240, 
240]): import", "len(orbit_x)) x_smooth = np.linspace(0, 1, len(orbit_x)*orbit_line_interpolate) kind = 'quadratic' orbit_x_line", "ymw) zmw = xmw * 0 + 0.01 mw =", "= False sprite.material.alphaTest = 0.1 return sprite, line def radial_sprite(shape,", "= PIL.Image.fromarray(ara, 'RGBA') return im def stars(N=1000, radius=100000, thickness=3, seed=42,", "algo by <NAME>\"\"\" u1 = np.random.random() u2 = np.random.random() u3", "\"\"\"return random rotatation matrix, algo by <NAME>\"\"\" u1 = np.random.random()", "np.sqrt(1-u3)]) H = np.identity(3)-2*v*np.transpose([v]) return - np.dot(H, R) def spherical_galaxy_orbit(orbit_x,", "orbit_z, kind)(x_smooth) else: orbit_x_line = orbit_x orbit_y_line = orbit_y orbit_z_line", "N_stars).reshape((-1, N_stars)) xr, yr, zr = np.random.normal(0, scale=sigma_r, size=(3, N_stars))#", "= (amplitude * 255) ara[...,:3] = color * amplitude.reshape(shape +", "64), color), marker='square_2d', size=size_star, **scatter_kwargs) with sprite.material.hold_sync(): sprite.material.blending = pythreejs.BlendingMode.CustomBlending", "yo z[:, i] += zo sprite = ipv.scatter(x, y, z,", "= pythreejs.BlendFactors.SrcColorFactor sprite.material.blendDst = pythreejs.BlendFactors.OneFactor sprite.material.blendEquation = 'AddEquation' sprite.material.transparent =", "r y /= r z /= r return ipv.scatter(x, y,", "i] += xo y[:, i] += yo z[:, i] +=", "spherical_galaxy_orbit(orbit_x, orbit_y, orbit_z, N_stars=100, sigma_r=1, orbit_visible=False, orbit_line_interpolate=5, N_star_orbits=10, color=[255, 220,", "np.random.normal(0, scale=sigma_r, size=(3, N_stars))# + r = np.sqrt(xr**2 + yr**2", "+= yo z[:, i] += zo sprite = ipv.scatter(x, y,", "= pythreejs.BlendingMode.CustomBlending sprite.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor sprite.material.blendDst = pythreejs.BlendFactors.OneFactor sprite.material.blendEquation =", "the points orbit_x/y/z with N_stars around it\"\"\" if orbit_line_interpolate >", "sprite.material.transparent = True 
sprite.material.depthWrite = False sprite.material.alphaTest = 0.1 return", "PIL.Image.open(milkyway_image.fetch()) rescale = 40 t = np.linspace(0, 1, 100) xmw", "True mw.material.depthWrite = False mw.material.alphaTest = 0.1 ipv.xyzlim(size) return mesh", "* np.sin(a) yo = r[i] * np.cos(a) zo = a", "zmw, u=xmw, v=ymw, texture=mw_image, wireframe=False) mw.material.blending = pythreejs.BlendingMode.CustomBlending mw.material.blendSrc =", "thickness * radius * np.random.random(N)) x /= r y /=", "> 1: import scipy.interpolate x = np.linspace(0, 1, len(orbit_x)) x_smooth", "= 'AddEquation' mw.material.transparent = True mw.material.depthWrite = False mw.material.alphaTest =", "np import PIL.Image import pythreejs import ipyvolume as ipv from", "N_stars))# + r = np.sqrt(xr**2 + yr**2 + zr**2) for", "u=xmw, v=ymw, texture=mw_image, wireframe=False) mw.material.blending = pythreejs.BlendingMode.CustomBlending mw.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor", "a * 0 xo, yo, zo = np.dot(_randomSO3(), [xo, yo,", "return im def stars(N=1000, radius=100000, thickness=3, seed=42, color=[255, 240, 240]):", "= np.random.normal(0, scale=sigma_r, size=(3, N_stars))# + r = np.sqrt(xr**2 +", "color), marker='square_2d', size=size_star, **scatter_kwargs) with sprite.material.hold_sync(): sprite.material.blending = pythreejs.BlendingMode.CustomBlending sprite.material.blendSrc", "u1 = np.random.random() u2 = np.random.random() u3 = np.random.random() R", "sprite = ipv.scatter(x, y, z, texture=radial_sprite((64, 64), color), marker='square_2d', size=size_star,", "= ipv.plot(orbit_x_line, orbit_y_line, orbit_z_line, visible=orbit_visible) x = np.repeat(orbit_x, N_stars).reshape((-1, N_stars))", "True sprite.material.depthWrite = False sprite.material.alphaTest = 0.1 return sprite, line", "220, 200], size_star=1, scatter_kwargs={}): \"\"\"Create a fake galaxy around the", "def _randomSO3(): \"\"\"return random rotatation matrix, algo by <NAME>\"\"\" u1", "0], 
[-np.sin(2*np.pi*u1), np.cos(2*np.pi*u1), 0], [0, 0, 1]]) v = np.array([np.cos(2*np.pi*u2)*np.sqrt(u3),", "orbit_y_line = orbit_y orbit_z_line = orbit_z line = ipv.plot(orbit_x_line, orbit_y_line,", "y, z, texture=radial_sprite((64, 64), color), marker='square_2d', grow_limits=False, size=radius*0.7/100) milkyway_url =", "= color * amplitude.reshape(shape + (1,)) im = PIL.Image.fromarray(ara, 'RGBA')", "= pythreejs.BlendFactors.SrcColorFactor mw.material.blendDst = pythreejs.BlendFactors.OneFactor mw.material.blendEquation = 'AddEquation' mw.material.transparent =", "x_smooth = np.linspace(0, 1, len(orbit_x)*orbit_line_interpolate) kind = 'quadratic' orbit_x_line =", "pythreejs.BlendFactors.SrcColorFactor sprite.material.blendDst = pythreejs.BlendFactors.OneFactor sprite.material.blendEquation = 'AddEquation' sprite.material.transparent = True", "np.array(color) ara = np.zeros(shape[:2] + (4,), dtype=np.uint8) x = np.linspace(-1,", "= pythreejs.BlendFactors.OneFactor mw.material.blendEquation = 'AddEquation' mw.material.transparent = True mw.material.depthWrite =", "wireframe=False) mw.material.blending = pythreejs.BlendingMode.CustomBlending mw.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor mw.material.blendDst = pythreejs.BlendFactors.OneFactor", "scipy.interpolate.interp1d(x, orbit_z, kind)(x_smooth) else: orbit_x_line = orbit_x orbit_y_line = orbit_y", "+ y**2 + z**2)/(radius + thickness * radius * np.random.random(N))", "* 0 + 0.01 mw = mesh = ipv.plot_mesh((xmw-0.5)*rescale, (ymw-0.5)*rescale+R_sun,", "grow_limits=False, size=radius*0.7/100) milkyway_url = 'https://www.nasa.gov/sites/default/files/images/620057main_milkyway_full.jpg' milkyway_image = UrlCached(milkyway_url) def plot_milkyway(R_sun=8,", "orbit_x/y/z with N_stars around it\"\"\" if orbit_line_interpolate > 1: import", "radial_sprite(shape, color): color = np.array(color) ara = np.zeros(shape[:2] + (4,),", "x.shape[0]) * 2 * np.pi * N_star_orbits xo = r[i]", "i] += yo z[:, i] += zo 
sprite = ipv.scatter(x,", "size=size_star, **scatter_kwargs) with sprite.material.hold_sync(): sprite.material.blending = pythreejs.BlendingMode.CustomBlending sprite.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor", "sprite.material.blendEquation = 'AddEquation' sprite.material.transparent = True sprite.material.depthWrite = False sprite.material.alphaTest", "= 'https://www.nasa.gov/sites/default/files/images/620057main_milkyway_full.jpg' milkyway_image = UrlCached(milkyway_url) def plot_milkyway(R_sun=8, size=100): mw_image =", "= pythreejs.BlendingMode.CustomBlending mw.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor mw.material.blendDst = pythreejs.BlendFactors.OneFactor mw.material.blendEquation =", "visible=orbit_visible) x = np.repeat(orbit_x, N_stars).reshape((-1, N_stars)) y = np.repeat(orbit_y, N_stars).reshape((-1,", "100) xmw = np.linspace(0, 1, 10) ymw = np.linspace(0, 1,", "N_star_orbits xo = r[i] * np.sin(a) yo = r[i] *", "zmw = xmw * 0 + 0.01 mw = mesh", "xr, yr, zr = np.random.normal(0, scale=sigma_r, size=(3, N_stars))# + r", "N_stars around it\"\"\" if orbit_line_interpolate > 1: import scipy.interpolate x", "= xmw * 0 + 0.01 mw = mesh =", "z, texture=radial_sprite((64, 64), color), marker='square_2d', size=size_star, **scatter_kwargs) with sprite.material.hold_sync(): sprite.material.blending", "+ (4,), dtype=np.uint8) x = np.linspace(-1, 1, shape[0]) y =", "kind)(x_smooth) orbit_z_line = scipy.interpolate.interp1d(x, orbit_z, kind)(x_smooth) else: orbit_x_line = orbit_x", "color = np.array(color) ara = np.zeros(shape[:2] + (4,), dtype=np.uint8) x", "y, z = rng.normal(size=(3, N)) r = np.sqrt(x**2 + y**2", "+ thickness * radius * np.random.random(N)) x /= r y", "y = np.repeat(orbit_y, N_stars).reshape((-1, N_stars)) z = np.repeat(orbit_z, N_stars).reshape((-1, N_stars))", "u2 = np.random.random() u3 = np.random.random() R = np.array([[np.cos(2*np.pi*u1), np.sin(2*np.pi*u1),", "np.random.random() u3 = np.random.random() R = 
np.array([[np.cos(2*np.pi*u1), np.sin(2*np.pi*u1), 0], [-np.sin(2*np.pi*u1),", "= np.repeat(orbit_y, N_stars).reshape((-1, N_stars)) z = np.repeat(orbit_z, N_stars).reshape((-1, N_stars)) xr,", "milkyway_image = UrlCached(milkyway_url) def plot_milkyway(R_sun=8, size=100): mw_image = PIL.Image.open(milkyway_image.fetch()) rescale", "xmw, ymw = np.meshgrid(xmw, ymw) zmw = xmw * 0", "orbit_x_line = orbit_x orbit_y_line = orbit_y orbit_z_line = orbit_z line", "shape[1]) x, y = np.meshgrid(x, y) s = 0.5 radius", "len(orbit_x)*orbit_line_interpolate) kind = 'quadratic' orbit_x_line = scipy.interpolate.interp1d(x, orbit_x, kind)(x_smooth) orbit_y_line", ".datasets import UrlCached def _randomSO3(): \"\"\"return random rotatation matrix, algo", "np.identity(3)-2*v*np.transpose([v]) return - np.dot(H, R) def spherical_galaxy_orbit(orbit_x, orbit_y, orbit_z, N_stars=100,", "= np.repeat(orbit_x, N_stars).reshape((-1, N_stars)) y = np.repeat(orbit_y, N_stars).reshape((-1, N_stars)) z", "stars(N=1000, radius=100000, thickness=3, seed=42, color=[255, 240, 240]): import ipyvolume as", "0 xo, yo, zo = np.dot(_randomSO3(), [xo, yo, zo]) #print(x.shape,", "+ r = np.sqrt(xr**2 + yr**2 + zr**2) for i", "around the points orbit_x/y/z with N_stars around it\"\"\" if orbit_line_interpolate", "radius=100000, thickness=3, seed=42, color=[255, 240, 240]): import ipyvolume as ipv", "seed=42, color=[255, 240, 240]): import ipyvolume as ipv rng =", "milkyway_url = 'https://www.nasa.gov/sites/default/files/images/620057main_milkyway_full.jpg' milkyway_image = UrlCached(milkyway_url) def plot_milkyway(R_sun=8, size=100): mw_image", "galaxy around the points orbit_x/y/z with N_stars around it\"\"\" if", "[-np.sin(2*np.pi*u1), np.cos(2*np.pi*u1), 0], [0, 0, 1]]) v = np.array([np.cos(2*np.pi*u2)*np.sqrt(u3), np.sin(2*np.pi*u2)*np.sqrt(u3),", "+ yr**2 + zr**2) for i in range(N_stars): a =", "+ 0.01 mw = mesh = ipv.plot_mesh((xmw-0.5)*rescale, (ymw-0.5)*rescale+R_sun, zmw, u=xmw,", "10) xmw, ymw = 
np.meshgrid(xmw, ymw) zmw = xmw *", "a = np.linspace(0, 1, x.shape[0]) * 2 * np.pi *", "+ z**2)/(radius + thickness * radius * np.random.random(N)) x /=", "color=[255, 240, 240]): import ipyvolume as ipv rng = np.random.RandomState(seed)", "around it\"\"\" if orbit_line_interpolate > 1: import scipy.interpolate x =", "rng.normal(size=(3, N)) r = np.sqrt(x**2 + y**2 + z**2)/(radius +", "def spherical_galaxy_orbit(orbit_x, orbit_y, orbit_z, N_stars=100, sigma_r=1, orbit_visible=False, orbit_line_interpolate=5, N_star_orbits=10, color=[255,", "np.random.random(N)) x /= r y /= r z /= r", "amplitude = np.maximum(0, np.exp(-radius**2/s**2)).T ara[...,3] = (amplitude * 255) ara[...,:3]", "0], [0, 0, 1]]) v = np.array([np.cos(2*np.pi*u2)*np.sqrt(u3), np.sin(2*np.pi*u2)*np.sqrt(u3), np.sqrt(1-u3)]) H", "/= r z /= r return ipv.scatter(x, y, z, texture=radial_sprite((64,", "z /= r return ipv.scatter(x, y, z, texture=radial_sprite((64, 64), color),", "np.meshgrid(x, y) s = 0.5 radius = np.sqrt(x**2+y**2) amplitude =", "UrlCached def _randomSO3(): \"\"\"return random rotatation matrix, algo by <NAME>\"\"\"", "mw.material.blending = pythreejs.BlendingMode.CustomBlending mw.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor mw.material.blendDst = pythreejs.BlendFactors.OneFactor mw.material.blendEquation", "255) ara[...,:3] = color * amplitude.reshape(shape + (1,)) im =", "y /= r z /= r return ipv.scatter(x, y, z,", "np.exp(-radius**2/s**2)).T ara[...,3] = (amplitude * 255) ara[...,:3] = color *", "orbit_y_line = scipy.interpolate.interp1d(x, orbit_y, kind)(x_smooth) orbit_z_line = scipy.interpolate.interp1d(x, orbit_z, kind)(x_smooth)", "40 t = np.linspace(0, 1, 100) xmw = np.linspace(0, 1,", "matrix, algo by <NAME>\"\"\" u1 = np.random.random() u2 = np.random.random()", "as ipv from .datasets import UrlCached def _randomSO3(): \"\"\"return random", "* np.cos(a) zo = a * 0 xo, yo, zo", "np.linspace(-1, 1, shape[1]) x, y = np.meshgrid(x, y) s =", "with 
sprite.material.hold_sync(): sprite.material.blending = pythreejs.BlendingMode.CustomBlending sprite.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor sprite.material.blendDst =", "- np.dot(H, R) def spherical_galaxy_orbit(orbit_x, orbit_y, orbit_z, N_stars=100, sigma_r=1, orbit_visible=False,", "size_star=1, scatter_kwargs={}): \"\"\"Create a fake galaxy around the points orbit_x/y/z", "z, texture=radial_sprite((64, 64), color), marker='square_2d', grow_limits=False, size=radius*0.7/100) milkyway_url = 'https://www.nasa.gov/sites/default/files/images/620057main_milkyway_full.jpg'", "line = ipv.plot(orbit_x_line, orbit_y_line, orbit_z_line, visible=orbit_visible) x = np.repeat(orbit_x, N_stars).reshape((-1,", "pythreejs.BlendingMode.CustomBlending mw.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor mw.material.blendDst = pythreejs.BlendFactors.OneFactor mw.material.blendEquation = 'AddEquation'", "240]): import ipyvolume as ipv rng = np.random.RandomState(seed) x, y,", "= np.sqrt(x**2+y**2) amplitude = np.maximum(0, np.exp(-radius**2/s**2)).T ara[...,3] = (amplitude *", "'AddEquation' sprite.material.transparent = True sprite.material.depthWrite = False sprite.material.alphaTest = 0.1", "ara[...,3] = (amplitude * 255) ara[...,:3] = color * amplitude.reshape(shape", "zr = np.random.normal(0, scale=sigma_r, size=(3, N_stars))# + r = np.sqrt(xr**2", "y = np.linspace(-1, 1, shape[1]) x, y = np.meshgrid(x, y)", "radius = np.sqrt(x**2+y**2) amplitude = np.maximum(0, np.exp(-radius**2/s**2)).T ara[...,3] = (amplitude", "x[:, i] += xo y[:, i] += yo z[:, i]", "orbit_z_line = orbit_z line = ipv.plot(orbit_x_line, orbit_y_line, orbit_z_line, visible=orbit_visible) x", "orbit_line_interpolate > 1: import scipy.interpolate x = np.linspace(0, 1, len(orbit_x))", "line def radial_sprite(shape, color): color = np.array(color) ara = np.zeros(shape[:2]", "\"\"\"Create a fake galaxy around the points orbit_x/y/z with N_stars", "pythreejs.BlendingMode.CustomBlending 
sprite.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor sprite.material.blendDst = pythreejs.BlendFactors.OneFactor sprite.material.blendEquation = 'AddEquation'", "color): color = np.array(color) ara = np.zeros(shape[:2] + (4,), dtype=np.uint8)", "r return ipv.scatter(x, y, z, texture=radial_sprite((64, 64), color), marker='square_2d', grow_limits=False,", "pythreejs import ipyvolume as ipv from .datasets import UrlCached def", "np.sin(2*np.pi*u1), 0], [-np.sin(2*np.pi*u1), np.cos(2*np.pi*u1), 0], [0, 0, 1]]) v =", "<NAME>\"\"\" u1 = np.random.random() u2 = np.random.random() u3 = np.random.random()", "np.linspace(0, 1, 10) xmw, ymw = np.meshgrid(xmw, ymw) zmw =", "marker='square_2d', size=size_star, **scatter_kwargs) with sprite.material.hold_sync(): sprite.material.blending = pythreejs.BlendingMode.CustomBlending sprite.material.blendSrc =", "UrlCached(milkyway_url) def plot_milkyway(R_sun=8, size=100): mw_image = PIL.Image.open(milkyway_image.fetch()) rescale = 40", "xo, yo, zo = np.dot(_randomSO3(), [xo, yo, zo]) #print(x.shape, xo.shape)", "0 + 0.01 mw = mesh = ipv.plot_mesh((xmw-0.5)*rescale, (ymw-0.5)*rescale+R_sun, zmw,", "1, 100) xmw = np.linspace(0, 1, 10) ymw = np.linspace(0,", "= np.random.random() u2 = np.random.random() u3 = np.random.random() R =", "= np.linspace(0, 1, 10) xmw, ymw = np.meshgrid(xmw, ymw) zmw", "mw = mesh = ipv.plot_mesh((xmw-0.5)*rescale, (ymw-0.5)*rescale+R_sun, zmw, u=xmw, v=ymw, texture=mw_image,", "rng = np.random.RandomState(seed) x, y, z = rng.normal(size=(3, N)) r", "ipyvolume as ipv from .datasets import UrlCached def _randomSO3(): \"\"\"return", "zo = np.dot(_randomSO3(), [xo, yo, zo]) #print(x.shape, xo.shape) x[:, i]", "import numpy as np import PIL.Image import pythreejs import ipyvolume", "'RGBA') return im def stars(N=1000, radius=100000, thickness=3, seed=42, color=[255, 240,", "np.random.random() R = np.array([[np.cos(2*np.pi*u1), np.sin(2*np.pi*u1), 0], [-np.sin(2*np.pi*u1), np.cos(2*np.pi*u1), 0], [0,", 
"1, len(orbit_x)) x_smooth = np.linspace(0, 1, len(orbit_x)*orbit_line_interpolate) kind = 'quadratic'", "R = np.array([[np.cos(2*np.pi*u1), np.sin(2*np.pi*u1), 0], [-np.sin(2*np.pi*u1), np.cos(2*np.pi*u1), 0], [0, 0,", "np.cos(2*np.pi*u1), 0], [0, 0, 1]]) v = np.array([np.cos(2*np.pi*u2)*np.sqrt(u3), np.sin(2*np.pi*u2)*np.sqrt(u3), np.sqrt(1-u3)])", "orbit_line_interpolate=5, N_star_orbits=10, color=[255, 220, 200], size_star=1, scatter_kwargs={}): \"\"\"Create a fake", "* amplitude.reshape(shape + (1,)) im = PIL.Image.fromarray(ara, 'RGBA') return im", "np.dot(H, R) def spherical_galaxy_orbit(orbit_x, orbit_y, orbit_z, N_stars=100, sigma_r=1, orbit_visible=False, orbit_line_interpolate=5,", "s = 0.5 radius = np.sqrt(x**2+y**2) amplitude = np.maximum(0, np.exp(-radius**2/s**2)).T", "/= r return ipv.scatter(x, y, z, texture=radial_sprite((64, 64), color), marker='square_2d',", "ipyvolume as ipv rng = np.random.RandomState(seed) x, y, z =", "= 40 t = np.linspace(0, 1, 100) xmw = np.linspace(0,", "orbit_y, kind)(x_smooth) orbit_z_line = scipy.interpolate.interp1d(x, orbit_z, kind)(x_smooth) else: orbit_x_line =", "zo sprite = ipv.scatter(x, y, z, texture=radial_sprite((64, 64), color), marker='square_2d',", "zo = a * 0 xo, yo, zo = np.dot(_randomSO3(),", "x = np.repeat(orbit_x, N_stars).reshape((-1, N_stars)) y = np.repeat(orbit_y, N_stars).reshape((-1, N_stars))", "1, 10) xmw, ymw = np.meshgrid(xmw, ymw) zmw = xmw", "= scipy.interpolate.interp1d(x, orbit_z, kind)(x_smooth) else: orbit_x_line = orbit_x orbit_y_line =" ]
[ "= weights[1] * (target * torch.log(output)) + \\ weights[0] *", "loss function -------------------------- ''' ''' In some degree, it can", "- alpha) * (1 - y_true) * torch.log(1 - y_pred)", "* torch.log(1 - output)) else: loss = target * torch.log(output)", "@desc ''' import torch '''--------------------- Weighted Binary cross Entropy ----------------------'''", "<EMAIL> * @create date 2020-09-25 14:33:38 * @desc ''' import", "''' ---------------------- Binary focal loss function -------------------------- ''' ''' In", "loss = target * torch.log(output) + (1 - target) *", "focal loss function -------------------------- ''' ''' In some degree, it", "= target * torch.log(output) + (1 - target) * torch.log(1", "output) return torch.neg(torch.mean(loss)) ''' ---------------------- Binary focal loss function --------------------------", "In Torch BCELoss, weight is set to every element of", "''' import torch '''--------------------- Weighted Binary cross Entropy ----------------------''' '''", "torch.log(1 - output)) else: loss = target * torch.log(output) +", "y_pred=torch.clamp(y_pred,1e-7,1-1e-7) return - alpha * y_true * torch.log(y_pred) * (1", "function -------------------------- ''' ''' In some degree, it can reduce", "y_true * torch.log(y_pred) * (1 - y_pred) ** gamma\\ -", "output)) else: loss = target * torch.log(output) + (1 -", "''' In some degree, it can reduce the influence of", "some degree, it can reduce the influence of imbalanced dataset", "''' def weighted_binary_cross_entropy(output, target, weights=None): if weights is not None:", "instead of to every class ''' def weighted_binary_cross_entropy(output, target, weights=None):", "* @desc ''' import torch '''--------------------- Weighted Binary cross Entropy", "of imbalanced dataset ''' def focal_loss(y_true,y_pred,device): alpha,gamma = torch.tensor(0.25).to(device) ,", "- output) return torch.neg(torch.mean(loss)) ''' ---------------------- Binary focal loss function", "dataset ''' def 
focal_loss(y_true,y_pred,device): alpha,gamma = torch.tensor(0.25).to(device) , torch.tensor(2.0).to(device) y_pred=torch.clamp(y_pred,1e-7,1-1e-7)", "weight is set to every element of input instead of", "2020-09-25 14:33:38 * @desc ''' import torch '''--------------------- Weighted Binary", "import torch '''--------------------- Weighted Binary cross Entropy ----------------------''' ''' In", "torch.log(output)) + \\ weights[0] * ((1 - target) * torch.log(1", "element of input instead of to every class ''' def", "every element of input instead of to every class '''", "target * torch.log(output) + (1 - target) * torch.log(1 -", "alpha * y_true * torch.log(y_pred) * (1 - y_pred) **", "torch '''--------------------- Weighted Binary cross Entropy ----------------------''' ''' In Torch", "influence of imbalanced dataset ''' def focal_loss(y_true,y_pred,device): alpha,gamma = torch.tensor(0.25).to(device)", "+ (1 - target) * torch.log(1 - output) return torch.neg(torch.mean(loss))", "* y_true * torch.log(y_pred) * (1 - y_pred) ** gamma\\", "- (1 - alpha) * (1 - y_true) * torch.log(1", "class ''' def weighted_binary_cross_entropy(output, target, weights=None): if weights is not", "cross Entropy ----------------------''' ''' In Torch BCELoss, weight is set", "target) * torch.log(1 - output) return torch.neg(torch.mean(loss)) ''' ---------------------- Binary", "imbalanced dataset ''' def focal_loss(y_true,y_pred,device): alpha,gamma = torch.tensor(0.25).to(device) , torch.tensor(2.0).to(device)", "''' In Torch BCELoss, weight is set to every element", "- target) * torch.log(1 - output)) else: loss = target", "* @create date 2020-09-25 14:33:38 * @desc ''' import torch", ", torch.tensor(2.0).to(device) y_pred=torch.clamp(y_pred,1e-7,1-1e-7) return - alpha * y_true * torch.log(y_pred)", "(target * torch.log(output)) + \\ weights[0] * ((1 - target)", "return - alpha * y_true * torch.log(y_pred) * (1 -", "alpha,gamma = torch.tensor(0.25).to(device) , 
torch.tensor(2.0).to(device) y_pred=torch.clamp(y_pred,1e-7,1-1e-7) return - alpha *", "if weights is not None: assert len(weights) == 2 loss", "Torch BCELoss, weight is set to every element of input", "* (1 - y_true) * torch.log(1 - y_pred) * y_pred", "every class ''' def weighted_binary_cross_entropy(output, target, weights=None): if weights is", "weights[1] * (target * torch.log(output)) + \\ weights[0] * ((1", "Waldinsamkeit * @email <EMAIL> * @create date 2020-09-25 14:33:38 *", "of input instead of to every class ''' def weighted_binary_cross_entropy(output,", "----------------------''' ''' In Torch BCELoss, weight is set to every", "assert len(weights) == 2 loss = weights[1] * (target *", "(1 - y_pred) ** gamma\\ - (1 - alpha) *", "- target) * torch.log(1 - output) return torch.neg(torch.mean(loss)) ''' ----------------------", "y_pred) ** gamma\\ - (1 - alpha) * (1 -", "Binary focal loss function -------------------------- ''' ''' In some degree,", "the influence of imbalanced dataset ''' def focal_loss(y_true,y_pred,device): alpha,gamma =", "* @author Waldinsamkeit * @email <EMAIL> * @create date 2020-09-25", "is not None: assert len(weights) == 2 loss = weights[1]", "weighted_binary_cross_entropy(output, target, weights=None): if weights is not None: assert len(weights)", "of to every class ''' def weighted_binary_cross_entropy(output, target, weights=None): if", "torch.tensor(0.25).to(device) , torch.tensor(2.0).to(device) y_pred=torch.clamp(y_pred,1e-7,1-1e-7) return - alpha * y_true *", "len(weights) == 2 loss = weights[1] * (target * torch.log(output))", "def weighted_binary_cross_entropy(output, target, weights=None): if weights is not None: assert", "not None: assert len(weights) == 2 loss = weights[1] *", "torch.log(y_pred) * (1 - y_pred) ** gamma\\ - (1 -", "degree, it can reduce the influence of imbalanced dataset '''", "@author Waldinsamkeit * @email <EMAIL> * @create date 2020-09-25 14:33:38", "- output)) else: loss = target * torch.log(output) 
+ (1", "Weighted Binary cross Entropy ----------------------''' ''' In Torch BCELoss, weight", "set to every element of input instead of to every", "it can reduce the influence of imbalanced dataset ''' def", "- alpha * y_true * torch.log(y_pred) * (1 - y_pred)", "14:33:38 * @desc ''' import torch '''--------------------- Weighted Binary cross", "\\ weights[0] * ((1 - target) * torch.log(1 - output))", "'''--------------------- Weighted Binary cross Entropy ----------------------''' ''' In Torch BCELoss,", "''' def focal_loss(y_true,y_pred,device): alpha,gamma = torch.tensor(0.25).to(device) , torch.tensor(2.0).to(device) y_pred=torch.clamp(y_pred,1e-7,1-1e-7) return", "return torch.neg(torch.mean(loss)) ''' ---------------------- Binary focal loss function -------------------------- '''", "to every element of input instead of to every class", "target, weights=None): if weights is not None: assert len(weights) ==", "-------------------------- ''' ''' In some degree, it can reduce the", "focal_loss(y_true,y_pred,device): alpha,gamma = torch.tensor(0.25).to(device) , torch.tensor(2.0).to(device) y_pred=torch.clamp(y_pred,1e-7,1-1e-7) return - alpha", "(1 - alpha) * (1 - y_true) * torch.log(1 -", "BCELoss, weight is set to every element of input instead", "to every class ''' def weighted_binary_cross_entropy(output, target, weights=None): if weights", "(1 - target) * torch.log(1 - output) return torch.neg(torch.mean(loss)) '''", "((1 - target) * torch.log(1 - output)) else: loss =", "* @email <EMAIL> * @create date 2020-09-25 14:33:38 * @desc", "* ((1 - target) * torch.log(1 - output)) else: loss", "* torch.log(1 - output) return torch.neg(torch.mean(loss)) ''' ---------------------- Binary focal", "torch.log(output) + (1 - target) * torch.log(1 - output) return", "== 2 loss = weights[1] * (target * torch.log(output)) +", "loss = weights[1] * (target * torch.log(output)) + \\ weights[0]", "2 loss = weights[1] * (target * torch.log(output)) + \\", "else: loss = target 
* torch.log(output) + (1 - target)", "@create date 2020-09-25 14:33:38 * @desc ''' import torch '''---------------------", "+ \\ weights[0] * ((1 - target) * torch.log(1 -", "torch.tensor(2.0).to(device) y_pred=torch.clamp(y_pred,1e-7,1-1e-7) return - alpha * y_true * torch.log(y_pred) *", "In some degree, it can reduce the influence of imbalanced", "torch.neg(torch.mean(loss)) ''' ---------------------- Binary focal loss function -------------------------- ''' '''", "''' * @author Waldinsamkeit * @email <EMAIL> * @create date", "* (1 - y_pred) ** gamma\\ - (1 - alpha)", "''' ''' In some degree, it can reduce the influence", "** gamma\\ - (1 - alpha) * (1 - y_true)", "def focal_loss(y_true,y_pred,device): alpha,gamma = torch.tensor(0.25).to(device) , torch.tensor(2.0).to(device) y_pred=torch.clamp(y_pred,1e-7,1-1e-7) return -", "weights[0] * ((1 - target) * torch.log(1 - output)) else:", "weights is not None: assert len(weights) == 2 loss =", "can reduce the influence of imbalanced dataset ''' def focal_loss(y_true,y_pred,device):", "alpha) * (1 - y_true) * torch.log(1 - y_pred) *", "Entropy ----------------------''' ''' In Torch BCELoss, weight is set to", "* torch.log(y_pred) * (1 - y_pred) ** gamma\\ - (1", "Binary cross Entropy ----------------------''' ''' In Torch BCELoss, weight is", "None: assert len(weights) == 2 loss = weights[1] * (target", "@email <EMAIL> * @create date 2020-09-25 14:33:38 * @desc '''", "date 2020-09-25 14:33:38 * @desc ''' import torch '''--------------------- Weighted", "weights=None): if weights is not None: assert len(weights) == 2", "torch.log(1 - output) return torch.neg(torch.mean(loss)) ''' ---------------------- Binary focal loss", "---------------------- Binary focal loss function -------------------------- ''' ''' In some", "* (target * torch.log(output)) + \\ weights[0] * ((1 -", "* torch.log(output)) + \\ weights[0] * ((1 - target) *", "* torch.log(output) + (1 - target) * torch.log(1 - output)", "gamma\\ - (1 - alpha) * (1 
- y_true) *", "= torch.tensor(0.25).to(device) , torch.tensor(2.0).to(device) y_pred=torch.clamp(y_pred,1e-7,1-1e-7) return - alpha * y_true", "input instead of to every class ''' def weighted_binary_cross_entropy(output, target,", "target) * torch.log(1 - output)) else: loss = target *", "- y_pred) ** gamma\\ - (1 - alpha) * (1", "is set to every element of input instead of to", "reduce the influence of imbalanced dataset ''' def focal_loss(y_true,y_pred,device): alpha,gamma" ]
[ "int = 1 destination_logical: int = 0 destination_physical: Optional[int] =", ">> 6 else: lower = address << 1 higher =", "address_bytes = hdlc_frame_bytes[3].to_bytes(1, \"big\") destination_logical = address_bytes[0] >> 1 destination_physical", "( (destination_logical, destination_physical, destination_length), (source_logical, source_physical, source_length), ) @staticmethod def", "== 1: address_bytes = hdlc_frame_bytes[3].to_bytes(1, \"big\") destination_logical = address_bytes[0] >>", "out.extend( [logical_higher, logical_lower, physical_higher, physical_lower] ) else: # no physical", "byte is indicated by the of the last byte LSB", "= hdlc_frame_bytes[3:7] destination_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2]) destination_physical = HdlcAddress.parse_two_byte_address(address_bytes[3:]) # Find", "bool(end_byte & 0b00000001): # Found end byte: destination_length = _length", "address. destination_length: int = 1 destination_logical: int = 0 destination_physical:", "so mark the logial as end. 
logical_lower = logical_lower |", "\"\"\" logical_address: int = attr.ib(validator=[validators.validate_hdlc_address]) physical_address: Optional[int] = attr.ib( default=None,", "frame_bytes: bytes, address_type: str): _, source_address_data = HdlcAddress.find_address_in_frame_bytes(frame_bytes) source_logical, source_physical,", "int = 0 source_physical: Optional[int] = 0 source_position_list: List[Tuple[int, int]]", "address is used to address a physical device ( a", "= logical_lower | 0b00000001 out.extend([logical_higher, logical_lower]) out_bytes = list() for", "in destination_positions_list: end_byte = hdlc_frame_bytes[pos] if bool(end_byte & 0b00000001): #", "\"\"\" A client address shall always be expressed on one", "HdlcAddress.parse_two_byte_address(address_bytes[3:]) # Find source address source_length: int = 1 source_logical:", "upper = address_bytes[0] >> 1 lower = address_bytes[1] >> 1", "!= 2: raise ValueError(f\"Can only parse 2 bytes for address\")", "1 higher = (address & 0b0011111110000000) >> 6 else: lower", "0b0000000001111111) << 1 higher = (address & 0b0011111110000000) >> 6", "To enable addressing more than one logical device within a", "> 0b01111111: lower = (address & 0b0000000001111111) << 1 higher", "address: out_bytes.append(address.to_bytes(1, \"big\")) return b\"\".join(out_bytes) @staticmethod def _split_address(address: int) ->", "last byte LSB being 1 The first address is the", ": 5 + source_length] source_logical = address_bytes[0] >> 1 source_physical", "and the seconds is the source address. 
:param frame_bytes: :return:", "= None elif destination_length == 2: address_bytes = hdlc_frame_bytes[3:5] destination_logical", "source_length] source_logical = address_bytes[0] >> 1 source_physical = address_bytes[1] >>", "else: lower = address << 1 higher = None return", "address_bytes = hdlc_frame_bytes[3:7] destination_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2]) destination_physical = HdlcAddress.parse_two_byte_address(address_bytes[3:]) #", "-> Tuple[Optional[int], int]: higher: Optional[int] lower: int if address >", "one logical device within a single physical device and to", "address. out.append(((self.logical_address << 1) | 0b00000001)) else: # server address", "None return higher, lower @staticmethod def _address_to_byte(address: int) -> bytes:", "break continue if destination_length == 1: address_bytes = hdlc_frame_bytes[3].to_bytes(1, \"big\")", "device ( a physical device on a multi-drop) The physical", "return higher, lower @staticmethod def _address_to_byte(address: int) -> bytes: return", "= address_bytes[1] >> 1 return lower + (upper << 7)", "destination_positions_list ] for pos, _length in source_position_list: end_byte = hdlc_frame_bytes[pos]", "0 destination_positions_list: List[Tuple[int, int]] = [(3, 1), (4, 2), (6,", "bytes for address\") upper = address_bytes[0] >> 1 lower =", "& 0b0000000001111111) << 1 higher = (address & 0b0011111110000000) >>", "be divided in two parts– may be divided into two", "address in out: if address: out_bytes.append(address.to_bytes(1, \"big\")) return b\"\".join(out_bytes) @staticmethod", "long. the end byte is indicated by the of the", "class HdlcAddress: \"\"\" A client address shall always be expressed", "device within a single physical device and to support the", "default=None, validator=[validators.validate_hdlc_address] ) address_type: str = attr.ib( default=\"client\", validator=[validators.validate_hdlc_address_type] )", "omitted it not used. 
\"\"\" logical_address: int = attr.ib(validator=[validators.validate_hdlc_address]) physical_address:", "physical address can be omitted it not used. \"\"\" logical_address:", "= _length break continue if source_length == 1: address_bytes =", "\"big\") source_logical = address_bytes[0] >> 1 source_physical = None elif", "physical_lower = physical_lower | 0b00000001 out.extend( [logical_higher, logical_lower, physical_higher, physical_lower]", "| 0b00000001 out.extend([logical_higher, logical_lower]) out_bytes = list() for address in", "destination_positions_list: List[Tuple[int, int]] = [(3, 1), (4, 2), (6, 4)]", "(destination_logical, destination_physical, destination_length), (source_logical, source_physical, source_length), ) @staticmethod def parse_two_byte_address(address_bytes:", "self._split_address( self.physical_address ) # mark physical lower as end physical_lower", "destination_address_data return cls(destination_logical, destination_physical, address_type) @classmethod def source_from_bytes(cls, frame_bytes: bytes,", "being 1 The first address is the destination address and", "b\"\".join(out_bytes) @staticmethod def _split_address(address: int) -> Tuple[Optional[int], int]: higher: Optional[int]", "item in destination_positions_list ] for pos, _length in source_position_list: end_byte", "= hdlc_frame_bytes[3:5] destination_logical = address_bytes[0] >> 1 destination_physical = address_bytes[1]", "+ destination_length : 7 + source_length] source_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2]) source_physical", "@staticmethod def parse_two_byte_address(address_bytes: bytes): if address_bytes != 2: raise ValueError(f\"Can", "The physical address can be omitted it not used. \"\"\"", "# Find destination address. 
destination_length: int = 1 destination_logical: int", "= HdlcAddress.find_address_in_frame_bytes( frame_bytes ) ( destination_logical, destination_physical, destination_length, ) =", ">> 1 lower = address_bytes[1] >> 1 return lower +", "source_position_list: end_byte = hdlc_frame_bytes[pos] if bool(end_byte & 0b00000001): # Found", "divided into two parts: The logical address to address a", "destination_length == 4: address_bytes = hdlc_frame_bytes[3:7] destination_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2]) destination_physical", "as end physical_lower = physical_lower | 0b00000001 out.extend( [logical_higher, logical_lower,", "physical_lower | 0b00000001 out.extend( [logical_higher, logical_lower, physical_higher, physical_lower] ) else:", "HDLC address The logical address must always be present. The", "higher = None return higher, lower @staticmethod def _address_to_byte(address: int)", ") -> Tuple[Tuple[int, Optional[int], int], Tuple[int, Optional[int], int]]: \"\"\" address", "# no physical address so mark the logial as end.", "if source_length == 1: address_bytes = hdlc_frame_bytes[3 + destination_length].to_bytes(1, \"big\")", "1: address_bytes = hdlc_frame_bytes[3 + destination_length].to_bytes(1, \"big\") source_logical = address_bytes[0]", "1 destination_logical: int = 0 destination_physical: Optional[int] = 0 destination_positions_list:", "configuration the server address may be divided in two parts–", "int]]: \"\"\" address can be 1, 2 or 4 bytes", "end_byte = hdlc_frame_bytes[pos] if bool(end_byte & 0b00000001): # Found end", "physical_higher, physical_lower = self._split_address( self.physical_address ) # mark physical lower", "& 0b00000001): # Found end byte: destination_length = _length break", "and to support the multi-drop configuration the server address may", "= HdlcAddress.parse_two_byte_address(address_bytes[3:]) return ( (destination_logical, destination_physical, destination_length), (source_logical, source_physical, 
source_length),", "is the source address. :param frame_bytes: :return: \"\"\" # Find", "destination_address_data, _ = HdlcAddress.find_address_in_frame_bytes( frame_bytes ) ( destination_logical, destination_physical, destination_length,", ":return: \"\"\" return len(self.to_bytes()) def to_bytes(self): out: List[Optional[int]] = list()", "source_physical = None elif source_length == 2: address_bytes = hdlc_frame_bytes[3", "attr.ib(validator=[validators.validate_hdlc_address]) physical_address: Optional[int] = attr.ib( default=None, validator=[validators.validate_hdlc_address] ) address_type: str", "destination_length == 4: address_bytes = hdlc_frame_bytes[3 + destination_length : 7", "addressable entity within a physical device) makes up the upper", "length(self): \"\"\" The number of bytes the address makes up.", "byte. To enable addressing more than one logical device within", "( a physical device on a multi-drop) The physical address", "@property def length(self): \"\"\" The number of bytes the address", "logical_higher, logical_lower = self._split_address(self.logical_address) if self.physical_address: physical_higher, physical_lower = self._split_address(", "the server address may be divided in two parts– may", "physical_lower = self._split_address( self.physical_address ) # mark physical lower as", "str): _, source_address_data = HdlcAddress.find_address_in_frame_bytes(frame_bytes) source_logical, source_physical, source_length = source_address_data", "\"\"\" # Find destination address. 
destination_length: int = 1 destination_logical:", "end byte: destination_length = _length break continue if destination_length ==", "destination_length : 5 + source_length] source_logical = address_bytes[0] >> 1", "destination_positions_list: end_byte = hdlc_frame_bytes[pos] if bool(end_byte & 0b00000001): # Found", "if self.physical_address: physical_higher, physical_lower = self._split_address( self.physical_address ) # mark", "destination_physical, destination_length, ) = destination_address_data return cls(destination_logical, destination_physical, address_type) @classmethod", "= hdlc_frame_bytes[3 + destination_length : 5 + source_length] source_logical =", "on one byte. To enable addressing more than one logical", "destination address. destination_length: int = 1 destination_logical: int = 0", ") @property def length(self): \"\"\" The number of bytes the", "within a single physical device and to support the multi-drop", "= hdlc_frame_bytes[3 + destination_length : 7 + source_length] source_logical =", "be omitted it not used. 
\"\"\" logical_address: int = attr.ib(validator=[validators.validate_hdlc_address])", "out: if address: out_bytes.append(address.to_bytes(1, \"big\")) return b\"\".join(out_bytes) @staticmethod def _split_address(address:", "= self._split_address( self.physical_address ) # mark physical lower as end", "hdlc_frame_bytes[3 + destination_length : 5 + source_length] source_logical = address_bytes[0]", "by the of the last byte LSB being 1 The", "destination_length), (source_logical, source_physical, source_length), ) @staticmethod def parse_two_byte_address(address_bytes: bytes): if", "destination_logical, destination_physical, destination_length, ) = destination_address_data return cls(destination_logical, destination_physical, address_type)", "4)] address_bytes: bytes for pos, _length in destination_positions_list: end_byte =", "( destination_logical, destination_physical, destination_length, ) = destination_address_data return cls(destination_logical, destination_physical,", "address_bytes[1] >> 1 elif destination_length == 4: address_bytes = hdlc_frame_bytes[3:7]", "a multi-drop) The physical address can be omitted it not", "address_bytes = hdlc_frame_bytes[3 + destination_length].to_bytes(1, \"big\") source_logical = address_bytes[0] >>", "= (address & 0b0011111110000000) >> 6 else: lower = address", "bytes): if address_bytes != 2: raise ValueError(f\"Can only parse 2", "= destination_address_data return cls(destination_logical, destination_physical, address_type) @classmethod def source_from_bytes(cls, frame_bytes:", "address_bytes[0] >> 1 lower = address_bytes[1] >> 1 return lower", "into two parts: The logical address to address a logical", "2: raise ValueError(f\"Can only parse 2 bytes for address\") upper", "physical_address: Optional[int] = attr.ib( default=None, validator=[validators.validate_hdlc_address] ) address_type: str =", "logical_lower | 0b00000001 out.extend([logical_higher, logical_lower]) out_bytes = list() for address", "multi-drop configuration 
the server address may be divided in two", "HdlcAddress.parse_two_byte_address(address_bytes[3:]) return ( (destination_logical, destination_physical, destination_length), (source_logical, source_physical, source_length), )", "elif destination_length == 4: address_bytes = hdlc_frame_bytes[3:7] destination_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2])", "if bool(end_byte & 0b00000001): # Found end byte: destination_length =", "address_bytes[0] >> 1 source_physical = None elif source_length == 2:", "List[Tuple[int, int]] = [(3, 1), (4, 2), (6, 4)] address_bytes:", "physical address is used to address a physical device (", "address a physical device ( a physical device on a", "in two parts– may be divided into two parts: The", ") ( destination_logical, destination_physical, destination_length, ) = destination_address_data return cls(destination_logical,", "1 destination_physical = address_bytes[1] >> 1 elif destination_length == 4:", "to address a logical device (separate addressable entity within a", ">> 1 destination_physical = address_bytes[1] >> 1 elif destination_length ==", "1 elif destination_length == 4: address_bytes = hdlc_frame_bytes[3 + destination_length", "physical address so mark the logial as end. logical_lower =", "@staticmethod def _address_to_byte(address: int) -> bytes: return address.to_bytes(1, \"big\") @classmethod", "_, source_address_data = HdlcAddress.find_address_in_frame_bytes(frame_bytes) source_logical, source_physical, source_length = source_address_data return", "2: address_bytes = hdlc_frame_bytes[3:5] destination_logical = address_bytes[0] >> 1 destination_physical", "return cls(destination_logical, destination_physical, address_type) @classmethod def source_from_bytes(cls, frame_bytes: bytes, address_type:", "no physical address so mark the logial as end. logical_lower", "source_logical = address_bytes[0] >> 1 source_physical = None elif source_length", "the source address. 
:param frame_bytes: :return: \"\"\" # Find destination", "logical_lower, physical_higher, physical_lower] ) else: # no physical address so", "source address. :param frame_bytes: :return: \"\"\" # Find destination address.", "A client address shall always be expressed on one byte.", "_address_to_byte(address: int) -> bytes: return address.to_bytes(1, \"big\") @classmethod def destination_from_bytes(cls,", "typing import * import attr from dlms_cosem.hdlc import validators @attr.s(auto_attribs=True)", "for pos, _length in destination_positions_list: end_byte = hdlc_frame_bytes[pos] if bool(end_byte", "return address.to_bytes(1, \"big\") @classmethod def destination_from_bytes(cls, frame_bytes: bytes, address_type: str):", "1 The first address is the destination address and the", "= attr.ib( default=None, validator=[validators.validate_hdlc_address] ) address_type: str = attr.ib( default=\"client\",", "= address_bytes[1] >> 1 elif destination_length == 4: address_bytes =", "the destination address and the seconds is the source address.", "int) -> bytes: return address.to_bytes(1, \"big\") @classmethod def destination_from_bytes(cls, frame_bytes:", "the seconds is the source address. :param frame_bytes: :return: \"\"\"", "address << 1 higher = None return higher, lower @staticmethod", "item[1]) for item in destination_positions_list ] for pos, _length in", "address_bytes = hdlc_frame_bytes[3 + destination_length : 5 + source_length] source_logical", "elif destination_length == 2: address_bytes = hdlc_frame_bytes[3:5] destination_logical = address_bytes[0]", "device (separate addressable entity within a physical device) makes up", "2 or 4 bytes long. the end byte is indicated", "bytes long. 
the end byte is indicated by the of", "elif source_length == 2: address_bytes = hdlc_frame_bytes[3 + destination_length :", "def destination_from_bytes(cls, frame_bytes: bytes, address_type: str): destination_address_data, _ = HdlcAddress.find_address_in_frame_bytes(", "(item[0] + destination_length, item[1]) for item in destination_positions_list ] for", ">> 1 source_physical = None elif source_length == 2: address_bytes", "HdlcAddress.parse_two_byte_address(address_bytes[:2]) source_physical = HdlcAddress.parse_two_byte_address(address_bytes[3:]) return ( (destination_logical, destination_physical, destination_length), (source_logical,", "source address source_length: int = 1 source_logical: int = 0", "@attr.s(auto_attribs=True) class HdlcAddress: \"\"\" A client address shall always be", "= 1 source_logical: int = 0 source_physical: Optional[int] = 0", "= address_bytes[0] >> 1 source_physical = None elif source_length ==", "list() if self.address_type == \"client\": # shift left 1 bit", "source_physical, address_type) @staticmethod def find_address_in_frame_bytes( hdlc_frame_bytes: bytes, ) -> Tuple[Tuple[int,", "= _length break continue if destination_length == 1: address_bytes =", "_length in source_position_list: end_byte = hdlc_frame_bytes[pos] if bool(end_byte & 0b00000001):", "The logical address must always be present. The physical address", "import * import attr from dlms_cosem.hdlc import validators @attr.s(auto_attribs=True) class", "| 0b00000001 out.extend( [logical_higher, logical_lower, physical_higher, physical_lower] ) else: #", "None elif source_length == 2: address_bytes = hdlc_frame_bytes[3 + destination_length", "0b00000001): # Found end byte: source_length = _length break continue", "lsb to mark end of address. out.append(((self.logical_address << 1) |", "be 1, 2 or 4 bytes long. 
the end byte", "def _address_to_byte(address: int) -> bytes: return address.to_bytes(1, \"big\") @classmethod def", "dlms_cosem.hdlc import validators @attr.s(auto_attribs=True) class HdlcAddress: \"\"\" A client address", "@staticmethod def find_address_in_frame_bytes( hdlc_frame_bytes: bytes, ) -> Tuple[Tuple[int, Optional[int], int],", "address makes up. :return: \"\"\" return len(self.to_bytes()) def to_bytes(self): out:", "bool(end_byte & 0b00000001): # Found end byte: source_length = _length", "the of the last byte LSB being 1 The first", "= None return higher, lower @staticmethod def _address_to_byte(address: int) ->", "byte: destination_length = _length break continue if destination_length == 1:", "Find source address source_length: int = 1 source_logical: int =", "out_bytes.append(address.to_bytes(1, \"big\")) return b\"\".join(out_bytes) @staticmethod def _split_address(address: int) -> Tuple[Optional[int],", ") address_type: str = attr.ib( default=\"client\", validator=[validators.validate_hdlc_address_type] ) @property def", "# server address type logical_higher, logical_lower = self._split_address(self.logical_address) if self.physical_address:", "is the destination address and the seconds is the source", "\"big\") destination_logical = address_bytes[0] >> 1 destination_physical = None elif", "destination_logical = address_bytes[0] >> 1 destination_physical = None elif destination_length", "in out: if address: out_bytes.append(address.to_bytes(1, \"big\")) return b\"\".join(out_bytes) @staticmethod def", "server address may be divided in two parts– may be", "self._split_address(self.logical_address) if self.physical_address: physical_higher, physical_lower = self._split_address( self.physical_address ) #", "two parts– may be divided into two parts: The logical", "the lsb to mark end of address. 
out.append(((self.logical_address << 1)", "source_length = source_address_data return cls(source_logical, source_physical, address_type) @staticmethod def find_address_in_frame_bytes(", "source_physical, source_length), ) @staticmethod def parse_two_byte_address(address_bytes: bytes): if address_bytes !=", "lower as end physical_lower = physical_lower | 0b00000001 out.extend( [logical_higher,", "| 0b00000001)) else: # server address type logical_higher, logical_lower =", "destination_physical: Optional[int] = 0 destination_positions_list: List[Tuple[int, int]] = [(3, 1),", ":param frame_bytes: :return: \"\"\" # Find destination address. destination_length: int", "int if address > 0b01111111: lower = (address & 0b0000000001111111)", "end byte: source_length = _length break continue if source_length ==", "out_bytes = list() for address in out: if address: out_bytes.append(address.to_bytes(1,", "& 0b00000001): # Found end byte: source_length = _length break", "bytes: return address.to_bytes(1, \"big\") @classmethod def destination_from_bytes(cls, frame_bytes: bytes, address_type:", "on a multi-drop) The physical address can be omitted it", "def parse_two_byte_address(address_bytes: bytes): if address_bytes != 2: raise ValueError(f\"Can only", "Optional[int], int]]: \"\"\" address can be 1, 2 or 4", "hdlc_frame_bytes[3 + destination_length : 7 + source_length] source_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2])", "1 destination_physical = None elif destination_length == 2: address_bytes =", "the upper HDLC address The logical address must always be", "= list() for address in out: if address: out_bytes.append(address.to_bytes(1, \"big\"))", "HdlcAddress.find_address_in_frame_bytes(frame_bytes) source_logical, source_physical, source_length = source_address_data return cls(source_logical, source_physical, address_type)", "None elif destination_length == 2: address_bytes = hdlc_frame_bytes[3:5] destination_logical =", "address The logical address must 
always be present. The physical", "List[Optional[int]] = list() if self.address_type == \"client\": # shift left", "out.extend([logical_higher, logical_lower]) out_bytes = list() for address in out: if", "= address_bytes[0] >> 1 lower = address_bytes[1] >> 1 return", "source_logical: int = 0 source_physical: Optional[int] = 0 source_position_list: List[Tuple[int,", "Optional[int], int], Tuple[int, Optional[int], int]]: \"\"\" address can be 1,", "source_logical, source_physical, source_length = source_address_data return cls(source_logical, source_physical, address_type) @staticmethod", "<< 1 higher = (address & 0b0011111110000000) >> 6 else:", "addressing more than one logical device within a single physical", "address_bytes = hdlc_frame_bytes[3:5] destination_logical = address_bytes[0] >> 1 destination_physical =", "== 1: address_bytes = hdlc_frame_bytes[3 + destination_length].to_bytes(1, \"big\") source_logical =", "be divided into two parts: The logical address to address", "1 source_physical = None elif source_length == 2: address_bytes =", "= [ (item[0] + destination_length, item[1]) for item in destination_positions_list", ">> 1 destination_physical = None elif destination_length == 2: address_bytes", "\"\"\" The number of bytes the address makes up. :return:", "= physical_lower | 0b00000001 out.extend( [logical_higher, logical_lower, physical_higher, physical_lower] )", "= attr.ib( default=\"client\", validator=[validators.validate_hdlc_address_type] ) @property def length(self): \"\"\" The", "physical device and to support the multi-drop configuration the server", "= (address & 0b0000000001111111) << 1 higher = (address &", "logical_address: int = attr.ib(validator=[validators.validate_hdlc_address]) physical_address: Optional[int] = attr.ib( default=None, validator=[validators.validate_hdlc_address]", "end. 
logical_lower = logical_lower | 0b00000001 out.extend([logical_higher, logical_lower]) out_bytes =", "= hdlc_frame_bytes[3 + destination_length].to_bytes(1, \"big\") source_logical = address_bytes[0] >> 1", "if self.address_type == \"client\": # shift left 1 bit and", "1 source_logical: int = 0 source_physical: Optional[int] = 0 source_position_list:", "used. \"\"\" logical_address: int = attr.ib(validator=[validators.validate_hdlc_address]) physical_address: Optional[int] = attr.ib(", "source_address_data = HdlcAddress.find_address_in_frame_bytes(frame_bytes) source_logical, source_physical, source_length = source_address_data return cls(source_logical,", "lower: int if address > 0b01111111: lower = (address &", "== \"client\": # shift left 1 bit and set the", "address_bytes[0] >> 1 destination_physical = None elif destination_length == 2:", "from typing import * import attr from dlms_cosem.hdlc import validators", "destination_physical = address_bytes[1] >> 1 elif destination_length == 4: address_bytes", "destination_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2]) destination_physical = HdlcAddress.parse_two_byte_address(address_bytes[3:]) # Find source address", "if address_bytes != 2: raise ValueError(f\"Can only parse 2 bytes", "0b00000001 out.extend([logical_higher, logical_lower]) out_bytes = list() for address in out:", "device and to support the multi-drop configuration the server address", "(4, 2), (6, 4)] address_bytes: bytes for pos, _length in", "The first address is the destination address and the seconds", "attr.ib( default=\"client\", validator=[validators.validate_hdlc_address_type] ) @property def length(self): \"\"\" The number", "frame_bytes: bytes, address_type: str): destination_address_data, _ = HdlcAddress.find_address_in_frame_bytes( frame_bytes )", "of address. 
out.append(((self.logical_address << 1) | 0b00000001)) else: # server", "more than one logical device within a single physical device", "set the lsb to mark end of address. out.append(((self.logical_address <<", "physical device ( a physical device on a multi-drop) The", "1) | 0b00000001)) else: # server address type logical_higher, logical_lower", "hdlc_frame_bytes[3 + destination_length].to_bytes(1, \"big\") source_logical = address_bytes[0] >> 1 source_physical", "byte LSB being 1 The first address is the destination", "it not used. \"\"\" logical_address: int = attr.ib(validator=[validators.validate_hdlc_address]) physical_address: Optional[int]", "cls(destination_logical, destination_physical, address_type) @classmethod def source_from_bytes(cls, frame_bytes: bytes, address_type: str):", ": 7 + source_length] source_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2]) source_physical = HdlcAddress.parse_two_byte_address(address_bytes[3:])", "address shall always be expressed on one byte. To enable", "validators @attr.s(auto_attribs=True) class HdlcAddress: \"\"\" A client address shall always", "# Find source address source_length: int = 1 source_logical: int", "of bytes the address makes up. 
:return: \"\"\" return len(self.to_bytes())", "Optional[int] = 0 destination_positions_list: List[Tuple[int, int]] = [(3, 1), (4,", "destination_length == 1: address_bytes = hdlc_frame_bytes[3].to_bytes(1, \"big\") destination_logical = address_bytes[0]", "logical address to address a logical device (separate addressable entity", "list() for address in out: if address: out_bytes.append(address.to_bytes(1, \"big\")) return", "the last byte LSB being 1 The first address is", "1: address_bytes = hdlc_frame_bytes[3].to_bytes(1, \"big\") destination_logical = address_bytes[0] >> 1", "int) -> Tuple[Optional[int], int]: higher: Optional[int] lower: int if address", "_ = HdlcAddress.find_address_in_frame_bytes( frame_bytes ) ( destination_logical, destination_physical, destination_length, )", "0b00000001): # Found end byte: destination_length = _length break continue", ") @staticmethod def parse_two_byte_address(address_bytes: bytes): if address_bytes != 2: raise", "to mark end of address. out.append(((self.logical_address << 1) | 0b00000001))", "= hdlc_frame_bytes[3].to_bytes(1, \"big\") destination_logical = address_bytes[0] >> 1 destination_physical =", "1 source_physical = address_bytes[1] >> 1 elif destination_length == 4:", "== 4: address_bytes = hdlc_frame_bytes[3 + destination_length : 7 +", "2: address_bytes = hdlc_frame_bytes[3 + destination_length : 5 + source_length]", "destination_length : 7 + source_length] source_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2]) source_physical =", "\"big\") @classmethod def destination_from_bytes(cls, frame_bytes: bytes, address_type: str): destination_address_data, _", "2), (6, 4)] address_bytes: bytes for pos, _length in destination_positions_list:", "logical_lower = self._split_address(self.logical_address) if self.physical_address: physical_higher, physical_lower = self._split_address( self.physical_address", "else: # no physical address so mark the logial as", "= self._split_address(self.logical_address) 
if self.physical_address: physical_higher, physical_lower = self._split_address( self.physical_address )", "hdlc_frame_bytes[3].to_bytes(1, \"big\") destination_logical = address_bytes[0] >> 1 destination_physical = None", "expressed on one byte. To enable addressing more than one", "<< 1 higher = None return higher, lower @staticmethod def", "= HdlcAddress.parse_two_byte_address(address_bytes[:2]) destination_physical = HdlcAddress.parse_two_byte_address(address_bytes[3:]) # Find source address source_length:", "logical device within a single physical device and to support", "device on a multi-drop) The physical address can be omitted", "def length(self): \"\"\" The number of bytes the address makes", "hdlc_frame_bytes[3:5] destination_logical = address_bytes[0] >> 1 destination_physical = address_bytes[1] >>", "source_physical = HdlcAddress.parse_two_byte_address(address_bytes[3:]) return ( (destination_logical, destination_physical, destination_length), (source_logical, source_physical,", "logical device (separate addressable entity within a physical device) makes", "destination_from_bytes(cls, frame_bytes: bytes, address_type: str): destination_address_data, _ = HdlcAddress.find_address_in_frame_bytes( frame_bytes", "address so mark the logial as end. 
logical_lower = logical_lower", "= address_bytes[0] >> 1 destination_physical = None elif destination_length ==", "byte: source_length = _length break continue if source_length == 1:", "address is the destination address and the seconds is the", "# Found end byte: destination_length = _length break continue if", "logical_lower = logical_lower | 0b00000001 out.extend([logical_higher, logical_lower]) out_bytes = list()", "self.address_type == \"client\": # shift left 1 bit and set", "_length break continue if destination_length == 1: address_bytes = hdlc_frame_bytes[3].to_bytes(1,", "address_type: str): destination_address_data, _ = HdlcAddress.find_address_in_frame_bytes( frame_bytes ) ( destination_logical,", "server address type logical_higher, logical_lower = self._split_address(self.logical_address) if self.physical_address: physical_higher,", "raise ValueError(f\"Can only parse 2 bytes for address\") upper =", "shall always be expressed on one byte. To enable addressing", "hdlc_frame_bytes[pos] if bool(end_byte & 0b00000001): # Found end byte: destination_length", "makes up the upper HDLC address The logical address must", "_length break continue if source_length == 1: address_bytes = hdlc_frame_bytes[3", "destination_logical = address_bytes[0] >> 1 destination_physical = address_bytes[1] >> 1", "a logical device (separate addressable entity within a physical device)", "out.append(((self.logical_address << 1) | 0b00000001)) else: # server address type", ") = destination_address_data return cls(destination_logical, destination_physical, address_type) @classmethod def source_from_bytes(cls,", "-> Tuple[Tuple[int, Optional[int], int], Tuple[int, Optional[int], int]]: \"\"\" address can", "frame_bytes: :return: \"\"\" # Find destination address. 
destination_length: int =", "hdlc_frame_bytes: bytes, ) -> Tuple[Tuple[int, Optional[int], int], Tuple[int, Optional[int], int]]:", "in destination_positions_list ] for pos, _length in source_position_list: end_byte =", "== 2: address_bytes = hdlc_frame_bytes[3 + destination_length : 5 +", "pos, _length in source_position_list: end_byte = hdlc_frame_bytes[pos] if bool(end_byte &", "cls(source_logical, source_physical, address_type) @staticmethod def find_address_in_frame_bytes( hdlc_frame_bytes: bytes, ) ->", "be expressed on one byte. To enable addressing more than", "+ source_length] source_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2]) source_physical = HdlcAddress.parse_two_byte_address(address_bytes[3:]) return (", "1, 2 or 4 bytes long. the end byte is", "import attr from dlms_cosem.hdlc import validators @attr.s(auto_attribs=True) class HdlcAddress: \"\"\"", "= 0 destination_positions_list: List[Tuple[int, int]] = [(3, 1), (4, 2),", "to support the multi-drop configuration the server address may be", "6 else: lower = address << 1 higher = None", "<< 1) | 0b00000001)) else: # server address type logical_higher,", "bytes for pos, _length in destination_positions_list: end_byte = hdlc_frame_bytes[pos] if", "the multi-drop configuration the server address may be divided in", "lower = address << 1 higher = None return higher,", "than one logical device within a single physical device and", "+ destination_length].to_bytes(1, \"big\") source_logical = address_bytes[0] >> 1 source_physical =", "address\") upper = address_bytes[0] >> 1 lower = address_bytes[1] >>", "= None elif source_length == 2: address_bytes = hdlc_frame_bytes[3 +", "only parse 2 bytes for address\") upper = address_bytes[0] >>", "= HdlcAddress.find_address_in_frame_bytes(frame_bytes) source_logical, source_physical, source_length = source_address_data return cls(source_logical, source_physical,", "pos, _length in destination_positions_list: end_byte = hdlc_frame_bytes[pos] if 
bool(end_byte &", "destination_length = _length break continue if destination_length == 1: address_bytes", "Tuple[Tuple[int, Optional[int], int], Tuple[int, Optional[int], int]]: \"\"\" address can be", "can be 1, 2 or 4 bytes long. the end", "The number of bytes the address makes up. :return: \"\"\"", "-> bytes: return address.to_bytes(1, \"big\") @classmethod def destination_from_bytes(cls, frame_bytes: bytes,", "must always be present. The physical address is used to", "always be present. The physical address is used to address", "Found end byte: destination_length = _length break continue if destination_length", "validator=[validators.validate_hdlc_address] ) address_type: str = attr.ib( default=\"client\", validator=[validators.validate_hdlc_address_type] ) @property", "source_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2]) source_physical = HdlcAddress.parse_two_byte_address(address_bytes[3:]) return ( (destination_logical, destination_physical,", "address may be divided in two parts– may be divided", "def find_address_in_frame_bytes( hdlc_frame_bytes: bytes, ) -> Tuple[Tuple[int, Optional[int], int], Tuple[int,", "one byte. To enable addressing more than one logical device", "to address a physical device ( a physical device on", "\"big\")) return b\"\".join(out_bytes) @staticmethod def _split_address(address: int) -> Tuple[Optional[int], int]:", ">> 1 elif destination_length == 4: address_bytes = hdlc_frame_bytes[3:7] destination_logical", "find_address_in_frame_bytes( hdlc_frame_bytes: bytes, ) -> Tuple[Tuple[int, Optional[int], int], Tuple[int, Optional[int],", "address can be 1, 2 or 4 bytes long. 
the", "source_position_list: List[Tuple[int, int]] = [ (item[0] + destination_length, item[1]) for", "physical_lower] ) else: # no physical address so mark the", "elif destination_length == 4: address_bytes = hdlc_frame_bytes[3 + destination_length :", "lower @staticmethod def _address_to_byte(address: int) -> bytes: return address.to_bytes(1, \"big\")", "= hdlc_frame_bytes[pos] if bool(end_byte & 0b00000001): # Found end byte:", "parse_two_byte_address(address_bytes: bytes): if address_bytes != 2: raise ValueError(f\"Can only parse", "0b01111111: lower = (address & 0b0000000001111111) << 1 higher =", "destination_physical = HdlcAddress.parse_two_byte_address(address_bytes[3:]) # Find source address source_length: int =", "address_type: str = attr.ib( default=\"client\", validator=[validators.validate_hdlc_address_type] ) @property def length(self):", "end physical_lower = physical_lower | 0b00000001 out.extend( [logical_higher, logical_lower, physical_higher,", "str): destination_address_data, _ = HdlcAddress.find_address_in_frame_bytes( frame_bytes ) ( destination_logical, destination_physical,", "# shift left 1 bit and set the lsb to", "Optional[int] = attr.ib( default=None, validator=[validators.validate_hdlc_address] ) address_type: str = attr.ib(", "= 0 source_physical: Optional[int] = 0 source_position_list: List[Tuple[int, int]] =", "present. 
The physical address is used to address a physical", "destination_length == 2: address_bytes = hdlc_frame_bytes[3:5] destination_logical = address_bytes[0] >>", "validator=[validators.validate_hdlc_address_type] ) @property def length(self): \"\"\" The number of bytes", "attr from dlms_cosem.hdlc import validators @attr.s(auto_attribs=True) class HdlcAddress: \"\"\" A", "(address & 0b0000000001111111) << 1 higher = (address & 0b0011111110000000)", "source_length = _length break continue if source_length == 1: address_bytes", "single physical device and to support the multi-drop configuration the", "first address is the destination address and the seconds is", "self.physical_address ) # mark physical lower as end physical_lower =", "address to address a logical device (separate addressable entity within", "a physical device on a multi-drop) The physical address can", "end of address. out.append(((self.logical_address << 1) | 0b00000001)) else: #", "0b0011111110000000) >> 6 else: lower = address << 1 higher", "HdlcAddress: \"\"\" A client address shall always be expressed on", "[logical_higher, logical_lower, physical_higher, physical_lower] ) else: # no physical address", "address_type) @staticmethod def find_address_in_frame_bytes( hdlc_frame_bytes: bytes, ) -> Tuple[Tuple[int, Optional[int],", "a physical device) makes up the upper HDLC address The", "higher: Optional[int] lower: int if address > 0b01111111: lower =", "[(3, 1), (4, 2), (6, 4)] address_bytes: bytes for pos,", "int = attr.ib(validator=[validators.validate_hdlc_address]) physical_address: Optional[int] = attr.ib( default=None, validator=[validators.validate_hdlc_address] )", "source_address_data return cls(source_logical, source_physical, address_type) @staticmethod def find_address_in_frame_bytes( hdlc_frame_bytes: bytes,", "logical_lower]) out_bytes = list() for address in out: if address:", "int = 1 source_logical: int = 0 source_physical: Optional[int] =", "+ source_length] source_logical = 
address_bytes[0] >> 1 source_physical = address_bytes[1]", "Found end byte: source_length = _length break continue if source_length", "[ (item[0] + destination_length, item[1]) for item in destination_positions_list ]", "return ( (destination_logical, destination_physical, destination_length), (source_logical, source_physical, source_length), ) @staticmethod", "int]] = [(3, 1), (4, 2), (6, 4)] address_bytes: bytes", "source_logical = address_bytes[0] >> 1 source_physical = address_bytes[1] >> 1", "address_bytes = hdlc_frame_bytes[3 + destination_length : 7 + source_length] source_logical", "may be divided in two parts– may be divided into", "of the last byte LSB being 1 The first address", "(source_logical, source_physical, source_length), ) @staticmethod def parse_two_byte_address(address_bytes: bytes): if address_bytes", "(address & 0b0011111110000000) >> 6 else: lower = address <<", "physical device) makes up the upper HDLC address The logical", "_split_address(address: int) -> Tuple[Optional[int], int]: higher: Optional[int] lower: int if", "int]] = [ (item[0] + destination_length, item[1]) for item in", ") else: # no physical address so mark the logial", "address_bytes[0] >> 1 source_physical = address_bytes[1] >> 1 elif destination_length", "_length in destination_positions_list: end_byte = hdlc_frame_bytes[pos] if bool(end_byte & 0b00000001):", "enable addressing more than one logical device within a single", "return b\"\".join(out_bytes) @staticmethod def _split_address(address: int) -> Tuple[Optional[int], int]: higher:", "1 lower = address_bytes[1] >> 1 return lower + (upper", "source_from_bytes(cls, frame_bytes: bytes, address_type: str): _, source_address_data = HdlcAddress.find_address_in_frame_bytes(frame_bytes) source_logical,", "7 + source_length] source_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2]) source_physical = HdlcAddress.parse_two_byte_address(address_bytes[3:]) return", "# mark physical lower as end physical_lower = 
physical_lower |", "source_length), ) @staticmethod def parse_two_byte_address(address_bytes: bytes): if address_bytes != 2:", "Optional[int] lower: int if address > 0b01111111: lower = (address", "Find destination address. destination_length: int = 1 destination_logical: int =", "mark the logial as end. logical_lower = logical_lower | 0b00000001", "List[Tuple[int, int]] = [ (item[0] + destination_length, item[1]) for item", "physical lower as end physical_lower = physical_lower | 0b00000001 out.extend(", "4: address_bytes = hdlc_frame_bytes[3 + destination_length : 7 + source_length]", "destination address and the seconds is the source address. :param", "can be omitted it not used. \"\"\" logical_address: int =", "@classmethod def source_from_bytes(cls, frame_bytes: bytes, address_type: str): _, source_address_data =", "address and the seconds is the source address. :param frame_bytes:", "number of bytes the address makes up. :return: \"\"\" return", "str = attr.ib( default=\"client\", validator=[validators.validate_hdlc_address_type] ) @property def length(self): \"\"\"", "return cls(source_logical, source_physical, address_type) @staticmethod def find_address_in_frame_bytes( hdlc_frame_bytes: bytes, )", "0b00000001 out.extend( [logical_higher, logical_lower, physical_higher, physical_lower] ) else: # no", "the address makes up. :return: \"\"\" return len(self.to_bytes()) def to_bytes(self):", "= 0 source_position_list: List[Tuple[int, int]] = [ (item[0] + destination_length,", "for address\") upper = address_bytes[0] >> 1 lower = address_bytes[1]", "multi-drop) The physical address can be omitted it not used.", "address can be omitted it not used. 
\"\"\" logical_address: int", "shift left 1 bit and set the lsb to mark", "def to_bytes(self): out: List[Optional[int]] = list() if self.address_type == \"client\":", "= list() if self.address_type == \"client\": # shift left 1", "= address_bytes[0] >> 1 source_physical = address_bytes[1] >> 1 elif", "break continue if source_length == 1: address_bytes = hdlc_frame_bytes[3 +", "destination_physical, destination_length), (source_logical, source_physical, source_length), ) @staticmethod def parse_two_byte_address(address_bytes: bytes):", "parts– may be divided into two parts: The logical address", "continue if destination_length == 1: address_bytes = hdlc_frame_bytes[3].to_bytes(1, \"big\") destination_logical", "source_length == 1: address_bytes = hdlc_frame_bytes[3 + destination_length].to_bytes(1, \"big\") source_logical", "hdlc_frame_bytes[3:7] destination_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2]) destination_physical = HdlcAddress.parse_two_byte_address(address_bytes[3:]) # Find source", "higher, lower @staticmethod def _address_to_byte(address: int) -> bytes: return address.to_bytes(1,", "not used. 
\"\"\" logical_address: int = attr.ib(validator=[validators.validate_hdlc_address]) physical_address: Optional[int] =", "ValueError(f\"Can only parse 2 bytes for address\") upper = address_bytes[0]", "1 bit and set the lsb to mark end of", "source_length] source_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2]) source_physical = HdlcAddress.parse_two_byte_address(address_bytes[3:]) return ( (destination_logical,", "1 higher = None return higher, lower @staticmethod def _address_to_byte(address:", "+ destination_length : 5 + source_length] source_logical = address_bytes[0] >>", "address_bytes != 2: raise ValueError(f\"Can only parse 2 bytes for", "destination_length, ) = destination_address_data return cls(destination_logical, destination_physical, address_type) @classmethod def", "if address: out_bytes.append(address.to_bytes(1, \"big\")) return b\"\".join(out_bytes) @staticmethod def _split_address(address: int)", "destination_length].to_bytes(1, \"big\") source_logical = address_bytes[0] >> 1 source_physical = None", "up the upper HDLC address The logical address must always", "or 4 bytes long. 
the end byte is indicated by", "if destination_length == 1: address_bytes = hdlc_frame_bytes[3].to_bytes(1, \"big\") destination_logical =", "from dlms_cosem.hdlc import validators @attr.s(auto_attribs=True) class HdlcAddress: \"\"\" A client", "address type logical_higher, logical_lower = self._split_address(self.logical_address) if self.physical_address: physical_higher, physical_lower", "destination_physical = None elif destination_length == 2: address_bytes = hdlc_frame_bytes[3:5]", "import validators @attr.s(auto_attribs=True) class HdlcAddress: \"\"\" A client address shall", "device) makes up the upper HDLC address The logical address", "else: # server address type logical_higher, logical_lower = self._split_address(self.logical_address) if", "source_length == 2: address_bytes = hdlc_frame_bytes[3 + destination_length : 5", "address > 0b01111111: lower = (address & 0b0000000001111111) << 1", "a single physical device and to support the multi-drop configuration", "destination_length: int = 1 destination_logical: int = 0 destination_physical: Optional[int]", "= address << 1 higher = None return higher, lower", "mark end of address. 
out.append(((self.logical_address << 1) | 0b00000001)) else:", "= HdlcAddress.parse_two_byte_address(address_bytes[3:]) # Find source address source_length: int = 1", "is indicated by the of the last byte LSB being", "0 source_position_list: List[Tuple[int, int]] = [ (item[0] + destination_length, item[1])", "if address > 0b01111111: lower = (address & 0b0000000001111111) <<", "1), (4, 2), (6, 4)] address_bytes: bytes for pos, _length", "\"\"\" address can be 1, 2 or 4 bytes long.", "+ destination_length, item[1]) for item in destination_positions_list ] for pos,", "is used to address a physical device ( a physical", "parse 2 bytes for address\") upper = address_bytes[0] >> 1", "1 elif destination_length == 4: address_bytes = hdlc_frame_bytes[3:7] destination_logical =", "2 bytes for address\") upper = address_bytes[0] >> 1 lower", "def _split_address(address: int) -> Tuple[Optional[int], int]: higher: Optional[int] lower: int", "as end. logical_lower = logical_lower | 0b00000001 out.extend([logical_higher, logical_lower]) out_bytes", "a physical device ( a physical device on a multi-drop)", "= [(3, 1), (4, 2), (6, 4)] address_bytes: bytes for", "for item in destination_positions_list ] for pos, _length in source_position_list:", "The logical address to address a logical device (separate addressable", ">> 1 source_physical = address_bytes[1] >> 1 elif destination_length ==", "support the multi-drop configuration the server address may be divided", "for address in out: if address: out_bytes.append(address.to_bytes(1, \"big\")) return b\"\".join(out_bytes)", "source_physical: Optional[int] = 0 source_position_list: List[Tuple[int, int]] = [ (item[0]", "(6, 4)] address_bytes: bytes for pos, _length in destination_positions_list: end_byte", "attr.ib( default=None, validator=[validators.validate_hdlc_address] ) address_type: str = attr.ib( default=\"client\", validator=[validators.validate_hdlc_address_type]", "divided in two parts– may be divided into two parts:", 
"logical address must always be present. The physical address is", "return len(self.to_bytes()) def to_bytes(self): out: List[Optional[int]] = list() if self.address_type", "address. :param frame_bytes: :return: \"\"\" # Find destination address. destination_length:", "continue if source_length == 1: address_bytes = hdlc_frame_bytes[3 + destination_length].to_bytes(1,", "= 1 destination_logical: int = 0 destination_physical: Optional[int] = 0", "physical device on a multi-drop) The physical address can be", "may be divided into two parts: The logical address to", "within a physical device) makes up the upper HDLC address", ":return: \"\"\" # Find destination address. destination_length: int = 1", "address a logical device (separate addressable entity within a physical", "makes up. :return: \"\"\" return len(self.to_bytes()) def to_bytes(self): out: List[Optional[int]]", "entity within a physical device) makes up the upper HDLC", "always be expressed on one byte. To enable addressing more", "HdlcAddress.parse_two_byte_address(address_bytes[:2]) destination_physical = HdlcAddress.parse_two_byte_address(address_bytes[3:]) # Find source address source_length: int", "bytes, address_type: str): _, source_address_data = HdlcAddress.find_address_in_frame_bytes(frame_bytes) source_logical, source_physical, source_length", "destination_physical, address_type) @classmethod def source_from_bytes(cls, frame_bytes: bytes, address_type: str): _,", "be present. The physical address is used to address a", "The physical address is used to address a physical device", "parts: The logical address to address a logical device (separate", "bytes the address makes up. 
:return: \"\"\" return len(self.to_bytes()) def", "lower = (address & 0b0000000001111111) << 1 higher = (address", "# Found end byte: source_length = _length break continue if", "@staticmethod def _split_address(address: int) -> Tuple[Optional[int], int]: higher: Optional[int] lower:", "int = 0 destination_physical: Optional[int] = 0 destination_positions_list: List[Tuple[int, int]]", "mark physical lower as end physical_lower = physical_lower | 0b00000001", "address_bytes[0] >> 1 destination_physical = address_bytes[1] >> 1 elif destination_length", "bytes, ) -> Tuple[Tuple[int, Optional[int], int], Tuple[int, Optional[int], int]]: \"\"\"", "address_bytes[1] >> 1 elif destination_length == 4: address_bytes = hdlc_frame_bytes[3", "bit and set the lsb to mark end of address.", "def source_from_bytes(cls, frame_bytes: bytes, address_type: str): _, source_address_data = HdlcAddress.find_address_in_frame_bytes(frame_bytes)", "source_physical = address_bytes[1] >> 1 elif destination_length == 4: address_bytes", "= address_bytes[0] >> 1 destination_physical = address_bytes[1] >> 1 elif", "out: List[Optional[int]] = list() if self.address_type == \"client\": # shift", "\"client\": # shift left 1 bit and set the lsb", "Tuple[Optional[int], int]: higher: Optional[int] lower: int if address > 0b01111111:", ") # mark physical lower as end physical_lower = physical_lower", "* import attr from dlms_cosem.hdlc import validators @attr.s(auto_attribs=True) class HdlcAddress:", "address source_length: int = 1 source_logical: int = 0 source_physical:", "self.physical_address: physical_higher, physical_lower = self._split_address( self.physical_address ) # mark physical", "bytes, address_type: str): destination_address_data, _ = HdlcAddress.find_address_in_frame_bytes( frame_bytes ) (", "physical_higher, physical_lower] ) else: # no physical address so mark", "lower = address_bytes[1] >> 1 return lower + (upper <<", "address_type) @classmethod def source_from_bytes(cls, frame_bytes: 
bytes, address_type: str): _, source_address_data", "Optional[int] = 0 source_position_list: List[Tuple[int, int]] = [ (item[0] +", "for pos, _length in source_position_list: end_byte = hdlc_frame_bytes[pos] if bool(end_byte", "and set the lsb to mark end of address. out.append(((self.logical_address", "== 2: address_bytes = hdlc_frame_bytes[3:5] destination_logical = address_bytes[0] >> 1", "source_length: int = 1 source_logical: int = 0 source_physical: Optional[int]", "logial as end. logical_lower = logical_lower | 0b00000001 out.extend([logical_higher, logical_lower])", "int], Tuple[int, Optional[int], int]]: \"\"\" address can be 1, 2", "\"\"\" return len(self.to_bytes()) def to_bytes(self): out: List[Optional[int]] = list() if", "frame_bytes ) ( destination_logical, destination_physical, destination_length, ) = destination_address_data return", "upper HDLC address The logical address must always be present.", "4: address_bytes = hdlc_frame_bytes[3:7] destination_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2]) destination_physical = HdlcAddress.parse_two_byte_address(address_bytes[3:])", "& 0b0011111110000000) >> 6 else: lower = address << 1", "len(self.to_bytes()) def to_bytes(self): out: List[Optional[int]] = list() if self.address_type ==", "@classmethod def destination_from_bytes(cls, frame_bytes: bytes, address_type: str): destination_address_data, _ =", "= 0 destination_physical: Optional[int] = 0 destination_positions_list: List[Tuple[int, int]] =", "source_physical, source_length = source_address_data return cls(source_logical, source_physical, address_type) @staticmethod def", "= HdlcAddress.parse_two_byte_address(address_bytes[:2]) source_physical = HdlcAddress.parse_two_byte_address(address_bytes[3:]) return ( (destination_logical, destination_physical, destination_length),", "destination_length, item[1]) for item in destination_positions_list ] for pos, _length", "int]: higher: Optional[int] lower: int if address > 0b01111111: 
lower", "in source_position_list: end_byte = hdlc_frame_bytes[pos] if bool(end_byte & 0b00000001): #", "two parts: The logical address to address a logical device", "(separate addressable entity within a physical device) makes up the", "up. :return: \"\"\" return len(self.to_bytes()) def to_bytes(self): out: List[Optional[int]] =", "default=\"client\", validator=[validators.validate_hdlc_address_type] ) @property def length(self): \"\"\" The number of", "higher = (address & 0b0011111110000000) >> 6 else: lower =", "type logical_higher, logical_lower = self._split_address(self.logical_address) if self.physical_address: physical_higher, physical_lower =", "= attr.ib(validator=[validators.validate_hdlc_address]) physical_address: Optional[int] = attr.ib( default=None, validator=[validators.validate_hdlc_address] ) address_type:", "address must always be present. The physical address is used", "the end byte is indicated by the of the last", "5 + source_length] source_logical = address_bytes[0] >> 1 source_physical =", "HdlcAddress.find_address_in_frame_bytes( frame_bytes ) ( destination_logical, destination_physical, destination_length, ) = destination_address_data", "end byte is indicated by the of the last byte", "indicated by the of the last byte LSB being 1", "0 destination_physical: Optional[int] = 0 destination_positions_list: List[Tuple[int, int]] = [(3,", "address_bytes: bytes for pos, _length in destination_positions_list: end_byte = hdlc_frame_bytes[pos]", "if bool(end_byte & 0b00000001): # Found end byte: source_length =", "the logial as end. logical_lower = logical_lower | 0b00000001 out.extend([logical_higher,", "seconds is the source address. 
:param frame_bytes: :return: \"\"\" #", "= source_address_data return cls(source_logical, source_physical, address_type) @staticmethod def find_address_in_frame_bytes( hdlc_frame_bytes:", "0b00000001)) else: # server address type logical_higher, logical_lower = self._split_address(self.logical_address)", "LSB being 1 The first address is the destination address", "client address shall always be expressed on one byte. To", "left 1 bit and set the lsb to mark end", "4 bytes long. the end byte is indicated by the", "to_bytes(self): out: List[Optional[int]] = list() if self.address_type == \"client\": #", "== 4: address_bytes = hdlc_frame_bytes[3:7] destination_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2]) destination_physical =", "0 source_physical: Optional[int] = 0 source_position_list: List[Tuple[int, int]] = [", "Tuple[int, Optional[int], int]]: \"\"\" address can be 1, 2 or", "hdlc_frame_bytes[pos] if bool(end_byte & 0b00000001): # Found end byte: source_length", "address.to_bytes(1, \"big\") @classmethod def destination_from_bytes(cls, frame_bytes: bytes, address_type: str): destination_address_data,", "used to address a physical device ( a physical device", "] for pos, _length in source_position_list: end_byte = hdlc_frame_bytes[pos] if", "destination_logical: int = 0 destination_physical: Optional[int] = 0 destination_positions_list: List[Tuple[int,", "address_type: str): _, source_address_data = HdlcAddress.find_address_in_frame_bytes(frame_bytes) source_logical, source_physical, source_length =", ">> 1 elif destination_length == 4: address_bytes = hdlc_frame_bytes[3 +" ]
[ "scale=10) elif properties == 'cdf': stats.cauchy.cdf(self.x, loc=4, scale=10) elif properties", "= ['distribution', 'properties'] params = [ ['cauchy', 'gamma', 'beta'], ['pdf',", "if changing the benchmark) time_distribution.version = \"fb22ae5386501008d945783921fe44aef3f82c1dafc40cddfaccaeec38b792b0\" class DescriptiveStats(Benchmark): param_names", "variances stats.ttest_ind(self.a, self.c) stats.ttest_ind(self.a, self.c, equal_var=False) class Distribution(Benchmark): param_names =", "np.random.rand(2,2) * 10 self.a = a def time_fisher_exact(self, alternative): oddsratio,", "scale=10) elif properties == 'cdf': stats.gamma.cdf(self.x, a=5, loc=4, scale=10) elif", "'fit': stats.gamma.fit(self.x, loc=4, scale=10) elif distribution == 'cauchy': if properties", "loc=4, scale=10) # Retain old benchmark results (remove this if", "with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) stats.anderson_ksamp(self.rand) class CorrelationFunctions(Benchmark): param_names = ['alternative']", "self.c, equal_var=False) class Distribution(Benchmark): param_names = ['distribution', 'properties'] params =", "if distribution == 'gamma': if properties == 'pdf': stats.gamma.pdf(self.x, a=5,", "a=5, b=3, loc=4, scale=10) elif properties == 'cdf': stats.beta.cdf(self.x, a=5,", "a=5, loc=4, scale=10) elif properties == 'rvs': stats.gamma.rvs(size=1000, a=5, loc=4,", "def time_fisher_exact(self, alternative): oddsratio, pvalue = stats.fisher_exact(self.a, alternative=alternative) class InferentialStats(Benchmark):", "= [np.random.normal(loc=i, size=1000) for i in range(3)] def time_anderson_ksamp(self): with", "scipy.stats as stats except ImportError: pass from .common import Benchmark", "loc=4, scale=10) elif properties == 'cdf': stats.beta.cdf(self.x, a=5, b=3, loc=4,", "loc=4, scale=10) elif properties == 'rvs': stats.cauchy.rvs(size=1000, loc=4, scale=10) elif", "warnings import numpy as np try: import scipy.stats as stats", "np.random.seed(12345678) self.a 
= stats.norm.rvs(loc=5, scale=10, size=500) self.b = stats.norm.rvs(loc=8, scale=10,", "from .common import Benchmark class Anderson_KSamp(Benchmark): def setup(self, *args): self.rand", "np.random.seed(12345678) self.x = np.random.rand(100) def time_distribution(self, distribution, properties): if distribution", "import Benchmark class Anderson_KSamp(Benchmark): def setup(self, *args): self.rand = [np.random.normal(loc=i,", "== 'gamma': if properties == 'pdf': stats.gamma.pdf(self.x, a=5, loc=4, scale=10)", "stats.gamma.cdf(self.x, a=5, loc=4, scale=10) elif properties == 'rvs': stats.gamma.rvs(size=1000, a=5,", "def time_ttest_ind_diff_var(self): # test different sized sample with different variances", "setup(self, *args): self.rand = [np.random.normal(loc=i, size=1000) for i in range(3)]", "'fit'] ] def setup(self, distribution, properties): np.random.seed(12345678) self.x = np.random.rand(100)", "setup(self, distribution, properties): np.random.seed(12345678) self.x = np.random.rand(100) def time_distribution(self, distribution,", "param_names = ['n_levels'] params = [ [10, 1000] ] def", "'cdf', 'rvs', 'fit'] ] def setup(self, distribution, properties): np.random.seed(12345678) self.x", "['two-sided', 'less', 'greater'] ] def setup(self, mode): a = np.random.rand(2,2)", "self.c) stats.ttest_ind(self.a, self.c, equal_var=False) class Distribution(Benchmark): param_names = ['distribution', 'properties']", "[ ['cauchy', 'gamma', 'beta'], ['pdf', 'cdf', 'rvs', 'fit'] ] def", "changing the benchmark) time_distribution.version = \"fb22ae5386501008d945783921fe44aef3f82c1dafc40cddfaccaeec38b792b0\" class DescriptiveStats(Benchmark): param_names =", "* 10 self.a = a def time_fisher_exact(self, alternative): oddsratio, pvalue", "b=3, loc=4, scale=10) elif properties == 'fit': stats.beta.fit(self.x, loc=4, scale=10)", "a=5, loc=4, scale=10) elif properties == 'cdf': stats.gamma.cdf(self.x, a=5, loc=4,", "['distribution', 'properties'] params = [ ['cauchy', 'gamma', 
'beta'], ['pdf', 'cdf',", "scale=10) elif properties == 'fit': stats.beta.fit(self.x, loc=4, scale=10) # Retain", "equal_var=False) def time_ttest_ind_diff_var(self): # test different sized sample with different", "Distribution(Benchmark): param_names = ['distribution', 'properties'] params = [ ['cauchy', 'gamma',", "try: import scipy.stats as stats except ImportError: pass from .common", "'cdf': stats.gamma.cdf(self.x, a=5, loc=4, scale=10) elif properties == 'rvs': stats.gamma.rvs(size=1000,", "pvalue = stats.fisher_exact(self.a, alternative=alternative) class InferentialStats(Benchmark): def setup(self): np.random.seed(12345678) self.a", "elif properties == 'cdf': stats.cauchy.cdf(self.x, loc=4, scale=10) elif properties ==", "warnings.simplefilter('ignore', UserWarning) stats.anderson_ksamp(self.rand) class CorrelationFunctions(Benchmark): param_names = ['alternative'] params =", "== 'cdf': stats.cauchy.cdf(self.x, loc=4, scale=10) elif properties == 'rvs': stats.cauchy.rvs(size=1000,", "stats.cauchy.fit(self.x, loc=4, scale=10) elif distribution == 'beta': if properties ==", "class CorrelationFunctions(Benchmark): param_names = ['alternative'] params = [ ['two-sided', 'less',", "Retain old benchmark results (remove this if changing the benchmark)", "range(3)] def time_anderson_ksamp(self): with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) stats.anderson_ksamp(self.rand) class CorrelationFunctions(Benchmark):", "[np.random.normal(loc=i, size=1000) for i in range(3)] def time_anderson_ksamp(self): with warnings.catch_warnings():", "InferentialStats(Benchmark): def setup(self): np.random.seed(12345678) self.a = stats.norm.rvs(loc=5, scale=10, size=500) self.b", "elif properties == 'rvs': stats.beta.rvs(size=1000, a=5, b=3, loc=4, scale=10) elif", "b=3, loc=4, scale=10) elif properties == 'cdf': stats.beta.cdf(self.x, a=5, b=3,", "def setup(self): np.random.seed(12345678) self.a = stats.norm.rvs(loc=5, scale=10, size=500) self.b =", 
"Anderson_KSamp(Benchmark): def setup(self, *args): self.rand = [np.random.normal(loc=i, size=1000) for i", "= stats.fisher_exact(self.a, alternative=alternative) class InferentialStats(Benchmark): def setup(self): np.random.seed(12345678) self.a =", "if properties == 'pdf': stats.gamma.pdf(self.x, a=5, loc=4, scale=10) elif properties", "== 'cdf': stats.gamma.cdf(self.x, a=5, loc=4, scale=10) elif properties == 'rvs':", "loc=4, scale=10) elif properties == 'rvs': stats.gamma.rvs(size=1000, a=5, loc=4, scale=10)", "== 'cdf': stats.beta.cdf(self.x, a=5, b=3, loc=4, scale=10) elif properties ==", "size=20) def time_ttest_ind_same_var(self): # test different sized sample with variances", "distribution == 'gamma': if properties == 'pdf': stats.gamma.pdf(self.x, a=5, loc=4,", "== 'pdf': stats.cauchy.pdf(self.x, loc=4, scale=10) elif properties == 'cdf': stats.cauchy.cdf(self.x,", "this if changing the benchmark) time_distribution.version = \"fb22ae5386501008d945783921fe44aef3f82c1dafc40cddfaccaeec38b792b0\" class DescriptiveStats(Benchmark):", "variances stats.ttest_ind(self.a, self.b) stats.ttest_ind(self.a, self.b, equal_var=False) def time_ttest_ind_diff_var(self): # test", "test different sized sample with variances stats.ttest_ind(self.a, self.b) stats.ttest_ind(self.a, self.b,", "as np try: import scipy.stats as stats except ImportError: pass", "setup(self, n_levels): np.random.seed(12345678) self.levels = np.random.randint(n_levels, size=(1000, 10)) def time_mode(self,", "(remove this if changing the benchmark) time_distribution.version = \"fb22ae5386501008d945783921fe44aef3f82c1dafc40cddfaccaeec38b792b0\" class", "class DescriptiveStats(Benchmark): param_names = ['n_levels'] params = [ [10, 1000]", "'rvs': stats.beta.rvs(size=1000, a=5, b=3, loc=4, scale=10) elif properties == 'fit':", "stats.beta.fit(self.x, loc=4, scale=10) # Retain old benchmark results (remove this", "numpy as np try: import scipy.stats as stats except ImportError:", "'pdf': 
stats.gamma.pdf(self.x, a=5, loc=4, scale=10) elif properties == 'cdf': stats.gamma.cdf(self.x,", "time_ttest_ind_same_var(self): # test different sized sample with variances stats.ttest_ind(self.a, self.b)", "scale=10) # Retain old benchmark results (remove this if changing", "time_fisher_exact(self, alternative): oddsratio, pvalue = stats.fisher_exact(self.a, alternative=alternative) class InferentialStats(Benchmark): def", "properties == 'pdf': stats.gamma.pdf(self.x, a=5, loc=4, scale=10) elif properties ==", "properties == 'pdf': stats.cauchy.pdf(self.x, loc=4, scale=10) elif properties == 'cdf':", "properties == 'cdf': stats.cauchy.cdf(self.x, loc=4, scale=10) elif properties == 'rvs':", "self.b) stats.ttest_ind(self.a, self.b, equal_var=False) def time_ttest_ind_diff_var(self): # test different sized", "# test different sized sample with different variances stats.ttest_ind(self.a, self.c)", "scale=10) elif distribution == 'beta': if properties == 'pdf': stats.beta.pdf(self.x,", "= [ ['two-sided', 'less', 'greater'] ] def setup(self, mode): a", "different variances stats.ttest_ind(self.a, self.c) stats.ttest_ind(self.a, self.c, equal_var=False) class Distribution(Benchmark): param_names", "stats.fisher_exact(self.a, alternative=alternative) class InferentialStats(Benchmark): def setup(self): np.random.seed(12345678) self.a = stats.norm.rvs(loc=5,", "stats.ttest_ind(self.a, self.b, equal_var=False) def time_ttest_ind_diff_var(self): # test different sized sample", "stats.beta.pdf(self.x, a=5, b=3, loc=4, scale=10) elif properties == 'cdf': stats.beta.cdf(self.x,", "scale=10, size=20) self.c = stats.norm.rvs(loc=8, scale=20, size=20) def time_ttest_ind_same_var(self): #", "scale=10) elif properties == 'cdf': stats.beta.cdf(self.x, a=5, b=3, loc=4, scale=10)", "class InferentialStats(Benchmark): def setup(self): np.random.seed(12345678) self.a = stats.norm.rvs(loc=5, scale=10, size=500)", "time_distribution.version = 
\"fb22ae5386501008d945783921fe44aef3f82c1dafc40cddfaccaeec38b792b0\" class DescriptiveStats(Benchmark): param_names = ['n_levels'] params =", "np.random.seed(12345678) self.levels = np.random.randint(n_levels, size=(1000, 10)) def time_mode(self, n_levels): stats.mode(self.levels,", "'cdf': stats.beta.cdf(self.x, a=5, b=3, loc=4, scale=10) elif properties == 'rvs':", "mode): a = np.random.rand(2,2) * 10 self.a = a def", "== 'pdf': stats.beta.pdf(self.x, a=5, b=3, loc=4, scale=10) elif properties ==", "stats.beta.cdf(self.x, a=5, b=3, loc=4, scale=10) elif properties == 'rvs': stats.beta.rvs(size=1000,", "'rvs': stats.gamma.rvs(size=1000, a=5, loc=4, scale=10) elif properties == 'fit': stats.gamma.fit(self.x,", "scale=10) elif properties == 'fit': stats.cauchy.fit(self.x, loc=4, scale=10) elif distribution", "stats.ttest_ind(self.a, self.c) stats.ttest_ind(self.a, self.c, equal_var=False) class Distribution(Benchmark): param_names = ['distribution',", "sized sample with different variances stats.ttest_ind(self.a, self.c) stats.ttest_ind(self.a, self.c, equal_var=False)", "loc=4, scale=10) elif distribution == 'cauchy': if properties == 'pdf':", "== 'cauchy': if properties == 'pdf': stats.cauchy.pdf(self.x, loc=4, scale=10) elif", "stats.norm.rvs(loc=8, scale=10, size=20) self.c = stats.norm.rvs(loc=8, scale=20, size=20) def time_ttest_ind_same_var(self):", "stats.beta.rvs(size=1000, a=5, b=3, loc=4, scale=10) elif properties == 'fit': stats.beta.fit(self.x,", "with variances stats.ttest_ind(self.a, self.b) stats.ttest_ind(self.a, self.b, equal_var=False) def time_ttest_ind_diff_var(self): #", "self.b = stats.norm.rvs(loc=8, scale=10, size=20) self.c = stats.norm.rvs(loc=8, scale=20, size=20)", "print_function import warnings import numpy as np try: import scipy.stats", "properties == 'rvs': stats.gamma.rvs(size=1000, a=5, loc=4, scale=10) elif properties ==", "self.a = a def time_fisher_exact(self, alternative): oddsratio, pvalue = stats.fisher_exact(self.a,", 
"division, absolute_import, print_function import warnings import numpy as np try:", "param_names = ['distribution', 'properties'] params = [ ['cauchy', 'gamma', 'beta'],", "= np.random.rand(100) def time_distribution(self, distribution, properties): if distribution == 'gamma':", "= ['n_levels'] params = [ [10, 1000] ] def setup(self,", "self.rand = [np.random.normal(loc=i, size=1000) for i in range(3)] def time_anderson_ksamp(self):", "stats.gamma.rvs(size=1000, a=5, loc=4, scale=10) elif properties == 'fit': stats.gamma.fit(self.x, loc=4,", "stats.cauchy.rvs(size=1000, loc=4, scale=10) elif properties == 'fit': stats.cauchy.fit(self.x, loc=4, scale=10)", "absolute_import, print_function import warnings import numpy as np try: import", "__future__ import division, absolute_import, print_function import warnings import numpy as", "== 'pdf': stats.gamma.pdf(self.x, a=5, loc=4, scale=10) elif properties == 'cdf':", ".common import Benchmark class Anderson_KSamp(Benchmark): def setup(self, *args): self.rand =", "test different sized sample with different variances stats.ttest_ind(self.a, self.c) stats.ttest_ind(self.a,", "different sized sample with different variances stats.ttest_ind(self.a, self.c) stats.ttest_ind(self.a, self.c,", "old benchmark results (remove this if changing the benchmark) time_distribution.version", "properties): np.random.seed(12345678) self.x = np.random.rand(100) def time_distribution(self, distribution, properties): if", "warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) stats.anderson_ksamp(self.rand) class CorrelationFunctions(Benchmark): param_names = ['alternative'] params", "stats.anderson_ksamp(self.rand) class CorrelationFunctions(Benchmark): param_names = ['alternative'] params = [ ['two-sided',", "import numpy as np try: import scipy.stats as stats except", "['alternative'] params = [ ['two-sided', 'less', 'greater'] ] def setup(self,", "with different variances stats.ttest_ind(self.a, self.c) 
stats.ttest_ind(self.a, self.c, equal_var=False) class Distribution(Benchmark):", "elif properties == 'fit': stats.cauchy.fit(self.x, loc=4, scale=10) elif distribution ==", "Benchmark class Anderson_KSamp(Benchmark): def setup(self, *args): self.rand = [np.random.normal(loc=i, size=1000)", "from __future__ import division, absolute_import, print_function import warnings import numpy", "in range(3)] def time_anderson_ksamp(self): with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) stats.anderson_ksamp(self.rand) class", "'fit': stats.cauchy.fit(self.x, loc=4, scale=10) elif distribution == 'beta': if properties", "= stats.norm.rvs(loc=8, scale=10, size=20) self.c = stats.norm.rvs(loc=8, scale=20, size=20) def", "== 'rvs': stats.gamma.rvs(size=1000, a=5, loc=4, scale=10) elif properties == 'fit':", "setup(self, mode): a = np.random.rand(2,2) * 10 self.a = a", "properties): if distribution == 'gamma': if properties == 'pdf': stats.gamma.pdf(self.x,", "elif properties == 'rvs': stats.gamma.rvs(size=1000, a=5, loc=4, scale=10) elif properties", "== 'beta': if properties == 'pdf': stats.beta.pdf(self.x, a=5, b=3, loc=4,", "scale=10) elif properties == 'rvs': stats.cauchy.rvs(size=1000, loc=4, scale=10) elif properties", "properties == 'fit': stats.cauchy.fit(self.x, loc=4, scale=10) elif distribution == 'beta':", "benchmark results (remove this if changing the benchmark) time_distribution.version =", "[ [10, 1000] ] def setup(self, n_levels): np.random.seed(12345678) self.levels =", "class Anderson_KSamp(Benchmark): def setup(self, *args): self.rand = [np.random.normal(loc=i, size=1000) for", "stats.ttest_ind(self.a, self.c, equal_var=False) class Distribution(Benchmark): param_names = ['distribution', 'properties'] params", "stats.norm.rvs(loc=8, scale=20, size=20) def time_ttest_ind_same_var(self): # test different sized sample", "properties == 'rvs': stats.cauchy.rvs(size=1000, loc=4, scale=10) elif properties == 'fit':", "'properties'] 
params = [ ['cauchy', 'gamma', 'beta'], ['pdf', 'cdf', 'rvs',", "scale=10) elif distribution == 'cauchy': if properties == 'pdf': stats.cauchy.pdf(self.x,", "sample with variances stats.ttest_ind(self.a, self.b) stats.ttest_ind(self.a, self.b, equal_var=False) def time_ttest_ind_diff_var(self):", "self.c = stats.norm.rvs(loc=8, scale=20, size=20) def time_ttest_ind_same_var(self): # test different", "== 'rvs': stats.beta.rvs(size=1000, a=5, b=3, loc=4, scale=10) elif properties ==", "# test different sized sample with variances stats.ttest_ind(self.a, self.b) stats.ttest_ind(self.a,", "size=1000) for i in range(3)] def time_anderson_ksamp(self): with warnings.catch_warnings(): warnings.simplefilter('ignore',", "[10, 1000] ] def setup(self, n_levels): np.random.seed(12345678) self.levels = np.random.randint(n_levels,", "def time_ttest_ind_same_var(self): # test different sized sample with variances stats.ttest_ind(self.a,", "a = np.random.rand(2,2) * 10 self.a = a def time_fisher_exact(self,", "elif distribution == 'cauchy': if properties == 'pdf': stats.cauchy.pdf(self.x, loc=4,", "n_levels): np.random.seed(12345678) self.levels = np.random.randint(n_levels, size=(1000, 10)) def time_mode(self, n_levels):", "properties == 'pdf': stats.beta.pdf(self.x, a=5, b=3, loc=4, scale=10) elif properties", "loc=4, scale=10) elif properties == 'fit': stats.gamma.fit(self.x, loc=4, scale=10) elif", "def setup(self, distribution, properties): np.random.seed(12345678) self.x = np.random.rand(100) def time_distribution(self,", "a=5, b=3, loc=4, scale=10) elif properties == 'fit': stats.beta.fit(self.x, loc=4,", "import division, absolute_import, print_function import warnings import numpy as np", "params = [ ['cauchy', 'gamma', 'beta'], ['pdf', 'cdf', 'rvs', 'fit']", "elif distribution == 'beta': if properties == 'pdf': stats.beta.pdf(self.x, a=5,", "'rvs', 'fit'] ] def setup(self, distribution, properties): np.random.seed(12345678) self.x =", "'beta'], ['pdf', 'cdf', 'rvs', 
'fit'] ] def setup(self, distribution, properties):", "scale=10, size=500) self.b = stats.norm.rvs(loc=8, scale=10, size=20) self.c = stats.norm.rvs(loc=8,", "time_distribution(self, distribution, properties): if distribution == 'gamma': if properties ==", "'gamma', 'beta'], ['pdf', 'cdf', 'rvs', 'fit'] ] def setup(self, distribution,", "oddsratio, pvalue = stats.fisher_exact(self.a, alternative=alternative) class InferentialStats(Benchmark): def setup(self): np.random.seed(12345678)", "params = [ ['two-sided', 'less', 'greater'] ] def setup(self, mode):", "] def setup(self, n_levels): np.random.seed(12345678) self.levels = np.random.randint(n_levels, size=(1000, 10))", "= a def time_fisher_exact(self, alternative): oddsratio, pvalue = stats.fisher_exact(self.a, alternative=alternative)", "= [ ['cauchy', 'gamma', 'beta'], ['pdf', 'cdf', 'rvs', 'fit'] ]", "elif properties == 'fit': stats.gamma.fit(self.x, loc=4, scale=10) elif distribution ==", "stats.cauchy.pdf(self.x, loc=4, scale=10) elif properties == 'cdf': stats.cauchy.cdf(self.x, loc=4, scale=10)", "a def time_fisher_exact(self, alternative): oddsratio, pvalue = stats.fisher_exact(self.a, alternative=alternative) class", "sized sample with variances stats.ttest_ind(self.a, self.b) stats.ttest_ind(self.a, self.b, equal_var=False) def", "'pdf': stats.cauchy.pdf(self.x, loc=4, scale=10) elif properties == 'cdf': stats.cauchy.cdf(self.x, loc=4,", "= stats.norm.rvs(loc=8, scale=20, size=20) def time_ttest_ind_same_var(self): # test different sized", "'cauchy': if properties == 'pdf': stats.cauchy.pdf(self.x, loc=4, scale=10) elif properties", "elif properties == 'cdf': stats.beta.cdf(self.x, a=5, b=3, loc=4, scale=10) elif", "elif properties == 'rvs': stats.cauchy.rvs(size=1000, loc=4, scale=10) elif properties ==", "as stats except ImportError: pass from .common import Benchmark class", "scale=10) elif properties == 'fit': stats.gamma.fit(self.x, loc=4, scale=10) elif distribution", "time_anderson_ksamp(self): 
with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) stats.anderson_ksamp(self.rand) class CorrelationFunctions(Benchmark): param_names =", "np try: import scipy.stats as stats except ImportError: pass from", "stats.gamma.fit(self.x, loc=4, scale=10) elif distribution == 'cauchy': if properties ==", "*args): self.rand = [np.random.normal(loc=i, size=1000) for i in range(3)] def", "distribution == 'cauchy': if properties == 'pdf': stats.cauchy.pdf(self.x, loc=4, scale=10)", "if properties == 'pdf': stats.beta.pdf(self.x, a=5, b=3, loc=4, scale=10) elif", "the benchmark) time_distribution.version = \"fb22ae5386501008d945783921fe44aef3f82c1dafc40cddfaccaeec38b792b0\" class DescriptiveStats(Benchmark): param_names = ['n_levels']", "= \"fb22ae5386501008d945783921fe44aef3f82c1dafc40cddfaccaeec38b792b0\" class DescriptiveStats(Benchmark): param_names = ['n_levels'] params = [", "scale=10) elif properties == 'rvs': stats.gamma.rvs(size=1000, a=5, loc=4, scale=10) elif", "['n_levels'] params = [ [10, 1000] ] def setup(self, n_levels):", "== 'fit': stats.gamma.fit(self.x, loc=4, scale=10) elif distribution == 'cauchy': if", "alternative): oddsratio, pvalue = stats.fisher_exact(self.a, alternative=alternative) class InferentialStats(Benchmark): def setup(self):", "a=5, b=3, loc=4, scale=10) elif properties == 'rvs': stats.beta.rvs(size=1000, a=5,", "scale=10) elif properties == 'rvs': stats.beta.rvs(size=1000, a=5, b=3, loc=4, scale=10)", "size=500) self.b = stats.norm.rvs(loc=8, scale=10, size=20) self.c = stats.norm.rvs(loc=8, scale=20,", "DescriptiveStats(Benchmark): param_names = ['n_levels'] params = [ [10, 1000] ]", "'pdf': stats.beta.pdf(self.x, a=5, b=3, loc=4, scale=10) elif properties == 'cdf':", "except ImportError: pass from .common import Benchmark class Anderson_KSamp(Benchmark): def", "def setup(self, *args): self.rand = [np.random.normal(loc=i, size=1000) for i in", "'gamma': if properties == 'pdf': stats.gamma.pdf(self.x, a=5, loc=4, 
scale=10) elif", "= stats.norm.rvs(loc=5, scale=10, size=500) self.b = stats.norm.rvs(loc=8, scale=10, size=20) self.c", "time_ttest_ind_diff_var(self): # test different sized sample with different variances stats.ttest_ind(self.a,", "i in range(3)] def time_anderson_ksamp(self): with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) stats.anderson_ksamp(self.rand)", "== 'rvs': stats.cauchy.rvs(size=1000, loc=4, scale=10) elif properties == 'fit': stats.cauchy.fit(self.x,", "# Retain old benchmark results (remove this if changing the", "params = [ [10, 1000] ] def setup(self, n_levels): np.random.seed(12345678)", "properties == 'rvs': stats.beta.rvs(size=1000, a=5, b=3, loc=4, scale=10) elif properties", "ImportError: pass from .common import Benchmark class Anderson_KSamp(Benchmark): def setup(self,", "def time_anderson_ksamp(self): with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) stats.anderson_ksamp(self.rand) class CorrelationFunctions(Benchmark): param_names", "scale=20, size=20) def time_ttest_ind_same_var(self): # test different sized sample with", "class Distribution(Benchmark): param_names = ['distribution', 'properties'] params = [ ['cauchy',", "distribution, properties): np.random.seed(12345678) self.x = np.random.rand(100) def time_distribution(self, distribution, properties):", "import scipy.stats as stats except ImportError: pass from .common import", "UserWarning) stats.anderson_ksamp(self.rand) class CorrelationFunctions(Benchmark): param_names = ['alternative'] params = [", "] def setup(self, mode): a = np.random.rand(2,2) * 10 self.a", "= np.random.rand(2,2) * 10 self.a = a def time_fisher_exact(self, alternative):", "'greater'] ] def setup(self, mode): a = np.random.rand(2,2) * 10", "elif properties == 'fit': stats.beta.fit(self.x, loc=4, scale=10) # Retain old", "'cdf': stats.cauchy.cdf(self.x, loc=4, scale=10) elif properties == 'rvs': stats.cauchy.rvs(size=1000, loc=4,", 
"CorrelationFunctions(Benchmark): param_names = ['alternative'] params = [ ['two-sided', 'less', 'greater']", "properties == 'cdf': stats.gamma.cdf(self.x, a=5, loc=4, scale=10) elif properties ==", "loc=4, scale=10) elif properties == 'fit': stats.cauchy.fit(self.x, loc=4, scale=10) elif", "def setup(self, mode): a = np.random.rand(2,2) * 10 self.a =", "self.b, equal_var=False) def time_ttest_ind_diff_var(self): # test different sized sample with", "stats.ttest_ind(self.a, self.b) stats.ttest_ind(self.a, self.b, equal_var=False) def time_ttest_ind_diff_var(self): # test different", "distribution, properties): if distribution == 'gamma': if properties == 'pdf':", "b=3, loc=4, scale=10) elif properties == 'rvs': stats.beta.rvs(size=1000, a=5, b=3,", "== 'fit': stats.beta.fit(self.x, loc=4, scale=10) # Retain old benchmark results", "'beta': if properties == 'pdf': stats.beta.pdf(self.x, a=5, b=3, loc=4, scale=10)", "alternative=alternative) class InferentialStats(Benchmark): def setup(self): np.random.seed(12345678) self.a = stats.norm.rvs(loc=5, scale=10,", "'rvs': stats.cauchy.rvs(size=1000, loc=4, scale=10) elif properties == 'fit': stats.cauchy.fit(self.x, loc=4,", "for i in range(3)] def time_anderson_ksamp(self): with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning)", "equal_var=False) class Distribution(Benchmark): param_names = ['distribution', 'properties'] params = [", "setup(self): np.random.seed(12345678) self.a = stats.norm.rvs(loc=5, scale=10, size=500) self.b = stats.norm.rvs(loc=8,", "loc=4, scale=10) elif distribution == 'beta': if properties == 'pdf':", "param_names = ['alternative'] params = [ ['two-sided', 'less', 'greater'] ]", "loc=4, scale=10) elif properties == 'fit': stats.beta.fit(self.x, loc=4, scale=10) #", "self.x = np.random.rand(100) def time_distribution(self, distribution, properties): if distribution ==", "= ['alternative'] params = [ ['two-sided', 'less', 'greater'] ] def", "benchmark) 
time_distribution.version = \"fb22ae5386501008d945783921fe44aef3f82c1dafc40cddfaccaeec38b792b0\" class DescriptiveStats(Benchmark): param_names = ['n_levels'] params", "loc=4, scale=10) elif properties == 'rvs': stats.beta.rvs(size=1000, a=5, b=3, loc=4,", "= [ [10, 1000] ] def setup(self, n_levels): np.random.seed(12345678) self.levels", "pass from .common import Benchmark class Anderson_KSamp(Benchmark): def setup(self, *args):", "stats except ImportError: pass from .common import Benchmark class Anderson_KSamp(Benchmark):", "stats.gamma.pdf(self.x, a=5, loc=4, scale=10) elif properties == 'cdf': stats.gamma.cdf(self.x, a=5,", "properties == 'cdf': stats.beta.cdf(self.x, a=5, b=3, loc=4, scale=10) elif properties", "results (remove this if changing the benchmark) time_distribution.version = \"fb22ae5386501008d945783921fe44aef3f82c1dafc40cddfaccaeec38b792b0\"", "self.levels = np.random.randint(n_levels, size=(1000, 10)) def time_mode(self, n_levels): stats.mode(self.levels, axis=0)", "stats.cauchy.cdf(self.x, loc=4, scale=10) elif properties == 'rvs': stats.cauchy.rvs(size=1000, loc=4, scale=10)", "10 self.a = a def time_fisher_exact(self, alternative): oddsratio, pvalue =", "size=20) self.c = stats.norm.rvs(loc=8, scale=20, size=20) def time_ttest_ind_same_var(self): # test", "different sized sample with variances stats.ttest_ind(self.a, self.b) stats.ttest_ind(self.a, self.b, equal_var=False)", "elif properties == 'cdf': stats.gamma.cdf(self.x, a=5, loc=4, scale=10) elif properties", "['pdf', 'cdf', 'rvs', 'fit'] ] def setup(self, distribution, properties): np.random.seed(12345678)", "stats.norm.rvs(loc=5, scale=10, size=500) self.b = stats.norm.rvs(loc=8, scale=10, size=20) self.c =", "a=5, loc=4, scale=10) elif properties == 'fit': stats.gamma.fit(self.x, loc=4, scale=10)", "sample with different variances stats.ttest_ind(self.a, self.c) stats.ttest_ind(self.a, self.c, equal_var=False) class", "== 'fit': stats.cauchy.fit(self.x, loc=4, scale=10) elif 
distribution == 'beta': if", "distribution == 'beta': if properties == 'pdf': stats.beta.pdf(self.x, a=5, b=3,", "def time_distribution(self, distribution, properties): if distribution == 'gamma': if properties", "loc=4, scale=10) elif properties == 'cdf': stats.cauchy.cdf(self.x, loc=4, scale=10) elif", "'fit': stats.beta.fit(self.x, loc=4, scale=10) # Retain old benchmark results (remove", "'less', 'greater'] ] def setup(self, mode): a = np.random.rand(2,2) *", "self.a = stats.norm.rvs(loc=5, scale=10, size=500) self.b = stats.norm.rvs(loc=8, scale=10, size=20)", "\"fb22ae5386501008d945783921fe44aef3f82c1dafc40cddfaccaeec38b792b0\" class DescriptiveStats(Benchmark): param_names = ['n_levels'] params = [ [10,", "1000] ] def setup(self, n_levels): np.random.seed(12345678) self.levels = np.random.randint(n_levels, size=(1000,", "if properties == 'pdf': stats.cauchy.pdf(self.x, loc=4, scale=10) elif properties ==", "['cauchy', 'gamma', 'beta'], ['pdf', 'cdf', 'rvs', 'fit'] ] def setup(self,", "properties == 'fit': stats.gamma.fit(self.x, loc=4, scale=10) elif distribution == 'cauchy':", "loc=4, scale=10) elif properties == 'cdf': stats.gamma.cdf(self.x, a=5, loc=4, scale=10)", "[ ['two-sided', 'less', 'greater'] ] def setup(self, mode): a =", "import warnings import numpy as np try: import scipy.stats as", "] def setup(self, distribution, properties): np.random.seed(12345678) self.x = np.random.rand(100) def", "properties == 'fit': stats.beta.fit(self.x, loc=4, scale=10) # Retain old benchmark", "def setup(self, n_levels): np.random.seed(12345678) self.levels = np.random.randint(n_levels, size=(1000, 10)) def", "np.random.rand(100) def time_distribution(self, distribution, properties): if distribution == 'gamma': if" ]
[ "SIMPLICITY) def has_module_perms(self, app_label): return True class Client(AbstractBaseUser): email =", "= models.BooleanField(default=False) is_staff = models.BooleanField(default=False) # notice the absence of", "have a username') user = self.model( email=self.normalize_email(email), username=username, ) user.set_password(password)", "app_label): return True class Client(AbstractBaseUser): email = models.EmailField(verbose_name='email address',max_length=255,unique=True) contact", "models.BooleanField(default=False) is_staff = models.BooleanField(default=False) # notice the absence of a", "to keep it simple all admin have ALL permissons def", "['username'] # Email & Password are required by default. def", "that is built in. objects = MyAccountManager() USERNAME_FIELD = 'email'", "= MyAccountManager() USERNAME_FIELD = 'email' REQUIRED_FIELDS = ['username'] # Email", "= models.CharField(max_length=30) location = models.CharField(max_length=30) profession = models.CharField(max_length=30) experience =", "models.IntegerField(null=True, blank=True) username = models.CharField(max_length=30) location = models.CharField(max_length=30) date_joined =", "image = models.ImageField(default='profile1.png' ,upload_to='profiles/images/', null=True, blank=True) # notice the absence", "= models.BooleanField(default=True) is_admin = models.BooleanField(default=False) is_staff = models.BooleanField(default=False) image =", "admin have ALL permissons def has_perm(self, perm, obj=None): return self.is_admin", "return user class UserVendor(AbstractBaseUser): email = models.EmailField(verbose_name='email address',max_length=255,unique=True) contact =", "user.is_superuser = True user.save(using=self._db) return user class UserVendor(AbstractBaseUser): email =", "joined', auto_now_add=True) is_active = models.BooleanField(default=True) is_admin = models.BooleanField(default=False) is_staff =", "# Email & Password are required by default. def __str__(self):", "by default. 
def __str__(self): return self.email # For checking permissions.", "class Client(AbstractBaseUser): email = models.EmailField(verbose_name='email address',max_length=255,unique=True) contact = models.IntegerField(null=True, blank=True)", "verified_id = models.CharField(max_length=255) date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True) is_active =", "is_admin = models.BooleanField(default=False) is_staff = models.BooleanField(default=False) image = models.ImageField(default='profile1.png' ,upload_to='profiles/images/',", "Email & Password are required by default. def __str__(self): return", "username = models.CharField(max_length=30) location = models.CharField(max_length=30) profession = models.CharField(max_length=30) experience", "username: raise ValueError('Users must have a username') user = self.model(", ") user.set_password(password) user.save(using=self._db) return user def create_superuser(self, email, username, password):", "FOR SIMPLICITY) def has_module_perms(self, app_label): return True class Client(AbstractBaseUser): email", "password=None): if not email: raise ValueError('Users must have an email", "def __str__(self): return self.email # For checking permissions. to keep", "= models.CharField(max_length=30) experience = models.CharField(max_length=30) verified_id = models.CharField(max_length=255) date_joined =", "is_staff = models.BooleanField(default=False) # notice the absence of a \"Password", "= self.create_user( email=self.normalize_email(email), password=password, username=username, ) user.is_admin = True user.is_staff", "True user.save(using=self._db) return user class UserVendor(AbstractBaseUser): email = models.EmailField(verbose_name='email address',max_length=255,unique=True)", "__str__(self): return self.email # For checking permissions. 
to keep it", "models.CharField(max_length=30) experience = models.CharField(max_length=30) verified_id = models.CharField(max_length=255) date_joined = models.DateTimeField(verbose_name='date", "= True user.is_staff = True user.is_superuser = True user.save(using=self._db) return", "password=password, username=username, ) user.is_admin = True user.is_staff = True user.is_superuser", "= models.ImageField(default='profile1.png' ,upload_to='profiles/images/', null=True, blank=True) # notice the absence of", "obj=None): return self.is_admin # Does this user have permission to", "not email: raise ValueError('Users must have an email address') if", "if not email: raise ValueError('Users must have an email address')", "USERNAME_FIELD = 'email' REQUIRED_FIELDS = ['username'] # Email & Password", "= models.CharField(max_length=30) profession = models.CharField(max_length=30) experience = models.CharField(max_length=30) verified_id =", "is built in. objects = MyAccountManager() USERNAME_FIELD = 'email' REQUIRED_FIELDS", "def has_module_perms(self, app_label): return True class Client(AbstractBaseUser): email = models.EmailField(verbose_name='email", "user = self.create_user( email=self.normalize_email(email), password=password, username=username, ) user.is_admin = True", "class MyAccountManager(BaseUserManager): def create_user(self, email, username, password=None): if not email:", "MyAccountManager() USERNAME_FIELD = 'email' REQUIRED_FIELDS = ['username'] # Email &", "= models.CharField(max_length=30) date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True) is_active = models.BooleanField(default=True)", "UserVendor(AbstractBaseUser): email = models.EmailField(verbose_name='email address',max_length=255,unique=True) contact = models.IntegerField(null=True, blank=True) username", "a \"Password field\", that is built in. 
objects = MyAccountManager()", "date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True) is_active = models.BooleanField(default=True) is_admin =", "True user.is_superuser = True user.save(using=self._db) return user class UserVendor(AbstractBaseUser): email", "BaseUserManager class MyAccountManager(BaseUserManager): def create_user(self, email, username, password=None): if not", "username, password): user = self.create_user( email=self.normalize_email(email), password=password, username=username, ) user.is_admin", "create_user(self, email, username, password=None): if not email: raise ValueError('Users must", "return self.email # For checking permissions. to keep it simple", "= models.EmailField(verbose_name='email address',max_length=255,unique=True) contact = models.IntegerField(null=True, blank=True) username = models.CharField(max_length=30)", "from django.db import models from django.contrib.auth.models import AbstractBaseUser, BaseUserManager class", "username = models.CharField(max_length=30) location = models.CharField(max_length=30) date_joined = models.DateTimeField(verbose_name='date joined',", "= self.model( email=self.normalize_email(email), username=username, ) user.set_password(password) user.save(using=self._db) return user def", "has_module_perms(self, app_label): return True class Client(AbstractBaseUser): email = models.EmailField(verbose_name='email address',max_length=255,unique=True)", "built in. objects = MyAccountManager() USERNAME_FIELD = 'email' REQUIRED_FIELDS =", "models.BooleanField(default=False) # notice the absence of a \"Password field\", that", "an email address') if not username: raise ValueError('Users must have", "For checking permissions. 
to keep it simple all admin have", "# Does this user have permission to view this app?", "have an email address') if not username: raise ValueError('Users must", "models.ImageField(default='profile1.png' ,upload_to='profiles/images/', null=True, blank=True) # notice the absence of a", "True user.is_staff = True user.is_superuser = True user.save(using=self._db) return user", "username') user = self.model( email=self.normalize_email(email), username=username, ) user.set_password(password) user.save(using=self._db) return", "location = models.CharField(max_length=30) profession = models.CharField(max_length=30) experience = models.CharField(max_length=30) verified_id", "if not username: raise ValueError('Users must have a username') user", "user.set_password(password) user.save(using=self._db) return user def create_superuser(self, email, username, password): user", "user.is_staff = True user.is_superuser = True user.save(using=self._db) return user class", "is_active = models.BooleanField(default=True) is_admin = models.BooleanField(default=False) is_staff = models.BooleanField(default=False) image", "# notice the absence of a \"Password field\", that is", "Does this user have permission to view this app? 
(ALWAYS", "username=username, ) user.set_password(password) user.save(using=self._db) return user def create_superuser(self, email, username,", "blank=True) username = models.CharField(max_length=30) location = models.CharField(max_length=30) profession = models.CharField(max_length=30)", "models.EmailField(verbose_name='email address',max_length=255,unique=True) contact = models.IntegerField(null=True, blank=True) username = models.CharField(max_length=30) location", "not username: raise ValueError('Users must have a username') user =", "must have an email address') if not username: raise ValueError('Users", "<gh_stars>1-10 from django.db import models from django.contrib.auth.models import AbstractBaseUser, BaseUserManager", "user def create_superuser(self, email, username, password): user = self.create_user( email=self.normalize_email(email),", "checking permissions. to keep it simple all admin have ALL", "permissons def has_perm(self, perm, obj=None): return self.is_admin # Does this", "True class Client(AbstractBaseUser): email = models.EmailField(verbose_name='email address',max_length=255,unique=True) contact = models.IntegerField(null=True,", "ValueError('Users must have an email address') if not username: raise", "self.is_admin # Does this user have permission to view this", "user class UserVendor(AbstractBaseUser): email = models.EmailField(verbose_name='email address',max_length=255,unique=True) contact = models.IntegerField(null=True,", "email: raise ValueError('Users must have an email address') if not", "from django.contrib.auth.models import AbstractBaseUser, BaseUserManager class MyAccountManager(BaseUserManager): def create_user(self, email,", "username, password=None): if not email: raise ValueError('Users must have an", "models.BooleanField(default=True) is_admin = models.BooleanField(default=False) is_staff = models.BooleanField(default=False) image = models.ImageField(default='profile1.png'", "user = self.model( 
email=self.normalize_email(email), username=username, ) user.set_password(password) user.save(using=self._db) return user", "= True user.save(using=self._db) return user class UserVendor(AbstractBaseUser): email = models.EmailField(verbose_name='email", "email = models.EmailField(verbose_name='email address',max_length=255,unique=True) contact = models.IntegerField(null=True, blank=True) username =", "models.CharField(max_length=30) location = models.CharField(max_length=30) profession = models.CharField(max_length=30) experience = models.CharField(max_length=30)", "return self.is_admin # Does this user have permission to view", "YES FOR SIMPLICITY) def has_module_perms(self, app_label): return True class Client(AbstractBaseUser):", "= models.BooleanField(default=False) is_staff = models.BooleanField(default=False) image = models.ImageField(default='profile1.png' ,upload_to='profiles/images/', null=True,", "class UserVendor(AbstractBaseUser): email = models.EmailField(verbose_name='email address',max_length=255,unique=True) contact = models.IntegerField(null=True, blank=True)", "absence of a \"Password field\", that is built in. objects", "all admin have ALL permissons def has_perm(self, perm, obj=None): return", "models.BooleanField(default=False) is_staff = models.BooleanField(default=False) image = models.ImageField(default='profile1.png' ,upload_to='profiles/images/', null=True, blank=True)", "user.is_admin = True user.is_staff = True user.is_superuser = True user.save(using=self._db)", "auto_now_add=True) is_active = models.BooleanField(default=True) is_admin = models.BooleanField(default=False) is_staff = models.BooleanField(default=False)", "Password are required by default. def __str__(self): return self.email #", "have ALL permissons def has_perm(self, perm, obj=None): return self.is_admin #", "to view this app? 
(ALWAYS YES FOR SIMPLICITY) def has_module_perms(self,", "= models.BooleanField(default=False) # notice the absence of a \"Password field\",", "= models.BooleanField(default=False) image = models.ImageField(default='profile1.png' ,upload_to='profiles/images/', null=True, blank=True) # notice", "models.DateTimeField(verbose_name='date joined', auto_now_add=True) is_active = models.BooleanField(default=True) is_admin = models.BooleanField(default=False) is_staff", "create_superuser(self, email, username, password): user = self.create_user( email=self.normalize_email(email), password=password, username=username,", "of a \"Password field\", that is built in. objects =", "self.email # For checking permissions. to keep it simple all", "django.db import models from django.contrib.auth.models import AbstractBaseUser, BaseUserManager class MyAccountManager(BaseUserManager):", "django.contrib.auth.models import AbstractBaseUser, BaseUserManager class MyAccountManager(BaseUserManager): def create_user(self, email, username,", "it simple all admin have ALL permissons def has_perm(self, perm,", "def create_superuser(self, email, username, password): user = self.create_user( email=self.normalize_email(email), password=password,", "return user def create_superuser(self, email, username, password): user = self.create_user(", "def create_user(self, email, username, password=None): if not email: raise ValueError('Users", "profession = models.CharField(max_length=30) experience = models.CharField(max_length=30) verified_id = models.CharField(max_length=255) date_joined", "email, username, password=None): if not email: raise ValueError('Users must have", "models.BooleanField(default=True) is_admin = models.BooleanField(default=False) is_staff = models.BooleanField(default=False) # notice the", "models from django.contrib.auth.models import AbstractBaseUser, BaseUserManager class MyAccountManager(BaseUserManager): def create_user(self,", "address',max_length=255,unique=True) contact = 
models.IntegerField(null=True, blank=True) username = models.CharField(max_length=30) location =", "user.save(using=self._db) return user class UserVendor(AbstractBaseUser): email = models.EmailField(verbose_name='email address',max_length=255,unique=True) contact", "'email' REQUIRED_FIELDS = ['username'] # Email & Password are required", "models.CharField(max_length=30) location = models.CharField(max_length=30) date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True) is_active", "return True class Client(AbstractBaseUser): email = models.EmailField(verbose_name='email address',max_length=255,unique=True) contact =", "AbstractBaseUser, BaseUserManager class MyAccountManager(BaseUserManager): def create_user(self, email, username, password=None): if", "in. objects = MyAccountManager() USERNAME_FIELD = 'email' REQUIRED_FIELDS = ['username']", "objects = MyAccountManager() USERNAME_FIELD = 'email' REQUIRED_FIELDS = ['username'] #", "must have a username') user = self.model( email=self.normalize_email(email), username=username, )", "= models.IntegerField(null=True, blank=True) username = models.CharField(max_length=30) location = models.CharField(max_length=30) date_joined", "= 'email' REQUIRED_FIELDS = ['username'] # Email & Password are", "username=username, ) user.is_admin = True user.is_staff = True user.is_superuser =", "models.IntegerField(null=True, blank=True) username = models.CharField(max_length=30) location = models.CharField(max_length=30) profession =", "= models.IntegerField(null=True, blank=True) username = models.CharField(max_length=30) location = models.CharField(max_length=30) profession", "REQUIRED_FIELDS = ['username'] # Email & Password are required by", "ValueError('Users must have a username') user = self.model( email=self.normalize_email(email), username=username,", "models.CharField(max_length=255) date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True) is_active = 
models.BooleanField(default=True) is_admin", "ALL permissons def has_perm(self, perm, obj=None): return self.is_admin # Does", "the absence of a \"Password field\", that is built in.", "simple all admin have ALL permissons def has_perm(self, perm, obj=None):", "= models.CharField(max_length=30) verified_id = models.CharField(max_length=255) date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True)", "models.CharField(max_length=30) verified_id = models.CharField(max_length=255) date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True) is_active", "def has_perm(self, perm, obj=None): return self.is_admin # Does this user", "= models.CharField(max_length=255) date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True) is_active = models.BooleanField(default=True)", "keep it simple all admin have ALL permissons def has_perm(self,", "raise ValueError('Users must have a username') user = self.model( email=self.normalize_email(email),", "permission to view this app? (ALWAYS YES FOR SIMPLICITY) def", "= ['username'] # Email & Password are required by default.", "view this app? (ALWAYS YES FOR SIMPLICITY) def has_module_perms(self, app_label):", "this app? (ALWAYS YES FOR SIMPLICITY) def has_module_perms(self, app_label): return", "this user have permission to view this app? 
(ALWAYS YES", "email=self.normalize_email(email), username=username, ) user.set_password(password) user.save(using=self._db) return user def create_superuser(self, email,", "location = models.CharField(max_length=30) date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True) is_active =", "email=self.normalize_email(email), password=password, username=username, ) user.is_admin = True user.is_staff = True", ") user.is_admin = True user.is_staff = True user.is_superuser = True", "blank=True) # notice the absence of a \"Password field\", that", "= models.BooleanField(default=True) is_admin = models.BooleanField(default=False) is_staff = models.BooleanField(default=False) # notice", "email, username, password): user = self.create_user( email=self.normalize_email(email), password=password, username=username, )", "import AbstractBaseUser, BaseUserManager class MyAccountManager(BaseUserManager): def create_user(self, email, username, password=None):", "email address') if not username: raise ValueError('Users must have a", "notice the absence of a \"Password field\", that is built", "= models.CharField(max_length=30) location = models.CharField(max_length=30) date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True)", "field\", that is built in. objects = MyAccountManager() USERNAME_FIELD =", "address') if not username: raise ValueError('Users must have a username')", "import models from django.contrib.auth.models import AbstractBaseUser, BaseUserManager class MyAccountManager(BaseUserManager): def", "# For checking permissions. to keep it simple all admin", "models.CharField(max_length=30) profession = models.CharField(max_length=30) experience = models.CharField(max_length=30) verified_id = models.CharField(max_length=255)", "are required by default. 
def __str__(self): return self.email # For", "user.save(using=self._db) return user def create_superuser(self, email, username, password): user =", "= models.DateTimeField(verbose_name='date joined', auto_now_add=True) is_active = models.BooleanField(default=True) is_admin = models.BooleanField(default=False)", "a username') user = self.model( email=self.normalize_email(email), username=username, ) user.set_password(password) user.save(using=self._db)", "have permission to view this app? (ALWAYS YES FOR SIMPLICITY)", ",upload_to='profiles/images/', null=True, blank=True) # notice the absence of a \"Password", "blank=True) username = models.CharField(max_length=30) location = models.CharField(max_length=30) date_joined = models.DateTimeField(verbose_name='date", "MyAccountManager(BaseUserManager): def create_user(self, email, username, password=None): if not email: raise", "permissions. to keep it simple all admin have ALL permissons", "user have permission to view this app? (ALWAYS YES FOR", "raise ValueError('Users must have an email address') if not username:", "self.create_user( email=self.normalize_email(email), password=password, username=username, ) user.is_admin = True user.is_staff =", "self.model( email=self.normalize_email(email), username=username, ) user.set_password(password) user.save(using=self._db) return user def create_superuser(self,", "contact = models.IntegerField(null=True, blank=True) username = models.CharField(max_length=30) location = models.CharField(max_length=30)", "perm, obj=None): return self.is_admin # Does this user have permission", "password): user = self.create_user( email=self.normalize_email(email), password=password, username=username, ) user.is_admin =", "models.CharField(max_length=30) date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True) is_active = models.BooleanField(default=True) is_admin", "is_admin = models.BooleanField(default=False) is_staff = models.BooleanField(default=False) # notice 
the absence", "null=True, blank=True) # notice the absence of a \"Password field\",", "experience = models.CharField(max_length=30) verified_id = models.CharField(max_length=255) date_joined = models.DateTimeField(verbose_name='date joined',", "Client(AbstractBaseUser): email = models.EmailField(verbose_name='email address',max_length=255,unique=True) contact = models.IntegerField(null=True, blank=True) username", "app? (ALWAYS YES FOR SIMPLICITY) def has_module_perms(self, app_label): return True", "is_active = models.BooleanField(default=True) is_admin = models.BooleanField(default=False) is_staff = models.BooleanField(default=False) #", "models.BooleanField(default=False) image = models.ImageField(default='profile1.png' ,upload_to='profiles/images/', null=True, blank=True) # notice the", "= True user.is_superuser = True user.save(using=self._db) return user class UserVendor(AbstractBaseUser):", "(ALWAYS YES FOR SIMPLICITY) def has_module_perms(self, app_label): return True class", "\"Password field\", that is built in. objects = MyAccountManager() USERNAME_FIELD", "has_perm(self, perm, obj=None): return self.is_admin # Does this user have", "& Password are required by default. def __str__(self): return self.email", "is_staff = models.BooleanField(default=False) image = models.ImageField(default='profile1.png' ,upload_to='profiles/images/', null=True, blank=True) #", "default. def __str__(self): return self.email # For checking permissions. to", "required by default. def __str__(self): return self.email # For checking" ]
[ "from dataclasses import dataclass, field from typing import Optional, Iterable,", "Metabase additional connection opts use_http: bool = False verify: Union[str,", "additional connection opts use_http: bool = False verify: Union[str, bool]", "DbtConfig: # dbt Reader database: str manifest_path: Optional[str] = None", "Metabase Client database: str host: str user: str password: str", "host: str user: str password: str # Metabase additional connection", "schema: Optional[str] = None schema_excludes: Iterable = field(default_factory=list) includes: Iterable", "Client database: str host: str user: str password: str #", "manifest_path: Optional[str] = None path: Optional[str] = None # dbt", "= field(default_factory=list) includes: Iterable = field(default_factory=list) excludes: Iterable = field(default_factory=list)", "verify: Union[str, bool] = True # Metabase Sync sync_skip: bool", "MetabaseConfig: # Metabase Client database: str host: str user: str", "str password: str # Metabase additional connection opts use_http: bool", "str user: str password: str # Metabase additional connection opts", "None schema_excludes: Iterable = field(default_factory=list) includes: Iterable = field(default_factory=list) excludes:", "bool] = True # Metabase Sync sync_skip: bool = False", "Optional[str] = None path: Optional[str] = None # dbt Target", "Optional[str] = None # dbt Target Models schema: Optional[str] =", "Iterable = field(default_factory=list) includes: Iterable = field(default_factory=list) excludes: Iterable =", "class MetabaseConfig: # Metabase Client database: str host: str user:", "str manifest_path: Optional[str] = None path: Optional[str] = None #", "= None @dataclass class DbtConfig: # dbt Reader database: str", "# dbt Target Models schema: Optional[str] = None schema_excludes: Iterable", "= None # dbt Target Models schema: Optional[str] = None", "# dbt Reader database: str manifest_path: Optional[str] = None path:", "from typing import Optional, Iterable, Union 
@dataclass class MetabaseConfig: #", "str # Metabase additional connection opts use_http: bool = False", "@dataclass class DbtConfig: # dbt Reader database: str manifest_path: Optional[str]", "Optional[int] = None @dataclass class DbtConfig: # dbt Reader database:", "connection opts use_http: bool = False verify: Union[str, bool] =", "False verify: Union[str, bool] = True # Metabase Sync sync_skip:", "Union[str, bool] = True # Metabase Sync sync_skip: bool =", "dbt Reader database: str manifest_path: Optional[str] = None path: Optional[str]", "@dataclass class MetabaseConfig: # Metabase Client database: str host: str", "= False sync_timeout: Optional[int] = None @dataclass class DbtConfig: #", "field from typing import Optional, Iterable, Union @dataclass class MetabaseConfig:", "path: Optional[str] = None # dbt Target Models schema: Optional[str]", "= True # Metabase Sync sync_skip: bool = False sync_timeout:", "bool = False verify: Union[str, bool] = True # Metabase", "= None schema_excludes: Iterable = field(default_factory=list) includes: Iterable = field(default_factory=list)", "dbt Target Models schema: Optional[str] = None schema_excludes: Iterable =", "class DbtConfig: # dbt Reader database: str manifest_path: Optional[str] =", "database: str manifest_path: Optional[str] = None path: Optional[str] = None", "Reader database: str manifest_path: Optional[str] = None path: Optional[str] =", "schema_excludes: Iterable = field(default_factory=list) includes: Iterable = field(default_factory=list) excludes: Iterable", "bool = False sync_timeout: Optional[int] = None @dataclass class DbtConfig:", "sync_timeout: Optional[int] = None @dataclass class DbtConfig: # dbt Reader", "Models schema: Optional[str] = None schema_excludes: Iterable = field(default_factory=list) includes:", "Optional, Iterable, Union @dataclass class MetabaseConfig: # Metabase Client database:", "password: str # Metabase additional connection opts use_http: bool =", "Sync sync_skip: bool = False 
sync_timeout: Optional[int] = None @dataclass", "opts use_http: bool = False verify: Union[str, bool] = True", "None @dataclass class DbtConfig: # dbt Reader database: str manifest_path:", "use_http: bool = False verify: Union[str, bool] = True #", "import Optional, Iterable, Union @dataclass class MetabaseConfig: # Metabase Client", "dataclasses import dataclass, field from typing import Optional, Iterable, Union", "Optional[str] = None schema_excludes: Iterable = field(default_factory=list) includes: Iterable =", "str host: str user: str password: str # Metabase additional", "import dataclass, field from typing import Optional, Iterable, Union @dataclass", "database: str host: str user: str password: str # Metabase", "True # Metabase Sync sync_skip: bool = False sync_timeout: Optional[int]", "Metabase Sync sync_skip: bool = False sync_timeout: Optional[int] = None", "None path: Optional[str] = None # dbt Target Models schema:", "False sync_timeout: Optional[int] = None @dataclass class DbtConfig: # dbt", "Iterable, Union @dataclass class MetabaseConfig: # Metabase Client database: str", "# Metabase Sync sync_skip: bool = False sync_timeout: Optional[int] =", "sync_skip: bool = False sync_timeout: Optional[int] = None @dataclass class", "typing import Optional, Iterable, Union @dataclass class MetabaseConfig: # Metabase", "= False verify: Union[str, bool] = True # Metabase Sync", "= None path: Optional[str] = None # dbt Target Models", "None # dbt Target Models schema: Optional[str] = None schema_excludes:", "# Metabase Client database: str host: str user: str password:", "Target Models schema: Optional[str] = None schema_excludes: Iterable = field(default_factory=list)", "# Metabase additional connection opts use_http: bool = False verify:", "Union @dataclass class MetabaseConfig: # Metabase Client database: str host:", "user: str password: str # Metabase additional connection opts use_http:", "dataclass, field from typing import Optional, Iterable, Union 
@dataclass class" ]
[ "f: content = f.readlines() return content # Convert usecs (numeric)", ">>> x.strftime('%Y-%m-%d %H:%M:%S.%f') # '2018-03-04 10:44:50.755278' def ConvertUsecsEpochToDateTime(usecs): secs =", "Make the numbers comma separated. ax.get_yaxis().set_major_formatter( matplotlib.ticker.FuncFormatter(lambda bytes, p: format(int(bytes),", "# Open the file, read the string contents into a", "# Take a list of string tuples (timestamp, metric), #", "outBytesList # Plotting driver program. def driver(dataFile): datetimeList, outBytesList =", "and return the list. def GetLinesListFromFile(filename): with open(filename) as f:", "read the string contents into a list, # and return", "import sys import os # Open the file, read the", "datetimeList, outBytesList # Plotting driver program. def driver(dataFile): datetimeList, outBytesList", "notatinn, use plain numbers. ax.get_yaxis().get_major_formatter().set_scientific(False) # Make the numbers comma", "# >>> x.strftime('%Y-%m-%d %H:%M:%S.%f') # '2018-03-04 10:44:50.755278' def ConvertUsecsEpochToDateTime(usecs): secs", "the string contents into a list, # and return the", "problem, # see REVISIT above. # xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S.%f')", "outBytesList) plt.title('Outstanding Bytes Timeseries') plt.ylabel('bytes') plt.xlabel('timestamp') plt.grid(True) plt.show() # main", "datetimeObj = dt.datetime.fromtimestamp(usecs) datetimeObj = dt.datetime.fromtimestamp(secs) # print usecs, secs,", "as dt import sys import os # Open the file,", "string tuples (timestamp, metric), # parses them into numerical values", "# Avoid scientific notatinn, use plain numbers. ax.get_yaxis().get_major_formatter().set_scientific(False) # Make", "datetimeObj = dt.datetime.fromtimestamp(secs) # print usecs, secs, datetimeObj return datetimeObj", "micro-seconds, but facing some problem, # see REVISIT above. 
#", "f.readlines() return content # Convert usecs (numeric) to datetime #", "= ConvertUsecsEpochToDateTime(usecs) datetimeList.append(datetimeObj) outBytesList.append(bytes) return datetimeList, outBytesList # Plotting driver", "plt.gca() # Intended to show micro-seconds, but facing some problem,", "Bytes Timeseries') plt.ylabel('bytes') plt.xlabel('timestamp') plt.grid(True) plt.show() # main if len(sys.argv)", "matplotlib.pyplot as plt import matplotlib.dates as md import datetime as", "parse usecs throws: # ValueError: year is out of range", "plt.grid(True) plt.show() # main if len(sys.argv) == 1: print \"usage:", "# separate lists. def GetTxListFromFile(filename): lineList = GetLinesListFromFile(filename) datetimeList =", "facing some problem, # see REVISIT above. # xfmt =", "1000000.0 # >>> x = datetime.datetime.fromtimestamp(ts) # >>> x.strftime('%Y-%m-%d %H:%M:%S.%f')", "REVISIT. # datetimeObj = dt.datetime.fromtimestamp(usecs) datetimeObj = dt.datetime.fromtimestamp(secs) # print", "ax.yaxis.set_ticks_position('both') plt.plot(datetimeList, outBytesList) plt.title('Outstanding Bytes Timeseries') plt.ylabel('bytes') plt.xlabel('timestamp') plt.grid(True) plt.show()", "returns # separate lists. def GetTxListFromFile(filename): lineList = GetLinesListFromFile(filename) datetimeList", "import matplotlib.dates as md import datetime as dt import sys", "plt.ylabel('bytes') plt.xlabel('timestamp') plt.grid(True) plt.show() # main if len(sys.argv) == 1:", "# Attempt to parse usecs throws: # ValueError: year is", "of string tuples (timestamp, metric), # parses them into numerical", "tuples (timestamp, metric), # parses them into numerical values and", "# Convert usecs (numeric) to datetime # >>> ts =", "out of range # So, using secs instead. REVISIT. 
#", "return datetimeObj # Take a list of string tuples (timestamp,", "<reponame>arunksaha/heap_tracker<filename>src/plot_timeseries_outstanding_bytes.py<gh_stars>1-10 # # Copyright 2018, <NAME> <<EMAIL>> # import matplotlib", "lineList = GetLinesListFromFile(filename) datetimeList = [] outBytesList = [] for", "= 0.2) plt.xticks(rotation = 25) ax = plt.gca() # Intended", "numbers comma separated. ax.get_yaxis().set_major_formatter( matplotlib.ticker.FuncFormatter(lambda bytes, p: format(int(bytes), ','))) #", "using secs instead. REVISIT. # datetimeObj = dt.datetime.fromtimestamp(usecs) datetimeObj =", "def ConvertUsecsEpochToDateTime(usecs): secs = usecs / 1000000.0 # Attempt to", "secs instead. REVISIT. # datetimeObj = dt.datetime.fromtimestamp(usecs) datetimeObj = dt.datetime.fromtimestamp(secs)", "is out of range # So, using secs instead. REVISIT.", "a list, # and return the list. def GetLinesListFromFile(filename): with", "to datetime # >>> ts = 1520189090755278 / 1000000.0 #", "for line in lineList: tokens = line.split() # print tokens", "0.2) plt.xticks(rotation = 25) ax = plt.gca() # Intended to", "xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S.%f') xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S') ax.xaxis.set_major_formatter(xfmt) #", "plt.plot(datetimeList, outBytesList) plt.title('Outstanding Bytes Timeseries') plt.ylabel('bytes') plt.xlabel('timestamp') plt.grid(True) plt.show() #", "= usecs / 1000000.0 # Attempt to parse usecs throws:", "/ 1000000.0 # Attempt to parse usecs throws: # ValueError:", "Attempt to parse usecs throws: # ValueError: year is out", "GetLinesListFromFile(filename) datetimeList = [] outBytesList = [] for line in", "# parses them into numerical values and returns # separate", "plt.subplots_adjust(bottom = 0.2) plt.xticks(rotation = 25) ax = plt.gca() #", "# Make the numbers comma separated. 
ax.get_yaxis().set_major_formatter( matplotlib.ticker.FuncFormatter(lambda bytes, p:", "bytes = int(tokens[1]) datetimeObj = ConvertUsecsEpochToDateTime(usecs) datetimeList.append(datetimeObj) outBytesList.append(bytes) return datetimeList,", "10:44:50.755278' def ConvertUsecsEpochToDateTime(usecs): secs = usecs / 1000000.0 # Attempt", "= [] outBytesList = [] for line in lineList: tokens", "both sides, but not working. ax.yaxis.set_ticks_position('both') plt.plot(datetimeList, outBytesList) plt.title('Outstanding Bytes", "and returns # separate lists. def GetTxListFromFile(filename): lineList = GetLinesListFromFile(filename)", "# print usecs, secs, datetimeObj return datetimeObj # Take a", "numbers. ax.get_yaxis().get_major_formatter().set_scientific(False) # Make the numbers comma separated. ax.get_yaxis().set_major_formatter( matplotlib.ticker.FuncFormatter(lambda", "driver(dataFile): datetimeList, outBytesList = GetTxListFromFile(dataFile) plt.subplots_adjust(bottom = 0.2) plt.xticks(rotation =", "GetTxListFromFile(filename): lineList = GetLinesListFromFile(filename) datetimeList = [] outBytesList = []", "Take a list of string tuples (timestamp, metric), # parses", "# print tokens assert(len(tokens) >= 2) usecs = int(tokens[0]) bytes", "import matplotlib import matplotlib.pyplot as plt import matplotlib.dates as md", "# # Copyright 2018, <NAME> <<EMAIL>> # import matplotlib import", "# ValueError: year is out of range # So, using", "datetimeList, outBytesList = GetTxListFromFile(dataFile) plt.subplots_adjust(bottom = 0.2) plt.xticks(rotation = 25)", "2018, <NAME> <<EMAIL>> # import matplotlib import matplotlib.pyplot as plt", "list, # and return the list. def GetLinesListFromFile(filename): with open(filename)", "= md.DateFormatter('%Y-%m-%d %H:%M:%S') ax.xaxis.set_major_formatter(xfmt) # Avoid scientific notatinn, use plain", "into numerical values and returns # separate lists. 
def GetTxListFromFile(filename):", "as plt import matplotlib.dates as md import datetime as dt", "= plt.gca() # Intended to show micro-seconds, but facing some", "as md import datetime as dt import sys import os", "# Intended to show micro-seconds, but facing some problem, #", "outBytesList = GetTxListFromFile(dataFile) plt.subplots_adjust(bottom = 0.2) plt.xticks(rotation = 25) ax", "throws: # ValueError: year is out of range # So,", "= int(tokens[0]) bytes = int(tokens[1]) datetimeObj = ConvertUsecsEpochToDateTime(usecs) datetimeList.append(datetimeObj) outBytesList.append(bytes)", "return the list. def GetLinesListFromFile(filename): with open(filename) as f: content", "2) usecs = int(tokens[0]) bytes = int(tokens[1]) datetimeObj = ConvertUsecsEpochToDateTime(usecs)", "# and return the list. def GetLinesListFromFile(filename): with open(filename) as", "plt.xlabel('timestamp') plt.grid(True) plt.show() # main if len(sys.argv) == 1: print", "# Copyright 2018, <NAME> <<EMAIL>> # import matplotlib import matplotlib.pyplot", "plt.xticks(rotation = 25) ax = plt.gca() # Intended to show", "Intended to show micro-seconds, but facing some problem, # see", "REVISIT above. # xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S.%f') xfmt = md.DateFormatter('%Y-%m-%d", "some problem, # see REVISIT above. # xfmt = md.DateFormatter('%Y-%m-%d", "import matplotlib.pyplot as plt import matplotlib.dates as md import datetime", "datetimeList = [] outBytesList = [] for line in lineList:", "int(tokens[0]) bytes = int(tokens[1]) datetimeObj = ConvertUsecsEpochToDateTime(usecs) datetimeList.append(datetimeObj) outBytesList.append(bytes) return", "working. ax.yaxis.set_ticks_position('both') plt.plot(datetimeList, outBytesList) plt.title('Outstanding Bytes Timeseries') plt.ylabel('bytes') plt.xlabel('timestamp') plt.grid(True)", "sides, but not working. 
ax.yaxis.set_ticks_position('both') plt.plot(datetimeList, outBytesList) plt.title('Outstanding Bytes Timeseries')", "to show micro-seconds, but facing some problem, # see REVISIT", "GetTxListFromFile(dataFile) plt.subplots_adjust(bottom = 0.2) plt.xticks(rotation = 25) ax = plt.gca()", "1520189090755278 / 1000000.0 # >>> x = datetime.datetime.fromtimestamp(ts) # >>>", "return content # Convert usecs (numeric) to datetime # >>>", "# Intended the y-axis numbers on both sides, but not", "md.DateFormatter('%Y-%m-%d %H:%M:%S') ax.xaxis.set_major_formatter(xfmt) # Avoid scientific notatinn, use plain numbers.", "y-axis numbers on both sides, but not working. ax.yaxis.set_ticks_position('both') plt.plot(datetimeList,", "ts = 1520189090755278 / 1000000.0 # >>> x = datetime.datetime.fromtimestamp(ts)", "tokens = line.split() # print tokens assert(len(tokens) >= 2) usecs", "ConvertUsecsEpochToDateTime(usecs) datetimeList.append(datetimeObj) outBytesList.append(bytes) return datetimeList, outBytesList # Plotting driver program.", "datetimeList.append(datetimeObj) outBytesList.append(bytes) return datetimeList, outBytesList # Plotting driver program. def", "os # Open the file, read the string contents into", "ConvertUsecsEpochToDateTime(usecs): secs = usecs / 1000000.0 # Attempt to parse", "plain numbers. ax.get_yaxis().get_major_formatter().set_scientific(False) # Make the numbers comma separated. ax.get_yaxis().set_major_formatter(", "Timeseries') plt.ylabel('bytes') plt.xlabel('timestamp') plt.grid(True) plt.show() # main if len(sys.argv) ==", "print tokens assert(len(tokens) >= 2) usecs = int(tokens[0]) bytes =", "usecs = int(tokens[0]) bytes = int(tokens[1]) datetimeObj = ConvertUsecsEpochToDateTime(usecs) datetimeList.append(datetimeObj)", "1000000.0 # Attempt to parse usecs throws: # ValueError: year", "25) ax = plt.gca() # Intended to show micro-seconds, but", "into a list, # and return the list. 
def GetLinesListFromFile(filename):", "metric), # parses them into numerical values and returns #", "Convert usecs (numeric) to datetime # >>> ts = 1520189090755278", "# xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S.%f') xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S') ax.xaxis.set_major_formatter(xfmt)", "# '2018-03-04 10:44:50.755278' def ConvertUsecsEpochToDateTime(usecs): secs = usecs / 1000000.0", "usecs, secs, datetimeObj return datetimeObj # Take a list of", "show micro-seconds, but facing some problem, # see REVISIT above.", "datetimeObj return datetimeObj # Take a list of string tuples", "plt import matplotlib.dates as md import datetime as dt import", "parses them into numerical values and returns # separate lists.", "year is out of range # So, using secs instead.", "range # So, using secs instead. REVISIT. # datetimeObj =", "of range # So, using secs instead. REVISIT. # datetimeObj", "plt.show() # main if len(sys.argv) == 1: print \"usage: {}", "So, using secs instead. REVISIT. # datetimeObj = dt.datetime.fromtimestamp(usecs) datetimeObj", "= 1520189090755278 / 1000000.0 # >>> x = datetime.datetime.fromtimestamp(ts) #", "list of string tuples (timestamp, metric), # parses them into", "with open(filename) as f: content = f.readlines() return content #", "use plain numbers. ax.get_yaxis().get_major_formatter().set_scientific(False) # Make the numbers comma separated.", "ValueError: year is out of range # So, using secs", "xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S') ax.xaxis.set_major_formatter(xfmt) # Avoid scientific notatinn, use", "above. # xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S.%f') xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')", "# import matplotlib import matplotlib.pyplot as plt import matplotlib.dates as", "bytes, p: format(int(bytes), ','))) # Intended the y-axis numbers on", "instead. REVISIT. 
# datetimeObj = dt.datetime.fromtimestamp(usecs) datetimeObj = dt.datetime.fromtimestamp(secs) #", "/ 1000000.0 # >>> x = datetime.datetime.fromtimestamp(ts) # >>> x.strftime('%Y-%m-%d", "content # Convert usecs (numeric) to datetime # >>> ts", "= GetTxListFromFile(dataFile) plt.subplots_adjust(bottom = 0.2) plt.xticks(rotation = 25) ax =", "format(int(bytes), ','))) # Intended the y-axis numbers on both sides,", "import os # Open the file, read the string contents", "<<EMAIL>> # import matplotlib import matplotlib.pyplot as plt import matplotlib.dates", "outBytesList.append(bytes) return datetimeList, outBytesList # Plotting driver program. def driver(dataFile):", "%H:%M:%S.%f') xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S') ax.xaxis.set_major_formatter(xfmt) # Avoid scientific notatinn,", "matplotlib.ticker.FuncFormatter(lambda bytes, p: format(int(bytes), ','))) # Intended the y-axis numbers", "dt import sys import os # Open the file, read", "return datetimeList, outBytesList # Plotting driver program. def driver(dataFile): datetimeList,", "but not working. ax.yaxis.set_ticks_position('both') plt.plot(datetimeList, outBytesList) plt.title('Outstanding Bytes Timeseries') plt.ylabel('bytes')", "datetime as dt import sys import os # Open the", "main if len(sys.argv) == 1: print \"usage: {} <input-text-file>\".format(sys.argv[0]) sys.exit(1)", "values and returns # separate lists. def GetTxListFromFile(filename): lineList =", "secs = usecs / 1000000.0 # Attempt to parse usecs", "on both sides, but not working. ax.yaxis.set_ticks_position('both') plt.plot(datetimeList, outBytesList) plt.title('Outstanding", ">>> x = datetime.datetime.fromtimestamp(ts) # >>> x.strftime('%Y-%m-%d %H:%M:%S.%f') # '2018-03-04", "usecs (numeric) to datetime # >>> ts = 1520189090755278 /", "not working. 
ax.yaxis.set_ticks_position('both') plt.plot(datetimeList, outBytesList) plt.title('Outstanding Bytes Timeseries') plt.ylabel('bytes') plt.xlabel('timestamp')", "%H:%M:%S.%f') # '2018-03-04 10:44:50.755278' def ConvertUsecsEpochToDateTime(usecs): secs = usecs /", "# datetimeObj = dt.datetime.fromtimestamp(usecs) datetimeObj = dt.datetime.fromtimestamp(secs) # print usecs,", "tokens assert(len(tokens) >= 2) usecs = int(tokens[0]) bytes = int(tokens[1])", "(numeric) to datetime # >>> ts = 1520189090755278 / 1000000.0", "driver program. def driver(dataFile): datetimeList, outBytesList = GetTxListFromFile(dataFile) plt.subplots_adjust(bottom =", "matplotlib import matplotlib.pyplot as plt import matplotlib.dates as md import", "# So, using secs instead. REVISIT. # datetimeObj = dt.datetime.fromtimestamp(usecs)", "as f: content = f.readlines() return content # Convert usecs", "# >>> ts = 1520189090755278 / 1000000.0 # >>> x", "Open the file, read the string contents into a list,", "# Plotting driver program. def driver(dataFile): datetimeList, outBytesList = GetTxListFromFile(dataFile)", "= md.DateFormatter('%Y-%m-%d %H:%M:%S.%f') xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S') ax.xaxis.set_major_formatter(xfmt) # Avoid", "content = f.readlines() return content # Convert usecs (numeric) to", "open(filename) as f: content = f.readlines() return content # Convert", "ax = plt.gca() # Intended to show micro-seconds, but facing", "numbers on both sides, but not working. ax.yaxis.set_ticks_position('both') plt.plot(datetimeList, outBytesList)", "line in lineList: tokens = line.split() # print tokens assert(len(tokens)", "x = datetime.datetime.fromtimestamp(ts) # >>> x.strftime('%Y-%m-%d %H:%M:%S.%f') # '2018-03-04 10:44:50.755278'", "ax.get_yaxis().set_major_formatter( matplotlib.ticker.FuncFormatter(lambda bytes, p: format(int(bytes), ','))) # Intended the y-axis", "def GetLinesListFromFile(filename): with open(filename) as f: content = f.readlines() return", "program. 
def driver(dataFile): datetimeList, outBytesList = GetTxListFromFile(dataFile) plt.subplots_adjust(bottom = 0.2)", "assert(len(tokens) >= 2) usecs = int(tokens[0]) bytes = int(tokens[1]) datetimeObj", "ax.get_yaxis().get_major_formatter().set_scientific(False) # Make the numbers comma separated. ax.get_yaxis().set_major_formatter( matplotlib.ticker.FuncFormatter(lambda bytes,", "datetime # >>> ts = 1520189090755278 / 1000000.0 # >>>", "the list. def GetLinesListFromFile(filename): with open(filename) as f: content =", "# >>> x = datetime.datetime.fromtimestamp(ts) # >>> x.strftime('%Y-%m-%d %H:%M:%S.%f') #", "lists. def GetTxListFromFile(filename): lineList = GetLinesListFromFile(filename) datetimeList = [] outBytesList", "Copyright 2018, <NAME> <<EMAIL>> # import matplotlib import matplotlib.pyplot as", "= f.readlines() return content # Convert usecs (numeric) to datetime", "'2018-03-04 10:44:50.755278' def ConvertUsecsEpochToDateTime(usecs): secs = usecs / 1000000.0 #", "string contents into a list, # and return the list.", "ax.xaxis.set_major_formatter(xfmt) # Avoid scientific notatinn, use plain numbers. ax.get_yaxis().get_major_formatter().set_scientific(False) #", "the y-axis numbers on both sides, but not working. ax.yaxis.set_ticks_position('both')", "x.strftime('%Y-%m-%d %H:%M:%S.%f') # '2018-03-04 10:44:50.755278' def ConvertUsecsEpochToDateTime(usecs): secs = usecs", "# see REVISIT above. # xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S.%f') xfmt", "%H:%M:%S') ax.xaxis.set_major_formatter(xfmt) # Avoid scientific notatinn, use plain numbers. ax.get_yaxis().get_major_formatter().set_scientific(False)", "but facing some problem, # see REVISIT above. 
# xfmt", "Intended the y-axis numbers on both sides, but not working.", "plt.title('Outstanding Bytes Timeseries') plt.ylabel('bytes') plt.xlabel('timestamp') plt.grid(True) plt.show() # main if", "= int(tokens[1]) datetimeObj = ConvertUsecsEpochToDateTime(usecs) datetimeList.append(datetimeObj) outBytesList.append(bytes) return datetimeList, outBytesList", "md.DateFormatter('%Y-%m-%d %H:%M:%S.%f') xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S') ax.xaxis.set_major_formatter(xfmt) # Avoid scientific", "datetimeObj = ConvertUsecsEpochToDateTime(usecs) datetimeList.append(datetimeObj) outBytesList.append(bytes) return datetimeList, outBytesList # Plotting", "line.split() # print tokens assert(len(tokens) >= 2) usecs = int(tokens[0])", "scientific notatinn, use plain numbers. ax.get_yaxis().get_major_formatter().set_scientific(False) # Make the numbers", "outBytesList = [] for line in lineList: tokens = line.split()", "usecs throws: # ValueError: year is out of range #", "contents into a list, # and return the list. def", "secs, datetimeObj return datetimeObj # Take a list of string", "= 25) ax = plt.gca() # Intended to show micro-seconds,", "file, read the string contents into a list, # and", "in lineList: tokens = line.split() # print tokens assert(len(tokens) >=", "comma separated. ax.get_yaxis().set_major_formatter( matplotlib.ticker.FuncFormatter(lambda bytes, p: format(int(bytes), ','))) # Intended", "a list of string tuples (timestamp, metric), # parses them", "separate lists. 
def GetTxListFromFile(filename): lineList = GetLinesListFromFile(filename) datetimeList = []", "def driver(dataFile): datetimeList, outBytesList = GetTxListFromFile(dataFile) plt.subplots_adjust(bottom = 0.2) plt.xticks(rotation", "sys import os # Open the file, read the string", "[] outBytesList = [] for line in lineList: tokens =", "the file, read the string contents into a list, #", "GetLinesListFromFile(filename): with open(filename) as f: content = f.readlines() return content", "int(tokens[1]) datetimeObj = ConvertUsecsEpochToDateTime(usecs) datetimeList.append(datetimeObj) outBytesList.append(bytes) return datetimeList, outBytesList #", "lineList: tokens = line.split() # print tokens assert(len(tokens) >= 2)", "= dt.datetime.fromtimestamp(usecs) datetimeObj = dt.datetime.fromtimestamp(secs) # print usecs, secs, datetimeObj", "to parse usecs throws: # ValueError: year is out of", "def GetTxListFromFile(filename): lineList = GetLinesListFromFile(filename) datetimeList = [] outBytesList =", "Avoid scientific notatinn, use plain numbers. ax.get_yaxis().get_major_formatter().set_scientific(False) # Make the", "(timestamp, metric), # parses them into numerical values and returns", "p: format(int(bytes), ','))) # Intended the y-axis numbers on both", "separated. ax.get_yaxis().set_major_formatter( matplotlib.ticker.FuncFormatter(lambda bytes, p: format(int(bytes), ','))) # Intended the", "import datetime as dt import sys import os # Open", "numerical values and returns # separate lists. def GetTxListFromFile(filename): lineList", "','))) # Intended the y-axis numbers on both sides, but", "them into numerical values and returns # separate lists. def", "list. 
def GetLinesListFromFile(filename): with open(filename) as f: content = f.readlines()", "dt.datetime.fromtimestamp(secs) # print usecs, secs, datetimeObj return datetimeObj # Take", "print usecs, secs, datetimeObj return datetimeObj # Take a list", "= [] for line in lineList: tokens = line.split() #", "see REVISIT above. # xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S.%f') xfmt =", "datetimeObj # Take a list of string tuples (timestamp, metric),", "the numbers comma separated. ax.get_yaxis().set_major_formatter( matplotlib.ticker.FuncFormatter(lambda bytes, p: format(int(bytes), ',')))", ">>> ts = 1520189090755278 / 1000000.0 # >>> x =", "dt.datetime.fromtimestamp(usecs) datetimeObj = dt.datetime.fromtimestamp(secs) # print usecs, secs, datetimeObj return", "= datetime.datetime.fromtimestamp(ts) # >>> x.strftime('%Y-%m-%d %H:%M:%S.%f') # '2018-03-04 10:44:50.755278' def", "usecs / 1000000.0 # Attempt to parse usecs throws: #", "if len(sys.argv) == 1: print \"usage: {} <input-text-file>\".format(sys.argv[0]) sys.exit(1) driver(sys.argv[1])", "[] for line in lineList: tokens = line.split() # print", "= dt.datetime.fromtimestamp(secs) # print usecs, secs, datetimeObj return datetimeObj #", "= line.split() # print tokens assert(len(tokens) >= 2) usecs =", "Plotting driver program. def driver(dataFile): datetimeList, outBytesList = GetTxListFromFile(dataFile) plt.subplots_adjust(bottom", ">= 2) usecs = int(tokens[0]) bytes = int(tokens[1]) datetimeObj =", "matplotlib.dates as md import datetime as dt import sys import", "datetime.datetime.fromtimestamp(ts) # >>> x.strftime('%Y-%m-%d %H:%M:%S.%f') # '2018-03-04 10:44:50.755278' def ConvertUsecsEpochToDateTime(usecs):", "= GetLinesListFromFile(filename) datetimeList = [] outBytesList = [] for line", "# main if len(sys.argv) == 1: print \"usage: {} <input-text-file>\".format(sys.argv[0])", "md import datetime as dt import sys import os #", "<NAME> <<EMAIL>> # import matplotlib import matplotlib.pyplot as plt import" ]
[ "process_image path = sys.argv[1] data = json.load(open(os.path.join(path, 'result.json'), 'r')) chat_id", "template, text = process_image(open(os.path.join(path, m['photo']), 'rb')) message_id = m['id'] print(f'processing", "cur = conn.cursor() cur.execute(\"INSERT INTO meme (template, text, chat_id, message_id)", "'result.json'), 'r')) chat_id = data['id'] conn = psycopg2.connect(os.getenv('POSTGRES_CREDENTIALS')) for m", "in data['messages']: if 'photo' in m: template, text = process_image(open(os.path.join(path,", "import psycopg2 from meme_classifier.images import process_image path = sys.argv[1] data", "= data['id'] conn = psycopg2.connect(os.getenv('POSTGRES_CREDENTIALS')) for m in data['messages']: if", "message_id) VALUES (%s, %s, %s, %s)\", (template, text, chat_id, message_id))", "from dotenv import load_dotenv load_dotenv() import sys import os import", "data = json.load(open(os.path.join(path, 'result.json'), 'r')) chat_id = data['id'] conn =", "VALUES (%s, %s, %s, %s)\", (template, text, chat_id, message_id)) conn.commit()", "re import json import psycopg2 from meme_classifier.images import process_image path", "if 'photo' in m: template, text = process_image(open(os.path.join(path, m['photo']), 'rb'))", "sys import os import re import json import psycopg2 from", "data['messages']: if 'photo' in m: template, text = process_image(open(os.path.join(path, m['photo']),", "from meme_classifier.images import process_image path = sys.argv[1] data = json.load(open(os.path.join(path,", "load_dotenv() import sys import os import re import json import", "process_image(open(os.path.join(path, m['photo']), 'rb')) message_id = m['id'] print(f'processing message {message_id}') cur", "m['photo']), 'rb')) message_id = m['id'] print(f'processing message {message_id}') cur =", "import re import json import psycopg2 from meme_classifier.images import process_image", "message {message_id}') cur = conn.cursor() cur.execute(\"INSERT INTO meme (template, text,", "import os 
import re import json import psycopg2 from meme_classifier.images", "= process_image(open(os.path.join(path, m['photo']), 'rb')) message_id = m['id'] print(f'processing message {message_id}')", "{message_id}') cur = conn.cursor() cur.execute(\"INSERT INTO meme (template, text, chat_id,", "message_id = m['id'] print(f'processing message {message_id}') cur = conn.cursor() cur.execute(\"INSERT", "psycopg2.connect(os.getenv('POSTGRES_CREDENTIALS')) for m in data['messages']: if 'photo' in m: template,", "import sys import os import re import json import psycopg2", "import process_image path = sys.argv[1] data = json.load(open(os.path.join(path, 'result.json'), 'r'))", "= m['id'] print(f'processing message {message_id}') cur = conn.cursor() cur.execute(\"INSERT INTO", "sys.argv[1] data = json.load(open(os.path.join(path, 'result.json'), 'r')) chat_id = data['id'] conn", "json import psycopg2 from meme_classifier.images import process_image path = sys.argv[1]", "chat_id = data['id'] conn = psycopg2.connect(os.getenv('POSTGRES_CREDENTIALS')) for m in data['messages']:", "text = process_image(open(os.path.join(path, m['photo']), 'rb')) message_id = m['id'] print(f'processing message", "'r')) chat_id = data['id'] conn = psycopg2.connect(os.getenv('POSTGRES_CREDENTIALS')) for m in", "psycopg2 from meme_classifier.images import process_image path = sys.argv[1] data =", "'rb')) message_id = m['id'] print(f'processing message {message_id}') cur = conn.cursor()", "print(f'processing message {message_id}') cur = conn.cursor() cur.execute(\"INSERT INTO meme (template,", "INTO meme (template, text, chat_id, message_id) VALUES (%s, %s, %s,", "m: template, text = process_image(open(os.path.join(path, m['photo']), 'rb')) message_id = m['id']", "import load_dotenv load_dotenv() import sys import os import re import", "os import re import json import psycopg2 from meme_classifier.images import", "in m: template, text = process_image(open(os.path.join(path, m['photo']), 'rb')) message_id 
=", "conn.cursor() cur.execute(\"INSERT INTO meme (template, text, chat_id, message_id) VALUES (%s,", "import json import psycopg2 from meme_classifier.images import process_image path =", "meme_classifier.images import process_image path = sys.argv[1] data = json.load(open(os.path.join(path, 'result.json'),", "text, chat_id, message_id) VALUES (%s, %s, %s, %s)\", (template, text,", "= json.load(open(os.path.join(path, 'result.json'), 'r')) chat_id = data['id'] conn = psycopg2.connect(os.getenv('POSTGRES_CREDENTIALS'))", "dotenv import load_dotenv load_dotenv() import sys import os import re", "path = sys.argv[1] data = json.load(open(os.path.join(path, 'result.json'), 'r')) chat_id =", "= psycopg2.connect(os.getenv('POSTGRES_CREDENTIALS')) for m in data['messages']: if 'photo' in m:", "m in data['messages']: if 'photo' in m: template, text =", "chat_id, message_id) VALUES (%s, %s, %s, %s)\", (template, text, chat_id,", "conn = psycopg2.connect(os.getenv('POSTGRES_CREDENTIALS')) for m in data['messages']: if 'photo' in", "data['id'] conn = psycopg2.connect(os.getenv('POSTGRES_CREDENTIALS')) for m in data['messages']: if 'photo'", "cur.execute(\"INSERT INTO meme (template, text, chat_id, message_id) VALUES (%s, %s,", "= sys.argv[1] data = json.load(open(os.path.join(path, 'result.json'), 'r')) chat_id = data['id']", "meme (template, text, chat_id, message_id) VALUES (%s, %s, %s, %s)\",", "(template, text, chat_id, message_id) VALUES (%s, %s, %s, %s)\", (template,", "json.load(open(os.path.join(path, 'result.json'), 'r')) chat_id = data['id'] conn = psycopg2.connect(os.getenv('POSTGRES_CREDENTIALS')) for", "for m in data['messages']: if 'photo' in m: template, text", "m['id'] print(f'processing message {message_id}') cur = conn.cursor() cur.execute(\"INSERT INTO meme", "= conn.cursor() cur.execute(\"INSERT INTO meme (template, text, chat_id, message_id) VALUES", "load_dotenv load_dotenv() import sys import os import re import json", "'photo' in m: template, text = 
process_image(open(os.path.join(path, m['photo']), 'rb')) message_id" ]
[ "models.BooleanField(default=False)), ], ), migrations.DeleteModel( name='AggregatedResults', ), migrations.DeleteModel( name='ProductUsers', ), migrations.RenameField(", "old_name='number_clients_negative', new_name='num_clients_negative', ), migrations.RenameField( model_name='surveyaggregations', old_name='number_clients_positive', new_name='num_clients_positive', ), migrations.RenameField( model_name='surveyaggregations',", "migrations.AddField( model_name='surveyaggregations', name='user_type', field=models.CharField(blank=True, default=None, max_length=30, null=True), ), migrations.AlterUniqueTogether( name='clientaggregations',", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('nps', '0012_auto_20180314_1600'),", "migrations.RemoveField( model_name='surveyaggregations', name='percent_clients_neutral', ), migrations.AddField( model_name='productaggregations', name='user_type', field=models.CharField(blank=True, default=None, max_length=30,", "name='ProductUsers', ), migrations.RenameField( model_name='productaggregations', old_name='total_detractors', new_name='detractors', ), migrations.RenameField( model_name='productaggregations', old_name='total_neutral',", "new_name='neutral', ), migrations.RenameField( model_name='productaggregations', old_name='number_clients_negative', new_name='num_clients_negative', ), migrations.RenameField( model_name='productaggregations', old_name='number_clients_positive',", "model_name='productaggregations', name='number_clients_neutral', ), migrations.RemoveField( model_name='productaggregations', name='percent_clients_neutral', ), migrations.RemoveField( model_name='surveyaggregations', name='number_clients_neutral',", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('client', models.CharField(max_length=30)), ('survey', models.CharField(max_length=30)), ('user_type',", "('statistically_significant', models.BooleanField(default=False)), 
], ), migrations.DeleteModel( name='AggregatedResults', ), migrations.DeleteModel( name='ProductUsers', ),", "('percent_detractors', models.FloatField(blank=True, default=None, null=True)), ('percent_promoters', models.FloatField(blank=True, default=None, null=True)), ('percent_neutral', models.FloatField(blank=True,", "('promoters', models.IntegerField()), ('detractors', models.IntegerField()), ('neutral', models.IntegerField()), ('percent_detractors', models.FloatField(blank=True, default=None, null=True)),", "migrations.RenameField( model_name='productaggregations', old_name='number_clients_positive', new_name='num_clients_positive', ), migrations.RenameField( model_name='productaggregations', old_name='total_promoters', new_name='promoters', ),", "model_name='surveyaggregations', old_name='number_clients_positive', new_name='num_clients_positive', ), migrations.RenameField( model_name='surveyaggregations', old_name='total_promoters', new_name='promoters', ), migrations.RemoveField(", "field=models.CharField(blank=True, default=None, max_length=30, null=True), ), migrations.AlterUniqueTogether( name='clientaggregations', unique_together={('client', 'survey', 'user_type')},", "), migrations.DeleteModel( name='ProductUsers', ), migrations.RenameField( model_name='productaggregations', old_name='total_detractors', new_name='detractors', ), migrations.RenameField(", "migrations.RenameField( model_name='productaggregations', old_name='total_detractors', new_name='detractors', ), migrations.RenameField( model_name='productaggregations', old_name='total_neutral', new_name='neutral', ),", "old_name='number_clients_positive', new_name='num_clients_positive', ), migrations.RenameField( model_name='surveyaggregations', old_name='total_promoters', new_name='promoters', ), migrations.RemoveField( model_name='productaggregations',", "model_name='surveyaggregations', name='percent_clients_neutral', ), migrations.AddField( model_name='productaggregations', name='user_type', 
field=models.CharField(blank=True, default=None, max_length=30, null=True),", "2.0.3 on 2018-03-15 01:05 from django.db import migrations, models class", "model_name='surveyaggregations', old_name='number_clients_negative', new_name='num_clients_negative', ), migrations.RenameField( model_name='surveyaggregations', old_name='number_clients_positive', new_name='num_clients_positive', ), migrations.RenameField(", "model_name='productaggregations', old_name='total_detractors', new_name='detractors', ), migrations.RenameField( model_name='productaggregations', old_name='total_neutral', new_name='neutral', ), migrations.RenameField(", "new_name='num_clients_positive', ), migrations.RenameField( model_name='surveyaggregations', old_name='total_promoters', new_name='promoters', ), migrations.RemoveField( model_name='productaggregations', name='number_clients_neutral',", "models.FloatField()), ('total_responses', models.IntegerField()), ('promoters', models.IntegerField()), ('detractors', models.IntegerField()), ('neutral', models.IntegerField()), ('percent_detractors',", "), migrations.RemoveField( model_name='productaggregations', name='percent_clients_neutral', ), migrations.RemoveField( model_name='surveyaggregations', name='number_clients_neutral', ), migrations.RemoveField(", "old_name='number_clients_negative', new_name='num_clients_negative', ), migrations.RenameField( model_name='productaggregations', old_name='number_clients_positive', new_name='num_clients_positive', ), migrations.RenameField( model_name='productaggregations',", "new_name='promoters', ), migrations.RemoveField( model_name='productaggregations', name='number_clients_neutral', ), migrations.RemoveField( model_name='productaggregations', name='percent_clients_neutral', ),", "('nps_score', models.FloatField()), ('total_responses', models.IntegerField()), ('promoters', models.IntegerField()), ('detractors', models.IntegerField()), ('neutral', models.IntegerField()),", "null=True)), ('nps_score', 
models.FloatField()), ('total_responses', models.IntegerField()), ('promoters', models.IntegerField()), ('detractors', models.IntegerField()), ('neutral',", "('survey', models.CharField(max_length=30)), ('user_type', models.CharField(blank=True, default=None, max_length=30, null=True)), ('nps_score', models.FloatField()), ('total_responses',", "model_name='surveyaggregations', name='number_clients_neutral', ), migrations.RemoveField( model_name='surveyaggregations', name='percent_clients_neutral', ), migrations.AddField( model_name='productaggregations', name='user_type',", "[ ('nps', '0012_auto_20180314_1600'), ] operations = [ migrations.CreateModel( name='ClientAggregations', fields=[", "null=True)), ('statistically_significant', models.BooleanField(default=False)), ], ), migrations.DeleteModel( name='AggregatedResults', ), migrations.DeleteModel( name='ProductUsers',", "2018-03-15 01:05 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "= [ ('nps', '0012_auto_20180314_1600'), ] operations = [ migrations.CreateModel( name='ClientAggregations',", "serialize=False, verbose_name='ID')), ('client', models.CharField(max_length=30)), ('survey', models.CharField(max_length=30)), ('user_type', models.CharField(blank=True, default=None, max_length=30,", "models.IntegerField()), ('promoters', models.IntegerField()), ('detractors', models.IntegerField()), ('neutral', models.IntegerField()), ('percent_detractors', models.FloatField(blank=True, default=None,", "default=None, null=True)), ('percent_neutral', models.FloatField(blank=True, default=None, null=True)), ('statistically_significant', models.BooleanField(default=False)), ], ),", "), migrations.AddField( model_name='surveyaggregations', name='user_type', field=models.CharField(blank=True, default=None, max_length=30, null=True), ), migrations.AlterUniqueTogether(", "), migrations.DeleteModel( name='AggregatedResults', ), migrations.DeleteModel( name='ProductUsers', ), 
migrations.RenameField( model_name='productaggregations', old_name='total_detractors',", "migrations.RenameField( model_name='productaggregations', old_name='number_clients_negative', new_name='num_clients_negative', ), migrations.RenameField( model_name='productaggregations', old_name='number_clients_positive', new_name='num_clients_positive', ),", "model_name='surveyaggregations', old_name='total_detractors', new_name='detractors', ), migrations.RenameField( model_name='surveyaggregations', old_name='total_neutral', new_name='neutral', ), migrations.RenameField(", "Migration(migrations.Migration): dependencies = [ ('nps', '0012_auto_20180314_1600'), ] operations = [", "on 2018-03-15 01:05 from django.db import migrations, models class Migration(migrations.Migration):", "operations = [ migrations.CreateModel( name='ClientAggregations', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "migrations.RenameField( model_name='surveyaggregations', old_name='total_neutral', new_name='neutral', ), migrations.RenameField( model_name='surveyaggregations', old_name='number_clients_negative', new_name='num_clients_negative', ),", "dependencies = [ ('nps', '0012_auto_20180314_1600'), ] operations = [ migrations.CreateModel(", "), migrations.RenameField( model_name='productaggregations', old_name='total_neutral', new_name='neutral', ), migrations.RenameField( model_name='productaggregations', old_name='number_clients_negative', new_name='num_clients_negative',", "migrations.RemoveField( model_name='productaggregations', name='percent_clients_neutral', ), migrations.RemoveField( model_name='surveyaggregations', name='number_clients_neutral', ), migrations.RemoveField( model_name='surveyaggregations',", "name='number_clients_neutral', ), migrations.RemoveField( model_name='surveyaggregations', name='percent_clients_neutral', ), migrations.AddField( model_name='productaggregations', name='user_type', field=models.CharField(blank=True,", "), 
migrations.RemoveField( model_name='surveyaggregations', name='percent_clients_neutral', ), migrations.AddField( model_name='productaggregations', name='user_type', field=models.CharField(blank=True, default=None,", "Django 2.0.3 on 2018-03-15 01:05 from django.db import migrations, models", "new_name='promoters', ), migrations.RenameField( model_name='surveyaggregations', old_name='total_detractors', new_name='detractors', ), migrations.RenameField( model_name='surveyaggregations', old_name='total_neutral',", "'0012_auto_20180314_1600'), ] operations = [ migrations.CreateModel( name='ClientAggregations', fields=[ ('id', models.AutoField(auto_created=True,", "model_name='surveyaggregations', old_name='total_promoters', new_name='promoters', ), migrations.RemoveField( model_name='productaggregations', name='number_clients_neutral', ), migrations.RemoveField( model_name='productaggregations',", "('client', models.CharField(max_length=30)), ('survey', models.CharField(max_length=30)), ('user_type', models.CharField(blank=True, default=None, max_length=30, null=True)), ('nps_score',", "models.CharField(max_length=30)), ('user_type', models.CharField(blank=True, default=None, max_length=30, null=True)), ('nps_score', models.FloatField()), ('total_responses', models.IntegerField()),", "old_name='total_neutral', new_name='neutral', ), migrations.RenameField( model_name='surveyaggregations', old_name='number_clients_negative', new_name='num_clients_negative', ), migrations.RenameField( model_name='surveyaggregations',", "migrations.RenameField( model_name='surveyaggregations', old_name='number_clients_positive', new_name='num_clients_positive', ), migrations.RenameField( model_name='surveyaggregations', old_name='total_promoters', new_name='promoters', ),", "('total_responses', models.IntegerField()), ('promoters', models.IntegerField()), ('detractors', models.IntegerField()), ('neutral', models.IntegerField()), ('percent_detractors', models.FloatField(blank=True,", 
"migrations.CreateModel( name='ClientAggregations', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('client', models.CharField(max_length=30)),", "default=None, null=True)), ('percent_promoters', models.FloatField(blank=True, default=None, null=True)), ('percent_neutral', models.FloatField(blank=True, default=None, null=True)),", "max_length=30, null=True), ), migrations.AddField( model_name='surveyaggregations', name='user_type', field=models.CharField(blank=True, default=None, max_length=30, null=True),", "null=True)), ('percent_neutral', models.FloatField(blank=True, default=None, null=True)), ('statistically_significant', models.BooleanField(default=False)), ], ), migrations.DeleteModel(", "[ migrations.CreateModel( name='ClientAggregations', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('client',", "migrations, models class Migration(migrations.Migration): dependencies = [ ('nps', '0012_auto_20180314_1600'), ]", "old_name='total_promoters', new_name='promoters', ), migrations.RenameField( model_name='surveyaggregations', old_name='total_detractors', new_name='detractors', ), migrations.RenameField( model_name='surveyaggregations',", "models.IntegerField()), ('detractors', models.IntegerField()), ('neutral', models.IntegerField()), ('percent_detractors', models.FloatField(blank=True, default=None, null=True)), ('percent_promoters',", "), migrations.RenameField( model_name='surveyaggregations', old_name='total_neutral', new_name='neutral', ), migrations.RenameField( model_name='surveyaggregations', old_name='number_clients_negative', new_name='num_clients_negative',", "primary_key=True, serialize=False, verbose_name='ID')), ('client', models.CharField(max_length=30)), ('survey', models.CharField(max_length=30)), ('user_type', models.CharField(blank=True, default=None,", "null=True)), ('percent_promoters', models.FloatField(blank=True, 
default=None, null=True)), ('percent_neutral', models.FloatField(blank=True, default=None, null=True)), ('statistically_significant',", "new_name='num_clients_negative', ), migrations.RenameField( model_name='productaggregations', old_name='number_clients_positive', new_name='num_clients_positive', ), migrations.RenameField( model_name='productaggregations', old_name='total_promoters',", "models.IntegerField()), ('neutral', models.IntegerField()), ('percent_detractors', models.FloatField(blank=True, default=None, null=True)), ('percent_promoters', models.FloatField(blank=True, default=None,", "), migrations.RenameField( model_name='surveyaggregations', old_name='number_clients_positive', new_name='num_clients_positive', ), migrations.RenameField( model_name='surveyaggregations', old_name='total_promoters', new_name='promoters',", "models class Migration(migrations.Migration): dependencies = [ ('nps', '0012_auto_20180314_1600'), ] operations", "default=None, null=True)), ('statistically_significant', models.BooleanField(default=False)), ], ), migrations.DeleteModel( name='AggregatedResults', ), migrations.DeleteModel(", "models.CharField(max_length=30)), ('survey', models.CharField(max_length=30)), ('user_type', models.CharField(blank=True, default=None, max_length=30, null=True)), ('nps_score', models.FloatField()),", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('nps',", "new_name='detractors', ), migrations.RenameField( model_name='productaggregations', old_name='total_neutral', new_name='neutral', ), migrations.RenameField( model_name='productaggregations', old_name='number_clients_negative',", "models.FloatField(blank=True, default=None, null=True)), ('percent_promoters', models.FloatField(blank=True, default=None, null=True)), ('percent_neutral', models.FloatField(blank=True, default=None,", "), migrations.RenameField( model_name='surveyaggregations', old_name='total_detractors', new_name='detractors', ), 
migrations.RenameField( model_name='surveyaggregations', old_name='total_neutral', new_name='neutral',", "name='AggregatedResults', ), migrations.DeleteModel( name='ProductUsers', ), migrations.RenameField( model_name='productaggregations', old_name='total_detractors', new_name='detractors', ),", "default=None, max_length=30, null=True), ), migrations.AddField( model_name='surveyaggregations', name='user_type', field=models.CharField(blank=True, default=None, max_length=30,", "01:05 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "name='ClientAggregations', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('client', models.CharField(max_length=30)), ('survey',", "] operations = [ migrations.CreateModel( name='ClientAggregations', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "), migrations.RenameField( model_name='surveyaggregations', old_name='total_promoters', new_name='promoters', ), migrations.RemoveField( model_name='productaggregations', name='number_clients_neutral', ),", "migrations.RenameField( model_name='productaggregations', old_name='total_promoters', new_name='promoters', ), migrations.RenameField( model_name='surveyaggregations', old_name='total_detractors', new_name='detractors', ),", "), migrations.RenameField( model_name='productaggregations', old_name='total_detractors', new_name='detractors', ), migrations.RenameField( model_name='productaggregations', old_name='total_neutral', new_name='neutral',", "('percent_neutral', models.FloatField(blank=True, default=None, null=True)), ('statistically_significant', models.BooleanField(default=False)), ], ), migrations.DeleteModel( name='AggregatedResults',", "model_name='productaggregations', name='percent_clients_neutral', ), migrations.RemoveField( model_name='surveyaggregations', name='number_clients_neutral', ), migrations.RemoveField( model_name='surveyaggregations', 
name='percent_clients_neutral',", "], ), migrations.DeleteModel( name='AggregatedResults', ), migrations.DeleteModel( name='ProductUsers', ), migrations.RenameField( model_name='productaggregations',", "by Django 2.0.3 on 2018-03-15 01:05 from django.db import migrations,", "migrations.DeleteModel( name='AggregatedResults', ), migrations.DeleteModel( name='ProductUsers', ), migrations.RenameField( model_name='productaggregations', old_name='total_detractors', new_name='detractors',", "('nps', '0012_auto_20180314_1600'), ] operations = [ migrations.CreateModel( name='ClientAggregations', fields=[ ('id',", "migrations.DeleteModel( name='ProductUsers', ), migrations.RenameField( model_name='productaggregations', old_name='total_detractors', new_name='detractors', ), migrations.RenameField( model_name='productaggregations',", "model_name='productaggregations', name='user_type', field=models.CharField(blank=True, default=None, max_length=30, null=True), ), migrations.AddField( model_name='surveyaggregations', name='user_type',", "default=None, max_length=30, null=True), ), migrations.AlterUniqueTogether( name='clientaggregations', unique_together={('client', 'survey', 'user_type')}, ),", "models.FloatField(blank=True, default=None, null=True)), ('statistically_significant', models.BooleanField(default=False)), ], ), migrations.DeleteModel( name='AggregatedResults', ),", "migrations.RenameField( model_name='surveyaggregations', old_name='total_detractors', new_name='detractors', ), migrations.RenameField( model_name='surveyaggregations', old_name='total_neutral', new_name='neutral', ),", "model_name='productaggregations', old_name='number_clients_positive', new_name='num_clients_positive', ), migrations.RenameField( model_name='productaggregations', old_name='total_promoters', new_name='promoters', ), migrations.RenameField(", "model_name='productaggregations', old_name='total_promoters', new_name='promoters', ), migrations.RenameField( model_name='surveyaggregations', 
old_name='total_detractors', new_name='detractors', ), migrations.RenameField(", "), migrations.RemoveField( model_name='productaggregations', name='number_clients_neutral', ), migrations.RemoveField( model_name='productaggregations', name='percent_clients_neutral', ), migrations.RemoveField(", "old_name='total_promoters', new_name='promoters', ), migrations.RemoveField( model_name='productaggregations', name='number_clients_neutral', ), migrations.RemoveField( model_name='productaggregations', name='percent_clients_neutral',", "models.CharField(blank=True, default=None, max_length=30, null=True)), ('nps_score', models.FloatField()), ('total_responses', models.IntegerField()), ('promoters', models.IntegerField()),", "new_name='num_clients_positive', ), migrations.RenameField( model_name='productaggregations', old_name='total_promoters', new_name='promoters', ), migrations.RenameField( model_name='surveyaggregations', old_name='total_detractors',", "default=None, max_length=30, null=True)), ('nps_score', models.FloatField()), ('total_responses', models.IntegerField()), ('promoters', models.IntegerField()), ('detractors',", "Generated by Django 2.0.3 on 2018-03-15 01:05 from django.db import", "name='percent_clients_neutral', ), migrations.AddField( model_name='productaggregations', name='user_type', field=models.CharField(blank=True, default=None, max_length=30, null=True), ),", "migrations.AddField( model_name='productaggregations', name='user_type', field=models.CharField(blank=True, default=None, max_length=30, null=True), ), migrations.AddField( model_name='surveyaggregations',", "# Generated by Django 2.0.3 on 2018-03-15 01:05 from django.db", "null=True), ), migrations.AddField( model_name='surveyaggregations', name='user_type', field=models.CharField(blank=True, default=None, max_length=30, null=True), ),", "model_name='surveyaggregations', old_name='total_neutral', new_name='neutral', ), migrations.RenameField( model_name='surveyaggregations', 
old_name='number_clients_negative', new_name='num_clients_negative', ), migrations.RenameField(", "model_name='productaggregations', old_name='number_clients_negative', new_name='num_clients_negative', ), migrations.RenameField( model_name='productaggregations', old_name='number_clients_positive', new_name='num_clients_positive', ), migrations.RenameField(", "max_length=30, null=True)), ('nps_score', models.FloatField()), ('total_responses', models.IntegerField()), ('promoters', models.IntegerField()), ('detractors', models.IntegerField()),", "field=models.CharField(blank=True, default=None, max_length=30, null=True), ), migrations.AddField( model_name='surveyaggregations', name='user_type', field=models.CharField(blank=True, default=None,", "name='user_type', field=models.CharField(blank=True, default=None, max_length=30, null=True), ), migrations.AlterUniqueTogether( name='clientaggregations', unique_together={('client', 'survey',", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('client', models.CharField(max_length=30)), ('survey', models.CharField(max_length=30)),", "migrations.RenameField( model_name='surveyaggregations', old_name='number_clients_negative', new_name='num_clients_negative', ), migrations.RenameField( model_name='surveyaggregations', old_name='number_clients_positive', new_name='num_clients_positive', ),", "class Migration(migrations.Migration): dependencies = [ ('nps', '0012_auto_20180314_1600'), ] operations =", "migrations.RemoveField( model_name='surveyaggregations', name='number_clients_neutral', ), migrations.RemoveField( model_name='surveyaggregations', name='percent_clients_neutral', ), migrations.AddField( model_name='productaggregations',", "name='number_clients_neutral', ), migrations.RemoveField( model_name='productaggregations', name='percent_clients_neutral', ), migrations.RemoveField( model_name='surveyaggregations', name='number_clients_neutral', ),", "max_length=30, 
null=True), ), migrations.AlterUniqueTogether( name='clientaggregations', unique_together={('client', 'survey', 'user_type')}, ), ]", "new_name='num_clients_negative', ), migrations.RenameField( model_name='surveyaggregations', old_name='number_clients_positive', new_name='num_clients_positive', ), migrations.RenameField( model_name='surveyaggregations', old_name='total_promoters',", "old_name='total_detractors', new_name='detractors', ), migrations.RenameField( model_name='productaggregations', old_name='total_neutral', new_name='neutral', ), migrations.RenameField( model_name='productaggregations',", "), migrations.RenameField( model_name='productaggregations', old_name='total_promoters', new_name='promoters', ), migrations.RenameField( model_name='surveyaggregations', old_name='total_detractors', new_name='detractors',", "new_name='detractors', ), migrations.RenameField( model_name='surveyaggregations', old_name='total_neutral', new_name='neutral', ), migrations.RenameField( model_name='surveyaggregations', old_name='number_clients_negative',", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "model_name='productaggregations', old_name='total_neutral', new_name='neutral', ), migrations.RenameField( model_name='productaggregations', old_name='number_clients_negative', new_name='num_clients_negative', ), migrations.RenameField(", "model_name='surveyaggregations', name='user_type', field=models.CharField(blank=True, default=None, max_length=30, null=True), ), migrations.AlterUniqueTogether( name='clientaggregations', unique_together={('client',", "migrations.RenameField( model_name='surveyaggregations', old_name='total_promoters', new_name='promoters', ), migrations.RemoveField( model_name='productaggregations', name='number_clients_neutral', ), migrations.RemoveField(", "old_name='total_detractors', new_name='detractors', ), migrations.RenameField( model_name='surveyaggregations', old_name='total_neutral', 
new_name='neutral', ), migrations.RenameField( model_name='surveyaggregations',", "verbose_name='ID')), ('client', models.CharField(max_length=30)), ('survey', models.CharField(max_length=30)), ('user_type', models.CharField(blank=True, default=None, max_length=30, null=True)),", "name='percent_clients_neutral', ), migrations.RemoveField( model_name='surveyaggregations', name='number_clients_neutral', ), migrations.RemoveField( model_name='surveyaggregations', name='percent_clients_neutral', ),", "('percent_promoters', models.FloatField(blank=True, default=None, null=True)), ('percent_neutral', models.FloatField(blank=True, default=None, null=True)), ('statistically_significant', models.BooleanField(default=False)),", "('neutral', models.IntegerField()), ('percent_detractors', models.FloatField(blank=True, default=None, null=True)), ('percent_promoters', models.FloatField(blank=True, default=None, null=True)),", "new_name='neutral', ), migrations.RenameField( model_name='surveyaggregations', old_name='number_clients_negative', new_name='num_clients_negative', ), migrations.RenameField( model_name='surveyaggregations', old_name='number_clients_positive',", "models.IntegerField()), ('percent_detractors', models.FloatField(blank=True, default=None, null=True)), ('percent_promoters', models.FloatField(blank=True, default=None, null=True)), ('percent_neutral',", "), migrations.RenameField( model_name='surveyaggregations', old_name='number_clients_negative', new_name='num_clients_negative', ), migrations.RenameField( model_name='surveyaggregations', old_name='number_clients_positive', new_name='num_clients_positive',", "old_name='number_clients_positive', new_name='num_clients_positive', ), migrations.RenameField( model_name='productaggregations', old_name='total_promoters', new_name='promoters', ), migrations.RenameField( model_name='surveyaggregations',", "= [ migrations.CreateModel( name='ClientAggregations', fields=[ ('id', models.AutoField(auto_created=True, 
primary_key=True, serialize=False, verbose_name='ID')),", "models.FloatField(blank=True, default=None, null=True)), ('percent_neutral', models.FloatField(blank=True, default=None, null=True)), ('statistically_significant', models.BooleanField(default=False)), ],", "), migrations.RemoveField( model_name='surveyaggregations', name='number_clients_neutral', ), migrations.RemoveField( model_name='surveyaggregations', name='percent_clients_neutral', ), migrations.AddField(", "('user_type', models.CharField(blank=True, default=None, max_length=30, null=True)), ('nps_score', models.FloatField()), ('total_responses', models.IntegerField()), ('promoters',", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('client', models.CharField(max_length=30)), ('survey', models.CharField(max_length=30)), ('user_type', models.CharField(blank=True,", "migrations.RenameField( model_name='productaggregations', old_name='total_neutral', new_name='neutral', ), migrations.RenameField( model_name='productaggregations', old_name='number_clients_negative', new_name='num_clients_negative', ),", "), migrations.RenameField( model_name='productaggregations', old_name='number_clients_negative', new_name='num_clients_negative', ), migrations.RenameField( model_name='productaggregations', old_name='number_clients_positive', new_name='num_clients_positive',", "name='user_type', field=models.CharField(blank=True, default=None, max_length=30, null=True), ), migrations.AddField( model_name='surveyaggregations', name='user_type', field=models.CharField(blank=True,", "), migrations.RenameField( model_name='productaggregations', old_name='number_clients_positive', new_name='num_clients_positive', ), migrations.RenameField( model_name='productaggregations', old_name='total_promoters', new_name='promoters',", "migrations.RemoveField( model_name='productaggregations', name='number_clients_neutral', ), migrations.RemoveField( model_name='productaggregations', 
name='percent_clients_neutral', ), migrations.RemoveField( model_name='surveyaggregations',", "), migrations.AddField( model_name='productaggregations', name='user_type', field=models.CharField(blank=True, default=None, max_length=30, null=True), ), migrations.AddField(", "old_name='total_neutral', new_name='neutral', ), migrations.RenameField( model_name='productaggregations', old_name='number_clients_negative', new_name='num_clients_negative', ), migrations.RenameField( model_name='productaggregations',", "('detractors', models.IntegerField()), ('neutral', models.IntegerField()), ('percent_detractors', models.FloatField(blank=True, default=None, null=True)), ('percent_promoters', models.FloatField(blank=True," ]
[ "' 'you shouldn\\'t use this declaration, as it would not", "{\"type\": \"integer\"}}', 'JSON Schema Validation', ( 'And equivalently for any", "v += '\\n|' for i, text in enumerate(row): text =", "'string', '{\"format\": \"dsn\"}', 'Pydantic standard \"format\" extension', '' ], [", "Core', '' ], [ 'ConstrainedStr', 'string', '', 'JSON Schema Core',", "'string', '{\"format\": \"uuid4\"}', 'Pydantic standard \"format\" extension', '' ], [", "'' ], [ 'UUID3', 'string', '{\"format\": \"uuid3\"}', 'Pydantic standard \"format\"", "], [ 'ConstrainedFloat', 'number', '', 'JSON Schema Core', ( 'If", "[ 'float', 'number', '', 'JSON Schema Core', '' ], [", "the definition.' ], [ 'SecretStr', 'string', '{\"writeOnly\": true}', 'JSON Schema", "'string', '{\"format\": \"time\"}', 'JSON Schema Validation', '' ], [ 'timedelta',", "i, text in enumerate(row): text = f'``{text}``' if i <", "'{\"writeOnly\": true}', 'JSON Schema Validation', '' ], [ 'SecretBytes', 'string',", "col_width = 300 for _ in range(5): v += '+'", "table of Python / Pydantic to JSON Schema mappings. Done", "le=5, multiple_of=2)', 'number', '{\"maximum\": 5, \"exclusiveMaximum\": 6, \"minimum\": 2, \"exclusiveMinimum\":", "in enumerate(row): text = f'``{text}``' if i < 3 and", "'', 'JSON Schema Core', '' ], [ 'ConstrainedStr', 'string', '',", "' 'See the mapping for ``condecimal`` below.' ) ], [", "'See the mapping for ``conint`` below.' ) ], [ 'conint(gt=1,", "JSON Schema).' 
) ], [ 'Dict[str, int]', 'object', '{\"additionalProperties\": {\"type\":", "true}', 'JSON Schema Validation', '' ], [ 'SecretBytes', 'string', '{\"writeOnly\":", "'If the type has values declared for the constraints, they", "'' ], [ 'UUID5', 'string', '{\"format\": \"uuid5\"}', 'Pydantic standard \"format\"", "_ in range(5): v += '+' + '-' * col_width", "< 3 and text else text v += f' {text:{col_width", "], [ 'str', 'string', '', 'JSON Schema Core', '' ],", "[ 'UUID3', 'string', '{\"format\": \"uuid3\"}', 'Pydantic standard \"format\" extension', ''", "validations.' 'See the mapping for ``confloat`` below.' ) ], [", "range(5): v += '+' + '=' * col_width v +=", "[ 'List[str]', 'array', '{\"items\": {\"type\": \"string\"}}', 'JSON Schema Validation', 'And", "'JSON Schema Validation', 'And equivalently for any other sub type,", "JSON Schema', 'Defined in', 'Notes', ] v = '' col_width", "'All the literal values in the enum are included in", "for any other sub type, e.g. List[int].' ], [ 'Tuple[str,", "valid as ' 'JSON Schema key types.' ) ], [", "equivalently for any other sub type, e.g. List[int].' ], [", "Type', 'Additional JSON Schema', 'Defined in', 'Notes', ] v =", "'{\"format\": \"ipvanyaddress\"}', 'Pydantic standard \"format\" extension', 'IPv4 or IPv6 address", "'string', '{\"format\": \"json-string\"}', 'Pydantic standard \"format\" extension', '' ], [", "so, only str is valid as ' 'JSON Schema key", "'number', '{\"exclusiveMinimum\": 0}', 'JSON Schema Validation', '' ], [ 'NegativeFloat',", "+= '\\n|' for i, text in enumerate(row): text = f'``{text}``'", "[ 'ConstrainedFloat', 'number', '', 'JSON Schema Core', ( 'If the", "OpenAPI.' ], [ 'FilePath', 'string', '{\"format\": \"file-path\"}', 'Pydantic standard \"format\"", "with standard JSON Schema, including submodels.' ] ] headings =", "standard JSON Schema, including submodels.' 
] ] headings = [", "* col_width v += '+' for row in table: v", "Schema Validation', '' ], [ 'dict', 'object', '', 'JSON Schema", "set of subtypes. Note: If using schemas for OpenAPI, '", "'JSON Schema Validation', '' ], [ 'timedelta', 'number', '{\"format\": \"time-delta\"}',", "standard \"format\" extension', '' ], [ 'DirectoryPath', 'string', '{\"format\": \"directory-path\"}',", "(although it is ' 'valid in JSON Schema).' ) ],", "mapping for ``constr`` below.' ) ], [ 'constr(regex=\\'^text$\\', min_length=2, max_length=10)',", "[ 'ConstrainedInt', 'integer', '', 'JSON Schema Core', ( 'If the", "be included in the schema.' ], [ 'BaseModel', 'object', '',", "v += '+' + '-' * col_width v += '+\\n|'", "Build a table of Python / Pydantic to JSON Schema", "], [ 'List[str]', 'array', '{\"items\": {\"type\": \"string\"}}', 'JSON Schema Validation',", "' 'keys for dicts with Pydantic, only strings are valid", "\"exclusiveMaximum\": 6, \"minimum\": 2, \"exclusiveMinimum\": 1, \"multipleOf\": 2}', 'JSON Schema", "[ 'Python type', 'JSON Schema Type', 'Additional JSON Schema', 'Defined", "'array', '{\"uniqueItems\": true}', 'JSON Schema Validation', '' ], [ 'List[str]',", "str is valid as ' 'JSON Schema key types.' )", "other subfields for unions.' ], [ 'Enum', 'enum', '{\"enum\": [...]}',", "extension', '' ], [ 'Path', 'string', '{\"format\": \"path\"}', 'Pydantic standard", "text else text v += f' {text:{col_width - 2}} |'", "defined will be defined with standard JSON Schema, including submodels.'", "table: v += '\\n|' for i, text in enumerate(row): text", "extension', 'Suggested in JSON Schema repository\\'s issues by maintainer.' ],", "for any other set of subtypes. Note: If using schemas", "is valid as ' 'JSON Schema key types.' ) ],", "``conint`` below.' 
) ], [ 'conint(gt=1, ge=2, lt=6, le=5, multiple_of=2)',", "], [ 'confloat(gt=1, ge=2, lt=6, le=5, multiple_of=2)', 'number', '{\"maximum\": 5,", "'JSON Schema Validation', '' ], [ 'List[str]', 'array', '{\"items\": {\"type\":", "Schema Validation', ( 'And equivalently for any other set of", "], [ 'bytes', 'string', '{\"format\": \"binary\"}', 'OpenAPI', '' ], [", "], [ 'IPvAnyAddress', 'string', '{\"format\": \"ipvanyaddress\"}', 'Pydantic standard \"format\" extension',", "standard \"format\" extension', 'IPv4 or IPv6 address as used in", "'JSON Schema Core', '' ], [ 'UUID1', 'string', '{\"format\": \"uuid1\"}',", "' 'See the mapping for ``conint`` below.' ) ], [", "multiple_of=2)', 'integer', '{\"maximum\": 5, \"exclusiveMaximum\": 6, \"minimum\": 2, \"exclusiveMinimum\": 1,", "the mapping for ``condecimal`` below.' ) ], [ 'condecimal(gt=1, ge=2,", "'PositiveInt', 'integer', '{\"exclusiveMinimum\": 0}', 'JSON Schema Validation', '' ], [", "keys for JSON, and so, only str is valid as", "'UUID4', 'string', '{\"format\": \"uuid4\"}', 'Pydantic standard \"format\" extension', '' ],", "'OpenAPI', '' ], [ 'Decimal', 'number', '', 'JSON Schema Core',", "\"exclusiveMinimum\": 1, \"multipleOf\": 2}', '', 'Any argument not passed to", "+= '+' for row in table: v += '\\n|' for", "'boolean', '', 'JSON Schema Core', '' ], [ 'str', 'string',", "'enum', '{\"enum\": [...]}', 'JSON Schema Validation', 'All the literal values", "type', 'JSON Schema Type', 'Additional JSON Schema', 'Defined in', 'Notes',", "true}', 'JSON Schema Validation', '' ], [ 'EmailStr', 'string', '{\"format\":", "for ``conint`` below.' 
) ], [ 'conint(gt=1, ge=2, lt=6, le=5,", "'{\"anyOf\": [{\"type\": \"string\"}, {\"type\": \"integer\"}]}', 'JSON Schema Validation', 'And equivalently", "{\"type\": \"integer\"}]}', 'JSON Schema Validation', 'And equivalently for any other", "lt=6, le=5, multiple_of=2)', 'number', '{\"maximum\": 5, \"exclusiveMaximum\": 6, \"minimum\": 2,", "'tuple', 'array', '', 'JSON Schema Core', '' ], [ 'set',", "Schema Core', '' ], [ 'set', 'array', '{\"uniqueItems\": true}', 'JSON", "'string', '{\"format\": \"uuid1\"}', 'Pydantic standard \"format\" extension', '' ], [", "Done like this rather than as a raw rst table", "Schema Core', '' ], [ 'ConstrainedStr', 'string', '', 'JSON Schema", "[ 'IPvAnyInterface', 'string', '{\"format\": \"ipvanyinterface\"}', 'Pydantic standard \"format\" extension', 'IPv4", "``ipaddress`` module', ], [ 'IPvAnyNetwork', 'string', '{\"format\": \"ipvanynetwork\"}', 'Pydantic standard", "+= '+' + '-' * col_width v += '+' with", "'JSON Schema Core', '' ], [ 'set', 'array', '{\"uniqueItems\": true}',", "mapping for ``conint`` below.' ) ], [ 'conint(gt=1, ge=2, lt=6,", "``condecimal`` below.' ) ], [ 'condecimal(gt=1, ge=2, lt=6, le=5, multiple_of=2)',", "are included as validations. ' 'See the mapping for ``condecimal``", "extension', '' ], [ 'UUID', 'string', '{\"format\": \"uuid\"}', 'Pydantic standard", "strings are valid keys for JSON, and so, only str", "'number', '', 'JSON Schema Core', '' ], [ 'int', 'integer',", "' 'valid in JSON Schema).' ) ], [ 'Dict[str, int]',", "= f'``{text}``' if i < 3 and text else text", "2}', 'JSON Schema Validation', 'Any argument not passed to the", "edits easier. Please edit this file directly not .tmp_schema_mappings.rst \"\"\"", "for any other subfields for dicts. 
Have in mind that", "'Any argument not passed to the function (not defined) will", "'{\"format\": \"time\"}', 'JSON Schema Validation', '' ], [ 'timedelta', 'number',", "headings: v += f' {heading:{col_width - 2}} |' v +=", "Validation', '' ], [ 'timedelta', 'number', '{\"format\": \"time-delta\"}', 'Difference in", "Schema Validation', 'Any argument not passed to the function (not", "``float``), with Pydantic standard \"format\" extension', 'Suggested in JSON Schema", "'JSON Schema Validation', '' ], [ 'dict', 'object', '', 'JSON", "'And equivalently for any other subfields for unions.' ], [", "'List[str]', 'array', '{\"items\": {\"type\": \"string\"}}', 'JSON Schema Validation', 'And equivalently", "\"format\" extension', '' ], [ 'UUID', 'string', '{\"format\": \"uuid\"}', 'Pydantic", "'Pydantic standard \"format\" extension', 'IPv4 or IPv6 address as used", "``constr`` below.' ) ], [ 'constr(regex=\\'^text$\\', min_length=2, max_length=10)', 'string', '{\"pattern\":", "Schema Validation', '' ], [ 'ConstrainedFloat', 'number', '', 'JSON Schema", "= 300 for _ in range(5): v += '+' +", "[ 'SecretStr', 'string', '{\"writeOnly\": true}', 'JSON Schema Validation', '' ],", "'JSON Schema Validation', '' ], [ 'EmailStr', 'string', '{\"format\": \"email\"}',", "or IPv6 address as used in ``ipaddress`` module', ], [", "\"uuid1\"}', 'Pydantic standard \"format\" extension', '' ], [ 'UUID3', 'string',", "below.' ) ], [ 'confloat(gt=1, ge=2, lt=6, le=5, multiple_of=2)', 'number',", "'integer', '{\"exclusiveMaximum\": 0}', 'JSON Schema Validation', '' ], [ 'ConstrainedFloat',", "[ 'Tuple[str, int]', 'array', '{\"items\": [{\"type\": \"string\"}, {\"type\": \"integer\"}]}', 'JSON", "to make future edits easier. 
Please edit this file directly", "module', ], [ 'IPvAnyInterface', 'string', '{\"format\": \"ipvanyinterface\"}', 'Pydantic standard \"format\"", "'' ], [ 'List[str]', 'array', '{\"items\": {\"type\": \"string\"}}', 'JSON Schema", "standard \"format\" extension', 'IPv4 or IPv6 interface as used in", "], [ 'EmailStr', 'string', '{\"format\": \"email\"}', 'JSON Schema Validation', ''", "schema.' ], [ 'PositiveFloat', 'number', '{\"exclusiveMinimum\": 0}', 'JSON Schema Validation',", "\"time\"}', 'JSON Schema Validation', '' ], [ 'timedelta', 'number', '{\"format\":", "ge=2, lt=6, le=5, multiple_of=2)', 'integer', '{\"maximum\": 5, \"exclusiveMaximum\": 6, \"minimum\":", "extension', '' ], [ 'IPvAnyAddress', 'string', '{\"format\": \"ipvanyaddress\"}', 'Pydantic standard", "'string', '{\"format\": \"ipvanyaddress\"}', 'Pydantic standard \"format\" extension', 'IPv4 or IPv6", "], [ 'conint(gt=1, ge=2, lt=6, le=5, multiple_of=2)', 'integer', '{\"maximum\": 5,", "as it would not be valid in OpenAPI (although it", "v += '+' for row in table: v += '\\n|'", "'JSON Schema Core', '' ], [ 'tuple', 'array', '', 'JSON", "2, \"maxLength\": 10}', 'JSON Schema Validation', 'Any argument not passed", "[ 'Dict[str, int]', 'object', '{\"additionalProperties\": {\"type\": \"integer\"}}', 'JSON Schema Validation',", "'{\"pattern\": \"^text$\", \"minLength\": 2, \"maxLength\": 10}', 'JSON Schema Validation', 'Any", "schema.' 
], [ 'PositiveInt', 'integer', '{\"exclusiveMinimum\": 0}', 'JSON Schema Validation',", "[{\"type\": \"string\"}, {\"type\": \"integer\"}]}', 'JSON Schema Validation', 'And equivalently for", "{\"type\": \"string\"}}', 'JSON Schema Validation', 'And equivalently for any other", "], [ 'DirectoryPath', 'string', '{\"format\": \"directory-path\"}', 'Pydantic standard \"format\" extension',", "], [ 'StrictStr', 'string', '', 'JSON Schema Core', '' ],", "\"format\" extension', 'IPv4 or IPv6 network as used in ``ipaddress``", "'DirectoryPath', 'string', '{\"format\": \"directory-path\"}', 'Pydantic standard \"format\" extension', '' ],", "'ConstrainedInt', 'integer', '', 'JSON Schema Core', ( 'If the type", "not passed to the function (not defined) will not be", "only str is valid as ' 'JSON Schema key types.'", "'JSON Schema Validation', '' ], [ 'date', 'string', '{\"format\": \"date\"}',", "le=5, multiple_of=2)', 'integer', '{\"maximum\": 5, \"exclusiveMaximum\": 6, \"minimum\": 2, \"exclusiveMinimum\":", "validations. ' 'See the mapping for ``condecimal`` below.' ) ],", "Core', '' ], [ 'set', 'array', '{\"uniqueItems\": true}', 'JSON Schema", "be included in the schema.' ], [ 'ConstrainedInt', 'integer', '',", "below.' ) ], [ 'conint(gt=1, ge=2, lt=6, le=5, multiple_of=2)', 'integer',", "], [ 'SecretStr', 'string', '{\"writeOnly\": true}', 'JSON Schema Validation', ''", "'JSON Schema Validation', 'All the literal values in the enum", "as used in ``ipaddress`` module', ], [ 'IPvAnyInterface', 'string', '{\"format\":", "as used in ``ipaddress`` module', ], [ 'IPvAnyNetwork', 'string', '{\"format\":", "any other sub type, e.g. List[int].' 
], [ 'Tuple[str, int]',", "'' ], [ 'ConstrainedFloat', 'number', '', 'JSON Schema Core', (", "], [ 'UUID4', 'string', '{\"format\": \"uuid4\"}', 'Pydantic standard \"format\" extension',", "'' ], [ 'DirectoryPath', 'string', '{\"format\": \"directory-path\"}', 'Pydantic standard \"format\"", "'dict', 'object', '', 'JSON Schema Core', '' ], [ 'list',", "the constraints, they are included as validations.' 'See the mapping", "for the constraints, they are included as validations. ' 'See", "], [ 'date', 'string', '{\"format\": \"date\"}', 'JSON Schema Validation', ''", "v += '+' + '-' * col_width v += '+'", "e.g. List[int].' ], [ 'Tuple[str, int]', 'array', '{\"items\": [{\"type\": \"string\"},", "'Pydantic standard \"format\" extension', '' ], [ 'UUID5', 'string', '{\"format\":", "as validations. ' 'See the mapping for ``constr`` below.' )", "'IPvAnyNetwork', 'string', '{\"format\": \"ipvanynetwork\"}', 'Pydantic standard \"format\" extension', 'IPv4 or", "], [ 'SecretBytes', 'string', '{\"writeOnly\": true}', 'JSON Schema Validation', ''", "[ 'datetime', 'string', '{\"format\": \"date-time\"}', 'JSON Schema Validation', '' ],", "Schema Core', '' ], [ 'float', 'number', '', 'JSON Schema", "'string', '{\"format\": \"directory-path\"}', 'Pydantic standard \"format\" extension', '' ], [", "'' ], [ 'NameEmail', 'string', '{\"format\": \"name-email\"}', 'Pydantic standard \"format\"", "'IPvAnyAddress', 'string', '{\"format\": \"ipvanyaddress\"}', 'Pydantic standard \"format\" extension', 'IPv4 or", "[ 'Decimal', 'number', '', 'JSON Schema Core', '' ], [", "* col_width v += '+' with open('.tmp_schema_mappings.rst', 'w') as f:", "'' ], [ 'ConstrainedStr', 'string', '', 'JSON Schema Core', (", "[ 'condecimal(gt=1, ge=2, lt=6, le=5, multiple_of=2)', 'number', '{\"maximum\": 5, \"exclusiveMaximum\":", "\"format\" extension', '' ], [ 'IPvAnyAddress', 'string', '{\"format\": \"ipvanyaddress\"}', 'Pydantic", "the schema.' 
], [ 'ConstrainedInt', 'integer', '', 'JSON Schema Core',", "in the schema.' ], [ 'PositiveFloat', 'number', '{\"exclusiveMinimum\": 0}', 'JSON", "other set of subtypes. Note: If using schemas for OpenAPI,", "declaration, as it would not be valid in OpenAPI (although", "], [ 'UUID5', 'string', '{\"format\": \"uuid5\"}', 'Pydantic standard \"format\" extension',", "Python / Pydantic to JSON Schema mappings. Done like this", "'integer', '{\"maximum\": 5, \"exclusiveMaximum\": 6, \"minimum\": 2, \"exclusiveMinimum\": 1, \"multipleOf\":", "Schema Validation', '' ], [ 'NegativeInt', 'integer', '{\"exclusiveMaximum\": 0}', 'JSON", "for heading in headings: v += f' {heading:{col_width - 2}}", "'{\"format\": \"ipvanynetwork\"}', 'Pydantic standard \"format\" extension', 'IPv4 or IPv6 network", "'Python type', 'JSON Schema Type', 'Additional JSON Schema', 'Defined in',", "standard \"format\" extension', '' ], [ 'UUID3', 'string', '{\"format\": \"uuid3\"}',", "is ' 'valid in JSON Schema).' ) ], [ 'Dict[str,", "[ 'list', 'array', '', 'JSON Schema Core', '' ], [", "'\\n' for _ in range(5): v += '+' + '-'", "rather than as a raw rst table to make future", "[ 'int', 'integer', '', 'JSON Schema Validation', '' ], [", "the mapping for ``conint`` below.' ) ], [ 'conint(gt=1, ge=2,", "[ 'EmailStr', 'string', '{\"format\": \"email\"}', 'JSON Schema Validation', '' ],", "'{\"format\": \"date-time\"}', 'JSON Schema Validation', '' ], [ 'date', 'string',", "= '' col_width = 300 for _ in range(5): v", "\"format\" extension', 'Suggested in JSON Schema repository\\'s issues by maintainer.'", "as a raw rst table to make future edits easier.", "for ``condecimal`` below.' 
) ], [ 'condecimal(gt=1, ge=2, lt=6, le=5,", "1, \"multipleOf\": 2}', '', 'Any argument not passed to the", ") ], [ 'condecimal(gt=1, ge=2, lt=6, le=5, multiple_of=2)', 'number', '{\"maximum\":", "+= '\\n' for _ in range(5): v += '+' +", "Schema Core', '' ], [ 'list', 'array', '', 'JSON Schema", "network as used in ``ipaddress`` module', ], [ 'StrictStr', 'string',", "Validation', ( 'And equivalently for any other subfields for dicts.", "\"format\" extension', 'IPv4 or IPv6 interface as used in ``ipaddress``", "'integer', '{\"exclusiveMinimum\": 0}', 'JSON Schema Validation', '' ], [ 'NegativeInt',", "standard \"format\" extension', 'IPv4 or IPv6 network as used in", "+= f' {text:{col_width - 2}} |' v += '\\n' for", "'string', '{\"format\": \"uuid3\"}', 'Pydantic standard \"format\" extension', '' ], [", "'Enum', 'enum', '{\"enum\": [...]}', 'JSON Schema Validation', 'All the literal", "'+' + '=' * col_width v += '+' for row", "the mapping for ``confloat`` below.' ) ], [ 'confloat(gt=1, ge=2,", "Schema Validation', '' ], [ 'SecretBytes', 'string', '{\"writeOnly\": true}', 'JSON", "'list', 'array', '', 'JSON Schema Core', '' ], [ 'tuple',", "(not defined) will not be included in the schema.' ],", "/ Pydantic to JSON Schema mappings. Done like this rather", "Schema).' ) ], [ 'Dict[str, int]', 'object', '{\"additionalProperties\": {\"type\": \"integer\"}}',", "standard \"format\" extension', '' ], [ 'UUID5', 'string', '{\"format\": \"uuid5\"}',", "'JSON Schema Core', '' ], [ 'str', 'string', '', 'JSON", "Schema Validation', '' ], [ 'time', 'string', '{\"format\": \"time\"}', 'JSON", "Schema', 'Defined in', 'Notes', ] v = '' col_width =", "], [ 'UUID', 'string', '{\"format\": \"uuid\"}', 'Pydantic standard \"format\" extension',", "issues by maintainer.' ], [ 'Json', 'string', '{\"format\": \"json-string\"}', 'Pydantic", "will not be included in the schema.' 
], [ 'PositiveInt',", "[ 'date', 'string', '{\"format\": \"date\"}', 'JSON Schema Validation', '' ],", "Schema repository\\'s issues by maintainer.' ], [ 'Json', 'string', '{\"format\":", "standard \"format\" extension', '' ], [ 'bytes', 'string', '{\"format\": \"binary\"}',", "- 2}} |' v += '\\n' for _ in range(5):", "rst table to make future edits easier. Please edit this", "'Pydantic standard \"format\" extension', '' ], [ 'UUID4', 'string', '{\"format\":", "not be included in the schema.' ], [ 'PositiveFloat', 'number',", "0}', 'JSON Schema Validation', '' ], [ 'NegativeFloat', 'number', '{\"exclusiveMaximum\":", "'{\"format\": \"json-string\"}', 'Pydantic standard \"format\" extension', '' ], [ 'IPvAnyAddress',", "+ '=' * col_width v += '+' for row in", "'SecretBytes', 'string', '{\"writeOnly\": true}', 'JSON Schema Validation', '' ], [", "'' ], [ 'int', 'integer', '', 'JSON Schema Validation', ''", "type, e.g. List[int].' ], [ 'Tuple[str, int]', 'array', '{\"items\": [{\"type\":", "'string', '{\"format\": \"name-email\"}', 'Pydantic standard \"format\" extension', '' ], [", "that although you can use other types as ' 'keys", "'Pydantic standard \"format\" extension', '' ], [ 'UUID', 'string', '{\"format\":", "'See the mapping for ``condecimal`` below.' ) ], [ 'condecimal(gt=1,", "Schema Core', 'All the properties defined will be defined with", "the literal values in the enum are included in the", "'JSON Schema Validation', '' ], [ 'NameEmail', 'string', '{\"format\": \"name-email\"}',", "'{\"format\": \"uuid3\"}', 'Pydantic standard \"format\" extension', '' ], [ 'UUID4',", "as ' 'keys for dicts with Pydantic, only strings are", "'Pydantic standard \"format\" extension', '' ], [ 'IPvAnyAddress', 'string', '{\"format\":", "Schema, including submodels.' 
] ] headings = [ 'Python type',", "this rather than as a raw rst table to make", "], [ 'tuple', 'array', '', 'JSON Schema Core', '' ],", "Schema Validation', '' ], [ 'date', 'string', '{\"format\": \"date\"}', 'JSON", "the schema.' ], [ 'BaseModel', 'object', '', 'JSON Schema Core',", "JSON Schema mappings. Done like this rather than as a", "col_width v += '+\\n|' for heading in headings: v +=", "Schema Validation', 'And equivalently for any other sub type, e.g.", "'\\n' for _ in range(5): v += '+' + '='", "'{\"items\": {\"type\": \"string\"}}', 'JSON Schema Validation', 'And equivalently for any", "'' ], [ 'ConstrainedDecimal', 'number', '', 'JSON Schema Core', (", "other subfields for dicts. Have in mind that although you", "any other subfields for unions.' ], [ 'Enum', 'enum', '{\"enum\":", "'{\"format\": \"file-path\"}', 'Pydantic standard \"format\" extension', '' ], [ 'DirectoryPath',", "'{\"maximum\": 5, \"exclusiveMaximum\": 6, \"minimum\": 2, \"exclusiveMinimum\": 1, \"multipleOf\": 2}',", "in range(5): v += '+' + '=' * col_width v", "Schema Core', ( 'If the type has values declared for", "function (not defined) will not be included in the schema.'", "3 and text else text v += f' {text:{col_width -", "included as validations. ' 'See the mapping for ``condecimal`` below.'", "heading in headings: v += f' {heading:{col_width - 2}} |'", "in the schema.' ], [ 'PositiveInt', 'integer', '{\"exclusiveMinimum\": 0}', 'JSON", "'{\"format\": \"uuid\"}', 'Pydantic standard \"format\" extension', 'Suggested in OpenAPI.' 
],", "\"dsn\"}', 'Pydantic standard \"format\" extension', '' ], [ 'bytes', 'string',", "Please edit this file directly not .tmp_schema_mappings.rst \"\"\" table =", "'object', '{\"additionalProperties\": {\"type\": \"integer\"}}', 'JSON Schema Validation', ( 'And equivalently", "extension', '' ], [ 'UUID4', 'string', '{\"format\": \"uuid4\"}', 'Pydantic standard", "with Pydantic standard \"format\" extension', 'Suggested in JSON Schema repository\\'s", "'-' * col_width v += '+' with open('.tmp_schema_mappings.rst', 'w') as", "'JSON Schema Validation', '' ], [ 'NegativeInt', 'integer', '{\"exclusiveMaximum\": 0}',", "directly not .tmp_schema_mappings.rst \"\"\" table = [ [ 'bool', 'boolean',", "table = [ [ 'bool', 'boolean', '', 'JSON Schema Core',", "'', 'JSON Schema Validation', '' ], [ 'dict', 'object', '',", "Validation', '' ], [ 'List[str]', 'array', '{\"items\": {\"type\": \"string\"}}', 'JSON", "'UrlStr', 'string', '{\"format\": \"uri\"}', 'JSON Schema Validation', '' ], [", "in OpenAPI (although it is ' 'valid in JSON Schema).'", "valid keys for JSON, and so, only str is valid", "[ 'UUID1', 'string', '{\"format\": \"uuid1\"}', 'Pydantic standard \"format\" extension', ''", "'{\"format\": \"uuid4\"}', 'Pydantic standard \"format\" extension', '' ], [ 'UUID5',", "'{\"additionalProperties\": {\"type\": \"integer\"}}', 'JSON Schema Validation', ( 'And equivalently for", "Validation', 'And equivalently for any other subfields for unions.' ],", "], [ 'Path', 'string', '{\"format\": \"path\"}', 'Pydantic standard \"format\" extension',", "'', 'JSON Schema Core', '' ], [ 'tuple', 'array', '',", "text = f'``{text}``' if i < 3 and text else", "[ 'tuple', 'array', '', 'JSON Schema Core', '' ], [", "address as used in ``ipaddress`` module', ], [ 'IPvAnyInterface', 'string',", "'FilePath', 'string', '{\"format\": \"file-path\"}', 'Pydantic standard \"format\" extension', '' ],", "as validations.' 'See the mapping for ``confloat`` below.' ) ],", "schema.' 
], [ 'ConstrainedInt', 'integer', '', 'JSON Schema Core', (", "\"name-email\"}', 'Pydantic standard \"format\" extension', '' ], [ 'UrlStr', 'string',", ") ], [ 'Dict[str, int]', 'object', '{\"additionalProperties\": {\"type\": \"integer\"}}', 'JSON", "Validation', ( 'And equivalently for any other set of subtypes.", "], [ 'Json', 'string', '{\"format\": \"json-string\"}', 'Pydantic standard \"format\" extension',", "will not be included in the schema.' ], [ 'BaseModel',", "in table: v += '\\n|' for i, text in enumerate(row):", "[ 'Enum', 'enum', '{\"enum\": [...]}', 'JSON Schema Validation', 'All the", "\"ipvanyinterface\"}', 'Pydantic standard \"format\" extension', 'IPv4 or IPv6 interface as", "\"exclusiveMaximum\": 6, \"minimum\": 2, \"exclusiveMinimum\": 1, \"multipleOf\": 2}', '', 'Any", "[ 'NameEmail', 'string', '{\"format\": \"name-email\"}', 'Pydantic standard \"format\" extension', ''", "\"uuid3\"}', 'Pydantic standard \"format\" extension', '' ], [ 'UUID4', 'string',", "'{\"enum\": [...]}', 'JSON Schema Validation', 'All the literal values in", "a table of Python / Pydantic to JSON Schema mappings.", "], [ 'FilePath', 'string', '{\"format\": \"file-path\"}', 'Pydantic standard \"format\" extension',", "'JSON Schema Core', '' ], [ 'ConstrainedStr', 'string', '', 'JSON", "\"path\"}', 'Pydantic standard \"format\" extension', '' ], [ 'datetime', 'string',", "other types as ' 'keys for dicts with Pydantic, only", "2}', '', 'Any argument not passed to the function (not", "can use other types as ' 'keys for dicts with", "\"integer\"}]}', 'JSON Schema Validation', ( 'And equivalently for any other", "[ 'Path', 'string', '{\"format\": \"path\"}', 'Pydantic standard \"format\" extension', ''", "module', ], [ 'IPvAnyNetwork', 'string', '{\"format\": \"ipvanynetwork\"}', 'Pydantic standard \"format\"", "300 for _ in range(5): v += '+' + '-'", "and text else text v += f' {text:{col_width - 2}}", "in ``ipaddress`` module', ], [ 'IPvAnyInterface', 'string', 
'{\"format\": \"ipvanyinterface\"}', 'Pydantic", "\"minLength\": 2, \"maxLength\": 10}', 'JSON Schema Validation', 'Any argument not", "'JSON Schema Validation', ( 'And equivalently for any other set", "Core', '' ], [ 'UUID1', 'string', '{\"format\": \"uuid1\"}', 'Pydantic standard", "'Pydantic standard \"format\" extension', 'IPv4 or IPv6 network as used", "defined with standard JSON Schema, including submodels.' ] ] headings", "] ] headings = [ 'Python type', 'JSON Schema Type',", "'integer', '', 'JSON Schema Validation', '' ], [ 'dict', 'object',", "'And equivalently for any other sub type, e.g. List[int].' ],", "mind that although you can use other types as '", "\"string\"}, {\"type\": \"integer\"}]}', 'JSON Schema Validation', 'And equivalently for any", "in the enum are included in the definition.' ], [", "'Pydantic standard \"format\" extension', 'Suggested in OpenAPI.' ], [ 'FilePath',", "'string', '{\"format\": \"date\"}', 'JSON Schema Validation', '' ], [ 'time',", "included as validations. ' 'See the mapping for ``constr`` below.'", "be valid in OpenAPI (although it is ' 'valid in", "'constr(regex=\\'^text$\\', min_length=2, max_length=10)', 'string', '{\"pattern\": \"^text$\", \"minLength\": 2, \"maxLength\": 10}',", "for dicts with Pydantic, only strings are valid keys for", "\"^text$\", \"minLength\": 2, \"maxLength\": 10}', 'JSON Schema Validation', 'Any argument", "Core', '' ], [ 'int', 'integer', '', 'JSON Schema Validation',", "multiple_of=2)', 'number', '{\"maximum\": 5, \"exclusiveMaximum\": 6, \"minimum\": 2, \"exclusiveMinimum\": 1,", "'BaseModel', 'object', '', 'JSON Schema Core', 'All the properties defined", "Schema Core', '' ], [ 'UUID1', 'string', '{\"format\": \"uuid1\"}', 'Pydantic", "[ 'FilePath', 'string', '{\"format\": \"file-path\"}', 'Pydantic standard \"format\" extension', ''", "'Pydantic standard \"format\" extension', '' ], [ 'DirectoryPath', 'string', '{\"format\":", "definition.' 
], [ 'SecretStr', 'string', '{\"writeOnly\": true}', 'JSON Schema Validation',", "standard \"format\" extension', 'Suggested in JSON Schema repository\\'s issues by", "subtypes. Note: If using schemas for OpenAPI, ' 'you shouldn\\'t", "'bool', 'boolean', '', 'JSON Schema Core', '' ], [ 'str',", "'IPv4 or IPv6 network as used in ``ipaddress`` module', ],", "would not be valid in OpenAPI (although it is '", "\"multipleOf\": 2}', '', 'Any argument not passed to the function", "used in ``ipaddress`` module', ], [ 'StrictStr', 'string', '', 'JSON", "'ConstrainedFloat', 'number', '', 'JSON Schema Core', ( 'If the type", "'' ], [ 'NegativeFloat', 'number', '{\"exclusiveMaximum\": 0}', 'JSON Schema Validation',", "Validation', '' ], [ 'dict', 'object', '', 'JSON Schema Core',", "IPv6 interface as used in ``ipaddress`` module', ], [ 'IPvAnyNetwork',", "[ 'ConstrainedStr', 'string', '', 'JSON Schema Core', ( 'If the", "'+' + '-' * col_width v += '+' with open('.tmp_schema_mappings.rst',", "\"date-time\"}', 'JSON Schema Validation', '' ], [ 'date', 'string', '{\"format\":", "are included as validations. ' 'See the mapping for ``conint``", "'JSON Schema Validation', '' ], [ 'time', 'string', '{\"format\": \"time\"}',", "'{\"format\": \"date\"}', 'JSON Schema Validation', '' ], [ 'time', 'string',", "{text:{col_width - 2}} |' v += '\\n' for _ in", "included in the schema.' ], [ 'PositiveInt', 'integer', '{\"exclusiveMinimum\": 0}',", "\"format\" extension', '' ], [ 'bytes', 'string', '{\"format\": \"binary\"}', 'OpenAPI',", ") ], [ 'confloat(gt=1, ge=2, lt=6, le=5, multiple_of=2)', 'number', '{\"maximum\":", "as validations. ' 'See the mapping for ``condecimal`` below.' )", "[{\"type\": \"string\"}, {\"type\": \"integer\"}]}', 'JSON Schema Validation', ( 'And equivalently", "'keys for dicts with Pydantic, only strings are valid keys", "'' ], [ 'set', 'array', '{\"uniqueItems\": true}', 'JSON Schema Validation',", "List[int].' 
], [ 'Tuple[str, int]', 'array', '{\"items\": [{\"type\": \"string\"}, {\"type\":", "'+' for row in table: v += '\\n|' for i,", "properties defined will be defined with standard JSON Schema, including", "'number', '', 'JSON Schema Core', '' ], [ 'UUID1', 'string',", "'string', '{\"format\": \"ipvanynetwork\"}', 'Pydantic standard \"format\" extension', 'IPv4 or IPv6", "10}', 'JSON Schema Validation', 'Any argument not passed to the", "Pydantic to JSON Schema mappings. Done like this rather than", "'' ], [ 'UUID', 'string', '{\"format\": \"uuid\"}', 'Pydantic standard \"format\"", "'' ], [ 'time', 'string', '{\"format\": \"time\"}', 'JSON Schema Validation',", "any other subfields for dicts. Have in mind that although", "if i < 3 and text else text v +=", "'Dict[str, int]', 'object', '{\"additionalProperties\": {\"type\": \"integer\"}}', 'JSON Schema Validation', (", "any other set of subtypes. Note: If using schemas for", "'number', '', 'JSON Schema Core', ( 'If the type has", "\"format\" extension', '' ], [ 'DirectoryPath', 'string', '{\"format\": \"directory-path\"}', 'Pydantic", "Schema Validation', 'All the literal values in the enum are", "of Python / Pydantic to JSON Schema mappings. 
Done like", "\"format\" extension', '' ], [ 'datetime', 'string', '{\"format\": \"date-time\"}', 'JSON", "[ 'IPvAnyAddress', 'string', '{\"format\": \"ipvanyaddress\"}', 'Pydantic standard \"format\" extension', 'IPv4", "_ in range(5): v += '+' + '=' * col_width", "'', 'JSON Schema Core', '' ], [ 'str', 'string', '',", "\"email\"}', 'JSON Schema Validation', '' ], [ 'NameEmail', 'string', '{\"format\":", "], [ 'ConstrainedDecimal', 'number', '', 'JSON Schema Core', ( 'If", "'string', '', 'JSON Schema Core', '' ], [ 'ConstrainedStr', 'string',", "\"minimum\": 2, \"exclusiveMinimum\": 1, \"multipleOf\": 2}', '', 'Any argument not", "extension', '' ], [ 'UUID5', 'string', '{\"format\": \"uuid5\"}', 'Pydantic standard", "\"maxLength\": 10}', 'JSON Schema Validation', 'Any argument not passed to", "you can use other types as ' 'keys for dicts", "'' ], [ 'datetime', 'string', '{\"format\": \"date-time\"}', 'JSON Schema Validation',", "[ 'ConstrainedDecimal', 'number', '', 'JSON Schema Core', ( 'If the", "'string', '{\"writeOnly\": true}', 'JSON Schema Validation', '' ], [ 'EmailStr',", "int]', 'object', '{\"additionalProperties\": {\"type\": \"integer\"}}', 'JSON Schema Validation', ( 'And", "'str', 'string', '', 'JSON Schema Core', '' ], [ 'float',", "'UUID5', 'string', '{\"format\": \"uuid5\"}', 'Pydantic standard \"format\" extension', '' ],", "'', 'JSON Schema Core', '' ], [ 'int', 'integer', '',", "'SecretStr', 'string', '{\"writeOnly\": true}', 'JSON Schema Validation', '' ], [", "else text v += f' {text:{col_width - 2}} |' v", "v += '+\\n|' for heading in headings: v += f'", "'string', '{\"format\": \"uuid5\"}', 'Pydantic standard \"format\" extension', '' ], [", "make future edits easier. Please edit this file directly not", "validations. ' 'See the mapping for ``constr`` below.' 
) ],", "'object', '', 'JSON Schema Core', 'All the properties defined will", "for _ in range(5): v += '+' + '=' *", "2, \"exclusiveMinimum\": 1, \"multipleOf\": 2}', '', 'Any argument not passed", "'number', '{\"format\": \"time-delta\"}', 'Difference in seconds (a ``float``), with Pydantic", "JSON Schema, including submodels.' ] ] headings = [ 'Python", "If using schemas for OpenAPI, ' 'you shouldn\\'t use this", "'set', 'array', '{\"uniqueItems\": true}', 'JSON Schema Validation', '' ], [", "'bytes', 'string', '{\"format\": \"binary\"}', 'OpenAPI', '' ], [ 'Decimal', 'number',", "Core', ( 'If the type has values declared for the", "\"ipvanyaddress\"}', 'Pydantic standard \"format\" extension', 'IPv4 or IPv6 address as", "[ 'set', 'array', '{\"uniqueItems\": true}', 'JSON Schema Validation', '' ],", "'{\"writeOnly\": true}', 'JSON Schema Validation', '' ], [ 'EmailStr', 'string',", "Schema Core', '' ], [ 'str', 'string', '', 'JSON Schema", "0}', 'JSON Schema Validation', '' ], [ 'ConstrainedFloat', 'number', '',", "'' ], [ 'SecretBytes', 'string', '{\"writeOnly\": true}', 'JSON Schema Validation',", "'confloat(gt=1, ge=2, lt=6, le=5, multiple_of=2)', 'number', '{\"maximum\": 5, \"exclusiveMaximum\": 6,", "{heading:{col_width - 2}} |' v += '\\n' for _ in", "standard \"format\" extension', '' ], [ 'IPvAnyAddress', 'string', '{\"format\": \"ipvanyaddress\"}',", "extension', 'IPv4 or IPv6 interface as used in ``ipaddress`` module',", "'+' + '-' * col_width v += '+\\n|' for heading", "a raw rst table to make future edits easier. Please", ") ], [ 'Union[str, int]', 'anyOf', '{\"anyOf\": [{\"type\": \"string\"}, {\"type\":", "enum are included in the definition.' ], [ 'SecretStr', 'string',", "[...]}', 'JSON Schema Validation', 'All the literal values in the", "'conint(gt=1, ge=2, lt=6, le=5, multiple_of=2)', 'integer', '{\"maximum\": 5, \"exclusiveMaximum\": 6,", "for ``confloat`` below.' 
) ], [ 'confloat(gt=1, ge=2, lt=6, le=5,", "for i, text in enumerate(row): text = f'``{text}``' if i", "'' ], [ 'Decimal', 'number', '', 'JSON Schema Core', ''", "], [ 'UUID1', 'string', '{\"format\": \"uuid1\"}', 'Pydantic standard \"format\" extension',", "this declaration, as it would not be valid in OpenAPI", "equivalently for any other subfields for dicts. Have in mind", "has values declared for the constraints, they are included as", "Schema Validation', '' ], [ 'NameEmail', 'string', '{\"format\": \"name-email\"}', 'Pydantic", "+= '+' + '=' * col_width v += '+' for", "'', 'JSON Schema Core', 'All the properties defined will be", "'array', '', 'JSON Schema Core', '' ], [ 'set', 'array',", "as ' 'JSON Schema key types.' ) ], [ 'Union[str,", "subfields for unions.' ], [ 'Enum', 'enum', '{\"enum\": [...]}', 'JSON", "extension', '' ], [ 'datetime', 'string', '{\"format\": \"date-time\"}', 'JSON Schema", "equivalently for any other subfields for unions.' ], [ 'Enum',", "' 'See the mapping for ``constr`` below.' ) ], [", "included in the schema.' ], [ 'PositiveFloat', 'number', '{\"exclusiveMinimum\": 0}',", "[ 'UUID5', 'string', '{\"format\": \"uuid5\"}', 'Pydantic standard \"format\" extension', ''", "the schema.' ], [ 'PositiveInt', 'integer', '{\"exclusiveMinimum\": 0}', 'JSON Schema", "for row in table: v += '\\n|' for i, text", "standard \"format\" extension', '' ], [ 'UrlStr', 'string', '{\"format\": \"uri\"}',", "'Difference in seconds (a ``float``), with Pydantic standard \"format\" extension',", "'', 'JSON Schema Core', ( 'If the type has values", "extension', '' ], [ 'UUID3', 'string', '{\"format\": \"uuid3\"}', 'Pydantic standard", "'Pydantic standard \"format\" extension', '' ], [ 'Path', 'string', '{\"format\":", "the mapping for ``constr`` below.' ) ], [ 'constr(regex=\\'^text$\\', min_length=2,", "text v += f' {text:{col_width - 2}} |' v +=", "types.' 
) ], [ 'Union[str, int]', 'anyOf', '{\"anyOf\": [{\"type\": \"string\"},", "\"binary\"}', 'OpenAPI', '' ], [ 'Decimal', 'number', '', 'JSON Schema", "], [ 'constr(regex=\\'^text$\\', min_length=2, max_length=10)', 'string', '{\"pattern\": \"^text$\", \"minLength\": 2,", "Core', '' ], [ 'float', 'number', '', 'JSON Schema Core',", "mapping for ``confloat`` below.' ) ], [ 'confloat(gt=1, ge=2, lt=6,", "'Notes', ] v = '' col_width = 300 for _", "mappings. Done like this rather than as a raw rst", "[ 'confloat(gt=1, ge=2, lt=6, le=5, multiple_of=2)', 'number', '{\"maximum\": 5, \"exclusiveMaximum\":", "submodels.' ] ] headings = [ 'Python type', 'JSON Schema", "'JSON Schema Core', ( 'If the type has values declared", "[ 'UUID4', 'string', '{\"format\": \"uuid4\"}', 'Pydantic standard \"format\" extension', ''", "Pydantic standard \"format\" extension', 'Suggested in JSON Schema repository\\'s issues", "\"string\"}}', 'JSON Schema Validation', 'And equivalently for any other sub", "[ 'DSN', 'string', '{\"format\": \"dsn\"}', 'Pydantic standard \"format\" extension', ''", "'{\"format\": \"dsn\"}', 'Pydantic standard \"format\" extension', '' ], [ 'bytes',", "'IPvAnyInterface', 'string', '{\"format\": \"ipvanyinterface\"}', 'Pydantic standard \"format\" extension', 'IPv4 or", "], [ 'ConstrainedStr', 'string', '', 'JSON Schema Core', ( 'If", "'NegativeInt', 'integer', '{\"exclusiveMaximum\": 0}', 'JSON Schema Validation', '' ], [", "5, \"exclusiveMaximum\": 6, \"minimum\": 2, \"exclusiveMinimum\": 1, \"multipleOf\": 2}', 'JSON", "including submodels.' ] ] headings = [ 'Python type', 'JSON", "'-' * col_width v += '+\\n|' for heading in headings:", "OpenAPI (although it is ' 'valid in JSON Schema).' 
)", "'{\"format\": \"directory-path\"}', 'Pydantic standard \"format\" extension', '' ], [ 'Path',", "], [ 'time', 'string', '{\"format\": \"time\"}', 'JSON Schema Validation', ''", "[ 'Json', 'string', '{\"format\": \"json-string\"}', 'Pydantic standard \"format\" extension', ''", "'array', '{\"items\": [{\"type\": \"string\"}, {\"type\": \"integer\"}]}', 'JSON Schema Validation', (", "in ``ipaddress`` module', ], [ 'IPvAnyNetwork', 'string', '{\"format\": \"ipvanynetwork\"}', 'Pydantic", "'', 'Any argument not passed to the function (not defined)", "Schema Validation', ( 'And equivalently for any other subfields for", "edit this file directly not .tmp_schema_mappings.rst \"\"\" table = [", "Schema Core', '' ], [ 'int', 'integer', '', 'JSON Schema", "\"json-string\"}', 'Pydantic standard \"format\" extension', '' ], [ 'IPvAnyAddress', 'string',", "the constraints, they are included as validations. ' 'See the", "'StrictStr', 'string', '', 'JSON Schema Core', '' ], [ 'ConstrainedStr',", "0}', 'JSON Schema Validation', '' ], [ 'NegativeInt', 'integer', '{\"exclusiveMaximum\":", "constraints, they are included as validations.' 
'See the mapping for", "], [ 'int', 'integer', '', 'JSON Schema Validation', '' ],", "'string', '{\"format\": \"file-path\"}', 'Pydantic standard \"format\" extension', '' ], [", "[ 'StrictStr', 'string', '', 'JSON Schema Core', '' ], [", "], [ 'condecimal(gt=1, ge=2, lt=6, le=5, multiple_of=2)', 'number', '{\"maximum\": 5,", "in range(5): v += '+' + '-' * col_width v", "'{\"uniqueItems\": true}', 'JSON Schema Validation', '' ], [ 'List[str]', 'array',", "'string', '{\"format\": \"uuid\"}', 'Pydantic standard \"format\" extension', 'Suggested in OpenAPI.'", "'' ], [ 'UrlStr', 'string', '{\"format\": \"uri\"}', 'JSON Schema Validation',", "'{\"format\": \"time-delta\"}', 'Difference in seconds (a ``float``), with Pydantic standard", "6, \"minimum\": 2, \"exclusiveMinimum\": 1, \"multipleOf\": 2}', 'JSON Schema Validation',", "be included in the schema.' ], [ 'PositiveInt', 'integer', '{\"exclusiveMinimum\":", "this file directly not .tmp_schema_mappings.rst \"\"\" table = [ [", "], [ 'IPvAnyInterface', 'string', '{\"format\": \"ipvanyinterface\"}', 'Pydantic standard \"format\" extension',", "repository\\'s issues by maintainer.' 
], [ 'Json', 'string', '{\"format\": \"json-string\"}',", "not .tmp_schema_mappings.rst \"\"\" table = [ [ 'bool', 'boolean', '',", "f' {heading:{col_width - 2}} |' v += '\\n' for _", "for JSON, and so, only str is valid as '", "not be valid in OpenAPI (although it is ' 'valid", "'IPv4 or IPv6 interface as used in ``ipaddress`` module', ],", "'array', '', 'JSON Schema Core', '' ], [ 'tuple', 'array',", "[ 'DirectoryPath', 'string', '{\"format\": \"directory-path\"}', 'Pydantic standard \"format\" extension', ''", "'{\"exclusiveMinimum\": 0}', 'JSON Schema Validation', '' ], [ 'NegativeInt', 'integer',", "'' ], [ 'str', 'string', '', 'JSON Schema Core', ''", "'string', '{\"pattern\": \"^text$\", \"minLength\": 2, \"maxLength\": 10}', 'JSON Schema Validation',", "\"uuid4\"}', 'Pydantic standard \"format\" extension', '' ], [ 'UUID5', 'string',", "used in ``ipaddress`` module', ], [ 'IPvAnyInterface', 'string', '{\"format\": \"ipvanyinterface\"}',", "'', 'JSON Schema Core', '' ], [ 'UUID1', 'string', '{\"format\":", "'JSON Schema Core', 'All the properties defined will be defined", "v += f' {text:{col_width - 2}} |' v += '\\n'", "values declared for the constraints, they are included as validations.'", "'Pydantic standard \"format\" extension', '' ], [ 'UUID3', 'string', '{\"format\":", "], [ 'Dict[str, int]', 'object', '{\"additionalProperties\": {\"type\": \"integer\"}}', 'JSON Schema", "'' ], [ 'EmailStr', 'string', '{\"format\": \"email\"}', 'JSON Schema Validation',", "'float', 'number', '', 'JSON Schema Core', '' ], [ 'int',", "'{\"format\": \"uuid5\"}', 'Pydantic standard \"format\" extension', '' ], [ 'UUID',", "type has values declared for the constraints, they are included", "], [ 'BaseModel', 'object', '', 'JSON Schema Core', 'All the", "standard \"format\" extension', '' ], [ 'Path', 'string', '{\"format\": \"path\"}',", "'' ], [ 'date', 'string', '{\"format\": \"date\"}', 'JSON Schema Validation',", "'UUID3', 'string', '{\"format\": \"uuid3\"}', 'Pydantic 
standard \"format\" extension', '' ],", "Schema Validation', '' ], [ 'EmailStr', 'string', '{\"format\": \"email\"}', 'JSON", "+ '-' * col_width v += '+\\n|' for heading in", "'' ], [ 'tuple', 'array', '', 'JSON Schema Core', ''", "'\\n|' for i, text in enumerate(row): text = f'``{text}``' if", "module', ], [ 'StrictStr', 'string', '', 'JSON Schema Core', ''", "' 'JSON Schema key types.' ) ], [ 'Union[str, int]',", "'string', '', 'JSON Schema Core', ( 'If the type has", "'' ], [ 'UUID4', 'string', '{\"format\": \"uuid4\"}', 'Pydantic standard \"format\"", "int]', 'array', '{\"items\": [{\"type\": \"string\"}, {\"type\": \"integer\"}]}', 'JSON Schema Validation',", "'string', '{\"format\": \"binary\"}', 'OpenAPI', '' ], [ 'Decimal', 'number', '',", "IPv6 network as used in ``ipaddress`` module', ], [ 'StrictStr',", "'' ], [ 'dict', 'object', '', 'JSON Schema Core', ''", "'=' * col_width v += '+' for row in table:", "for unions.' ], [ 'Enum', 'enum', '{\"enum\": [...]}', 'JSON Schema", "as validations. ' 'See the mapping for ``conint`` below.' )", "use other types as ' 'keys for dicts with Pydantic,", "], [ 'dict', 'object', '', 'JSON Schema Core', '' ],", "[ 'NegativeFloat', 'number', '{\"exclusiveMaximum\": 0}', 'JSON Schema Validation', '' ],", "equivalently for any other set of subtypes. Note: If using", "'string', '{\"format\": \"ipvanyinterface\"}', 'Pydantic standard \"format\" extension', 'IPv4 or IPv6", "\"integer\"}}', 'JSON Schema Validation', ( 'And equivalently for any other", "\"format\" extension', '' ], [ 'UrlStr', 'string', '{\"format\": \"uri\"}', 'JSON", "will not be included in the schema.' ], [ 'ConstrainedInt',", "the enum are included in the definition.' ], [ 'SecretStr',", ".tmp_schema_mappings.rst \"\"\" table = [ [ 'bool', 'boolean', '', 'JSON", "Schema Core', '' ], [ 'tuple', 'array', '', 'JSON Schema", "Schema mappings. Done like this rather than as a raw", "are included as validations. 
' 'See the mapping for ``constr``", "'string', '{\"format\": \"email\"}', 'JSON Schema Validation', '' ], [ 'NameEmail',", "ge=2, lt=6, le=5, multiple_of=2)', 'number', '{\"maximum\": 5, \"exclusiveMaximum\": 6, \"minimum\":", "python3 \"\"\" Build a table of Python / Pydantic to", "], [ 'PositiveFloat', 'number', '{\"exclusiveMinimum\": 0}', 'JSON Schema Validation', ''", "Core', '' ], [ 'list', 'array', '', 'JSON Schema Core',", "values in the enum are included in the definition.' ],", "'JSON Schema Validation', '' ], [ 'ConstrainedDecimal', 'number', '', 'JSON", "Validation', '' ], [ 'NegativeInt', 'integer', '{\"exclusiveMaximum\": 0}', 'JSON Schema", "[ 'NegativeInt', 'integer', '{\"exclusiveMaximum\": 0}', 'JSON Schema Validation', '' ],", "+= f' {heading:{col_width - 2}} |' v += '\\n' for", "for _ in range(5): v += '+' + '-' *", "int]', 'anyOf', '{\"anyOf\": [{\"type\": \"string\"}, {\"type\": \"integer\"}]}', 'JSON Schema Validation',", "'{\"format\": \"uuid1\"}', 'Pydantic standard \"format\" extension', '' ], [ 'UUID3',", "'Additional JSON Schema', 'Defined in', 'Notes', ] v = ''", "'' ], [ 'float', 'number', '', 'JSON Schema Core', ''", "'Path', 'string', '{\"format\": \"path\"}', 'Pydantic standard \"format\" extension', '' ],", "'' ], [ 'bytes', 'string', '{\"format\": \"binary\"}', 'OpenAPI', '' ],", "seconds (a ``float``), with Pydantic standard \"format\" extension', 'Suggested in", "\"format\" extension', '' ], [ 'UUID4', 'string', '{\"format\": \"uuid4\"}', 'Pydantic", "#!/usr/bin/env python3 \"\"\" Build a table of Python / Pydantic", "'See the mapping for ``confloat`` below.' ) ], [ 'confloat(gt=1,", "'ConstrainedDecimal', 'number', '', 'JSON Schema Core', ( 'If the type", "of subtypes. Note: If using schemas for OpenAPI, ' 'you", "for dicts. 
Have in mind that although you can use", "'string', '{\"format\": \"date-time\"}', 'JSON Schema Validation', '' ], [ 'date',", "'Tuple[str, int]', 'array', '{\"items\": [{\"type\": \"string\"}, {\"type\": \"integer\"}]}', 'JSON Schema", "\"format\" extension', 'Suggested in OpenAPI.' ], [ 'FilePath', 'string', '{\"format\":", "with Pydantic, only strings are valid keys for JSON, and", "in ``ipaddress`` module', ], [ 'StrictStr', 'string', '', 'JSON Schema", "'{\"items\": [{\"type\": \"string\"}, {\"type\": \"integer\"}]}', 'JSON Schema Validation', ( 'And", "], [ 'timedelta', 'number', '{\"format\": \"time-delta\"}', 'Difference in seconds (a", "'JSON Schema Validation', '' ], [ 'ConstrainedFloat', 'number', '', 'JSON", "\"time-delta\"}', 'Difference in seconds (a ``float``), with Pydantic standard \"format\"", "\"integer\"}]}', 'JSON Schema Validation', 'And equivalently for any other subfields", "extension', 'IPv4 or IPv6 address as used in ``ipaddress`` module',", "[ 'conint(gt=1, ge=2, lt=6, le=5, multiple_of=2)', 'integer', '{\"maximum\": 5, \"exclusiveMaximum\":", "used in ``ipaddress`` module', ], [ 'IPvAnyNetwork', 'string', '{\"format\": \"ipvanynetwork\"}',", "Schema Type', 'Additional JSON Schema', 'Defined in', 'Notes', ] v", ") ], [ 'constr(regex=\\'^text$\\', min_length=2, max_length=10)', 'string', '{\"pattern\": \"^text$\", \"minLength\":", "= [ [ 'bool', 'boolean', '', 'JSON Schema Core', ''", "maintainer.' ], [ 'Json', 'string', '{\"format\": \"json-string\"}', 'Pydantic standard \"format\"", "in the schema.' 
], [ 'ConstrainedInt', 'integer', '', 'JSON Schema", "Validation', '' ], [ 'ConstrainedDecimal', 'number', '', 'JSON Schema Core',", "extension', '' ], [ 'DirectoryPath', 'string', '{\"format\": \"directory-path\"}', 'Pydantic standard", "[ 'UrlStr', 'string', '{\"format\": \"uri\"}', 'JSON Schema Validation', '' ],", "max_length=10)', 'string', '{\"pattern\": \"^text$\", \"minLength\": 2, \"maxLength\": 10}', 'JSON Schema", "'Pydantic standard \"format\" extension', '' ], [ 'UrlStr', 'string', '{\"format\":", "\"ipvanynetwork\"}', 'Pydantic standard \"format\" extension', 'IPv4 or IPv6 network as", "\"directory-path\"}', 'Pydantic standard \"format\" extension', '' ], [ 'Path', 'string',", "f'``{text}``' if i < 3 and text else text v", "types as ' 'keys for dicts with Pydantic, only strings", "defined) will not be included in the schema.' ], [", "'array', '{\"items\": {\"type\": \"string\"}}', 'JSON Schema Validation', 'And equivalently for", "'string', '{\"writeOnly\": true}', 'JSON Schema Validation', '' ], [ 'SecretBytes',", "'DSN', 'string', '{\"format\": \"dsn\"}', 'Pydantic standard \"format\" extension', '' ],", "[ 'BaseModel', 'object', '', 'JSON Schema Core', 'All the properties", "'Suggested in OpenAPI.' ], [ 'FilePath', 'string', '{\"format\": \"file-path\"}', 'Pydantic", "'int', 'integer', '', 'JSON Schema Validation', '' ], [ 'dict',", "], [ 'Enum', 'enum', '{\"enum\": [...]}', 'JSON Schema Validation', 'All", "], [ 'DSN', 'string', '{\"format\": \"dsn\"}', 'Pydantic standard \"format\" extension',", "Schema Validation', '' ], [ 'List[str]', 'array', '{\"items\": {\"type\": \"string\"}}',", "interface as used in ``ipaddress`` module', ], [ 'IPvAnyNetwork', 'string',", "key types.' ) ], [ 'Union[str, int]', 'anyOf', '{\"anyOf\": [{\"type\":", "'{\"format\": \"ipvanyinterface\"}', 'Pydantic standard \"format\" extension', 'IPv4 or IPv6 interface", "'valid in JSON Schema).' 
) ], [ 'Dict[str, int]', 'object',", "'', 'JSON Schema Core', '' ], [ 'set', 'array', '{\"uniqueItems\":", "using schemas for OpenAPI, ' 'you shouldn\\'t use this declaration,", "included in the definition.' ], [ 'SecretStr', 'string', '{\"writeOnly\": true}',", "in JSON Schema repository\\'s issues by maintainer.' ], [ 'Json',", "OpenAPI, ' 'you shouldn\\'t use this declaration, as it would", "by maintainer.' ], [ 'Json', 'string', '{\"format\": \"json-string\"}', 'Pydantic standard", "'Defined in', 'Notes', ] v = '' col_width = 300", "standard \"format\" extension', '' ], [ 'UUID', 'string', '{\"format\": \"uuid\"}',", "'' ], [ 'timedelta', 'number', '{\"format\": \"time-delta\"}', 'Difference in seconds", "are included as validations.' 'See the mapping for ``confloat`` below.'", "valid in OpenAPI (although it is ' 'valid in JSON", "not be included in the schema.' ], [ 'PositiveInt', 'integer',", "'{\"format\": \"uri\"}', 'JSON Schema Validation', '' ], [ 'DSN', 'string',", "v += f' {heading:{col_width - 2}} |' v += '\\n'", "below.' ) ], [ 'condecimal(gt=1, ge=2, lt=6, le=5, multiple_of=2)', 'number',", "\"\"\" Build a table of Python / Pydantic to JSON", "[ 'Union[str, int]', 'anyOf', '{\"anyOf\": [{\"type\": \"string\"}, {\"type\": \"integer\"}]}', 'JSON", "included in the schema.' ], [ 'ConstrainedInt', 'integer', '', 'JSON", "'UUID1', 'string', '{\"format\": \"uuid1\"}', 'Pydantic standard \"format\" extension', '' ],", "Validation', 'All the literal values in the enum are included", "included as validations.' 'See the mapping for ``confloat`` below.' )", "in headings: v += f' {heading:{col_width - 2}} |' v", "other sub type, e.g. List[int].' ], [ 'Tuple[str, int]', 'array',", "extension', 'Suggested in OpenAPI.' ], [ 'FilePath', 'string', '{\"format\": \"file-path\"}',", "below.' ) ], [ 'constr(regex=\\'^text$\\', min_length=2, max_length=10)', 'string', '{\"pattern\": \"^text$\",", "included in the schema.' 
], [ 'BaseModel', 'object', '', 'JSON", "to the function (not defined) will not be included in", "shouldn\\'t use this declaration, as it would not be valid", "are valid keys for JSON, and so, only str is", "'{\"format\": \"binary\"}', 'OpenAPI', '' ], [ 'Decimal', 'number', '', 'JSON", "'', 'JSON Schema Core', '' ], [ 'list', 'array', '',", "[ 'bytes', 'string', '{\"format\": \"binary\"}', 'OpenAPI', '' ], [ 'Decimal',", "], [ 'UrlStr', 'string', '{\"format\": \"uri\"}', 'JSON Schema Validation', ''", "schemas for OpenAPI, ' 'you shouldn\\'t use this declaration, as", "for the constraints, they are included as validations.' 'See the", "\"format\" extension', '' ], [ 'UUID5', 'string', '{\"format\": \"uuid5\"}', 'Pydantic", "+ '-' * col_width v += '+' with open('.tmp_schema_mappings.rst', 'w')", "Validation', '' ], [ 'NegativeFloat', 'number', '{\"exclusiveMaximum\": 0}', 'JSON Schema", "'JSON Schema Validation', ( 'And equivalently for any other subfields", "'number', '{\"exclusiveMaximum\": 0}', 'JSON Schema Validation', '' ], [ 'ConstrainedDecimal',", "], [ 'NameEmail', 'string', '{\"format\": \"name-email\"}', 'Pydantic standard \"format\" extension',", "'And equivalently for any other subfields for dicts. Have in", "'NameEmail', 'string', '{\"format\": \"name-email\"}', 'Pydantic standard \"format\" extension', '' ],", "file directly not .tmp_schema_mappings.rst \"\"\" table = [ [ 'bool',", "], [ 'set', 'array', '{\"uniqueItems\": true}', 'JSON Schema Validation', ''", "\"\"\" table = [ [ 'bool', 'boolean', '', 'JSON Schema", "'object', '', 'JSON Schema Core', '' ], [ 'list', 'array',", "'And equivalently for any other set of subtypes. Note: If", "standard \"format\" extension', '' ], [ 'UUID4', 'string', '{\"format\": \"uuid4\"}',", "'Suggested in JSON Schema repository\\'s issues by maintainer.' 
], [", "``ipaddress`` module', ], [ 'StrictStr', 'string', '', 'JSON Schema Core',", "], [ 'PositiveInt', 'integer', '{\"exclusiveMinimum\": 0}', 'JSON Schema Validation', ''", "sub type, e.g. List[int].' ], [ 'Tuple[str, int]', 'array', '{\"items\":", "+= '+' + '-' * col_width v += '+\\n|' for", "\"uuid5\"}', 'Pydantic standard \"format\" extension', '' ], [ 'UUID', 'string',", "v = '' col_width = 300 for _ in range(5):", "[ 'dict', 'object', '', 'JSON Schema Core', '' ], [", "the function (not defined) will not be included in the", "not be included in the schema.' ], [ 'BaseModel', 'object',", "and so, only str is valid as ' 'JSON Schema", "schema.' ], [ 'BaseModel', 'object', '', 'JSON Schema Core', 'All", "= [ 'Python type', 'JSON Schema Type', 'Additional JSON Schema',", "[ 'SecretBytes', 'string', '{\"writeOnly\": true}', 'JSON Schema Validation', '' ],", "although you can use other types as ' 'keys for", "in seconds (a ``float``), with Pydantic standard \"format\" extension', 'Suggested", ") ], [ 'conint(gt=1, ge=2, lt=6, le=5, multiple_of=2)', 'integer', '{\"maximum\":", "mapping for ``condecimal`` below.' ) ], [ 'condecimal(gt=1, ge=2, lt=6,", "'integer', '', 'JSON Schema Core', ( 'If the type has", "easier. Please edit this file directly not .tmp_schema_mappings.rst \"\"\" table", "'UUID', 'string', '{\"format\": \"uuid\"}', 'Pydantic standard \"format\" extension', 'Suggested in", "'All the properties defined will be defined with standard JSON", "to JSON Schema mappings. Done like this rather than as", "], [ 'IPvAnyNetwork', 'string', '{\"format\": \"ipvanynetwork\"}', 'Pydantic standard \"format\" extension',", "[ 'constr(regex=\\'^text$\\', min_length=2, max_length=10)', 'string', '{\"pattern\": \"^text$\", \"minLength\": 2, \"maxLength\":", "headings = [ 'Python type', 'JSON Schema Type', 'Additional JSON", "will be defined with standard JSON Schema, including submodels.' ]", "only strings are valid keys for JSON, and so, only", "in the definition.' 
], [ 'SecretStr', 'string', '{\"writeOnly\": true}', 'JSON", "'Json', 'string', '{\"format\": \"json-string\"}', 'Pydantic standard \"format\" extension', '' ],", "v += '\\n' for _ in range(5): v += '+'", "unions.' ], [ 'Enum', 'enum', '{\"enum\": [...]}', 'JSON Schema Validation',", "'time', 'string', '{\"format\": \"time\"}', 'JSON Schema Validation', '' ], [", "'+\\n|' for heading in headings: v += f' {heading:{col_width -", "Core', '' ], [ 'tuple', 'array', '', 'JSON Schema Core',", "Validation', '' ], [ 'EmailStr', 'string', '{\"format\": \"email\"}', 'JSON Schema", "\"minimum\": 2, \"exclusiveMinimum\": 1, \"multipleOf\": 2}', 'JSON Schema Validation', 'Any", "than as a raw rst table to make future edits", "[ 'str', 'string', '', 'JSON Schema Core', '' ], [", "(a ``float``), with Pydantic standard \"format\" extension', 'Suggested in JSON", "declared for the constraints, they are included as validations. '", "v += '+' + '=' * col_width v += '+'", "Schema Validation', '' ], [ 'timedelta', 'number', '{\"format\": \"time-delta\"}', 'Difference", "], [ 'Union[str, int]', 'anyOf', '{\"anyOf\": [{\"type\": \"string\"}, {\"type\": \"integer\"}]}',", "for any other subfields for unions.' ], [ 'Enum', 'enum',", "( 'And equivalently for any other subfields for dicts. 
Have", "'string', '{\"format\": \"path\"}', 'Pydantic standard \"format\" extension', '' ], [", "text in enumerate(row): text = f'``{text}``' if i < 3", "'' ], [ 'UUID1', 'string', '{\"format\": \"uuid1\"}', 'Pydantic standard \"format\"", "\"multipleOf\": 2}', 'JSON Schema Validation', 'Any argument not passed to", "2, \"exclusiveMinimum\": 1, \"multipleOf\": 2}', 'JSON Schema Validation', 'Any argument", "passed to the function (not defined) will not be included", "Validation', '' ], [ 'time', 'string', '{\"format\": \"time\"}', 'JSON Schema", "'Pydantic standard \"format\" extension', 'IPv4 or IPv6 interface as used", "6, \"minimum\": 2, \"exclusiveMinimum\": 1, \"multipleOf\": 2}', '', 'Any argument", "the schema.' ], [ 'PositiveFloat', 'number', '{\"exclusiveMinimum\": 0}', 'JSON Schema", "'JSON Schema Validation', '' ], [ 'SecretBytes', 'string', '{\"writeOnly\": true}',", "'JSON Schema Validation', '' ], [ 'DSN', 'string', '{\"format\": \"dsn\"}',", "or IPv6 network as used in ``ipaddress`` module', ], [", "\"uri\"}', 'JSON Schema Validation', '' ], [ 'DSN', 'string', '{\"format\":", "'Pydantic standard \"format\" extension', '' ], [ 'datetime', 'string', '{\"format\":", "declared for the constraints, they are included as validations.' 'See", "it is ' 'valid in JSON Schema).' ) ], [", "JSON, and so, only str is valid as ' 'JSON", "'JSON Schema Core', '' ], [ 'list', 'array', '', 'JSON", "extension', '' ], [ 'UrlStr', 'string', '{\"format\": \"uri\"}', 'JSON Schema", "are included in the definition.' ], [ 'SecretStr', 'string', '{\"writeOnly\":", "will not be included in the schema.' ], [ 'PositiveFloat',", "\"file-path\"}', 'Pydantic standard \"format\" extension', '' ], [ 'DirectoryPath', 'string',", "Schema key types.' ) ], [ 'Union[str, int]', 'anyOf', '{\"anyOf\":", "they are included as validations. 
' 'See the mapping for", "<reponame>NoAnyLove/pydantic #!/usr/bin/env python3 \"\"\" Build a table of Python /", "'JSON Schema Validation', 'Any argument not passed to the function", "'JSON Schema Validation', 'And equivalently for any other subfields for", "'JSON Schema Validation', '' ], [ 'NegativeFloat', 'number', '{\"exclusiveMaximum\": 0}',", "'number', '{\"maximum\": 5, \"exclusiveMaximum\": 6, \"minimum\": 2, \"exclusiveMinimum\": 1, \"multipleOf\":", "[ 'IPvAnyNetwork', 'string', '{\"format\": \"ipvanynetwork\"}', 'Pydantic standard \"format\" extension', 'IPv4", "lt=6, le=5, multiple_of=2)', 'integer', '{\"maximum\": 5, \"exclusiveMaximum\": 6, \"minimum\": 2,", "col_width v += '+' with open('.tmp_schema_mappings.rst', 'w') as f: f.write(v)", "] headings = [ 'Python type', 'JSON Schema Type', 'Additional", "( 'And equivalently for any other set of subtypes. Note:", "'{\"exclusiveMinimum\": 0}', 'JSON Schema Validation', '' ], [ 'NegativeFloat', 'number',", "Schema Validation', '' ], [ 'DSN', 'string', '{\"format\": \"dsn\"}', 'Pydantic", "'Decimal', 'number', '', 'JSON Schema Core', '' ], [ 'UUID1',", "'timedelta', 'number', '{\"format\": \"time-delta\"}', 'Difference in seconds (a ``float``), with", "'NegativeFloat', 'number', '{\"exclusiveMaximum\": 0}', 'JSON Schema Validation', '' ], [", "'EmailStr', 'string', '{\"format\": \"email\"}', 'JSON Schema Validation', '' ], [", "[ 'UUID', 'string', '{\"format\": \"uuid\"}', 'Pydantic standard \"format\" extension', 'Suggested", "extension', 'IPv4 or IPv6 network as used in ``ipaddress`` module',", "'PositiveFloat', 'number', '{\"exclusiveMinimum\": 0}', 'JSON Schema Validation', '' ], [", "'' ], [ 'DSN', 'string', '{\"format\": \"dsn\"}', 'Pydantic standard \"format\"", "true}', 'JSON Schema Validation', '' ], [ 'List[str]', 'array', '{\"items\":", "future edits easier. 
Please edit this file directly not .tmp_schema_mappings.rst", "'{\"format\": \"name-email\"}', 'Pydantic standard \"format\" extension', '' ], [ 'UrlStr',", "], [ 'NegativeFloat', 'number', '{\"exclusiveMaximum\": 0}', 'JSON Schema Validation', ''", "in the schema.' ], [ 'BaseModel', 'object', '', 'JSON Schema", "'' col_width = 300 for _ in range(5): v +=", "[ [ 'bool', 'boolean', '', 'JSON Schema Core', '' ],", "the properties defined will be defined with standard JSON Schema,", "'string', '', 'JSON Schema Core', '' ], [ 'float', 'number',", "JSON Schema repository\\'s issues by maintainer.' ], [ 'Json', 'string',", "min_length=2, max_length=10)', 'string', '{\"pattern\": \"^text$\", \"minLength\": 2, \"maxLength\": 10}', 'JSON", "], [ 'float', 'number', '', 'JSON Schema Core', '' ],", "dicts. Have in mind that although you can use other", "Have in mind that although you can use other types", "in mind that although you can use other types as", "or IPv6 interface as used in ``ipaddress`` module', ], [", "], [ 'list', 'array', '', 'JSON Schema Core', '' ],", "i < 3 and text else text v += f'", "in', 'Notes', ] v = '' col_width = 300 for", "Schema Validation', 'And equivalently for any other subfields for unions.'", "col_width v += '+' for row in table: v +=", "Pydantic, only strings are valid keys for JSON, and so,", "'JSON Schema Type', 'Additional JSON Schema', 'Defined in', 'Notes', ]", "{\"type\": \"integer\"}]}', 'JSON Schema Validation', ( 'And equivalently for any", "[ 'PositiveInt', 'integer', '{\"exclusiveMinimum\": 0}', 'JSON Schema Validation', '' ],", "Validation', '' ], [ 'DSN', 'string', '{\"format\": \"dsn\"}', 'Pydantic standard", "|' v += '\\n' for _ in range(5): v +=", "Validation', 'Any argument not passed to the function (not defined)", "they are included as validations.' 
'See the mapping for ``confloat``", "\"format\" extension', 'IPv4 or IPv6 address as used in ``ipaddress``", "as used in ``ipaddress`` module', ], [ 'StrictStr', 'string', '',", "range(5): v += '+' + '-' * col_width v +=", "it would not be valid in OpenAPI (although it is", "'ConstrainedStr', 'string', '', 'JSON Schema Core', ( 'If the type", "validations. ' 'See the mapping for ``conint`` below.' ) ],", "5, \"exclusiveMaximum\": 6, \"minimum\": 2, \"exclusiveMinimum\": 1, \"multipleOf\": 2}', '',", "'' ], [ 'list', 'array', '', 'JSON Schema Core', ''", "'', 'JSON Schema Core', '' ], [ 'float', 'number', '',", "], [ 'UUID3', 'string', '{\"format\": \"uuid3\"}', 'Pydantic standard \"format\" extension',", "in OpenAPI.' ], [ 'FilePath', 'string', '{\"format\": \"file-path\"}', 'Pydantic standard", "table to make future edits easier. Please edit this file", "argument not passed to the function (not defined) will not", "'{\"exclusiveMaximum\": 0}', 'JSON Schema Validation', '' ], [ 'ConstrainedDecimal', 'number',", "'string', '{\"format\": \"uri\"}', 'JSON Schema Validation', '' ], [ 'DSN',", "use this declaration, as it would not be valid in", "Schema Validation', '' ], [ 'NegativeFloat', 'number', '{\"exclusiveMaximum\": 0}', 'JSON", "[ 'time', 'string', '{\"format\": \"time\"}', 'JSON Schema Validation', '' ],", "the type has values declared for the constraints, they are", "\"uuid\"}', 'Pydantic standard \"format\" extension', 'Suggested in OpenAPI.' 
], [", "Note: If using schemas for OpenAPI, ' 'you shouldn\\'t use", "\"format\" extension', '' ], [ 'Path', 'string', '{\"format\": \"path\"}', 'Pydantic", "``ipaddress`` module', ], [ 'IPvAnyInterface', 'string', '{\"format\": \"ipvanyinterface\"}', 'Pydantic standard", "], [ 'datetime', 'string', '{\"format\": \"date-time\"}', 'JSON Schema Validation', ''", "Validation', '' ], [ 'SecretBytes', 'string', '{\"writeOnly\": true}', 'JSON Schema", "[ 'PositiveFloat', 'number', '{\"exclusiveMinimum\": 0}', 'JSON Schema Validation', '' ],", "1, \"multipleOf\": 2}', 'JSON Schema Validation', 'Any argument not passed", "'JSON Schema Core', '' ], [ 'int', 'integer', '', 'JSON", "'you shouldn\\'t use this declaration, as it would not be", "'date', 'string', '{\"format\": \"date\"}', 'JSON Schema Validation', '' ], [", "'{\"format\": \"email\"}', 'JSON Schema Validation', '' ], [ 'NameEmail', 'string',", "for OpenAPI, ' 'you shouldn\\'t use this declaration, as it", "values declared for the constraints, they are included as validations.", "'condecimal(gt=1, ge=2, lt=6, le=5, multiple_of=2)', 'number', '{\"maximum\": 5, \"exclusiveMaximum\": 6,", "'anyOf', '{\"anyOf\": [{\"type\": \"string\"}, {\"type\": \"integer\"}]}', 'JSON Schema Validation', 'And", "'' ], [ 'NegativeInt', 'integer', '{\"exclusiveMaximum\": 0}', 'JSON Schema Validation',", "enumerate(row): text = f'``{text}``' if i < 3 and text", "like this rather than as a raw rst table to", "raw rst table to make future edits easier. Please edit", "dicts with Pydantic, only strings are valid keys for JSON,", "for ``constr`` below.' ) ], [ 'constr(regex=\\'^text$\\', min_length=2, max_length=10)', 'string',", "Validation', 'And equivalently for any other sub type, e.g. 
List[int].'", "\"date\"}', 'JSON Schema Validation', '' ], [ 'time', 'string', '{\"format\":", "Schema Validation', '' ], [ 'ConstrainedDecimal', 'number', '', 'JSON Schema", "Core', 'All the properties defined will be defined with standard", "], [ 'Tuple[str, int]', 'array', '{\"items\": [{\"type\": \"string\"}, {\"type\": \"integer\"}]}',", "'JSON Schema key types.' ) ], [ 'Union[str, int]', 'anyOf',", "'Union[str, int]', 'anyOf', '{\"anyOf\": [{\"type\": \"string\"}, {\"type\": \"integer\"}]}', 'JSON Schema", "be included in the schema.' ], [ 'PositiveFloat', 'number', '{\"exclusiveMinimum\":", "( 'If the type has values declared for the constraints,", "extension', '' ], [ 'bytes', 'string', '{\"format\": \"binary\"}', 'OpenAPI', ''", "0}', 'JSON Schema Validation', '' ], [ 'ConstrainedDecimal', 'number', '',", "\"format\" extension', '' ], [ 'UUID3', 'string', '{\"format\": \"uuid3\"}', 'Pydantic", "'{\"exclusiveMaximum\": 0}', 'JSON Schema Validation', '' ], [ 'ConstrainedFloat', 'number',", "'JSON Schema Core', '' ], [ 'float', 'number', '', 'JSON", "Validation', '' ], [ 'NameEmail', 'string', '{\"format\": \"name-email\"}', 'Pydantic standard", "Core', '' ], [ 'str', 'string', '', 'JSON Schema Core',", "Validation', '' ], [ 'date', 'string', '{\"format\": \"date\"}', 'JSON Schema", "'IPv4 or IPv6 address as used in ``ipaddress`` module', ],", "], [ 'ConstrainedInt', 'integer', '', 'JSON Schema Core', ( 'If", "'Pydantic standard \"format\" extension', '' ], [ 'bytes', 'string', '{\"format\":", "in JSON Schema).' ) ], [ 'Dict[str, int]', 'object', '{\"additionalProperties\":", "``confloat`` below.' ) ], [ 'confloat(gt=1, ge=2, lt=6, le=5, multiple_of=2)',", "* col_width v += '+\\n|' for heading in headings: v", "not be included in the schema.' 
], [ 'ConstrainedInt', 'integer',", "'{\"format\": \"path\"}', 'Pydantic standard \"format\" extension', '' ], [ 'datetime',", "2}} |' v += '\\n' for _ in range(5): v", "f' {text:{col_width - 2}} |' v += '\\n' for _", "constraints, they are included as validations. ' 'See the mapping", "'See the mapping for ``constr`` below.' ) ], [ 'constr(regex=\\'^text$\\',", "standard \"format\" extension', 'Suggested in OpenAPI.' ], [ 'FilePath', 'string',", "row in table: v += '\\n|' for i, text in", "\"exclusiveMinimum\": 1, \"multipleOf\": 2}', 'JSON Schema Validation', 'Any argument not", "'datetime', 'string', '{\"format\": \"date-time\"}', 'JSON Schema Validation', '' ], [", "[ 'bool', 'boolean', '', 'JSON Schema Core', '' ], [", "], [ 'Decimal', 'number', '', 'JSON Schema Core', '' ],", "standard \"format\" extension', '' ], [ 'datetime', 'string', '{\"format\": \"date-time\"}',", "\"string\"}, {\"type\": \"integer\"}]}', 'JSON Schema Validation', ( 'And equivalently for", "be defined with standard JSON Schema, including submodels.' ] ]", "Validation', '' ], [ 'ConstrainedFloat', 'number', '', 'JSON Schema Core',", "IPv6 address as used in ``ipaddress`` module', ], [ 'IPvAnyInterface',", "] v = '' col_width = 300 for _ in", "], [ 'NegativeInt', 'integer', '{\"exclusiveMaximum\": 0}', 'JSON Schema Validation', ''", "included as validations. ' 'See the mapping for ``conint`` below.'", "literal values in the enum are included in the definition.'", "'' ], [ 'IPvAnyAddress', 'string', '{\"format\": \"ipvanyaddress\"}', 'Pydantic standard \"format\"", "+= '+\\n|' for heading in headings: v += f' {heading:{col_width", "subfields for dicts. Have in mind that although you can", "'' ], [ 'Path', 'string', '{\"format\": \"path\"}', 'Pydantic standard \"format\"", "[ 'timedelta', 'number', '{\"format\": \"time-delta\"}', 'Difference in seconds (a ``float``)," ]
[ "nose.plugins.attrib import attr from hubspot3.test import helper from hubspot3.broadcast import", "BroadcastClientTest(unittest.TestCase): \"\"\" Unit tests for the HubSpot Broadcast API Python", "not have raised exception: {}\".format(e)) if __name__ == \"__main__\": unittest.main()", "the broadcast using different call bcast = self.client.get_broadcast(broadcast_guid) # Should", "= int(time.time() + 6000) * 1000 bcast = Broadcast( {", "tests http://www.hubspot.com\") channels = self.client.get_channels(current=True, publish_only=True) if len(channels) == 0:", "older channels ensured to exist channels = self.client.get_channels(current=True) self.assertTrue(len(channels) >", "channels ensured to exist channels = self.client.get_channels(current=True) self.assertTrue(len(channels) > 0)", "broadcast = broadcasts[0].to_dict() self.assertIsNotNone(broadcast[\"channelGuid\"]) print(\"\\n\\nFetched some broadcasts\") broadcast_guid = broadcast[\"broadcastGuid\"]", "hubspot3.broadcast import Broadcast, BroadcastClient class BroadcastClientTest(unittest.TestCase): \"\"\" Unit tests for", "* 1000 bcast = Broadcast( { \"content\": content, \"triggerAt\": trigger_at,", "broadcast using different call bcast = self.client.get_broadcast(broadcast_guid) # Should have", "bcast = Broadcast( { \"content\": content, \"triggerAt\": trigger_at, \"channelGuid\": channel.channel_guid,", "attr from hubspot3.test import helper from hubspot3.broadcast import Broadcast, BroadcastClient", "http://www.hubspot.com\") channels = self.client.get_channels(current=True, publish_only=True) if len(channels) == 0: self.fail(\"Failed", "self.broadcast_guids = None def tearDown(self): # Cancel any broadcasts created", "unittest tests for the Broadcast API. Questions, comments: http://docs.hubapi.com/wiki/Discussion_Group \"\"\"", "contains some unittest tests for the Broadcast API. Questions, comments:", "the Broadcast API. 
Questions, comments: http://docs.hubapi.com/wiki/Discussion_Group \"\"\" def setUp(self): self.client", "This file contains some unittest tests for the Broadcast API.", "portal 62515 broadcasts = self.client.get_broadcasts(limit=1) self.assertTrue(len(broadcasts) > 0) broadcast =", "class BroadcastClientTest(unittest.TestCase): \"\"\" Unit tests for the HubSpot Broadcast API", "len(channels) == 0: self.fail(\"Failed to find a publishable channel\") channel", "least 1 broadcast on the test portal 62515 broadcasts =", "broadcast on the test portal 62515 broadcasts = self.client.get_broadcasts(limit=1) self.assertTrue(len(broadcasts)", "trigger in the future trigger_at = int(time.time() + 6000) *", "{ \"content\": content, \"triggerAt\": trigger_at, \"channelGuid\": channel.channel_guid, } ) try:", "self.assertEqual(channel.channel_guid, broadcast.channel_guid) # Ensure it is canceled self.broadcast_guids = []", "file contains some unittest tests for the Broadcast API. Questions,", "def test_get_broadcasts(self): # Should fetch at least 1 broadcast on", "hubspot3 unit tests http://www.hubspot.com\") channels = self.client.get_channels(current=True, publish_only=True) if len(channels)", "self.fail(\"Should not have raised exception: {}\".format(e)) if __name__ == \"__main__\":", "for the HubSpot Broadcast API Python client. This file contains", "trigger_at = int(time.time() + 6000) * 1000 bcast = Broadcast(", "time import unittest from nose.plugins.attrib import attr from hubspot3.test import", "the future trigger_at = int(time.time() + 6000) * 1000 bcast", "the HubSpot Broadcast API Python client. This file contains some", "Broadcast API. 
Questions, comments: http://docs.hubapi.com/wiki/Discussion_Group \"\"\" def setUp(self): self.client =", "Should have expected fields self.assertIsNotNone(bcast.broadcast_guid) self.assertIsNotNone(bcast.channel_guid) self.assertIsNotNone(bcast.status) @attr(\"api\") def test_get_channels(self):", "Broadcast, BroadcastClient class BroadcastClientTest(unittest.TestCase): \"\"\" Unit tests for the HubSpot", "trigger_at, \"channelGuid\": channel.channel_guid, } ) try: resp = self.client.create_broadcast(bcast) broadcast", "http://docs.hubapi.com/wiki/Discussion_Group \"\"\" def setUp(self): self.client = BroadcastClient(**helper.get_options()) self.broadcast_guids = None", "> 0) @attr(\"api\") def test_create_broadcast(self): content = dict(body=\"Test hubspot3 unit", "find a publishable channel\") channel = channels[0] # Get a", "of the tests if self.broadcast_guids: list(map(self.client.cancel_broadcast, self.broadcast_guids)) @attr(\"api\") def test_get_broadcasts(self):", "channel.channel_guid, } ) try: resp = self.client.create_broadcast(bcast) broadcast = Broadcast(resp)", "broadcasts = self.client.get_broadcasts(limit=1) self.assertTrue(len(broadcasts) > 0) broadcast = broadcasts[0].to_dict() self.assertIsNotNone(broadcast[\"channelGuid\"])", "= self.client.get_broadcasts(limit=1) self.assertTrue(len(broadcasts) > 0) broadcast = broadcasts[0].to_dict() self.assertIsNotNone(broadcast[\"channelGuid\"]) print(\"\\n\\nFetched", "== 0: self.fail(\"Failed to find a publishable channel\") channel =", "[] self.broadcast_guids.append(broadcast.broadcast_guid) except Exception as e: self.fail(\"Should not have raised", "broadcasts[0].to_dict() self.assertIsNotNone(broadcast[\"channelGuid\"]) print(\"\\n\\nFetched some broadcasts\") broadcast_guid = broadcast[\"broadcastGuid\"] # Re-fetch", "except Exception as e: self.fail(\"Should not have raised exception: {}\".format(e))", "content, \"triggerAt\": trigger_at, \"channelGuid\": channel.channel_guid, } ) try: resp =", 
"Should fetch at least 1 broadcast on the test portal", "the tests if self.broadcast_guids: list(map(self.client.cancel_broadcast, self.broadcast_guids)) @attr(\"api\") def test_get_broadcasts(self): #", "self.client.get_broadcasts(limit=1) self.assertTrue(len(broadcasts) > 0) broadcast = broadcasts[0].to_dict() self.assertIsNotNone(broadcast[\"channelGuid\"]) print(\"\\n\\nFetched some", "self.assertTrue(len(channels) > 0) @attr(\"api\") def test_create_broadcast(self): content = dict(body=\"Test hubspot3", "channel\") channel = channels[0] # Get a trigger in the", "channels[0] # Get a trigger in the future trigger_at =", "self.client.get_broadcast(broadcast_guid) # Should have expected fields self.assertIsNotNone(bcast.broadcast_guid) self.assertIsNotNone(bcast.channel_guid) self.assertIsNotNone(bcast.status) @attr(\"api\")", "exist channels = self.client.get_channels(current=True) self.assertTrue(len(channels) > 0) @attr(\"api\") def test_create_broadcast(self):", "self.broadcast_guids: list(map(self.client.cancel_broadcast, self.broadcast_guids)) @attr(\"api\") def test_get_broadcasts(self): # Should fetch at", "= self.client.create_broadcast(bcast) broadcast = Broadcast(resp) self.assertIsNotNone(broadcast.broadcast_guid) self.assertEqual(channel.channel_guid, broadcast.channel_guid) # Ensure", "= BroadcastClient(**helper.get_options()) self.broadcast_guids = None def tearDown(self): # Cancel any", "broadcasts created as part of the tests if self.broadcast_guids: list(map(self.client.cancel_broadcast,", "1 broadcast on the test portal 62515 broadcasts = self.client.get_broadcasts(limit=1)", "self.assertIsNotNone(broadcast[\"channelGuid\"]) print(\"\\n\\nFetched some broadcasts\") broadcast_guid = broadcast[\"broadcastGuid\"] # Re-fetch the", "self.assertIsNotNone(broadcast.broadcast_guid) self.assertEqual(channel.channel_guid, broadcast.channel_guid) # Ensure it is canceled self.broadcast_guids =", "the test portal 62515 broadcasts = 
self.client.get_broadcasts(limit=1) self.assertTrue(len(broadcasts) > 0)", "0) broadcast = broadcasts[0].to_dict() self.assertIsNotNone(broadcast[\"channelGuid\"]) print(\"\\n\\nFetched some broadcasts\") broadcast_guid =", "self.assertIsNotNone(bcast.status) @attr(\"api\") def test_get_channels(self): # Fetch older channels ensured to", "broadcast[\"broadcastGuid\"] # Re-fetch the broadcast using different call bcast =", "expected fields self.assertIsNotNone(bcast.broadcast_guid) self.assertIsNotNone(bcast.channel_guid) self.assertIsNotNone(bcast.status) @attr(\"api\") def test_get_channels(self): # Fetch", "different call bcast = self.client.get_broadcast(broadcast_guid) # Should have expected fields", "tests for the HubSpot Broadcast API Python client. This file", "= broadcast[\"broadcastGuid\"] # Re-fetch the broadcast using different call bcast", "test portal 62515 broadcasts = self.client.get_broadcasts(limit=1) self.assertTrue(len(broadcasts) > 0) broadcast", "comments: http://docs.hubapi.com/wiki/Discussion_Group \"\"\" def setUp(self): self.client = BroadcastClient(**helper.get_options()) self.broadcast_guids =", "def tearDown(self): # Cancel any broadcasts created as part of", "test_create_broadcast(self): content = dict(body=\"Test hubspot3 unit tests http://www.hubspot.com\") channels =", "@attr(\"api\") def test_create_broadcast(self): content = dict(body=\"Test hubspot3 unit tests http://www.hubspot.com\")", "import time import unittest from nose.plugins.attrib import attr from hubspot3.test", "# Fetch older channels ensured to exist channels = self.client.get_channels(current=True)", "try: resp = self.client.create_broadcast(bcast) broadcast = Broadcast(resp) self.assertIsNotNone(broadcast.broadcast_guid) self.assertEqual(channel.channel_guid, broadcast.channel_guid)", "broadcast = Broadcast(resp) self.assertIsNotNone(broadcast.broadcast_guid) self.assertEqual(channel.channel_guid, broadcast.channel_guid) # Ensure it is", "broadcasts\") 
broadcast_guid = broadcast[\"broadcastGuid\"] # Re-fetch the broadcast using different", "on the test portal 62515 broadcasts = self.client.get_broadcasts(limit=1) self.assertTrue(len(broadcasts) >", "test_get_channels(self): # Fetch older channels ensured to exist channels =", "Python client. This file contains some unittest tests for the", "as e: self.fail(\"Should not have raised exception: {}\".format(e)) if __name__", "e: self.fail(\"Should not have raised exception: {}\".format(e)) if __name__ ==", "# Should have expected fields self.assertIsNotNone(bcast.broadcast_guid) self.assertIsNotNone(bcast.channel_guid) self.assertIsNotNone(bcast.status) @attr(\"api\") def", "channels = self.client.get_channels(current=True) self.assertTrue(len(channels) > 0) @attr(\"api\") def test_create_broadcast(self): content", "publish_only=True) if len(channels) == 0: self.fail(\"Failed to find a publishable", "Cancel any broadcasts created as part of the tests if", "is canceled self.broadcast_guids = [] self.broadcast_guids.append(broadcast.broadcast_guid) except Exception as e:", "Broadcast( { \"content\": content, \"triggerAt\": trigger_at, \"channelGuid\": channel.channel_guid, } )", "import Broadcast, BroadcastClient class BroadcastClientTest(unittest.TestCase): \"\"\" Unit tests for the", "0: self.fail(\"Failed to find a publishable channel\") channel = channels[0]", "to exist channels = self.client.get_channels(current=True) self.assertTrue(len(channels) > 0) @attr(\"api\") def", "broadcast.channel_guid) # Ensure it is canceled self.broadcast_guids = [] self.broadcast_guids.append(broadcast.broadcast_guid)", "= self.client.get_channels(current=True, publish_only=True) if len(channels) == 0: self.fail(\"Failed to find", "\"\"\" Unit tests for the HubSpot Broadcast API Python client.", "if self.broadcast_guids: list(map(self.client.cancel_broadcast, self.broadcast_guids)) @attr(\"api\") def test_get_broadcasts(self): # Should fetch", "self.assertTrue(len(broadcasts) > 0) 
broadcast = broadcasts[0].to_dict() self.assertIsNotNone(broadcast[\"channelGuid\"]) print(\"\\n\\nFetched some broadcasts\")", "Broadcast(resp) self.assertIsNotNone(broadcast.broadcast_guid) self.assertEqual(channel.channel_guid, broadcast.channel_guid) # Ensure it is canceled self.broadcast_guids", "API. Questions, comments: http://docs.hubapi.com/wiki/Discussion_Group \"\"\" def setUp(self): self.client = BroadcastClient(**helper.get_options())", "= None def tearDown(self): # Cancel any broadcasts created as", "tests if self.broadcast_guids: list(map(self.client.cancel_broadcast, self.broadcast_guids)) @attr(\"api\") def test_get_broadcasts(self): # Should", "= broadcasts[0].to_dict() self.assertIsNotNone(broadcast[\"channelGuid\"]) print(\"\\n\\nFetched some broadcasts\") broadcast_guid = broadcast[\"broadcastGuid\"] #", "any broadcasts created as part of the tests if self.broadcast_guids:", "Fetch older channels ensured to exist channels = self.client.get_channels(current=True) self.assertTrue(len(channels)", "publishable channel\") channel = channels[0] # Get a trigger in", "Broadcast API Python client. 
This file contains some unittest tests", "Questions, comments: http://docs.hubapi.com/wiki/Discussion_Group \"\"\" def setUp(self): self.client = BroadcastClient(**helper.get_options()) self.broadcast_guids", "created as part of the tests if self.broadcast_guids: list(map(self.client.cancel_broadcast, self.broadcast_guids))", "BroadcastClient(**helper.get_options()) self.broadcast_guids = None def tearDown(self): # Cancel any broadcasts", ") try: resp = self.client.create_broadcast(bcast) broadcast = Broadcast(resp) self.assertIsNotNone(broadcast.broadcast_guid) self.assertEqual(channel.channel_guid,", "import helper from hubspot3.broadcast import Broadcast, BroadcastClient class BroadcastClientTest(unittest.TestCase): \"\"\"", "part of the tests if self.broadcast_guids: list(map(self.client.cancel_broadcast, self.broadcast_guids)) @attr(\"api\") def", "at least 1 broadcast on the test portal 62515 broadcasts", "0) @attr(\"api\") def test_create_broadcast(self): content = dict(body=\"Test hubspot3 unit tests", "import attr from hubspot3.test import helper from hubspot3.broadcast import Broadcast,", "6000) * 1000 bcast = Broadcast( { \"content\": content, \"triggerAt\":", "Unit tests for the HubSpot Broadcast API Python client. 
This", "int(time.time() + 6000) * 1000 bcast = Broadcast( { \"content\":", "future trigger_at = int(time.time() + 6000) * 1000 bcast =", "\"\"\" def setUp(self): self.client = BroadcastClient(**helper.get_options()) self.broadcast_guids = None def", "1000 bcast = Broadcast( { \"content\": content, \"triggerAt\": trigger_at, \"channelGuid\":", "test_get_broadcasts(self): # Should fetch at least 1 broadcast on the", "= channels[0] # Get a trigger in the future trigger_at", "canceled self.broadcast_guids = [] self.broadcast_guids.append(broadcast.broadcast_guid) except Exception as e: self.fail(\"Should", "# Cancel any broadcasts created as part of the tests", "tearDown(self): # Cancel any broadcasts created as part of the", "} ) try: resp = self.client.create_broadcast(bcast) broadcast = Broadcast(resp) self.assertIsNotNone(broadcast.broadcast_guid)", "import unittest from nose.plugins.attrib import attr from hubspot3.test import helper", "using different call bcast = self.client.get_broadcast(broadcast_guid) # Should have expected", "= self.client.get_broadcast(broadcast_guid) # Should have expected fields self.assertIsNotNone(bcast.broadcast_guid) self.assertIsNotNone(bcast.channel_guid) self.assertIsNotNone(bcast.status)", "self.client.get_channels(current=True, publish_only=True) if len(channels) == 0: self.fail(\"Failed to find a", "= Broadcast(resp) self.assertIsNotNone(broadcast.broadcast_guid) self.assertEqual(channel.channel_guid, broadcast.channel_guid) # Ensure it is canceled", "= dict(body=\"Test hubspot3 unit tests http://www.hubspot.com\") channels = self.client.get_channels(current=True, publish_only=True)", "\"content\": content, \"triggerAt\": trigger_at, \"channelGuid\": channel.channel_guid, } ) try: resp", "= Broadcast( { \"content\": content, \"triggerAt\": trigger_at, \"channelGuid\": channel.channel_guid, }", "self.broadcast_guids)) @attr(\"api\") def test_get_broadcasts(self): # Should fetch at least 1", "some broadcasts\") broadcast_guid = 
broadcast[\"broadcastGuid\"] # Re-fetch the broadcast using", "setUp(self): self.client = BroadcastClient(**helper.get_options()) self.broadcast_guids = None def tearDown(self): #", "self.fail(\"Failed to find a publishable channel\") channel = channels[0] #", "fields self.assertIsNotNone(bcast.broadcast_guid) self.assertIsNotNone(bcast.channel_guid) self.assertIsNotNone(bcast.status) @attr(\"api\") def test_get_channels(self): # Fetch older", "in the future trigger_at = int(time.time() + 6000) * 1000", "def setUp(self): self.client = BroadcastClient(**helper.get_options()) self.broadcast_guids = None def tearDown(self):", "client. This file contains some unittest tests for the Broadcast", "resp = self.client.create_broadcast(bcast) broadcast = Broadcast(resp) self.assertIsNotNone(broadcast.broadcast_guid) self.assertEqual(channel.channel_guid, broadcast.channel_guid) #", "+ 6000) * 1000 bcast = Broadcast( { \"content\": content,", "Re-fetch the broadcast using different call bcast = self.client.get_broadcast(broadcast_guid) #", "> 0) broadcast = broadcasts[0].to_dict() self.assertIsNotNone(broadcast[\"channelGuid\"]) print(\"\\n\\nFetched some broadcasts\") broadcast_guid", "a trigger in the future trigger_at = int(time.time() + 6000)", "\"triggerAt\": trigger_at, \"channelGuid\": channel.channel_guid, } ) try: resp = self.client.create_broadcast(bcast)", "self.assertIsNotNone(bcast.broadcast_guid) self.assertIsNotNone(bcast.channel_guid) self.assertIsNotNone(bcast.status) @attr(\"api\") def test_get_channels(self): # Fetch older channels", "from nose.plugins.attrib import attr from hubspot3.test import helper from hubspot3.broadcast", "\"channelGuid\": channel.channel_guid, } ) try: resp = self.client.create_broadcast(bcast) broadcast =", "for the Broadcast API. 
Questions, comments: http://docs.hubapi.com/wiki/Discussion_Group \"\"\" def setUp(self):", "if len(channels) == 0: self.fail(\"Failed to find a publishable channel\")", "a publishable channel\") channel = channels[0] # Get a trigger", "Ensure it is canceled self.broadcast_guids = [] self.broadcast_guids.append(broadcast.broadcast_guid) except Exception", "from hubspot3.test import helper from hubspot3.broadcast import Broadcast, BroadcastClient class", "channels = self.client.get_channels(current=True, publish_only=True) if len(channels) == 0: self.fail(\"Failed to", "None def tearDown(self): # Cancel any broadcasts created as part", "list(map(self.client.cancel_broadcast, self.broadcast_guids)) @attr(\"api\") def test_get_broadcasts(self): # Should fetch at least", "Get a trigger in the future trigger_at = int(time.time() +", "unittest from nose.plugins.attrib import attr from hubspot3.test import helper from", "self.client.get_channels(current=True) self.assertTrue(len(channels) > 0) @attr(\"api\") def test_create_broadcast(self): content = dict(body=\"Test", "it is canceled self.broadcast_guids = [] self.broadcast_guids.append(broadcast.broadcast_guid) except Exception as", "= self.client.get_channels(current=True) self.assertTrue(len(channels) > 0) @attr(\"api\") def test_create_broadcast(self): content =", "unit tests http://www.hubspot.com\") channels = self.client.get_channels(current=True, publish_only=True) if len(channels) ==", "from hubspot3.broadcast import Broadcast, BroadcastClient class BroadcastClientTest(unittest.TestCase): \"\"\" Unit tests", "# Ensure it is canceled self.broadcast_guids = [] self.broadcast_guids.append(broadcast.broadcast_guid) except", "to find a publishable channel\") channel = channels[0] # Get", "HubSpot Broadcast API Python client. 
This file contains some unittest", "have expected fields self.assertIsNotNone(bcast.broadcast_guid) self.assertIsNotNone(bcast.channel_guid) self.assertIsNotNone(bcast.status) @attr(\"api\") def test_get_channels(self): #", "# Should fetch at least 1 broadcast on the test", "= [] self.broadcast_guids.append(broadcast.broadcast_guid) except Exception as e: self.fail(\"Should not have", "Exception as e: self.fail(\"Should not have raised exception: {}\".format(e)) if", "print(\"\\n\\nFetched some broadcasts\") broadcast_guid = broadcast[\"broadcastGuid\"] # Re-fetch the broadcast", "def test_get_channels(self): # Fetch older channels ensured to exist channels", "self.assertIsNotNone(bcast.channel_guid) self.assertIsNotNone(bcast.status) @attr(\"api\") def test_get_channels(self): # Fetch older channels ensured", "helper from hubspot3.broadcast import Broadcast, BroadcastClient class BroadcastClientTest(unittest.TestCase): \"\"\" Unit", "self.broadcast_guids.append(broadcast.broadcast_guid) except Exception as e: self.fail(\"Should not have raised exception:", "ensured to exist channels = self.client.get_channels(current=True) self.assertTrue(len(channels) > 0) @attr(\"api\")", "hubspot3.test import helper from hubspot3.broadcast import Broadcast, BroadcastClient class BroadcastClientTest(unittest.TestCase):", "tests for the Broadcast API. 
Questions, comments: http://docs.hubapi.com/wiki/Discussion_Group \"\"\" def", "def test_create_broadcast(self): content = dict(body=\"Test hubspot3 unit tests http://www.hubspot.com\") channels", "self.client.create_broadcast(bcast) broadcast = Broadcast(resp) self.assertIsNotNone(broadcast.broadcast_guid) self.assertEqual(channel.channel_guid, broadcast.channel_guid) # Ensure it", "@attr(\"api\") def test_get_broadcasts(self): # Should fetch at least 1 broadcast", "channel = channels[0] # Get a trigger in the future", "self.broadcast_guids = [] self.broadcast_guids.append(broadcast.broadcast_guid) except Exception as e: self.fail(\"Should not", "some unittest tests for the Broadcast API. Questions, comments: http://docs.hubapi.com/wiki/Discussion_Group", "62515 broadcasts = self.client.get_broadcasts(limit=1) self.assertTrue(len(broadcasts) > 0) broadcast = broadcasts[0].to_dict()", "content = dict(body=\"Test hubspot3 unit tests http://www.hubspot.com\") channels = self.client.get_channels(current=True,", "API Python client. 
This file contains some unittest tests for", "dict(body=\"Test hubspot3 unit tests http://www.hubspot.com\") channels = self.client.get_channels(current=True, publish_only=True) if", "self.client = BroadcastClient(**helper.get_options()) self.broadcast_guids = None def tearDown(self): # Cancel", "fetch at least 1 broadcast on the test portal 62515", "call bcast = self.client.get_broadcast(broadcast_guid) # Should have expected fields self.assertIsNotNone(bcast.broadcast_guid)", "@attr(\"api\") def test_get_channels(self): # Fetch older channels ensured to exist", "# Re-fetch the broadcast using different call bcast = self.client.get_broadcast(broadcast_guid)", "BroadcastClient class BroadcastClientTest(unittest.TestCase): \"\"\" Unit tests for the HubSpot Broadcast", "as part of the tests if self.broadcast_guids: list(map(self.client.cancel_broadcast, self.broadcast_guids)) @attr(\"api\")", "broadcast_guid = broadcast[\"broadcastGuid\"] # Re-fetch the broadcast using different call", "# Get a trigger in the future trigger_at = int(time.time()", "bcast = self.client.get_broadcast(broadcast_guid) # Should have expected fields self.assertIsNotNone(bcast.broadcast_guid) self.assertIsNotNone(bcast.channel_guid)" ]
[ "value self.url = url self.filename = filename def get_file(self, tmp_dir:", "real work ################################################################################ class TestFile(enum.Enum): AGRIPRC_2018 = ( 0, \"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2018/Landbouwgebruikspercelen_LV_2018_GewVLA_Shape.zip\",", "# just download url_path = Path(url) if url_path.suffix.lower() == dst_path.suffix.lower():", "from pathlib import Path import pprint import shutil import sys", "contains {testfile_info.featurecount} rows.\" ) return testfile_path def download_samplefile( url: str,", "def __init__(self, value, url, filename): self._value_ = value self.url =", "rows.\" ) return testfile_path def download_samplefile( url: str, dst_name: str,", "if len(tmp_paths) == 1: tmp_path = tmp_paths[0] else: raise Exception(", "# Unzip unzippedzip_dir = dst_path.parent / tmp_path.stem logger.info(f\"Unzip to {unzippedzip_dir}\")", "\"\"\" # If the destination path is a directory, use", "len(tmp_paths) == 1: tmp_path = tmp_paths[0] else: raise Exception( f\"Should", "data for benchmarking geo operations. 
\"\"\" import enum import logging", "= filename def get_file(self, tmp_dir: Path) -> Path: testfile_path =", "as zip_ref: zip_ref.extractall(unzippedzip_dir) # Look for the file tmp_paths =", "coding: utf-8 -*- \"\"\" Module to prepare test data for", "else: raise Exception( f\"Should find 1 geofile, found {len(tmp_paths)}: \\n{pprint.pformat(tmp_paths)}\"", "if tmp_path.suffix == \".zip\": # Unzip unzippedzip_dir = dst_path.parent /", "logger = logging.getLogger(__name__) ################################################################################ # The real work ################################################################################ class", "-*- coding: utf-8 -*- \"\"\" Module to prepare test data", "a directory, use the default file name dst_path = prepare_dst_path(dst_name,", "urllib.request.urlretrieve(url, dst_path) else: # The file downloaded is different that", "file to dest_path. If it is zipped, it will be", "== \".zip\": # Unzip unzippedzip_dir = dst_path.parent / tmp_path.stem logger.info(f\"Unzip", ") testfile_info = gfo.get_layerinfo(testfile_path) logger.debug( f\"TestFile {self.name} contains {testfile_info.featurecount} rows.\"", "TestFile(enum.Enum): AGRIPRC_2018 = ( 0, \"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2018/Landbouwgebruikspercelen_LV_2018_GewVLA_Shape.zip\", \"agriprc_2018.gpkg\", ) AGRIPRC_2019 =", "download dst_dir (Path): the dir to downloaded the sample file", "default tmp location will be used. Defaults to None. Returns:", "# If the temp file is a .zip file, unzip", "file. 
\"\"\" # If the destination path is a directory,", "= gfo.get_layerinfo(testfile_path) logger.debug( f\"TestFile {self.name} contains {testfile_info.featurecount} rows.\" ) return", ") COMMUNES = ( 2, \"https://downloadagiv.blob.core.windows.net/referentiebestand-gemeenten/VoorlopigRefBestandGemeentegrenzen_2019-01-01/VRBG_toestand_16_05_2018_(geldend_vanaf_01_01_2019)_GewVLA_Shape.zip\", \"communes.gpkg\", ) def __init__(self,", "Path: testfile_path = download_samplefile( url=self.url, dst_name=self.filename, dst_dir=tmp_dir ) testfile_info =", "return dst_path def prepare_dst_path(dst_name: str, dst_dir: Optional[Path] = None): if", "in [\".shp\", \".gpkg\"]: tmp_paths.extend(list(unzippedzip_dir.rglob(f\"*{suffix}\"))) if len(tmp_paths) == 1: tmp_path =", "dst_name. Args: url (str): the url of the file to", "to a file with the same suffix as the dst_path,", "# converting will need to be done tmp_dir = dst_path.parent", "prepare_dst_path(dst_name: str, dst_dir: Optional[Path] = None): if dst_dir is None:", "dst_path, # just download url_path = Path(url) if url_path.suffix.lower() ==", "the path to the downloaded sample file. \"\"\" # If", "<filename>benchmark/benchmarks/testdata.py # -*- coding: utf-8 -*- \"\"\" Module to prepare", "else: # The file downloaded is different that the destination", "pprint import shutil import sys import tempfile from typing import", "url, filename): self._value_ = value self.url = url self.filename =", "logger.info(f\"Download tmp data to {tmp_path}\") urllib.request.urlretrieve(url, tmp_path) # If the", "file to. If it is None, a dir in the", "import Optional import urllib.request import zipfile # Add path so", "logger.info(f\"Unzip to {unzippedzip_dir}\") with zipfile.ZipFile(tmp_path, \"r\") as zip_ref: zip_ref.extractall(unzippedzip_dir) #", "If the url points to a file with the same", "be unzipped. If needed, it will be converted to the", "zipped, it will be unzipped. 
If needed, it will be", "filename def get_file(self, tmp_dir: Path) -> Path: testfile_path = download_samplefile(", "\".zip\": # Unzip unzippedzip_dir = dst_path.parent / tmp_path.stem logger.info(f\"Unzip to", "converting will need to be done tmp_dir = dst_path.parent /", "import logging from pathlib import Path import pprint import shutil", "tmp_path.suffix == \".zip\": # Unzip unzippedzip_dir = dst_path.parent / tmp_path.stem", "################################################################################ # The real work ################################################################################ class TestFile(enum.Enum): AGRIPRC_2018 =", "tmp_path.suffix: gfo.move(tmp_path, dst_path) else: logger.info(f\"Convert tmp file to {dst_path}\") gfo.makevalid(tmp_path,", "the url of the file to download dst_dir (Path): the", "tmp_dir = dst_path.parent / \"tmp\" try: # Remove tmp dir", "( 2, \"https://downloadagiv.blob.core.windows.net/referentiebestand-gemeenten/VoorlopigRefBestandGemeentegrenzen_2019-01-01/VRBG_toestand_16_05_2018_(geldend_vanaf_01_01_2019)_GewVLA_Shape.zip\", \"communes.gpkg\", ) def __init__(self, value, url, filename):", "urllib.request.urlretrieve(url, tmp_path) # If the temp file is a .zip", "if tmp_dir.exists(): shutil.rmtree(tmp_dir) return dst_path def prepare_dst_path(dst_name: str, dst_dir: Optional[Path]", "file to download dst_dir (Path): the dir to downloaded the", "tmp_paths.extend(list(unzippedzip_dir.rglob(f\"*{suffix}\"))) if len(tmp_paths) == 1: tmp_path = tmp_paths[0] else: raise", "destination path is a directory, use the default file name", "it will be unzipped. 
If needed, it will be converted", "to download dst_dir (Path): the dir to downloaded the sample", "(str): the url of the file to download dst_dir (Path):", "the file tmp_paths = [] for suffix in [\".shp\", \".gpkg\"]:", "# The file downloaded is different that the destination wanted,", "dst_path.suffix.lower(): logger.info(f\"Download to {dst_path}\") urllib.request.urlretrieve(url, dst_path) else: # The file", "# If the sample file already exists, return if dst_path.exists():", "value, url, filename): self._value_ = value self.url = url self.filename", "Add path so the benchmark packages are found sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent))", "work ################################################################################ class TestFile(enum.Enum): AGRIPRC_2018 = ( 0, \"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2018/Landbouwgebruikspercelen_LV_2018_GewVLA_Shape.zip\", \"agriprc_2018.gpkg\",", "zip_ref.extractall(unzippedzip_dir) # Look for the file tmp_paths = [] for", "tmp_dir.exists(): shutil.rmtree(tmp_dir) tmp_dir.mkdir(parents=True, exist_ok=True) # Download file tmp_path = tmp_dir", "= ( 1, \"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2019/Landbouwgebruikspercelen_LV_2019_GewVLA_Shapefile.zip\", \"agriprc_2019.gpkg\", ) COMMUNES = ( 2,", "in the default tmp location will be used. Defaults to", "test data for benchmarking geo operations. \"\"\" import enum import", "Unzip unzippedzip_dir = dst_path.parent / tmp_path.stem logger.info(f\"Unzip to {unzippedzip_dir}\") with", "sample file to dest_path. If it is zipped, it will", "the downloaded sample file. 
\"\"\" # If the destination path", "file tmp_paths = [] for suffix in [\".shp\", \".gpkg\"]: tmp_paths.extend(list(unzippedzip_dir.rglob(f\"*{suffix}\")))", "dst_path) finally: if tmp_dir.exists(): shutil.rmtree(tmp_dir) return dst_path def prepare_dst_path(dst_name: str,", "Make sure the destination directory exists dst_path.parent.mkdir(parents=True, exist_ok=True) # If", "= [] for suffix in [\".shp\", \".gpkg\"]: tmp_paths.extend(list(unzippedzip_dir.rglob(f\"*{suffix}\"))) if len(tmp_paths)", "to {tmp_path}\") urllib.request.urlretrieve(url, tmp_path) # If the temp file is", "import geofileops as gfo ################################################################################ # Some inits ################################################################################ logger", "be done tmp_dir = dst_path.parent / \"tmp\" try: # Remove", "benchmark packages are found sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) import geofileops as gfo", "= download_samplefile( url=self.url, dst_name=self.filename, dst_dir=tmp_dir ) testfile_info = gfo.get_layerinfo(testfile_path) logger.debug(", "file, unzip to dir if tmp_path.suffix == \".zip\": # Unzip", "= url self.filename = filename def get_file(self, tmp_dir: Path) ->", "prepare test data for benchmarking geo operations. 
\"\"\" import enum", "import zipfile # Add path so the benchmark packages are", "dst_name: str, dst_dir: Optional[Path] = None ) -> Path: \"\"\"", "gfo.get_layerinfo(testfile_path) logger.debug( f\"TestFile {self.name} contains {testfile_info.featurecount} rows.\" ) return testfile_path", "f\"Should find 1 geofile, found {len(tmp_paths)}: \\n{pprint.pformat(tmp_paths)}\" ) if dst_path.suffix", "dst_path.parent / \"tmp\" try: # Remove tmp dir if it", "dst_dir=tmp_dir ) testfile_info = gfo.get_layerinfo(testfile_path) logger.debug( f\"TestFile {self.name} contains {testfile_info.featurecount}", "the default file name dst_path = prepare_dst_path(dst_name, dst_dir) # If", "url of the file to download dst_dir (Path): the dir", "dst_path.exists(): return dst_path # Make sure the destination directory exists", "pathlib import Path import pprint import shutil import sys import", "finally: if tmp_dir.exists(): shutil.rmtree(tmp_dir) return dst_path def prepare_dst_path(dst_name: str, dst_dir:", "str, dst_name: str, dst_dir: Optional[Path] = None ) -> Path:", "file to {dst_path}\") gfo.makevalid(tmp_path, dst_path) finally: if tmp_dir.exists(): shutil.rmtree(tmp_dir) return", "downloaded sample file. 
\"\"\" # If the destination path is", "be converted to the file type as determined by the", "COMMUNES = ( 2, \"https://downloadagiv.blob.core.windows.net/referentiebestand-gemeenten/VoorlopigRefBestandGemeentegrenzen_2019-01-01/VRBG_toestand_16_05_2018_(geldend_vanaf_01_01_2019)_GewVLA_Shape.zip\", \"communes.gpkg\", ) def __init__(self, value,", "\"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2018/Landbouwgebruikspercelen_LV_2018_GewVLA_Shape.zip\", \"agriprc_2018.gpkg\", ) AGRIPRC_2019 = ( 1, \"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2019/Landbouwgebruikspercelen_LV_2019_GewVLA_Shapefile.zip\", \"agriprc_2019.gpkg\", )", "tmp file to {dst_path}\") gfo.makevalid(tmp_path, dst_path) finally: if tmp_dir.exists(): shutil.rmtree(tmp_dir)", "testfile_info = gfo.get_layerinfo(testfile_path) logger.debug( f\"TestFile {self.name} contains {testfile_info.featurecount} rows.\" )", "# Some inits ################################################################################ logger = logging.getLogger(__name__) ################################################################################ # The", "is a directory, use the default file name dst_path =", "tmp_dir: Path) -> Path: testfile_path = download_samplefile( url=self.url, dst_name=self.filename, dst_dir=tmp_dir", "[\".shp\", \".gpkg\"]: tmp_paths.extend(list(unzippedzip_dir.rglob(f\"*{suffix}\"))) if len(tmp_paths) == 1: tmp_path = tmp_paths[0]", "logging.getLogger(__name__) ################################################################################ # The real work ################################################################################ class TestFile(enum.Enum): AGRIPRC_2018", "= tmp_paths[0] else: raise Exception( f\"Should find 1 geofile, found", "the suffix of dst_name. 
Args: url (str): the url of", "{self.name} contains {testfile_info.featurecount} rows.\" ) return testfile_path def download_samplefile( url:", "None, a dir in the default tmp location will be", "already exists, return if dst_path.exists(): return dst_path # Make sure", "\"agriprc_2018.gpkg\", ) AGRIPRC_2019 = ( 1, \"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2019/Landbouwgebruikspercelen_LV_2019_GewVLA_Shapefile.zip\", \"agriprc_2019.gpkg\", ) COMMUNES", "file already exists, return if dst_path.exists(): return dst_path # Make", "dir if tmp_path.suffix == \".zip\": # Unzip unzippedzip_dir = dst_path.parent", "destination directory exists dst_path.parent.mkdir(parents=True, exist_ok=True) # If the url points", "\"\"\" Download a sample file to dest_path. If it is", "some # converting will need to be done tmp_dir =", "import urllib.request import zipfile # Add path so the benchmark", "logger.info(f\"Download to {dst_path}\") urllib.request.urlretrieve(url, dst_path) else: # The file downloaded", "1 geofile, found {len(tmp_paths)}: \\n{pprint.pformat(tmp_paths)}\" ) if dst_path.suffix == tmp_path.suffix:", "a sample file to dest_path. 
If it is zipped, it", "downloaded is different that the destination wanted, so some #", "wanted, so some # converting will need to be done", "Exception( f\"Should find 1 geofile, found {len(tmp_paths)}: \\n{pprint.pformat(tmp_paths)}\" ) if", "str(Path(__file__).resolve().parent.parent.parent)) import geofileops as gfo ################################################################################ # Some inits ################################################################################", "from typing import Optional import urllib.request import zipfile # Add", "the sample file already exists, return if dst_path.exists(): return dst_path", "Optional import urllib.request import zipfile # Add path so the", "The file downloaded is different that the destination wanted, so", "import pprint import shutil import sys import tempfile from typing", "url self.filename = filename def get_file(self, tmp_dir: Path) -> Path:", "suffix as the dst_path, # just download url_path = Path(url)", "suffix in [\".shp\", \".gpkg\"]: tmp_paths.extend(list(unzippedzip_dir.rglob(f\"*{suffix}\"))) if len(tmp_paths) == 1: tmp_path", "download_samplefile( url=self.url, dst_name=self.filename, dst_dir=tmp_dir ) testfile_info = gfo.get_layerinfo(testfile_path) logger.debug( f\"TestFile", "with the same suffix as the dst_path, # just download", "the same suffix as the dst_path, # just download url_path", "{unzippedzip_dir}\") with zipfile.ZipFile(tmp_path, \"r\") as zip_ref: zip_ref.extractall(unzippedzip_dir) # Look for", "the default tmp location will be used. 
Defaults to None.", "find 1 geofile, found {len(tmp_paths)}: \\n{pprint.pformat(tmp_paths)}\" ) if dst_path.suffix ==", "sys import tempfile from typing import Optional import urllib.request import", "/ tmp_path.stem logger.info(f\"Unzip to {unzippedzip_dir}\") with zipfile.ZipFile(tmp_path, \"r\") as zip_ref:", "tmp_path = tmp_paths[0] else: raise Exception( f\"Should find 1 geofile,", "the file type as determined by the suffix of dst_name.", "== 1: tmp_path = tmp_paths[0] else: raise Exception( f\"Should find", "AGRIPRC_2019 = ( 1, \"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2019/Landbouwgebruikspercelen_LV_2019_GewVLA_Shapefile.zip\", \"agriprc_2019.gpkg\", ) COMMUNES = (", "= dst_path.parent / \"tmp\" try: # Remove tmp dir if", "/ \"tmp\" try: # Remove tmp dir if it exists", "/ f\"{dst_path.stem}{url_path.suffix.lower()}\" logger.info(f\"Download tmp data to {tmp_path}\") urllib.request.urlretrieve(url, tmp_path) #", "Module to prepare test data for benchmarking geo operations. \"\"\"", "################################################################################ logger = logging.getLogger(__name__) ################################################################################ # The real work ################################################################################", "as determined by the suffix of dst_name. 
Args: url (str):", "tmp_path) # If the temp file is a .zip file,", "geofileops as gfo ################################################################################ # Some inits ################################################################################ logger =", "tmp data to {tmp_path}\") urllib.request.urlretrieve(url, tmp_path) # If the temp", "return Path(tempfile.gettempdir()) / \"geofileops_sampledata\" / dst_name else: return dst_dir /", "def prepare_dst_path(dst_name: str, dst_dir: Optional[Path] = None): if dst_dir is", "Path(url) if url_path.suffix.lower() == dst_path.suffix.lower(): logger.info(f\"Download to {dst_path}\") urllib.request.urlretrieve(url, dst_path)", "If the destination path is a directory, use the default", "f\"{dst_path.stem}{url_path.suffix.lower()}\" logger.info(f\"Download tmp data to {tmp_path}\") urllib.request.urlretrieve(url, tmp_path) # If", "to dir if tmp_path.suffix == \".zip\": # Unzip unzippedzip_dir =", "-*- \"\"\" Module to prepare test data for benchmarking geo", "the dir to downloaded the sample file to. If it", ".zip file, unzip to dir if tmp_path.suffix == \".zip\": #", "dst_name=self.filename, dst_dir=tmp_dir ) testfile_info = gfo.get_layerinfo(testfile_path) logger.debug( f\"TestFile {self.name} contains", "== dst_path.suffix.lower(): logger.info(f\"Download to {dst_path}\") urllib.request.urlretrieve(url, dst_path) else: # The", "# Remove tmp dir if it exists already if tmp_dir.exists():", "the sample file to. If it is None, a dir", "None. Returns: Path: the path to the downloaded sample file.", "prepare_dst_path(dst_name, dst_dir) # If the sample file already exists, return", "unzippedzip_dir = dst_path.parent / tmp_path.stem logger.info(f\"Unzip to {unzippedzip_dir}\") with zipfile.ZipFile(tmp_path,", "with zipfile.ZipFile(tmp_path, \"r\") as zip_ref: zip_ref.extractall(unzippedzip_dir) # Look for the", "str, dst_dir: Optional[Path] = None ) -> Path: \"\"\" Download", "by the suffix of dst_name. 
Args: url (str): the url", "dst_path = prepare_dst_path(dst_name, dst_dir) # If the sample file already", "will be unzipped. If needed, it will be converted to", "\"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2019/Landbouwgebruikspercelen_LV_2019_GewVLA_Shapefile.zip\", \"agriprc_2019.gpkg\", ) COMMUNES = ( 2, \"https://downloadagiv.blob.core.windows.net/referentiebestand-gemeenten/VoorlopigRefBestandGemeentegrenzen_2019-01-01/VRBG_toestand_16_05_2018_(geldend_vanaf_01_01_2019)_GewVLA_Shape.zip\", \"communes.gpkg\", )", "need to be done tmp_dir = dst_path.parent / \"tmp\" try:", "# Download file tmp_path = tmp_dir / f\"{dst_path.stem}{url_path.suffix.lower()}\" logger.info(f\"Download tmp", "gfo.makevalid(tmp_path, dst_path) finally: if tmp_dir.exists(): shutil.rmtree(tmp_dir) return dst_path def prepare_dst_path(dst_name:", "if url_path.suffix.lower() == dst_path.suffix.lower(): logger.info(f\"Download to {dst_path}\") urllib.request.urlretrieve(url, dst_path) else:", "default file name dst_path = prepare_dst_path(dst_name, dst_dir) # If the", "so the benchmark packages are found sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) import geofileops", "dir in the default tmp location will be used. Defaults", "self._value_ = value self.url = url self.filename = filename def", "# If the url points to a file with the", "for the file tmp_paths = [] for suffix in [\".shp\",", "{len(tmp_paths)}: \\n{pprint.pformat(tmp_paths)}\" ) if dst_path.suffix == tmp_path.suffix: gfo.move(tmp_path, dst_path) else:", "to prepare test data for benchmarking geo operations. 
\"\"\" import", ") return testfile_path def download_samplefile( url: str, dst_name: str, dst_dir:", "testfile_path = download_samplefile( url=self.url, dst_name=self.filename, dst_dir=tmp_dir ) testfile_info = gfo.get_layerinfo(testfile_path)", "done tmp_dir = dst_path.parent / \"tmp\" try: # Remove tmp", "dst_path.suffix == tmp_path.suffix: gfo.move(tmp_path, dst_path) else: logger.info(f\"Convert tmp file to", "\"https://downloadagiv.blob.core.windows.net/referentiebestand-gemeenten/VoorlopigRefBestandGemeentegrenzen_2019-01-01/VRBG_toestand_16_05_2018_(geldend_vanaf_01_01_2019)_GewVLA_Shape.zip\", \"communes.gpkg\", ) def __init__(self, value, url, filename): self._value_ =", "zip_ref: zip_ref.extractall(unzippedzip_dir) # Look for the file tmp_paths = []", "[] for suffix in [\".shp\", \".gpkg\"]: tmp_paths.extend(list(unzippedzip_dir.rglob(f\"*{suffix}\"))) if len(tmp_paths) ==", "def get_file(self, tmp_dir: Path) -> Path: testfile_path = download_samplefile( url=self.url,", "dst_path) else: # The file downloaded is different that the", "If it is None, a dir in the default tmp", "of the file to download dst_dir (Path): the dir to", "tmp dir if it exists already if tmp_dir.exists(): shutil.rmtree(tmp_dir) tmp_dir.mkdir(parents=True,", "of dst_name. 
Args: url (str): the url of the file", "geofile, found {len(tmp_paths)}: \\n{pprint.pformat(tmp_paths)}\" ) if dst_path.suffix == tmp_path.suffix: gfo.move(tmp_path,", "None: return Path(tempfile.gettempdir()) / \"geofileops_sampledata\" / dst_name else: return dst_dir", ") -> Path: \"\"\" Download a sample file to dest_path.", "import enum import logging from pathlib import Path import pprint", "= value self.url = url self.filename = filename def get_file(self,", "\"communes.gpkg\", ) def __init__(self, value, url, filename): self._value_ = value", "dst_dir: Optional[Path] = None ) -> Path: \"\"\" Download a", "a dir in the default tmp location will be used.", "url: str, dst_name: str, dst_dir: Optional[Path] = None ) ->", "# The real work ################################################################################ class TestFile(enum.Enum): AGRIPRC_2018 = (", "it is zipped, it will be unzipped. If needed, it", "dst_path # Make sure the destination directory exists dst_path.parent.mkdir(parents=True, exist_ok=True)", "geo operations. 
\"\"\" import enum import logging from pathlib import", "logging from pathlib import Path import pprint import shutil import", "2, \"https://downloadagiv.blob.core.windows.net/referentiebestand-gemeenten/VoorlopigRefBestandGemeentegrenzen_2019-01-01/VRBG_toestand_16_05_2018_(geldend_vanaf_01_01_2019)_GewVLA_Shape.zip\", \"communes.gpkg\", ) def __init__(self, value, url, filename): self._value_", "name dst_path = prepare_dst_path(dst_name, dst_dir) # If the sample file", "tmp_dir.exists(): shutil.rmtree(tmp_dir) return dst_path def prepare_dst_path(dst_name: str, dst_dir: Optional[Path] =", "= Path(url) if url_path.suffix.lower() == dst_path.suffix.lower(): logger.info(f\"Download to {dst_path}\") urllib.request.urlretrieve(url,", "1: tmp_path = tmp_paths[0] else: raise Exception( f\"Should find 1", "shutil.rmtree(tmp_dir) tmp_dir.mkdir(parents=True, exist_ok=True) # Download file tmp_path = tmp_dir /", "exists dst_path.parent.mkdir(parents=True, exist_ok=True) # If the url points to a", "import tempfile from typing import Optional import urllib.request import zipfile", "is different that the destination wanted, so some # converting", ") AGRIPRC_2019 = ( 1, \"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2019/Landbouwgebruikspercelen_LV_2019_GewVLA_Shapefile.zip\", \"agriprc_2019.gpkg\", ) COMMUNES =", "as gfo ################################################################################ # Some inits ################################################################################ logger = logging.getLogger(__name__)", "directory exists dst_path.parent.mkdir(parents=True, exist_ok=True) # If the url points to", "url_path.suffix.lower() == dst_path.suffix.lower(): logger.info(f\"Download to {dst_path}\") urllib.request.urlretrieve(url, dst_path) else: #", ") def __init__(self, value, url, filename): self._value_ = value self.url", "f\"TestFile {self.name} contains {testfile_info.featurecount} rows.\" ) return testfile_path def 
download_samplefile(", "\"\"\" import enum import logging from pathlib import Path import", "to downloaded the sample file to. If it is None,", "sample file to. If it is None, a dir in", "logger.info(f\"Convert tmp file to {dst_path}\") gfo.makevalid(tmp_path, dst_path) finally: if tmp_dir.exists():", "if it exists already if tmp_dir.exists(): shutil.rmtree(tmp_dir) tmp_dir.mkdir(parents=True, exist_ok=True) #", "temp file is a .zip file, unzip to dir if", "= dst_path.parent / tmp_path.stem logger.info(f\"Unzip to {unzippedzip_dir}\") with zipfile.ZipFile(tmp_path, \"r\")", "Remove tmp dir if it exists already if tmp_dir.exists(): shutil.rmtree(tmp_dir)", "typing import Optional import urllib.request import zipfile # Add path", "None): if dst_dir is None: return Path(tempfile.gettempdir()) / \"geofileops_sampledata\" /", "Returns: Path: the path to the downloaded sample file. \"\"\"", "type as determined by the suffix of dst_name. Args: url", "the destination wanted, so some # converting will need to", "for suffix in [\".shp\", \".gpkg\"]: tmp_paths.extend(list(unzippedzip_dir.rglob(f\"*{suffix}\"))) if len(tmp_paths) == 1:", "is None, a dir in the default tmp location will", "the url points to a file with the same suffix", "zipfile # Add path so the benchmark packages are found", "Args: url (str): the url of the file to download", "# Add path so the benchmark packages are found sys.path.insert(0,", "import shutil import sys import tempfile from typing import Optional", "= None): if dst_dir is None: return Path(tempfile.gettempdir()) / \"geofileops_sampledata\"", "inits ################################################################################ logger = logging.getLogger(__name__) ################################################################################ # The real work", "sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) import geofileops as gfo ################################################################################ # Some 
inits", "file type as determined by the suffix of dst_name. Args:", "dir if it exists already if tmp_dir.exists(): shutil.rmtree(tmp_dir) tmp_dir.mkdir(parents=True, exist_ok=True)", "url (str): the url of the file to download dst_dir", "gfo ################################################################################ # Some inits ################################################################################ logger = logging.getLogger(__name__) ################################################################################", "= ( 0, \"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2018/Landbouwgebruikspercelen_LV_2018_GewVLA_Shape.zip\", \"agriprc_2018.gpkg\", ) AGRIPRC_2019 = ( 1,", "dst_path.parent / tmp_path.stem logger.info(f\"Unzip to {unzippedzip_dir}\") with zipfile.ZipFile(tmp_path, \"r\") as", "\".gpkg\"]: tmp_paths.extend(list(unzippedzip_dir.rglob(f\"*{suffix}\"))) if len(tmp_paths) == 1: tmp_path = tmp_paths[0] else:", "file is a .zip file, unzip to dir if tmp_path.suffix", "needed, it will be converted to the file type as", "{tmp_path}\") urllib.request.urlretrieve(url, tmp_path) # If the temp file is a", "are found sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) import geofileops as gfo ################################################################################ #", "(Path): the dir to downloaded the sample file to. If", "= None ) -> Path: \"\"\" Download a sample file", "= prepare_dst_path(dst_name, dst_dir) # If the sample file already exists,", "a file with the same suffix as the dst_path, #", "Path: the path to the downloaded sample file. \"\"\" #", "download url_path = Path(url) if url_path.suffix.lower() == dst_path.suffix.lower(): logger.info(f\"Download to", "str, dst_dir: Optional[Path] = None): if dst_dir is None: return", "determined by the suffix of dst_name. Args: url (str): the", "-> Path: \"\"\" Download a sample file to dest_path. 
If", "enum import logging from pathlib import Path import pprint import", "utf-8 -*- \"\"\" Module to prepare test data for benchmarking", "__init__(self, value, url, filename): self._value_ = value self.url = url", "to {dst_path}\") gfo.makevalid(tmp_path, dst_path) finally: if tmp_dir.exists(): shutil.rmtree(tmp_dir) return dst_path", "exist_ok=True) # Download file tmp_path = tmp_dir / f\"{dst_path.stem}{url_path.suffix.lower()}\" logger.info(f\"Download", "the file to download dst_dir (Path): the dir to downloaded", "that the destination wanted, so some # converting will need", "Download file tmp_path = tmp_dir / f\"{dst_path.stem}{url_path.suffix.lower()}\" logger.info(f\"Download tmp data", "converted to the file type as determined by the suffix", "try: # Remove tmp dir if it exists already if", "url=self.url, dst_name=self.filename, dst_dir=tmp_dir ) testfile_info = gfo.get_layerinfo(testfile_path) logger.debug( f\"TestFile {self.name}", "\"agriprc_2019.gpkg\", ) COMMUNES = ( 2, \"https://downloadagiv.blob.core.windows.net/referentiebestand-gemeenten/VoorlopigRefBestandGemeentegrenzen_2019-01-01/VRBG_toestand_16_05_2018_(geldend_vanaf_01_01_2019)_GewVLA_Shape.zip\", \"communes.gpkg\", ) def", "different that the destination wanted, so some # converting will", "return if dst_path.exists(): return dst_path # Make sure the destination", "exist_ok=True) # If the url points to a file with", "dst_path) else: logger.info(f\"Convert tmp file to {dst_path}\") gfo.makevalid(tmp_path, dst_path) finally:", "is None: return Path(tempfile.gettempdir()) / \"geofileops_sampledata\" / dst_name else: return", "tmp_dir / f\"{dst_path.stem}{url_path.suffix.lower()}\" logger.info(f\"Download tmp data to {tmp_path}\") urllib.request.urlretrieve(url, tmp_path)", "is zipped, it will be unzipped. 
If needed, it will", "{dst_path}\") gfo.makevalid(tmp_path, dst_path) finally: if tmp_dir.exists(): shutil.rmtree(tmp_dir) return dst_path def", "Path import pprint import shutil import sys import tempfile from", "dir to downloaded the sample file to. If it is", "benchmarking geo operations. \"\"\" import enum import logging from pathlib", "to be done tmp_dir = dst_path.parent / \"tmp\" try: #", "dst_dir is None: return Path(tempfile.gettempdir()) / \"geofileops_sampledata\" / dst_name else:", "the destination directory exists dst_path.parent.mkdir(parents=True, exist_ok=True) # If the url", "it will be converted to the file type as determined", "If the temp file is a .zip file, unzip to", "If the sample file already exists, return if dst_path.exists(): return", "dst_dir (Path): the dir to downloaded the sample file to.", "path is a directory, use the default file name dst_path", "( 0, \"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2018/Landbouwgebruikspercelen_LV_2018_GewVLA_Shape.zip\", \"agriprc_2018.gpkg\", ) AGRIPRC_2019 = ( 1, \"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2019/Landbouwgebruikspercelen_LV_2019_GewVLA_Shapefile.zip\",", "the destination path is a directory, use the default file", "sample file. \"\"\" # If the destination path is a", "-> Path: testfile_path = download_samplefile( url=self.url, dst_name=self.filename, dst_dir=tmp_dir ) testfile_info", "self.url = url self.filename = filename def get_file(self, tmp_dir: Path)", "import sys import tempfile from typing import Optional import urllib.request", "for benchmarking geo operations. \"\"\" import enum import logging from", "tmp location will be used. Defaults to None. 
Returns: Path:", "it is None, a dir in the default tmp location", "{testfile_info.featurecount} rows.\" ) return testfile_path def download_samplefile( url: str, dst_name:", "1, \"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2019/Landbouwgebruikspercelen_LV_2019_GewVLA_Shapefile.zip\", \"agriprc_2019.gpkg\", ) COMMUNES = ( 2, \"https://downloadagiv.blob.core.windows.net/referentiebestand-gemeenten/VoorlopigRefBestandGemeentegrenzen_2019-01-01/VRBG_toestand_16_05_2018_(geldend_vanaf_01_01_2019)_GewVLA_Shape.zip\", \"communes.gpkg\",", "Download a sample file to dest_path. If it is zipped,", "destination wanted, so some # converting will need to be", "Path: \"\"\" Download a sample file to dest_path. If it", "if tmp_dir.exists(): shutil.rmtree(tmp_dir) tmp_dir.mkdir(parents=True, exist_ok=True) # Download file tmp_path =", "as the dst_path, # just download url_path = Path(url) if", "directory, use the default file name dst_path = prepare_dst_path(dst_name, dst_dir)", "file downloaded is different that the destination wanted, so some", "get_file(self, tmp_dir: Path) -> Path: testfile_path = download_samplefile( url=self.url, dst_name=self.filename,", "testfile_path def download_samplefile( url: str, dst_name: str, dst_dir: Optional[Path] =", "if dst_path.exists(): return dst_path # Make sure the destination directory", "import Path import pprint import shutil import sys import tempfile", "= tmp_dir / f\"{dst_path.stem}{url_path.suffix.lower()}\" logger.info(f\"Download tmp data to {tmp_path}\") urllib.request.urlretrieve(url,", "0, \"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2018/Landbouwgebruikspercelen_LV_2018_GewVLA_Shape.zip\", \"agriprc_2018.gpkg\", ) AGRIPRC_2019 = ( 1, \"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2019/Landbouwgebruikspercelen_LV_2019_GewVLA_Shapefile.zip\", \"agriprc_2019.gpkg\",", "gfo.move(tmp_path, dst_path) else: logger.info(f\"Convert tmp file to {dst_path}\") 
gfo.makevalid(tmp_path, dst_path)", "it exists already if tmp_dir.exists(): shutil.rmtree(tmp_dir) tmp_dir.mkdir(parents=True, exist_ok=True) # Download", "to the downloaded sample file. \"\"\" # If the destination", "exists already if tmp_dir.exists(): shutil.rmtree(tmp_dir) tmp_dir.mkdir(parents=True, exist_ok=True) # Download file", "sure the destination directory exists dst_path.parent.mkdir(parents=True, exist_ok=True) # If the", "to dest_path. If it is zipped, it will be unzipped.", "unzipped. If needed, it will be converted to the file", "same suffix as the dst_path, # just download url_path =", "shutil.rmtree(tmp_dir) return dst_path def prepare_dst_path(dst_name: str, dst_dir: Optional[Path] = None):", "to the file type as determined by the suffix of", "Some inits ################################################################################ logger = logging.getLogger(__name__) ################################################################################ # The real", "filename): self._value_ = value self.url = url self.filename = filename", "url_path = Path(url) if url_path.suffix.lower() == dst_path.suffix.lower(): logger.info(f\"Download to {dst_path}\")", "will need to be done tmp_dir = dst_path.parent / \"tmp\"", "The real work ################################################################################ class TestFile(enum.Enum): AGRIPRC_2018 = ( 0,", "tempfile from typing import Optional import urllib.request import zipfile #", "found sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) import geofileops as gfo ################################################################################ # Some", "{dst_path}\") urllib.request.urlretrieve(url, dst_path) else: # The file downloaded is different", "be used. Defaults to None. 
Returns: Path: the path to", "dst_dir: Optional[Path] = None): if dst_dir is None: return Path(tempfile.gettempdir())", "tmp_path = tmp_dir / f\"{dst_path.stem}{url_path.suffix.lower()}\" logger.info(f\"Download tmp data to {tmp_path}\")", "will be converted to the file type as determined by", "the temp file is a .zip file, unzip to dir", "the dst_path, # just download url_path = Path(url) if url_path.suffix.lower()", "just download url_path = Path(url) if url_path.suffix.lower() == dst_path.suffix.lower(): logger.info(f\"Download", "If it is zipped, it will be unzipped. If needed,", "tmp_paths = [] for suffix in [\".shp\", \".gpkg\"]: tmp_paths.extend(list(unzippedzip_dir.rglob(f\"*{suffix}\"))) if", "shutil import sys import tempfile from typing import Optional import", "\"r\") as zip_ref: zip_ref.extractall(unzippedzip_dir) # Look for the file tmp_paths", "# -*- coding: utf-8 -*- \"\"\" Module to prepare test", "to {unzippedzip_dir}\") with zipfile.ZipFile(tmp_path, \"r\") as zip_ref: zip_ref.extractall(unzippedzip_dir) # Look", "file with the same suffix as the dst_path, # just", "zipfile.ZipFile(tmp_path, \"r\") as zip_ref: zip_ref.extractall(unzippedzip_dir) # Look for the file", "== tmp_path.suffix: gfo.move(tmp_path, dst_path) else: logger.info(f\"Convert tmp file to {dst_path}\")", "def download_samplefile( url: str, dst_name: str, dst_dir: Optional[Path] = None", "Optional[Path] = None): if dst_dir is None: return Path(tempfile.gettempdir()) /", "used. Defaults to None. 
Returns: Path: the path to the", "( 1, \"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2019/Landbouwgebruikspercelen_LV_2019_GewVLA_Shapefile.zip\", \"agriprc_2019.gpkg\", ) COMMUNES = ( 2, \"https://downloadagiv.blob.core.windows.net/referentiebestand-gemeenten/VoorlopigRefBestandGemeentegrenzen_2019-01-01/VRBG_toestand_16_05_2018_(geldend_vanaf_01_01_2019)_GewVLA_Shape.zip\",", "the benchmark packages are found sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) import geofileops as", "sample file already exists, return if dst_path.exists(): return dst_path #", "is a .zip file, unzip to dir if tmp_path.suffix ==", "found {len(tmp_paths)}: \\n{pprint.pformat(tmp_paths)}\" ) if dst_path.suffix == tmp_path.suffix: gfo.move(tmp_path, dst_path)", "a .zip file, unzip to dir if tmp_path.suffix == \".zip\":", "urllib.request import zipfile # Add path so the benchmark packages", "If needed, it will be converted to the file type", "# If the destination path is a directory, use the", "################################################################################ class TestFile(enum.Enum): AGRIPRC_2018 = ( 0, \"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2018/Landbouwgebruikspercelen_LV_2018_GewVLA_Shape.zip\", \"agriprc_2018.gpkg\", )", "class TestFile(enum.Enum): AGRIPRC_2018 = ( 0, \"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2018/Landbouwgebruikspercelen_LV_2018_GewVLA_Shape.zip\", \"agriprc_2018.gpkg\", ) AGRIPRC_2019", "AGRIPRC_2018 = ( 0, \"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2018/Landbouwgebruikspercelen_LV_2018_GewVLA_Shape.zip\", \"agriprc_2018.gpkg\", ) AGRIPRC_2019 = (", "packages are found sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) import geofileops as gfo ################################################################################", "download_samplefile( url: str, dst_name: str, dst_dir: Optional[Path] = 
None )", "logger.debug( f\"TestFile {self.name} contains {testfile_info.featurecount} rows.\" ) return testfile_path def", "suffix of dst_name. Args: url (str): the url of the", "points to a file with the same suffix as the", "file name dst_path = prepare_dst_path(dst_name, dst_dir) # If the sample", "tmp_paths[0] else: raise Exception( f\"Should find 1 geofile, found {len(tmp_paths)}:", "\\n{pprint.pformat(tmp_paths)}\" ) if dst_path.suffix == tmp_path.suffix: gfo.move(tmp_path, dst_path) else: logger.info(f\"Convert", "if dst_path.suffix == tmp_path.suffix: gfo.move(tmp_path, dst_path) else: logger.info(f\"Convert tmp file", "= logging.getLogger(__name__) ################################################################################ # The real work ################################################################################ class TestFile(enum.Enum):", "downloaded the sample file to. If it is None, a", "Path(tempfile.gettempdir()) / \"geofileops_sampledata\" / dst_name else: return dst_dir / dst_name", "path so the benchmark packages are found sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent)) import", "self.filename = filename def get_file(self, tmp_dir: Path) -> Path: testfile_path", "Path) -> Path: testfile_path = download_samplefile( url=self.url, dst_name=self.filename, dst_dir=tmp_dir )", "to {dst_path}\") urllib.request.urlretrieve(url, dst_path) else: # The file downloaded is", "return testfile_path def download_samplefile( url: str, dst_name: str, dst_dir: Optional[Path]", "data to {tmp_path}\") urllib.request.urlretrieve(url, tmp_path) # If the temp file", "tmp_dir.mkdir(parents=True, exist_ok=True) # Download file tmp_path = tmp_dir / f\"{dst_path.stem}{url_path.suffix.lower()}\"", "Look for the file tmp_paths = [] for suffix in", "raise Exception( f\"Should find 1 geofile, found {len(tmp_paths)}: \\n{pprint.pformat(tmp_paths)}\" )", "to. 
If it is None, a dir in the default", "else: logger.info(f\"Convert tmp file to {dst_path}\") gfo.makevalid(tmp_path, dst_path) finally: if", "to None. Returns: Path: the path to the downloaded sample", "None ) -> Path: \"\"\" Download a sample file to", "return dst_path # Make sure the destination directory exists dst_path.parent.mkdir(parents=True,", "exists, return if dst_path.exists(): return dst_path # Make sure the", "Optional[Path] = None ) -> Path: \"\"\" Download a sample", "# Make sure the destination directory exists dst_path.parent.mkdir(parents=True, exist_ok=True) #", "will be used. Defaults to None. Returns: Path: the path", "# Look for the file tmp_paths = [] for suffix", "dst_dir) # If the sample file already exists, return if", "unzip to dir if tmp_path.suffix == \".zip\": # Unzip unzippedzip_dir", "\"\"\" Module to prepare test data for benchmarking geo operations.", "url points to a file with the same suffix as", "Defaults to None. Returns: Path: the path to the downloaded", ") if dst_path.suffix == tmp_path.suffix: gfo.move(tmp_path, dst_path) else: logger.info(f\"Convert tmp", "file tmp_path = tmp_dir / f\"{dst_path.stem}{url_path.suffix.lower()}\" logger.info(f\"Download tmp data to", "dest_path. If it is zipped, it will be unzipped. If", "if dst_dir is None: return Path(tempfile.gettempdir()) / \"geofileops_sampledata\" / dst_name", "\"tmp\" try: # Remove tmp dir if it exists already", "dst_path def prepare_dst_path(dst_name: str, dst_dir: Optional[Path] = None): if dst_dir", "dst_path.parent.mkdir(parents=True, exist_ok=True) # If the url points to a file", "= ( 2, \"https://downloadagiv.blob.core.windows.net/referentiebestand-gemeenten/VoorlopigRefBestandGemeentegrenzen_2019-01-01/VRBG_toestand_16_05_2018_(geldend_vanaf_01_01_2019)_GewVLA_Shape.zip\", \"communes.gpkg\", ) def __init__(self, value, url,", "location will be used. Defaults to None. 
Returns: Path: the", "use the default file name dst_path = prepare_dst_path(dst_name, dst_dir) #", "so some # converting will need to be done tmp_dir", "################################################################################ # Some inits ################################################################################ logger = logging.getLogger(__name__) ################################################################################ #", "already if tmp_dir.exists(): shutil.rmtree(tmp_dir) tmp_dir.mkdir(parents=True, exist_ok=True) # Download file tmp_path", "operations. \"\"\" import enum import logging from pathlib import Path", "tmp_path.stem logger.info(f\"Unzip to {unzippedzip_dir}\") with zipfile.ZipFile(tmp_path, \"r\") as zip_ref: zip_ref.extractall(unzippedzip_dir)", "path to the downloaded sample file. \"\"\" # If the" ]
[ "\"DATA\", \"STATIONS\"), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", \"STATIONS\")) for", "@click.option('--ref_dir', required=True, help=\"the reference specfem directory\", type=str) @click.option('--cmts_dir', required=True, help=\"the", "\"cmts_raw\")) sh.mkdir(\"-p\", join(main_dir, \"cmts\", \"cmts_generated\")) for depth_per in depth_perturbation_list: sh.mkdir(\"-p\",", "sh.cp(\"-r\", join(main_dir, \"ref\"), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\")) # mv DATA", "depth_perturbation.split(\",\")] setup_basic_structure(main_dir, ref_dir, cmts_dir, depth_perturbation_list) generated_cmts_dir = join(main_dir, \"cmts\", \"cmts_generated\")", "= obspy.read_events(cmt_file)[0] # gcmt_id = event.resource_id.id.split(\"/\")[-2] # there are some", "join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", lnfile)) # ln in", "sh.mkdir(\"-p\", join(main_dir, \"cmts\")) sh.cp(\"-r\", cmts_dir, join(main_dir, \"cmts\", \"cmts_raw\")) sh.mkdir(\"-p\", join(main_dir,", "cmt_name), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", \"CMTSOLUTION\")) sh.cp(join(main_dir, \"ref\", \"DATA\",", "get cmts names cmt_dirs = glob(join(main_dir, \"cmts\", \"cmts_raw\", \"*\")) cmt_names", "cmts_dir, depth_perturbation_list) generated_cmts_dir = join(main_dir, \"cmts\", \"cmts_generated\") working_cmts_dir = join(main_dir,", "cmts sh.mkdir(\"-p\", join(main_dir, \"cmts\")) sh.cp(\"-r\", cmts_dir, join(main_dir, \"cmts\", \"cmts_raw\")) sh.mkdir(\"-p\",", "\"DATA\")) # cp and ln files in DATA toln =", "join(main_dir, \"ref\")) # refine the structure in ref sh.rm(\"-rf\", join(main_dir,", "cmts_dir, join(main_dir, \"cmts\", \"cmts_raw\")) sh.mkdir(\"-p\", join(main_dir, \"cmts\", \"cmts_generated\")) for depth_per", "\"*\")) cmt_names = [item.split(\"/\")[-1] for item in cmt_dirs] # mkdirs", "\"ref\", \"DATA\")) sh.mv(join(main_dir, \"utils\"), join(main_dir, \"ref\", \"utils\")) # mkdir DATA", "\"*\")) for cmt_file in cmt_names: 
event = obspy.read_events(cmt_file)[0] # gcmt_id", "sh.cp(\"-r\", ref_dir, join(main_dir, \"ref\")) # refine the structure in ref", "# cp and ln files in DATA toln = [\"cemRequest\",", "\"DATABASES_MPI\")) sh.ln(\"-s\", join(output_dir, \"OUTPUT_FILES\", cmt_name, f\"d{depth_per}\"), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\",", "sh.mkdir(join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\")) # cp and ln files", "utils to upper level sh.mv(join(main_dir, \"ref\", \"DATA\"), main_dir) sh.mv(join(main_dir, \"ref\",", "depth_perturbation_list) setup_structure_after_generat_cmts( main_dir, output_dir, depth_perturbation_list) if __name__ == \"__main__\": main()", "to upper level sh.mv(join(main_dir, \"ref\", \"DATA\"), main_dir) sh.mv(join(main_dir, \"ref\", \"utils\"),", "ref to working dirs sh.cp(\"-r\", join(main_dir, \"ref\"), join(main_dir, \"work\", cmt_name,", "in cmt_dirs] # mkdirs for cmt_name in cmt_names: sh.mkdir(join(main_dir, \"work\",", "depth relocation directory \"\"\" import obspy import sh import numpy", "import copy def generate_new_cmtsolution_files(cmts_dir, generated_cmts_dir, depth_perturbation_list): cmt_names = glob(join(cmts_dir, \"*\"))", "I'd like to read in the event again event_this_depth =", "\"cmts_generated\")) for depth_per in depth_perturbation_list: sh.mkdir(\"-p\", join(main_dir, \"cmts\", \"cmts_generated\", f\"d{depth_per}\"))", "f\"d{depth_per}\", \"DATA\", \"Par_file\")) sh.cp(join(main_dir, \"ref\", \"DATA\", \"STATIONS\"), join( main_dir, \"work\",", "\"work\", cmt_name, f\"d{depth_per}\")) # cp ref to working dirs sh.cp(\"-r\",", "\"crust1.0\", \"crust2.0\", \"crustmap\", \"epcrust\", \"eucrust-07\", \"GLL\", \"heterogen\", \"Lebedev_sea99\", \"Montagner_model\", \"old\",", "join from glob import glob import copy def generate_new_cmtsolution_files(cmts_dir, generated_cmts_dir,", "main(main_dir, output_dir, ref_dir, cmts_dir, depth_perturbation): depth_perturbation_list = [float(item) for item", 
"\"work\")) def setup_structure_after_generat_cmts(main_dir, output_dir, depth_perturbation_list): # get cmts names cmt_dirs", "depth_per in depth_perturbation_list: # sh.mkdir(join(main_dir, \"work\", cmt_name, f\"d{depth_per}\")) # cp", "\"s20rts\", \"s362ani\", \"s40rts\", \"Simons_model\", \"topo_bathy\", \"Zhao_JP_model\"] for cmt_name in cmt_names:", "cmt_name, f\"d{depth_per}\")) sh.mkdir(\"-p\", join(output_dir, \"OUTPUT_FILES\", cmt_name, f\"d{depth_per}\")) sh.ln(\"-s\", join(output_dir, \"DATABASES_MPI\",", "cmt_names = [item.split(\"/\")[-1] for item in cmt_dirs] # mkdirs for", "output_dir, depth_perturbation_list): # get cmts names cmt_dirs = glob(join(main_dir, \"cmts\",", "np import click from os.path import join from glob import", "event = obspy.read_events(cmt_file)[0] # gcmt_id = event.resource_id.id.split(\"/\")[-2] # there are", "f\"d{depth_per}\", cmt_name), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", \"CMTSOLUTION\")) sh.cp(join(main_dir, \"ref\",", "toln_work = [\"utils\"] for lnfile in toln_work: sh.ln(\"-s\", join(main_dir, \"ref\",", "refine the structure in ref sh.rm(\"-rf\", join(main_dir, \"ref\", \"DATABASES_MPI\")) sh.rm(\"-rf\",", "working directory sh.mkdir(\"-p\", join(main_dir, \"work\")) def setup_structure_after_generat_cmts(main_dir, output_dir, depth_perturbation_list): #", "generated_cmts_dir, depth_perturbation_list): cmt_names = glob(join(cmts_dir, \"*\")) for cmt_file in cmt_names:", "to working dirs sh.cp(\"-r\", join(main_dir, \"ref\"), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\"))", "@click.option('--cmts_dir', required=True, help=\"the cmt solution directory\", type=str) @click.option('--depth_perturbation', required=True, help=\"the", "\"Lebedev_sea99\", \"Montagner_model\", \"old\", \"PPM\", \"QRFSI12\", \"s20rts\", \"s362ani\", \"s40rts\", \"Simons_model\", \"topo_bathy\",", "OUTPUT_FILES sh.mkdir(\"-p\", output_dir) sh.mkdir(\"-p\", join(output_dir, \"DATABASES_MPI\")) sh.mkdir(\"-p\", 
join(output_dir, \"OUTPUT_FILES\")) for", "the structure in ref sh.rm(\"-rf\", join(main_dir, \"ref\", \"DATABASES_MPI\")) sh.rm(\"-rf\", join(main_dir,", "sh.rm(\"-rf\", join(main_dir, \"ref\", \"DATABASES_MPI\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"EXAMPLES\")) sh.rm(\"-rf\", join(main_dir,", "required=True, help=\"the output directory in scratch\", type=str) @click.option('--ref_dir', required=True, help=\"the", "sh.mkdir(\"-p\", join(output_dir, \"DATABASES_MPI\")) sh.mkdir(\"-p\", join(output_dir, \"OUTPUT_FILES\")) for cmt_name in cmt_names:", "reference specfem directory\", type=str) @click.option('--cmts_dir', required=True, help=\"the cmt solution directory\",", "join(main_dir, \"ref\", \"OUTPUT_FILES\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"doc\")) sh.rm(\"-rf\", join(main_dir, \"ref\",", "sh import numpy as np import click from os.path import", "\"DATA\"), join(main_dir, \"ref\", \"DATA\")) sh.mv(join(main_dir, \"utils\"), join(main_dir, \"ref\", \"utils\")) #", "always problem in copy event, so here I'd like to", "\"DATA\", lnfile)) # ln in work files toln_work = [\"utils\"]", "[float(item) for item in depth_perturbation.split(\",\")] setup_basic_structure(main_dir, ref_dir, cmts_dir, depth_perturbation_list) generated_cmts_dir", "depth_perturbation): depth_perturbation_list = [float(item) for item in depth_perturbation.split(\",\")] setup_basic_structure(main_dir, ref_dir,", "from glob import glob import copy def generate_new_cmtsolution_files(cmts_dir, generated_cmts_dir, depth_perturbation_list):", "the event again event_this_depth = obspy.read_events(cmt_file)[0] # event_this_depth = event.copy()", "DATA toln = [\"cemRequest\", \"crust1.0\", \"crust2.0\", \"crustmap\", \"epcrust\", \"eucrust-07\", \"GLL\",", "DATA and utils back to ref sh.mv(join(main_dir, \"DATA\"), join(main_dir, \"ref\",", "# ln in work files toln_work = [\"utils\"] for lnfile", "\"doc\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"tests\")) # mv DATA and utils", 
"depth_per in depth_perturbation_list: sh.cp(join(main_dir, \"cmts\", \"cmts_generated\", f\"d{depth_per}\", cmt_name), join(main_dir, \"work\",", "join(main_dir, \"ref\", \"DATABASES_MPI\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"EXAMPLES\")) sh.rm(\"-rf\", join(main_dir, \"ref\",", "depth_perturbation_list = [float(item) for item in depth_perturbation.split(\",\")] setup_basic_structure(main_dir, ref_dir, cmts_dir,", "# ref sh.cp(\"-r\", ref_dir, join(main_dir, \"ref\")) # refine the structure", "glob import copy def generate_new_cmtsolution_files(cmts_dir, generated_cmts_dir, depth_perturbation_list): cmt_names = glob(join(cmts_dir,", "depth_perturbation_list: sh.cp(join(main_dir, \"cmts\", \"cmts_generated\", f\"d{depth_per}\", cmt_name), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\",", "ref sh.cp(\"-r\", ref_dir, join(main_dir, \"ref\")) # refine the structure in", "directory in scratch\", type=str) @click.option('--ref_dir', required=True, help=\"the reference specfem directory\",", "required=True, help=\"the depth perturbation, use somthing like -3,-1,5 (in km)\",", "\"OUTPUT_FILES\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"doc\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"tests\")) #", "dirs sh.cp(\"-r\", join(main_dir, \"ref\"), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\")) # mv", "there are some problems in changing names gcmt_id = cmt_file.split(\"/\")[-1]", "copy event, so here I'd like to read in the", "join(output_dir, \"DATABASES_MPI\", cmt_name, f\"d{depth_per}\")) sh.mkdir(\"-p\", join(output_dir, \"OUTPUT_FILES\", cmt_name, f\"d{depth_per}\")) sh.ln(\"-s\",", "\"DATA\", \"Par_file\"), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", \"Par_file\")) sh.cp(join(main_dir,", "earthquake depth relocation directory \"\"\" import obspy import sh import", "f\"d{depth_per}\")) sh.mkdir(\"-p\", join(output_dir, \"OUTPUT_FILES\", cmt_name, f\"d{depth_per}\")) sh.ln(\"-s\", join(output_dir, \"DATABASES_MPI\", cmt_name,", 
"setup_basic_structure(main_dir, ref_dir, cmts_dir, depth_perturbation_list) generated_cmts_dir = join(main_dir, \"cmts\", \"cmts_generated\") working_cmts_dir", "for depth_per in depth_perturbation_list: sh.cp(join(main_dir, \"cmts\", \"cmts_generated\", f\"d{depth_per}\", cmt_name), join(main_dir,", "f\"d{depth_per}\"), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"OUTPUT_FILES\")) @click.command() @click.option('--main_dir', required=True, help=\"the", "ref_dir, cmts_dir, depth_perturbation_list): # main sh.mkdir(\"-p\", main_dir) # ref sh.cp(\"-r\",", "depth perturbation, use somthing like -3,-1,5 (in km)\", type=str) def", "\"cmts\", \"cmts_raw\") generate_new_cmtsolution_files( working_cmts_dir, generated_cmts_dir, depth_perturbation_list) setup_structure_after_generat_cmts( main_dir, output_dir, depth_perturbation_list)", "sh.rm(\"-rf\", join(main_dir, \"ref\", \"doc\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"tests\")) # mv", "use somthing like -3,-1,5 (in km)\", type=str) def main(main_dir, output_dir,", "\"cmts_raw\", \"*\")) cmt_names = [item.split(\"/\")[-1] for item in cmt_dirs] #", "files toln_work = [\"utils\"] for lnfile in toln_work: sh.ln(\"-s\", join(main_dir,", "names cmt_dirs = glob(join(main_dir, \"cmts\", \"cmts_raw\", \"*\")) cmt_names = [item.split(\"/\")[-1]", "f\"d{depth_per}\", gcmt_id) event_this_depth.write(generated_name, format=\"CMTSOLUTION\") def setup_basic_structure(main_dir, ref_dir, cmts_dir, depth_perturbation_list): #", "cmt_name, f\"d{depth_per}\", lnfile)) # mkdir and ln DATABASE_MPI and OUTPUT_FILES", "help=\"the reference specfem directory\", type=str) @click.option('--cmts_dir', required=True, help=\"the cmt solution", "like f\"{generated_cmts_dir}/d-3\" have already been created for depth_per in depth_perturbation_list:", "toln_work: sh.ln(\"-s\", join(main_dir, \"ref\", lnfile), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\",", "ref sh.mv(join(main_dir, \"DATA\"), join(main_dir, \"ref\", \"DATA\")) 
sh.mv(join(main_dir, \"utils\"), join(main_dir, \"ref\",", "for lnfile in toln_work: sh.ln(\"-s\", join(main_dir, \"ref\", lnfile), join( main_dir,", "setup_basic_structure(main_dir, ref_dir, cmts_dir, depth_perturbation_list): # main sh.mkdir(\"-p\", main_dir) # ref", "sh.rm(\"-rf\", join(main_dir, \"ref\", \"tests\")) # mv DATA and utils to", "join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", \"CMTSOLUTION\")) sh.cp(join(main_dir, \"ref\", \"DATA\", \"Par_file\"),", "\"work\", cmt_name, f\"d{depth_per}\", \"DATABASES_MPI\")) sh.ln(\"-s\", join(output_dir, \"OUTPUT_FILES\", cmt_name, f\"d{depth_per}\"), join(main_dir,", "gcmt_id) event_this_depth.write(generated_name, format=\"CMTSOLUTION\") def setup_basic_structure(main_dir, ref_dir, cmts_dir, depth_perturbation_list): # main", "cmt_name, f\"d{depth_per}\", \"DATA\")) # cp and ln files in DATA", "ref sh.rm(\"-rf\", join(main_dir, \"ref\", \"DATABASES_MPI\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"EXAMPLES\")) sh.rm(\"-rf\",", "obspy import sh import numpy as np import click from", "cmt_name, f\"d{depth_per}\")) sh.ln(\"-s\", join(output_dir, \"DATABASES_MPI\", cmt_name, f\"d{depth_per}\"), join(main_dir, \"work\", cmt_name,", "\"work\", cmt_name, f\"d{depth_per}\", \"DATA\", \"CMTSOLUTION\")) sh.cp(join(main_dir, \"ref\", \"DATA\", \"Par_file\"), join(", "depth_perturbation_list: # sh.mkdir(join(main_dir, \"work\", cmt_name, f\"d{depth_per}\")) # cp ref to", "import click from os.path import join from glob import glob", "utils back to ref sh.mv(join(main_dir, \"DATA\"), join(main_dir, \"ref\", \"DATA\")) sh.mv(join(main_dir,", "level sh.mv(join(main_dir, \"ref\", \"DATA\"), main_dir) sh.mv(join(main_dir, \"ref\", \"utils\"), main_dir) #", "join(output_dir, \"DATABASES_MPI\", cmt_name, f\"d{depth_per}\"), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATABASES_MPI\")) sh.ln(\"-s\",", "numpy as np import click from os.path import join from", "# there are always problem in copy event, so 
here", "ln files in DATA toln = [\"cemRequest\", \"crust1.0\", \"crust2.0\", \"crustmap\",", "def main(main_dir, output_dir, ref_dir, cmts_dir, depth_perturbation): depth_perturbation_list = [float(item) for", "sh.mv(join(main_dir, \"ref\", \"DATA\"), main_dir) sh.mv(join(main_dir, \"ref\", \"utils\"), main_dir) # cmts", "f\"d{depth_per}\", \"DATA\", \"CMTSOLUTION\")) sh.cp(join(main_dir, \"ref\", \"DATA\", \"Par_file\"), join( main_dir, \"work\",", "f\"d{depth_per}\", gcmt_id) # there are always problem in copy event,", "f\"d{depth_per}\")) # cp ref to working dirs sh.cp(\"-r\", join(main_dir, \"ref\"),", "depth_per in depth_perturbation_list: generated_name = join(generated_cmts_dir, f\"d{depth_per}\", gcmt_id) # there", "\"utils\")) # mkdir DATA in work directory for cmt_name in", "\"STATIONS\")) for lnfile in toln: sh.ln(\"-s\", join(main_dir, \"ref\", \"DATA\", lnfile),", "item in depth_perturbation.split(\",\")] setup_basic_structure(main_dir, ref_dir, cmts_dir, depth_perturbation_list) generated_cmts_dir = join(main_dir,", "in cmt_names: for depth_per in depth_perturbation_list: sh.mkdir(join(main_dir, \"work\", cmt_name, f\"d{depth_per}\",", "f\"d{depth_per}\", \"DATA\", \"STATIONS\")) for lnfile in toln: sh.ln(\"-s\", join(main_dir, \"ref\",", "def generate_new_cmtsolution_files(cmts_dir, generated_cmts_dir, depth_perturbation_list): cmt_names = glob(join(cmts_dir, \"*\")) for cmt_file", "main_dir) sh.mv(join(main_dir, \"ref\", \"utils\"), main_dir) # cmts sh.mkdir(\"-p\", join(main_dir, \"cmts\"))", "and ln files in DATA toln = [\"cemRequest\", \"crust1.0\", \"crust2.0\",", "\"cmts_generated\", f\"d{depth_per}\")) # working directory sh.mkdir(\"-p\", join(main_dir, \"work\")) def setup_structure_after_generat_cmts(main_dir,", "directory sh.mkdir(\"-p\", join(main_dir, \"work\")) def setup_structure_after_generat_cmts(main_dir, output_dir, depth_perturbation_list): # get", "[\"cemRequest\", \"crust1.0\", \"crust2.0\", \"crustmap\", \"epcrust\", 
\"eucrust-07\", \"GLL\", \"heterogen\", \"Lebedev_sea99\", \"Montagner_model\",", "sh.cp(join(main_dir, \"ref\", \"DATA\", \"Par_file\"), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\",", "and OUTPUT_FILES sh.mkdir(\"-p\", output_dir) sh.mkdir(\"-p\", join(output_dir, \"DATABASES_MPI\")) sh.mkdir(\"-p\", join(output_dir, \"OUTPUT_FILES\"))", "to read in the event again event_this_depth = obspy.read_events(cmt_file)[0] #", "join(output_dir, \"OUTPUT_FILES\", cmt_name, f\"d{depth_per}\"), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"OUTPUT_FILES\")) @click.command()", "in ref sh.rm(\"-rf\", join(main_dir, \"ref\", \"DATABASES_MPI\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"EXAMPLES\"))", "\"work\", cmt_name, f\"d{depth_per}\", \"OUTPUT_FILES\")) @click.command() @click.option('--main_dir', required=True, help=\"the main working", "cmt_name in cmt_names: sh.mkdir(join(main_dir, \"work\", cmt_name)) for depth_per in depth_perturbation_list:", "\"cmts\")) sh.cp(\"-r\", cmts_dir, join(main_dir, \"cmts\", \"cmts_raw\")) sh.mkdir(\"-p\", join(main_dir, \"cmts\", \"cmts_generated\"))", "mkdirs for cmt_name in cmt_names: sh.mkdir(join(main_dir, \"work\", cmt_name)) for depth_per", "\"ref\", \"utils\")) # mkdir DATA in work directory for cmt_name", "\"DATABASES_MPI\")) sh.mkdir(\"-p\", join(output_dir, \"OUTPUT_FILES\")) for cmt_name in cmt_names: for depth_per", "= [\"cemRequest\", \"crust1.0\", \"crust2.0\", \"crustmap\", \"epcrust\", \"eucrust-07\", \"GLL\", \"heterogen\", \"Lebedev_sea99\",", "copy def generate_new_cmtsolution_files(cmts_dir, generated_cmts_dir, depth_perturbation_list): cmt_names = glob(join(cmts_dir, \"*\")) for", "in the event again event_this_depth = obspy.read_events(cmt_file)[0] # event_this_depth =", "generated_cmts_dir = join(main_dir, \"cmts\", \"cmts_generated\") working_cmts_dir = join(main_dir, \"cmts\", \"cmts_raw\")", "been created for depth_per in depth_perturbation_list: generated_name = 
join(generated_cmts_dir, f\"d{depth_per}\",", "\"cmts_generated\", f\"d{depth_per}\", cmt_name), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", \"CMTSOLUTION\")) sh.cp(join(main_dir,", "cmt_name)) for depth_per in depth_perturbation_list: # sh.mkdir(join(main_dir, \"work\", cmt_name, f\"d{depth_per}\"))", "import sh import numpy as np import click from os.path", "in DATA toln = [\"cemRequest\", \"crust1.0\", \"crust2.0\", \"crustmap\", \"epcrust\", \"eucrust-07\",", "cmt_file.split(\"/\")[-1] # assume dirs like f\"{generated_cmts_dir}/d-3\" have already been created", "join(main_dir, \"cmts\", \"cmts_raw\") generate_new_cmtsolution_files( working_cmts_dir, generated_cmts_dir, depth_perturbation_list) setup_structure_after_generat_cmts( main_dir, output_dir,", "\"ref\", \"DATABASES_MPI\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"EXAMPLES\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"OUTPUT_FILES\"))", "specfem directory\", type=str) @click.option('--cmts_dir', required=True, help=\"the cmt solution directory\", type=str)", "directory\", type=str) @click.option('--depth_perturbation', required=True, help=\"the depth perturbation, use somthing like", "\"work\", cmt_name, f\"d{depth_per}\", \"DATA\", \"STATIONS\")) for lnfile in toln: sh.ln(\"-s\",", "format=\"CMTSOLUTION\") def setup_basic_structure(main_dir, ref_dir, cmts_dir, depth_perturbation_list): # main sh.mkdir(\"-p\", main_dir)", "cmt_name in cmt_names: for depth_per in depth_perturbation_list: sh.cp(join(main_dir, \"cmts\", \"cmts_generated\",", "join(main_dir, \"cmts\", \"cmts_raw\")) sh.mkdir(\"-p\", join(main_dir, \"cmts\", \"cmts_generated\")) for depth_per in", "def setup_basic_structure(main_dir, ref_dir, cmts_dir, depth_perturbation_list): # main sh.mkdir(\"-p\", main_dir) #", "depth_per in depth_perturbation_list: sh.mkdir(join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\")) # cp", "\"ref\", \"DATA\"), main_dir) sh.mv(join(main_dir, \"ref\", \"utils\"), main_dir) # cmts 
sh.mkdir(\"-p\",", "working directory\", type=str) @click.option('--output_dir', required=True, help=\"the output directory in scratch\",", "-3,-1,5 (in km)\", type=str) def main(main_dir, output_dir, ref_dir, cmts_dir, depth_perturbation):", "are always problem in copy event, so here I'd like", "directory\", type=str) @click.option('--output_dir', required=True, help=\"the output directory in scratch\", type=str)", "mv DATA and utils to upper level sh.mv(join(main_dir, \"ref\", \"DATA\"),", "\"OUTPUT_FILES\")) @click.command() @click.option('--main_dir', required=True, help=\"the main working directory\", type=str) @click.option('--output_dir',", "type=str) @click.option('--cmts_dir', required=True, help=\"the cmt solution directory\", type=str) @click.option('--depth_perturbation', required=True,", "cmt_name, f\"d{depth_per}\", \"DATA\", \"CMTSOLUTION\")) sh.cp(join(main_dir, \"ref\", \"DATA\", \"Par_file\"), join( main_dir,", "files in DATA toln = [\"cemRequest\", \"crust1.0\", \"crust2.0\", \"crustmap\", \"epcrust\",", "join(main_dir, \"cmts\", \"cmts_generated\", f\"d{depth_per}\")) # working directory sh.mkdir(\"-p\", join(main_dir, \"work\"))", "toln = [\"cemRequest\", \"crust1.0\", \"crust2.0\", \"crustmap\", \"epcrust\", \"eucrust-07\", \"GLL\", \"heterogen\",", "sh.mkdir(\"-p\", join(main_dir, \"cmts\", \"cmts_generated\", f\"d{depth_per}\")) # working directory sh.mkdir(\"-p\", join(main_dir,", "\"cmts_raw\") generate_new_cmtsolution_files( working_cmts_dir, generated_cmts_dir, depth_perturbation_list) setup_structure_after_generat_cmts( main_dir, output_dir, depth_perturbation_list) if", "event_this_depth = obspy.read_events(cmt_file)[0] # event_this_depth = event.copy() event_this_depth.origins[0].depth += 1000.0*depth_per", "work files toln_work = [\"utils\"] for lnfile in toln_work: sh.ln(\"-s\",", "\"ref\"), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\")) # mv DATA and utils", "\"work\", cmt_name, f\"d{depth_per}\", \"DATA\")) # cp and ln 
files in", "# assume dirs like f\"{generated_cmts_dir}/d-3\" have already been created for", "output_dir) sh.mkdir(\"-p\", join(output_dir, \"DATABASES_MPI\")) sh.mkdir(\"-p\", join(output_dir, \"OUTPUT_FILES\")) for cmt_name in", "so here I'd like to read in the event again", "there are always problem in copy event, so here I'd", "import glob import copy def generate_new_cmtsolution_files(cmts_dir, generated_cmts_dir, depth_perturbation_list): cmt_names =", "join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", lnfile)) # mkdir and ln", "\"\"\" import obspy import sh import numpy as np import", "in depth_perturbation_list: generated_name = join(generated_cmts_dir, f\"d{depth_per}\", gcmt_id) # there are", "main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", \"STATIONS\")) for lnfile in toln:", "= join(main_dir, \"cmts\", \"cmts_raw\") generate_new_cmtsolution_files( working_cmts_dir, generated_cmts_dir, depth_perturbation_list) setup_structure_after_generat_cmts( main_dir,", "f\"d{depth_per}\"), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATABASES_MPI\")) sh.ln(\"-s\", join(output_dir, \"OUTPUT_FILES\", cmt_name,", "join(main_dir, \"cmts\", \"cmts_generated\") working_cmts_dir = join(main_dir, \"cmts\", \"cmts_raw\") generate_new_cmtsolution_files( working_cmts_dir,", "\"work\", cmt_name, f\"d{depth_per}\", \"DATA\", \"Par_file\")) sh.cp(join(main_dir, \"ref\", \"DATA\", \"STATIONS\"), join(", "upper level sh.mv(join(main_dir, \"ref\", \"DATA\"), main_dir) sh.mv(join(main_dir, \"ref\", \"utils\"), main_dir)", "\"CMTSOLUTION\")) sh.cp(join(main_dir, \"ref\", \"DATA\", \"Par_file\"), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\",", "sh.mkdir(\"-p\", join(main_dir, \"cmts\", \"cmts_generated\")) for depth_per in depth_perturbation_list: sh.mkdir(\"-p\", join(main_dir,", "glob(join(main_dir, \"cmts\", \"cmts_raw\", \"*\")) cmt_names = [item.split(\"/\")[-1] for item in", "required=True, help=\"the reference specfem directory\", type=str) 
@click.option('--cmts_dir', required=True, help=\"the cmt", "= [\"utils\"] for lnfile in toln_work: sh.ln(\"-s\", join(main_dir, \"ref\", lnfile),", "\"GLL\", \"heterogen\", \"Lebedev_sea99\", \"Montagner_model\", \"old\", \"PPM\", \"QRFSI12\", \"s20rts\", \"s362ani\", \"s40rts\",", "cmt_name, f\"d{depth_per}\"), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"OUTPUT_FILES\")) @click.command() @click.option('--main_dir', required=True,", "f\"d{depth_per}\", lnfile)) # mkdir and ln DATABASE_MPI and OUTPUT_FILES sh.mkdir(\"-p\",", "cmts names cmt_dirs = glob(join(main_dir, \"cmts\", \"cmts_raw\", \"*\")) cmt_names =", "cmt_dirs] # mkdirs for cmt_name in cmt_names: sh.mkdir(join(main_dir, \"work\", cmt_name))", "\"ref\", \"DATA\", \"STATIONS\"), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", \"STATIONS\"))", "sh.cp(\"-r\", cmts_dir, join(main_dir, \"cmts\", \"cmts_raw\")) sh.mkdir(\"-p\", join(main_dir, \"cmts\", \"cmts_generated\")) for", "for depth_per in depth_perturbation_list: sh.mkdir(\"-p\", join(output_dir, \"DATABASES_MPI\", cmt_name, f\"d{depth_per}\")) sh.mkdir(\"-p\",", "depth_perturbation_list: sh.mkdir(\"-p\", join(output_dir, \"DATABASES_MPI\", cmt_name, f\"d{depth_per}\")) sh.mkdir(\"-p\", join(output_dir, \"OUTPUT_FILES\", cmt_name,", "cmt_name, f\"d{depth_per}\")) # cp ref to working dirs sh.cp(\"-r\", join(main_dir,", "= event.copy() event_this_depth.origins[0].depth += 1000.0*depth_per # print(generated_name, generated_cmts_dir, f\"d{depth_per}\", gcmt_id)", "\"work\", cmt_name, f\"d{depth_per}\")) # mv DATA and utils back to", "names gcmt_id = cmt_file.split(\"/\")[-1] # assume dirs like f\"{generated_cmts_dir}/d-3\" have", "event.resource_id.id.split(\"/\")[-2] # there are some problems in changing names gcmt_id", "help=\"the depth perturbation, use somthing like -3,-1,5 (in km)\", type=str)", "for cmt_name in cmt_names: sh.mkdir(join(main_dir, \"work\", cmt_name)) for depth_per in", "main_dir) # ref sh.cp(\"-r\", 
ref_dir, join(main_dir, \"ref\")) # refine the", "# sh.mkdir(join(main_dir, \"work\", cmt_name, f\"d{depth_per}\")) # cp ref to working", "cmt_name, f\"d{depth_per}\", \"DATA\", \"STATIONS\")) for lnfile in toln: sh.ln(\"-s\", join(main_dir,", "setup earthquake depth relocation directory \"\"\" import obspy import sh", "in cmt_names: for depth_per in depth_perturbation_list: sh.mkdir(\"-p\", join(output_dir, \"DATABASES_MPI\", cmt_name,", "= obspy.read_events(cmt_file)[0] # event_this_depth = event.copy() event_this_depth.origins[0].depth += 1000.0*depth_per #", "in depth_perturbation_list: sh.cp(join(main_dir, \"cmts\", \"cmts_generated\", f\"d{depth_per}\", cmt_name), join(main_dir, \"work\", cmt_name,", "depth_per in depth_perturbation_list: sh.mkdir(\"-p\", join(output_dir, \"DATABASES_MPI\", cmt_name, f\"d{depth_per}\")) sh.mkdir(\"-p\", join(output_dir,", "\"tests\")) # mv DATA and utils to upper level sh.mv(join(main_dir,", "in depth_perturbation_list: sh.mkdir(\"-p\", join(main_dir, \"cmts\", \"cmts_generated\", f\"d{depth_per}\")) # working directory", "type=str) @click.option('--depth_perturbation', required=True, help=\"the depth perturbation, use somthing like -3,-1,5", "generated_cmts_dir, f\"d{depth_per}\", gcmt_id) event_this_depth.write(generated_name, format=\"CMTSOLUTION\") def setup_basic_structure(main_dir, ref_dir, cmts_dir, depth_perturbation_list):", "# there are some problems in changing names gcmt_id =", "in depth_perturbation.split(\",\")] setup_basic_structure(main_dir, ref_dir, cmts_dir, depth_perturbation_list) generated_cmts_dir = join(main_dir, \"cmts\",", "join(main_dir, \"ref\", \"utils\")) # mkdir DATA in work directory for", "# gcmt_id = event.resource_id.id.split(\"/\")[-2] # there are some problems in", "join(main_dir, \"ref\", \"DATA\", lnfile), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\",", "cmt_name, f\"d{depth_per}\", \"DATA\", \"Par_file\")) sh.cp(join(main_dir, \"ref\", \"DATA\", \"STATIONS\"), join( 
main_dir,", "mv DATA and utils back to ref sh.mv(join(main_dir, \"DATA\"), join(main_dir,", "\"Montagner_model\", \"old\", \"PPM\", \"QRFSI12\", \"s20rts\", \"s362ani\", \"s40rts\", \"Simons_model\", \"topo_bathy\", \"Zhao_JP_model\"]", "\"DATABASES_MPI\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"EXAMPLES\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"OUTPUT_FILES\")) sh.rm(\"-rf\",", "cmt_names: event = obspy.read_events(cmt_file)[0] # gcmt_id = event.resource_id.id.split(\"/\")[-2] # there", "lnfile), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", lnfile)) # ln", "scratch\", type=str) @click.option('--ref_dir', required=True, help=\"the reference specfem directory\", type=str) @click.option('--cmts_dir',", "# refine the structure in ref sh.rm(\"-rf\", join(main_dir, \"ref\", \"DATABASES_MPI\"))", "have already been created for depth_per in depth_perturbation_list: generated_name =", "ref_dir, cmts_dir, depth_perturbation): depth_perturbation_list = [float(item) for item in depth_perturbation.split(\",\")]", "# cp ref to working dirs sh.cp(\"-r\", join(main_dir, \"ref\"), join(main_dir,", "\"old\", \"PPM\", \"QRFSI12\", \"s20rts\", \"s362ani\", \"s40rts\", \"Simons_model\", \"topo_bathy\", \"Zhao_JP_model\"] for", "output_dir, ref_dir, cmts_dir, depth_perturbation): depth_perturbation_list = [float(item) for item in", "\"cmts\", \"cmts_generated\") working_cmts_dir = join(main_dir, \"cmts\", \"cmts_raw\") generate_new_cmtsolution_files( working_cmts_dir, generated_cmts_dir,", "obspy.read_events(cmt_file)[0] # gcmt_id = event.resource_id.id.split(\"/\")[-2] # there are some problems", "and ln DATABASE_MPI and OUTPUT_FILES sh.mkdir(\"-p\", output_dir) sh.mkdir(\"-p\", join(output_dir, \"DATABASES_MPI\"))", "structure in ref sh.rm(\"-rf\", join(main_dir, \"ref\", \"DATABASES_MPI\")) sh.rm(\"-rf\", join(main_dir, \"ref\",", "mkdir and ln DATABASE_MPI and OUTPUT_FILES sh.mkdir(\"-p\", output_dir) sh.mkdir(\"-p\", join(output_dir,", "are some problems 
in changing names gcmt_id = cmt_file.split(\"/\")[-1] #", "glob import glob import copy def generate_new_cmtsolution_files(cmts_dir, generated_cmts_dir, depth_perturbation_list): cmt_names", "for cmt_name in cmt_names: for depth_per in depth_perturbation_list: sh.cp(join(main_dir, \"cmts\",", "in toln: sh.ln(\"-s\", join(main_dir, \"ref\", \"DATA\", lnfile), join( main_dir, \"work\",", "sh.mkdir(\"-p\", join(output_dir, \"DATABASES_MPI\", cmt_name, f\"d{depth_per}\")) sh.mkdir(\"-p\", join(output_dir, \"OUTPUT_FILES\", cmt_name, f\"d{depth_per}\"))", "\"ref\", lnfile), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", lnfile)) # mkdir", "for cmt_name in cmt_names: for depth_per in depth_perturbation_list: sh.mkdir(join(main_dir, \"work\",", "already been created for depth_per in depth_perturbation_list: generated_name = join(generated_cmts_dir,", "\"crustmap\", \"epcrust\", \"eucrust-07\", \"GLL\", \"heterogen\", \"Lebedev_sea99\", \"Montagner_model\", \"old\", \"PPM\", \"QRFSI12\",", "f\"d{depth_per}\", \"DATABASES_MPI\")) sh.ln(\"-s\", join(output_dir, \"OUTPUT_FILES\", cmt_name, f\"d{depth_per}\"), join(main_dir, \"work\", cmt_name,", "output directory in scratch\", type=str) @click.option('--ref_dir', required=True, help=\"the reference specfem", "main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", \"Par_file\")) sh.cp(join(main_dir, \"ref\", \"DATA\", \"STATIONS\"),", "in toln_work: sh.ln(\"-s\", join(main_dir, \"ref\", lnfile), join( main_dir, \"work\", cmt_name,", "sh.mkdir(\"-p\", main_dir) # ref sh.cp(\"-r\", ref_dir, join(main_dir, \"ref\")) # refine", "\"DATA\"), main_dir) sh.mv(join(main_dir, \"ref\", \"utils\"), main_dir) # cmts sh.mkdir(\"-p\", join(main_dir,", "join(main_dir, \"cmts\")) sh.cp(\"-r\", cmts_dir, join(main_dir, \"cmts\", \"cmts_raw\")) sh.mkdir(\"-p\", join(main_dir, \"cmts\",", "depth_perturbation_list) generated_cmts_dir = join(main_dir, \"cmts\", \"cmts_generated\") working_cmts_dir = join(main_dir, \"cmts\",", 
"join(main_dir, \"ref\", \"EXAMPLES\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"OUTPUT_FILES\")) sh.rm(\"-rf\", join(main_dir, \"ref\",", "depth_perturbation_list): # get cmts names cmt_dirs = glob(join(main_dir, \"cmts\", \"cmts_raw\",", "\"EXAMPLES\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"OUTPUT_FILES\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"doc\")) sh.rm(\"-rf\",", "directory \"\"\" import obspy import sh import numpy as np", "problem in copy event, so here I'd like to read", "join(output_dir, \"DATABASES_MPI\")) sh.mkdir(\"-p\", join(output_dir, \"OUTPUT_FILES\")) for cmt_name in cmt_names: for", "sh.mkdir(join(main_dir, \"work\", cmt_name, f\"d{depth_per}\")) # cp ref to working dirs", "import numpy as np import click from os.path import join", "= glob(join(cmts_dir, \"*\")) for cmt_file in cmt_names: event = obspy.read_events(cmt_file)[0]", "assume dirs like f\"{generated_cmts_dir}/d-3\" have already been created for depth_per", "= [item.split(\"/\")[-1] for item in cmt_dirs] # mkdirs for cmt_name", "event again event_this_depth = obspy.read_events(cmt_file)[0] # event_this_depth = event.copy() event_this_depth.origins[0].depth", "@click.option('--output_dir', required=True, help=\"the output directory in scratch\", type=str) @click.option('--ref_dir', required=True,", "\"utils\"), join(main_dir, \"ref\", \"utils\")) # mkdir DATA in work directory", "join(generated_cmts_dir, f\"d{depth_per}\", gcmt_id) # there are always problem in copy", "type=str) def main(main_dir, output_dir, ref_dir, cmts_dir, depth_perturbation): depth_perturbation_list = [float(item)", "click from os.path import join from glob import glob import", "for depth_per in depth_perturbation_list: # sh.mkdir(join(main_dir, \"work\", cmt_name, f\"d{depth_per}\")) #", "back to ref sh.mv(join(main_dir, \"DATA\"), join(main_dir, \"ref\", \"DATA\")) sh.mv(join(main_dir, \"utils\"),", "os.path import join from glob import glob import copy def", "sh.mkdir(\"-p\", join(main_dir, \"work\")) 
def setup_structure_after_generat_cmts(main_dir, output_dir, depth_perturbation_list): # get cmts", "\"s40rts\", \"Simons_model\", \"topo_bathy\", \"Zhao_JP_model\"] for cmt_name in cmt_names: for depth_per", "sh.ln(\"-s\", join(output_dir, \"DATABASES_MPI\", cmt_name, f\"d{depth_per}\"), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATABASES_MPI\"))", "solution directory\", type=str) @click.option('--depth_perturbation', required=True, help=\"the depth perturbation, use somthing", "# mkdir and ln DATABASE_MPI and OUTPUT_FILES sh.mkdir(\"-p\", output_dir) sh.mkdir(\"-p\",", "event, so here I'd like to read in the event", "help=\"the output directory in scratch\", type=str) @click.option('--ref_dir', required=True, help=\"the reference", "main working directory\", type=str) @click.option('--output_dir', required=True, help=\"the output directory in", "\"cmts\", \"cmts_generated\", f\"d{depth_per}\")) # working directory sh.mkdir(\"-p\", join(main_dir, \"work\")) def", "\"topo_bathy\", \"Zhao_JP_model\"] for cmt_name in cmt_names: for depth_per in depth_perturbation_list:", "\"eucrust-07\", \"GLL\", \"heterogen\", \"Lebedev_sea99\", \"Montagner_model\", \"old\", \"PPM\", \"QRFSI12\", \"s20rts\", \"s362ani\",", "[\"utils\"] for lnfile in toln_work: sh.ln(\"-s\", join(main_dir, \"ref\", lnfile), join(", "depth_perturbation_list: generated_name = join(generated_cmts_dir, f\"d{depth_per}\", gcmt_id) # there are always", "cmt_file in cmt_names: event = obspy.read_events(cmt_file)[0] # gcmt_id = event.resource_id.id.split(\"/\")[-2]", "f\"d{depth_per}\")) # working directory sh.mkdir(\"-p\", join(main_dir, \"work\")) def setup_structure_after_generat_cmts(main_dir, output_dir,", "\"epcrust\", \"eucrust-07\", \"GLL\", \"heterogen\", \"Lebedev_sea99\", \"Montagner_model\", \"old\", \"PPM\", \"QRFSI12\", \"s20rts\",", "event.copy() event_this_depth.origins[0].depth += 1000.0*depth_per # print(generated_name, generated_cmts_dir, f\"d{depth_per}\", gcmt_id) 
event_this_depth.write(generated_name,", "join(main_dir, \"work\", cmt_name, f\"d{depth_per}\")) # mv DATA and utils back", "main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", lnfile)) # ln in work", "lnfile in toln_work: sh.ln(\"-s\", join(main_dir, \"ref\", lnfile), join( main_dir, \"work\",", "item in cmt_dirs] # mkdirs for cmt_name in cmt_names: sh.mkdir(join(main_dir,", "depth_perturbation_list): # main sh.mkdir(\"-p\", main_dir) # ref sh.cp(\"-r\", ref_dir, join(main_dir,", "lnfile in toln: sh.ln(\"-s\", join(main_dir, \"ref\", \"DATA\", lnfile), join( main_dir,", "sh.mv(join(main_dir, \"utils\"), join(main_dir, \"ref\", \"utils\")) # mkdir DATA in work", "ln in work files toln_work = [\"utils\"] for lnfile in", "sh.ln(\"-s\", join(main_dir, \"ref\", lnfile), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", lnfile))", "cmt_name in cmt_names: for depth_per in depth_perturbation_list: sh.mkdir(\"-p\", join(output_dir, \"DATABASES_MPI\",", "f\"d{depth_per}\", \"DATA\", lnfile)) # ln in work files toln_work =", "\"work\", cmt_name, f\"d{depth_per}\", lnfile)) # mkdir and ln DATABASE_MPI and", "@click.option('--main_dir', required=True, help=\"the main working directory\", type=str) @click.option('--output_dir', required=True, help=\"the", "\"QRFSI12\", \"s20rts\", \"s362ani\", \"s40rts\", \"Simons_model\", \"topo_bathy\", \"Zhao_JP_model\"] for cmt_name in", "join(main_dir, \"ref\", \"tests\")) # mv DATA and utils to upper", "= glob(join(main_dir, \"cmts\", \"cmts_raw\", \"*\")) cmt_names = [item.split(\"/\")[-1] for item", "\"ref\")) # refine the structure in ref sh.rm(\"-rf\", join(main_dir, \"ref\",", "working_cmts_dir, generated_cmts_dir, depth_perturbation_list) setup_structure_after_generat_cmts( main_dir, output_dir, depth_perturbation_list) if __name__ ==", "\"DATA\")) sh.mv(join(main_dir, \"utils\"), join(main_dir, \"ref\", \"utils\")) # mkdir DATA in", "f\"d{depth_per}\")) # mv DATA and utils back to ref sh.mv(join(main_dir,", "cmt_name, 
f\"d{depth_per}\", \"OUTPUT_FILES\")) @click.command() @click.option('--main_dir', required=True, help=\"the main working directory\",", "\"ref\", \"DATA\", \"Par_file\"), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", \"Par_file\"))", "DATABASE_MPI and OUTPUT_FILES sh.mkdir(\"-p\", output_dir) sh.mkdir(\"-p\", join(output_dir, \"DATABASES_MPI\")) sh.mkdir(\"-p\", join(output_dir,", "for item in cmt_dirs] # mkdirs for cmt_name in cmt_names:", "cmt_names: for depth_per in depth_perturbation_list: sh.mkdir(\"-p\", join(output_dir, \"DATABASES_MPI\", cmt_name, f\"d{depth_per}\"))", "lnfile)) # mkdir and ln DATABASE_MPI and OUTPUT_FILES sh.mkdir(\"-p\", output_dir)", "= join(main_dir, \"cmts\", \"cmts_generated\") working_cmts_dir = join(main_dir, \"cmts\", \"cmts_raw\") generate_new_cmtsolution_files(", "working_cmts_dir = join(main_dir, \"cmts\", \"cmts_raw\") generate_new_cmtsolution_files( working_cmts_dir, generated_cmts_dir, depth_perturbation_list) setup_structure_after_generat_cmts(", "\"cmts\", \"cmts_raw\")) sh.mkdir(\"-p\", join(main_dir, \"cmts\", \"cmts_generated\")) for depth_per in depth_perturbation_list:", "join(main_dir, \"ref\"), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\")) # mv DATA and", "\"Zhao_JP_model\"] for cmt_name in cmt_names: for depth_per in depth_perturbation_list: sh.cp(join(main_dir,", "join(output_dir, \"OUTPUT_FILES\", cmt_name, f\"d{depth_per}\")) sh.ln(\"-s\", join(output_dir, \"DATABASES_MPI\", cmt_name, f\"d{depth_per}\"), join(main_dir,", "# event_this_depth = event.copy() event_this_depth.origins[0].depth += 1000.0*depth_per # print(generated_name, generated_cmts_dir,", "cmt_names: for depth_per in depth_perturbation_list: sh.mkdir(join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\"))", "gcmt_id = cmt_file.split(\"/\")[-1] # assume dirs like f\"{generated_cmts_dir}/d-3\" have already", "\"s362ani\", \"s40rts\", \"Simons_model\", \"topo_bathy\", \"Zhao_JP_model\"] for cmt_name in cmt_names: 
for", "and utils to upper level sh.mv(join(main_dir, \"ref\", \"DATA\"), main_dir) sh.mv(join(main_dir,", "join(output_dir, \"OUTPUT_FILES\")) for cmt_name in cmt_names: for depth_per in depth_perturbation_list:", "sh.cp(join(main_dir, \"cmts\", \"cmts_generated\", f\"d{depth_per}\", cmt_name), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\",", "\"ref\", \"DATA\", lnfile), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", lnfile))", "\"DATABASES_MPI\", cmt_name, f\"d{depth_per}\"), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATABASES_MPI\")) sh.ln(\"-s\", join(output_dir,", "for cmt_file in cmt_names: event = obspy.read_events(cmt_file)[0] # gcmt_id =", "somthing like -3,-1,5 (in km)\", type=str) def main(main_dir, output_dir, ref_dir,", "problems in changing names gcmt_id = cmt_file.split(\"/\")[-1] # assume dirs", "generated_name = join(generated_cmts_dir, f\"d{depth_per}\", gcmt_id) # there are always problem", "like to read in the event again event_this_depth = obspy.read_events(cmt_file)[0]", "type=str) @click.option('--ref_dir', required=True, help=\"the reference specfem directory\", type=str) @click.option('--cmts_dir', required=True,", "event_this_depth.write(generated_name, format=\"CMTSOLUTION\") def setup_basic_structure(main_dir, ref_dir, cmts_dir, depth_perturbation_list): # main sh.mkdir(\"-p\",", "= [float(item) for item in depth_perturbation.split(\",\")] setup_basic_structure(main_dir, ref_dir, cmts_dir, depth_perturbation_list)", "working dirs sh.cp(\"-r\", join(main_dir, \"ref\"), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\")) #", "as np import click from os.path import join from glob", "\"work\", cmt_name)) for depth_per in depth_perturbation_list: # sh.mkdir(join(main_dir, \"work\", cmt_name,", "in work directory for cmt_name in cmt_names: for depth_per in", "sh.ln(\"-s\", join(output_dir, \"OUTPUT_FILES\", cmt_name, f\"d{depth_per}\"), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", 
\"OUTPUT_FILES\"))", "1000.0*depth_per # print(generated_name, generated_cmts_dir, f\"d{depth_per}\", gcmt_id) event_this_depth.write(generated_name, format=\"CMTSOLUTION\") def setup_basic_structure(main_dir,", "\"OUTPUT_FILES\", cmt_name, f\"d{depth_per}\")) sh.ln(\"-s\", join(output_dir, \"DATABASES_MPI\", cmt_name, f\"d{depth_per}\"), join(main_dir, \"work\",", "<reponame>ziyixi/SeisScripts \"\"\" setup earthquake depth relocation directory \"\"\" import obspy", "f\"d{depth_per}\", \"DATA\")) # cp and ln files in DATA toln", "in depth_perturbation_list: sh.mkdir(join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\")) # cp and", "toln: sh.ln(\"-s\", join(main_dir, \"ref\", \"DATA\", lnfile), join( main_dir, \"work\", cmt_name,", "\"Par_file\")) sh.cp(join(main_dir, \"ref\", \"DATA\", \"STATIONS\"), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\",", "import join from glob import glob import copy def generate_new_cmtsolution_files(cmts_dir,", "\"Simons_model\", \"topo_bathy\", \"Zhao_JP_model\"] for cmt_name in cmt_names: for depth_per in", "generate_new_cmtsolution_files( working_cmts_dir, generated_cmts_dir, depth_perturbation_list) setup_structure_after_generat_cmts( main_dir, output_dir, depth_perturbation_list) if __name__", "generated_cmts_dir, depth_perturbation_list) setup_structure_after_generat_cmts( main_dir, output_dir, depth_perturbation_list) if __name__ == \"__main__\":", "\"utils\"), main_dir) # cmts sh.mkdir(\"-p\", join(main_dir, \"cmts\")) sh.cp(\"-r\", cmts_dir, join(main_dir,", "join(main_dir, \"ref\", lnfile), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", lnfile)) #", "sh.mkdir(\"-p\", join(output_dir, \"OUTPUT_FILES\", cmt_name, f\"d{depth_per}\")) sh.ln(\"-s\", join(output_dir, \"DATABASES_MPI\", cmt_name, f\"d{depth_per}\"),", "ln DATABASE_MPI and OUTPUT_FILES sh.mkdir(\"-p\", output_dir) sh.mkdir(\"-p\", join(output_dir, \"DATABASES_MPI\")) sh.mkdir(\"-p\",", "cp ref to working dirs sh.cp(\"-r\", join(main_dir, 
\"ref\"), join(main_dir, \"work\",", "f\"{generated_cmts_dir}/d-3\" have already been created for depth_per in depth_perturbation_list: generated_name", "sh.mkdir(\"-p\", output_dir) sh.mkdir(\"-p\", join(output_dir, \"DATABASES_MPI\")) sh.mkdir(\"-p\", join(output_dir, \"OUTPUT_FILES\")) for cmt_name", "main sh.mkdir(\"-p\", main_dir) # ref sh.cp(\"-r\", ref_dir, join(main_dir, \"ref\")) #", "to ref sh.mv(join(main_dir, \"DATA\"), join(main_dir, \"ref\", \"DATA\")) sh.mv(join(main_dir, \"utils\"), join(main_dir,", "join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", \"Par_file\")) sh.cp(join(main_dir, \"ref\", \"DATA\",", "sh.mkdir(\"-p\", join(output_dir, \"OUTPUT_FILES\")) for cmt_name in cmt_names: for depth_per in", "type=str) @click.option('--output_dir', required=True, help=\"the output directory in scratch\", type=str) @click.option('--ref_dir',", "# main sh.mkdir(\"-p\", main_dir) # ref sh.cp(\"-r\", ref_dir, join(main_dir, \"ref\"))", "f\"d{depth_per}\", \"OUTPUT_FILES\")) @click.command() @click.option('--main_dir', required=True, help=\"the main working directory\", type=str)", "\"ref\", \"OUTPUT_FILES\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"doc\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"tests\"))", "\"ref\", \"utils\"), main_dir) # cmts sh.mkdir(\"-p\", join(main_dir, \"cmts\")) sh.cp(\"-r\", cmts_dir,", "for cmt_name in cmt_names: for depth_per in depth_perturbation_list: sh.mkdir(\"-p\", join(output_dir,", "km)\", type=str) def main(main_dir, output_dir, ref_dir, cmts_dir, depth_perturbation): depth_perturbation_list =", "f\"d{depth_per}\")) sh.ln(\"-s\", join(output_dir, \"DATABASES_MPI\", cmt_name, f\"d{depth_per}\"), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\",", "def setup_structure_after_generat_cmts(main_dir, output_dir, depth_perturbation_list): # get cmts names cmt_dirs =", "lnfile), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", lnfile)) # mkdir and", "help=\"the main working directory\", type=str) 
@click.option('--output_dir', required=True, help=\"the output directory", "for depth_per in depth_perturbation_list: sh.mkdir(\"-p\", join(main_dir, \"cmts\", \"cmts_generated\", f\"d{depth_per}\")) #", "in changing names gcmt_id = cmt_file.split(\"/\")[-1] # assume dirs like", "ref_dir, join(main_dir, \"ref\")) # refine the structure in ref sh.rm(\"-rf\",", "\"\"\" setup earthquake depth relocation directory \"\"\" import obspy import", "main_dir, \"work\", cmt_name, f\"d{depth_per}\", lnfile)) # mkdir and ln DATABASE_MPI", "created for depth_per in depth_perturbation_list: generated_name = join(generated_cmts_dir, f\"d{depth_per}\", gcmt_id)", "\"Par_file\"), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", \"Par_file\")) sh.cp(join(main_dir, \"ref\",", "cmt_name, f\"d{depth_per}\", \"DATA\", lnfile)) # ln in work files toln_work", "required=True, help=\"the cmt solution directory\", type=str) @click.option('--depth_perturbation', required=True, help=\"the depth", "and utils back to ref sh.mv(join(main_dir, \"DATA\"), join(main_dir, \"ref\", \"DATA\"))", "perturbation, use somthing like -3,-1,5 (in km)\", type=str) def main(main_dir,", "cmt_name in cmt_names: for depth_per in depth_perturbation_list: sh.mkdir(join(main_dir, \"work\", cmt_name,", "[item.split(\"/\")[-1] for item in cmt_dirs] # mkdirs for cmt_name in", "\"OUTPUT_FILES\", cmt_name, f\"d{depth_per}\"), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"OUTPUT_FILES\")) @click.command() @click.option('--main_dir',", "@click.option('--depth_perturbation', required=True, help=\"the depth perturbation, use somthing like -3,-1,5 (in", "import obspy import sh import numpy as np import click", "here I'd like to read in the event again event_this_depth", "(in km)\", type=str) def main(main_dir, output_dir, ref_dir, cmts_dir, depth_perturbation): depth_perturbation_list", "cmt_name, f\"d{depth_per}\", \"DATABASES_MPI\")) sh.ln(\"-s\", join(output_dir, \"OUTPUT_FILES\", cmt_name, 
f\"d{depth_per}\"), join(main_dir, \"work\",", "= cmt_file.split(\"/\")[-1] # assume dirs like f\"{generated_cmts_dir}/d-3\" have already been", "ref_dir, cmts_dir, depth_perturbation_list) generated_cmts_dir = join(main_dir, \"cmts\", \"cmts_generated\") working_cmts_dir =", "# mv DATA and utils to upper level sh.mv(join(main_dir, \"ref\",", "cmts_dir, depth_perturbation): depth_perturbation_list = [float(item) for item in depth_perturbation.split(\",\")] setup_basic_structure(main_dir,", "\"cmts\", \"cmts_raw\", \"*\")) cmt_names = [item.split(\"/\")[-1] for item in cmt_dirs]", "generate_new_cmtsolution_files(cmts_dir, generated_cmts_dir, depth_perturbation_list): cmt_names = glob(join(cmts_dir, \"*\")) for cmt_file in", "\"cmts\", \"cmts_generated\", f\"d{depth_per}\", cmt_name), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", \"CMTSOLUTION\"))", "for item in depth_perturbation.split(\",\")] setup_basic_structure(main_dir, ref_dir, cmts_dir, depth_perturbation_list) generated_cmts_dir =", "read in the event again event_this_depth = obspy.read_events(cmt_file)[0] # event_this_depth", "\"DATA\", \"Par_file\")) sh.cp(join(main_dir, \"ref\", \"DATA\", \"STATIONS\"), join( main_dir, \"work\", cmt_name,", "join(main_dir, \"work\")) def setup_structure_after_generat_cmts(main_dir, output_dir, depth_perturbation_list): # get cmts names", "cmt_dirs = glob(join(main_dir, \"cmts\", \"cmts_raw\", \"*\")) cmt_names = [item.split(\"/\")[-1] for", "in depth_perturbation_list: sh.mkdir(\"-p\", join(output_dir, \"DATABASES_MPI\", cmt_name, f\"d{depth_per}\")) sh.mkdir(\"-p\", join(output_dir, \"OUTPUT_FILES\",", "in cmt_names: event = obspy.read_events(cmt_file)[0] # gcmt_id = event.resource_id.id.split(\"/\")[-2] #", "for depth_per in depth_perturbation_list: generated_name = join(generated_cmts_dir, f\"d{depth_per}\", gcmt_id) #", "sh.mkdir(join(main_dir, \"work\", cmt_name)) for depth_per in depth_perturbation_list: # sh.mkdir(join(main_dir, \"work\",", "# 
cmts sh.mkdir(\"-p\", join(main_dir, \"cmts\")) sh.cp(\"-r\", cmts_dir, join(main_dir, \"cmts\", \"cmts_raw\"))", "join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"OUTPUT_FILES\")) @click.command() @click.option('--main_dir', required=True, help=\"the main", "join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATABASES_MPI\")) sh.ln(\"-s\", join(output_dir, \"OUTPUT_FILES\", cmt_name, f\"d{depth_per}\"),", "# mkdirs for cmt_name in cmt_names: sh.mkdir(join(main_dir, \"work\", cmt_name)) for", "in copy event, so here I'd like to read in", "\"crust2.0\", \"crustmap\", \"epcrust\", \"eucrust-07\", \"GLL\", \"heterogen\", \"Lebedev_sea99\", \"Montagner_model\", \"old\", \"PPM\",", "cmt_names: sh.mkdir(join(main_dir, \"work\", cmt_name)) for depth_per in depth_perturbation_list: # sh.mkdir(join(main_dir,", "\"DATABASES_MPI\", cmt_name, f\"d{depth_per}\")) sh.mkdir(\"-p\", join(output_dir, \"OUTPUT_FILES\", cmt_name, f\"d{depth_per}\")) sh.ln(\"-s\", join(output_dir,", "sh.rm(\"-rf\", join(main_dir, \"ref\", \"OUTPUT_FILES\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"doc\")) sh.rm(\"-rf\", join(main_dir,", "\"PPM\", \"QRFSI12\", \"s20rts\", \"s362ani\", \"s40rts\", \"Simons_model\", \"topo_bathy\", \"Zhao_JP_model\"] for cmt_name", "in depth_perturbation_list: # sh.mkdir(join(main_dir, \"work\", cmt_name, f\"d{depth_per}\")) # cp ref", "in cmt_names: for depth_per in depth_perturbation_list: sh.cp(join(main_dir, \"cmts\", \"cmts_generated\", f\"d{depth_per}\",", "# mkdir DATA in work directory for cmt_name in cmt_names:", "cp and ln files in DATA toln = [\"cemRequest\", \"crust1.0\",", "from os.path import join from glob import glob import copy", "cmt_names: for depth_per in depth_perturbation_list: sh.cp(join(main_dir, \"cmts\", \"cmts_generated\", f\"d{depth_per}\", cmt_name),", "join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", \"STATIONS\")) for lnfile in", "obspy.read_events(cmt_file)[0] # event_this_depth = event.copy() 
event_this_depth.origins[0].depth += 1000.0*depth_per # print(generated_name,", "cmt_name, f\"d{depth_per}\")) # mv DATA and utils back to ref", "in scratch\", type=str) @click.option('--ref_dir', required=True, help=\"the reference specfem directory\", type=str)", "like -3,-1,5 (in km)\", type=str) def main(main_dir, output_dir, ref_dir, cmts_dir,", "main_dir) # cmts sh.mkdir(\"-p\", join(main_dir, \"cmts\")) sh.cp(\"-r\", cmts_dir, join(main_dir, \"cmts\",", "cmts_dir, depth_perturbation_list): # main sh.mkdir(\"-p\", main_dir) # ref sh.cp(\"-r\", ref_dir,", "\"OUTPUT_FILES\")) for cmt_name in cmt_names: for depth_per in depth_perturbation_list: sh.mkdir(\"-p\",", "DATA in work directory for cmt_name in cmt_names: for depth_per", "\"DATA\", \"CMTSOLUTION\")) sh.cp(join(main_dir, \"ref\", \"DATA\", \"Par_file\"), join( main_dir, \"work\", cmt_name,", "\"work\", cmt_name, f\"d{depth_per}\", \"DATA\", lnfile)) # ln in work files", "sh.rm(\"-rf\", join(main_dir, \"ref\", \"EXAMPLES\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"OUTPUT_FILES\")) sh.rm(\"-rf\", join(main_dir,", "\"heterogen\", \"Lebedev_sea99\", \"Montagner_model\", \"old\", \"PPM\", \"QRFSI12\", \"s20rts\", \"s362ani\", \"s40rts\", \"Simons_model\",", "relocation directory \"\"\" import obspy import sh import numpy as", "sh.cp(join(main_dir, \"ref\", \"DATA\", \"STATIONS\"), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\",", "cmt_name, f\"d{depth_per}\"), join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATABASES_MPI\")) sh.ln(\"-s\", join(output_dir, \"OUTPUT_FILES\",", "print(generated_name, generated_cmts_dir, f\"d{depth_per}\", gcmt_id) event_this_depth.write(generated_name, format=\"CMTSOLUTION\") def setup_basic_structure(main_dir, ref_dir, cmts_dir,", "glob(join(cmts_dir, \"*\")) for cmt_file in cmt_names: event = obspy.read_events(cmt_file)[0] #", "depth_perturbation_list: sh.mkdir(\"-p\", join(main_dir, \"cmts\", \"cmts_generated\", f\"d{depth_per}\")) # working directory 
sh.mkdir(\"-p\",", "for depth_per in depth_perturbation_list: sh.mkdir(join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\")) #", "sh.ln(\"-s\", join(main_dir, \"ref\", \"DATA\", lnfile), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\",", "= join(generated_cmts_dir, f\"d{depth_per}\", gcmt_id) # there are always problem in", "\"ref\", \"EXAMPLES\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"OUTPUT_FILES\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"doc\"))", "# print(generated_name, generated_cmts_dir, f\"d{depth_per}\", gcmt_id) event_this_depth.write(generated_name, format=\"CMTSOLUTION\") def setup_basic_structure(main_dir, ref_dir,", "for lnfile in toln: sh.ln(\"-s\", join(main_dir, \"ref\", \"DATA\", lnfile), join(", "in cmt_names: sh.mkdir(join(main_dir, \"work\", cmt_name)) for depth_per in depth_perturbation_list: #", "sh.mv(join(main_dir, \"DATA\"), join(main_dir, \"ref\", \"DATA\")) sh.mv(join(main_dir, \"utils\"), join(main_dir, \"ref\", \"utils\"))", "some problems in changing names gcmt_id = cmt_file.split(\"/\")[-1] # assume", "cmt_names = glob(join(cmts_dir, \"*\")) for cmt_file in cmt_names: event =", "lnfile)) # ln in work files toln_work = [\"utils\"] for", "directory for cmt_name in cmt_names: for depth_per in depth_perturbation_list: sh.mkdir(join(main_dir,", "depth_perturbation_list: sh.mkdir(join(main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\")) # cp and ln", "\"STATIONS\"), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", \"STATIONS\")) for lnfile", "work directory for cmt_name in cmt_names: for depth_per in depth_perturbation_list:", "\"DATA\", lnfile), join( main_dir, \"work\", cmt_name, f\"d{depth_per}\", \"DATA\", lnfile)) #", "\"cmts\", \"cmts_generated\")) for depth_per in depth_perturbation_list: sh.mkdir(\"-p\", join(main_dir, \"cmts\", \"cmts_generated\",", "gcmt_id) # there are always problem in copy event, so", "mkdir DATA in work directory for cmt_name in cmt_names: for", "help=\"the cmt 
solution directory\", type=str) @click.option('--depth_perturbation', required=True, help=\"the depth perturbation,", "= event.resource_id.id.split(\"/\")[-2] # there are some problems in changing names", "again event_this_depth = obspy.read_events(cmt_file)[0] # event_this_depth = event.copy() event_this_depth.origins[0].depth +=", "event_this_depth.origins[0].depth += 1000.0*depth_per # print(generated_name, generated_cmts_dir, f\"d{depth_per}\", gcmt_id) event_this_depth.write(generated_name, format=\"CMTSOLUTION\")", "in work files toln_work = [\"utils\"] for lnfile in toln_work:", "depth_per in depth_perturbation_list: sh.mkdir(\"-p\", join(main_dir, \"cmts\", \"cmts_generated\", f\"d{depth_per}\")) # working", "depth_perturbation_list): cmt_names = glob(join(cmts_dir, \"*\")) for cmt_file in cmt_names: event", "cmt solution directory\", type=str) @click.option('--depth_perturbation', required=True, help=\"the depth perturbation, use", "join(main_dir, \"ref\", \"doc\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"tests\")) # mv DATA", "# working directory sh.mkdir(\"-p\", join(main_dir, \"work\")) def setup_structure_after_generat_cmts(main_dir, output_dir, depth_perturbation_list):", "# get cmts names cmt_dirs = glob(join(main_dir, \"cmts\", \"cmts_raw\", \"*\"))", "DATA and utils to upper level sh.mv(join(main_dir, \"ref\", \"DATA\"), main_dir)", "\"DATA\", \"STATIONS\")) for lnfile in toln: sh.ln(\"-s\", join(main_dir, \"ref\", \"DATA\",", "gcmt_id = event.resource_id.id.split(\"/\")[-2] # there are some problems in changing", "dirs like f\"{generated_cmts_dir}/d-3\" have already been created for depth_per in", "\"cmts_generated\") working_cmts_dir = join(main_dir, \"cmts\", \"cmts_raw\") generate_new_cmtsolution_files( working_cmts_dir, generated_cmts_dir, depth_perturbation_list)", "join(main_dir, \"ref\", \"DATA\")) sh.mv(join(main_dir, \"utils\"), join(main_dir, \"ref\", \"utils\")) # mkdir", "@click.command() @click.option('--main_dir', required=True, 
help=\"the main working directory\", type=str) @click.option('--output_dir', required=True,", "required=True, help=\"the main working directory\", type=str) @click.option('--output_dir', required=True, help=\"the output", "\"ref\", \"doc\")) sh.rm(\"-rf\", join(main_dir, \"ref\", \"tests\")) # mv DATA and", "join(main_dir, \"cmts\", \"cmts_generated\")) for depth_per in depth_perturbation_list: sh.mkdir(\"-p\", join(main_dir, \"cmts\",", "sh.mv(join(main_dir, \"ref\", \"utils\"), main_dir) # cmts sh.mkdir(\"-p\", join(main_dir, \"cmts\")) sh.cp(\"-r\",", "setup_structure_after_generat_cmts(main_dir, output_dir, depth_perturbation_list): # get cmts names cmt_dirs = glob(join(main_dir,", "directory\", type=str) @click.option('--cmts_dir', required=True, help=\"the cmt solution directory\", type=str) @click.option('--depth_perturbation',", "changing names gcmt_id = cmt_file.split(\"/\")[-1] # assume dirs like f\"{generated_cmts_dir}/d-3\"", "\"ref\", \"tests\")) # mv DATA and utils to upper level", "# mv DATA and utils back to ref sh.mv(join(main_dir, \"DATA\"),", "+= 1000.0*depth_per # print(generated_name, generated_cmts_dir, f\"d{depth_per}\", gcmt_id) event_this_depth.write(generated_name, format=\"CMTSOLUTION\") def", "event_this_depth = event.copy() event_this_depth.origins[0].depth += 1000.0*depth_per # print(generated_name, generated_cmts_dir, f\"d{depth_per}\"," ]
[ "[np.inf, -np.inf])) @staticmethod def get_primitive_data_types(): return _primitive_type_to_str_table.keys() @staticmethod def to_string(data_type):", "unit = _Unit class _Ignore(object): \"\"\"Ignore type used for schemas", "== np.inf or result == -np.inf: # this is 5x", "value to convert by casting to_type : type valid data", "value, which can be one of many types \"\"\" if", "2.0 (the \"License\"); # you may not use this file", "float: float64, int: int32, long: int64, str: unicode, #list: vector,", "Data Types \"\"\" # TODO - consider server providing types,", "dateutil.parser as datetime_parser # Chose python's datetime over numpy.datetime64 because", "used when type is indeterminate\"\"\" pass unknown = _Unknown #", "handling for missing values return None elif _DataTypes.is_primitive_type(to_type) and type(value)", "result == -np.inf: # this is 5x faster than calling", "-------- >>> valid_data_types.get_from_type(int) numpy.int32 \"\"\" if _DataTypes.is_primitive_alias_type(data_type): return _primitive_alias_type_to_type_table[data_type] if", "numpy.datetime64 because of time zone support and string serialization #", "} # build reverse map string -> type _primitive_str_to_type_table =", "numpy thing, so that vectors of size 1 will still", "data): return [_DataTypes.cast(value, data_type) for value, data_type in zip(data, map(lambda", "elif _DataTypes.is_primitive_type(to_type) and type(value) is to_type: # Optimization return value", "_primitive_alias_type_to_type_table.iteritems()]))) return \", \".join(sorted(_primitive_str_to_type_table.keys() + [\"vector(n)\"])) + aliases @staticmethod def", "from datetime import datetime import dateutil.parser as datetime_parser # Chose", "= np.float32 float64 = np.float64 int32 = np.int32 int64 =", "server providing types, similar to commands __all__ = ['valid_data_types', 'ignore',", "def value_is_string(value): \"\"\"get bool indication that value is a string,", "valid_data_type = 
_DataTypes.get_from_type(data_type) try: return _primitive_type_to_str_table[valid_data_type] except KeyError: # complex", "data_type.is_complex_type except AttributeError: return False @staticmethod def is_primitive_alias_type(data_type): return data_type", "to the given type. None is always returned as None", "return \", \".join(sorted(_primitive_str_to_type_table.keys() + [\"vector(n)\"])) + aliases @staticmethod def value_is_string(value):", "type represented by the string Examples -------- >>> valid_data_types.get_from_string('unicode') unicode", "float32: return get_float_constructor(to_type) if to_type == datetime: return datetime_constructor def", "cast Returns ------- results : object the value cast to", "is None or (type(value) in [float32, float64, float] and (np.isnan(value)", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "Intel Corporation # # Licensed under the Apache License, Version", "or unicode\"\"\" return isinstance(value, basestring) @staticmethod def value_is_missing_value(value): return value", "TODO float32: \"float32\", float64: \"float64\", int32: \"int32\", int64: \"int64\", #list:", "def float_constructor(value): result = ft(value) if np.isnan(result) or result ==", "to_type(value) return constructor @staticmethod def standardize_schema(schema): return [(name, _DataTypes.get_from_type(t)) for", "for key, value in obj.items()]) if isinstance(obj, list): return [numpy_to_bson_friendly(item)", "= json.loads(value) except: value = [np.float64(item.strip()) for item in value.split(',')", "-------- >>> valid_data_types.cast(3, float64) 3.0 >>> valid_data_types.cast(4.5, str) '4.5' >>>", "for schemas during file import\"\"\" pass unit = _Unit class", "TODO float32: 0.0, float64: 0.0, int32: 0, int64: 0, unicode:", "\"\"\" trusted_analytics definitions for Data Types \"\"\" # TODO -", ">>> valid_data_types.cast(3, float64) 3.0 >>> valid_data_types.cast(4.5, str) '4.5' >>> valid_data_types.cast(None,", "to type 
%s\\n\" % to_type) + str(e)) @staticmethod def datetime_from_iso(iso_string):", "the constructor for the to_type\"\"\" try: return to_type.constructor except AttributeError:", "can create numpy objects from x using: numpy.datatime64(x.isoformat()) class _Vector(object):", "_DataTypes.get_constructor(to_type) result = constructor(value) return None if _DataTypes.value_is_missing_value(result) else result", "except ValueError: return False def __repr__(self): aliases = \"\\n(and aliases:", "special constructor for floating point types which handles nan, inf,", "-------- >>> valid_data_types.get_from_string('unicode') unicode \"\"\" try: return _primitive_str_to_type_table[data_type_str] except KeyError:", "= _Unit class _Ignore(object): \"\"\"Ignore type used for schemas during", "type used when type is indeterminate\"\"\" pass unknown = _Unknown", "t) for t, s in _primitive_type_to_str_table.iteritems()]) _primitive_alias_type_to_type_table = { float:", "[\"vector(n)\"])) + aliases @staticmethod def value_is_string(value): \"\"\"get bool indication that", "def datetime_constructor(value): \"\"\"Creates special constructor for datetime parsing\"\"\" if valid_data_types.value_is_string(value):", "# http://mail.scipy.org/pipermail/numpy-discussion/2013-April/066038.html # If need be, UDFs can create numpy", "numpy.datatime64(x.isoformat()) class _Vector(object): base_type = np.ndarray re_pattern = re.compile(r\"^vector\\((\\d+)\\)$\") def", "_Unit(object): \"\"\"Ignore type used for schemas during file import\"\"\" pass", "can be one of many types \"\"\" if value is", "vector, } _primitive_alias_str_to_type_table = dict([(alias.__name__, t) for alias, t in", "_DataTypes.get_from_type(data_type) try: return _primitive_type_to_str_table[valid_data_type] except KeyError: # complex data types", "_DataTypes() def numpy_to_bson_friendly(obj): \"\"\"take an object and convert it to", "'datetime'] import numpy as np import json import re #", "!= length: raise 
ValueError(\"Could not construct vector in Python Client.", "use this file except in compliance with the License. #", "in Python Client. Expected vector of length %s, but received", "'int32', 'int64', 'vector', 'unit', 'datetime'] import numpy as np import", "s in _primitive_type_to_str_table.iteritems()]) _primitive_alias_type_to_type_table = { float: float64, int: int32,", "vector = _Vector class _Unit(object): \"\"\"Ignore type used for schemas", "= { #bool: False, TODO float32: 0.0, float64: 0.0, int32:", "Provides functions with define and operate on supported data types.", "# complex data types should use their repr return repr(valid_data_type)", "np.float32 float64 = np.float64 int32 = np.int32 int64 = np.int64", "is a string, whether str or unicode\"\"\" return isinstance(value, basestring)", "that can be serialized to bson if neccessary.\"\"\" if isinstance(obj,", "[np.float64(item.strip()) for item in value.split(',') if item] array = np.array(value,", "if isinstance(obj, float32) or isinstance(obj, float64): return float(obj) if isinstance(obj,", "object from ISO 8601 string\"\"\" return datetime_parser.parse(iso_string) valid_data_types = _DataTypes()", "given type (often it will return the same type) Parameters", "def __init__(self, length): self.length = int(length) self.is_complex_type = True self.constructor", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "an object and convert it to a type that can", "be, UDFs can create numpy objects from x using: numpy.datatime64(x.isoformat())", "License. 
# You may obtain a copy of the License", "@staticmethod def validate_data(schema, data): return [_DataTypes.cast(value, data_type) for value, data_type", "standardize_schema(schema): return [(name, _DataTypes.get_from_type(t)) for name, t in schema] @staticmethod", "or value in [np.inf, -np.inf])) @staticmethod def get_primitive_data_types(): return _primitive_type_to_str_table.keys()", "return True except ValueError: return False def __repr__(self): aliases =", "valid data type or type that may be aliased for", "valid_data_types.cast(np.inf, float32) None \"\"\" if _DataTypes.value_is_missing_value(value): # Special handling for", "file import\"\"\" pass ignore = _Ignore class _Unknown(object): \"\"\"Unknown type", "= dict([(s, t) for t, s in _primitive_type_to_str_table.iteritems()]) _primitive_alias_type_to_type_table =", "_primitive_str_to_type_table[data_type_str] except KeyError: try: return _primitive_alias_str_to_type_table[data_type_str] except KeyError: try: return", "providing types, similar to commands __all__ = ['valid_data_types', 'ignore', 'unknown',", "------- result : type type represented by the string Examples", "under the License is distributed on an \"AS IS\" BASIS,", "value is None: return None try: # first try numpy's", "License for the specific language governing permissions and # limitations", "received length %d\" % (length, len(array))) return array return constructor", "return value is None or (type(value) in [float32, float64, float]", "def get_from_type(data_type): \"\"\" Returns the data type for the given", "None: return None try: # first try numpy's constructor array", "float64 = np.float64 int32 = np.int32 int64 = np.int64 from", "\"\"\"Unknown type used when type is indeterminate\"\"\" pass unknown =", "map string -> type _primitive_str_to_type_table = dict([(s, t) for t,", "---------- data_type : type valid data type or type that", "which handles nan, inf, -inf\"\"\" ft = float_type def float_constructor(value):", "Parameters 
---------- value : object value to convert by casting", "a valid data type; if invalid, a ValueError is raised", "False, TODO float32: 0.0, float64: 0.0, int32: 0, int64: 0,", "types float32 = np.float32 float64 = np.float64 int32 = np.int32", "python's datetime over numpy.datetime64 because of time zone support and", "return None return to_type(value) return constructor @staticmethod def standardize_schema(schema): return", "TODO #dict: \"dict\", TODO float32: \"float32\", float64: \"float64\", int32: \"int32\",", "Here's a long thread discussing numpy's datetime64 timezone problem: #", "@staticmethod def get_primitive_data_types(): return _primitive_type_to_str_table.keys() @staticmethod def to_string(data_type): \"\"\" Returns", "values return None elif _DataTypes.is_primitive_type(to_type) and type(value) is to_type: #", "- consider server providing types, similar to commands __all__ =", "the base class default method raise the TypeError return obj", "bool indication that value is a string, whether str or", "validate_data(schema, data): return [_DataTypes.cast(value, data_type) for value, data_type in zip(data,", "for given type Examples -------- >>> valid_data_types.get_from_type(int) numpy.int32 \"\"\" if", "int32: \"int32\", int64: \"int64\", #list: \"list\", TODO unicode: \"unicode\", ignore:", "it will return the same type) Parameters ---------- data_type :", "need be, UDFs can create numpy objects from x using:", "def get_from_string(data_type_str): return _Vector(_Vector.re_pattern.match(data_type_str).group(1)) def __repr__(self): return \"vector(%d)\" % self.length", "unicode: \"\", #datetime: \"datetime\", } def get_float_constructor(float_type): \"\"\"Creates special constructor", "for floating point types which handles nan, inf, -inf\"\"\" ft", "AttributeError: if to_type == float64 or to_type == float32: return", "[(name, _DataTypes.get_from_type(t)) for name, t in schema] @staticmethod def validate_data(schema,", "value_is_missing_value(value): 
return value is None or (type(value) in [float32, float64,", ": object value to convert by casting to_type : type", "Types \"\"\" # TODO - consider server providing types, similar", "_Ignore(object): \"\"\"Ignore type used for schemas during file import\"\"\" pass", "long: int64, str: unicode, #list: vector, } _primitive_alias_str_to_type_table = dict([(alias.__name__,", "than calling np.isfinite() return None return ft(value) return float_constructor def", "by casting to_type : type valid data type to use", "ValueError is raised Returns ------- result : str string representation", ">>> valid_data_types.get_from_type(int) numpy.int32 \"\"\" if _DataTypes.is_primitive_alias_type(data_type): return _primitive_alias_type_to_type_table[data_type] if _DataTypes.is_primitive_type(data_type)", "dict([(s, t) for t, s in _primitive_type_to_str_table.iteritems()]) _primitive_alias_type_to_type_table = {", "limitations under the License. # \"\"\" trusted_analytics definitions for Data", "# also support json or comma-sep string if valid_data_types.value_is_string(value): try:", "in compliance with the License. 
# You may obtain a", "#list: \"list\", TODO unicode: \"unicode\", ignore: \"ignore\", datetime: \"datetime\", }", "in schema] @staticmethod def get_default_type_value(data_type): try: return _primitive_type_to_default_value[data_type] except KeyError:", "constructor(value) return None if _DataTypes.value_is_missing_value(result) else result except Exception as", "Examples -------- >>> valid_data_types.to_string(float32) 'float32' \"\"\" valid_data_type = _DataTypes.get_from_type(data_type) try:", "np.float64 int32 = np.int32 int64 = np.int64 from datetime import", "software # distributed under the License is distributed on an", "aliases: %s)\" % (\", \".join(sorted([\"%s->%s\" % (alias.__name__, self.to_string(data_type)) for alias,", "zip(data, map(lambda t: t[1], schema))] @staticmethod def get_default_data_for_schema(schema): return [_DataTypes.get_default_type_value(data_type)", "return None try: # first try numpy's constructor array =", "def constructor(value): \"\"\" Creates a numpy array from a value,", "self.length vector = _Vector class _Unit(object): \"\"\"Ignore type used for", "None: return None return to_type(value) return constructor @staticmethod def standardize_schema(schema):", "== vector: return [] if data_type == datetime: return datetime.now()", "-np.inf])) @staticmethod def get_primitive_data_types(): return _primitive_type_to_str_table.keys() @staticmethod def to_string(data_type): \"\"\"", "length %s, but received length %d\" % (length, len(array))) return", "ValueError(\"Could not construct vector in Python Client. 
Expected vector of", "try: value = json.loads(value) except: value = [np.float64(item.strip()) for item", "else: try: return datetime(*value) except: raise TypeError(\"cannot convert type to", "is not a valid data_type\"\"\" _DataTypes.get_from_type(data_type) @staticmethod def get_constructor(to_type): \"\"\"gets", "dict([(alias.__name__, t) for alias, t in _primitive_alias_type_to_type_table.iteritems()]) _primitive_type_to_default_value = {", "or result == np.inf or result == -np.inf: # this", "if neccessary.\"\"\" if isinstance(obj, float32) or isinstance(obj, float64): return float(obj)", "\"\"\" # TODO - consider server providing types, similar to", "def is_primitive_alias_type(data_type): return data_type in _primitive_alias_type_to_type_table @staticmethod def get_from_type(data_type): \"\"\"", "= self.length def constructor(value): \"\"\" Creates a numpy array from", "def constructor(value): if value is None: return None return to_type(value)", "_Vector(object): base_type = np.ndarray re_pattern = re.compile(r\"^vector\\((\\d+)\\)$\") def __init__(self, length):", "_primitive_alias_type_to_type_table @staticmethod def get_from_type(data_type): \"\"\" Returns the data type for", "vector.base_type): return obj.tolist() if isinstance(obj, datetime): return obj.isoformat() if isinstance(obj,", "Parameters ---------- data_type : type valid data type; if invalid,", "# Optimization return value try: constructor = _DataTypes.get_constructor(to_type) result =", "except KeyError: # complex data types should use their repr", "isinstance(obj, list): return [numpy_to_bson_friendly(item) for item in obj] # Let", "Chose python's datetime over numpy.datetime64 because of time zone support", "_primitive_type_to_default_value[data_type] except KeyError: if data_type == vector: return [] if", "special constructor for datetime parsing\"\"\" if valid_data_types.value_is_string(value): return datetime_parser.parse(value) else:", "if invalid, a ValueError is raised Returns 
------- result :", "return data_type raise ValueError(\"Unsupported type %s\" % data_type) @staticmethod def", "if valid_data_types.value_is_string(value): try: value = json.loads(value) except: value = [np.float64(item.strip())", "type to use for the cast Returns ------- results :", "return dict([(numpy_to_bson_friendly(key), numpy_to_bson_friendly(value)) for key, value in obj.items()]) if isinstance(obj,", "'unit', 'datetime'] import numpy as np import json import re", "\"float64\", int32: \"int32\", int64: \"int64\", #list: \"list\", TODO unicode: \"unicode\",", "length if len(array) != length: raise ValueError(\"Could not construct vector", "\"\", #datetime: \"datetime\", } def get_float_constructor(float_type): \"\"\"Creates special constructor for", "a type that can be serialized to bson if neccessary.\"\"\"", "bson if neccessary.\"\"\" if isinstance(obj, float32) or isinstance(obj, float64): return", "value.split(',') if item] array = np.array(value, dtype=np.float64) # ensures the", "= _DataTypes.get_from_type(data_type) try: return _primitive_type_to_str_table[valid_data_type] except KeyError: # complex data", "return int(obj) if isinstance(obj, vector.base_type): return obj.tolist() if isinstance(obj, datetime):", ">>> valid_data_types.cast(None, str) None >>> valid_data_types.cast(np.inf, float32) None \"\"\" if", "type string representation Parameters ---------- data_type_str : str valid data", "+ str(e)) @staticmethod def datetime_from_iso(iso_string): \"\"\"create datetime object from ISO", "for alias, data_type in _primitive_alias_type_to_type_table.iteritems()]))) return \", \".join(sorted(_primitive_str_to_type_table.keys() + [\"vector(n)\"]))", "%s, but received length %d\" % (length, len(array))) return array", "return [] if data_type == datetime: return datetime.now() raise ValueError(\"Unable", "return to_type.constructor except AttributeError: if to_type == float64 or to_type", "None \"\"\" if _DataTypes.value_is_missing_value(value): # 
Special handling for missing values", "Let the base class default method raise the TypeError return", "If need be, UDFs can create numpy objects from x", "and # limitations under the License. # \"\"\" trusted_analytics definitions", "map types to their string identifier _primitive_type_to_str_table = { #bool:", "datetime_parser # Chose python's datetime over numpy.datetime64 because of time", "value is a string, whether str or unicode\"\"\" return isinstance(value,", "@staticmethod def is_primitive_type(data_type): return data_type in _primitive_type_to_str_table or data_type in", "= np.int64 from datetime import datetime import dateutil.parser as datetime_parser", "results : object the value cast to the to_type Examples", "valid_data_types.cast(3, float64) 3.0 >>> valid_data_types.cast(4.5, str) '4.5' >>> valid_data_types.cast(None, str)", "Parameters ---------- data_type : type valid data type or type", "---------- data_type_str : str valid data type str; if invalid,", "isinstance(obj, int32): return int(obj) if isinstance(obj, vector.base_type): return obj.tolist() if", "# TODO - consider server providing types, similar to commands", "support json or comma-sep string if valid_data_types.value_is_string(value): try: value =", "similar to commands __all__ = ['valid_data_types', 'ignore', 'unknown', 'float32', 'float64',", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "base_type = np.ndarray re_pattern = re.compile(r\"^vector\\((\\d+)\\)$\") def __init__(self, length): self.length", "else: raise array = np.atleast_1d(array) # numpy thing, so that", "try: constructor = _DataTypes.get_constructor(to_type) result = constructor(value) return None if", "class _Ignore(object): \"\"\"Ignore type used for schemas during file import\"\"\"", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "# limitations under the License. 
# \"\"\" trusted_analytics definitions for", "np import json import re # alias numpy types float32", "array is entirely made of doubles else: raise array =", "time zone support and string serialization # Here's a long", "except: # also support json or comma-sep string if valid_data_types.value_is_string(value):", "# Copyright (c) 2015 Intel Corporation # # Licensed under", "\"float32\", float64: \"float64\", int32: \"int32\", int64: \"int64\", #list: \"list\", TODO", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "type used for schemas during file import\"\"\" pass unit =", "is indeterminate\"\"\" pass unknown = _Unknown # map types to", "str: unicode, #list: vector, } _primitive_alias_str_to_type_table = dict([(alias.__name__, t) for", "#list: vector, } _primitive_alias_str_to_type_table = dict([(alias.__name__, t) for alias, t", "to in writing, software # distributed under the License is", "False @staticmethod def is_primitive_alias_type(data_type): return data_type in _primitive_alias_type_to_type_table @staticmethod def", "type Parameters ---------- data_type : type valid data type; if", "# See the License for the specific language governing permissions", "repr(valid_data_type) @staticmethod def get_from_string(data_type_str): \"\"\" Returns the data type for", "to_type: # Optimization return value try: constructor = _DataTypes.get_constructor(to_type) result", "type %s\\n\" % to_type) + str(e)) @staticmethod def datetime_from_iso(iso_string): \"\"\"create", ": type type represented by the string Examples -------- >>>", "but received length %d\" % (length, len(array))) return array return", "definitions for Data Types \"\"\" # TODO - consider server", "= np.float64 int32 = np.int32 int64 = np.int64 from datetime", "numpy_to_bson_friendly(obj): \"\"\"take an object and convert it to a type", "# Let the 
base class default method raise the TypeError", "language governing permissions and # limitations under the License. #", "valid_data_types.value_is_string(value): return datetime_parser.parse(value) else: try: return datetime(*value) except: raise TypeError(\"cannot", "or agreed to in writing, software # distributed under the", "value_is_string(value): \"\"\"get bool indication that value is a string, whether", "'vector', 'unit', 'datetime'] import numpy as np import json import", "unknown = _Unknown # map types to their string identifier", "float_type def float_constructor(value): result = ft(value) if np.isnan(result) or result", "required by applicable law or agreed to in writing, software", "from a value, which can be one of many types", "as datetime_parser # Chose python's datetime over numpy.datetime64 because of", "t[1], schema))] @staticmethod def get_default_data_for_schema(schema): return [_DataTypes.get_default_type_value(data_type) for name, data_type", "[numpy_to_bson_friendly(item) for item in obj] # Let the base class", "import\"\"\" pass unit = _Unit class _Ignore(object): \"\"\"Ignore type used", "point types which handles nan, inf, -inf\"\"\" ft = float_type", "# this is 5x faster than calling np.isfinite() return None", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# build reverse map string -> type _primitive_str_to_type_table = dict([(s,", "int32, long: int64, str: unicode, #list: vector, } _primitive_alias_str_to_type_table =", "find default value for data type %s (invalid data type)\"", "item in obj] # Let the base class default method", "with the License. 
# You may obtain a copy of", "used for schemas during file import\"\"\" pass ignore = _Ignore", "string serialization # Here's a long thread discussing numpy's datetime64", "get_float_constructor(float_type): \"\"\"Creates special constructor for floating point types which handles", "if isinstance(obj, int32): return int(obj) if isinstance(obj, vector.base_type): return obj.tolist()", "permissions and # limitations under the License. # \"\"\" trusted_analytics", "float64 or to_type == float32: return get_float_constructor(to_type) if to_type ==", "representation Parameters ---------- data_type_str : str valid data type str;", "return value try: constructor = _DataTypes.get_constructor(to_type) result = constructor(value) return", "np.atleast_1d(array) # numpy thing, so that vectors of size 1", "t in schema] @staticmethod def validate_data(schema, data): return [_DataTypes.cast(value, data_type)", "return datetime_constructor def constructor(value): if value is None: return None", "@staticmethod def validate(data_type): \"\"\"Raises a ValueError if data_type is not", "Parameters ---------- data_type_str : str valid data type str; if", "in obj] # Let the base class default method raise", "data type or type that may be aliased for a", "\"\"\"get bool indication that value is a string, whether str", "or _DataTypes.is_complex_type(data_type): return data_type raise ValueError(\"Unsupported type %s\" % data_type)", "data type for the given type string representation Parameters ----------", "valid_data_types.get_from_string('unicode') unicode \"\"\" try: return _primitive_str_to_type_table[data_type_str] except KeyError: try: return", "_DataTypes.get_from_type(t)) for name, t in schema] @staticmethod def validate_data(schema, data):", "compliance with the License. 
# You may obtain a copy", "% data_type) @staticmethod def cast(value, to_type): \"\"\" Returns the given", "agreed to in writing, software # distributed under the License", "None return to_type(value) return constructor @staticmethod def standardize_schema(schema): return [(name,", "type valid data type; if invalid, a ValueError is raised", "int64, str: unicode, #list: vector, } _primitive_alias_str_to_type_table = dict([(alias.__name__, t)", "\", \".join(sorted(_primitive_str_to_type_table.keys() + [\"vector(n)\"])) + aliases @staticmethod def value_is_string(value): \"\"\"get", "isinstance(obj, float64): return float(obj) if isinstance(obj, int32): return int(obj) if", "a ValueError if data_type is not a valid data_type\"\"\" _DataTypes.get_from_type(data_type)", "distributed under the License is distributed on an \"AS IS\"", "ignore = _Ignore class _Unknown(object): \"\"\"Unknown type used when type", "_Ignore class _Unknown(object): \"\"\"Unknown type used when type is indeterminate\"\"\"", "of length %s, but received length %d\" % (length, len(array)))", "import dateutil.parser as datetime_parser # Chose python's datetime over numpy.datetime64", "string if valid_data_types.value_is_string(value): try: value = json.loads(value) except: value =", "\"dict\", TODO float32: \"float32\", float64: \"float64\", int32: \"int32\", int64: \"int64\",", "as e: raise ValueError((\"Unable to cast to type %s\\n\" %", "express or implied. # See the License for the specific", "or result == -np.inf: # this is 5x faster than", "isinstance(obj, datetime): return obj.isoformat() if isinstance(obj, dict): return dict([(numpy_to_bson_friendly(key), numpy_to_bson_friendly(value))", "except in compliance with the License. 
# You may obtain", "return float(obj) if isinstance(obj, int32): return int(obj) if isinstance(obj, vector.base_type):", "return data_type.is_complex_type except AttributeError: return False @staticmethod def is_primitive_alias_type(data_type): return", "data_type) @staticmethod def validate(data_type): \"\"\"Raises a ValueError if data_type is", "return [(name, _DataTypes.get_from_type(t)) for name, t in schema] @staticmethod def", "= _DataTypes.get_constructor(to_type) result = constructor(value) return None if _DataTypes.value_is_missing_value(result) else", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "0, int64: 0, unicode: \"\", #datetime: \"datetime\", } def get_float_constructor(float_type):", "valid_data_types.get_from_type(int) numpy.int32 \"\"\" if _DataTypes.is_primitive_alias_type(data_type): return _primitive_alias_type_to_type_table[data_type] if _DataTypes.is_primitive_type(data_type) or", "not use this file except in compliance with the License.", "(np.isnan(value) or value in [np.inf, -np.inf])) @staticmethod def get_primitive_data_types(): return", "float32: 0.0, float64: 0.0, int32: 0, int64: 0, unicode: \"\",", "datetime import datetime import dateutil.parser as datetime_parser # Chose python's", "schemas during file import\"\"\" pass ignore = _Ignore class _Unknown(object):", "\"bool\", TODO #bytearray: \"bytearray\", TODO #dict: \"dict\", TODO float32: \"float32\",", "in _primitive_alias_type_to_type_table.iteritems()]))) return \", \".join(sorted(_primitive_str_to_type_table.keys() + [\"vector(n)\"])) + aliases @staticmethod", "\"\"\"gets the constructor for the to_type\"\"\" try: return to_type.constructor except", "type string '%s' \" % data_type_str) @staticmethod def is_primitive_type(data_type): return", "zone support and string serialization # Here's a long thread", "\"ignore\", datetime: \"datetime\", } # build reverse map string ->", ">>> valid_data_types.get_from_string('unicode') unicode \"\"\" try: return 
_primitive_str_to_type_table[data_type_str] except KeyError: try:", "TypeError(\"cannot convert type to the datetime\") class _DataTypes(object): \"\"\" Provides", "try: return _primitive_alias_str_to_type_table[data_type_str] except KeyError: try: return vector.get_from_string(data_type_str) except: raise", "writing, software # distributed under the License is distributed on", "list): return [numpy_to_bson_friendly(item) for item in obj] # Let the", "data type %s (invalid data type)\" % data_type) @staticmethod def", "_Unknown(object): \"\"\"Unknown type used when type is indeterminate\"\"\" pass unknown", "the array is entirely made of doubles except: # also", "if isinstance(obj, dict): return dict([(numpy_to_bson_friendly(key), numpy_to_bson_friendly(value)) for key, value in", "you may not use this file except in compliance with", "be one of many types \"\"\" if value is None:", "support and string serialization # Here's a long thread discussing", ": str string representation Examples -------- >>> valid_data_types.to_string(float32) 'float32' \"\"\"", "== datetime: return datetime.now() raise ValueError(\"Unable to find default value", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "from x using: numpy.datatime64(x.isoformat()) class _Vector(object): base_type = np.ndarray re_pattern", "create numpy objects from x using: numpy.datatime64(x.isoformat()) class _Vector(object): base_type", "first try numpy's constructor array = np.array(value, dtype=np.float64) # ensures", "Exception as e: raise ValueError((\"Unable to cast to type %s\\n\"", "def __repr__(self): aliases = \"\\n(and aliases: %s)\" % (\", \".join(sorted([\"%s->%s\"", "%s\" % data_type) @staticmethod def validate(data_type): \"\"\"Raises a ValueError if", "by the string Examples -------- >>> valid_data_types.get_from_string('unicode') unicode \"\"\" try:", "return _primitive_type_to_str_table[valid_data_type] except KeyError: # complex data types should use", "ft = float_type 
def float_constructor(value): result = ft(value) if np.isnan(result)", "vector.get_from_string(data_type_str) except: raise ValueError(\"Unsupported type string '%s' \" % data_type_str)", "raised Returns ------- result : type valid data type for", "constructor for floating point types which handles nan, inf, -inf\"\"\"", "[float32, float64, float] and (np.isnan(value) or value in [np.inf, -np.inf]))", "for missing values return None elif _DataTypes.is_primitive_type(to_type) and type(value) is", "type to the datetime\") class _DataTypes(object): \"\"\" Provides functions with", "None >>> valid_data_types.cast(np.inf, float32) None \"\"\" if _DataTypes.value_is_missing_value(value): # Special", "raise array = np.atleast_1d(array) # numpy thing, so that vectors", "is_primitive_type(data_type): return data_type in _primitive_type_to_str_table or data_type in _primitive_alias_type_to_type_table @staticmethod", "using: numpy.datatime64(x.isoformat()) class _Vector(object): base_type = np.ndarray re_pattern = re.compile(r\"^vector\\((\\d+)\\)$\")", "key, value in obj.items()]) if isinstance(obj, list): return [numpy_to_bson_friendly(item) for", "return obj.isoformat() if isinstance(obj, dict): return dict([(numpy_to_bson_friendly(key), numpy_to_bson_friendly(value)) for key,", "get_constructor(to_type): \"\"\"gets the constructor for the to_type\"\"\" try: return to_type.constructor", "constructor @staticmethod def standardize_schema(schema): return [(name, _DataTypes.get_from_type(t)) for name, t", "#bytearray: \"bytearray\", TODO #dict: \"dict\", TODO float32: \"float32\", float64: \"float64\",", "\"\"\" def __contains__(self, item): try: self.validate(item) return True except ValueError:", "type used for schemas during file import\"\"\" pass ignore =", "= _Ignore class _Unknown(object): \"\"\"Unknown type used when type is", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "import datetime import dateutil.parser as datetime_parser # Chose python's datetime", "'float32', 'float64', 'int32', 'int64', 'vector', 'unit', 'datetime'] import numpy as", "unicode: \"unicode\", ignore: \"ignore\", datetime: \"datetime\", } # build reverse", "#bool: False, TODO float32: 0.0, float64: 0.0, int32: 0, int64:", "re_pattern = re.compile(r\"^vector\\((\\d+)\\)$\") def __init__(self, length): self.length = int(length) self.is_complex_type", "_primitive_str_to_type_table = dict([(s, t) for t, s in _primitive_type_to_str_table.iteritems()]) _primitive_alias_type_to_type_table", "the string representation of the given type Parameters ---------- data_type", "\"\"\" if value is None: return None try: # first", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "data_type is not a valid data_type\"\"\" _DataTypes.get_from_type(data_type) @staticmethod def get_constructor(to_type):", "_primitive_alias_type_to_type_table @staticmethod def is_complex_type(data_type): try: return data_type.is_complex_type except AttributeError: return", "def validate(data_type): \"\"\"Raises a ValueError if data_type is not a", "constructor(value): if value is None: return None return to_type(value) return", "for value, data_type in zip(data, map(lambda t: t[1], schema))] @staticmethod", "valid data type to use for the cast Returns -------", "e: raise ValueError((\"Unable to cast to type %s\\n\" % to_type)", "a numpy array from a value, which can be one", "nan, inf, -inf\"\"\" ft = float_type def float_constructor(value): result =", ": type valid data type for given type Examples --------", "type. 
None is always returned as None Parameters ---------- value", "also support json or comma-sep string if valid_data_types.value_is_string(value): try: value", "Expected vector of length %s, but received length %d\" %", "result = ft(value) if np.isnan(result) or result == np.inf or", "= constructor(value) return None if _DataTypes.value_is_missing_value(result) else result except Exception", "dict([(numpy_to_bson_friendly(key), numpy_to_bson_friendly(value)) for key, value in obj.items()]) if isinstance(obj, list):", "object the value cast to the to_type Examples -------- >>>", "for item in value.split(',') if item] array = np.array(value, dtype=np.float64)", "or (type(value) in [float32, float64, float] and (np.isnan(value) or value", "to the to_type Examples -------- >>> valid_data_types.cast(3, float64) 3.0 >>>", "# Here's a long thread discussing numpy's datetime64 timezone problem:", "the given type (often it will return the same type)", "# Special handling for missing values return None elif _DataTypes.is_primitive_type(to_type)", "schema))] @staticmethod def get_default_data_for_schema(schema): return [_DataTypes.get_default_type_value(data_type) for name, data_type in", "int32 = np.int32 int64 = np.int64 from datetime import datetime", "obj.items()]) if isinstance(obj, list): return [numpy_to_bson_friendly(item) for item in obj]", "their string identifier _primitive_type_to_str_table = { #bool: \"bool\", TODO #bytearray:", "the array is entirely made of doubles else: raise array", "@staticmethod def standardize_schema(schema): return [(name, _DataTypes.get_from_type(t)) for name, t in", "return datetime_parser.parse(iso_string) valid_data_types = _DataTypes() def numpy_to_bson_friendly(obj): \"\"\"take an object", "use for the cast Returns ------- results : object the", "given value cast to the given type. 
None is always", "'float64', 'int32', 'int64', 'vector', 'unit', 'datetime'] import numpy as np", "set encoding=utf-8 # # Copyright (c) 2015 Intel Corporation #", "= ft(value) if np.isnan(result) or result == np.inf or result", "return datetime(*value) except: raise TypeError(\"cannot convert type to the datetime\")", "a ValueError is raised Returns ------- result : str string", "get_primitive_data_types(): return _primitive_type_to_str_table.keys() @staticmethod def to_string(data_type): \"\"\" Returns the string", "# Chose python's datetime over numpy.datetime64 because of time zone", "item): try: self.validate(item) return True except ValueError: return False def", "Returns ------- result : type valid data type for given", "\"bytearray\", TODO #dict: \"dict\", TODO float32: \"float32\", float64: \"float64\", int32:", "basestring) @staticmethod def value_is_missing_value(value): return value is None or (type(value)", "of many types \"\"\" if value is None: return None", "= _DataTypes() def numpy_to_bson_friendly(obj): \"\"\"take an object and convert it", "Examples -------- >>> valid_data_types.get_from_string('unicode') unicode \"\"\" try: return _primitive_str_to_type_table[data_type_str] except", "pass unit = _Unit class _Ignore(object): \"\"\"Ignore type used for", "for schemas during file import\"\"\" pass ignore = _Ignore class", "type str; if invalid, a ValueError is raised Returns -------", "if item] array = np.array(value, dtype=np.float64) # ensures the array", "np.inf or result == -np.inf: # this is 5x faster", "dict): return dict([(numpy_to_bson_friendly(key), numpy_to_bson_friendly(value)) for key, value in obj.items()]) if", "# first try numpy's constructor array = np.array(value, dtype=np.float64) #", "of size 1 will still have dimension and length if", "float64: \"float64\", int32: \"int32\", int64: \"int64\", #list: \"list\", TODO unicode:", "return to_type(value) return constructor @staticmethod def standardize_schema(schema): return [(name, 
_DataTypes.get_from_type(t))", "get_default_data_for_schema(schema): return [_DataTypes.get_default_type_value(data_type) for name, data_type in schema] @staticmethod def", "% to_type) + str(e)) @staticmethod def datetime_from_iso(iso_string): \"\"\"create datetime object", "if _DataTypes.is_primitive_type(data_type) or _DataTypes.is_complex_type(data_type): return data_type raise ValueError(\"Unsupported type %s\"", "OR CONDITIONS OF ANY KIND, either express or implied. #", "__init__(self, length): self.length = int(length) self.is_complex_type = True self.constructor =", "represented by the string Examples -------- >>> valid_data_types.get_from_string('unicode') unicode \"\"\"", "is None: return None return to_type(value) return constructor @staticmethod def", "def get_default_type_value(data_type): try: return _primitive_type_to_default_value[data_type] except KeyError: if data_type ==", "cast to the given type. None is always returned as", "indication that value is a string, whether str or unicode\"\"\"", "result = constructor(value) return None if _DataTypes.value_is_missing_value(result) else result except", "if _DataTypes.value_is_missing_value(result) else result except Exception as e: raise ValueError((\"Unable", "= float_type def float_constructor(value): result = ft(value) if np.isnan(result) or", "json or comma-sep string if valid_data_types.value_is_string(value): try: value = json.loads(value)", "(invalid data type)\" % data_type) @staticmethod def cast(value, to_type): \"\"\"", "the License is distributed on an \"AS IS\" BASIS, #", ">>> valid_data_types.cast(4.5, str) '4.5' >>> valid_data_types.cast(None, str) None >>> valid_data_types.cast(np.inf,", "schema] @staticmethod def get_default_type_value(data_type): try: return _primitive_type_to_default_value[data_type] except KeyError: if", "self.validate(item) return True except ValueError: return False def __repr__(self): aliases", "thing, so that vectors of size 1 will still have", "return False def 
__repr__(self): aliases = \"\\n(and aliases: %s)\" %", "ValueError is raised Returns ------- result : type valid data", "try: return to_type.constructor except AttributeError: if to_type == float64 or", "int(obj) if isinstance(obj, vector.base_type): return obj.tolist() if isinstance(obj, datetime): return", "the datetime\") class _DataTypes(object): \"\"\" Provides functions with define and", "governing permissions and # limitations under the License. # \"\"\"", "is raised Returns ------- result : type type represented by", "it to a type that can be serialized to bson", "% self.length vector = _Vector class _Unit(object): \"\"\"Ignore type used", "datetime: return datetime.now() raise ValueError(\"Unable to find default value for", "} _primitive_alias_str_to_type_table = dict([(alias.__name__, t) for alias, t in _primitive_alias_type_to_type_table.iteritems()])", "return ft(value) return float_constructor def datetime_constructor(value): \"\"\"Creates special constructor for", "because of time zone support and string serialization # Here's", "Returns ------- result : str string representation Examples -------- >>>", "cast to type %s\\n\" % to_type) + str(e)) @staticmethod def", "for name, data_type in schema] @staticmethod def get_default_type_value(data_type): try: return", "np.isnan(result) or result == np.inf or result == -np.inf: #", "result except Exception as e: raise ValueError((\"Unable to cast to", "+ aliases @staticmethod def value_is_string(value): \"\"\"get bool indication that value", "supported data types. 
\"\"\" def __contains__(self, item): try: self.validate(item) return", "len(array))) return array return constructor @staticmethod def get_from_string(data_type_str): return _Vector(_Vector.re_pattern.match(data_type_str).group(1))", "alias, data_type in _primitive_alias_type_to_type_table.iteritems()]))) return \", \".join(sorted(_primitive_str_to_type_table.keys() + [\"vector(n)\"])) +", "type type represented by the string Examples -------- >>> valid_data_types.get_from_string('unicode')", "_Unit class _Ignore(object): \"\"\"Ignore type used for schemas during file", "(type(value) in [float32, float64, float] and (np.isnan(value) or value in", "or to_type == float32: return get_float_constructor(to_type) if to_type == datetime:", "data_type == datetime: return datetime.now() raise ValueError(\"Unable to find default", "always returned as None Parameters ---------- value : object value", "value try: constructor = _DataTypes.get_constructor(to_type) result = constructor(value) return None", "and string serialization # Here's a long thread discussing numpy's", ": type valid data type; if invalid, a ValueError is", "str string representation Examples -------- >>> valid_data_types.to_string(float32) 'float32' \"\"\" valid_data_type", "int32: 0, int64: 0, unicode: \"\", #datetime: \"datetime\", } def", "calling np.isfinite() return None return ft(value) return float_constructor def datetime_constructor(value):", "this is 5x faster than calling np.isfinite() return None return", "---------- data_type : type valid data type; if invalid, a", "in _primitive_alias_type_to_type_table @staticmethod def is_complex_type(data_type): try: return data_type.is_complex_type except AttributeError:", "np.isfinite() return None return ft(value) return float_constructor def datetime_constructor(value): \"\"\"Creates", "return constructor @staticmethod def get_from_string(data_type_str): return _Vector(_Vector.re_pattern.match(data_type_str).group(1)) def __repr__(self): return", "if 
isinstance(obj, list): return [numpy_to_bson_friendly(item) for item in obj] #", "@staticmethod def get_default_type_value(data_type): try: return _primitive_type_to_default_value[data_type] except KeyError: if data_type", "isinstance(value, basestring) @staticmethod def value_is_missing_value(value): return value is None or", "float64, float] and (np.isnan(value) or value in [np.inf, -np.inf])) @staticmethod", "law or agreed to in writing, software # distributed under", "_primitive_alias_str_to_type_table = dict([(alias.__name__, t) for alias, t in _primitive_alias_type_to_type_table.iteritems()]) _primitive_type_to_default_value", "int(length) self.is_complex_type = True self.constructor = self._get_constructor() def _get_constructor(self): length", "= self._get_constructor() def _get_constructor(self): length = self.length def constructor(value): \"\"\"", "@staticmethod def get_from_string(data_type_str): return _Vector(_Vector.re_pattern.match(data_type_str).group(1)) def __repr__(self): return \"vector(%d)\" %", "%s\\n\" % to_type) + str(e)) @staticmethod def datetime_from_iso(iso_string): \"\"\"create datetime", "and (np.isnan(value) or value in [np.inf, -np.inf])) @staticmethod def get_primitive_data_types():", "result == np.inf or result == -np.inf: # this is", "http://mail.scipy.org/pipermail/numpy-discussion/2013-April/066038.html # If need be, UDFs can create numpy objects", "_primitive_alias_type_to_type_table.iteritems()]) _primitive_type_to_default_value = { #bool: False, TODO float32: 0.0, float64:", "float32 = np.float32 float64 = np.float64 int32 = np.int32 int64", "data_type in _primitive_type_to_str_table or data_type in _primitive_alias_type_to_type_table @staticmethod def is_complex_type(data_type):", "json import re # alias numpy types float32 = np.float32", ": str valid data type str; if invalid, a ValueError", "\"\"\" Returns the string representation of the given type Parameters", "return _primitive_str_to_type_table[data_type_str] except 
KeyError: try: return _primitive_alias_str_to_type_table[data_type_str] except KeyError: try:", "name, data_type in schema] @staticmethod def get_default_type_value(data_type): try: return _primitive_type_to_default_value[data_type]", "pass ignore = _Ignore class _Unknown(object): \"\"\"Unknown type used when", "KeyError: if data_type == vector: return [] if data_type ==", "return data_type in _primitive_type_to_str_table or data_type in _primitive_alias_type_to_type_table @staticmethod def", "int32): return int(obj) if isinstance(obj, vector.base_type): return obj.tolist() if isinstance(obj,", "3.0 >>> valid_data_types.cast(4.5, str) '4.5' >>> valid_data_types.cast(None, str) None >>>", ": type valid data type or type that may be", "\"\"\" Returns the data type for the given type string", "to convert by casting to_type : type valid data type", "or type that may be aliased for a valid data", "% (\", \".join(sorted([\"%s->%s\" % (alias.__name__, self.to_string(data_type)) for alias, data_type in", "ignore: \"ignore\", datetime: \"datetime\", } # build reverse map string", "{ float: float64, int: int32, long: int64, str: unicode, #list:", "_DataTypes(object): \"\"\" Provides functions with define and operate on supported", "float64): return float(obj) if isinstance(obj, int32): return int(obj) if isinstance(obj,", "to use for the cast Returns ------- results : object", "def is_primitive_type(data_type): return data_type in _primitive_type_to_str_table or data_type in _primitive_alias_type_to_type_table", "2015 Intel Corporation # # Licensed under the Apache License,", "get_from_string(data_type_str): \"\"\" Returns the data type for the given type", "a valid data_type\"\"\" _DataTypes.get_from_type(data_type) @staticmethod def get_constructor(to_type): \"\"\"gets the constructor", "if data_type == vector: return [] if data_type == datetime:", "@staticmethod def cast(value, to_type): \"\"\" Returns the given value cast", "data_type : type valid data type; if invalid, 
a ValueError", "@staticmethod def is_primitive_alias_type(data_type): return data_type in _primitive_alias_type_to_type_table @staticmethod def get_from_type(data_type):", "if isinstance(obj, datetime): return obj.isoformat() if isinstance(obj, dict): return dict([(numpy_to_bson_friendly(key),", "may obtain a copy of the License at # #", "return [numpy_to_bson_friendly(item) for item in obj] # Let the base", "value in obj.items()]) if isinstance(obj, list): return [numpy_to_bson_friendly(item) for item", "in value.split(',') if item] array = np.array(value, dtype=np.float64) # ensures", "\"\"\"Creates special constructor for datetime parsing\"\"\" if valid_data_types.value_is_string(value): return datetime_parser.parse(value)", "Returns the string representation of the given type Parameters ----------", "\"datetime\", } # build reverse map string -> type _primitive_str_to_type_table", "pass unknown = _Unknown # map types to their string", "string -> type _primitive_str_to_type_table = dict([(s, t) for t, s", "string Examples -------- >>> valid_data_types.get_from_string('unicode') unicode \"\"\" try: return _primitive_str_to_type_table[data_type_str]", "raise ValueError(\"Unable to find default value for data type %s", "ValueError if data_type is not a valid data_type\"\"\" _DataTypes.get_from_type(data_type) @staticmethod", "value is None: return None return to_type(value) return constructor @staticmethod", "still have dimension and length if len(array) != length: raise", "in [float32, float64, float] and (np.isnan(value) or value in [np.inf,", "array = np.atleast_1d(array) # numpy thing, so that vectors of", "their repr return repr(valid_data_type) @staticmethod def get_from_string(data_type_str): \"\"\" Returns the", "x using: numpy.datatime64(x.isoformat()) class _Vector(object): base_type = np.ndarray re_pattern =", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "discussing numpy's datetime64 timezone problem: # 
http://mail.scipy.org/pipermail/numpy-discussion/2013-April/066038.html # If need", "invalid, a ValueError is raised Returns ------- result : type", "KeyError: try: return vector.get_from_string(data_type_str) except: raise ValueError(\"Unsupported type string '%s'", "[_DataTypes.cast(value, data_type) for value, data_type in zip(data, map(lambda t: t[1],", "len(array) != length: raise ValueError(\"Could not construct vector in Python", "_DataTypes.value_is_missing_value(result) else result except Exception as e: raise ValueError((\"Unable to", "casting to_type : type valid data type to use for", "\"\"\"Ignore type used for schemas during file import\"\"\" pass ignore", "Examples -------- >>> valid_data_types.get_from_type(int) numpy.int32 \"\"\" if _DataTypes.is_primitive_alias_type(data_type): return _primitive_alias_type_to_type_table[data_type]", "may not use this file except in compliance with the", "so that vectors of size 1 will still have dimension", "schemas during file import\"\"\" pass unit = _Unit class _Ignore(object):", "data types. \"\"\" def __contains__(self, item): try: self.validate(item) return True", "and length if len(array) != length: raise ValueError(\"Could not construct", "try: self.validate(item) return True except ValueError: return False def __repr__(self):", "ft(value) if np.isnan(result) or result == np.inf or result ==", "for data type %s (invalid data type)\" % data_type) @staticmethod", "obj] # Let the base class default method raise the", "# numpy thing, so that vectors of size 1 will", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "np.array(value, dtype=np.float64) # ensures the array is entirely made of", "this file except in compliance with the License. 
# You", "obj.isoformat() if isinstance(obj, dict): return dict([(numpy_to_bson_friendly(key), numpy_to_bson_friendly(value)) for key, value", "# ensures the array is entirely made of doubles else:", "try: return _primitive_type_to_default_value[data_type] except KeyError: if data_type == vector: return", "return [_DataTypes.get_default_type_value(data_type) for name, data_type in schema] @staticmethod def get_default_type_value(data_type):", "ValueError((\"Unable to cast to type %s\\n\" % to_type) + str(e))", "return _Vector(_Vector.re_pattern.match(data_type_str).group(1)) def __repr__(self): return \"vector(%d)\" % self.length vector =", "\"\"\"Ignore type used for schemas during file import\"\"\" pass unit", "when type is indeterminate\"\"\" pass unknown = _Unknown # map", "in _primitive_alias_type_to_type_table @staticmethod def get_from_type(data_type): \"\"\" Returns the data type", "to_type == datetime: return datetime_constructor def constructor(value): if value is", "raise ValueError((\"Unable to cast to type %s\\n\" % to_type) +", "to commands __all__ = ['valid_data_types', 'ignore', 'unknown', 'float32', 'float64', 'int32',", "import numpy as np import json import re # alias", "length: raise ValueError(\"Could not construct vector in Python Client. Expected", "raise ValueError(\"Could not construct vector in Python Client. 
Expected vector", "# \"\"\" trusted_analytics definitions for Data Types \"\"\" # TODO", "string identifier _primitive_type_to_str_table = { #bool: \"bool\", TODO #bytearray: \"bytearray\",", "{ #bool: \"bool\", TODO #bytearray: \"bytearray\", TODO #dict: \"dict\", TODO", "-> type _primitive_str_to_type_table = dict([(s, t) for t, s in", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "Creates a numpy array from a value, which can be", "doubles else: raise array = np.atleast_1d(array) # numpy thing, so", "int64: \"int64\", #list: \"list\", TODO unicode: \"unicode\", ignore: \"ignore\", datetime:", "import re # alias numpy types float32 = np.float32 float64", "+ [\"vector(n)\"])) + aliases @staticmethod def value_is_string(value): \"\"\"get bool indication", "True except ValueError: return False def __repr__(self): aliases = \"\\n(and", "= [np.float64(item.strip()) for item in value.split(',') if item] array =", "\"\"\" valid_data_type = _DataTypes.get_from_type(data_type) try: return _primitive_type_to_str_table[valid_data_type] except KeyError: #", "# # Licensed under the Apache License, Version 2.0 (the", "Examples -------- >>> valid_data_types.cast(3, float64) 3.0 >>> valid_data_types.cast(4.5, str) '4.5'", "numpy objects from x using: numpy.datatime64(x.isoformat()) class _Vector(object): base_type =", "float_constructor(value): result = ft(value) if np.isnan(result) or result == np.inf", "file except in compliance with the License. 
# You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "a value, which can be one of many types \"\"\"", "return [_DataTypes.cast(value, data_type) for value, data_type in zip(data, map(lambda t:", "TODO unicode: \"unicode\", ignore: \"ignore\", datetime: \"datetime\", } # build", "vim: set encoding=utf-8 # # Copyright (c) 2015 Intel Corporation", "except: raise TypeError(\"cannot convert type to the datetime\") class _DataTypes(object):", "thread discussing numpy's datetime64 timezone problem: # http://mail.scipy.org/pipermail/numpy-discussion/2013-April/066038.html # If", "return None return ft(value) return float_constructor def datetime_constructor(value): \"\"\"Creates special", "string '%s' \" % data_type_str) @staticmethod def is_primitive_type(data_type): return data_type", "_DataTypes.get_from_type(data_type) @staticmethod def get_constructor(to_type): \"\"\"gets the constructor for the to_type\"\"\"", "try numpy's constructor array = np.array(value, dtype=np.float64) # ensures the", "data_type in zip(data, map(lambda t: t[1], schema))] @staticmethod def get_default_data_for_schema(schema):", "if to_type == float64 or to_type == float32: return get_float_constructor(to_type)", "get_default_type_value(data_type): try: return _primitive_type_to_default_value[data_type] except KeyError: if data_type == vector:", "numpy's constructor array = np.array(value, dtype=np.float64) # ensures the array", "UDFs can create numpy objects from x using: numpy.datatime64(x.isoformat()) class", "5x faster than calling np.isfinite() return None return ft(value) return", "whether str or unicode\"\"\" return isinstance(value, basestring) @staticmethod def value_is_missing_value(value):", "of time zone support and string serialization # Here's a", "aliases = \"\\n(and aliases: %s)\" % (\", \".join(sorted([\"%s->%s\" % (alias.__name__,", "in schema] @staticmethod def validate_data(schema, data): return [_DataTypes.cast(value, data_type) for", "str) None >>> 
valid_data_types.cast(np.inf, float32) None \"\"\" if _DataTypes.value_is_missing_value(value): #", "return _primitive_alias_str_to_type_table[data_type_str] except KeyError: try: return vector.get_from_string(data_type_str) except: raise ValueError(\"Unsupported", "length %d\" % (length, len(array))) return array return constructor @staticmethod", "ft(value) return float_constructor def datetime_constructor(value): \"\"\"Creates special constructor for datetime", "ValueError(\"Unsupported type string '%s' \" % data_type_str) @staticmethod def is_primitive_type(data_type):", "if _DataTypes.is_primitive_alias_type(data_type): return _primitive_alias_type_to_type_table[data_type] if _DataTypes.is_primitive_type(data_type) or _DataTypes.is_complex_type(data_type): return data_type", "for name, t in schema] @staticmethod def validate_data(schema, data): return", "the to_type Examples -------- >>> valid_data_types.cast(3, float64) 3.0 >>> valid_data_types.cast(4.5,", "aliased for a valid data type; if invalid, a ValueError", "isinstance(obj, float32) or isinstance(obj, float64): return float(obj) if isinstance(obj, int32):", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "[] if data_type == datetime: return datetime.now() raise ValueError(\"Unable to", "value = json.loads(value) except: value = [np.float64(item.strip()) for item in", "-------- >>> valid_data_types.to_string(float32) 'float32' \"\"\" valid_data_type = _DataTypes.get_from_type(data_type) try: return", ">>> valid_data_types.cast(np.inf, float32) None \"\"\" if _DataTypes.value_is_missing_value(value): # Special handling", "convert type to the datetime\") class _DataTypes(object): \"\"\" Provides functions", "# map types to their string identifier _primitive_type_to_str_table = {", "\"unicode\", ignore: \"ignore\", datetime: \"datetime\", } # build reverse map", "= { #bool: \"bool\", TODO #bytearray: \"bytearray\", TODO #dict: \"dict\",", "data type str; if invalid, a ValueError is raised 
Returns", "obj.tolist() if isinstance(obj, datetime): return obj.isoformat() if isinstance(obj, dict): return", "a long thread discussing numpy's datetime64 timezone problem: # http://mail.scipy.org/pipermail/numpy-discussion/2013-April/066038.html", "is raised Returns ------- result : str string representation Examples", "except AttributeError: return False @staticmethod def is_primitive_alias_type(data_type): return data_type in", "return \"vector(%d)\" % self.length vector = _Vector class _Unit(object): \"\"\"Ignore", "def get_from_string(data_type_str): \"\"\" Returns the data type for the given", "is_complex_type(data_type): try: return data_type.is_complex_type except AttributeError: return False @staticmethod def", "the same type) Parameters ---------- data_type : type valid data", "alias, t in _primitive_alias_type_to_type_table.iteritems()]) _primitive_type_to_default_value = { #bool: False, TODO", "return isinstance(value, basestring) @staticmethod def value_is_missing_value(value): return value is None", "in zip(data, map(lambda t: t[1], schema))] @staticmethod def get_default_data_for_schema(schema): return", "value, data_type in zip(data, map(lambda t: t[1], schema))] @staticmethod def", "define and operate on supported data types. 
\"\"\" def __contains__(self,", "\"\"\" try: return _primitive_str_to_type_table[data_type_str] except KeyError: try: return _primitive_alias_str_to_type_table[data_type_str] except", "handles nan, inf, -inf\"\"\" ft = float_type def float_constructor(value): result", "data type for the given type (often it will return", "cast to the to_type Examples -------- >>> valid_data_types.cast(3, float64) 3.0", "\"\"\" Creates a numpy array from a value, which can", "t) for alias, t in _primitive_alias_type_to_type_table.iteritems()]) _primitive_type_to_default_value = { #bool:", "type valid data type to use for the cast Returns", "str or unicode\"\"\" return isinstance(value, basestring) @staticmethod def value_is_missing_value(value): return", "in [np.inf, -np.inf])) @staticmethod def get_primitive_data_types(): return _primitive_type_to_str_table.keys() @staticmethod def", "floating point types which handles nan, inf, -inf\"\"\" ft =", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "__contains__(self, item): try: self.validate(item) return True except ValueError: return False", "type(value) is to_type: # Optimization return value try: constructor =", "type for the given type string representation Parameters ---------- data_type_str", "str valid data type str; if invalid, a ValueError is", "0.0, float64: 0.0, int32: 0, int64: 0, unicode: \"\", #datetime:", "ValueError is raised Returns ------- result : type type represented", "data type)\" % data_type) @staticmethod def cast(value, to_type): \"\"\" Returns", "\"\"\" if _DataTypes.value_is_missing_value(value): # Special handling for missing values return", "ValueError: return False def __repr__(self): aliases = \"\\n(and aliases: %s)\"", "@staticmethod def value_is_string(value): \"\"\"get bool indication that value is a", "or implied. 
# See the License for the specific language", "def __contains__(self, item): try: self.validate(item) return True except ValueError: return", "for a valid data type; if invalid, a ValueError is", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "constructor(value): \"\"\" Creates a numpy array from a value, which", "size 1 will still have dimension and length if len(array)", "def get_constructor(to_type): \"\"\"gets the constructor for the to_type\"\"\" try: return", "# If need be, UDFs can create numpy objects from", "constructor for the to_type\"\"\" try: return to_type.constructor except AttributeError: if", "t: t[1], schema))] @staticmethod def get_default_data_for_schema(schema): return [_DataTypes.get_default_type_value(data_type) for name,", "will return the same type) Parameters ---------- data_type : type", "to_type : type valid data type to use for the", "def datetime_from_iso(iso_string): \"\"\"create datetime object from ISO 8601 string\"\"\" return", "get_float_constructor(to_type) if to_type == datetime: return datetime_constructor def constructor(value): if", "isinstance(obj, dict): return dict([(numpy_to_bson_friendly(key), numpy_to_bson_friendly(value)) for key, value in obj.items()])", "datetime64 timezone problem: # http://mail.scipy.org/pipermail/numpy-discussion/2013-April/066038.html # If need be, UDFs", "to find default value for data type %s (invalid data", "not a valid data_type\"\"\" _DataTypes.get_from_type(data_type) @staticmethod def get_constructor(to_type): \"\"\"gets the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "self._get_constructor() def _get_constructor(self): length = self.length def constructor(value): \"\"\" Creates", "\"vector(%d)\" % self.length vector = _Vector class _Unit(object): \"\"\"Ignore type", "------- result : str string representation Examples -------- >>> 
valid_data_types.to_string(float32)", "def is_complex_type(data_type): try: return data_type.is_complex_type except AttributeError: return False @staticmethod", "raise ValueError(\"Unsupported type %s\" % data_type) @staticmethod def validate(data_type): \"\"\"Raises", "to the datetime\") class _DataTypes(object): \"\"\" Provides functions with define", "return data_type in _primitive_alias_type_to_type_table @staticmethod def get_from_type(data_type): \"\"\" Returns the", "@staticmethod def to_string(data_type): \"\"\" Returns the string representation of the", "if value is None: return None try: # first try", "numpy_to_bson_friendly(value)) for key, value in obj.items()]) if isinstance(obj, list): return", "except: value = [np.float64(item.strip()) for item in value.split(',') if item]", "the License. # \"\"\" trusted_analytics definitions for Data Types \"\"\"", "data_type raise ValueError(\"Unsupported type %s\" % data_type) @staticmethod def validate(data_type):", "@staticmethod def datetime_from_iso(iso_string): \"\"\"create datetime object from ISO 8601 string\"\"\"", "type _primitive_str_to_type_table = dict([(s, t) for t, s in _primitive_type_to_str_table.iteritems()])", "0, unicode: \"\", #datetime: \"datetime\", } def get_float_constructor(float_type): \"\"\"Creates special", "\"\"\" Returns the given value cast to the given type.", "(alias.__name__, self.to_string(data_type)) for alias, data_type in _primitive_alias_type_to_type_table.iteritems()]))) return \", \".join(sorted(_primitive_str_to_type_table.keys()", "over numpy.datetime64 because of time zone support and string serialization", "Returns the data type for the given type string representation", "the string Examples -------- >>> valid_data_types.get_from_string('unicode') unicode \"\"\" try: return", "(the \"License\"); # you may not use this file except", "#bool: \"bool\", TODO #bytearray: \"bytearray\", TODO #dict: \"dict\", TODO float32:", "numpy types float32 = np.float32 float64 = 
np.float64 int32 =", "# you may not use this file except in compliance", "int64 = np.int64 from datetime import datetime import dateutil.parser as", "file import\"\"\" pass unit = _Unit class _Ignore(object): \"\"\"Ignore type", "= _Unknown # map types to their string identifier _primitive_type_to_str_table", "be aliased for a valid data type; if invalid, a", "for the given type string representation Parameters ---------- data_type_str :", "# vim: set encoding=utf-8 # # Copyright (c) 2015 Intel", "convert by casting to_type : type valid data type to", "dimension and length if len(array) != length: raise ValueError(\"Could not", "item] array = np.array(value, dtype=np.float64) # ensures the array is", "#datetime: \"datetime\", } def get_float_constructor(float_type): \"\"\"Creates special constructor for floating", "on supported data types. \"\"\" def __contains__(self, item): try: self.validate(item)", "with define and operate on supported data types. \"\"\" def", "length): self.length = int(length) self.is_complex_type = True self.constructor = self._get_constructor()", "datetime parsing\"\"\" if valid_data_types.value_is_string(value): return datetime_parser.parse(value) else: try: return datetime(*value)", "repr return repr(valid_data_type) @staticmethod def get_from_string(data_type_str): \"\"\" Returns the data", "can be serialized to bson if neccessary.\"\"\" if isinstance(obj, float32)", "\"\"\" Returns the data type for the given type (often", "(c) 2015 Intel Corporation # # Licensed under the Apache", "and type(value) is to_type: # Optimization return value try: constructor", "\"int32\", int64: \"int64\", #list: \"list\", TODO unicode: \"unicode\", ignore: \"ignore\",", "float32) None \"\"\" if _DataTypes.value_is_missing_value(value): # Special handling for missing", "datetime): return obj.isoformat() if isinstance(obj, dict): return dict([(numpy_to_bson_friendly(key), numpy_to_bson_friendly(value)) for", "= ['valid_data_types', 'ignore', 'unknown', 
'float32', 'float64', 'int32', 'int64', 'vector', 'unit',", "value cast to the given type. None is always returned", "True self.constructor = self._get_constructor() def _get_constructor(self): length = self.length def", "ensures the array is entirely made of doubles except: #", "self.constructor = self._get_constructor() def _get_constructor(self): length = self.length def constructor(value):", "numpy array from a value, which can be one of", "_Vector class _Unit(object): \"\"\"Ignore type used for schemas during file", "made of doubles except: # also support json or comma-sep", "def cast(value, to_type): \"\"\" Returns the given value cast to", "serialized to bson if neccessary.\"\"\" if isinstance(obj, float32) or isinstance(obj,", "return _primitive_type_to_str_table.keys() @staticmethod def to_string(data_type): \"\"\" Returns the string representation", "\"\"\"Creates special constructor for floating point types which handles nan,", "is raised Returns ------- result : type valid data type", "# # Unless required by applicable law or agreed to", "= np.int32 int64 = np.int64 from datetime import datetime import", "------- results : object the value cast to the to_type", "constructor @staticmethod def get_from_string(data_type_str): return _Vector(_Vector.re_pattern.match(data_type_str).group(1)) def __repr__(self): return \"vector(%d)\"", "raised Returns ------- result : str string representation Examples --------", "if data_type is not a valid data_type\"\"\" _DataTypes.get_from_type(data_type) @staticmethod def", "# alias numpy types float32 = np.float32 float64 = np.float64", "to_type == float64 or to_type == float32: return get_float_constructor(to_type) if", "(length, len(array))) return array return constructor @staticmethod def get_from_string(data_type_str): return", "#dict: \"dict\", TODO float32: \"float32\", float64: \"float64\", int32: \"int32\", int64:", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "== datetime: 
return datetime_constructor def constructor(value): if value is None:", "for alias, t in _primitive_alias_type_to_type_table.iteritems()]) _primitive_type_to_default_value = { #bool: False,", "operate on supported data types. \"\"\" def __contains__(self, item): try:", "during file import\"\"\" pass unit = _Unit class _Ignore(object): \"\"\"Ignore", "Version 2.0 (the \"License\"); # you may not use this", "build reverse map string -> type _primitive_str_to_type_table = dict([(s, t)", "get_from_type(data_type): \"\"\" Returns the data type for the given type", "is entirely made of doubles except: # also support json", "if data_type == datetime: return datetime.now() raise ValueError(\"Unable to find", "Returns the given value cast to the given type. None", "array return constructor @staticmethod def get_from_string(data_type_str): return _Vector(_Vector.re_pattern.match(data_type_str).group(1)) def __repr__(self):", "the given type. None is always returned as None Parameters", "missing values return None elif _DataTypes.is_primitive_type(to_type) and type(value) is to_type:", "default value for data type %s (invalid data type)\" %", "json.loads(value) except: value = [np.float64(item.strip()) for item in value.split(',') if", "consider server providing types, similar to commands __all__ = ['valid_data_types',", "a ValueError is raised Returns ------- result : type type", "np.ndarray re_pattern = re.compile(r\"^vector\\((\\d+)\\)$\") def __init__(self, length): self.length = int(length)", "valid_data_types.value_is_string(value): try: value = json.loads(value) except: value = [np.float64(item.strip()) for", "-np.inf: # this is 5x faster than calling np.isfinite() return", "value in [np.inf, -np.inf])) @staticmethod def get_primitive_data_types(): return _primitive_type_to_str_table.keys() @staticmethod", "class _Unknown(object): \"\"\"Unknown type used when type is indeterminate\"\"\" pass", "int64: 0, unicode: \"\", #datetime: \"datetime\", } def 
get_float_constructor(float_type): \"\"\"Creates", "type for given type Examples -------- >>> valid_data_types.get_from_type(int) numpy.int32 \"\"\"", "map(lambda t: t[1], schema))] @staticmethod def get_default_data_for_schema(schema): return [_DataTypes.get_default_type_value(data_type) for", "array = np.array(value, dtype=np.float64) # ensures the array is entirely", "vector in Python Client. Expected vector of length %s, but", "value is None or (type(value) in [float32, float64, float] and", "except KeyError: try: return _primitive_alias_str_to_type_table[data_type_str] except KeyError: try: return vector.get_from_string(data_type_str)", "float32) or isinstance(obj, float64): return float(obj) if isinstance(obj, int32): return", "valid data type for given type Examples -------- >>> valid_data_types.get_from_type(int)", "length = self.length def constructor(value): \"\"\" Creates a numpy array", "implied. # See the License for the specific language governing", "= _Vector class _Unit(object): \"\"\"Ignore type used for schemas during", "_primitive_type_to_str_table.iteritems()]) _primitive_alias_type_to_type_table = { float: float64, int: int32, long: int64,", "is_primitive_alias_type(data_type): return data_type in _primitive_alias_type_to_type_table @staticmethod def get_from_type(data_type): \"\"\" Returns", "ISO 8601 string\"\"\" return datetime_parser.parse(iso_string) valid_data_types = _DataTypes() def numpy_to_bson_friendly(obj):", "under the Apache License, Version 2.0 (the \"License\"); # you", "to_type == float32: return get_float_constructor(to_type) if to_type == datetime: return", "valid data_type\"\"\" _DataTypes.get_from_type(data_type) @staticmethod def get_constructor(to_type): \"\"\"gets the constructor for", "encoding=utf-8 # # Copyright (c) 2015 Intel Corporation # #", "for Data Types \"\"\" # TODO - consider server providing", "representation of the given type Parameters ---------- data_type : type", "data_type_str : str valid data type str; 
if invalid, a", "Returns ------- result : type type represented by the string", "Returns the data type for the given type (often it", "if to_type == datetime: return datetime_constructor def constructor(value): if value", "except KeyError: if data_type == vector: return [] if data_type", "== float64 or to_type == float32: return get_float_constructor(to_type) if to_type", "_primitive_type_to_str_table = { #bool: \"bool\", TODO #bytearray: \"bytearray\", TODO #dict:", "License. # \"\"\" trusted_analytics definitions for Data Types \"\"\" #", "type %s\" % data_type) @staticmethod def validate(data_type): \"\"\"Raises a ValueError", "of the given type Parameters ---------- data_type : type valid", "by applicable law or agreed to in writing, software #", "vector: return [] if data_type == datetime: return datetime.now() raise", "None elif _DataTypes.is_primitive_type(to_type) and type(value) is to_type: # Optimization return", "reverse map string -> type _primitive_str_to_type_table = dict([(s, t) for", "type) Parameters ---------- data_type : type valid data type or", "data_type\"\"\" _DataTypes.get_from_type(data_type) @staticmethod def get_constructor(to_type): \"\"\"gets the constructor for the", "% (length, len(array))) return array return constructor @staticmethod def get_from_string(data_type_str):", "if value is None: return None return to_type(value) return constructor", "type that may be aliased for a valid data type;", "object value to convert by casting to_type : type valid", "to cast to type %s\\n\" % to_type) + str(e)) @staticmethod", "long thread discussing numpy's datetime64 timezone problem: # http://mail.scipy.org/pipermail/numpy-discussion/2013-April/066038.html #", "numpy's datetime64 timezone problem: # http://mail.scipy.org/pipermail/numpy-discussion/2013-April/066038.html # If need be,", "return the same type) Parameters ---------- data_type : type valid", "_primitive_alias_type_to_type_table[data_type] if _DataTypes.is_primitive_type(data_type) 
or _DataTypes.is_complex_type(data_type): return data_type raise ValueError(\"Unsupported type", "\"\"\"Raises a ValueError if data_type is not a valid data_type\"\"\"", "# ensures the array is entirely made of doubles except:", "in _primitive_alias_type_to_type_table.iteritems()]) _primitive_type_to_default_value = { #bool: False, TODO float32: 0.0,", "is 5x faster than calling np.isfinite() return None return ft(value)", "return vector.get_from_string(data_type_str) except: raise ValueError(\"Unsupported type string '%s' \" %", "None try: # first try numpy's constructor array = np.array(value,", "given type. None is always returned as None Parameters ----------", "float64: 0.0, int32: 0, int64: 0, unicode: \"\", #datetime: \"datetime\",", "datetime_parser.parse(value) else: try: return datetime(*value) except: raise TypeError(\"cannot convert type", "valid data type; if invalid, a ValueError is raised Returns", "re # alias numpy types float32 = np.float32 float64 =", "return datetime.now() raise ValueError(\"Unable to find default value for data", "0.0, int32: 0, int64: 0, unicode: \"\", #datetime: \"datetime\", }", "that vectors of size 1 will still have dimension and", "raised Returns ------- result : type type represented by the", "datetime over numpy.datetime64 because of time zone support and string", "entirely made of doubles except: # also support json or", "_get_constructor(self): length = self.length def constructor(value): \"\"\" Creates a numpy", "_Vector(_Vector.re_pattern.match(data_type_str).group(1)) def __repr__(self): return \"vector(%d)\" % self.length vector = _Vector", "['valid_data_types', 'ignore', 'unknown', 'float32', 'float64', 'int32', 'int64', 'vector', 'unit', 'datetime']", "problem: # http://mail.scipy.org/pipermail/numpy-discussion/2013-April/066038.html # If need be, UDFs can create", "that value is a string, whether str or unicode\"\"\" return", "None Parameters ---------- value : object value to convert by", "serialization # 
Here's a long thread discussing numpy's datetime64 timezone", "8601 string\"\"\" return datetime_parser.parse(iso_string) valid_data_types = _DataTypes() def numpy_to_bson_friendly(obj): \"\"\"take", "have dimension and length if len(array) != length: raise ValueError(\"Could", "= np.array(value, dtype=np.float64) # ensures the array is entirely made", "class _Unit(object): \"\"\"Ignore type used for schemas during file import\"\"\"", "# # Copyright (c) 2015 Intel Corporation # # Licensed", "---------- value : object value to convert by casting to_type", "convert it to a type that can be serialized to", "datetime: \"datetime\", } # build reverse map string -> type", "t in _primitive_alias_type_to_type_table.iteritems()]) _primitive_type_to_default_value = { #bool: False, TODO float32:", "_DataTypes.is_primitive_type(data_type) or _DataTypes.is_complex_type(data_type): return data_type raise ValueError(\"Unsupported type %s\" %", "return get_float_constructor(to_type) if to_type == datetime: return datetime_constructor def constructor(value):", "data type; if invalid, a ValueError is raised Returns -------", "inf, -inf\"\"\" ft = float_type def float_constructor(value): result = ft(value)", "functions with define and operate on supported data types. 
\"\"\"", "string representation Examples -------- >>> valid_data_types.to_string(float32) 'float32' \"\"\" valid_data_type =", "complex data types should use their repr return repr(valid_data_type) @staticmethod", "to bson if neccessary.\"\"\" if isinstance(obj, float32) or isinstance(obj, float64):", "__all__ = ['valid_data_types', 'ignore', 'unknown', 'float32', 'float64', 'int32', 'int64', 'vector',", "class _Vector(object): base_type = np.ndarray re_pattern = re.compile(r\"^vector\\((\\d+)\\)$\") def __init__(self,", "\"\"\"take an object and convert it to a type that", "data_type) @staticmethod def cast(value, to_type): \"\"\" Returns the given value", "to_type Examples -------- >>> valid_data_types.cast(3, float64) 3.0 >>> valid_data_types.cast(4.5, str)", "isinstance(obj, vector.base_type): return obj.tolist() if isinstance(obj, datetime): return obj.isoformat() if", "valid_data_types = _DataTypes() def numpy_to_bson_friendly(obj): \"\"\"take an object and convert", "if np.isnan(result) or result == np.inf or result == -np.inf:", "t, s in _primitive_type_to_str_table.iteritems()]) _primitive_alias_type_to_type_table = { float: float64, int:", "= np.atleast_1d(array) # numpy thing, so that vectors of size", "datetime(*value) except: raise TypeError(\"cannot convert type to the datetime\") class", "alias numpy types float32 = np.float32 float64 = np.float64 int32", "except: raise ValueError(\"Unsupported type string '%s' \" % data_type_str) @staticmethod", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "is to_type: # Optimization return value try: constructor = _DataTypes.get_constructor(to_type)", "ensures the array is entirely made of doubles else: raise", "one of many types \"\"\" if value is None: return", "made of doubles else: raise array = np.atleast_1d(array) # numpy", "is None: return None try: # first try numpy's constructor", "Unless required by applicable law or agreed to in writing,", "given type string representation Parameters 
---------- data_type_str : str valid", ">>> valid_data_types.to_string(float32) 'float32' \"\"\" valid_data_type = _DataTypes.get_from_type(data_type) try: return _primitive_type_to_str_table[valid_data_type]", "datetime object from ISO 8601 string\"\"\" return datetime_parser.parse(iso_string) valid_data_types =", "neccessary.\"\"\" if isinstance(obj, float32) or isinstance(obj, float64): return float(obj) if", "_primitive_type_to_str_table.keys() @staticmethod def to_string(data_type): \"\"\" Returns the string representation of", ": type valid data type to use for the cast", "type; if invalid, a ValueError is raised Returns ------- result", "constructor = _DataTypes.get_constructor(to_type) result = constructor(value) return None if _DataTypes.value_is_missing_value(result)", "def to_string(data_type): \"\"\" Returns the string representation of the given", "to_string(data_type): \"\"\" Returns the string representation of the given type", "return array return constructor @staticmethod def get_from_string(data_type_str): return _Vector(_Vector.re_pattern.match(data_type_str).group(1)) def", "%s)\" % (\", \".join(sorted([\"%s->%s\" % (alias.__name__, self.to_string(data_type)) for alias, data_type", "'unknown', 'float32', 'float64', 'int32', 'int64', 'vector', 'unit', 'datetime'] import numpy", "return datetime_parser.parse(value) else: try: return datetime(*value) except: raise TypeError(\"cannot convert", "(\", \".join(sorted([\"%s->%s\" % (alias.__name__, self.to_string(data_type)) for alias, data_type in _primitive_alias_type_to_type_table.iteritems()])))", "float(obj) if isinstance(obj, int32): return int(obj) if isinstance(obj, vector.base_type): return", "the specific language governing permissions and # limitations under the", "data_type == vector: return [] if data_type == datetime: return", "array is entirely made of doubles except: # also support", "to_type.constructor except AttributeError: if to_type == float64 or to_type ==", "TODO #bytearray: 
\"bytearray\", TODO #dict: \"dict\", TODO float32: \"float32\", float64:", "_primitive_alias_str_to_type_table[data_type_str] except KeyError: try: return vector.get_from_string(data_type_str) except: raise ValueError(\"Unsupported type", "array from a value, which can be one of many", "== -np.inf: # this is 5x faster than calling np.isfinite()", "applicable law or agreed to in writing, software # distributed", "types \"\"\" if value is None: return None try: #", "% (alias.__name__, self.to_string(data_type)) for alias, data_type in _primitive_alias_type_to_type_table.iteritems()]))) return \",", "for item in obj] # Let the base class default", "self.is_complex_type = True self.constructor = self._get_constructor() def _get_constructor(self): length =", "return float_constructor def datetime_constructor(value): \"\"\"Creates special constructor for datetime parsing\"\"\"", "the given type string representation Parameters ---------- data_type_str : str", "many types \"\"\" if value is None: return None try:", "type valid data type or type that may be aliased", "for the to_type\"\"\" try: return to_type.constructor except AttributeError: if to_type", "return _primitive_alias_type_to_type_table[data_type] if _DataTypes.is_primitive_type(data_type) or _DataTypes.is_complex_type(data_type): return data_type raise ValueError(\"Unsupported", "import json import re # alias numpy types float32 =", "result : type type represented by the string Examples --------", "types, similar to commands __all__ = ['valid_data_types', 'ignore', 'unknown', 'float32',", "value for data type %s (invalid data type)\" % data_type)", "schema] @staticmethod def validate_data(schema, data): return [_DataTypes.cast(value, data_type) for value,", "@staticmethod def get_from_string(data_type_str): \"\"\" Returns the data type for the", "invalid, a ValueError is raised Returns ------- result : str", "to_type\"\"\" try: return to_type.constructor except AttributeError: if to_type == float64", 
"datetime\") class _DataTypes(object): \"\"\" Provides functions with define and operate", "try: return vector.get_from_string(data_type_str) except: raise ValueError(\"Unsupported type string '%s' \"", "commands __all__ = ['valid_data_types', 'ignore', 'unknown', 'float32', 'float64', 'int32', 'int64',", "in writing, software # distributed under the License is distributed", "= \"\\n(and aliases: %s)\" % (\", \".join(sorted([\"%s->%s\" % (alias.__name__, self.to_string(data_type))", "string, whether str or unicode\"\"\" return isinstance(value, basestring) @staticmethod def", "vectors of size 1 will still have dimension and length", "data_type in _primitive_alias_type_to_type_table @staticmethod def is_complex_type(data_type): try: return data_type.is_complex_type except", "return False @staticmethod def is_primitive_alias_type(data_type): return data_type in _primitive_alias_type_to_type_table @staticmethod", "value : object value to convert by casting to_type :", "type)\" % data_type) @staticmethod def cast(value, to_type): \"\"\" Returns the", "timezone problem: # http://mail.scipy.org/pipermail/numpy-discussion/2013-April/066038.html # If need be, UDFs can", "% data_type) @staticmethod def validate(data_type): \"\"\"Raises a ValueError if data_type", "None is always returned as None Parameters ---------- value :", "try: # first try numpy's constructor array = np.array(value, dtype=np.float64)", "types to their string identifier _primitive_type_to_str_table = { #bool: \"bool\",", "if _DataTypes.value_is_missing_value(value): # Special handling for missing values return None", "\" % data_type_str) @staticmethod def is_primitive_type(data_type): return data_type in _primitive_type_to_str_table", "types should use their repr return repr(valid_data_type) @staticmethod def get_from_string(data_type_str):", "be serialized to bson if neccessary.\"\"\" if isinstance(obj, float32) or", "return obj.tolist() if isinstance(obj, datetime): return obj.isoformat() if 
isinstance(obj, dict):", "if valid_data_types.value_is_string(value): return datetime_parser.parse(value) else: try: return datetime(*value) except: raise", "parsing\"\"\" if valid_data_types.value_is_string(value): return datetime_parser.parse(value) else: try: return datetime(*value) except:", "self.length def constructor(value): \"\"\" Creates a numpy array from a", "ValueError(\"Unable to find default value for data type %s (invalid", "None if _DataTypes.value_is_missing_value(result) else result except Exception as e: raise", "if len(array) != length: raise ValueError(\"Could not construct vector in", "from ISO 8601 string\"\"\" return datetime_parser.parse(iso_string) valid_data_types = _DataTypes() def", "Returns ------- results : object the value cast to the", "construct vector in Python Client. Expected vector of length %s,", "Copyright (c) 2015 Intel Corporation # # Licensed under the", "def standardize_schema(schema): return [(name, _DataTypes.get_from_type(t)) for name, t in schema]", "same type) Parameters ---------- data_type : type valid data type", "doubles except: # also support json or comma-sep string if", "to_type) + str(e)) @staticmethod def datetime_from_iso(iso_string): \"\"\"create datetime object from", "return _primitive_type_to_default_value[data_type] except KeyError: if data_type == vector: return []", "result : type valid data type for given type Examples", "def __repr__(self): return \"vector(%d)\" % self.length vector = _Vector class", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "item in value.split(',') if item] array = np.array(value, dtype=np.float64) #", "@staticmethod def get_constructor(to_type): \"\"\"gets the constructor for the to_type\"\"\" try:", "License, Version 2.0 (the \"License\"); # you may not use", "_primitive_type_to_str_table or data_type in _primitive_alias_type_to_type_table @staticmethod def is_complex_type(data_type): try: return", "type that can be serialized to bson if neccessary.\"\"\" if", 
"------- result : type valid data type for given type", "or comma-sep string if valid_data_types.value_is_string(value): try: value = json.loads(value) except:", "# You may obtain a copy of the License at", "None return ft(value) return float_constructor def datetime_constructor(value): \"\"\"Creates special constructor", "valid_data_types.to_string(float32) 'float32' \"\"\" valid_data_type = _DataTypes.get_from_type(data_type) try: return _primitive_type_to_str_table[valid_data_type] except", "unicode\"\"\" return isinstance(value, basestring) @staticmethod def value_is_missing_value(value): return value is", "objects from x using: numpy.datatime64(x.isoformat()) class _Vector(object): base_type = np.ndarray", "data_type_str) @staticmethod def is_primitive_type(data_type): return data_type in _primitive_type_to_str_table or data_type", "the data type for the given type (often it will", "value cast to the to_type Examples -------- >>> valid_data_types.cast(3, float64)", "get_from_string(data_type_str): return _Vector(_Vector.re_pattern.match(data_type_str).group(1)) def __repr__(self): return \"vector(%d)\" % self.length vector", "valid_data_types.cast(None, str) None >>> valid_data_types.cast(np.inf, float32) None \"\"\" if _DataTypes.value_is_missing_value(value):", "return None elif _DataTypes.is_primitive_type(to_type) and type(value) is to_type: # Optimization", "if isinstance(obj, vector.base_type): return obj.tolist() if isinstance(obj, datetime): return obj.isoformat()", "try: return _primitive_type_to_str_table[valid_data_type] except KeyError: # complex data types should", "\".join(sorted(_primitive_str_to_type_table.keys() + [\"vector(n)\"])) + aliases @staticmethod def value_is_string(value): \"\"\"get bool", "the given value cast to the given type. 
None is", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "\"list\", TODO unicode: \"unicode\", ignore: \"ignore\", datetime: \"datetime\", } #", "entirely made of doubles else: raise array = np.atleast_1d(array) #", "a ValueError is raised Returns ------- result : type valid", "data types should use their repr return repr(valid_data_type) @staticmethod def", "the value cast to the to_type Examples -------- >>> valid_data_types.cast(3,", "Optimization return value try: constructor = _DataTypes.get_constructor(to_type) result = constructor(value)", "string representation of the given type Parameters ---------- data_type :", "_primitive_alias_type_to_type_table = { float: float64, int: int32, long: int64, str:", "faster than calling np.isfinite() return None return ft(value) return float_constructor", "for the given type (often it will return the same", "def validate_data(schema, data): return [_DataTypes.cast(value, data_type) for value, data_type in", "datetime import dateutil.parser as datetime_parser # Chose python's datetime over", "TODO - consider server providing types, similar to commands __all__", "unicode, #list: vector, } _primitive_alias_str_to_type_table = dict([(alias.__name__, t) for alias,", "'int64', 'vector', 'unit', 'datetime'] import numpy as np import json", "np.int32 int64 = np.int64 from datetime import datetime import dateutil.parser", "of doubles else: raise array = np.atleast_1d(array) # numpy thing,", "identifier _primitive_type_to_str_table = { #bool: \"bool\", TODO #bytearray: \"bytearray\", TODO", "_primitive_type_to_default_value = { #bool: False, TODO float32: 0.0, float64: 0.0,", "str; if invalid, a ValueError is raised Returns ------- result", "type %s (invalid data type)\" % data_type) @staticmethod def cast(value,", "'4.5' >>> valid_data_types.cast(None, str) None >>> valid_data_types.cast(np.inf, float32) None \"\"\"", "\".join(sorted([\"%s->%s\" % (alias.__name__, self.to_string(data_type)) for alias, 
data_type in _primitive_alias_type_to_type_table.iteritems()]))) return", "under the License. # \"\"\" trusted_analytics definitions for Data Types", "(often it will return the same type) Parameters ---------- data_type", "np.int64 from datetime import datetime import dateutil.parser as datetime_parser #", "should use their repr return repr(valid_data_type) @staticmethod def get_from_string(data_type_str): \"\"\"", "class _DataTypes(object): \"\"\" Provides functions with define and operate on", "@staticmethod def get_from_type(data_type): \"\"\" Returns the data type for the", "type valid data type for given type Examples -------- >>>", "@staticmethod def value_is_missing_value(value): return value is None or (type(value) in", "the License for the specific language governing permissions and #", "Special handling for missing values return None elif _DataTypes.is_primitive_type(to_type) and", "and convert it to a type that can be serialized", "vector of length %s, but received length %d\" % (length,", "the cast Returns ------- results : object the value cast", "type (often it will return the same type) Parameters ----------", "except AttributeError: if to_type == float64 or to_type == float32:", "Apache License, Version 2.0 (the \"License\"); # you may not", "import\"\"\" pass ignore = _Ignore class _Unknown(object): \"\"\"Unknown type used", "\"int64\", #list: \"list\", TODO unicode: \"unicode\", ignore: \"ignore\", datetime: \"datetime\",", "object and convert it to a type that can be", "either express or implied. # See the License for the", "string representation Parameters ---------- data_type_str : str valid data type", "str) '4.5' >>> valid_data_types.cast(None, str) None >>> valid_data_types.cast(np.inf, float32) None", "result : str string representation Examples -------- >>> valid_data_types.to_string(float32) 'float32'", "is entirely made of doubles else: raise array = np.atleast_1d(array)", "Client. 
Expected vector of length %s, but received length %d\"", "valid_data_types.cast(4.5, str) '4.5' >>> valid_data_types.cast(None, str) None >>> valid_data_types.cast(np.inf, float32)", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "or isinstance(obj, float64): return float(obj) if isinstance(obj, int32): return int(obj)", "may be aliased for a valid data type; if invalid,", "used for schemas during file import\"\"\" pass unit = _Unit", "during file import\"\"\" pass ignore = _Ignore class _Unknown(object): \"\"\"Unknown", "that may be aliased for a valid data type; if", "\"\\n(and aliases: %s)\" % (\", \".join(sorted([\"%s->%s\" % (alias.__name__, self.to_string(data_type)) for", "type or type that may be aliased for a valid", "type for the given type (often it will return the", "str(e)) @staticmethod def datetime_from_iso(iso_string): \"\"\"create datetime object from ISO 8601", "def get_float_constructor(float_type): \"\"\"Creates special constructor for floating point types which", "1 will still have dimension and length if len(array) !=", "given type Parameters ---------- data_type : type valid data type;", "= { float: float64, int: int32, long: int64, str: unicode,", "for datetime parsing\"\"\" if valid_data_types.value_is_string(value): return datetime_parser.parse(value) else: try: return", "cast(value, to_type): \"\"\" Returns the given value cast to the", "to a type that can be serialized to bson if", "or data_type in _primitive_alias_type_to_type_table @staticmethod def is_complex_type(data_type): try: return data_type.is_complex_type", "-inf\"\"\" ft = float_type def float_constructor(value): result = ft(value) if", "= re.compile(r\"^vector\\((\\d+)\\)$\") def __init__(self, length): self.length = int(length) self.is_complex_type =", "try: return _primitive_str_to_type_table[data_type_str] except KeyError: try: return _primitive_alias_str_to_type_table[data_type_str] except KeyError:", "will still have dimension 
and length if len(array) != length:", "data_type in _primitive_alias_type_to_type_table.iteritems()]))) return \", \".join(sorted(_primitive_str_to_type_table.keys() + [\"vector(n)\"])) + aliases", "_DataTypes.is_complex_type(data_type): return data_type raise ValueError(\"Unsupported type %s\" % data_type) @staticmethod", "to their string identifier _primitive_type_to_str_table = { #bool: \"bool\", TODO", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "re.compile(r\"^vector\\((\\d+)\\)$\") def __init__(self, length): self.length = int(length) self.is_complex_type = True", "raise ValueError(\"Unsupported type string '%s' \" % data_type_str) @staticmethod def", "datetime.now() raise ValueError(\"Unable to find default value for data type", "types. \"\"\" def __contains__(self, item): try: self.validate(item) return True except", "and operate on supported data types. \"\"\" def __contains__(self, item):", "numpy.int32 \"\"\" if _DataTypes.is_primitive_alias_type(data_type): return _primitive_alias_type_to_type_table[data_type] if _DataTypes.is_primitive_type(data_type) or _DataTypes.is_complex_type(data_type):", "data type for given type Examples -------- >>> valid_data_types.get_from_type(int) numpy.int32", "as np import json import re # alias numpy types", "for the cast Returns ------- results : object the value", "type is indeterminate\"\"\" pass unknown = _Unknown # map types", "float32: \"float32\", float64: \"float64\", int32: \"int32\", int64: \"int64\", #list: \"list\",", "constructor array = np.array(value, dtype=np.float64) # ensures the array is", "not construct vector in Python Client. 
Expected vector of length", "def value_is_missing_value(value): return value is None or (type(value) in [float32,", "else result except Exception as e: raise ValueError((\"Unable to cast", "in obj.items()]) if isinstance(obj, list): return [numpy_to_bson_friendly(item) for item in", "def get_primitive_data_types(): return _primitive_type_to_str_table.keys() @staticmethod def to_string(data_type): \"\"\" Returns the", "def _get_constructor(self): length = self.length def constructor(value): \"\"\" Creates a", "\"\"\" if _DataTypes.is_primitive_alias_type(data_type): return _primitive_alias_type_to_type_table[data_type] if _DataTypes.is_primitive_type(data_type) or _DataTypes.is_complex_type(data_type): return", "numpy as np import json import re # alias numpy", "= dict([(alias.__name__, t) for alias, t in _primitive_alias_type_to_type_table.iteritems()]) _primitive_type_to_default_value =", "type Examples -------- >>> valid_data_types.get_from_type(int) numpy.int32 \"\"\" if _DataTypes.is_primitive_alias_type(data_type): return", "float] and (np.isnan(value) or value in [np.inf, -np.inf])) @staticmethod def", "ValueError(\"Unsupported type %s\" % data_type) @staticmethod def validate(data_type): \"\"\"Raises a", "as None Parameters ---------- value : object value to convert", "name, t in schema] @staticmethod def validate_data(schema, data): return [_DataTypes.cast(value,", "float_constructor def datetime_constructor(value): \"\"\"Creates special constructor for datetime parsing\"\"\" if", "trusted_analytics definitions for Data Types \"\"\" # TODO - consider", "__repr__(self): aliases = \"\\n(and aliases: %s)\" % (\", \".join(sorted([\"%s->%s\" %", "except KeyError: try: return vector.get_from_string(data_type_str) except: raise ValueError(\"Unsupported type string", "Python Client. 
Expected vector of length %s, but received length", "types which handles nan, inf, -inf\"\"\" ft = float_type def", "of doubles except: # also support json or comma-sep string", "a string, whether str or unicode\"\"\" return isinstance(value, basestring) @staticmethod", "representation Examples -------- >>> valid_data_types.to_string(float32) 'float32' \"\"\" valid_data_type = _DataTypes.get_from_type(data_type)", "use their repr return repr(valid_data_type) @staticmethod def get_from_string(data_type_str): \"\"\" Returns", "value = [np.float64(item.strip()) for item in value.split(',') if item] array", "_Unknown # map types to their string identifier _primitive_type_to_str_table =", "for t, s in _primitive_type_to_str_table.iteritems()]) _primitive_alias_type_to_type_table = { float: float64,", "datetime_from_iso(iso_string): \"\"\"create datetime object from ISO 8601 string\"\"\" return datetime_parser.parse(iso_string)", "<reponame>blbarker/atk # vim: set encoding=utf-8 # # Copyright (c) 2015", "data_type) for value, data_type in zip(data, map(lambda t: t[1], schema))]", "raise TypeError(\"cannot convert type to the datetime\") class _DataTypes(object): \"\"\"", "datetime_constructor(value): \"\"\"Creates special constructor for datetime parsing\"\"\" if valid_data_types.value_is_string(value): return", "self.to_string(data_type)) for alias, data_type in _primitive_alias_type_to_type_table.iteritems()]))) return \", \".join(sorted(_primitive_str_to_type_table.keys() +", "\"License\"); # you may not use this file except in", "given type Examples -------- >>> valid_data_types.get_from_type(int) numpy.int32 \"\"\" if _DataTypes.is_primitive_alias_type(data_type):", "data_type in _primitive_alias_type_to_type_table @staticmethod def get_from_type(data_type): \"\"\" Returns the data", "def numpy_to_bson_friendly(obj): \"\"\"take an object and convert it to a", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "== float32: return 
get_float_constructor(to_type) if to_type == datetime: return datetime_constructor", "datetime_parser.parse(iso_string) valid_data_types = _DataTypes() def numpy_to_bson_friendly(obj): \"\"\"take an object and", "float64, int: int32, long: int64, str: unicode, #list: vector, }", "dtype=np.float64) # ensures the array is entirely made of doubles", "unicode \"\"\" try: return _primitive_str_to_type_table[data_type_str] except KeyError: try: return _primitive_alias_str_to_type_table[data_type_str]", "\"\"\" Provides functions with define and operate on supported data", "the given type Parameters ---------- data_type : type valid data", "which can be one of many types \"\"\" if value", "KeyError: # complex data types should use their repr return", "data_type in schema] @staticmethod def get_default_type_value(data_type): try: return _primitive_type_to_default_value[data_type] except", "# distributed under the License is distributed on an \"AS", "return constructor @staticmethod def standardize_schema(schema): return [(name, _DataTypes.get_from_type(t)) for name,", "float64) 3.0 >>> valid_data_types.cast(4.5, str) '4.5' >>> valid_data_types.cast(None, str) None", "# Unless required by applicable law or agreed to in", "try: return datetime(*value) except: raise TypeError(\"cannot convert type to the", "'%s' \" % data_type_str) @staticmethod def is_primitive_type(data_type): return data_type in", "valid data type str; if invalid, a ValueError is raised", "data_type : type valid data type or type that may", "constructor for datetime parsing\"\"\" if valid_data_types.value_is_string(value): return datetime_parser.parse(value) else: try:", "None or (type(value) in [float32, float64, float] and (np.isnan(value) or", "%s (invalid data type)\" % data_type) @staticmethod def cast(value, to_type):", "'ignore', 'unknown', 'float32', 'float64', 'int32', 'int64', 'vector', 'unit', 'datetime'] import", "= int(length) self.is_complex_type = True self.constructor = 
self._get_constructor() def _get_constructor(self):", "_DataTypes.is_primitive_alias_type(data_type): return _primitive_alias_type_to_type_table[data_type] if _DataTypes.is_primitive_type(data_type) or _DataTypes.is_complex_type(data_type): return data_type raise", "AttributeError: return False @staticmethod def is_primitive_alias_type(data_type): return data_type in _primitive_alias_type_to_type_table", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "_primitive_type_to_str_table[valid_data_type] except KeyError: # complex data types should use their", "in _primitive_type_to_str_table or data_type in _primitive_alias_type_to_type_table @staticmethod def is_complex_type(data_type): try:", "datetime_constructor def constructor(value): if value is None: return None return", "datetime: return datetime_constructor def constructor(value): if value is None: return", "\"datetime\", } def get_float_constructor(float_type): \"\"\"Creates special constructor for floating point", "= True self.constructor = self._get_constructor() def _get_constructor(self): length = self.length", "is always returned as None Parameters ---------- value : object", "= np.ndarray re_pattern = re.compile(r\"^vector\\((\\d+)\\)$\") def __init__(self, length): self.length =", "self.length = int(length) self.is_complex_type = True self.constructor = self._get_constructor() def", "_DataTypes.is_primitive_type(to_type) and type(value) is to_type: # Optimization return value try:", "False def __repr__(self): aliases = \"\\n(and aliases: %s)\" % (\",", "You may obtain a copy of the License at #", "aliases @staticmethod def value_is_string(value): \"\"\"get bool indication that value is", "the data type for the given type string representation Parameters", "'float32' \"\"\" valid_data_type = _DataTypes.get_from_type(data_type) try: return _primitive_type_to_str_table[valid_data_type] except KeyError:", "KeyError: try: return _primitive_alias_str_to_type_table[data_type_str] except KeyError: 
try: return vector.get_from_string(data_type_str) except:", "validate(data_type): \"\"\"Raises a ValueError if data_type is not a valid", "to_type): \"\"\" Returns the given value cast to the given", "return None if _DataTypes.value_is_missing_value(result) else result except Exception as e:", "string\"\"\" return datetime_parser.parse(iso_string) valid_data_types = _DataTypes() def numpy_to_bson_friendly(obj): \"\"\"take an", "in _primitive_type_to_str_table.iteritems()]) _primitive_alias_type_to_type_table = { float: float64, int: int32, long:", "Corporation # # Licensed under the Apache License, Version 2.0", "return repr(valid_data_type) @staticmethod def get_from_string(data_type_str): \"\"\" Returns the data type", "%d\" % (length, len(array))) return array return constructor @staticmethod def", "__repr__(self): return \"vector(%d)\" % self.length vector = _Vector class _Unit(object):", "the to_type\"\"\" try: return to_type.constructor except AttributeError: if to_type ==", "_DataTypes.value_is_missing_value(value): # Special handling for missing values return None elif", "returned as None Parameters ---------- value : object value to", "the Apache License, Version 2.0 (the \"License\"); # you may", "\"\"\"create datetime object from ISO 8601 string\"\"\" return datetime_parser.parse(iso_string) valid_data_types", "int: int32, long: int64, str: unicode, #list: vector, } _primitive_alias_str_to_type_table", "except Exception as e: raise ValueError((\"Unable to cast to type", "% data_type_str) @staticmethod def is_primitive_type(data_type): return data_type in _primitive_type_to_str_table or", "@staticmethod def is_complex_type(data_type): try: return data_type.is_complex_type except AttributeError: return False", "@staticmethod def get_default_data_for_schema(schema): return [_DataTypes.get_default_type_value(data_type) for name, data_type in schema]", "{ #bool: False, TODO float32: 0.0, float64: 0.0, int32: 0,", "} def get_float_constructor(float_type): 
\"\"\"Creates special constructor for floating point types", "comma-sep string if valid_data_types.value_is_string(value): try: value = json.loads(value) except: value", "try: return data_type.is_complex_type except AttributeError: return False @staticmethod def is_primitive_alias_type(data_type):", "indeterminate\"\"\" pass unknown = _Unknown # map types to their", "def get_default_data_for_schema(schema): return [_DataTypes.get_default_type_value(data_type) for name, data_type in schema] @staticmethod", "[_DataTypes.get_default_type_value(data_type) for name, data_type in schema] @staticmethod def get_default_type_value(data_type): try:", ": object the value cast to the to_type Examples --------", "data type to use for the cast Returns ------- results" ]
[ "# Avoid \"AttributeError: # 'LTAnno' object has no attribute 'fontname'\"", "objDict(ltanno) ''' class DocFragment: def __init__(self, text, fontname, size): self.text", "child.fontname fontSize = child.size # Avoid \"AttributeError: # 'LTAnno' object", "col = None cols = 0 if self.colStarts is not", "LTAnno, LAParams, LTTextBox, LTTextLine except ModuleNotFoundError: prerr(\"To use the aggregator", "try: # from PDFPageDetailedAggregator: from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines from", "for \\\"colStarts\\\".\") # if isinstance(child, LTChar): ''' try: fontName =", "= child.size frag = frag_dict( child.get_text(), child.fontname, child.size, ) fragments.append(frag)", "child_str = '' fontSize = None fontName = None fontSizes", "chunk = DocChunk( page_number, col, item.bbox, child_str, fontName=fontName, fontSize=fontSize, fragments=fragments,", "size): self.text = text self.fontname = fontname self.size = size", "print(\"columns: {}\".format(len(self.colStarts))) self.page_number = 0 def receive_layout(self, ltpage): def render(item,", "fontname and size. \"\"\" ffn = fragment.fontname ffs = fragment.size", "frag_dict, ) def ltannoDict(ltanno): return objDict(ltanno) ''' class DocFragment: def", "self.fontname = fontname self.size = size def sameStyle(self, fragment): \"\"\"", "= fragment.fontname ffs = fragment.size return (ffs == self.size) and", "= 0 def receive_layout(self, ltpage): def render(item, page_number): if isinstance(item,", "None fontName = None fontSizes = [] fontNames = []", "None cols = 0 if self.colStarts is not None: cols", "self.page_number) self.page_number += 1 self.chunks = sorted(self.chunks, key = lambda", "size. 
\"\"\" ffn = fragment.fontname ffs = fragment.size return (ffs", "is not None: print(\"columns: {}\".format(len(self.colStarts))) self.page_number = 0 def receive_layout(self,", "fontNames.append(fontName) # fontSizes.append(fontSize) parts.append(strp) elif isinstance(child, LTAnno): child_str += child.get_text()", "class is based on PDFPageDetailedAggregator from lindblandro's Oct 4 '13", "continue...\") \"\"\" fontSize = None fontName = None col =", "same_style, frag_dict, ) def ltannoDict(ltanno): return objDict(ltanno) ''' class DocFragment:", "pdfminer.converter import PDFPageAggregator from pdfminer.layout import LTPage, LTChar, LTAnno, LAParams,", "text self.fontname = fontname self.size = size def sameStyle(self, fragment):", "{} parts {}\" \"\".format(child_str, warnings, fontNames, fontSizes, parts)) input(\"Press enter", "4 '13 at 10:33 answer edited by slushy Feb 4", "child.get_text(), child.fontname, child.size, ) fragments.append(frag) # fontNames.append(fontName) # fontSizes.append(fontSize) parts.append(strp)", "def ltannoDict(ltanno): return objDict(ltanno) ''' class DocFragment: def __init__(self, text,", "fontSize is not None: if fontSize != child.size: warnings.append(\"mixed fontSize\")", "= 0 elif (cols == 2): col = 0 col2Min", "and (len(strp) > 0) if fontName is not None: if", "item.bbox[0] >= col2Min: col = 1 # Index [1] is", "edited by slushy Feb 4 '14 at 23:41 at <https://stackoverflow.com/a/19179114>", "aggregator (required for generating chunks.json)\" \" you must first install", "__init__(self, rsrcmgr, pageno=1, laparams=None, colStarts=None): PDFPageAggregator.__init__(self, rsrcmgr, pageno=pageno, laparams=laparams) self.chunks", "first install the following module for Python:\") prerr(\" pdfminer\") exit(1)", "slushy Feb 4 '14 at 23:41 at <https://stackoverflow.com/a/19179114> on <https://stackoverflow.com/questions/15737806/extract-text-using-", "= frag_dict( child.get_text(), child.fontname, child.size, ) fragments.append(frag) # 
fontNames.append(fontName) #", "fontSize = None fontName = None fontSizes = [] fontNames", "annotations = [] for child in item: strp = None", "isinstance(item, LTTextLine): child_str = '' fontSize = None fontName =", "rsrcmgr, pageno=1, laparams=None, colStarts=None): PDFPageAggregator.__init__(self, rsrcmgr, pageno=pageno, laparams=laparams) self.chunks =", "must first install the following module for Python:\") prerr(\" pdfminer\")", "ModuleNotFoundError: prerr(\"To use the aggregator (required for generating chunks.json)\" \"", "(same as None) or 2\" \" is implemented for \\\"colStarts\\\".\")", "col, item.bbox, child_str, fontName=fontName, fontSize=fontSize, fragments=fragments, annotations=annotations, ) chunk.groupFragments() self.chunks.append(chunk)", "len(warnings) > 0: \"\"\" print(\"Warnings in \\\"{}\\\":\" \" {}: fonts", "[] fragments = [] annotations = [] for child in", "# if isinstance(child, LTChar): ''' try: fontName = child.fontname fontSize", "as None) or 2\" \" is implemented for \\\"colStarts\\\".\") #", "+= child.get_text() strp = child.get_text().strip() # and (len(strp) > 0)", "math try: # from PDFPageDetailedAggregator: from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines", "use the aggregator (required for generating chunks.json)\" \" you must", "fragment): \"\"\" Is same fontname and size. 
\"\"\" ffn =", "None) or (cols == 1): col = 0 elif (cols", "Python:\") prerr(\" pdfminer\") exit(1) try: input = raw_input except NameError:", "install the following module for Python:\") prerr(\" pdfminer\") exit(1) try:", "raise ValueError(\"Only a list of length 1 (same as None)", "for Python:\") prerr(\" pdfminer\") exit(1) try: input = raw_input except", "LTChar): child_str += child.get_text() strp = child.get_text().strip() # and (len(strp)", "= sorted(self.chunks, key = lambda f: (f.pageid, f.column, -f.bbox.y1)) self.result", "child_str, fontName=fontName, fontSize=fontSize, fragments=fragments, annotations=annotations, ) chunk.groupFragments() self.chunks.append(chunk) for child", "enter to continue...\") \"\"\" fontSize = None fontName = None", "else: raise ValueError(\"Only a list of length 1 (same as", "PDFPageAggregator.__init__(self, rsrcmgr, pageno=pageno, laparams=laparams) self.chunks = [] self.colStarts = colStarts", "if (cols is None) or (cols == 1): col =", "# from PDFPageDetailedAggregator: from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines from pdfminer.pdfparser", ") fragments.append(frag) # fontNames.append(fontName) # fontSizes.append(fontSize) parts.append(strp) elif isinstance(child, LTAnno):", "in item: strp = None if isinstance(child, LTChar): child_str +=", "''' class PDFPageDetailedAggregator(PDFPageAggregator): \"\"\" This class is based on PDFPageDetailedAggregator", "pdfminer.layout import LTPage, LTChar, LTAnno, LAParams, LTTextBox, LTTextLine except ModuleNotFoundError:", "PDFResourceManager, PDFPageInterpreter from pdfminer.converter import PDFPageAggregator from pdfminer.layout import LTPage,", "math.floor(self.colStarts[1]) if item.bbox[0] >= col2Min: col = 1 # Index", "child in item: render(child, page_number) return render(ltpage, self.page_number) self.page_number +=", "same fontname and size. 
\"\"\" ffn = fragment.fontname ffs =", "return (ffs == self.size) and (ffn == self.fontname) def clean(self):", "None: cols = len(self.colStarts) if (cols is None) or (cols", "def __init__(self, rsrcmgr, pageno=1, laparams=None, colStarts=None): PDFPageAggregator.__init__(self, rsrcmgr, pageno=pageno, laparams=laparams)", "child.size, ) fragments.append(frag) # fontNames.append(fontName) # fontSizes.append(fontSize) parts.append(strp) elif isinstance(child,", "2. else: raise ValueError(\"Only a list of length 1 (same", "= None fontName = None fontSizes = [] fontNames =", "is column 2. else: raise ValueError(\"Only a list of length", "# Python 3 pass # TODO: from srd import (", "if item.bbox[0] >= col2Min: col = 1 # Index [1]", "warnings.append(\"mixed fontSize\") fontName = child.fontname fontSize = child.size frag =", "LTPage) or isinstance(item, LTTextBox): for child in item: render(child, page_number)", "by slushy Feb 4 '14 at 23:41 at <https://stackoverflow.com/a/19179114> on", "fragment.size return (ffs == self.size) and (ffn == self.fontname) def", "LTChar): ''' try: fontName = child.fontname fontSize = child.size #", "child_str += child.get_text() strp = child.get_text().strip() # and (len(strp) >", "clean_frag, same_style, frag_dict, ) def ltannoDict(ltanno): return objDict(ltanno) ''' class", "[] warnings = [] parts = [] fragments = []", "= [] for child in item: strp = None if", "self.text = clean_frag_text(self.text) ''' class PDFPageDetailedAggregator(PDFPageAggregator): \"\"\" This class is", "1 (same as None) or 2\" \" is implemented for", "= colStarts if self.colStarts is not None: print(\"columns: {}\".format(len(self.colStarts))) self.page_number", "import math try: # from PDFPageDetailedAggregator: from pdfminer.pdfdocument import PDFDocument,", "child.fontname, child.size, ) fragments.append(frag) # fontNames.append(fontName) # fontSizes.append(fontSize) parts.append(strp) elif", "sameStyle(self, fragment): \"\"\" Is same fontname and size. 
\"\"\" ffn", "elif isinstance(item, LTTextLine): child_str = '' fontSize = None fontName", "exit(1) try: input = raw_input except NameError: # Python 3", "if len(warnings) > 0: \"\"\" print(\"Warnings in \\\"{}\\\":\" \" {}:", "ffn = fragment.fontname ffs = fragment.size return (ffs == self.size)", "= '' fontSize = None fontName = None fontSizes =", "== 2): col = 0 col2Min = math.floor(self.colStarts[1]) if item.bbox[0]", "= None cols = 0 if self.colStarts is not None:", "ltannoDict(ltanno): return objDict(ltanno) ''' class DocFragment: def __init__(self, text, fontname,", "{}\".format(len(self.colStarts))) self.page_number = 0 def receive_layout(self, ltpage): def render(item, page_number):", "PDFParser from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter from pdfminer.converter import PDFPageAggregator", "= child.size # Avoid \"AttributeError: # 'LTAnno' object has no", "from srd import ( objDict, BBox, DocChunk, clean_frag_text, clean_frag, same_style,", "child.get_text() strp = child.get_text().strip() # and (len(strp) > 0) if", "'fontname'\" except AttributeError as ex: print(\"dir(LTTextLine): {}\".format(dir(LTTextLine))) print(\"dir(child): {}\".format(dir(child))) raise", "fontName=fontName, fontSize=fontSize, fragments=fragments, annotations=annotations, ) chunk.groupFragments() self.chunks.append(chunk) for child in", "self.chunks = [] self.colStarts = colStarts if self.colStarts is not", "'LTAnno' object has no attribute 'fontname'\" except AttributeError as ex:", "fontNames = [] warnings = [] parts = [] fragments", "= child.get_text().strip() # and (len(strp) > 0) if fontName is", "not None: if fontName != child.fontname: warnings.append(\"mixed fontName\") if fontSize", "if self.colStarts is not None: cols = len(self.colStarts) if (cols", "class DocFragment: def __init__(self, text, fontname, size): self.text = text", "warnings, fontNames, fontSizes, parts)) input(\"Press enter to continue...\") \"\"\" fontSize", "# 
fontNames.append(fontName) # fontSizes.append(fontSize) parts.append(strp) elif isinstance(child, LTAnno): child_str +=", "class PDFPageDetailedAggregator(PDFPageAggregator): \"\"\" This class is based on PDFPageDetailedAggregator from", "sizes {} parts {}\" \"\".format(child_str, warnings, fontNames, fontSizes, parts)) input(\"Press", "AttributeError as ex: print(\"dir(LTTextLine): {}\".format(dir(LTTextLine))) print(\"dir(child): {}\".format(dir(child))) raise ex '''", "self.text = text self.fontname = fontname self.size = size def", "print(\"dir(child): {}\".format(dir(child))) raise ex ''' chunk = DocChunk( page_number, col,", "child.get_text().strip() # and (len(strp) > 0) if fontName is not", "2\" \" is implemented for \\\"colStarts\\\".\") # if isinstance(child, LTChar):", "size def sameStyle(self, fragment): \"\"\" Is same fontname and size.", "is None) or (cols == 1): col = 0 elif", "ffs = fragment.size return (ffs == self.size) and (ffn ==", "is not None: cols = len(self.colStarts) if (cols is None)", "fontName != child.fontname: warnings.append(\"mixed fontName\") if fontSize is not None:", "self.size) and (ffn == self.fontname) def clean(self): self.text = clean_frag_text(self.text)", "\"\"\" This class is based on PDFPageDetailedAggregator from lindblandro's Oct", ">= col2Min: col = 1 # Index [1] is column", "strp = None if isinstance(child, LTChar): child_str += child.get_text() strp", "child.get_text().strip() annotations.append(ltannoDict(child)) child_str = ' '.join(child_str.split()).strip() if child_str: if len(warnings)", "parts = [] fragments = [] annotations = [] for", "parts.append(strp) elif isinstance(child, LTAnno): child_str += child.get_text() strp = child.get_text().strip()", "item: render(child, page_number) elif isinstance(item, LTTextLine): child_str = '' fontSize", "fontName = child.fontname fontSize = child.size # Avoid \"AttributeError: #", "render(child, page_number) elif isinstance(item, LTTextLine): child_str = '' fontSize =", 
"' '.join(child_str.split()).strip() if child_str: if len(warnings) > 0: \"\"\" print(\"Warnings", "elif (cols == 2): col = 0 col2Min = math.floor(self.colStarts[1])", "None: if fontSize != child.size: warnings.append(\"mixed fontSize\") fontName = child.fontname", "\"\"\" fontSize = None fontName = None col = None", "= DocChunk( page_number, col, item.bbox, child_str, fontName=fontName, fontSize=fontSize, fragments=fragments, annotations=annotations,", "fragment.fontname ffs = fragment.size return (ffs == self.size) and (ffn", "page_number, col, item.bbox, child_str, fontName=fontName, fontSize=fontSize, fragments=fragments, annotations=annotations, ) chunk.groupFragments()", "LAParams, LTTextBox, LTTextLine except ModuleNotFoundError: prerr(\"To use the aggregator (required", "3 pass # TODO: from srd import ( objDict, BBox,", "frag_dict( child.get_text(), child.fontname, child.size, ) fragments.append(frag) # fontNames.append(fontName) # fontSizes.append(fontSize)", "import PDFParser from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter from pdfminer.converter import", "LTChar, LTAnno, LAParams, LTTextBox, LTTextLine except ModuleNotFoundError: prerr(\"To use the", "def render(item, page_number): if isinstance(item, LTPage) or isinstance(item, LTTextBox): for", "annotations.append(ltannoDict(child)) child_str = ' '.join(child_str.split()).strip() if child_str: if len(warnings) >", "child in item: strp = None if isinstance(child, LTChar): child_str", "receive_layout(self, ltpage): def render(item, page_number): if isinstance(item, LTPage) or isinstance(item,", "is based on PDFPageDetailedAggregator from lindblandro's Oct 4 '13 at", "(cols is None) or (cols == 1): col = 0", "= child.get_text().strip() annotations.append(ltannoDict(child)) child_str = ' '.join(child_str.split()).strip() if child_str: if", "pdfminer\") exit(1) try: input = raw_input except NameError: # Python", "Feb 4 '14 at 23:41 at <https://stackoverflow.com/a/19179114> on 
<https://stackoverflow.com/questions/15737806/extract-text-using- pdfminer-and-pypdf2-merges-columns>.", "4 '14 at 23:41 at <https://stackoverflow.com/a/19179114> on <https://stackoverflow.com/questions/15737806/extract-text-using- pdfminer-and-pypdf2-merges-columns>. \"\"\"", "laparams=None, colStarts=None): PDFPageAggregator.__init__(self, rsrcmgr, pageno=pageno, laparams=laparams) self.chunks = [] self.colStarts", "pageno=pageno, laparams=laparams) self.chunks = [] self.colStarts = colStarts if self.colStarts", "{}\".format(dir(LTTextLine))) print(\"dir(child): {}\".format(dir(child))) raise ex ''' chunk = DocChunk( page_number,", "= [] fontNames = [] warnings = [] parts =", "self.colStarts = colStarts if self.colStarts is not None: print(\"columns: {}\".format(len(self.colStarts)))", "fontSize=fontSize, fragments=fragments, annotations=annotations, ) chunk.groupFragments() self.chunks.append(chunk) for child in item:", "[] parts = [] fragments = [] annotations = []", "of length 1 (same as None) or 2\" \" is", "PDFPageInterpreter from pdfminer.converter import PDFPageAggregator from pdfminer.layout import LTPage, LTChar,", "at 10:33 answer edited by slushy Feb 4 '14 at", "23:41 at <https://stackoverflow.com/a/19179114> on <https://stackoverflow.com/questions/15737806/extract-text-using- pdfminer-and-pypdf2-merges-columns>. 
\"\"\" def __init__(self, rsrcmgr,", "from PDFPageDetailedAggregator: from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines from pdfminer.pdfparser import", "None if isinstance(child, LTChar): child_str += child.get_text() strp = child.get_text().strip()", "import PDFPageAggregator from pdfminer.layout import LTPage, LTChar, LTAnno, LAParams, LTTextBox,", "LTTextLine except ModuleNotFoundError: prerr(\"To use the aggregator (required for generating", "from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines from pdfminer.pdfparser import PDFParser from", "except NameError: # Python 3 pass # TODO: from srd", "lindblandro's Oct 4 '13 at 10:33 answer edited by slushy", "0 if self.colStarts is not None: cols = len(self.colStarts) if", "fragments = [] annotations = [] for child in item:", "not None: if fontSize != child.size: warnings.append(\"mixed fontSize\") fontName =", "= 0 if self.colStarts is not None: cols = len(self.colStarts)", "page_number) return render(ltpage, self.page_number) self.page_number += 1 self.chunks = sorted(self.chunks,", "self.colStarts is not None: cols = len(self.colStarts) if (cols is", "\\\"{}\\\":\" \" {}: fonts {} sizes {} parts {}\" \"\".format(child_str,", "= [] parts = [] fragments = [] annotations =", "= 1 # Index [1] is column 2. 
else: raise", "{}\" \"\".format(child_str, warnings, fontNames, fontSizes, parts)) input(\"Press enter to continue...\")", "fontname, size): self.text = text self.fontname = fontname self.size =", "fontName = child.fontname fontSize = child.size frag = frag_dict( child.get_text(),", "\"\"\" print(\"Warnings in \\\"{}\\\":\" \" {}: fonts {} sizes {}", "LTTextBox): for child in item: render(child, page_number) elif isinstance(item, LTTextLine):", "for child in item: strp = None if isinstance(child, LTChar):", "None: if fontName != child.fontname: warnings.append(\"mixed fontName\") if fontSize is", "at 23:41 at <https://stackoverflow.com/a/19179114> on <https://stackoverflow.com/questions/15737806/extract-text-using- pdfminer-and-pypdf2-merges-columns>. \"\"\" def __init__(self,", "if fontName is not None: if fontName != child.fontname: warnings.append(\"mixed", "annotations=annotations, ) chunk.groupFragments() self.chunks.append(chunk) for child in item: render(child, page_number)", "col = 0 elif (cols == 2): col = 0", "from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter from pdfminer.converter import PDFPageAggregator from", "def clean(self): self.text = clean_frag_text(self.text) ''' class PDFPageDetailedAggregator(PDFPageAggregator): \"\"\" This", "'13 at 10:33 answer edited by slushy Feb 4 '14", "fontName is not None: if fontName != child.fontname: warnings.append(\"mixed fontName\")", "if isinstance(child, LTChar): child_str += child.get_text() strp = child.get_text().strip() #", "import ( objDict, BBox, DocChunk, clean_frag_text, clean_frag, same_style, frag_dict, )", "chunks.json)\" \" you must first install the following module for", "> 0: \"\"\" print(\"Warnings in \\\"{}\\\":\" \" {}: fonts {}", "is not None: if fontName != child.fontname: warnings.append(\"mixed fontName\") if", "== self.size) and (ffn == self.fontname) def clean(self): self.text =", "import PDFDocument, PDFNoOutlines from pdfminer.pdfparser import PDFParser from 
pdfminer.pdfinterp import", "from pdfminer.layout import LTPage, LTChar, LTAnno, LAParams, LTTextBox, LTTextLine except", "!= child.size: warnings.append(\"mixed fontSize\") fontName = child.fontname fontSize = child.size", "DocChunk, clean_frag_text, clean_frag, same_style, frag_dict, ) def ltannoDict(ltanno): return objDict(ltanno)", "strp = child.get_text().strip() # and (len(strp) > 0) if fontName", "\"\"\" ffn = fragment.fontname ffs = fragment.size return (ffs ==", "try: fontName = child.fontname fontSize = child.size # Avoid \"AttributeError:", "= [] fragments = [] annotations = [] for child", "PDFPageAggregator from pdfminer.layout import LTPage, LTChar, LTAnno, LAParams, LTTextBox, LTTextLine", ") chunk.groupFragments() self.chunks.append(chunk) for child in item: render(child, page_number) return", "def __init__(self, text, fontname, size): self.text = text self.fontname =", "self.chunks = sorted(self.chunks, key = lambda f: (f.pageid, f.column, -f.bbox.y1))", "render(item, page_number): if isinstance(item, LTPage) or isinstance(item, LTTextBox): for child", "LTPage, LTChar, LTAnno, LAParams, LTTextBox, LTTextLine except ModuleNotFoundError: prerr(\"To use", "\" is implemented for \\\"colStarts\\\".\") # if isinstance(child, LTChar): '''", "= fragment.size return (ffs == self.size) and (ffn == self.fontname)", "or isinstance(item, LTTextBox): for child in item: render(child, page_number) elif", "len(self.colStarts) if (cols is None) or (cols == 1): col", "''' class DocFragment: def __init__(self, text, fontname, size): self.text =", "render(ltpage, self.page_number) self.page_number += 1 self.chunks = sorted(self.chunks, key =", "fontSizes, parts)) input(\"Press enter to continue...\") \"\"\" fontSize = None", "fontSize = None fontName = None col = None cols", "= [] warnings = [] parts = [] fragments =", "self.size = size def sameStyle(self, fragment): \"\"\" Is same fontname", "child_str = ' '.join(child_str.split()).strip() if child_str: if 
len(warnings) > 0:", "column 2. else: raise ValueError(\"Only a list of length 1", "None fontSizes = [] fontNames = [] warnings = []", "chunk.groupFragments() self.chunks.append(chunk) for child in item: render(child, page_number) return render(ltpage,", "(ffn == self.fontname) def clean(self): self.text = clean_frag_text(self.text) ''' class", "generating chunks.json)\" \" you must first install the following module", "NameError: # Python 3 pass # TODO: from srd import", "<https://stackoverflow.com/questions/15737806/extract-text-using- pdfminer-and-pypdf2-merges-columns>. \"\"\" def __init__(self, rsrcmgr, pageno=1, laparams=None, colStarts=None): PDFPageAggregator.__init__(self,", "None: print(\"columns: {}\".format(len(self.colStarts))) self.page_number = 0 def receive_layout(self, ltpage): def", "at <https://stackoverflow.com/a/19179114> on <https://stackoverflow.com/questions/15737806/extract-text-using- pdfminer-and-pypdf2-merges-columns>. \"\"\" def __init__(self, rsrcmgr, pageno=1,", ") def ltannoDict(ltanno): return objDict(ltanno) ''' class DocFragment: def __init__(self,", "= ' '.join(child_str.split()).strip() if child_str: if len(warnings) > 0: \"\"\"", "None fontName = None col = None cols = 0", "answer edited by slushy Feb 4 '14 at 23:41 at", "Is same fontname and size. \"\"\" ffn = fragment.fontname ffs", "fontSizes.append(fontSize) parts.append(strp) elif isinstance(child, LTAnno): child_str += child.get_text() strp =", "<https://stackoverflow.com/a/19179114> on <https://stackoverflow.com/questions/15737806/extract-text-using- pdfminer-and-pypdf2-merges-columns>. 
\"\"\" def __init__(self, rsrcmgr, pageno=1, laparams=None,", "child_str += child.get_text() strp = child.get_text().strip() annotations.append(ltannoDict(child)) child_str = '", "following module for Python:\") prerr(\" pdfminer\") exit(1) try: input =", "if isinstance(child, LTChar): ''' try: fontName = child.fontname fontSize =", "= [] annotations = [] for child in item: strp", "None) or 2\" \" is implemented for \\\"colStarts\\\".\") # if", "DocChunk( page_number, col, item.bbox, child_str, fontName=fontName, fontSize=fontSize, fragments=fragments, annotations=annotations, )", "self.page_number += 1 self.chunks = sorted(self.chunks, key = lambda f:", "fonts {} sizes {} parts {}\" \"\".format(child_str, warnings, fontNames, fontSizes,", "(cols == 2): col = 0 col2Min = math.floor(self.colStarts[1]) if", "def receive_layout(self, ltpage): def render(item, page_number): if isinstance(item, LTPage) or", "# and (len(strp) > 0) if fontName is not None:", "a list of length 1 (same as None) or 2\"", "warnings = [] parts = [] fragments = [] annotations", "This class is based on PDFPageDetailedAggregator from lindblandro's Oct 4", "ex: print(\"dir(LTTextLine): {}\".format(dir(LTTextLine))) print(\"dir(child): {}\".format(dir(child))) raise ex ''' chunk =", "= None col = None cols = 0 if self.colStarts", "fontName = None col = None cols = 0 if", "page_number) elif isinstance(item, LTTextLine): child_str = '' fontSize = None", "= [] self.colStarts = colStarts if self.colStarts is not None:", "PDFNoOutlines from pdfminer.pdfparser import PDFParser from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter", "[] self.colStarts = colStarts if self.colStarts is not None: print(\"columns:", "self.fontname) def clean(self): self.text = clean_frag_text(self.text) ''' class PDFPageDetailedAggregator(PDFPageAggregator): \"\"\"", "= None fontName = None col = None cols =", "fontSize\") fontName = child.fontname fontSize = child.size frag = frag_dict(", "2): col = 0 col2Min 
= math.floor(self.colStarts[1]) if item.bbox[0] >=", "on PDFPageDetailedAggregator from lindblandro's Oct 4 '13 at 10:33 answer", "pdfminer.pdfdocument import PDFDocument, PDFNoOutlines from pdfminer.pdfparser import PDFParser from pdfminer.pdfinterp", "and size. \"\"\" ffn = fragment.fontname ffs = fragment.size return", "col2Min: col = 1 # Index [1] is column 2.", "item: render(child, page_number) return render(ltpage, self.page_number) self.page_number += 1 self.chunks", "# TODO: from srd import ( objDict, BBox, DocChunk, clean_frag_text,", "= None fontSizes = [] fontNames = [] warnings =", "cols = 0 if self.colStarts is not None: cols =", "col2Min = math.floor(self.colStarts[1]) if item.bbox[0] >= col2Min: col = 1", "is not None: if fontSize != child.size: warnings.append(\"mixed fontSize\") fontName", "fragments.append(frag) # fontNames.append(fontName) # fontSizes.append(fontSize) parts.append(strp) elif isinstance(child, LTAnno): child_str", "child.fontname fontSize = child.size frag = frag_dict( child.get_text(), child.fontname, child.size,", "frag = frag_dict( child.get_text(), child.fontname, child.size, ) fragments.append(frag) # fontNames.append(fontName)", "pdfminer-and-pypdf2-merges-columns>. \"\"\" def __init__(self, rsrcmgr, pageno=1, laparams=None, colStarts=None): PDFPageAggregator.__init__(self, rsrcmgr,", "if isinstance(item, LTPage) or isinstance(item, LTTextBox): for child in item:", "# 'LTAnno' object has no attribute 'fontname'\" except AttributeError as", "fontName\") if fontSize is not None: if fontSize != child.size:", "ltpage): def render(item, page_number): if isinstance(item, LTPage) or isinstance(item, LTTextBox):", "(required for generating chunks.json)\" \" you must first install the", "> 0) if fontName is not None: if fontName !=", "# Index [1] is column 2. 
else: raise ValueError(\"Only a", "LTTextLine): child_str = '' fontSize = None fontName = None", "child.size frag = frag_dict( child.get_text(), child.fontname, child.size, ) fragments.append(frag) #", "1): col = 0 elif (cols == 2): col =", "item.bbox, child_str, fontName=fontName, fontSize=fontSize, fragments=fragments, annotations=annotations, ) chunk.groupFragments() self.chunks.append(chunk) for", "parts)) input(\"Press enter to continue...\") \"\"\" fontSize = None fontName", "= len(self.colStarts) if (cols is None) or (cols == 1):", "is implemented for \\\"colStarts\\\".\") # if isinstance(child, LTChar): ''' try:", "col = 0 col2Min = math.floor(self.colStarts[1]) if item.bbox[0] >= col2Min:", "== 1): col = 0 elif (cols == 2): col", "the following module for Python:\") prerr(\" pdfminer\") exit(1) try: input", "the aggregator (required for generating chunks.json)\" \" you must first", "pageno=1, laparams=None, colStarts=None): PDFPageAggregator.__init__(self, rsrcmgr, pageno=pageno, laparams=laparams) self.chunks = []", "= child.fontname fontSize = child.size frag = frag_dict( child.get_text(), child.fontname,", "fontNames, fontSizes, parts)) input(\"Press enter to continue...\") \"\"\" fontSize =", "fragments=fragments, annotations=annotations, ) chunk.groupFragments() self.chunks.append(chunk) for child in item: render(child,", "= fontname self.size = size def sameStyle(self, fragment): \"\"\" Is", "\" {}: fonts {} sizes {} parts {}\" \"\".format(child_str, warnings,", "for child in item: render(child, page_number) return render(ltpage, self.page_number) self.page_number", "child.fontname: warnings.append(\"mixed fontName\") if fontSize is not None: if fontSize", "Avoid \"AttributeError: # 'LTAnno' object has no attribute 'fontname'\" except", "child.size # Avoid \"AttributeError: # 'LTAnno' object has no attribute", "raise ex ''' chunk = DocChunk( page_number, col, item.bbox, child_str,", "srd import ( objDict, BBox, DocChunk, clean_frag_text, clean_frag, 
same_style, frag_dict,", "implemented for \\\"colStarts\\\".\") # if isinstance(child, LTChar): ''' try: fontName", "BBox, DocChunk, clean_frag_text, clean_frag, same_style, frag_dict, ) def ltannoDict(ltanno): return", "= text self.fontname = fontname self.size = size def sameStyle(self,", "fontSize = child.size frag = frag_dict( child.get_text(), child.fontname, child.size, )", "not None: cols = len(self.colStarts) if (cols is None) or", "has no attribute 'fontname'\" except AttributeError as ex: print(\"dir(LTTextLine): {}\".format(dir(LTTextLine)))", "strp = child.get_text().strip() annotations.append(ltannoDict(child)) child_str = ' '.join(child_str.split()).strip() if child_str:", "= clean_frag_text(self.text) ''' class PDFPageDetailedAggregator(PDFPageAggregator): \"\"\" This class is based", "input = raw_input except NameError: # Python 3 pass #", "isinstance(child, LTChar): ''' try: fontName = child.fontname fontSize = child.size", "raw_input except NameError: # Python 3 pass # TODO: from", "DocFragment: def __init__(self, text, fontname, size): self.text = text self.fontname", "if fontName != child.fontname: warnings.append(\"mixed fontName\") if fontSize is not", "on <https://stackoverflow.com/questions/15737806/extract-text-using- pdfminer-and-pypdf2-merges-columns>. 
\"\"\" def __init__(self, rsrcmgr, pageno=1, laparams=None, colStarts=None):", "0 col2Min = math.floor(self.colStarts[1]) if item.bbox[0] >= col2Min: col =", "prerr(\"To use the aggregator (required for generating chunks.json)\" \" you", "item: strp = None if isinstance(child, LTChar): child_str += child.get_text()", "# fontSizes.append(fontSize) parts.append(strp) elif isinstance(child, LTAnno): child_str += child.get_text() strp", "import PDFResourceManager, PDFPageInterpreter from pdfminer.converter import PDFPageAggregator from pdfminer.layout import", "parts {}\" \"\".format(child_str, warnings, fontNames, fontSizes, parts)) input(\"Press enter to", "colStarts if self.colStarts is not None: print(\"columns: {}\".format(len(self.colStarts))) self.page_number =", "{}\".format(dir(child))) raise ex ''' chunk = DocChunk( page_number, col, item.bbox,", "'14 at 23:41 at <https://stackoverflow.com/a/19179114> on <https://stackoverflow.com/questions/15737806/extract-text-using- pdfminer-and-pypdf2-merges-columns>. 
\"\"\" def", "if fontSize is not None: if fontSize != child.size: warnings.append(\"mixed", "no attribute 'fontname'\" except AttributeError as ex: print(\"dir(LTTextLine): {}\".format(dir(LTTextLine))) print(\"dir(child):", "in item: render(child, page_number) return render(ltpage, self.page_number) self.page_number += 1", "module for Python:\") prerr(\" pdfminer\") exit(1) try: input = raw_input", "or 2\" \" is implemented for \\\"colStarts\\\".\") # if isinstance(child,", "fontSize != child.size: warnings.append(\"mixed fontSize\") fontName = child.fontname fontSize =", "if self.colStarts is not None: print(\"columns: {}\".format(len(self.colStarts))) self.page_number = 0", "[] fontNames = [] warnings = [] parts = []", "to continue...\") \"\"\" fontSize = None fontName = None col", "''' chunk = DocChunk( page_number, col, item.bbox, child_str, fontName=fontName, fontSize=fontSize,", "__init__(self, text, fontname, size): self.text = text self.fontname = fontname", "[] for child in item: strp = None if isinstance(child,", "(cols == 1): col = 0 elif (cols == 2):", "{}: fonts {} sizes {} parts {}\" \"\".format(child_str, warnings, fontNames,", "col = 1 # Index [1] is column 2. 
else:", "LTAnno): child_str += child.get_text() strp = child.get_text().strip() annotations.append(ltannoDict(child)) child_str =", "for generating chunks.json)\" \" you must first install the following", "return objDict(ltanno) ''' class DocFragment: def __init__(self, text, fontname, size):", "child.get_text() strp = child.get_text().strip() annotations.append(ltannoDict(child)) child_str = ' '.join(child_str.split()).strip() if", "print(\"dir(LTTextLine): {}\".format(dir(LTTextLine))) print(\"dir(child): {}\".format(dir(child))) raise ex ''' chunk = DocChunk(", "child in item: render(child, page_number) elif isinstance(item, LTTextLine): child_str =", "fontName = None fontSizes = [] fontNames = [] warnings", "fontSize = child.size # Avoid \"AttributeError: # 'LTAnno' object has", "ex ''' chunk = DocChunk( page_number, col, item.bbox, child_str, fontName=fontName,", "( objDict, BBox, DocChunk, clean_frag_text, clean_frag, same_style, frag_dict, ) def", "(len(strp) > 0) if fontName is not None: if fontName", "= None if isinstance(child, LTChar): child_str += child.get_text() strp =", "isinstance(item, LTTextBox): for child in item: render(child, page_number) elif isinstance(item,", "self.chunks.append(chunk) for child in item: render(child, page_number) return render(ltpage, self.page_number)", "(ffs == self.size) and (ffn == self.fontname) def clean(self): self.text", "\"\"\" Is same fontname and size. 
\"\"\" ffn = fragment.fontname", "you must first install the following module for Python:\") prerr(\"", "fontname self.size = size def sameStyle(self, fragment): \"\"\" Is same", "0 def receive_layout(self, ltpage): def render(item, page_number): if isinstance(item, LTPage)", "not None: print(\"columns: {}\".format(len(self.colStarts))) self.page_number = 0 def receive_layout(self, ltpage):", "{} sizes {} parts {}\" \"\".format(child_str, warnings, fontNames, fontSizes, parts))", "page_number): if isinstance(item, LTPage) or isinstance(item, LTTextBox): for child in", "laparams=laparams) self.chunks = [] self.colStarts = colStarts if self.colStarts is", "0 elif (cols == 2): col = 0 col2Min =", "length 1 (same as None) or 2\" \" is implemented", "input(\"Press enter to continue...\") \"\"\" fontSize = None fontName =", "ValueError(\"Only a list of length 1 (same as None) or", "TODO: from srd import ( objDict, BBox, DocChunk, clean_frag_text, clean_frag,", "pdfminer.pdfparser import PDFParser from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter from pdfminer.converter", "!= child.fontname: warnings.append(\"mixed fontName\") if fontSize is not None: if", "objDict, BBox, DocChunk, clean_frag_text, clean_frag, same_style, frag_dict, ) def ltannoDict(ltanno):", "\\\"colStarts\\\".\") # if isinstance(child, LTChar): ''' try: fontName = child.fontname", "= math.floor(self.colStarts[1]) if item.bbox[0] >= col2Min: col = 1 #", "0) if fontName is not None: if fontName != child.fontname:", "if fontSize != child.size: warnings.append(\"mixed fontSize\") fontName = child.fontname fontSize", "except AttributeError as ex: print(\"dir(LTTextLine): {}\".format(dir(LTTextLine))) print(\"dir(child): {}\".format(dir(child))) raise ex", "clean_frag_text(self.text) ''' class PDFPageDetailedAggregator(PDFPageAggregator): \"\"\" This class is based on", "\"\".format(child_str, warnings, fontNames, fontSizes, parts)) input(\"Press enter to continue...\") \"\"\"", "except 
ModuleNotFoundError: prerr(\"To use the aggregator (required for generating chunks.json)\"", "= raw_input except NameError: # Python 3 pass # TODO:", "clean_frag_text, clean_frag, same_style, frag_dict, ) def ltannoDict(ltanno): return objDict(ltanno) '''", "PDFPageDetailedAggregator: from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines from pdfminer.pdfparser import PDFParser", "object has no attribute 'fontname'\" except AttributeError as ex: print(\"dir(LTTextLine):", "python3 import math try: # from PDFPageDetailedAggregator: from pdfminer.pdfdocument import", "0: \"\"\" print(\"Warnings in \\\"{}\\\":\" \" {}: fonts {} sizes", "fontSizes = [] fontNames = [] warnings = [] parts", "1 # Index [1] is column 2. else: raise ValueError(\"Only", "prerr(\" pdfminer\") exit(1) try: input = raw_input except NameError: #", "PDFDocument, PDFNoOutlines from pdfminer.pdfparser import PDFParser from pdfminer.pdfinterp import PDFResourceManager,", "key = lambda f: (f.pageid, f.column, -f.bbox.y1)) self.result = ltpage", "rsrcmgr, pageno=pageno, laparams=laparams) self.chunks = [] self.colStarts = colStarts if", "isinstance(child, LTAnno): child_str += child.get_text() strp = child.get_text().strip() annotations.append(ltannoDict(child)) child_str", "import LTPage, LTChar, LTAnno, LAParams, LTTextBox, LTTextLine except ModuleNotFoundError: prerr(\"To", "from lindblandro's Oct 4 '13 at 10:33 answer edited by", "''' try: fontName = child.fontname fontSize = child.size # Avoid", "render(child, page_number) return render(ltpage, self.page_number) self.page_number += 1 self.chunks =", "self.page_number = 0 def receive_layout(self, ltpage): def render(item, page_number): if", "+= child.get_text() strp = child.get_text().strip() annotations.append(ltannoDict(child)) child_str = ' '.join(child_str.split()).strip()", "isinstance(item, LTPage) or isinstance(item, LTTextBox): for child in item: render(child,", "from pdfminer.converter import PDFPageAggregator from pdfminer.layout 
import LTPage, LTChar, LTAnno,", "child.size: warnings.append(\"mixed fontSize\") fontName = child.fontname fontSize = child.size frag", "None col = None cols = 0 if self.colStarts is", "attribute 'fontname'\" except AttributeError as ex: print(\"dir(LTTextLine): {}\".format(dir(LTTextLine))) print(\"dir(child): {}\".format(dir(child)))", "from pdfminer.pdfparser import PDFParser from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter from", "pass # TODO: from srd import ( objDict, BBox, DocChunk,", "LTTextBox, LTTextLine except ModuleNotFoundError: prerr(\"To use the aggregator (required for", "= size def sameStyle(self, fragment): \"\"\" Is same fontname and", "PDFPageDetailedAggregator from lindblandro's Oct 4 '13 at 10:33 answer edited", "cols = len(self.colStarts) if (cols is None) or (cols ==", "def sameStyle(self, fragment): \"\"\" Is same fontname and size. \"\"\"", "print(\"Warnings in \\\"{}\\\":\" \" {}: fonts {} sizes {} parts", "for child in item: render(child, page_number) elif isinstance(item, LTTextLine): child_str", "sorted(self.chunks, key = lambda f: (f.pageid, f.column, -f.bbox.y1)) self.result =", "'.join(child_str.split()).strip() if child_str: if len(warnings) > 0: \"\"\" print(\"Warnings in", "Python 3 pass # TODO: from srd import ( objDict,", "\"\"\" def __init__(self, rsrcmgr, pageno=1, laparams=None, colStarts=None): PDFPageAggregator.__init__(self, rsrcmgr, pageno=pageno,", "\" you must first install the following module for Python:\")", "list of length 1 (same as None) or 2\" \"", "based on PDFPageDetailedAggregator from lindblandro's Oct 4 '13 at 10:33", "Index [1] is column 2. 
else: raise ValueError(\"Only a list", "self.colStarts is not None: print(\"columns: {}\".format(len(self.colStarts))) self.page_number = 0 def", "PDFPageDetailedAggregator(PDFPageAggregator): \"\"\" This class is based on PDFPageDetailedAggregator from lindblandro's", "+= 1 self.chunks = sorted(self.chunks, key = lambda f: (f.pageid,", "child_str: if len(warnings) > 0: \"\"\" print(\"Warnings in \\\"{}\\\":\" \"", "Oct 4 '13 at 10:33 answer edited by slushy Feb", "warnings.append(\"mixed fontName\") if fontSize is not None: if fontSize !=", "\"AttributeError: # 'LTAnno' object has no attribute 'fontname'\" except AttributeError", "in item: render(child, page_number) elif isinstance(item, LTTextLine): child_str = ''", "'' fontSize = None fontName = None fontSizes = []", "clean(self): self.text = clean_frag_text(self.text) ''' class PDFPageDetailedAggregator(PDFPageAggregator): \"\"\" This class", "10:33 answer edited by slushy Feb 4 '14 at 23:41", "try: input = raw_input except NameError: # Python 3 pass", "= 0 col2Min = math.floor(self.colStarts[1]) if item.bbox[0] >= col2Min: col", "text, fontname, size): self.text = text self.fontname = fontname self.size", "colStarts=None): PDFPageAggregator.__init__(self, rsrcmgr, pageno=pageno, laparams=laparams) self.chunks = [] self.colStarts =", "= child.fontname fontSize = child.size # Avoid \"AttributeError: # 'LTAnno'", "as ex: print(\"dir(LTTextLine): {}\".format(dir(LTTextLine))) print(\"dir(child): {}\".format(dir(child))) raise ex ''' chunk", "return render(ltpage, self.page_number) self.page_number += 1 self.chunks = sorted(self.chunks, key", "if child_str: if len(warnings) > 0: \"\"\" print(\"Warnings in \\\"{}\\\":\"", "or (cols == 1): col = 0 elif (cols ==", "elif isinstance(child, LTAnno): child_str += child.get_text() strp = child.get_text().strip() annotations.append(ltannoDict(child))", "1 self.chunks = sorted(self.chunks, key = lambda f: (f.pageid, f.column,", "== self.fontname) def clean(self): self.text = 
clean_frag_text(self.text) ''' class PDFPageDetailedAggregator(PDFPageAggregator):", "[1] is column 2. else: raise ValueError(\"Only a list of", "in \\\"{}\\\":\" \" {}: fonts {} sizes {} parts {}\"", "#!/usr/bin/env python3 import math try: # from PDFPageDetailedAggregator: from pdfminer.pdfdocument", "and (ffn == self.fontname) def clean(self): self.text = clean_frag_text(self.text) '''", "[] annotations = [] for child in item: strp =", "isinstance(child, LTChar): child_str += child.get_text() strp = child.get_text().strip() # and", "pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter from pdfminer.converter import PDFPageAggregator from pdfminer.layout" ]
[ "frame.grid() disp = Label(frame, text=\"hmm\").grid(column=0, row=0) def reload(): conn =", "from pox.core import core import pox.openflow.libopenflow_01 as of from forwarding.l2_learning", "top.protocol(\"WM_DELETE_WINDOW\", core.quit) top.title(\"firewall thing\") frame = Frame(top, padding=\"3\") frame.grid() disp", "conn = core.openflow.getConnection(1) disp.configure(str(dir(conn))) b_reload = Button(frame, text=\"reload\", command=reload).grid(column=0, row=1)", "# register firewall core.registerNew(TestFW, fw_list_dpid[0], srv_list) # just use L2", "register firewall core.registerNew(TestFW, fw_list_dpid[0], srv_list) # just use L2 learning", "* from project.firewall import TestFW from project.ui import UI def", "= Toplevel() # quit POX when window is killed top.protocol(\"WM_DELETE_WINDOW\",", "Label(frame, text=\"hmm\").grid(column=0, row=0) def reload(): conn = core.openflow.getConnection(1) disp.configure(str(dir(conn))) b_reload", "quit POX when window is killed top.protocol(\"WM_DELETE_WINDOW\", core.quit) top.title(\"firewall thing\")", "core.registerNew(TestFW, fw_list_dpid[0], srv_list) # just use L2 learning switch for", "b_reload = Button(frame, text=\"reload\", command=reload).grid(column=0, row=1) b_quit = Button(frame, text=\"quit\",", "thing\") frame = Frame(top, padding=\"3\") frame.grid() disp = Label(frame, text=\"hmm\").grid(column=0,", "= Button(frame, text=\"quit\", command=top.destroy).grid(column=0, row=2) def launch(): fw_list_dpid = [51,", "text=\"quit\", command=top.destroy).grid(column=0, row=2) def launch(): fw_list_dpid = [51, 52] srv_list", "killed top.protocol(\"WM_DELETE_WINDOW\", core.quit) top.title(\"firewall thing\") frame = Frame(top, padding=\"3\") frame.grid()", "= Frame(top, padding=\"3\") frame.grid() disp = Label(frame, text=\"hmm\").grid(column=0, row=0) def", "frame = Frame(top, padding=\"3\") frame.grid() disp = Label(frame, text=\"hmm\").grid(column=0, row=0)", "launch(): fw_list_dpid = [51, 52] srv_list = 
{\"web\" : ['10.0.0.100']}", "disp.configure(str(dir(conn))) b_reload = Button(frame, text=\"reload\", command=reload).grid(column=0, row=1) b_quit = Button(frame,", "learning switch for others core.registerNew(l2_learning, False) #core.registerNew(UI) def start_ui(): core.tk.do(setup)", "project.firewall import TestFW from project.ui import UI def setup(): top", "pox.core import core import pox.openflow.libopenflow_01 as of from forwarding.l2_learning import", "row=1) b_quit = Button(frame, text=\"quit\", command=top.destroy).grid(column=0, row=2) def launch(): fw_list_dpid", "from project.ui import UI def setup(): top = Toplevel() #", "import pox.openflow.libopenflow_01 as of from forwarding.l2_learning import * from tkinter", "= Label(frame, text=\"hmm\").grid(column=0, row=0) def reload(): conn = core.openflow.getConnection(1) disp.configure(str(dir(conn)))", "def setup(): top = Toplevel() # quit POX when window", "52] srv_list = {\"web\" : ['10.0.0.100']} # register firewall core.registerNew(TestFW,", "project.ui import UI def setup(): top = Toplevel() # quit", "core.openflow.getConnection(1) disp.configure(str(dir(conn))) b_reload = Button(frame, text=\"reload\", command=reload).grid(column=0, row=1) b_quit =", "= {\"web\" : ['10.0.0.100']} # register firewall core.registerNew(TestFW, fw_list_dpid[0], srv_list)", "just use L2 learning switch for others core.registerNew(l2_learning, False) #core.registerNew(UI)", "[51, 52] srv_list = {\"web\" : ['10.0.0.100']} # register firewall", "row=0) def reload(): conn = core.openflow.getConnection(1) disp.configure(str(dir(conn))) b_reload = Button(frame,", "# quit POX when window is killed top.protocol(\"WM_DELETE_WINDOW\", core.quit) top.title(\"firewall", "Button(frame, text=\"reload\", command=reload).grid(column=0, row=1) b_quit = Button(frame, text=\"quit\", command=top.destroy).grid(column=0, row=2)", "{\"web\" : ['10.0.0.100']} # register firewall core.registerNew(TestFW, fw_list_dpid[0], srv_list) #", "is 
killed top.protocol(\"WM_DELETE_WINDOW\", core.quit) top.title(\"firewall thing\") frame = Frame(top, padding=\"3\")", "import * from project.firewall import TestFW from project.ui import UI", "import UI def setup(): top = Toplevel() # quit POX", "core.quit) top.title(\"firewall thing\") frame = Frame(top, padding=\"3\") frame.grid() disp =", "def launch(): fw_list_dpid = [51, 52] srv_list = {\"web\" :", "core import pox.openflow.libopenflow_01 as of from forwarding.l2_learning import * from", "def reload(): conn = core.openflow.getConnection(1) disp.configure(str(dir(conn))) b_reload = Button(frame, text=\"reload\",", "command=top.destroy).grid(column=0, row=2) def launch(): fw_list_dpid = [51, 52] srv_list =", "srv_list = {\"web\" : ['10.0.0.100']} # register firewall core.registerNew(TestFW, fw_list_dpid[0],", "reload(): conn = core.openflow.getConnection(1) disp.configure(str(dir(conn))) b_reload = Button(frame, text=\"reload\", command=reload).grid(column=0,", "disp = Label(frame, text=\"hmm\").grid(column=0, row=0) def reload(): conn = core.openflow.getConnection(1)", "padding=\"3\") frame.grid() disp = Label(frame, text=\"hmm\").grid(column=0, row=0) def reload(): conn", "['10.0.0.100']} # register firewall core.registerNew(TestFW, fw_list_dpid[0], srv_list) # just use", "setup(): top = Toplevel() # quit POX when window is", "fw_list_dpid = [51, 52] srv_list = {\"web\" : ['10.0.0.100']} #", "import core import pox.openflow.libopenflow_01 as of from forwarding.l2_learning import *", "firewall core.registerNew(TestFW, fw_list_dpid[0], srv_list) # just use L2 learning switch", "UI def setup(): top = Toplevel() # quit POX when", "Button(frame, text=\"quit\", command=top.destroy).grid(column=0, row=2) def launch(): fw_list_dpid = [51, 52]", "b_quit = Button(frame, text=\"quit\", command=top.destroy).grid(column=0, row=2) def launch(): fw_list_dpid =", "from tkinter import * from project.firewall import TestFW from project.ui", "# just use L2 learning switch for 
others core.registerNew(l2_learning, False)", "others core.registerNew(l2_learning, False) #core.registerNew(UI) def start_ui(): core.tk.do(setup) core.call_when_ready(start_ui, ['openflow', 'tk'])", "for others core.registerNew(l2_learning, False) #core.registerNew(UI) def start_ui(): core.tk.do(setup) core.call_when_ready(start_ui, ['openflow',", "when window is killed top.protocol(\"WM_DELETE_WINDOW\", core.quit) top.title(\"firewall thing\") frame =", "as of from forwarding.l2_learning import * from tkinter import *", "Frame(top, padding=\"3\") frame.grid() disp = Label(frame, text=\"hmm\").grid(column=0, row=0) def reload():", "Toplevel() # quit POX when window is killed top.protocol(\"WM_DELETE_WINDOW\", core.quit)", ": ['10.0.0.100']} # register firewall core.registerNew(TestFW, fw_list_dpid[0], srv_list) # just", "text=\"hmm\").grid(column=0, row=0) def reload(): conn = core.openflow.getConnection(1) disp.configure(str(dir(conn))) b_reload =", "* from tkinter import * from project.firewall import TestFW from", "= Button(frame, text=\"reload\", command=reload).grid(column=0, row=1) b_quit = Button(frame, text=\"quit\", command=top.destroy).grid(column=0,", "import TestFW from project.ui import UI def setup(): top =", "of from forwarding.l2_learning import * from tkinter import * from", "window is killed top.protocol(\"WM_DELETE_WINDOW\", core.quit) top.title(\"firewall thing\") frame = Frame(top,", "TestFW from project.ui import UI def setup(): top = Toplevel()", "= core.openflow.getConnection(1) disp.configure(str(dir(conn))) b_reload = Button(frame, text=\"reload\", command=reload).grid(column=0, row=1) b_quit", "= [51, 52] srv_list = {\"web\" : ['10.0.0.100']} # register", "from project.firewall import TestFW from project.ui import UI def setup():", "row=2) def launch(): fw_list_dpid = [51, 52] srv_list = {\"web\"", "forwarding.l2_learning import * from tkinter import * from project.firewall import", "top.title(\"firewall thing\") frame = Frame(top, 
padding=\"3\") frame.grid() disp = Label(frame,", "srv_list) # just use L2 learning switch for others core.registerNew(l2_learning,", "L2 learning switch for others core.registerNew(l2_learning, False) #core.registerNew(UI) def start_ui():", "switch for others core.registerNew(l2_learning, False) #core.registerNew(UI) def start_ui(): core.tk.do(setup) core.call_when_ready(start_ui,", "import * from tkinter import * from project.firewall import TestFW", "top = Toplevel() # quit POX when window is killed", "fw_list_dpid[0], srv_list) # just use L2 learning switch for others", "from forwarding.l2_learning import * from tkinter import * from project.firewall", "use L2 learning switch for others core.registerNew(l2_learning, False) #core.registerNew(UI) def", "tkinter import * from project.firewall import TestFW from project.ui import", "text=\"reload\", command=reload).grid(column=0, row=1) b_quit = Button(frame, text=\"quit\", command=top.destroy).grid(column=0, row=2) def", "POX when window is killed top.protocol(\"WM_DELETE_WINDOW\", core.quit) top.title(\"firewall thing\") frame", "command=reload).grid(column=0, row=1) b_quit = Button(frame, text=\"quit\", command=top.destroy).grid(column=0, row=2) def launch():", "pox.openflow.libopenflow_01 as of from forwarding.l2_learning import * from tkinter import" ]
[ "is part of Mako and is released under # the", "# Copyright (C) 2006-2016 the Mako authors and contributors <see", "module is part of Mako and is released under #", "is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php __version__ =", "of Mako and is released under # the MIT License:", "<see AUTHORS file> # # This module is part of", "and contributors <see AUTHORS file> # # This module is", "# # This module is part of Mako and is", "part of Mako and is released under # the MIT", "Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS", "authors and contributors <see AUTHORS file> # # This module", "This module is part of Mako and is released under", "(C) 2006-2016 the Mako authors and contributors <see AUTHORS file>", "and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php __version__", "contributors <see AUTHORS file> # # This module is part", "Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php", "# mako/__init__.py # Copyright (C) 2006-2016 the Mako authors and", "released under # the MIT License: http://www.opensource.org/licenses/mit-license.php __version__ = '1.0.9'", "file> # # This module is part of Mako and", "2006-2016 the Mako authors and contributors <see AUTHORS file> #", "# This module is part of Mako and is released", "Mako authors and contributors <see AUTHORS file> # # This", "mako/__init__.py # Copyright (C) 2006-2016 the Mako authors and contributors", "the Mako authors and contributors <see AUTHORS file> # #", "AUTHORS file> # # This module is part of Mako" ]
[ "[] missing_keys_pd = [] zip_map = list(zip(*mapping)) th_keys = list(zip_map[0])", "return paddle_model def main(): paddle.set_device('cpu') model_name_list = ['t2t_vit_7', 't2t_vit_10', 't2t_vit_12',", "config): def _set_value(th_name, pd_name, transpose=True): th_shape = th_params[th_name].shape pd_shape =", "= [] missing_keys_pd = [] zip_map = list(zip(*mapping)) th_keys =", "config) missing_keys_th = [] missing_keys_pd = [] zip_map = list(zip(*mapping))", "2.0 (the \"License\"); # you may not use this file", "and transpose: value = value.transpose((1, 0)) pd_params[pd_name].set_value(value) # 1. get", "print('========================================================') print(out_paddle[0, 0:100]) assert np.allclose(out_torch, out_paddle, atol = 1e-2) #", "paddle import torch import timm from config import get_config from", "= True if key.endswith('.weight'): if key[:-7] in pd_keys: missing =", "model_name_list = ['t2t_vit_7', 't2t_vit_10', 't2t_vit_12', 't2t_vit_14', 't2t_vit_14_384', 't2t_vit_19', 't2t_vit_24', 't2t_vit_24_token_labeling',", "= {} th_params = {} for name, param in paddle_model.named_parameters():", "if key[:-7] in pd_keys: missing = False if key.endswith('.bias'): if", "3. set torch param values to paddle params: may needs", "for name, param in model.named_parameters(): print(name, param.shape) print('----------------------------------') def print_model_named_buffers(model):", "list(zip_map[0]) pd_keys = list(zip_map[1]) for key in th_params: missing =", "on weights for th_name, pd_name in mapping: if th_name in", "and f'{pd_name}.weight' in pd_params: th_name_w = f'{th_name}.weight' pd_name_w = f'{pd_name}.weight'", "# 2. 
get name mapping pairs mapping = torch_to_paddle_mapping(model_name, config)", "(f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'), (f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'),", "if f'{th_name}.running_var' in th_params and f'{pd_name}._variance' in pd_params: th_name_b =", "pp_prefix = f'blocks.{idx}' layer_mapping = [ (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'),", "f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'), (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'), ] else: layer_mapping = [", "transpose: value = value.transpose((1, 0)) pd_params[pd_name].set_value(value) # 1. get paddle", "if key not in th_keys: missing = True if key.endswith('.weight'):", "in pd_params: th_name_b = f'{th_name}.bias' pd_name_b = f'{pd_name}.bias' _set_value(th_name_b, pd_name_b)", "model_path = os.path.join(f'./{model_name}.pdparams') paddle.save(paddle_model.state_dict(), model_path) print(f'{model_name} done') print('all done') if", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "from config import get_config from t2t_vit import build_t2t_vit as build_model", "torch_model.named_parameters(): th_params[name] = param for name, param in paddle_model.named_buffers(): pd_params[name]", "f'{pd_name}._mean' in pd_params: th_name_b = f'{th_name}.running_mean' pd_name_b = f'{pd_name}._mean' _set_value(th_name_b,", "[ ('cls_token', 'cls_token'), ('pos_embed', 'pos_embed'), ] for idx in range(1,", "[ (f'{th_prefix}.w', f'{pp_prefix}.w'), (f'{th_prefix}.kqv', f'{pp_prefix}.kqv'), (f'{th_prefix}.proj', f'{pp_prefix}.proj'), (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.norm2',", "import numpy as np import paddle import torch import timm", "th_params and f'{pd_name}.bias' in pd_params: th_name_b = f'{th_name}.bias' 
pd_name_b =", "<reponame>RangeKing/PaddleViT<filename>image_classification/T2T_ViT/load_pytorch_weights.py # Copyright (c) 2021 PPViT Authors. All Rights Reserved.", "eval(f'{model_name[:-4]}(img_size={sz})') else: torch_model = eval(f'{model_name}(img_size={sz})') load_for_transfer_learning(torch_model, pth_model_path, use_ema=True, strict=False, num_classes=1000)", "missing = True if key.endswith('.weight'): if key[:-7] in pd_keys: missing", "def torch_to_paddle_mapping(model_name, config): # (torch_param_name, paddle_param_name) mapping = [ ('cls_token',", "pd_name_b) return paddle_model def main(): paddle.set_device('cpu') model_name_list = ['t2t_vit_7', 't2t_vit_10',", "missing = False if missing: missing_keys_pd.append(key) print('====================================') print('missing_keys_pytorch:') print(missing_keys_th) print('missing_keys_paddle:')", "th_params: missing = False if key not in th_keys: missing", "f'{pp_prefix}.attn.proj'), (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'), (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'), ]", "2 and transpose: value = value.transpose((1, 0)) pd_params[pd_name].set_value(value) # 1.", "from T2T_ViT_torch.utils import load_for_transfer_learning def print_model_named_params(model): print('----------------------------------') for name, param", "else: _set_value(th_name, pd_name) else: if f'{th_name}.weight' in th_params and f'{pd_name}.weight'", "use this file except in compliance with the License. 
#", "print_model_named_params(torch_model) print_model_named_buffers(torch_model) # convert weights paddle_model = convert(torch_model, paddle_model, model_name,", "model_path) print(f'{model_name} done') print('all done') if __name__ == \"__main__\": main()", "('cls_token', 'cls_token'), ('pos_embed', 'pos_embed'), ] for idx in range(1, 3):", "torch_model = torch_model.to(device) torch_model.eval() print_model_named_params(torch_model) print_model_named_buffers(torch_model) # convert weights paddle_model", "False if key.endswith('.bias'): if key[:-5] in th_keys: missing = False", "print(out_torch[0, 0:100]) print('========================================================') print(out_paddle[0, 0:100]) assert np.allclose(out_torch, out_paddle, atol =", "= 384 if '384' in model_name else 224 if 'token_labeling'", "f'{th_name}.bias' pd_name_b = f'{pd_name}.bias' _set_value(th_name_b, pd_name_b) if f'{th_name}.running_mean' in th_params", "# limitations under the License. \"\"\"convert pytorch model weights to", "paddle.to_tensor(x) x_torch = torch.Tensor(x).to(device) out_torch = torch_model(x_torch) out_paddle = paddle_model(x_paddle)", "f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.0', f'{pp_prefix}.mlp.0'), (f'{th_prefix}.mlp.2', f'{pp_prefix}.mlp.2'), ] mapping.extend(layer_mapping) mapping.append(('tokens_to_token.project','patch_embed.proj')) num_layers =", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "[ (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'), (f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.fc1',", "if isinstance(th_params[th_name], torch.nn.parameter.Parameter): value = th_params[th_name].data.numpy() else: value = th_params[th_name].numpy()", "get_config(f'./configs/{model_name}.yaml') paddle_model = build_model(config) paddle_model.eval() print_model_named_params(paddle_model) 
print_model_named_buffers(paddle_model) print('+++++++++++++++++++++++++++++++++++') device =", "License. # You may obtain a copy of the License", "f'{pd_name}.bias' in pd_params: th_name_b = f'{th_name}.bias' pd_name_b = f'{pd_name}.bias' _set_value(th_name_b,", "th_prefix = f'tokens_to_token.attention{idx}' pp_prefix = f'patch_embed.attn{idx}' if '_t_' in model_name:", "under the License is distributed on an \"AS IS\" BASIS,", "param # 2. get name mapping pairs mapping = torch_to_paddle_mapping(model_name,", "License for the specific language governing permissions and # limitations", "th_params and pd_name in pd_params: # nn.Parameters if th_name.endswith('w'): _set_value(th_name,", "range(num_layers): th_prefix = f'blocks.{idx}' pp_prefix = f'blocks.{idx}' layer_mapping = [", "f'{pp_prefix}.attn.qkv'), (f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'), (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'), (f'{th_prefix}.mlp.fc2',", "else: if f'{th_name}.weight' in th_params and f'{pd_name}.weight' in pd_params: th_name_w", "in model_name else 224 if 'token_labeling' in model_name: config =", "from T2T_ViT_torch.models.t2t_vit import * from T2T_ViT_torch.utils import load_for_transfer_learning def print_model_named_params(model):", "False if key not in pd_keys: missing = True if", "key.endswith('.bias'): if key[:-5] in pd_keys: missing = False if missing:", "Reserved. 
# # Licensed under the Apache License, Version 2.0", "pd_params: missing = False if key not in pd_keys: missing", "list(zip_map[1]) for key in th_params: missing = False if key", "idx in range(1, 3): th_prefix = f'tokens_to_token.attention{idx}' pp_prefix = f'patch_embed.attn{idx}'", "in torch_model.named_parameters(): th_params[name] = param for name, param in paddle_model.named_buffers():", "paddle pdparams\"\"\" import os import numpy as np import paddle", "'t2t_vit_t_19', 't2t_vit_t_24'] pth_model_path_list = ['./T2T_ViT_torch/t2t-vit-pth-models/71.7_T2T_ViT_7.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/75.2_T2T_ViT_10.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/76.5_T2T_ViT_12.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.5_T2T_ViT_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/83.3_T2T_ViT_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.9_T2T_ViT_19.pth.tar',", "in th_keys: missing = True if key.endswith('.weight'): if key[:-7] in", "import get_config from t2t_vit import build_t2t_vit as build_model from T2T_ViT_torch.models.t2t_vit", "if 'token_labeling' in model_name: config = get_config(f'./configs/{model_name[:-15]}.yaml') else: config =", "model_name, pth_model_path in zip(model_name_list, pth_model_path_list): print(f'============= NOW: {model_name} =============') sz", "False if missing: missing_keys_pd.append(key) print('====================================') print('missing_keys_pytorch:') print(missing_keys_th) print('missing_keys_paddle:') print(missing_keys_pd) print('====================================')", "in compliance with the License. 
# You may obtain a", "_set_value(th_name, pd_name, transpose=True): th_shape = th_params[th_name].shape pd_shape = tuple(pd_params[pd_name].shape) #", "model_name: config = get_config(f'./configs/{model_name[:-15]}.yaml') else: config = get_config(f'./configs/{model_name}.yaml') paddle_model =", "paddle shape default type is list #assert th_shape == pd_shape,", "software # distributed under the License is distributed on an", "parameters pd_params = {} th_params = {} for name, param", "= [ ('norm', 'norm'), ('head', 'head'), ] mapping.extend(head_mapping) return mapping", "pd_params: th_name_b = f'{th_name}.bias' pd_name_b = f'{pd_name}.bias' _set_value(th_name_b, pd_name_b) if", "nn.Parameters if th_name.endswith('w'): _set_value(th_name, pd_name, transpose=False) else: _set_value(th_name, pd_name) else:", "* from T2T_ViT_torch.utils import load_for_transfer_learning def print_model_named_params(model): print('----------------------------------') for name,", "'./T2T_ViT_torch/t2t-vit-pth-models/83.3_T2T_ViT_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.9_T2T_ViT_19.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.3_T2T_ViT_24.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/84.2_T2T_ViT_24.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.7_T2T_ViTt_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.4_T2T_ViTt_19.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.6_T2T_ViTt_24.pth.tar'] for model_name, pth_model_path", "key.endswith('.weight'): if key[:-7] in th_keys: missing = False if key.endswith('.bias'):", "!= {pd_shape}' print(f'**SET** {th_name} {th_shape} **TO** {pd_name} {pd_shape}') if isinstance(th_params[th_name],", "and pd_name in pd_params: # nn.Parameters if th_name.endswith('w'): _set_value(th_name, pd_name,", "if key.endswith('.weight'): if key[:-7] in pd_keys: missing = False if", "pd_name in mapping: if th_name in th_params and pd_name in", "import load_for_transfer_learning def print_model_named_params(model): 
print('----------------------------------') for name, param in model.named_parameters():", "np import paddle import torch import timm from config import", "for name, param in model.named_buffers(): print(name, param.shape) print('----------------------------------') def torch_to_paddle_mapping(model_name,", "if th_name.endswith('w'): _set_value(th_name, pd_name, transpose=False) else: _set_value(th_name, pd_name) else: if", "f'{th_name}.running_var' pd_name_b = f'{pd_name}._variance' _set_value(th_name_b, pd_name_b) return paddle_model def main():", "paddle model model_path = os.path.join(f'./{model_name}.pdparams') paddle.save(paddle_model.state_dict(), model_path) print(f'{model_name} done') print('all", "param in model.named_buffers(): print(name, param.shape) print('----------------------------------') def torch_to_paddle_mapping(model_name, config): #", "in th_params: missing = False if key not in th_keys:", "not in th_keys: missing = True if key.endswith('.weight'): if key[:-7]", "_set_value(th_name_w, pd_name_w) if f'{th_name}.bias' in th_params and f'{pd_name}.bias' in pd_params:", "if f'{th_name}.bias' in th_params and f'{pd_name}.bias' in pd_params: th_name_b =", "out_paddle.cpu().numpy() print(out_torch.shape, out_paddle.shape) print(out_torch[0, 0:100]) print('========================================================') print(out_paddle[0, 0:100]) assert np.allclose(out_torch,", "{model_name} =============') sz = 384 if '384' in model_name else", "config) # check correctness x = np.random.randn(2, 3, sz, sz).astype('float32')", "key in pd_params: missing = False if key not in", "_set_value(th_name, pd_name) else: if f'{th_name}.weight' in th_params and f'{pd_name}.weight' in", "= get_config(f'./configs/{model_name}.yaml') paddle_model = build_model(config) paddle_model.eval() print_model_named_params(paddle_model) print_model_named_buffers(paddle_model) print('+++++++++++++++++++++++++++++++++++') device", "= torch.Tensor(x).to(device) out_torch = 
torch_model(x_torch) out_paddle = paddle_model(x_paddle) out_torch =", "print_model_named_buffers(model): print('----------------------------------') for name, param in model.named_buffers(): print(name, param.shape) print('----------------------------------')", "name, param in paddle_model.named_buffers(): pd_params[name] = param for name, param", "= torch_model(x_torch) out_paddle = paddle_model(x_paddle) out_torch = out_torch.data.cpu().numpy() out_paddle =", "= False if key.endswith('.bias'): if key[:-5] in th_keys: missing =", "print(missing_keys_pd) print('====================================') # 3. set torch param values to paddle", "'cls_token'), ('pos_embed', 'pos_embed'), ] for idx in range(1, 3): th_prefix", "] for idx in range(1, 3): th_prefix = f'tokens_to_token.attention{idx}' pp_prefix", "under the License. \"\"\"convert pytorch model weights to paddle pdparams\"\"\"", "torch import timm from config import get_config from t2t_vit import", "= False if missing: missing_keys_th.append(key) for key in pd_params: missing", "= list(zip_map[0]) pd_keys = list(zip_map[1]) for key in th_params: missing", "f'{th_name}.weight' pd_name_w = f'{pd_name}.weight' _set_value(th_name_w, pd_name_w) if f'{th_name}.bias' in th_params", "def main(): paddle.set_device('cpu') model_name_list = ['t2t_vit_7', 't2t_vit_10', 't2t_vit_12', 't2t_vit_14', 't2t_vit_14_384',", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "in range(1, 3): th_prefix = f'tokens_to_token.attention{idx}' pp_prefix = f'patch_embed.attn{idx}' if", "pd_shape = tuple(pd_params[pd_name].shape) # paddle shape default type is list", "ANY KIND, either express or implied. 
# See the License", "See the License for the specific language governing permissions and", "'384' in model_name: torch_model = eval(f'{model_name[:-4]}(img_size={sz})') else: torch_model = eval(f'{model_name}(img_size={sz})')", "f'{th_name}.bias' in th_params and f'{pd_name}.bias' in pd_params: th_name_b = f'{th_name}.bias'", "paddle_param_name) mapping = [ ('cls_token', 'cls_token'), ('pos_embed', 'pos_embed'), ] for", "range(1, 3): th_prefix = f'tokens_to_token.attention{idx}' pp_prefix = f'patch_embed.attn{idx}' if '_t_'", "if key not in pd_keys: missing = True if key.endswith('.weight'):", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "params: may needs transpose on weights for th_name, pd_name in", "= False if key.endswith('.bias'): if key[:-5] in pd_keys: missing =", "weights for th_name, pd_name in mapping: if th_name in th_params", "shape default type is list #assert th_shape == pd_shape, f'{th_shape}", "to in writing, software # distributed under the License is", "pd_params: th_name_b = f'{th_name}.running_var' pd_name_b = f'{pd_name}._variance' _set_value(th_name_b, pd_name_b) return", "0:100]) print('========================================================') print(out_paddle[0, 0:100]) assert np.allclose(out_torch, out_paddle, atol = 1e-2)", "build_model from T2T_ViT_torch.models.t2t_vit import * from T2T_ViT_torch.utils import load_for_transfer_learning def", "# See the License for the specific language governing permissions", "torch.Tensor(x).to(device) out_torch = torch_model(x_torch) out_paddle = paddle_model(x_paddle) out_torch = out_torch.data.cpu().numpy()", "list #assert th_shape == pd_shape, f'{th_shape} != {pd_shape}' print(f'**SET** {th_name}", "= param # 2. 
get name mapping pairs mapping =", "or agreed to in writing, software # distributed under the", "if f'{th_name}.running_mean' in th_params and f'{pd_name}._mean' in pd_params: th_name_b =", "2. get name mapping pairs mapping = torch_to_paddle_mapping(model_name, config) missing_keys_th", "required by applicable law or agreed to in writing, software", "if key[:-5] in pd_keys: missing = False if missing: missing_keys_pd.append(key)", "in th_keys: missing = False if missing: missing_keys_th.append(key) for key", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# (torch_param_name, paddle_param_name) mapping = [ ('cls_token', 'cls_token'), ('pos_embed', 'pos_embed'),", "pd_params[pd_name].set_value(value) # 1. get paddle and torch model parameters pd_params", "with the License. # You may obtain a copy of", "] mapping.extend(head_mapping) return mapping def convert(torch_model, paddle_model, model_name, config): def", "param for name, param in torch_model.named_buffers(): th_params[name] = param #", "in pd_keys: missing = False if missing: missing_keys_pd.append(key) print('====================================') print('missing_keys_pytorch:')", "Copyright (c) 2021 PPViT Authors. All Rights Reserved. # #", "pd_keys: missing = True if key.endswith('.weight'): if key[:-7] in pd_keys:", "in pd_keys: missing = False if key.endswith('.bias'): if key[:-5] in", "compliance with the License. # You may obtain a copy", "type is list #assert th_shape == pd_shape, f'{th_shape} != {pd_shape}'", "All Rights Reserved. 
# # Licensed under the Apache License,", "agreed to in writing, software # distributed under the License", "build_t2t_vit as build_model from T2T_ViT_torch.models.t2t_vit import * from T2T_ViT_torch.utils import", "f'{pp_prefix}.kqv'), (f'{th_prefix}.proj', f'{pp_prefix}.proj'), (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.0', f'{pp_prefix}.mlp.0'), (f'{th_prefix}.mlp.2',", "layer_mapping = [ (f'{th_prefix}.w', f'{pp_prefix}.w'), (f'{th_prefix}.kqv', f'{pp_prefix}.kqv'), (f'{th_prefix}.proj', f'{pp_prefix}.proj'), (f'{th_prefix}.norm1',", "governing permissions and # limitations under the License. \"\"\"convert pytorch", "values to paddle params: may needs transpose on weights for", "zip(model_name_list, pth_model_path_list): print(f'============= NOW: {model_name} =============') sz = 384 if", "param in torch_model.named_buffers(): th_params[name] = param # 2. get name", "get paddle and torch model parameters pd_params = {} th_params", "(f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.0', f'{pp_prefix}.mlp.0'), (f'{th_prefix}.mlp.2', f'{pp_prefix}.mlp.2'), ] mapping.extend(layer_mapping)", "distributed under the License is distributed on an \"AS IS\"", "config import get_config from t2t_vit import build_t2t_vit as build_model from", "f'blocks.{idx}' pp_prefix = f'blocks.{idx}' layer_mapping = [ (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.attn.qkv',", "key[:-7] in th_keys: missing = False if key.endswith('.bias'): if key[:-5]", "out_torch.data.cpu().numpy() out_paddle = out_paddle.cpu().numpy() print(out_torch.shape, out_paddle.shape) print(out_torch[0, 0:100]) print('========================================================') print(out_paddle[0,", "limitations under the License. 
\"\"\"convert pytorch model weights to paddle", "in pd_params: th_name_w = f'{th_name}.weight' pd_name_w = f'{pd_name}.weight' _set_value(th_name_w, pd_name_w)", "model_name: torch_model = eval(f'{model_name[:-4]}(img_size={sz})') else: torch_model = eval(f'{model_name}(img_size={sz})') load_for_transfer_learning(torch_model, pth_model_path,", "torch.nn.parameter.Parameter): value = th_params[th_name].data.numpy() else: value = th_params[th_name].numpy() if len(value.shape)", "pd_params[name] = param for name, param in torch_model.named_parameters(): th_params[name] =", "check correctness x = np.random.randn(2, 3, sz, sz).astype('float32') x_paddle =", "paddle.save(paddle_model.state_dict(), model_path) print(f'{model_name} done') print('all done') if __name__ == \"__main__\":", "as np import paddle import torch import timm from config", "mapping.extend(head_mapping) return mapping def convert(torch_model, paddle_model, model_name, config): def _set_value(th_name,", "express or implied. # See the License for the specific", "except in compliance with the License. 
# You may obtain", "param in paddle_model.named_parameters(): pd_params[name] = param for name, param in", "load_for_transfer_learning def print_model_named_params(model): print('----------------------------------') for name, param in model.named_parameters(): print(name,", "= 1e-2) # save weights for paddle model model_path =", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "missing_keys_th.append(key) for key in pd_params: missing = False if key", "sz = 384 if '384' in model_name else 224 if", "not use this file except in compliance with the License.", "paddle_model.named_buffers(): pd_params[name] = param for name, param in torch_model.named_buffers(): th_params[name]", "mapping.append(('tokens_to_token.project','patch_embed.proj')) num_layers = config.MODEL.DEPTH for idx in range(num_layers): th_prefix =", "out_paddle = paddle_model(x_paddle) out_torch = out_torch.data.cpu().numpy() out_paddle = out_paddle.cpu().numpy() print(out_torch.shape,", "pp_prefix = f'patch_embed.attn{idx}' if '_t_' in model_name: layer_mapping = [", "mapping pairs mapping = torch_to_paddle_mapping(model_name, config) missing_keys_th = [] missing_keys_pd", "writing, software # distributed under the License is distributed on", "= build_model(config) paddle_model.eval() print_model_named_params(paddle_model) print_model_named_buffers(paddle_model) print('+++++++++++++++++++++++++++++++++++') device = torch.device('cpu') if", "in model.named_buffers(): print(name, param.shape) print('----------------------------------') def torch_to_paddle_mapping(model_name, config): # (torch_param_name,", "th_params and f'{pd_name}.weight' in pd_params: th_name_w = f'{th_name}.weight' pd_name_w =", "if key[:-7] in th_keys: missing = False if key.endswith('.bias'): if", "you may not use this file except in compliance with", "f'{pp_prefix}.attn.qkv'), (f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.fc1', 
f'{pp_prefix}.mlp.fc1'), (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'), ]", "'./T2T_ViT_torch/t2t-vit-pth-models/82.3_T2T_ViT_24.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/84.2_T2T_ViT_24.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.7_T2T_ViTt_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.4_T2T_ViTt_19.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.6_T2T_ViTt_24.pth.tar'] for model_name, pth_model_path in zip(model_name_list,", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "2021 PPViT Authors. All Rights Reserved. # # Licensed under", "# 1. get paddle and torch model parameters pd_params =", "key not in pd_keys: missing = True if key.endswith('.weight'): if", "isinstance(th_params[th_name], torch.nn.parameter.Parameter): value = th_params[th_name].data.numpy() else: value = th_params[th_name].numpy() if", "False if missing: missing_keys_th.append(key) for key in pd_params: missing =", "torch_model.to(device) torch_model.eval() print_model_named_params(torch_model) print_model_named_buffers(torch_model) # convert weights paddle_model = convert(torch_model,", "for idx in range(1, 3): th_prefix = f'tokens_to_token.attention{idx}' pp_prefix =", "param in torch_model.named_parameters(): th_params[name] = param for name, param in", "eval(f'{model_name[:-15]}(img_size={sz})') else: if '384' in model_name: torch_model = eval(f'{model_name[:-4]}(img_size={sz})') else:", "= f'{pd_name}._mean' _set_value(th_name_b, pd_name_b) if f'{th_name}.running_var' in th_params and f'{pd_name}._variance'", "(f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'), (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'), ] else: layer_mapping =", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "_set_value(th_name_b, pd_name_b) if f'{th_name}.running_var' in th_params and f'{pd_name}._variance' in pd_params:", "= paddle_model(x_paddle) out_torch = out_torch.data.cpu().numpy() out_paddle = out_paddle.cpu().numpy() print(out_torch.shape, out_paddle.shape)", "for name, param in paddle_model.named_parameters(): pd_params[name] = param for name,", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "pd_name) else: if f'{th_name}.weight' in th_params and f'{pd_name}.weight' in pd_params:", "eval(f'{model_name}(img_size={sz})') load_for_transfer_learning(torch_model, pth_model_path, use_ema=True, strict=False, num_classes=1000) torch_model = torch_model.to(device) torch_model.eval()", "#assert th_shape == pd_shape, f'{th_shape} != {pd_shape}' print(f'**SET** {th_name} {th_shape}", "default type is list #assert th_shape == pd_shape, f'{th_shape} !=", "in pd_params: th_name_b = f'{th_name}.running_mean' pd_name_b = f'{pd_name}._mean' _set_value(th_name_b, pd_name_b)", "in torch_model.named_buffers(): th_params[name] = param # 2. get name mapping", "f'{pp_prefix}.mlp.fc1'), (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'), ] else: layer_mapping = [ (f'{th_prefix}.w', f'{pp_prefix}.w'),", "len(value.shape) == 2 and transpose: value = value.transpose((1, 0)) pd_params[pd_name].set_value(value)", "'./T2T_ViT_torch/t2t-vit-pth-models/81.9_T2T_ViT_19.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.3_T2T_ViT_24.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/84.2_T2T_ViT_24.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.7_T2T_ViTt_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.4_T2T_ViTt_19.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.6_T2T_ViTt_24.pth.tar'] for model_name, pth_model_path in", "the License. 
\"\"\"convert pytorch model weights to paddle pdparams\"\"\" import", "'pos_embed'), ] for idx in range(1, 3): th_prefix = f'tokens_to_token.attention{idx}'", "= list(zip(*mapping)) th_keys = list(zip_map[0]) pd_keys = list(zip_map[1]) for key", "= f'blocks.{idx}' pp_prefix = f'blocks.{idx}' layer_mapping = [ (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'),", "(torch_param_name, paddle_param_name) mapping = [ ('cls_token', 'cls_token'), ('pos_embed', 'pos_embed'), ]", "= torch_model.to(device) torch_model.eval() print_model_named_params(torch_model) print_model_named_buffers(torch_model) # convert weights paddle_model =", "(f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'), (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'), (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'),", "= torch.device('cpu') if 'token_labeling' in model_name: torch_model = eval(f'{model_name[:-15]}(img_size={sz})') else:", "if key.endswith('.bias'): if key[:-5] in pd_keys: missing = False if", "in model_name: config = get_config(f'./configs/{model_name[:-15]}.yaml') else: config = get_config(f'./configs/{model_name}.yaml') paddle_model", "(f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'), (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'), ] else:", "(f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'), (f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'), (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'),", "correctness x = np.random.randn(2, 3, sz, sz).astype('float32') x_paddle = paddle.to_tensor(x)", "(f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'), (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'), ] mapping.extend(layer_mapping) head_mapping =", "0)) 
pd_params[pd_name].set_value(value) # 1. get paddle and torch model parameters", "1e-2) # save weights for paddle model model_path = os.path.join(f'./{model_name}.pdparams')", "th_keys = list(zip_map[0]) pd_keys = list(zip_map[1]) for key in th_params:", "= th_params[th_name].numpy() if len(value.shape) == 2 and transpose: value =", "f'{pp_prefix}.mlp.0'), (f'{th_prefix}.mlp.2', f'{pp_prefix}.mlp.2'), ] mapping.extend(layer_mapping) mapping.append(('tokens_to_token.project','patch_embed.proj')) num_layers = config.MODEL.DEPTH for", "'norm'), ('head', 'head'), ] mapping.extend(head_mapping) return mapping def convert(torch_model, paddle_model,", "= f'tokens_to_token.attention{idx}' pp_prefix = f'patch_embed.attn{idx}' if '_t_' in model_name: layer_mapping", "key.endswith('.bias'): if key[:-5] in th_keys: missing = False if missing:", "'./T2T_ViT_torch/t2t-vit-pth-models/75.2_T2T_ViT_10.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/76.5_T2T_ViT_12.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.5_T2T_ViT_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/83.3_T2T_ViT_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.9_T2T_ViT_19.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.3_T2T_ViT_24.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/84.2_T2T_ViT_24.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.7_T2T_ViTt_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.4_T2T_ViTt_19.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.6_T2T_ViTt_24.pth.tar']", "'token_labeling' in model_name: config = get_config(f'./configs/{model_name[:-15]}.yaml') else: config = get_config(f'./configs/{model_name}.yaml')", "num_classes=1000) torch_model = torch_model.to(device) torch_model.eval() print_model_named_params(torch_model) print_model_named_buffers(torch_model) # convert weights", "pd_name_w = f'{pd_name}.weight' _set_value(th_name_w, pd_name_w) if f'{th_name}.bias' in th_params and", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "model weights to paddle pdparams\"\"\" import os import numpy as", "in model.named_parameters(): print(name, param.shape) print('----------------------------------') def print_model_named_buffers(model): print('----------------------------------') for name,", "build_model(config) paddle_model.eval() print_model_named_params(paddle_model) print_model_named_buffers(paddle_model) print('+++++++++++++++++++++++++++++++++++') device = torch.device('cpu') if 'token_labeling'", "the License is distributed on an \"AS IS\" BASIS, #", "param.shape) print('----------------------------------') def torch_to_paddle_mapping(model_name, config): # (torch_param_name, paddle_param_name) mapping =", "and torch model parameters pd_params = {} th_params = {}", "{pd_shape}') if isinstance(th_params[th_name], torch.nn.parameter.Parameter): value = th_params[th_name].data.numpy() else: value =", "print(out_torch.shape, out_paddle.shape) print(out_torch[0, 0:100]) print('========================================================') print(out_paddle[0, 0:100]) assert np.allclose(out_torch, out_paddle,", "import torch import timm from config import get_config from t2t_vit", "th_params and f'{pd_name}._mean' in pd_params: th_name_b = f'{th_name}.running_mean' pd_name_b =", "# save weights for paddle model model_path = os.path.join(f'./{model_name}.pdparams') paddle.save(paddle_model.state_dict(),", "permissions and # limitations under the License. 
\"\"\"convert pytorch model", "in th_params and f'{pd_name}._mean' in pd_params: th_name_b = f'{th_name}.running_mean' pd_name_b", "= convert(torch_model, paddle_model, model_name, config) # check correctness x =", "pth_model_path, use_ema=True, strict=False, num_classes=1000) torch_model = torch_model.to(device) torch_model.eval() print_model_named_params(torch_model) print_model_named_buffers(torch_model)", "pdparams\"\"\" import os import numpy as np import paddle import", "= list(zip_map[1]) for key in th_params: missing = False if", "f'{th_name}.running_mean' pd_name_b = f'{pd_name}._mean' _set_value(th_name_b, pd_name_b) if f'{th_name}.running_var' in th_params", "pd_name_b = f'{pd_name}.bias' _set_value(th_name_b, pd_name_b) if f'{th_name}.running_mean' in th_params and", "pd_params: th_name_b = f'{th_name}.running_mean' pd_name_b = f'{pd_name}._mean' _set_value(th_name_b, pd_name_b) if", "print('----------------------------------') for name, param in model.named_parameters(): print(name, param.shape) print('----------------------------------') def", "th_name_b = f'{th_name}.running_mean' pd_name_b = f'{pd_name}._mean' _set_value(th_name_b, pd_name_b) if f'{th_name}.running_var'", "1. 
get paddle and torch model parameters pd_params = {}", "name, param in model.named_buffers(): print(name, param.shape) print('----------------------------------') def torch_to_paddle_mapping(model_name, config):", "get name mapping pairs mapping = torch_to_paddle_mapping(model_name, config) missing_keys_th =", "to paddle pdparams\"\"\" import os import numpy as np import", "] mapping.extend(layer_mapping) head_mapping = [ ('norm', 'norm'), ('head', 'head'), ]", "law or agreed to in writing, software # distributed under", "f'{pp_prefix}.norm1'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.0', f'{pp_prefix}.mlp.0'), (f'{th_prefix}.mlp.2', f'{pp_prefix}.mlp.2'), ] mapping.extend(layer_mapping) mapping.append(('tokens_to_token.project','patch_embed.proj'))", "th_name.endswith('w'): _set_value(th_name, pd_name, transpose=False) else: _set_value(th_name, pd_name) else: if f'{th_name}.weight'", "x_paddle = paddle.to_tensor(x) x_torch = torch.Tensor(x).to(device) out_torch = torch_model(x_torch) out_paddle", "and f'{pd_name}._variance' in pd_params: th_name_b = f'{th_name}.running_var' pd_name_b = f'{pd_name}._variance'", "import build_t2t_vit as build_model from T2T_ViT_torch.models.t2t_vit import * from T2T_ViT_torch.utils", "print(name, param.shape) print('----------------------------------') def print_model_named_buffers(model): print('----------------------------------') for name, param in", "name mapping pairs mapping = torch_to_paddle_mapping(model_name, config) missing_keys_th = []", "if missing: missing_keys_pd.append(key) print('====================================') print('missing_keys_pytorch:') print(missing_keys_th) print('missing_keys_paddle:') print(missing_keys_pd) print('====================================') #", "main(): paddle.set_device('cpu') model_name_list = ['t2t_vit_7', 't2t_vit_10', 't2t_vit_12', 't2t_vit_14', 't2t_vit_14_384', 't2t_vit_19',", "and f'{pd_name}._mean' in pd_params: th_name_b = f'{th_name}.running_mean' 
pd_name_b = f'{pd_name}._mean'", "th_params = {} for name, param in paddle_model.named_parameters(): pd_params[name] =", "if len(value.shape) == 2 and transpose: value = value.transpose((1, 0))", "for key in th_params: missing = False if key not", "= f'{pd_name}.weight' _set_value(th_name_w, pd_name_w) if f'{th_name}.bias' in th_params and f'{pd_name}.bias'", "out_torch = out_torch.data.cpu().numpy() out_paddle = out_paddle.cpu().numpy() print(out_torch.shape, out_paddle.shape) print(out_torch[0, 0:100])", "out_paddle, atol = 1e-2) # save weights for paddle model", "pd_name_b) if f'{th_name}.running_var' in th_params and f'{pd_name}._variance' in pd_params: th_name_b", "'token_labeling' in model_name: torch_model = eval(f'{model_name[:-15]}(img_size={sz})') else: if '384' in", "paddle_model = build_model(config) paddle_model.eval() print_model_named_params(paddle_model) print_model_named_buffers(paddle_model) print('+++++++++++++++++++++++++++++++++++') device = torch.device('cpu')", "th_name_w = f'{th_name}.weight' pd_name_w = f'{pd_name}.weight' _set_value(th_name_w, pd_name_w) if f'{th_name}.bias'", "== pd_shape, f'{th_shape} != {pd_shape}' print(f'**SET** {th_name} {th_shape} **TO** {pd_name}", "if key[:-5] in th_keys: missing = False if missing: missing_keys_th.append(key)", "= tuple(pd_params[pd_name].shape) # paddle shape default type is list #assert", "print_model_named_params(paddle_model) print_model_named_buffers(paddle_model) print('+++++++++++++++++++++++++++++++++++') device = torch.device('cpu') if 'token_labeling' in model_name:", "and # limitations under the License. 
\"\"\"convert pytorch model weights", "as build_model from T2T_ViT_torch.models.t2t_vit import * from T2T_ViT_torch.utils import load_for_transfer_learning", "pth_model_path in zip(model_name_list, pth_model_path_list): print(f'============= NOW: {model_name} =============') sz =", "torch_to_paddle_mapping(model_name, config) missing_keys_th = [] missing_keys_pd = [] zip_map =", "name, param in paddle_model.named_parameters(): pd_params[name] = param for name, param", "print(f'============= NOW: {model_name} =============') sz = 384 if '384' in", "may obtain a copy of the License at # #", "for th_name, pd_name in mapping: if th_name in th_params and", "f'{pp_prefix}.mlp.fc2'), ] else: layer_mapping = [ (f'{th_prefix}.w', f'{pp_prefix}.w'), (f'{th_prefix}.kqv', f'{pp_prefix}.kqv'),", "th_params[th_name].numpy() if len(value.shape) == 2 and transpose: value = value.transpose((1,", "if missing: missing_keys_th.append(key) for key in pd_params: missing = False", "f'{pd_name}._variance' _set_value(th_name_b, pd_name_b) return paddle_model def main(): paddle.set_device('cpu') model_name_list =", "# 3. 
set torch param values to paddle params: may", "'./T2T_ViT_torch/t2t-vit-pth-models/81.7_T2T_ViTt_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.4_T2T_ViTt_19.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.6_T2T_ViTt_24.pth.tar'] for model_name, pth_model_path in zip(model_name_list, pth_model_path_list): print(f'=============", "(f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'), (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'), ] mapping.extend(layer_mapping)", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "(f'{th_prefix}.mlp.2', f'{pp_prefix}.mlp.2'), ] mapping.extend(layer_mapping) mapping.append(('tokens_to_token.project','patch_embed.proj')) num_layers = config.MODEL.DEPTH for idx", "numpy as np import paddle import torch import timm from", "'t2t_vit_14', 't2t_vit_14_384', 't2t_vit_19', 't2t_vit_24', 't2t_vit_24_token_labeling', 't2t_vit_t_14', 't2t_vit_t_19', 't2t_vit_t_24'] pth_model_path_list =", "config.MODEL.DEPTH for idx in range(num_layers): th_prefix = f'blocks.{idx}' pp_prefix =", "if key.endswith('.weight'): if key[:-7] in th_keys: missing = False if", "may not use this file except in compliance with the", "param in model.named_parameters(): print(name, param.shape) print('----------------------------------') def print_model_named_buffers(model): print('----------------------------------') for", "paddle_model, model_name, config): def _set_value(th_name, pd_name, transpose=True): th_shape = th_params[th_name].shape", "if '384' in model_name else 224 if 'token_labeling' in model_name:", "= {} for name, param in paddle_model.named_parameters(): pd_params[name] = param", "value = value.transpose((1, 0)) pd_params[pd_name].set_value(value) # 1. 
get paddle and", "th_params[name] = param for name, param in paddle_model.named_buffers(): pd_params[name] =", "th_name_b = f'{th_name}.bias' pd_name_b = f'{pd_name}.bias' _set_value(th_name_b, pd_name_b) if f'{th_name}.running_mean'", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "this file except in compliance with the License. # You", "for model_name, pth_model_path in zip(model_name_list, pth_model_path_list): print(f'============= NOW: {model_name} =============')", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "f'{pp_prefix}.mlp.2'), ] mapping.extend(layer_mapping) mapping.append(('tokens_to_token.project','patch_embed.proj')) num_layers = config.MODEL.DEPTH for idx in", "paddle.set_device('cpu') model_name_list = ['t2t_vit_7', 't2t_vit_10', 't2t_vit_12', 't2t_vit_14', 't2t_vit_14_384', 't2t_vit_19', 't2t_vit_24',", "'t2t_vit_t_14', 't2t_vit_t_19', 't2t_vit_t_24'] pth_model_path_list = ['./T2T_ViT_torch/t2t-vit-pth-models/71.7_T2T_ViT_7.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/75.2_T2T_ViT_10.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/76.5_T2T_ViT_12.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.5_T2T_ViT_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/83.3_T2T_ViT_14.pth.tar',", "paddle_model(x_paddle) out_torch = out_torch.data.cpu().numpy() out_paddle = out_paddle.cpu().numpy() print(out_torch.shape, out_paddle.shape) print(out_torch[0,", "th_params[name] = param # 2. get name mapping pairs mapping", "transpose on weights for th_name, pd_name in mapping: if th_name", "th_params[th_name].data.numpy() else: value = th_params[th_name].numpy() if len(value.shape) == 2 and", "# # Licensed under the Apache License, Version 2.0 (the", "value = th_params[th_name].data.numpy() else: value = th_params[th_name].numpy() if len(value.shape) ==", "file except in compliance with the License. 
# You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "pd_params: th_name_w = f'{th_name}.weight' pd_name_w = f'{pd_name}.weight' _set_value(th_name_w, pd_name_w) if", "sz, sz).astype('float32') x_paddle = paddle.to_tensor(x) x_torch = torch.Tensor(x).to(device) out_torch =", "print('====================================') print('missing_keys_pytorch:') print(missing_keys_th) print('missing_keys_paddle:') print(missing_keys_pd) print('====================================') # 3. set torch", "th_shape == pd_shape, f'{th_shape} != {pd_shape}' print(f'**SET** {th_name} {th_shape} **TO**", "value = th_params[th_name].numpy() if len(value.shape) == 2 and transpose: value", "param in paddle_model.named_buffers(): pd_params[name] = param for name, param in", "= True if key.endswith('.weight'): if key[:-7] in th_keys: missing =", "tuple(pd_params[pd_name].shape) # paddle shape default type is list #assert th_shape", "= param for name, param in torch_model.named_buffers(): th_params[name] = param", "sz).astype('float32') x_paddle = paddle.to_tensor(x) x_torch = torch.Tensor(x).to(device) out_torch = torch_model(x_torch)", "load_for_transfer_learning(torch_model, pth_model_path, use_ema=True, strict=False, num_classes=1000) torch_model = torch_model.to(device) torch_model.eval() print_model_named_params(torch_model)", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "config = get_config(f'./configs/{model_name}.yaml') paddle_model = build_model(config) paddle_model.eval() print_model_named_params(paddle_model) print_model_named_buffers(paddle_model) print('+++++++++++++++++++++++++++++++++++')", "'t2t_vit_10', 't2t_vit_12', 't2t_vit_14', 't2t_vit_14_384', 't2t_vit_19', 't2t_vit_24', 't2t_vit_24_token_labeling', 't2t_vit_t_14', 't2t_vit_t_19', 't2t_vit_t_24']", "head_mapping = [ ('norm', 'norm'), ('head', 'head'), ] mapping.extend(head_mapping) return", "in pd_params: missing = False if key not in pd_keys:", "= f'{th_name}.weight' pd_name_w = 
f'{pd_name}.weight' _set_value(th_name_w, pd_name_w) if f'{th_name}.bias' in", "(f'{th_prefix}.kqv', f'{pp_prefix}.kqv'), (f'{th_prefix}.proj', f'{pp_prefix}.proj'), (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.0', f'{pp_prefix}.mlp.0'),", "in zip(model_name_list, pth_model_path_list): print(f'============= NOW: {model_name} =============') sz = 384", "[] zip_map = list(zip(*mapping)) th_keys = list(zip_map[0]) pd_keys = list(zip_map[1])", "assert np.allclose(out_torch, out_paddle, atol = 1e-2) # save weights for", "pytorch model weights to paddle pdparams\"\"\" import os import numpy", "'_t_' in model_name: layer_mapping = [ (f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'), (f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'),", "if th_name in th_params and pd_name in pd_params: # nn.Parameters", "f'{pd_name}.bias' _set_value(th_name_b, pd_name_b) if f'{th_name}.running_mean' in th_params and f'{pd_name}._mean' in", "else: layer_mapping = [ (f'{th_prefix}.w', f'{pp_prefix}.w'), (f'{th_prefix}.kqv', f'{pp_prefix}.kqv'), (f'{th_prefix}.proj', f'{pp_prefix}.proj'),", "f'patch_embed.attn{idx}' if '_t_' in model_name: layer_mapping = [ (f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'),", "missing: missing_keys_pd.append(key) print('====================================') print('missing_keys_pytorch:') print(missing_keys_th) print('missing_keys_paddle:') print(missing_keys_pd) print('====================================') # 3.", "= paddle.to_tensor(x) x_torch = torch.Tensor(x).to(device) out_torch = torch_model(x_torch) out_paddle =", "= value.transpose((1, 0)) pd_params[pd_name].set_value(value) # 1. get paddle and torch", "PPViT Authors. All Rights Reserved. 
# # Licensed under the", "for name, param in torch_model.named_buffers(): th_params[name] = param # 2.", "save weights for paddle model model_path = os.path.join(f'./{model_name}.pdparams') paddle.save(paddle_model.state_dict(), model_path)", "('head', 'head'), ] mapping.extend(head_mapping) return mapping def convert(torch_model, paddle_model, model_name,", "_set_value(th_name_b, pd_name_b) return paddle_model def main(): paddle.set_device('cpu') model_name_list = ['t2t_vit_7',", "'./T2T_ViT_torch/t2t-vit-pth-models/76.5_T2T_ViT_12.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.5_T2T_ViT_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/83.3_T2T_ViT_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.9_T2T_ViT_19.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.3_T2T_ViT_24.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/84.2_T2T_ViT_24.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.7_T2T_ViTt_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.4_T2T_ViTt_19.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.6_T2T_ViTt_24.pth.tar'] for", "model model_path = os.path.join(f'./{model_name}.pdparams') paddle.save(paddle_model.state_dict(), model_path) print(f'{model_name} done') print('all done')", "key not in th_keys: missing = True if key.endswith('.weight'): if", "name, param in model.named_parameters(): print(name, param.shape) print('----------------------------------') def print_model_named_buffers(model): print('----------------------------------')", "(f'{th_prefix}.proj', f'{pp_prefix}.proj'), (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.0', f'{pp_prefix}.mlp.0'), (f'{th_prefix}.mlp.2', f'{pp_prefix}.mlp.2'),", "pairs mapping = torch_to_paddle_mapping(model_name, config) missing_keys_th = [] missing_keys_pd =", "x_torch = torch.Tensor(x).to(device) out_torch = torch_model(x_torch) out_paddle = paddle_model(x_paddle) out_torch", "print_model_named_buffers(paddle_model) 
print('+++++++++++++++++++++++++++++++++++') device = torch.device('cpu') if 'token_labeling' in model_name: torch_model", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "'./T2T_ViT_torch/t2t-vit-pth-models/82.4_T2T_ViTt_19.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.6_T2T_ViTt_24.pth.tar'] for model_name, pth_model_path in zip(model_name_list, pth_model_path_list): print(f'============= NOW:", "timm from config import get_config from t2t_vit import build_t2t_vit as", "convert(torch_model, paddle_model, model_name, config): def _set_value(th_name, pd_name, transpose=True): th_shape =", "weights to paddle pdparams\"\"\" import os import numpy as np", "= eval(f'{model_name[:-15]}(img_size={sz})') else: if '384' in model_name: torch_model = eval(f'{model_name[:-4]}(img_size={sz})')", "print('missing_keys_pytorch:') print(missing_keys_th) print('missing_keys_paddle:') print(missing_keys_pd) print('====================================') # 3. set torch param", "# nn.Parameters if th_name.endswith('w'): _set_value(th_name, pd_name, transpose=False) else: _set_value(th_name, pd_name)", "or implied. # See the License for the specific language", "Rights Reserved. # # Licensed under the Apache License, Version", "key[:-5] in th_keys: missing = False if missing: missing_keys_th.append(key) for", "'./T2T_ViT_torch/t2t-vit-pth-models/84.2_T2T_ViT_24.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.7_T2T_ViTt_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.4_T2T_ViTt_19.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.6_T2T_ViTt_24.pth.tar'] for model_name, pth_model_path in zip(model_name_list, pth_model_path_list):", "for name, param in paddle_model.named_buffers(): pd_params[name] = param for name,", "def convert(torch_model, paddle_model, model_name, config): def _set_value(th_name, pd_name, transpose=True): th_shape", "print('====================================') # 3. 
set torch param values to paddle params:", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "(f'{th_prefix}.mlp.0', f'{pp_prefix}.mlp.0'), (f'{th_prefix}.mlp.2', f'{pp_prefix}.mlp.2'), ] mapping.extend(layer_mapping) mapping.append(('tokens_to_token.project','patch_embed.proj')) num_layers = config.MODEL.DEPTH", "== 2 and transpose: value = value.transpose((1, 0)) pd_params[pd_name].set_value(value) #", "= [ (f'{th_prefix}.w', f'{pp_prefix}.w'), (f'{th_prefix}.kqv', f'{pp_prefix}.kqv'), (f'{th_prefix}.proj', f'{pp_prefix}.proj'), (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'),", "paddle_model = convert(torch_model, paddle_model, model_name, config) # check correctness x", "f'{pd_name}._mean' _set_value(th_name_b, pd_name_b) if f'{th_name}.running_var' in th_params and f'{pd_name}._variance' in", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "# check correctness x = np.random.randn(2, 3, sz, sz).astype('float32') x_paddle", "config): # (torch_param_name, paddle_param_name) mapping = [ ('cls_token', 'cls_token'), ('pos_embed',", "missing: missing_keys_th.append(key) for key in pd_params: missing = False if", "384 if '384' in model_name else 224 if 'token_labeling' in", "License. 
\"\"\"convert pytorch model weights to paddle pdparams\"\"\" import os", "= [ (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'), (f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'),", "= f'{th_name}.running_var' pd_name_b = f'{pd_name}._variance' _set_value(th_name_b, pd_name_b) return paddle_model def", "out_torch = torch_model(x_torch) out_paddle = paddle_model(x_paddle) out_torch = out_torch.data.cpu().numpy() out_paddle", "in pd_keys: missing = True if key.endswith('.weight'): if key[:-7] in", "f'{pd_name}.weight' in pd_params: th_name_w = f'{th_name}.weight' pd_name_w = f'{pd_name}.weight' _set_value(th_name_w,", "(the \"License\"); # you may not use this file except", "list(zip(*mapping)) th_keys = list(zip_map[0]) pd_keys = list(zip_map[1]) for key in", "# you may not use this file except in compliance", "[ (f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'), (f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'), (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.fc1',", "value.transpose((1, 0)) pd_params[pd_name].set_value(value) # 1. 
get paddle and torch model", "= f'{pd_name}._variance' _set_value(th_name_b, pd_name_b) return paddle_model def main(): paddle.set_device('cpu') model_name_list", "pth_model_path_list = ['./T2T_ViT_torch/t2t-vit-pth-models/71.7_T2T_ViT_7.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/75.2_T2T_ViT_10.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/76.5_T2T_ViT_12.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.5_T2T_ViT_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/83.3_T2T_ViT_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.9_T2T_ViT_19.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.3_T2T_ViT_24.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/84.2_T2T_ViT_24.pth.tar',", "mapping def convert(torch_model, paddle_model, model_name, config): def _set_value(th_name, pd_name, transpose=True):", "3, sz, sz).astype('float32') x_paddle = paddle.to_tensor(x) x_torch = torch.Tensor(x).to(device) out_torch", "= False if key not in th_keys: missing = True", "'t2t_vit_14_384', 't2t_vit_19', 't2t_vit_24', 't2t_vit_24_token_labeling', 't2t_vit_t_14', 't2t_vit_t_19', 't2t_vit_t_24'] pth_model_path_list = ['./T2T_ViT_torch/t2t-vit-pth-models/71.7_T2T_ViT_7.pth.tar',", "if 'token_labeling' in model_name: torch_model = eval(f'{model_name[:-15]}(img_size={sz})') else: if '384'", "f'{pp_prefix}.mlp.fc1'), (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'), ] mapping.extend(layer_mapping) head_mapping = [ ('norm', 'norm'),", "th_name in th_params and pd_name in pd_params: # nn.Parameters if", "config = get_config(f'./configs/{model_name[:-15]}.yaml') else: config = get_config(f'./configs/{model_name}.yaml') paddle_model = build_model(config)", "weights for paddle model model_path = os.path.join(f'./{model_name}.pdparams') paddle.save(paddle_model.state_dict(), model_path) print(f'{model_name}", "# # Unless required by applicable law or agreed to", "f'{pd_name}._variance' in pd_params: th_name_b = f'{th_name}.running_var' pd_name_b = f'{pd_name}._variance' _set_value(th_name_b,", "] 
mapping.extend(layer_mapping) mapping.append(('tokens_to_token.project','patch_embed.proj')) num_layers = config.MODEL.DEPTH for idx in range(num_layers):", "paddle params: may needs transpose on weights for th_name, pd_name", "= f'patch_embed.attn{idx}' if '_t_' in model_name: layer_mapping = [ (f'{th_prefix}.attn.qkv',", "layer_mapping = [ (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'), (f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'), (f'{th_prefix}.norm2',", "= out_paddle.cpu().numpy() print(out_torch.shape, out_paddle.shape) print(out_torch[0, 0:100]) print('========================================================') print(out_paddle[0, 0:100]) assert", "(f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'), (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'), ] else: layer_mapping = [ (f'{th_prefix}.w',", "(c) 2021 PPViT Authors. All Rights Reserved. # # Licensed", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "['./T2T_ViT_torch/t2t-vit-pth-models/71.7_T2T_ViT_7.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/75.2_T2T_ViT_10.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/76.5_T2T_ViT_12.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.5_T2T_ViT_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/83.3_T2T_ViT_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.9_T2T_ViT_19.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.3_T2T_ViT_24.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/84.2_T2T_ViT_24.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.7_T2T_ViTt_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.4_T2T_ViTt_19.pth.tar',", "print('----------------------------------') def torch_to_paddle_mapping(model_name, config): # (torch_param_name, paddle_param_name) mapping = [", "Version 2.0 (the \"License\"); # you may not use this", "= eval(f'{model_name}(img_size={sz})') load_for_transfer_learning(torch_model, pth_model_path, use_ema=True, strict=False, num_classes=1000) torch_model = 
torch_model.to(device)", "= f'{th_name}.bias' pd_name_b = f'{pd_name}.bias' _set_value(th_name_b, pd_name_b) if f'{th_name}.running_mean' in", "('norm', 'norm'), ('head', 'head'), ] mapping.extend(head_mapping) return mapping def convert(torch_model,", "th_keys: missing = False if missing: missing_keys_th.append(key) for key in", "torch_to_paddle_mapping(model_name, config): # (torch_param_name, paddle_param_name) mapping = [ ('cls_token', 'cls_token'),", "missing = False if key not in pd_keys: missing =", "key.endswith('.weight'): if key[:-7] in pd_keys: missing = False if key.endswith('.bias'):", "in paddle_model.named_parameters(): pd_params[name] = param for name, param in torch_model.named_parameters():", "f'{th_name}.weight' in th_params and f'{pd_name}.weight' in pd_params: th_name_w = f'{th_name}.weight'", "paddle_model.named_parameters(): pd_params[name] = param for name, param in torch_model.named_parameters(): th_params[name]", "th_params[th_name].shape pd_shape = tuple(pd_params[pd_name].shape) # paddle shape default type is", "= [] zip_map = list(zip(*mapping)) th_keys = list(zip_map[0]) pd_keys =", "if '_t_' in model_name: layer_mapping = [ (f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'), (f'{th_prefix}.attn.proj',", "implied. 
# See the License for the specific language governing", "under the Apache License, Version 2.0 (the \"License\"); # you", "in mapping: if th_name in th_params and pd_name in pd_params:", "(f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'), ] mapping.extend(layer_mapping) head_mapping = [ ('norm', 'norm'), ('head',", "'./T2T_ViT_torch/t2t-vit-pth-models/81.5_T2T_ViT_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/83.3_T2T_ViT_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.9_T2T_ViT_19.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.3_T2T_ViT_24.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/84.2_T2T_ViT_24.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.7_T2T_ViTt_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.4_T2T_ViTt_19.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.6_T2T_ViTt_24.pth.tar'] for model_name,", "{th_shape} **TO** {pd_name} {pd_shape}') if isinstance(th_params[th_name], torch.nn.parameter.Parameter): value = th_params[th_name].data.numpy()", "pd_name_w) if f'{th_name}.bias' in th_params and f'{pd_name}.bias' in pd_params: th_name_b", "f'{pp_prefix}.norm1'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'), (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'), ] else: layer_mapping", "= os.path.join(f'./{model_name}.pdparams') paddle.save(paddle_model.state_dict(), model_path) print(f'{model_name} done') print('all done') if __name__", "is list #assert th_shape == pd_shape, f'{th_shape} != {pd_shape}' print(f'**SET**", "weights paddle_model = convert(torch_model, paddle_model, model_name, config) # check correctness", "by applicable law or agreed to in writing, software #", "pd_keys: missing = False if missing: missing_keys_pd.append(key) print('====================================') print('missing_keys_pytorch:') print(missing_keys_th)", "= False if missing: missing_keys_pd.append(key) print('====================================') print('missing_keys_pytorch:') print(missing_keys_th) 
print('missing_keys_paddle:') print(missing_keys_pd)", "model parameters pd_params = {} th_params = {} for name,", "(f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'), (f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'), (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'),", "= param for name, param in torch_model.named_parameters(): th_params[name] = param", "= out_torch.data.cpu().numpy() out_paddle = out_paddle.cpu().numpy() print(out_torch.shape, out_paddle.shape) print(out_torch[0, 0:100]) print('========================================================')", "f'{th_name}.running_mean' in th_params and f'{pd_name}._mean' in pd_params: th_name_b = f'{th_name}.running_mean'", "else: torch_model = eval(f'{model_name}(img_size={sz})') load_for_transfer_learning(torch_model, pth_model_path, use_ema=True, strict=False, num_classes=1000) torch_model", "model_name, config) # check correctness x = np.random.randn(2, 3, sz,", "mapping.extend(layer_mapping) head_mapping = [ ('norm', 'norm'), ('head', 'head'), ] mapping.extend(head_mapping)", "in th_params and f'{pd_name}._variance' in pd_params: th_name_b = f'{th_name}.running_var' pd_name_b", "torch_model = eval(f'{model_name[:-4]}(img_size={sz})') else: torch_model = eval(f'{model_name}(img_size={sz})') load_for_transfer_learning(torch_model, pth_model_path, use_ema=True,", "in paddle_model.named_buffers(): pd_params[name] = param for name, param in torch_model.named_buffers():", "= np.random.randn(2, 3, sz, sz).astype('float32') x_paddle = paddle.to_tensor(x) x_torch =", "for key in pd_params: missing = False if key not", "and f'{pd_name}.bias' in pd_params: th_name_b = f'{th_name}.bias' pd_name_b = f'{pd_name}.bias'", "'t2t_vit_12', 't2t_vit_14', 't2t_vit_14_384', 't2t_vit_19', 't2t_vit_24', 't2t_vit_24_token_labeling', 't2t_vit_t_14', 't2t_vit_t_19', 't2t_vit_t_24'] pth_model_path_list", "T2T_ViT_torch.utils import 
load_for_transfer_learning def print_model_named_params(model): print('----------------------------------') for name, param in", "key[:-5] in pd_keys: missing = False if missing: missing_keys_pd.append(key) print('====================================')", "torch model parameters pd_params = {} th_params = {} for", "print('missing_keys_paddle:') print(missing_keys_pd) print('====================================') # 3. set torch param values to", "f'{pp_prefix}.mlp.fc2'), ] mapping.extend(layer_mapping) head_mapping = [ ('norm', 'norm'), ('head', 'head'),", "os.path.join(f'./{model_name}.pdparams') paddle.save(paddle_model.state_dict(), model_path) print(f'{model_name} done') print('all done') if __name__ ==", "(f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'), (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'), ] mapping.extend(layer_mapping) head_mapping = [ ('norm',", "= f'blocks.{idx}' layer_mapping = [ (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'), (f'{th_prefix}.attn.proj',", "'./T2T_ViT_torch/t2t-vit-pth-models/82.6_T2T_ViTt_24.pth.tar'] for model_name, pth_model_path in zip(model_name_list, pth_model_path_list): print(f'============= NOW: {model_name}", "0:100]) assert np.allclose(out_torch, out_paddle, atol = 1e-2) # save weights", "out_paddle.shape) print(out_torch[0, 0:100]) print('========================================================') print(out_paddle[0, 0:100]) assert np.allclose(out_torch, out_paddle, atol", "False if key.endswith('.bias'): if key[:-5] in pd_keys: missing = False", "else: if '384' in model_name: torch_model = eval(f'{model_name[:-4]}(img_size={sz})') else: torch_model", "in model_name: layer_mapping = [ (f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'), (f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'), (f'{th_prefix}.norm1',", "**TO** {pd_name} {pd_shape}') if isinstance(th_params[th_name], torch.nn.parameter.Parameter): value = th_params[th_name].data.numpy() else:", 
"transpose=True): th_shape = th_params[th_name].shape pd_shape = tuple(pd_params[pd_name].shape) # paddle shape", "= torch_to_paddle_mapping(model_name, config) missing_keys_th = [] missing_keys_pd = [] zip_map", "= [ ('cls_token', 'cls_token'), ('pos_embed', 'pos_embed'), ] for idx in", "out_paddle = out_paddle.cpu().numpy() print(out_torch.shape, out_paddle.shape) print(out_torch[0, 0:100]) print('========================================================') print(out_paddle[0, 0:100])", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "missing_keys_th = [] missing_keys_pd = [] zip_map = list(zip(*mapping)) th_keys", "in model_name: torch_model = eval(f'{model_name[:-4]}(img_size={sz})') else: torch_model = eval(f'{model_name}(img_size={sz})') load_for_transfer_learning(torch_model,", "= th_params[th_name].shape pd_shape = tuple(pd_params[pd_name].shape) # paddle shape default type", "def print_model_named_params(model): print('----------------------------------') for name, param in model.named_parameters(): print(name, param.shape)", "Unless required by applicable law or agreed to in writing,", "device = torch.device('cpu') if 'token_labeling' in model_name: torch_model = eval(f'{model_name[:-15]}(img_size={sz})')", "'t2t_vit_24_token_labeling', 't2t_vit_t_14', 't2t_vit_t_19', 't2t_vit_t_24'] pth_model_path_list = ['./T2T_ViT_torch/t2t-vit-pth-models/71.7_T2T_ViT_7.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/75.2_T2T_ViT_10.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/76.5_T2T_ViT_12.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.5_T2T_ViT_14.pth.tar',", "def _set_value(th_name, pd_name, transpose=True): th_shape = th_params[th_name].shape pd_shape = tuple(pd_params[pd_name].shape)", "x = np.random.randn(2, 3, sz, sz).astype('float32') x_paddle = paddle.to_tensor(x) x_torch", "the specific language governing permissions and # limitations under the", "# paddle shape default type is list #assert th_shape ==", "pd_name, transpose=False) else: 
_set_value(th_name, pd_name) else: if f'{th_name}.weight' in th_params", "print('----------------------------------') def print_model_named_buffers(model): print('----------------------------------') for name, param in model.named_buffers(): print(name,", "applicable law or agreed to in writing, software # distributed", "print(name, param.shape) print('----------------------------------') def torch_to_paddle_mapping(model_name, config): # (torch_param_name, paddle_param_name) mapping", "param for name, param in paddle_model.named_buffers(): pd_params[name] = param for", "= ['t2t_vit_7', 't2t_vit_10', 't2t_vit_12', 't2t_vit_14', 't2t_vit_14_384', 't2t_vit_19', 't2t_vit_24', 't2t_vit_24_token_labeling', 't2t_vit_t_14',", "paddle_model.eval() print_model_named_params(paddle_model) print_model_named_buffers(paddle_model) print('+++++++++++++++++++++++++++++++++++') device = torch.device('cpu') if 'token_labeling' in", "model.named_buffers(): print(name, param.shape) print('----------------------------------') def torch_to_paddle_mapping(model_name, config): # (torch_param_name, paddle_param_name)", "f'{pp_prefix}.proj'), (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.0', f'{pp_prefix}.mlp.0'), (f'{th_prefix}.mlp.2', f'{pp_prefix}.mlp.2'), ]", "in th_params and pd_name in pd_params: # nn.Parameters if th_name.endswith('w'):", "f'{th_name}.running_var' in th_params and f'{pd_name}._variance' in pd_params: th_name_b = f'{th_name}.running_var'", "= config.MODEL.DEPTH for idx in range(num_layers): th_prefix = f'blocks.{idx}' pp_prefix", "224 if 'token_labeling' in model_name: config = get_config(f'./configs/{model_name[:-15]}.yaml') else: config", "# convert weights paddle_model = convert(torch_model, paddle_model, model_name, config) #", "f'blocks.{idx}' layer_mapping = [ (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'), (f'{th_prefix}.attn.proj', 
f'{pp_prefix}.attn.proj'),", "may needs transpose on weights for th_name, pd_name in mapping:", "= False if key not in pd_keys: missing = True", "pd_params[name] = param for name, param in torch_model.named_buffers(): th_params[name] =", "from t2t_vit import build_t2t_vit as build_model from T2T_ViT_torch.models.t2t_vit import *", "th_name, pd_name in mapping: if th_name in th_params and pd_name", "in writing, software # distributed under the License is distributed", "layer_mapping = [ (f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'), (f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'), (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.norm2',", "pd_keys: missing = False if key.endswith('.bias'): if key[:-5] in pd_keys:", "{th_name} {th_shape} **TO** {pd_name} {pd_shape}') if isinstance(th_params[th_name], torch.nn.parameter.Parameter): value =", "print(missing_keys_th) print('missing_keys_paddle:') print(missing_keys_pd) print('====================================') # 3. set torch param values", "= f'{th_name}.running_mean' pd_name_b = f'{pd_name}._mean' _set_value(th_name_b, pd_name_b) if f'{th_name}.running_var' in", "= get_config(f'./configs/{model_name[:-15]}.yaml') else: config = get_config(f'./configs/{model_name}.yaml') paddle_model = build_model(config) paddle_model.eval()", "missing = False if key.endswith('.bias'): if key[:-5] in th_keys: missing", "import * from T2T_ViT_torch.utils import load_for_transfer_learning def print_model_named_params(model): print('----------------------------------') for", "return mapping def convert(torch_model, paddle_model, model_name, config): def _set_value(th_name, pd_name,", "def print_model_named_buffers(model): print('----------------------------------') for name, param in model.named_buffers(): print(name, param.shape)", "in pd_params: th_name_b = f'{th_name}.running_var' pd_name_b = f'{pd_name}._variance' _set_value(th_name_b, pd_name_b)", "num_layers = config.MODEL.DEPTH for idx in range(num_layers): 
th_prefix = f'blocks.{idx}'", "missing_keys_pd = [] zip_map = list(zip(*mapping)) th_keys = list(zip_map[0]) pd_keys", "get_config from t2t_vit import build_t2t_vit as build_model from T2T_ViT_torch.models.t2t_vit import", "_set_value(th_name_b, pd_name_b) if f'{th_name}.running_mean' in th_params and f'{pd_name}._mean' in pd_params:", "if f'{th_name}.weight' in th_params and f'{pd_name}.weight' in pd_params: th_name_w =", "print_model_named_buffers(torch_model) # convert weights paddle_model = convert(torch_model, paddle_model, model_name, config)", "=============') sz = 384 if '384' in model_name else 224", "3): th_prefix = f'tokens_to_token.attention{idx}' pp_prefix = f'patch_embed.attn{idx}' if '_t_' in", "missing = False if missing: missing_keys_th.append(key) for key in pd_params:", "paddle_model def main(): paddle.set_device('cpu') model_name_list = ['t2t_vit_7', 't2t_vit_10', 't2t_vit_12', 't2t_vit_14',", "pd_name_b = f'{pd_name}._mean' _set_value(th_name_b, pd_name_b) if f'{th_name}.running_var' in th_params and", "for name, param in torch_model.named_parameters(): th_params[name] = param for name,", "else: config = get_config(f'./configs/{model_name}.yaml') paddle_model = build_model(config) paddle_model.eval() print_model_named_params(paddle_model) print_model_named_buffers(paddle_model)", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "name, param in torch_model.named_parameters(): th_params[name] = param for name, param", "License, Version 2.0 (the \"License\"); # you may not use", "paddle and torch model parameters pd_params = {} th_params =", "param values to paddle params: may needs transpose on weights", "# You may obtain a copy of the License at", "in range(num_layers): th_prefix = f'blocks.{idx}' pp_prefix = f'blocks.{idx}' layer_mapping =", "missing_keys_pd.append(key) print('====================================') print('missing_keys_pytorch:') print(missing_keys_th) print('missing_keys_paddle:') print(missing_keys_pd) 
print('====================================') # 3. set", "use_ema=True, strict=False, num_classes=1000) torch_model = torch_model.to(device) torch_model.eval() print_model_named_params(torch_model) print_model_named_buffers(torch_model) #", "t2t_vit import build_t2t_vit as build_model from T2T_ViT_torch.models.t2t_vit import * from", "mapping = torch_to_paddle_mapping(model_name, config) missing_keys_th = [] missing_keys_pd = []", "missing = False if key not in th_keys: missing =", "for idx in range(num_layers): th_prefix = f'blocks.{idx}' pp_prefix = f'blocks.{idx}'", "key[:-7] in pd_keys: missing = False if key.endswith('.bias'): if key[:-5]", "f'tokens_to_token.attention{idx}' pp_prefix = f'patch_embed.attn{idx}' if '_t_' in model_name: layer_mapping =", "torch.device('cpu') if 'token_labeling' in model_name: torch_model = eval(f'{model_name[:-15]}(img_size={sz})') else: if", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "missing = True if key.endswith('.weight'): if key[:-7] in th_keys: missing", "idx in range(num_layers): th_prefix = f'blocks.{idx}' pp_prefix = f'blocks.{idx}' layer_mapping", "True if key.endswith('.weight'): if key[:-7] in th_keys: missing = False", "{} for name, param in paddle_model.named_parameters(): pd_params[name] = param for", "torch_model = eval(f'{model_name}(img_size={sz})') load_for_transfer_learning(torch_model, pth_model_path, use_ema=True, strict=False, num_classes=1000) torch_model =", "Authors. All Rights Reserved. 
# # Licensed under the Apache", "for paddle model model_path = os.path.join(f'./{model_name}.pdparams') paddle.save(paddle_model.state_dict(), model_path) print(f'{model_name} done')", "os import numpy as np import paddle import torch import", "np.random.randn(2, 3, sz, sz).astype('float32') x_paddle = paddle.to_tensor(x) x_torch = torch.Tensor(x).to(device)", "in th_params and f'{pd_name}.weight' in pd_params: th_name_w = f'{th_name}.weight' pd_name_w", "print('+++++++++++++++++++++++++++++++++++') device = torch.device('cpu') if 'token_labeling' in model_name: torch_model =", "model.named_parameters(): print(name, param.shape) print('----------------------------------') def print_model_named_buffers(model): print('----------------------------------') for name, param", "convert(torch_model, paddle_model, model_name, config) # check correctness x = np.random.randn(2,", "in th_keys: missing = False if key.endswith('.bias'): if key[:-5] in", "mapping.extend(layer_mapping) mapping.append(('tokens_to_token.project','patch_embed.proj')) num_layers = config.MODEL.DEPTH for idx in range(num_layers): th_prefix", "'384' in model_name else 224 if 'token_labeling' in model_name: config", "mapping = [ ('cls_token', 'cls_token'), ('pos_embed', 'pos_embed'), ] for idx", "# Copyright (c) 2021 PPViT Authors. All Rights Reserved. 
#", "the License for the specific language governing permissions and #", "else: value = th_params[th_name].numpy() if len(value.shape) == 2 and transpose:", "pd_name_b = f'{pd_name}._variance' _set_value(th_name_b, pd_name_b) return paddle_model def main(): paddle.set_device('cpu')", "Apache License, Version 2.0 (the \"License\"); # you may not", "('pos_embed', 'pos_embed'), ] for idx in range(1, 3): th_prefix =", "'t2t_vit_19', 't2t_vit_24', 't2t_vit_24_token_labeling', 't2t_vit_t_14', 't2t_vit_t_19', 't2t_vit_t_24'] pth_model_path_list = ['./T2T_ViT_torch/t2t-vit-pth-models/71.7_T2T_ViT_7.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/75.2_T2T_ViT_10.pth.tar',", "= th_params[th_name].data.numpy() else: value = th_params[th_name].numpy() if len(value.shape) == 2", "in th_params and f'{pd_name}.bias' in pd_params: th_name_b = f'{th_name}.bias' pd_name_b", "convert weights paddle_model = convert(torch_model, paddle_model, model_name, config) # check", "either express or implied. # See the License for the", "print(out_paddle[0, 0:100]) assert np.allclose(out_torch, out_paddle, atol = 1e-2) # save", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "f'{pd_name}.weight' _set_value(th_name_w, pd_name_w) if f'{th_name}.bias' in th_params and f'{pd_name}.bias' in", "pd_name, transpose=True): th_shape = th_params[th_name].shape pd_shape = tuple(pd_params[pd_name].shape) # paddle", "= param for name, param in paddle_model.named_buffers(): pd_params[name] = param", "print('----------------------------------') for name, param in model.named_buffers(): print(name, param.shape) print('----------------------------------') def", "strict=False, num_classes=1000) torch_model = torch_model.to(device) torch_model.eval() print_model_named_params(torch_model) print_model_named_buffers(torch_model) # convert", "import paddle import torch import timm from config import get_config", "else 224 if 'token_labeling' in model_name: config = 
get_config(f'./configs/{model_name[:-15]}.yaml') else:", "language governing permissions and # limitations under the License. \"\"\"convert", "np.allclose(out_torch, out_paddle, atol = 1e-2) # save weights for paddle", "key in th_params: missing = False if key not in", "in pd_params: # nn.Parameters if th_name.endswith('w'): _set_value(th_name, pd_name, transpose=False) else:", "paddle_model, model_name, config) # check correctness x = np.random.randn(2, 3,", "print_model_named_params(model): print('----------------------------------') for name, param in model.named_parameters(): print(name, param.shape) print('----------------------------------')", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "f'{pp_prefix}.w'), (f'{th_prefix}.kqv', f'{pp_prefix}.kqv'), (f'{th_prefix}.proj', f'{pp_prefix}.proj'), (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.0',", "'t2t_vit_24', 't2t_vit_24_token_labeling', 't2t_vit_t_14', 't2t_vit_t_19', 't2t_vit_t_24'] pth_model_path_list = ['./T2T_ViT_torch/t2t-vit-pth-models/71.7_T2T_ViT_7.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/75.2_T2T_ViT_10.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/76.5_T2T_ViT_12.pth.tar',", "pd_params = {} th_params = {} for name, param in", "False if key not in th_keys: missing = True if", "f'{th_shape} != {pd_shape}' print(f'**SET** {th_name} {th_shape} **TO** {pd_name} {pd_shape}') if", "pth_model_path_list): print(f'============= NOW: {model_name} =============') sz = 384 if '384'", "torch param values to paddle params: may needs transpose on", "T2T_ViT_torch.models.t2t_vit import * from T2T_ViT_torch.utils import load_for_transfer_learning def print_model_named_params(model): print('----------------------------------')", "model_name, config): def _set_value(th_name, pd_name, transpose=True): th_shape = th_params[th_name].shape pd_shape", "f'{pp_prefix}.norm1'), (f'{th_prefix}.attn.qkv', 
f'{pp_prefix}.attn.qkv'), (f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'), (f'{th_prefix}.mlp.fc2',", "get_config(f'./configs/{model_name[:-15]}.yaml') else: config = get_config(f'./configs/{model_name}.yaml') paddle_model = build_model(config) paddle_model.eval() print_model_named_params(paddle_model)", "[ ('norm', 'norm'), ('head', 'head'), ] mapping.extend(head_mapping) return mapping def", "{} th_params = {} for name, param in paddle_model.named_parameters(): pd_params[name]", "missing = False if key.endswith('.bias'): if key[:-5] in pd_keys: missing", "if '384' in model_name: torch_model = eval(f'{model_name[:-4]}(img_size={sz})') else: torch_model =", "True if key.endswith('.weight'): if key[:-7] in pd_keys: missing = False", "in model_name: torch_model = eval(f'{model_name[:-15]}(img_size={sz})') else: if '384' in model_name:", "['t2t_vit_7', 't2t_vit_10', 't2t_vit_12', 't2t_vit_14', 't2t_vit_14_384', 't2t_vit_19', 't2t_vit_24', 't2t_vit_24_token_labeling', 't2t_vit_t_14', 't2t_vit_t_19',", "pd_params: # nn.Parameters if th_name.endswith('w'): _set_value(th_name, pd_name, transpose=False) else: _set_value(th_name,", "model_name: torch_model = eval(f'{model_name[:-15]}(img_size={sz})') else: if '384' in model_name: torch_model", "f'{pp_prefix}.attn.proj'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'), (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'), ] mapping.extend(layer_mapping) head_mapping", "import timm from config import get_config from t2t_vit import build_t2t_vit", "model_name else 224 if 'token_labeling' in model_name: config = get_config(f'./configs/{model_name[:-15]}.yaml')", "pd_name_b) if f'{th_name}.running_mean' in th_params and f'{pd_name}._mean' in pd_params: th_name_b", "th_shape = th_params[th_name].shape pd_shape = tuple(pd_params[pd_name].shape) # paddle shape default", "\"License\"); # 
you may not use this file except in", "name, param in torch_model.named_buffers(): th_params[name] = param # 2. get", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "if key.endswith('.bias'): if key[:-5] in th_keys: missing = False if", "\"\"\"convert pytorch model weights to paddle pdparams\"\"\" import os import", "{pd_shape}' print(f'**SET** {th_name} {th_shape} **TO** {pd_name} {pd_shape}') if isinstance(th_params[th_name], torch.nn.parameter.Parameter):", "torch_model.named_buffers(): th_params[name] = param # 2. get name mapping pairs", "torch_model = eval(f'{model_name[:-15]}(img_size={sz})') else: if '384' in model_name: torch_model =", "# distributed under the License is distributed on an \"AS", "param for name, param in torch_model.named_parameters(): th_params[name] = param for", "# Unless required by applicable law or agreed to in", "NOW: {model_name} =============') sz = 384 if '384' in model_name", "set torch param values to paddle params: may needs transpose", "th_params and f'{pd_name}._variance' in pd_params: th_name_b = f'{th_name}.running_var' pd_name_b =", "(f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'), ] else: layer_mapping = [ (f'{th_prefix}.w', f'{pp_prefix}.w'), (f'{th_prefix}.kqv',", "zip_map = list(zip(*mapping)) th_keys = list(zip_map[0]) pd_keys = list(zip_map[1]) for", "{pd_name} {pd_shape}') if isinstance(th_params[th_name], torch.nn.parameter.Parameter): value = th_params[th_name].data.numpy() else: value", "(f'{th_prefix}.w', f'{pp_prefix}.w'), (f'{th_prefix}.kqv', f'{pp_prefix}.kqv'), (f'{th_prefix}.proj', f'{pp_prefix}.proj'), (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'),", "th_keys: missing = False if key.endswith('.bias'): if key[:-5] in th_keys:", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "] else: layer_mapping = [ (f'{th_prefix}.w', f'{pp_prefix}.w'), (f'{th_prefix}.kqv', f'{pp_prefix}.kqv'), (f'{th_prefix}.proj',", "= 
eval(f'{model_name[:-4]}(img_size={sz})') else: torch_model = eval(f'{model_name}(img_size={sz})') load_for_transfer_learning(torch_model, pth_model_path, use_ema=True, strict=False,", "= f'{pd_name}.bias' _set_value(th_name_b, pd_name_b) if f'{th_name}.running_mean' in th_params and f'{pd_name}._mean'", "param.shape) print('----------------------------------') def print_model_named_buffers(model): print('----------------------------------') for name, param in model.named_buffers():", "pd_keys = list(zip_map[1]) for key in th_params: missing = False", "You may obtain a copy of the License at #", "torch_model.eval() print_model_named_params(torch_model) print_model_named_buffers(torch_model) # convert weights paddle_model = convert(torch_model, paddle_model,", "'head'), ] mapping.extend(head_mapping) return mapping def convert(torch_model, paddle_model, model_name, config):", "f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'), (f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'), ] mapping.extend(layer_mapping) head_mapping = [", "(f'{th_prefix}.norm2', f'{pp_prefix}.norm2'), (f'{th_prefix}.mlp.0', f'{pp_prefix}.mlp.0'), (f'{th_prefix}.mlp.2', f'{pp_prefix}.mlp.2'), ] mapping.extend(layer_mapping) mapping.append(('tokens_to_token.project','patch_embed.proj')) num_layers", "= ['./T2T_ViT_torch/t2t-vit-pth-models/71.7_T2T_ViT_7.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/75.2_T2T_ViT_10.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/76.5_T2T_ViT_12.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.5_T2T_ViT_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/83.3_T2T_ViT_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.9_T2T_ViT_19.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.3_T2T_ViT_24.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/84.2_T2T_ViT_24.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.7_T2T_ViTt_14.pth.tar',", "th_keys: missing = True if key.endswith('.weight'): if key[:-7] in th_keys:", "not in pd_keys: missing = True if 
key.endswith('.weight'): if key[:-7]", "th_name_b = f'{th_name}.running_var' pd_name_b = f'{pd_name}._variance' _set_value(th_name_b, pd_name_b) return paddle_model", "pd_shape, f'{th_shape} != {pd_shape}' print(f'**SET** {th_name} {th_shape} **TO** {pd_name} {pd_shape}')", "mapping: if th_name in th_params and pd_name in pd_params: #", "needs transpose on weights for th_name, pd_name in mapping: if", "pd_name in pd_params: # nn.Parameters if th_name.endswith('w'): _set_value(th_name, pd_name, transpose=False)", "to paddle params: may needs transpose on weights for th_name,", "'t2t_vit_t_24'] pth_model_path_list = ['./T2T_ViT_torch/t2t-vit-pth-models/71.7_T2T_ViT_7.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/75.2_T2T_ViT_10.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/76.5_T2T_ViT_12.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.5_T2T_ViT_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/83.3_T2T_ViT_14.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/81.9_T2T_ViT_19.pth.tar', './T2T_ViT_torch/t2t-vit-pth-models/82.3_T2T_ViT_24.pth.tar',", "model_name: layer_mapping = [ (f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'), (f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'), (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'),", "print(f'**SET** {th_name} {th_shape} **TO** {pd_name} {pd_shape}') if isinstance(th_params[th_name], torch.nn.parameter.Parameter): value", "the Apache License, Version 2.0 (the \"License\"); # you may", "_set_value(th_name, pd_name, transpose=False) else: _set_value(th_name, pd_name) else: if f'{th_name}.weight' in", "transpose=False) else: _set_value(th_name, pd_name) else: if f'{th_name}.weight' in th_params and", "import os import numpy as np import paddle import torch", "atol = 1e-2) # save weights for paddle model model_path", "torch_model(x_torch) out_paddle = paddle_model(x_paddle) out_torch = out_torch.data.cpu().numpy() out_paddle = out_paddle.cpu().numpy()", "= [ (f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'), 
(f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'), (f'{th_prefix}.norm1', f'{pp_prefix}.norm1'), (f'{th_prefix}.norm2', f'{pp_prefix}.norm2'),", "th_prefix = f'blocks.{idx}' pp_prefix = f'blocks.{idx}' layer_mapping = [ (f'{th_prefix}.norm1'," ]
[ "point_inseparability_to_pointID from ._call_estimators import TwoNN, run_singleGMST,run_singleCorrDim,runDANCo, runDANCoStats, runDANColoop,runANOVAglobal,runANOVAlocal,radovanovic_estimators_matlab,Hidalgo from ._DANCo", "<reponame>j-bac/id-concentration from ._FisherS import randsphere, preprocessing, SeparabilityAnalysis, point_inseparability_to_pointID from ._call_estimators", "preprocessing, SeparabilityAnalysis, point_inseparability_to_pointID from ._call_estimators import TwoNN, run_singleGMST,run_singleCorrDim,runDANCo, runDANCoStats, runDANColoop,runANOVAglobal,runANOVAlocal,radovanovic_estimators_matlab,Hidalgo", "twonn_py from ._ESS import essLocalDimEst as ess_py from ._mada import", "import essLocalDimEst as ess_py from ._mada import mada as mada_py", "import TwoNN, run_singleGMST,run_singleCorrDim,runDANCo, runDANCoStats, runDANColoop,runANOVAglobal,runANOVAlocal,radovanovic_estimators_matlab,Hidalgo from ._DANCo import dancoDimEst as", "TwoNN, run_singleGMST,run_singleCorrDim,runDANCo, runDANCoStats, runDANColoop,runANOVAglobal,runANOVAlocal,radovanovic_estimators_matlab,Hidalgo from ._DANCo import dancoDimEst as danco_py", "as danco_py from ._TwoNN import twonn as twonn_py from ._ESS", "ess_py from ._mada import mada as mada_py from ._corint import", "._ESS import essLocalDimEst as ess_py from ._mada import mada as", "from ._DANCo import dancoDimEst as danco_py from ._TwoNN import twonn", "from ._TwoNN import twonn as twonn_py from ._ESS import essLocalDimEst", "randsphere, preprocessing, SeparabilityAnalysis, point_inseparability_to_pointID from ._call_estimators import TwoNN, run_singleGMST,run_singleCorrDim,runDANCo, runDANCoStats,", "essLocalDimEst as ess_py from ._mada import mada as mada_py from", "run_singleGMST,run_singleCorrDim,runDANCo, runDANCoStats, runDANColoop,runANOVAglobal,runANOVAlocal,radovanovic_estimators_matlab,Hidalgo from ._DANCo import dancoDimEst as danco_py from", "from ._call_estimators import TwoNN, 
run_singleGMST,run_singleCorrDim,runDANCo, runDANCoStats, runDANColoop,runANOVAglobal,runANOVAlocal,radovanovic_estimators_matlab,Hidalgo from ._DANCo import", "._TwoNN import twonn as twonn_py from ._ESS import essLocalDimEst as", "from ._ESS import essLocalDimEst as ess_py from ._mada import mada", "._FisherS import randsphere, preprocessing, SeparabilityAnalysis, point_inseparability_to_pointID from ._call_estimators import TwoNN,", "import twonn as twonn_py from ._ESS import essLocalDimEst as ess_py", "runDANColoop,runANOVAglobal,runANOVAlocal,radovanovic_estimators_matlab,Hidalgo from ._DANCo import dancoDimEst as danco_py from ._TwoNN import", "as ess_py from ._mada import mada as mada_py from ._corint", "import dancoDimEst as danco_py from ._TwoNN import twonn as twonn_py", "._call_estimators import TwoNN, run_singleGMST,run_singleCorrDim,runDANCo, runDANCoStats, runDANColoop,runANOVAglobal,runANOVAlocal,radovanovic_estimators_matlab,Hidalgo from ._DANCo import dancoDimEst", "as twonn_py from ._ESS import essLocalDimEst as ess_py from ._mada", "dancoDimEst as danco_py from ._TwoNN import twonn as twonn_py from", "twonn as twonn_py from ._ESS import essLocalDimEst as ess_py from", "runDANCoStats, runDANColoop,runANOVAglobal,runANOVAlocal,radovanovic_estimators_matlab,Hidalgo from ._DANCo import dancoDimEst as danco_py from ._TwoNN", "import mada as mada_py from ._corint import corint as corint_py", "._DANCo import dancoDimEst as danco_py from ._TwoNN import twonn as", "SeparabilityAnalysis, point_inseparability_to_pointID from ._call_estimators import TwoNN, run_singleGMST,run_singleCorrDim,runDANCo, runDANCoStats, runDANColoop,runANOVAglobal,runANOVAlocal,radovanovic_estimators_matlab,Hidalgo from", "._mada import mada as mada_py from ._corint import corint as", "from ._mada import mada as mada_py from ._corint import corint", "from ._FisherS import randsphere, preprocessing, SeparabilityAnalysis, point_inseparability_to_pointID from ._call_estimators 
import", "danco_py from ._TwoNN import twonn as twonn_py from ._ESS import", "import randsphere, preprocessing, SeparabilityAnalysis, point_inseparability_to_pointID from ._call_estimators import TwoNN, run_singleGMST,run_singleCorrDim,runDANCo," ]
[ "'密码') cookies = lgn.cookies # cookies获取方法 person = GetInfo(base_url=base_url, cookies=cookies)", "zfnew import GetInfo, Login base_url = '学校教务系统的主页url' lgn = Login(base_url=base_url)", "lgn = Login(base_url=base_url) lgn.login('账号', '密码') cookies = lgn.cookies # cookies获取方法", "lgn.cookies # cookies获取方法 person = GetInfo(base_url=base_url, cookies=cookies) message = person.get_message()", "import GetInfo, Login base_url = '学校教务系统的主页url' lgn = Login(base_url=base_url) lgn.login('账号',", "获取调课、改课通知例子 from zfnew import GetInfo, Login base_url = '学校教务系统的主页url' lgn", "= '学校教务系统的主页url' lgn = Login(base_url=base_url) lgn.login('账号', '密码') cookies = lgn.cookies", "# 获取调课、改课通知例子 from zfnew import GetInfo, Login base_url = '学校教务系统的主页url'", "Login(base_url=base_url) lgn.login('账号', '密码') cookies = lgn.cookies # cookies获取方法 person =", "Login base_url = '学校教务系统的主页url' lgn = Login(base_url=base_url) lgn.login('账号', '密码') cookies", "'学校教务系统的主页url' lgn = Login(base_url=base_url) lgn.login('账号', '密码') cookies = lgn.cookies #", "= Login(base_url=base_url) lgn.login('账号', '密码') cookies = lgn.cookies # cookies获取方法 person", "# cookies获取方法 person = GetInfo(base_url=base_url, cookies=cookies) message = person.get_message() print(message)", "= lgn.cookies # cookies获取方法 person = GetInfo(base_url=base_url, cookies=cookies) message =", "cookies = lgn.cookies # cookies获取方法 person = GetInfo(base_url=base_url, cookies=cookies) message", "GetInfo, Login base_url = '学校教务系统的主页url' lgn = Login(base_url=base_url) lgn.login('账号', '密码')", "base_url = '学校教务系统的主页url' lgn = Login(base_url=base_url) lgn.login('账号', '密码') cookies =", "lgn.login('账号', '密码') cookies = lgn.cookies # cookies获取方法 person = GetInfo(base_url=base_url,", "from zfnew import GetInfo, Login base_url = '学校教务系统的主页url' lgn =" ]
[ "fake.phone_number, fake.coordinate, fake.license_plate, fake.credit_card_expire, ][:nCampos] for _ in range(nLinhas): file.write(f\"{random.randint(0,", "= random.randint(2, 10) camposFuncs = [ fake.name, fake.date, fake.ssn, fake.ascii_email,", "\"__main__\": fake = Faker(\"pt_BR\") path = \"python/\" try: nLinhas =", "file.write(camposFuncs[-1]()) file.write(\"\\n\") if __name__ == \"__main__\": fake = Faker(\"pt_BR\") path", "= int(sys.argv[1]) nCampos = int(sys.argv[2]) except: nLinhas = 1000 nCampos", "Faker(\"pt_BR\") path = \"python/\" try: nLinhas = int(sys.argv[1]) nCampos =", "range(nLinhas): file.write(f\"{random.randint(0, 999999)},\") for funcao in camposFuncs[:-1]: file.write(f\"{funcao()},\") file.write(camposFuncs[-1]()) file.write(\"\\n\")", "import sys import random from faker import Faker def gera(nLinhas=100,", "import random from faker import Faker def gera(nLinhas=100, nCampos=None): with", "path = \"python/\" try: nLinhas = int(sys.argv[1]) nCampos = int(sys.argv[2])", "from faker import Faker def gera(nLinhas=100, nCampos=None): with open(f\"{path}/file{nLinhas}-{nCampos}_python.txt\", \"w+\",", "][:nCampos] for _ in range(nLinhas): file.write(f\"{random.randint(0, 999999)},\") for funcao in", "camposFuncs = [ fake.name, fake.date, fake.ssn, fake.ascii_email, fake.job, fake.phone_number, fake.coordinate,", "not nCampos: nCampos = random.randint(2, 10) camposFuncs = [ fake.name,", "= [ fake.name, fake.date, fake.ssn, fake.ascii_email, fake.job, fake.phone_number, fake.coordinate, fake.license_plate,", "for _ in range(nLinhas): file.write(f\"{random.randint(0, 999999)},\") for funcao in camposFuncs[:-1]:", "nCampos = random.randint(2, 10) camposFuncs = [ fake.name, fake.date, fake.ssn,", "random.randint(2, 10) camposFuncs = [ fake.name, fake.date, fake.ssn, fake.ascii_email, fake.job,", "funcao in camposFuncs[:-1]: file.write(f\"{funcao()},\") file.write(camposFuncs[-1]()) file.write(\"\\n\") if __name__ == \"__main__\":", 
"file.write(f\"{random.randint(0, 999999)},\") for funcao in camposFuncs[:-1]: file.write(f\"{funcao()},\") file.write(camposFuncs[-1]()) file.write(\"\\n\") if", "camposFuncs[:-1]: file.write(f\"{funcao()},\") file.write(camposFuncs[-1]()) file.write(\"\\n\") if __name__ == \"__main__\": fake =", "file.write(f\"{funcao()},\") file.write(camposFuncs[-1]()) file.write(\"\\n\") if __name__ == \"__main__\": fake = Faker(\"pt_BR\")", "def gera(nLinhas=100, nCampos=None): with open(f\"{path}/file{nLinhas}-{nCampos}_python.txt\", \"w+\", encoding=\"utf8\") as file: if", "nCampos: nCampos = random.randint(2, 10) camposFuncs = [ fake.name, fake.date,", "in camposFuncs[:-1]: file.write(f\"{funcao()},\") file.write(camposFuncs[-1]()) file.write(\"\\n\") if __name__ == \"__main__\": fake", "gera(nLinhas=100, nCampos=None): with open(f\"{path}/file{nLinhas}-{nCampos}_python.txt\", \"w+\", encoding=\"utf8\") as file: if not", "fake.ssn, fake.ascii_email, fake.job, fake.phone_number, fake.coordinate, fake.license_plate, fake.credit_card_expire, ][:nCampos] for _", "int(sys.argv[1]) nCampos = int(sys.argv[2]) except: nLinhas = 1000 nCampos =", "file: if not nCampos: nCampos = random.randint(2, 10) camposFuncs =", "import Faker def gera(nLinhas=100, nCampos=None): with open(f\"{path}/file{nLinhas}-{nCampos}_python.txt\", \"w+\", encoding=\"utf8\") as", "faker import Faker def gera(nLinhas=100, nCampos=None): with open(f\"{path}/file{nLinhas}-{nCampos}_python.txt\", \"w+\", encoding=\"utf8\")", "sys import random from faker import Faker def gera(nLinhas=100, nCampos=None):", "open(f\"{path}/file{nLinhas}-{nCampos}_python.txt\", \"w+\", encoding=\"utf8\") as file: if not nCampos: nCampos =", "fake.coordinate, fake.license_plate, fake.credit_card_expire, ][:nCampos] for _ in range(nLinhas): file.write(f\"{random.randint(0, 999999)},\")", "fake.credit_card_expire, ][:nCampos] for _ in range(nLinhas): file.write(f\"{random.randint(0, 999999)},\") for funcao", "if not nCampos: nCampos = 
random.randint(2, 10) camposFuncs = [", "fake.job, fake.phone_number, fake.coordinate, fake.license_plate, fake.credit_card_expire, ][:nCampos] for _ in range(nLinhas):", "fake.license_plate, fake.credit_card_expire, ][:nCampos] for _ in range(nLinhas): file.write(f\"{random.randint(0, 999999)},\") for", "file.write(\"\\n\") if __name__ == \"__main__\": fake = Faker(\"pt_BR\") path =", "= \"python/\" try: nLinhas = int(sys.argv[1]) nCampos = int(sys.argv[2]) except:", "nCampos = int(sys.argv[2]) except: nLinhas = 1000 nCampos = 10", "for funcao in camposFuncs[:-1]: file.write(f\"{funcao()},\") file.write(camposFuncs[-1]()) file.write(\"\\n\") if __name__ ==", "__name__ == \"__main__\": fake = Faker(\"pt_BR\") path = \"python/\" try:", "nLinhas = int(sys.argv[1]) nCampos = int(sys.argv[2]) except: nLinhas = 1000", "10) camposFuncs = [ fake.name, fake.date, fake.ssn, fake.ascii_email, fake.job, fake.phone_number,", "Faker def gera(nLinhas=100, nCampos=None): with open(f\"{path}/file{nLinhas}-{nCampos}_python.txt\", \"w+\", encoding=\"utf8\") as file:", "[ fake.name, fake.date, fake.ssn, fake.ascii_email, fake.job, fake.phone_number, fake.coordinate, fake.license_plate, fake.credit_card_expire,", "\"w+\", encoding=\"utf8\") as file: if not nCampos: nCampos = random.randint(2,", "with open(f\"{path}/file{nLinhas}-{nCampos}_python.txt\", \"w+\", encoding=\"utf8\") as file: if not nCampos: nCampos", "fake = Faker(\"pt_BR\") path = \"python/\" try: nLinhas = int(sys.argv[1])", "\"python/\" try: nLinhas = int(sys.argv[1]) nCampos = int(sys.argv[2]) except: nLinhas", "= Faker(\"pt_BR\") path = \"python/\" try: nLinhas = int(sys.argv[1]) nCampos", "in range(nLinhas): file.write(f\"{random.randint(0, 999999)},\") for funcao in camposFuncs[:-1]: file.write(f\"{funcao()},\") file.write(camposFuncs[-1]())", "999999)},\") for funcao in camposFuncs[:-1]: file.write(f\"{funcao()},\") file.write(camposFuncs[-1]()) file.write(\"\\n\") if __name__", "nCampos=None): with 
open(f\"{path}/file{nLinhas}-{nCampos}_python.txt\", \"w+\", encoding=\"utf8\") as file: if not nCampos:", "try: nLinhas = int(sys.argv[1]) nCampos = int(sys.argv[2]) except: nLinhas =", "random from faker import Faker def gera(nLinhas=100, nCampos=None): with open(f\"{path}/file{nLinhas}-{nCampos}_python.txt\",", "if __name__ == \"__main__\": fake = Faker(\"pt_BR\") path = \"python/\"", "= int(sys.argv[2]) except: nLinhas = 1000 nCampos = 10 gera(nLinhas,", "as file: if not nCampos: nCampos = random.randint(2, 10) camposFuncs", "int(sys.argv[2]) except: nLinhas = 1000 nCampos = 10 gera(nLinhas, nCampos)", "fake.name, fake.date, fake.ssn, fake.ascii_email, fake.job, fake.phone_number, fake.coordinate, fake.license_plate, fake.credit_card_expire, ][:nCampos]", "fake.ascii_email, fake.job, fake.phone_number, fake.coordinate, fake.license_plate, fake.credit_card_expire, ][:nCampos] for _ in", "_ in range(nLinhas): file.write(f\"{random.randint(0, 999999)},\") for funcao in camposFuncs[:-1]: file.write(f\"{funcao()},\")", "== \"__main__\": fake = Faker(\"pt_BR\") path = \"python/\" try: nLinhas", "fake.date, fake.ssn, fake.ascii_email, fake.job, fake.phone_number, fake.coordinate, fake.license_plate, fake.credit_card_expire, ][:nCampos] for", "encoding=\"utf8\") as file: if not nCampos: nCampos = random.randint(2, 10)" ]
[ "for i in range(1, n+1): print(f'Введите {i} строку: ', end='')", "s, t, fi, si, ti = sort(data, sorted(time_list)) time_list =", "and tt and values[0] not in winers_name: three_id = key", "key winers_name.add(values[0]) is_find = False three_i = index break return", "1 for key, values in data.items(): if time[0 - index]", "True winers_name.add(values[0]) second_i = index elif time[0 -index] == int(values[1])", "= [name, time] data[i] = tuple(obj) f, s, t, fi,", "not in winers_name: second_id = key st = False tt", "in winers_name: second_id = key st = False tt =", "not in winers_name: three_id = key winers_name.add(values[0]) is_find = False", "index elif time[0 -index] == int(values[1]) and tt and values[0]", "')) data = dict() time_list = list() for i in", "i in range(1, n+1): print(f'Введите {i} строку: ', end='') text", "dict() time_list = list() for i in range(1, n+1): print(f'Введите", "sort(data, sorted(time_list)) time_list = sorted(time_list) print('1 место занимает: {0}, с", "{1}'.format(data[s][0], time_list[-si])) print('3 место занимает: {0}, с очками {1}'.format(data[t][0], time_list[-ti]))", "first_id, second_id, three_id, first_i, second_i, three_i n = int(input('Введите количество", "= text[0] time_list.append(int(time)) name = text[1] obj = [name, time]", "= tuple(obj) f, s, t, fi, si, ti = sort(data,", "int(values[1]) and tt and values[0] not in winers_name: three_id =", "three_id, first_i, second_i, three_i n = int(input('Введите количество строк: '))", "values in data.items(): if time[0 - index] == int(values[1]) and", "{1}'.format(data[f][0], time_list[-fi])) print('2 место занимает: {0}, с очками {1}'.format(data[s][0], time_list[-si]))", "True winers_name.add(values[0]) first_i = index elif time[0 -index] == int(values[1])", "def sort(data, time): tt = False ft = True st", "index += 1 for key, values in data.items(): if time[0", "= False is_find = True winers_name = set() index =", "text = input().split() time = text[0] 
time_list.append(int(time)) name = text[1]", "name = text[1] obj = [name, time] data[i] = tuple(obj)", "- index] == int(values[1]) and ft and values[0] not in", "time[0 -index] == int(values[1]) and st and values[0] not in", "== int(values[1]) and st and values[0] not in winers_name: second_id", "tuple(obj) f, s, t, fi, si, ti = sort(data, sorted(time_list))", "sort(data, time): tt = False ft = True st =", "-index] == int(values[1]) and tt and values[0] not in winers_name:", "fi, si, ti = sort(data, sorted(time_list)) time_list = sorted(time_list) print('1", "second_id = key st = False tt = True winers_name.add(values[0])", "not in winers_name: first_id = key ft = False st", "and st and values[0] not in winers_name: second_id = key", "second_id, three_id, first_i, second_i, three_i n = int(input('Введите количество строк:", "for key, values in data.items(): if time[0 - index] ==", "строк: ')) data = dict() time_list = list() for i", "if time[0 - index] == int(values[1]) and ft and values[0]", "time_list.append(int(time)) name = text[1] obj = [name, time] data[i] =", "data[i] = tuple(obj) f, s, t, fi, si, ti =", "False tt = True winers_name.add(values[0]) second_i = index elif time[0", "is_find = False three_i = index break return first_id, second_id,", "tt = True winers_name.add(values[0]) second_i = index elif time[0 -index]", "False three_i = index break return first_id, second_id, three_id, first_i,", "text[0] time_list.append(int(time)) name = text[1] obj = [name, time] data[i]", "= True winers_name = set() index = 0 while is_find:", "first_i = index elif time[0 -index] == int(values[1]) and st", "= False three_i = index break return first_id, second_id, three_id,", "строку: ', end='') text = input().split() time = text[0] time_list.append(int(time))", "= dict() time_list = list() for i in range(1, n+1):", "= False ft = True st = False is_find =", "= text[1] obj = [name, time] data[i] = tuple(obj) f,", "print(f'Введите {i} строку: ', end='') text = 
input().split() time =", "index elif time[0 -index] == int(values[1]) and st and values[0]", "winers_name.add(values[0]) is_find = False three_i = index break return first_id,", "= True winers_name.add(values[0]) first_i = index elif time[0 -index] ==", "+= 1 for key, values in data.items(): if time[0 -", "end='') text = input().split() time = text[0] time_list.append(int(time)) name =", "three_id = key winers_name.add(values[0]) is_find = False three_i = index", "winers_name.add(values[0]) first_i = index elif time[0 -index] == int(values[1]) and", "0 while is_find: index += 1 for key, values in", "= set() index = 0 while is_find: index += 1", "key ft = False st = True winers_name.add(values[0]) first_i =", "text[1] obj = [name, time] data[i] = tuple(obj) f, s,", "st = True winers_name.add(values[0]) first_i = index elif time[0 -index]", "<reponame>zainllw0w/skillbox def sort(data, time): tt = False ft = True", "= index elif time[0 -index] == int(values[1]) and st and", "index break return first_id, second_id, three_id, first_i, second_i, three_i n", "= 0 while is_find: index += 1 for key, values", "= key st = False tt = True winers_name.add(values[0]) second_i", "index] == int(values[1]) and ft and values[0] not in winers_name:", "-index] == int(values[1]) and st and values[0] not in winers_name:", "{0}, с очками {1}'.format(data[s][0], time_list[-si])) print('3 место занимает: {0}, с", "set() index = 0 while is_find: index += 1 for", "= index break return first_id, second_id, three_id, first_i, second_i, three_i", "{0}, с очками {1}'.format(data[f][0], time_list[-fi])) print('2 место занимает: {0}, с", "and values[0] not in winers_name: second_id = key st =", "False is_find = True winers_name = set() index = 0", "time[0 -index] == int(values[1]) and tt and values[0] not in", "= False tt = True winers_name.add(values[0]) second_i = index elif", "in winers_name: first_id = key ft = False st =", "tt = False ft = True st = False is_find", "time = text[0] 
time_list.append(int(time)) name = text[1] obj = [name,", "time] data[i] = tuple(obj) f, s, t, fi, si, ti", "t, fi, si, ti = sort(data, sorted(time_list)) time_list = sorted(time_list)", "three_i n = int(input('Введите количество строк: ')) data = dict()", "list() for i in range(1, n+1): print(f'Введите {i} строку: ',", "sorted(time_list) print('1 место занимает: {0}, с очками {1}'.format(data[f][0], time_list[-fi])) print('2", "st = False is_find = True winers_name = set() index", "return first_id, second_id, three_id, first_i, second_i, three_i n = int(input('Введите", "sorted(time_list)) time_list = sorted(time_list) print('1 место занимает: {0}, с очками", "int(values[1]) and st and values[0] not in winers_name: second_id =", "= True st = False is_find = True winers_name =", "three_i = index break return first_id, second_id, three_id, first_i, second_i,", "False ft = True st = False is_find = True", "n = int(input('Введите количество строк: ')) data = dict() time_list", "место занимает: {0}, с очками {1}'.format(data[s][0], time_list[-si])) print('3 место занимает:", "очками {1}'.format(data[f][0], time_list[-fi])) print('2 место занимает: {0}, с очками {1}'.format(data[s][0],", "and ft and values[0] not in winers_name: first_id = key", "= False st = True winers_name.add(values[0]) first_i = index elif", "= list() for i in range(1, n+1): print(f'Введите {i} строку:", "range(1, n+1): print(f'Введите {i} строку: ', end='') text = input().split()", "ft and values[0] not in winers_name: first_id = key ft", "time_list = list() for i in range(1, n+1): print(f'Введите {i}", "si, ti = sort(data, sorted(time_list)) time_list = sorted(time_list) print('1 место", "elif time[0 -index] == int(values[1]) and tt and values[0] not", "False st = True winers_name.add(values[0]) first_i = index elif time[0", "key, values in data.items(): if time[0 - index] == int(values[1])", "winers_name.add(values[0]) second_i = index elif time[0 -index] == int(values[1]) and", "', end='') text 
= input().split() time = text[0] time_list.append(int(time)) name", "занимает: {0}, с очками {1}'.format(data[s][0], time_list[-si])) print('3 место занимает: {0},", "first_i, second_i, three_i n = int(input('Введите количество строк: ')) data", "is_find = True winers_name = set() index = 0 while", "time_list[-fi])) print('2 место занимает: {0}, с очками {1}'.format(data[s][0], time_list[-si])) print('3", "== int(values[1]) and tt and values[0] not in winers_name: three_id", "ti = sort(data, sorted(time_list)) time_list = sorted(time_list) print('1 место занимает:", "is_find: index += 1 for key, values in data.items(): if", "== int(values[1]) and ft and values[0] not in winers_name: first_id", "= int(input('Введите количество строк: ')) data = dict() time_list =", "n+1): print(f'Введите {i} строку: ', end='') text = input().split() time", "True winers_name = set() index = 0 while is_find: index", "winers_name: first_id = key ft = False st = True", "obj = [name, time] data[i] = tuple(obj) f, s, t,", "= input().split() time = text[0] time_list.append(int(time)) name = text[1] obj", "data.items(): if time[0 - index] == int(values[1]) and ft and", "and values[0] not in winers_name: first_id = key ft =", "time[0 - index] == int(values[1]) and ft and values[0] not", "first_id = key ft = False st = True winers_name.add(values[0])", "= key ft = False st = True winers_name.add(values[0]) first_i", "с очками {1}'.format(data[s][0], time_list[-si])) print('3 место занимает: {0}, с очками", "очками {1}'.format(data[s][0], time_list[-si])) print('3 место занимает: {0}, с очками {1}'.format(data[t][0],", "[name, time] data[i] = tuple(obj) f, s, t, fi, si,", "while is_find: index += 1 for key, values in data.items():", "in winers_name: three_id = key winers_name.add(values[0]) is_find = False three_i", "True st = False is_find = True winers_name = set()", "int(values[1]) and ft and values[0] not in winers_name: first_id =", "in range(1, n+1): print(f'Введите {i} строку: ', 
end='') text =", "st and values[0] not in winers_name: second_id = key st", "second_i = index elif time[0 -index] == int(values[1]) and tt", "st = False tt = True winers_name.add(values[0]) second_i = index", "input().split() time = text[0] time_list.append(int(time)) name = text[1] obj =", "место занимает: {0}, с очками {1}'.format(data[f][0], time_list[-fi])) print('2 место занимает:", "= True winers_name.add(values[0]) second_i = index elif time[0 -index] ==", "key st = False tt = True winers_name.add(values[0]) second_i =", "winers_name: three_id = key winers_name.add(values[0]) is_find = False three_i =", "= sorted(time_list) print('1 место занимает: {0}, с очками {1}'.format(data[f][0], time_list[-fi]))", "and values[0] not in winers_name: three_id = key winers_name.add(values[0]) is_find", "elif time[0 -index] == int(values[1]) and st and values[0] not", "= index elif time[0 -index] == int(values[1]) and tt and", "с очками {1}'.format(data[f][0], time_list[-fi])) print('2 место занимает: {0}, с очками", "values[0] not in winers_name: three_id = key winers_name.add(values[0]) is_find =", "int(input('Введите количество строк: ')) data = dict() time_list = list()", "time_list = sorted(time_list) print('1 место занимает: {0}, с очками {1}'.format(data[f][0],", "winers_name = set() index = 0 while is_find: index +=", "break return first_id, second_id, three_id, first_i, second_i, three_i n =", "ft = False st = True winers_name.add(values[0]) first_i = index", "winers_name: second_id = key st = False tt = True", "print('1 место занимает: {0}, с очками {1}'.format(data[f][0], time_list[-fi])) print('2 место", "f, s, t, fi, si, ti = sort(data, sorted(time_list)) time_list", "tt and values[0] not in winers_name: three_id = key winers_name.add(values[0])", "in data.items(): if time[0 - index] == int(values[1]) and ft", "= key winers_name.add(values[0]) is_find = False three_i = index break", "ft = True st = False is_find = True winers_name", "time): tt = False ft = 
True st = False", "количество строк: ')) data = dict() time_list = list() for", "values[0] not in winers_name: first_id = key ft = False", "занимает: {0}, с очками {1}'.format(data[f][0], time_list[-fi])) print('2 место занимает: {0},", "values[0] not in winers_name: second_id = key st = False", "data = dict() time_list = list() for i in range(1,", "index = 0 while is_find: index += 1 for key,", "second_i, three_i n = int(input('Введите количество строк: ')) data =", "{i} строку: ', end='') text = input().split() time = text[0]", "= sort(data, sorted(time_list)) time_list = sorted(time_list) print('1 место занимает: {0},", "print('2 место занимает: {0}, с очками {1}'.format(data[s][0], time_list[-si])) print('3 место" ]
[ "options[\"tap_if\"] = None options[\"node_ip\"] = None options[\"ipv4_gateway\"] = None options[\"dns\"]", "2.0 (the \"License\"); # you may not use this file", "and # limitations under the License. # # # @file", "= opts[\"prefix\"] self.ipv4_gateway =opts[\"ipv4_gateway\"] self.dns = opts[\"dns\"] self.use_lwip = opts[\"use_lwip\"]", "= opts[\"dns\"] self.use_lwip = opts[\"use_lwip\"] self.node_process_tag = \"WEAVE-INET-NODE\" def __log_error_and_exit(self,", "now \"\"\" cmd = \"sudo \" cmd += self.getWeaveInetLayerDNSPath() node_ip", "run(self): self.logger.debug(\"[localhost] WeaveInetDNS: Run.\") self.__pre_check() self.__start_node_dnscheck() emsg = \"WeaveInet %s", "does not exist.\" % (self.node_id) self.__log_error_and_exit(emsg) # check if prefix", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "None options[\"use_lwip\"] = False def option(): return options.copy() class WeaveInetDNS(HappyNode,", "the node, %s\" % (self.node_id) self.__log_error_and_exit(emsg) if self.use_lwip: cmd +=", "# @file # Implements WeaveInet class that tests Weave Inet", "\" --tap-device \" + self.tap_if + \" -a \" +", "Check if virtual node exists if not self._nodeExists(): emsg =", "self.node_process_tag, True) results = self.__gather_results() result, output = self.__process_results(results) data", "output) def __start_node_dnscheck(self): \"\"\" lwip and socket use different command", "Check if the name of the new node is given", "+ \" -a \" + node_ip + \" --ipv4-gateway \"", "self.node_id: emsg = \"Missing name of the virtual node that", "\"Could not find IP address of the node, %s\" %", "def __process_results(self, results): \"\"\" process results from gather_results() \"\"\" status", "use this file except in compliance with the License. #", "# Q: what are the parameters need to specify? options", "(results) def __process_results(self, results): \"\"\" process results from gather_results() \"\"\"", "reserved. 
# # Licensed under the Apache License, Version 2.0", "\"WeaveInet %s should be running.\" % (self.node_process_tag) self.logger.debug(\"[%s] WeaveInet: %s\"", "=opts[\"ipv4_gateway\"] self.dns = opts[\"dns\"] self.use_lwip = opts[\"use_lwip\"] self.node_process_tag = \"WEAVE-INET-NODE\"", "= \"WEAVE-INET-NODE\" def __log_error_and_exit(self, error): self.logger.error(\"[localhost] WeaveInetDNS: %s\" % (error))", "should be running.\" % (self.node_process_tag) self.logger.debug(\"[%s] WeaveInet: %s\" % (self.node_id,", "\" + self.tap_if + \" -a \" + node_ip +", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "def __pre_check(self): # Check if the name of the new", "License. # You may obtain a copy of the License", "class WeaveInetDNS(HappyNode, HappyNetwork, WeaveTest): def __init__(self, opts = options): HappyNode.__init__(self)", "under the License is distributed on an \"AS IS\" BASIS,", "emsg = \"Could not find IP address of the node,", "License for the specific language governing permissions and # limitations", "= \"Could not find IP address of the node, %s\"", "self.__log_error_and_exit(emsg) if self.use_lwip: cmd += \" --tap-device \" + self.tap_if", "self.node_id)[0] if node_ip == None: emsg = \"Could not find", "node, description): if not self._nodeExists(node): emsg = \"The %s '%s'", "return options.copy() class WeaveInetDNS(HappyNode, HappyNetwork, WeaveTest): def __init__(self, opts =", "__init__(self, opts = options): HappyNode.__init__(self) HappyNetwork.__init__(self) WeaveTest.__init__(self) self.quiet = opts[\"quiet\"]", "\\ self.get_test_output(self.node_id, self.node_process_tag, True) node_strace_value, node_strace_data = \\ self.get_test_strace(self.node_id, self.node_process_tag,", "options[\"ipv4_gateway\"] = None options[\"dns\"] = None options[\"use_lwip\"] = False def", "== None: emsg = \"prefix is None, Please specifiy a", "data[\"node_output\"] = node_output_data data[\"node_strace\"] = node_strace_data 
self.logger.debug(\"[localhost] WeaveInetDNSTest: Done.\") return", "prefix if self.prefix == None: emsg = \"prefix is None,", "opts[\"quiet\"] self.node_id = opts[\"node_id\"] self.tap_if = opts[\"tap_if\"] self.prefix = opts[\"prefix\"]", "results['output'] = self.get_test_output(self.node_id, self.node_process_tag, quiet) return (results) def __process_results(self, results):", "not self._nodeExists(node): emsg = \"The %s '%s' does not exist", "new node is given if not self.node_id: emsg = \"Missing", "\" + self.ipv4_gateway + \\ \" --dns-server \" + self.dns", "options[\"use_lwip\"] = False def option(): return options.copy() class WeaveInetDNS(HappyNode, HappyNetwork,", "not exist in the test topology.\" % (description, node) self.__log_error_and_exit(emsg)", "in compliance with the License. # You may obtain a", "node) self.__log_error_and_exit(emsg) def __pre_check(self): # Check if the name of", "software # distributed under the License is distributed on an", "output = results['output'] return (status, output) def __start_node_dnscheck(self): \"\"\" lwip", "= False output = \"\" status = (results['status'] == 0)", "of the node, %s\" % (self.node_id) self.__log_error_and_exit(emsg) if self.use_lwip: cmd", "not self._nodeExists(): emsg = \"virtual node %s does not exist.\"", "== None: emsg = \"Could not find IP address of", "False options[\"node_id\"] = None options[\"tap_if\"] = None options[\"node_ip\"] = None", "-a \" + node_ip + \" --ipv4-gateway \" + self.ipv4_gateway", "= \"prefix is None, Please specifiy a valid prefix.\" self.__log_error_and_exit(emsg)", "self.get_test_strace(self.node_id, self.node_process_tag, True) results = self.__gather_results() result, output = self.__process_results(results)", "None, Please specifiy a valid prefix.\" self.__log_error_and_exit(emsg) def __gather_results(self): \"\"\"", "the new node is given if not self.node_id: emsg =", "if not self._nodeExists(): emsg = \"virtual node %s does not", "process results from 
gather_results() \"\"\" status = False output =", "\" + node_ip + \" --ipv4-gateway \" + self.ipv4_gateway +", "%s does not exist.\" % (self.node_id) self.__log_error_and_exit(emsg) # check if", "Inet Layer among Weave Nodes. # import os import sys", "from happy.Utils import * from happy.HappyNode import HappyNode from happy.HappyNetwork", "% (self.node_process_tag) self.logger.debug(\"[%s] WeaveInet: %s\" % (self.node_id, emsg)) self.__stop_node() node_output_value,", "opts = options): HappyNode.__init__(self) HappyNetwork.__init__(self) WeaveTest.__init__(self) self.quiet = opts[\"quiet\"] self.node_id", "+ self.ipv4_gateway + \\ \" --dns-server \" + self.dns print", "self.__log_error_and_exit(emsg) def __gather_results(self): \"\"\" gather result from get_test_output() \"\"\" quiet", "\"prefix is None, Please specifiy a valid prefix.\" self.__log_error_and_exit(emsg) def", "def option(): return options.copy() class WeaveInetDNS(HappyNode, HappyNetwork, WeaveTest): def __init__(self,", "= True results = {} results['status'], results['output'] = self.get_test_output(self.node_id, self.node_process_tag,", "\"\"\" quiet = True results = {} results['status'], results['output'] =", "{} data[\"node_output\"] = node_output_data data[\"node_strace\"] = node_strace_data self.logger.debug(\"[localhost] WeaveInetDNSTest: Done.\")", "options[\"quiet\"] = False options[\"node_id\"] = None options[\"tap_if\"] = None options[\"node_ip\"]", "Layer among Weave Nodes. # import os import sys import", "%s\" % (error)) sys.exit(1) def __checkNodeExists(self, node, description): if not", "== 0) output = results['output'] return (status, output) def __start_node_dnscheck(self):", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "find IP address of the node, %s\" % (self.node_id) self.__log_error_and_exit(emsg)", "ANY KIND, either express or implied. 
# See the License", "See the License for the specific language governing permissions and", "% (error)) sys.exit(1) def __checkNodeExists(self, node, description): if not self._nodeExists(node):", "ReturnMsg from happy.Utils import * from happy.HappyNode import HappyNode from", "self.ipv4_gateway + \\ \" --dns-server \" + self.dns print \"dns", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "to in writing, software # distributed under the License is", "= self.getNodeAddressesOnPrefix(self.prefix, self.node_id)[0] if node_ip == None: emsg = \"Could", "# See the License for the specific language governing permissions", "to specify? options = {} options[\"quiet\"] = False options[\"node_id\"] =", "= options): HappyNode.__init__(self) HappyNetwork.__init__(self) WeaveTest.__init__(self) self.quiet = opts[\"quiet\"] self.node_id =", "self.node_process_tag, quiet) return (results) def __process_results(self, results): \"\"\" process results", "language governing permissions and # limitations under the License. #", "or agreed to in writing, software # distributed under the", "options[\"node_id\"] = None options[\"tap_if\"] = None options[\"node_ip\"] = None options[\"ipv4_gateway\"]", "required by applicable law or agreed to in writing, software", "option(): return options.copy() class WeaveInetDNS(HappyNode, HappyNetwork, WeaveTest): def __init__(self, opts", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "self.__log_error_and_exit(emsg) # Check if virtual node exists if not self._nodeExists():", "node_ip = self.getNodeAddressesOnPrefix(self.prefix, self.node_id)[0] if node_ip == None: emsg =", "with the License. # You may obtain a copy of", "limitations under the License. 
# # # @file # Implements", "(self.node_process_tag) self.logger.debug(\"[%s] WeaveInet: %s\" % (self.node_id, emsg)) self.__stop_node() node_output_value, node_output_data", "different command for now \"\"\" cmd = \"sudo \" cmd", "description): if not self._nodeExists(node): emsg = \"The %s '%s' does", "= \"Missing name of the virtual node that should start", "\"virtual node %s does not exist.\" % (self.node_id) self.__log_error_and_exit(emsg) #", "command : {}\".format(cmd) self.start_weave_process(self.node_id, cmd, self.node_process_tag, sync_on_output=self.ready_to_service_events_str) def __stop_node(self): self.stop_weave_process(self.node_id,", "os import sys import time from happy.ReturnMsg import ReturnMsg from", "= \"\" status = (results['status'] == 0) output = results['output']", "tests Weave Inet Layer among Weave Nodes. # import os", "from WeaveTest import WeaveTest # Q: what are the parameters", "compliance with the License. # You may obtain a copy", "self._nodeExists(): emsg = \"virtual node %s does not exist.\" %", "agreed to in writing, software # distributed under the License", "Nodes. 
# import os import sys import time from happy.ReturnMsg", "None: emsg = \"Could not find IP address of the", "= \\ self.get_test_output(self.node_id, self.node_process_tag, True) node_strace_value, node_strace_data = \\ self.get_test_strace(self.node_id,", "results['output'] return (status, output) def __start_node_dnscheck(self): \"\"\" lwip and socket", "distributed under the License is distributed on an \"AS IS\"", "# check if prefix if self.prefix == None: emsg =", "sys.exit(1) def __checkNodeExists(self, node, description): if not self._nodeExists(node): emsg =", "of the virtual node that should start shell.\" self.__log_error_and_exit(emsg) #", "emsg = \"Missing name of the virtual node that should", "False def option(): return options.copy() class WeaveInetDNS(HappyNode, HappyNetwork, WeaveTest): def", "node_strace_data = \\ self.get_test_strace(self.node_id, self.node_process_tag, True) results = self.__gather_results() result,", "express or implied. # See the License for the specific", "of the new node is given if not self.node_id: emsg", "except in compliance with the License. # You may obtain", "permissions and # limitations under the License. 
# # #", "(results['status'] == 0) output = results['output'] return (status, output) def", "node_output_data data[\"node_strace\"] = node_strace_data self.logger.debug(\"[localhost] WeaveInetDNSTest: Done.\") return ReturnMsg(result, data)", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "not use this file except in compliance with the License.", "def __gather_results(self): \"\"\" gather result from get_test_output() \"\"\" quiet =", "exists if not self._nodeExists(): emsg = \"virtual node %s does", "print \"dns check command : {}\".format(cmd) self.start_weave_process(self.node_id, cmd, self.node_process_tag, sync_on_output=self.ready_to_service_events_str)", "writing, software # distributed under the License is distributed on", "happy.Utils import * from happy.HappyNode import HappyNode from happy.HappyNetwork import", "% (self.node_id) self.__log_error_and_exit(emsg) if self.use_lwip: cmd += \" --tap-device \"", "you may not use this file except in compliance with", "Q: what are the parameters need to specify? options =", "parameters need to specify? options = {} options[\"quiet\"] = False", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "HappyNetwork.__init__(self) WeaveTest.__init__(self) self.quiet = opts[\"quiet\"] self.node_id = opts[\"node_id\"] self.tap_if =", "Nest Labs, Inc. # All rights reserved. # # Licensed", "--dns-server \" + self.dns print \"dns check command : {}\".format(cmd)", "+ self.tap_if + \" -a \" + node_ip + \"", "happy.ReturnMsg import ReturnMsg from happy.Utils import * from happy.HappyNode import", "self.node_process_tag, sync_on_output=self.ready_to_service_events_str) def __stop_node(self): self.stop_weave_process(self.node_id, self.node_process_tag) def run(self): self.logger.debug(\"[localhost] WeaveInetDNS:", "CONDITIONS OF ANY KIND, either express or implied. 
# See", "\"dns check command : {}\".format(cmd) self.start_weave_process(self.node_id, cmd, self.node_process_tag, sync_on_output=self.ready_to_service_events_str) def", "if not self._nodeExists(node): emsg = \"The %s '%s' does not", "WeaveInet class that tests Weave Inet Layer among Weave Nodes.", "(self.node_id) self.__log_error_and_exit(emsg) if self.use_lwip: cmd += \" --tap-device \" +", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "+= \" --tap-device \" + self.tap_if + \" -a \"", "True) results = self.__gather_results() result, output = self.__process_results(results) data =", "def __init__(self, opts = options): HappyNode.__init__(self) HappyNetwork.__init__(self) WeaveTest.__init__(self) self.quiet =", "that tests Weave Inet Layer among Weave Nodes. # import", "opts[\"node_id\"] self.tap_if = opts[\"tap_if\"] self.prefix = opts[\"prefix\"] self.ipv4_gateway =opts[\"ipv4_gateway\"] self.dns", "HappyNode from happy.HappyNetwork import HappyNetwork from WeaveTest import WeaveTest #", "HappyNetwork from WeaveTest import WeaveTest # Q: what are the", "@file # Implements WeaveInet class that tests Weave Inet Layer", "WeaveInetDNS: Run.\") self.__pre_check() self.__start_node_dnscheck() emsg = \"WeaveInet %s should be", "import ReturnMsg from happy.Utils import * from happy.HappyNode import HappyNode", "self.prefix = opts[\"prefix\"] self.ipv4_gateway =opts[\"ipv4_gateway\"] self.dns = opts[\"dns\"] self.use_lwip =", "cmd += self.getWeaveInetLayerDNSPath() node_ip = self.getNodeAddressesOnPrefix(self.prefix, self.node_id)[0] if node_ip ==", "\" --dns-server \" + self.dns print \"dns check command :", "node_ip + \" --ipv4-gateway \" + self.ipv4_gateway + \\ \"", "not self.node_id: emsg = \"Missing name of the virtual node", "self.node_process_tag = \"WEAVE-INET-NODE\" def __log_error_and_exit(self, error): self.logger.error(\"[localhost] WeaveInetDNS: %s\" %", "= node_output_data data[\"node_strace\"] = node_strace_data 
self.logger.debug(\"[localhost] WeaveInetDNSTest: Done.\") return ReturnMsg(result,", "be running.\" % (self.node_process_tag) self.logger.debug(\"[%s] WeaveInet: %s\" % (self.node_id, emsg))", "def __stop_node(self): self.stop_weave_process(self.node_id, self.node_process_tag) def run(self): self.logger.debug(\"[localhost] WeaveInetDNS: Run.\") self.__pre_check()", "self.logger.debug(\"[%s] WeaveInet: %s\" % (self.node_id, emsg)) self.__stop_node() node_output_value, node_output_data =", "Weave Nodes. # import os import sys import time from", "OR CONDITIONS OF ANY KIND, either express or implied. #", "from gather_results() \"\"\" status = False output = \"\" status", "\" + self.dns print \"dns check command : {}\".format(cmd) self.start_weave_process(self.node_id,", "= self.get_test_output(self.node_id, self.node_process_tag, quiet) return (results) def __process_results(self, results): \"\"\"", ": {}\".format(cmd) self.start_weave_process(self.node_id, cmd, self.node_process_tag, sync_on_output=self.ready_to_service_events_str) def __stop_node(self): self.stop_weave_process(self.node_id, self.node_process_tag)", "= None options[\"node_ip\"] = None options[\"ipv4_gateway\"] = None options[\"dns\"] =", "the License is distributed on an \"AS IS\" BASIS, #", "class that tests Weave Inet Layer among Weave Nodes. 
#", "gather result from get_test_output() \"\"\" quiet = True results =", "the name of the new node is given if not", "node_ip == None: emsg = \"Could not find IP address", "'%s' does not exist in the test topology.\" % (description,", "\"\"\" cmd = \"sudo \" cmd += self.getWeaveInetLayerDNSPath() node_ip =", "law or agreed to in writing, software # distributed under", "does not exist in the test topology.\" % (description, node)", "True) node_strace_value, node_strace_data = \\ self.get_test_strace(self.node_id, self.node_process_tag, True) results =", "\"sudo \" cmd += self.getWeaveInetLayerDNSPath() node_ip = self.getNodeAddressesOnPrefix(self.prefix, self.node_id)[0] if", "and socket use different command for now \"\"\" cmd =", "lwip and socket use different command for now \"\"\" cmd", "# # Copyright (c) 2016-2017 Nest Labs, Inc. # All", "self.dns print \"dns check command : {}\".format(cmd) self.start_weave_process(self.node_id, cmd, self.node_process_tag,", "python # # Copyright (c) 2016-2017 Nest Labs, Inc. #", "= self.__process_results(results) data = {} data[\"node_output\"] = node_output_data data[\"node_strace\"] =", "__gather_results(self): \"\"\" gather result from get_test_output() \"\"\" quiet = True", "node %s does not exist.\" % (self.node_id) self.__log_error_and_exit(emsg) # check", "is None, Please specifiy a valid prefix.\" self.__log_error_and_exit(emsg) def __gather_results(self):", "self.node_process_tag, True) node_strace_value, node_strace_data = \\ self.get_test_strace(self.node_id, self.node_process_tag, True) results", "if node_ip == None: emsg = \"Could not find IP", "specify? 
options = {} options[\"quiet\"] = False options[\"node_id\"] = None", "given if not self.node_id: emsg = \"Missing name of the", "\" --ipv4-gateway \" + self.ipv4_gateway + \\ \" --dns-server \"", "= None options[\"tap_if\"] = None options[\"node_ip\"] = None options[\"ipv4_gateway\"] =", "self.logger.error(\"[localhost] WeaveInetDNS: %s\" % (error)) sys.exit(1) def __checkNodeExists(self, node, description):", "may obtain a copy of the License at # #", "= opts[\"node_id\"] self.tap_if = opts[\"tap_if\"] self.prefix = opts[\"prefix\"] self.ipv4_gateway =opts[\"ipv4_gateway\"]", "cmd = \"sudo \" cmd += self.getWeaveInetLayerDNSPath() node_ip = self.getNodeAddressesOnPrefix(self.prefix,", "self.use_lwip = opts[\"use_lwip\"] self.node_process_tag = \"WEAVE-INET-NODE\" def __log_error_and_exit(self, error): self.logger.error(\"[localhost]", "Inc. # All rights reserved. # # Licensed under the", "# All rights reserved. # # Licensed under the Apache", "if not self.node_id: emsg = \"Missing name of the virtual", "the parameters need to specify? options = {} options[\"quiet\"] =", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# limitations under the License. # # # @file #", "__pre_check(self): # Check if the name of the new node", "is given if not self.node_id: emsg = \"Missing name of", "may not use this file except in compliance with the", "import HappyNetwork from WeaveTest import WeaveTest # Q: what are", "(self.node_id) self.__log_error_and_exit(emsg) # check if prefix if self.prefix == None:", "quiet = True results = {} results['status'], results['output'] = self.get_test_output(self.node_id,", "self.__process_results(results) data = {} data[\"node_output\"] = node_output_data data[\"node_strace\"] = node_strace_data", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "this file except in compliance with the License. 
# You", "status = (results['status'] == 0) output = results['output'] return (status,", "# import os import sys import time from happy.ReturnMsg import", "return (results) def __process_results(self, results): \"\"\" process results from gather_results()", "check if prefix if self.prefix == None: emsg = \"prefix", "import WeaveTest # Q: what are the parameters need to", "Labs, Inc. # All rights reserved. # # Licensed under", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "self.dns = opts[\"dns\"] self.use_lwip = opts[\"use_lwip\"] self.node_process_tag = \"WEAVE-INET-NODE\" def", "# # @file # Implements WeaveInet class that tests Weave", "if prefix if self.prefix == None: emsg = \"prefix is", "# # Licensed under the Apache License, Version 2.0 (the", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "emsg = \"WeaveInet %s should be running.\" % (self.node_process_tag) self.logger.debug(\"[%s]", "{} results['status'], results['output'] = self.get_test_output(self.node_id, self.node_process_tag, quiet) return (results) def", "= {} results['status'], results['output'] = self.get_test_output(self.node_id, self.node_process_tag, quiet) return (results)", "import * from happy.HappyNode import HappyNode from happy.HappyNetwork import HappyNetwork", "+ node_ip + \" --ipv4-gateway \" + self.ipv4_gateway + \\", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "= None options[\"ipv4_gateway\"] = None options[\"dns\"] = None options[\"use_lwip\"] =", "None options[\"ipv4_gateway\"] = None options[\"dns\"] = None options[\"use_lwip\"] = False", "are the parameters need to specify? options = {} options[\"quiet\"]", "\"\" status = (results['status'] == 0) output = results['output'] return", "+ \" --ipv4-gateway \" + self.ipv4_gateway + \\ \" --dns-server", "rights reserved. 
# # Licensed under the Apache License, Version", "self.prefix == None: emsg = \"prefix is None, Please specifiy", "happy.HappyNode import HappyNode from happy.HappyNetwork import HappyNetwork from WeaveTest import", "--ipv4-gateway \" + self.ipv4_gateway + \\ \" --dns-server \" +", "error): self.logger.error(\"[localhost] WeaveInetDNS: %s\" % (error)) sys.exit(1) def __checkNodeExists(self, node,", "self.getWeaveInetLayerDNSPath() node_ip = self.getNodeAddressesOnPrefix(self.prefix, self.node_id)[0] if node_ip == None: emsg", "under the License. # # # @file # Implements WeaveInet", "HappyNetwork, WeaveTest): def __init__(self, opts = options): HappyNode.__init__(self) HappyNetwork.__init__(self) WeaveTest.__init__(self)", "prefix.\" self.__log_error_and_exit(emsg) def __gather_results(self): \"\"\" gather result from get_test_output() \"\"\"", "Weave Inet Layer among Weave Nodes. # import os import", "not exist.\" % (self.node_id) self.__log_error_and_exit(emsg) # check if prefix if", "+ \\ \" --dns-server \" + self.dns print \"dns check", "WeaveInet: %s\" % (self.node_id, emsg)) self.__stop_node() node_output_value, node_output_data = \\", "= False options[\"node_id\"] = None options[\"tap_if\"] = None options[\"node_ip\"] =", "emsg = \"The %s '%s' does not exist in the", "opts[\"use_lwip\"] self.node_process_tag = \"WEAVE-INET-NODE\" def __log_error_and_exit(self, error): self.logger.error(\"[localhost] WeaveInetDNS: %s\"", "exist.\" % (self.node_id) self.__log_error_and_exit(emsg) # check if prefix if self.prefix", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "address of the node, %s\" % (self.node_id) self.__log_error_and_exit(emsg) if self.use_lwip:", "* from happy.HappyNode import HappyNode from happy.HappyNetwork import HappyNetwork from", "+= self.getWeaveInetLayerDNSPath() node_ip = self.getNodeAddressesOnPrefix(self.prefix, self.node_id)[0] if node_ip == None:", "= self.__gather_results() result, output = 
self.__process_results(results) data = {} data[\"node_output\"]", "%s\" % (self.node_id) self.__log_error_and_exit(emsg) if self.use_lwip: cmd += \" --tap-device", "WeaveInetDNS(HappyNode, HappyNetwork, WeaveTest): def __init__(self, opts = options): HappyNode.__init__(self) HappyNetwork.__init__(self)", "def run(self): self.logger.debug(\"[localhost] WeaveInetDNS: Run.\") self.__pre_check() self.__start_node_dnscheck() emsg = \"WeaveInet", "or implied. # See the License for the specific language", "= \"sudo \" cmd += self.getWeaveInetLayerDNSPath() node_ip = self.getNodeAddressesOnPrefix(self.prefix, self.node_id)[0]", "self.get_test_output(self.node_id, self.node_process_tag, True) node_strace_value, node_strace_data = \\ self.get_test_strace(self.node_id, self.node_process_tag, True)", "self.quiet = opts[\"quiet\"] self.node_id = opts[\"node_id\"] self.tap_if = opts[\"tap_if\"] self.prefix", "node exists if not self._nodeExists(): emsg = \"virtual node %s", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "def __log_error_and_exit(self, error): self.logger.error(\"[localhost] WeaveInetDNS: %s\" % (error)) sys.exit(1) def", "virtual node exists if not self._nodeExists(): emsg = \"virtual node", "self.__gather_results() result, output = self.__process_results(results) data = {} data[\"node_output\"] =", "(self.node_id, emsg)) self.__stop_node() node_output_value, node_output_data = \\ self.get_test_output(self.node_id, self.node_process_tag, True)", "= None options[\"use_lwip\"] = False def option(): return options.copy() class", "test topology.\" % (description, node) self.__log_error_and_exit(emsg) def __pre_check(self): # Check", "# Check if virtual node exists if not self._nodeExists(): emsg", "{}\".format(cmd) self.start_weave_process(self.node_id, cmd, self.node_process_tag, sync_on_output=self.ready_to_service_events_str) def __stop_node(self): self.stop_weave_process(self.node_id, self.node_process_tag) def", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "start shell.\" self.__log_error_and_exit(emsg) # Check if virtual node exists if", "%s should be running.\" % (self.node_process_tag) self.logger.debug(\"[%s] WeaveInet: %s\" %", "from happy.HappyNode import HappyNode from happy.HappyNetwork import HappyNetwork from WeaveTest", "virtual node that should start shell.\" self.__log_error_and_exit(emsg) # Check if", "+ self.dns print \"dns check command : {}\".format(cmd) self.start_weave_process(self.node_id, cmd,", "check command : {}\".format(cmd) self.start_weave_process(self.node_id, cmd, self.node_process_tag, sync_on_output=self.ready_to_service_events_str) def __stop_node(self):", "data = {} data[\"node_output\"] = node_output_data data[\"node_strace\"] = node_strace_data self.logger.debug(\"[localhost]", "{} options[\"quiet\"] = False options[\"node_id\"] = None options[\"tap_if\"] = None", "from get_test_output() 
\"\"\" quiet = True results = {} results['status'],", "in the test topology.\" % (description, node) self.__log_error_and_exit(emsg) def __pre_check(self):", "self.get_test_output(self.node_id, self.node_process_tag, quiet) return (results) def __process_results(self, results): \"\"\" process", "opts[\"tap_if\"] self.prefix = opts[\"prefix\"] self.ipv4_gateway =opts[\"ipv4_gateway\"] self.dns = opts[\"dns\"] self.use_lwip", "\"Missing name of the virtual node that should start shell.\"", "__stop_node(self): self.stop_weave_process(self.node_id, self.node_process_tag) def run(self): self.logger.debug(\"[localhost] WeaveInetDNS: Run.\") self.__pre_check() self.__start_node_dnscheck()", "(the \"License\"); # you may not use this file except", "shell.\" self.__log_error_and_exit(emsg) # Check if virtual node exists if not", "node that should start shell.\" self.__log_error_and_exit(emsg) # Check if virtual", "# you may not use this file except in compliance", "happy.HappyNetwork import HappyNetwork from WeaveTest import WeaveTest # Q: what", "opts[\"prefix\"] self.ipv4_gateway =opts[\"ipv4_gateway\"] self.dns = opts[\"dns\"] self.use_lwip = opts[\"use_lwip\"] self.node_process_tag", "# Copyright (c) 2016-2017 Nest Labs, Inc. 
# All rights", "\"WEAVE-INET-NODE\" def __log_error_and_exit(self, error): self.logger.error(\"[localhost] WeaveInetDNS: %s\" % (error)) sys.exit(1)", "node_output_value, node_output_data = \\ self.get_test_output(self.node_id, self.node_process_tag, True) node_strace_value, node_strace_data =", "name of the virtual node that should start shell.\" self.__log_error_and_exit(emsg)", "a valid prefix.\" self.__log_error_and_exit(emsg) def __gather_results(self): \"\"\" gather result from", "None options[\"tap_if\"] = None options[\"node_ip\"] = None options[\"ipv4_gateway\"] = None", "# # Unless required by applicable law or agreed to", "time from happy.ReturnMsg import ReturnMsg from happy.Utils import * from", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "the virtual node that should start shell.\" self.__log_error_and_exit(emsg) # Check", "= opts[\"tap_if\"] self.prefix = opts[\"prefix\"] self.ipv4_gateway =opts[\"ipv4_gateway\"] self.dns = opts[\"dns\"]", "Version 2.0 (the \"License\"); # you may not use this", "self.__log_error_and_exit(emsg) def __pre_check(self): # Check if the name of the", "False output = \"\" status = (results['status'] == 0) output", "\" cmd += self.getWeaveInetLayerDNSPath() node_ip = self.getNodeAddressesOnPrefix(self.prefix, self.node_id)[0] if node_ip", "for now \"\"\" cmd = \"sudo \" cmd += self.getWeaveInetLayerDNSPath()", "# Implements WeaveInet class that tests Weave Inet Layer among", "\"\"\" status = False output = \"\" status = (results['status']", "topology.\" % (description, node) self.__log_error_and_exit(emsg) def __pre_check(self): # Check if", "\"\"\" lwip and socket use different command for now \"\"\"", "Copyright (c) 2016-2017 Nest Labs, Inc. # All rights reserved.", "WeaveTest.__init__(self) self.quiet = opts[\"quiet\"] self.node_id = opts[\"node_id\"] self.tap_if = opts[\"tap_if\"]", "command for now \"\"\" cmd = \"sudo \" cmd +=", "implied. 
# See the License for the specific language governing", "__process_results(self, results): \"\"\" process results from gather_results() \"\"\" status =", "that should start shell.\" self.__log_error_and_exit(emsg) # Check if virtual node", "what are the parameters need to specify? options = {}", "import time from happy.ReturnMsg import ReturnMsg from happy.Utils import *", "Implements WeaveInet class that tests Weave Inet Layer among Weave", "under the Apache License, Version 2.0 (the \"License\"); # you", "%s\" % (self.node_id, emsg)) self.__stop_node() node_output_value, node_output_data = \\ self.get_test_output(self.node_id,", "output = \"\" status = (results['status'] == 0) output =", "2016-2017 Nest Labs, Inc. # All rights reserved. # #", "% (self.node_id) self.__log_error_and_exit(emsg) # check if prefix if self.prefix ==", "quiet) return (results) def __process_results(self, results): \"\"\" process results from", "All rights reserved. # # Licensed under the Apache License,", "%s '%s' does not exist in the test topology.\" %", "by applicable law or agreed to in writing, software #", "emsg = \"virtual node %s does not exist.\" % (self.node_id)", "node, %s\" % (self.node_id) self.__log_error_and_exit(emsg) if self.use_lwip: cmd += \"", "import os import sys import time from happy.ReturnMsg import ReturnMsg", "= \\ self.get_test_strace(self.node_id, self.node_process_tag, True) results = self.__gather_results() result, output", "results from gather_results() \"\"\" status = False output = \"\"", "= opts[\"use_lwip\"] self.node_process_tag = \"WEAVE-INET-NODE\" def __log_error_and_exit(self, error): self.logger.error(\"[localhost] WeaveInetDNS:", "#!/usr/bin/env python # # Copyright (c) 2016-2017 Nest Labs, Inc.", "(c) 2016-2017 Nest Labs, Inc. # All rights reserved. 
#", "IP address of the node, %s\" % (self.node_id) self.__log_error_and_exit(emsg) if", "= opts[\"quiet\"] self.node_id = opts[\"node_id\"] self.tap_if = opts[\"tap_if\"] self.prefix =", "= (results['status'] == 0) output = results['output'] return (status, output)", "sys import time from happy.ReturnMsg import ReturnMsg from happy.Utils import", "among Weave Nodes. # import os import sys import time", "the License. # # # @file # Implements WeaveInet class", "results = self.__gather_results() result, output = self.__process_results(results) data = {}", "(description, node) self.__log_error_and_exit(emsg) def __pre_check(self): # Check if the name", "self.__start_node_dnscheck() emsg = \"WeaveInet %s should be running.\" % (self.node_process_tag)", "node_strace_value, node_strace_data = \\ self.get_test_strace(self.node_id, self.node_process_tag, True) results = self.__gather_results()", "% (description, node) self.__log_error_and_exit(emsg) def __pre_check(self): # Check if the", "output = self.__process_results(results) data = {} data[\"node_output\"] = node_output_data data[\"node_strace\"]", "Please specifiy a valid prefix.\" self.__log_error_and_exit(emsg) def __gather_results(self): \"\"\" gather", "= \"virtual node %s does not exist.\" % (self.node_id) self.__log_error_and_exit(emsg)", "not find IP address of the node, %s\" % (self.node_id)", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "from happy.HappyNetwork import HappyNetwork from WeaveTest import WeaveTest # Q:", "Unless required by applicable law or agreed to in writing,", "options = {} options[\"quiet\"] = False options[\"node_id\"] = None options[\"tap_if\"]", "True results = {} results['status'], results['output'] = self.get_test_output(self.node_id, self.node_process_tag, quiet)", "self.__pre_check() self.__start_node_dnscheck() emsg = \"WeaveInet %s should be running.\" %", "self.logger.debug(\"[localhost] WeaveInetDNS: Run.\") self.__pre_check() self.__start_node_dnscheck() emsg = 
\"WeaveInet %s should", "the specific language governing permissions and # limitations under the", "exist in the test topology.\" % (description, node) self.__log_error_and_exit(emsg) def", "applicable law or agreed to in writing, software # distributed", "= results['output'] return (status, output) def __start_node_dnscheck(self): \"\"\" lwip and", "node is given if not self.node_id: emsg = \"Missing name", "self.use_lwip: cmd += \" --tap-device \" + self.tap_if + \"", "gather_results() \"\"\" status = False output = \"\" status =", "the test topology.\" % (description, node) self.__log_error_and_exit(emsg) def __pre_check(self): #", "in writing, software # distributed under the License is distributed", "governing permissions and # limitations under the License. # #", "\"\"\" gather result from get_test_output() \"\"\" quiet = True results", "= None options[\"dns\"] = None options[\"use_lwip\"] = False def option():", "options[\"dns\"] = None options[\"use_lwip\"] = False def option(): return options.copy()", "License. 
# # # @file # Implements WeaveInet class that", "self._nodeExists(node): emsg = \"The %s '%s' does not exist in", "results['status'], results['output'] = self.get_test_output(self.node_id, self.node_process_tag, quiet) return (results) def __process_results(self,", "(status, output) def __start_node_dnscheck(self): \"\"\" lwip and socket use different", "WeaveInetDNS: %s\" % (error)) sys.exit(1) def __checkNodeExists(self, node, description): if", "self.node_process_tag) def run(self): self.logger.debug(\"[localhost] WeaveInetDNS: Run.\") self.__pre_check() self.__start_node_dnscheck() emsg =", "self.__log_error_and_exit(emsg) # check if prefix if self.prefix == None: emsg", "def __start_node_dnscheck(self): \"\"\" lwip and socket use different command for", "if self.prefix == None: emsg = \"prefix is None, Please", "cmd += \" --tap-device \" + self.tap_if + \" -a", "import sys import time from happy.ReturnMsg import ReturnMsg from happy.Utils", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "License, Version 2.0 (the \"License\"); # you may not use", "if self.use_lwip: cmd += \" --tap-device \" + self.tap_if +", "# You may obtain a copy of the License at", "None: emsg = \"prefix is None, Please specifiy a valid", "status = False output = \"\" status = (results['status'] ==", "# Check if the name of the new node is", "if the name of the new node is given if", "get_test_output() \"\"\" quiet = True results = {} results['status'], results['output']", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "socket use different command for now \"\"\" cmd = \"sudo", "= {} options[\"quiet\"] = False options[\"node_id\"] = None options[\"tap_if\"] =", "from happy.ReturnMsg import ReturnMsg from happy.Utils import * from happy.HappyNode", "__start_node_dnscheck(self): \"\"\" lwip and socket use different command for now", "% (self.node_id, emsg)) self.__stop_node() node_output_value, node_output_data = \\ self.get_test_output(self.node_id, 
self.node_process_tag,", "if virtual node exists if not self._nodeExists(): emsg = \"virtual", "specifiy a valid prefix.\" self.__log_error_and_exit(emsg) def __gather_results(self): \"\"\" gather result", "emsg)) self.__stop_node() node_output_value, node_output_data = \\ self.get_test_output(self.node_id, self.node_process_tag, True) node_strace_value,", "node_output_data = \\ self.get_test_output(self.node_id, self.node_process_tag, True) node_strace_value, node_strace_data = \\", "result from get_test_output() \"\"\" quiet = True results = {}", "need to specify? options = {} options[\"quiet\"] = False options[\"node_id\"]", "return (status, output) def __start_node_dnscheck(self): \"\"\" lwip and socket use", "__checkNodeExists(self, node, description): if not self._nodeExists(node): emsg = \"The %s", "the License for the specific language governing permissions and #", "self.getNodeAddressesOnPrefix(self.prefix, self.node_id)[0] if node_ip == None: emsg = \"Could not", "Apache License, Version 2.0 (the \"License\"); # you may not", "either express or implied. 
# See the License for the", "__log_error_and_exit(self, error): self.logger.error(\"[localhost] WeaveInetDNS: %s\" % (error)) sys.exit(1) def __checkNodeExists(self,", "\"\"\" process results from gather_results() \"\"\" status = False output", "running.\" % (self.node_process_tag) self.logger.debug(\"[%s] WeaveInet: %s\" % (self.node_id, emsg)) self.__stop_node()", "= \"WeaveInet %s should be running.\" % (self.node_process_tag) self.logger.debug(\"[%s] WeaveInet:", "cmd, self.node_process_tag, sync_on_output=self.ready_to_service_events_str) def __stop_node(self): self.stop_weave_process(self.node_id, self.node_process_tag) def run(self): self.logger.debug(\"[localhost]", "results): \"\"\" process results from gather_results() \"\"\" status = False", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "\\ \" --dns-server \" + self.dns print \"dns check command", "Run.\") self.__pre_check() self.__start_node_dnscheck() emsg = \"WeaveInet %s should be running.\"", "(error)) sys.exit(1) def __checkNodeExists(self, node, description): if not self._nodeExists(node): emsg", "sync_on_output=self.ready_to_service_events_str) def __stop_node(self): self.stop_weave_process(self.node_id, self.node_process_tag) def run(self): self.logger.debug(\"[localhost] WeaveInetDNS: Run.\")", "None options[\"node_ip\"] = None options[\"ipv4_gateway\"] = None options[\"dns\"] = None", "self.ipv4_gateway =opts[\"ipv4_gateway\"] self.dns = opts[\"dns\"] self.use_lwip = opts[\"use_lwip\"] self.node_process_tag =", "import HappyNode from happy.HappyNetwork import HappyNetwork from WeaveTest import WeaveTest", "\"The %s '%s' does not exist in the test topology.\"", "options): HappyNode.__init__(self) HappyNetwork.__init__(self) WeaveTest.__init__(self) self.quiet = opts[\"quiet\"] self.node_id = opts[\"node_id\"]", "--tap-device \" + self.tap_if + \" -a \" + node_ip", "WeaveTest import WeaveTest # Q: what are the parameters need", "None options[\"dns\"] = 
None options[\"use_lwip\"] = False def option(): return", "self.stop_weave_process(self.node_id, self.node_process_tag) def run(self): self.logger.debug(\"[localhost] WeaveInetDNS: Run.\") self.__pre_check() self.__start_node_dnscheck() emsg", "should start shell.\" self.__log_error_and_exit(emsg) # Check if virtual node exists", "options.copy() class WeaveInetDNS(HappyNode, HappyNetwork, WeaveTest): def __init__(self, opts = options):", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "name of the new node is given if not self.node_id:", "\" -a \" + node_ip + \" --ipv4-gateway \" +", "result, output = self.__process_results(results) data = {} data[\"node_output\"] = node_output_data", "= False def option(): return options.copy() class WeaveInetDNS(HappyNode, HappyNetwork, WeaveTest):", "HappyNode.__init__(self) HappyNetwork.__init__(self) WeaveTest.__init__(self) self.quiet = opts[\"quiet\"] self.node_id = opts[\"node_id\"] self.tap_if", "self.__stop_node() node_output_value, node_output_data = \\ self.get_test_output(self.node_id, self.node_process_tag, True) node_strace_value, node_strace_data", "def __checkNodeExists(self, node, description): if not self._nodeExists(node): emsg = \"The", "emsg = \"prefix is None, Please specifiy a valid prefix.\"", "\"License\"); # you may not use this file except in", "self.node_id = opts[\"node_id\"] self.tap_if = opts[\"tap_if\"] self.prefix = opts[\"prefix\"] self.ipv4_gateway", "self.tap_if + \" -a \" + node_ip + \" --ipv4-gateway", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# # # @file # Implements WeaveInet class that tests", "= \"The %s '%s' does not exist in the test", "results = {} results['status'], results['output'] = self.get_test_output(self.node_id, self.node_process_tag, quiet) return", "# distributed under the License is distributed on an \"AS", "# Unless required by applicable law or agreed to in", "self.tap_if = opts[\"tap_if\"] self.prefix = opts[\"prefix\"] 
self.ipv4_gateway =opts[\"ipv4_gateway\"] self.dns =", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "use different command for now \"\"\" cmd = \"sudo \"", "0) output = results['output'] return (status, output) def __start_node_dnscheck(self): \"\"\"", "You may obtain a copy of the License at #", "WeaveTest # Q: what are the parameters need to specify?", "options[\"node_ip\"] = None options[\"ipv4_gateway\"] = None options[\"dns\"] = None options[\"use_lwip\"]", "self.start_weave_process(self.node_id, cmd, self.node_process_tag, sync_on_output=self.ready_to_service_events_str) def __stop_node(self): self.stop_weave_process(self.node_id, self.node_process_tag) def run(self):", "\\ self.get_test_strace(self.node_id, self.node_process_tag, True) results = self.__gather_results() result, output =", "valid prefix.\" self.__log_error_and_exit(emsg) def __gather_results(self): \"\"\" gather result from get_test_output()", "the Apache License, Version 2.0 (the \"License\"); # you may", "WeaveTest): def __init__(self, opts = options): HappyNode.__init__(self) HappyNetwork.__init__(self) WeaveTest.__init__(self) self.quiet", "opts[\"dns\"] self.use_lwip = opts[\"use_lwip\"] self.node_process_tag = \"WEAVE-INET-NODE\" def __log_error_and_exit(self, error):", "= {} data[\"node_output\"] = node_output_data data[\"node_strace\"] = node_strace_data self.logger.debug(\"[localhost] WeaveInetDNSTest:" ]
[ "0 self.quadrante = 0 self.tema = '' t.bgcolor(\"black\") t.pencolor(\"white\") def", "elif 180 < vquad < 270: self.quadrante = 3 elif", "self.quadrante = 4 if vquad == 0 or vquad ==", "* self.raio * -1) t.left(90) t.forward(self.seno * self.raio * -1)", "self.raio = 0 self.grau = 0 self.seno = 0 self.cosseno", "print (self.tangente) elif self.quadrante == 4: t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) +", "os angulos de resultados indefinidos self.quadrante = 0 def sen(self):", "= 0 self.tangente = 0 self.quadrante = 0 self.tema =", "vquad == 0 or vquad == 90 or vquad ==", "== 1: t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.right(self.grau", "self.reset() # EIXO Y t.left(90) t.penup() t.backward(self.raio + 50) t.pendown()", "self.tangente = math.tan(math.radians(self.grau)) # DEFINE O QUADRANTE DO ANGULO vquad", "* self.raio) t.right(90) t.forward(self.cosseno * self.raio) print (self.cosseno) elif self.quadrante", "t.circle(self.raio) self.reset() def eixos(self): # EIXO X t.penup() t.backward(self.raio +", "or vquad == 270 or vquad == 360: # Quadrante", "self.raio * -1) t.left(90) t.forward(self.cosseno * self.raio) print (self.cosseno) else:", "< vquad < 270: self.quadrante = 3 elif 270 <", "t.pendown() t.forward(pixels%10) def reset(self): # RETORNA PRA POSICAO INICIAL t.penup()", "O SENO t.left(self.grau) t.forward(self.raio) t.pencolor(\"red\") if self.quadrante == 1: t.left(180", "TANGENTE. 
self.seno = math.sin(math.radians(self.grau)) self.cosseno = math.cos(math.radians(self.grau)) self.tangente = math.tan(math.radians(self.grau))", "angulos de resultados indefinidos self.quadrante = 0 def sen(self): #", "t.left(self.grau) t.forward(self.raio) t.pencolor(\"red\") if self.quadrante == 1: t.left(180 - self.grau)", "t.pendown() t.speed(0) t.pensize(2) t.pencolor(\"white\") def circulo(self, raio): # DESENHA O", "2: t.right(self.grau) self.linha((self.cosseno * self.raio) * -1) t.right(90) t.forward(self.seno *", "* -1) t.right(90) t.forward(self.cosseno * self.raio * -1) print (self.cosseno)", "(self.seno) elif self.quadrante == 2: t.right(self.grau) self.linha((self.cosseno * self.raio) *", "0 self.seno = 0 self.cosseno = 0 self.tangente = 0", "t.forward(5) t.penup() t.forward(5) t.pendown() t.forward(pixels%10) def reset(self): # RETORNA PRA", "vquad = self.grau if 0 < vquad < 90: self.quadrante", "def sen(self): # DESENHA O SENO t.left(self.grau) t.forward(self.raio) t.pencolor(\"red\") if", "DESENHA UMA LINHA PONTILHADA pixels = int(pxls//1) if pixels %", "270: self.quadrante = 3 elif 270 < vquad < 360:", "self.quadrante == 1: t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio)", "elif self.quadrante == 2: t.left(180) t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2))", "self.linha(self.cosseno * self.raio * -1) t.left(90) t.forward(self.seno * self.raio *", "elif self.quadrante == 4: t.left(180 - self.grau) self.linha(self.cosseno * self.raio)", "# DESENHA UMA SETA t.left(90) t.forward(5) t.right(120) t.forward(10) t.right(120) t.forward(10)", "(self.cosseno) elif self.quadrante == 3: t.right(self.grau - 90) self.linha(self.seno *", "self.quadrante == 3: t.right(self.grau) self.linha(self.cosseno * self.raio * -1) t.left(90)", "resultados indefinidos self.quadrante = 0 def sen(self): # DESENHA O", "== 3: t.right(self.grau) 
self.linha(self.cosseno * self.raio * -1) t.left(90) t.forward(self.seno", "- self.raio) t.left(90 - self.grau) t.forward(self.tangente * self.raio) print (self.tangente)", "self.quadrante = 2 elif 180 < vquad < 270: self.quadrante", "in range(0, pixels//10): t.pendown() t.forward(5) t.penup() t.forward(5) t.pendown() t.forward(pixels%10) def", "elif self.quadrante == 2: t.right(self.grau) self.linha((self.cosseno * self.raio) * -1)", "DESENHA UMA SETA t.left(90) t.forward(5) t.right(120) t.forward(10) t.right(120) t.forward(10) t.right(120)", "elif 90 < vquad < 180: self.quadrante = 2 elif", "90 < vquad < 180: self.quadrante = 2 elif 180", "math.sin(math.radians(self.grau)) self.cosseno = math.cos(math.radians(self.grau)) self.tangente = math.tan(math.radians(self.grau)) # DEFINE O", "print (self.cosseno) else: print(\"Erro: angulo invalido\") self.reset() def tan(self): #", "self.raio) * -1) t.right(90) t.forward(self.seno * self.raio) print (self.seno) elif", "t.forward(self.seno * self.raio * -1) print (self.seno) elif self.quadrante ==", "t.penup() t.home() t.pendown() t.speed(0) t.pensize(2) t.pencolor(\"white\") def circulo(self, raio): #", "tan(self): # DESENHA A TANGENTE t.left(self.grau) t.penup() t.pencolor(\"blue\") if self.quadrante", "0 self.tema = '' t.bgcolor(\"black\") t.pencolor(\"white\") def seta(self): # DESENHA", "< vquad < 360: self.quadrante = 4 if vquad ==", "= pixels + 1 for x in range(0, pixels//10): t.pendown()", "EIXO X t.penup() t.backward(self.raio + 50) t.pendown() self.linha((self.raio*2)+100) self.seta() self.reset()", "== 2: t.left(180) t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio)", "== 1: t.right(self.grau + 90) self.linha(self.seno * self.raio) t.right(90) t.forward(self.cosseno", "if self.quadrante == 1: t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) -", "* self.raio) t.left(90) t.forward(self.seno * 
self.raio) print (self.seno) elif self.quadrante", "t.right(120) t.forward(10) t.right(120) t.forward(10) t.right(120) t.forward(5) t.right(90) def linha(self, pxls):", "self.linha(self.seno * self.raio * -1) t.right(90) t.forward(self.cosseno * self.raio *", "self.raio * -1) print (self.seno) elif self.quadrante == 4: t.left(180", "if self.quadrante == 1: t.left(180 - self.grau) self.linha(self.cosseno * self.raio)", "(self.tangente) elif self.quadrante == 4: t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2))", "self.raio) t.right(90) t.forward(self.cosseno * self.raio) print (self.cosseno) elif self.quadrante ==", "90: self.quadrante = 1 elif 90 < vquad < 180:", "math.cos(math.radians(self.grau)) self.tangente = math.tan(math.radians(self.grau)) # DEFINE O QUADRANTE DO ANGULO", "vquad < 180: self.quadrante = 2 elif 180 < vquad", "t.right(90) t.forward(self.cosseno * self.raio) print (self.cosseno) elif self.quadrante == 2:", "VALOR DO SENO, COSSENO E TANGENTE. 
self.seno = math.sin(math.radians(self.grau)) self.cosseno", "t.left(90) t.forward(self.seno * self.raio) print (self.seno) elif self.quadrante == 2:", "self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.right(90 + self.grau) t.forward(self.tangente *", "t.penup() t.pencolor(\"blue\") if self.quadrante == 1: t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) +", "360 t.left(self.grau) t.forward(self.raio) self.reset() # DEFINE O VALOR DO SENO,", "QUADRANTE DO ANGULO vquad = self.grau if 0 < vquad", "t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.right(self.grau + 90) t.forward(self.tangente", "0 self.grau = 0 self.seno = 0 self.cosseno = 0", "self.quadrante = 3 elif 270 < vquad < 360: self.quadrante", "3: t.right(self.grau - 90) self.linha(self.seno * self.raio * -1) t.right(90)", "self.tema = '' t.bgcolor(\"black\") t.pencolor(\"white\") def seta(self): # DESENHA UMA", "t.forward(5) t.pendown() t.forward(pixels%10) def reset(self): # RETORNA PRA POSICAO INICIAL", "- self.raio) t.right(90 + self.grau) t.forward(self.tangente * self.raio) print (self.tangente)", "(self.seno) elif self.quadrante == 4: t.left(180 - self.grau) self.linha(self.cosseno *", "(self.seno) else: print(\"Erro: angulo invalido\") self.reset() def csen(self): # DESENHA", "50) t.pendown() self.linha((self.raio*2)+100) self.seta() self.reset() def angulo(self, grau): # DESENHA", "vquad < 360: self.quadrante = 4 if vquad == 0", "+ (self.raio**2)) - self.raio) t.right(self.grau + 90) t.forward(self.tangente * self.raio)", "self.linha(self.seno * self.raio * -1) t.left(90) t.forward(self.cosseno * self.raio) print", "O VALOR DO SENO, COSSENO E TANGENTE. 
self.seno = math.sin(math.radians(self.grau))", "invalido\") self.reset() def csen(self): # DESENHA O COSSENO t.left(self.grau) t.forward(self.raio)", "SENO t.left(self.grau) t.forward(self.raio) t.pencolor(\"red\") if self.quadrante == 1: t.left(180 -", "< 180: self.quadrante = 2 elif 180 < vquad <", "representa os angulos de resultados indefinidos self.quadrante = 0 def", "= 3 elif 270 < vquad < 360: self.quadrante =", "angulo invalido\") self.reset() def tan(self): # DESENHA A TANGENTE t.left(self.grau)", "t.forward(self.raio) t.left(90) t.pendown() t.circle(self.raio) self.reset() def eixos(self): # EIXO X", "self.linha((self.cosseno * self.raio) * -1) t.right(90) t.forward(self.seno * self.raio) print", "self.raio * -1) print (self.cosseno) elif self.quadrante == 4: t.right(self.grau", "int(pxls//1) if pixels % 2 == 0: pixels = pixels", "* -1) print (self.cosseno) elif self.quadrante == 4: t.right(self.grau -", "-1) t.left(90) t.forward(self.cosseno * self.raio) print (self.cosseno) else: print(\"Erro: angulo", "def __init__(self): self.raio = 0 self.grau = 0 self.seno =", "(self.cosseno) elif self.quadrante == 2: t.right(self.grau + 90) self.linha(self.seno *", "def eixos(self): # EIXO X t.penup() t.backward(self.raio + 50) t.pendown()", "== 0: pixels = pixels + 1 for x in", "* self.raio * -1) print (self.cosseno) elif self.quadrante == 4:", "4: t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.right(90 +", "* self.raio) print (self.tangente) elif self.quadrante == 3: t.left(180) t.forward(self.raio)", "print (self.seno) elif self.quadrante == 4: t.left(180 - self.grau) self.linha(self.cosseno", "self.quadrante == 1: t.right(self.grau + 90) self.linha(self.seno * self.raio) t.right(90)", "if pixels % 2 == 0: pixels = pixels +", "grau % 360 t.left(self.grau) t.forward(self.raio) self.reset() # DEFINE O VALOR", "= int(pxls//1) if pixels % 2 == 0: pixels =", "+ (self.raio**2)) - self.raio) 
t.right(90 + self.grau) t.forward(self.tangente * self.raio)", "self.reset() def csen(self): # DESENHA O COSSENO t.left(self.grau) t.forward(self.raio) t.pencolor(\"green\")", "self.seta() self.reset() def angulo(self, grau): # DESENHA O ANGULO self.grau", "self.reset() def angulo(self, grau): # DESENHA O ANGULO self.grau =", "(self.cosseno) else: print(\"Erro: angulo invalido\") self.reset() def tan(self): # DESENHA", "math.tan(math.radians(self.grau)) # DEFINE O QUADRANTE DO ANGULO vquad = self.grau", "180 < vquad < 270: self.quadrante = 3 elif 270", "t.forward(self.cosseno * self.raio) print (self.cosseno) elif self.quadrante == 2: t.right(self.grau", "__init__(self): self.raio = 0 self.grau = 0 self.seno = 0", "self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.right(self.grau + 90) t.forward(self.tangente *", "= grau % 360 t.left(self.grau) t.forward(self.raio) self.reset() # DEFINE O", "self.seta() self.reset() # EIXO Y t.left(90) t.penup() t.backward(self.raio + 50)", "+ 50) t.pendown() self.linha((self.raio*2)+100) self.seta() self.reset() # EIXO Y t.left(90)", "* self.raio * -1) t.right(90) t.forward(self.cosseno * self.raio * -1)", "self.tangente = 0 self.quadrante = 0 self.tema = '' t.bgcolor(\"black\")", "DO ANGULO vquad = self.grau if 0 < vquad <", "t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.right(self.grau + 90)", "= math.cos(math.radians(self.grau)) self.tangente = math.tan(math.radians(self.grau)) # DEFINE O QUADRANTE DO", "* -1) t.right(90) t.forward(self.seno * self.raio) print (self.seno) elif self.quadrante", "- self.raio) t.right(self.grau + 90) t.forward(self.tangente * self.raio) print (self.tangente)", "t.right(self.grau) self.linha((self.cosseno * self.raio) * -1) t.right(90) t.forward(self.seno * self.raio)", "elif self.quadrante == 4: t.right(self.grau - 90) self.linha(self.seno * self.raio", "elif self.quadrante == 3: t.left(180) 
t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2))", "# DESENHA O CIRCULO self.raio = raio t.right(90) t.penup() t.forward(self.raio)", "turtle as t import math class circTrigo: def __init__(self): self.raio", "print (self.cosseno) elif self.quadrante == 2: t.right(self.grau + 90) self.linha(self.seno", "t.right(90 + self.grau) t.forward(self.tangente * self.raio) print (self.tangente) else: print(\"Erro:", "1: t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.right(self.grau +", "self.quadrante == 3: t.left(180) t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) -", "* self.raio) print (self.tangente) elif self.quadrante == 4: t.forward(self.raio) t.pendown()", "* self.raio) print (self.seno) elif self.quadrante == 3: t.right(self.grau) self.linha(self.cosseno", "3 elif 270 < vquad < 360: self.quadrante = 4", "-1) print (self.seno) elif self.quadrante == 4: t.left(180 - self.grau)", "print (self.seno) elif self.quadrante == 3: t.right(self.grau) self.linha(self.cosseno * self.raio", "= 0 self.quadrante = 0 self.tema = '' t.bgcolor(\"black\") t.pencolor(\"white\")", "t.pendown() self.linha((self.raio*2)+100) self.seta() self.reset() # EIXO Y t.left(90) t.penup() t.backward(self.raio", "reset(self): # RETORNA PRA POSICAO INICIAL t.penup() t.home() t.pendown() t.speed(0)", "t.forward(5) t.right(120) t.forward(10) t.right(120) t.forward(10) t.right(120) t.forward(5) t.right(90) def linha(self,", "indefinidos self.quadrante = 0 def sen(self): # DESENHA O SENO", "if vquad == 0 or vquad == 90 or vquad", "O ANGULO self.grau = grau % 360 t.left(self.grau) t.forward(self.raio) self.reset()", "t.left(90) t.forward(5) t.right(120) t.forward(10) t.right(120) t.forward(10) t.right(120) t.forward(5) t.right(90) def", "print (self.seno) else: print(\"Erro: angulo invalido\") self.reset() def csen(self): #", "% 
2 == 0: pixels = pixels + 1 for", "TANGENTE t.left(self.grau) t.penup() t.pencolor(\"blue\") if self.quadrante == 1: t.forward(self.raio) t.pendown()", "== 180 or vquad == 270 or vquad == 360:", "t.right(90) t.forward(self.seno * self.raio) print (self.seno) elif self.quadrante == 3:", "if 0 < vquad < 90: self.quadrante = 1 elif", "t.pencolor(\"green\") if self.quadrante == 1: t.right(self.grau + 90) self.linha(self.seno *", "self.raio) t.right(self.grau + 90) t.forward(self.tangente * self.raio) print (self.tangente) elif", "t.pendown() self.linha((self.raio*2)+100) self.seta() self.reset() def angulo(self, grau): # DESENHA O", "DESENHA O CIRCULO self.raio = raio t.right(90) t.penup() t.forward(self.raio) t.left(90)", "== 4: t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.right(90", "* self.raio) print (self.cosseno) elif self.quadrante == 2: t.right(self.grau +", "0 self.tangente = 0 self.quadrante = 0 self.tema = ''", "E TANGENTE. 
self.seno = math.sin(math.radians(self.grau)) self.cosseno = math.cos(math.radians(self.grau)) self.tangente =", "== 4: t.right(self.grau - 90) self.linha(self.seno * self.raio * -1)", "t.penup() t.forward(5) t.pendown() t.forward(pixels%10) def reset(self): # RETORNA PRA POSICAO", "print(\"Erro: angulo invalido\") self.reset() def csen(self): # DESENHA O COSSENO", "t.right(self.grau + 90) self.linha(self.seno * self.raio) t.right(90) t.forward(self.cosseno * self.raio)", "DESENHA O SENO t.left(self.grau) t.forward(self.raio) t.pencolor(\"red\") if self.quadrante == 1:", "eixos(self): # EIXO X t.penup() t.backward(self.raio + 50) t.pendown() self.linha((self.raio*2)+100)", "= 2 elif 180 < vquad < 270: self.quadrante =", "50) t.pendown() self.linha((self.raio*2)+100) self.seta() self.reset() # EIXO Y t.left(90) t.penup()", "0 representa os angulos de resultados indefinidos self.quadrante = 0", "t.left(180 - self.grau) self.linha(self.cosseno * self.raio) t.left(90) t.forward(self.seno * self.raio)", "== 90 or vquad == 180 or vquad == 270", "vquad == 270 or vquad == 360: # Quadrante 0", "t.left(90) t.penup() t.backward(self.raio + 50) t.pendown() self.linha((self.raio*2)+100) self.seta() self.reset() def", "or vquad == 90 or vquad == 180 or vquad", "t.pencolor(\"white\") def circulo(self, raio): # DESENHA O CIRCULO self.raio =", "== 1: t.left(180 - self.grau) self.linha(self.cosseno * self.raio) t.left(90) t.forward(self.seno", "self.linha(self.cosseno * self.raio) t.left(90) t.forward(self.seno * self.raio) print (self.seno) else:", "import math class circTrigo: def __init__(self): self.raio = 0 self.grau", "vquad < 270: self.quadrante = 3 elif 270 < vquad", "vquad == 180 or vquad == 270 or vquad ==", "(self.seno) elif self.quadrante == 3: t.right(self.grau) self.linha(self.cosseno * self.raio *", "sen(self): # DESENHA O SENO t.left(self.grau) t.forward(self.raio) t.pencolor(\"red\") if self.quadrante", "+ self.grau) t.forward(self.tangente * self.raio) print 
(self.tangente) else: print(\"Erro: angulo", "self.quadrante == 4: t.right(self.grau - 90) self.linha(self.seno * self.raio *", "self.raio) print (self.tangente) elif self.quadrante == 2: t.left(180) t.forward(self.raio) t.pendown()", "t.penup() t.backward(self.raio + 50) t.pendown() self.linha((self.raio*2)+100) self.seta() self.reset() def angulo(self,", "t.forward(pixels%10) def reset(self): # RETORNA PRA POSICAO INICIAL t.penup() t.home()", "270 or vquad == 360: # Quadrante 0 representa os", "# DESENHA O ANGULO self.grau = grau % 360 t.left(self.grau)", "+ (self.raio**2)) - self.raio) t.right(self.grau - 90) t.forward(self.tangente * self.raio)", "self.raio) t.left(90 - self.grau) t.forward(self.tangente * self.raio) print (self.tangente) elif", "self.cosseno = math.cos(math.radians(self.grau)) self.tangente = math.tan(math.radians(self.grau)) # DEFINE O QUADRANTE", "def angulo(self, grau): # DESENHA O ANGULO self.grau = grau", "90) self.linha(self.seno * self.raio * -1) t.right(90) t.forward(self.cosseno * self.raio", "csen(self): # DESENHA O COSSENO t.left(self.grau) t.forward(self.raio) t.pencolor(\"green\") if self.quadrante", "LINHA PONTILHADA pixels = int(pxls//1) if pixels % 2 ==", "= 0 self.tema = '' t.bgcolor(\"black\") t.pencolor(\"white\") def seta(self): #", "vquad < 90: self.quadrante = 1 elif 90 < vquad", "t.pencolor(\"red\") if self.quadrante == 1: t.left(180 - self.grau) self.linha(self.cosseno *", "* self.raio * -1) print (self.seno) elif self.quadrante == 4:", "COSSENO E TANGENTE. 
self.seno = math.sin(math.radians(self.grau)) self.cosseno = math.cos(math.radians(self.grau)) self.tangente", "self.seno = 0 self.cosseno = 0 self.tangente = 0 self.quadrante", "self.grau if 0 < vquad < 90: self.quadrante = 1", "(self.cosseno) elif self.quadrante == 4: t.right(self.grau - 90) self.linha(self.seno *", "t.forward(self.tangente * self.raio) print (self.tangente) elif self.quadrante == 3: t.left(180)", "= math.sin(math.radians(self.grau)) self.cosseno = math.cos(math.radians(self.grau)) self.tangente = math.tan(math.radians(self.grau)) # DEFINE", "self.seno = math.sin(math.radians(self.grau)) self.cosseno = math.cos(math.radians(self.grau)) self.tangente = math.tan(math.radians(self.grau)) #", "INICIAL t.penup() t.home() t.pendown() t.speed(0) t.pensize(2) t.pencolor(\"white\") def circulo(self, raio):", "t.forward(self.seno * self.raio) print (self.seno) elif self.quadrante == 2: t.right(self.grau)", "* self.raio) print (self.cosseno) else: print(\"Erro: angulo invalido\") self.reset() def", "angulo(self, grau): # DESENHA O ANGULO self.grau = grau %", "self.linha(self.cosseno * self.raio) t.left(90) t.forward(self.seno * self.raio) print (self.seno) elif", "1: t.left(180 - self.grau) self.linha(self.cosseno * self.raio) t.left(90) t.forward(self.seno *", "t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.right(self.grau - 90)", "def reset(self): # RETORNA PRA POSICAO INICIAL t.penup() t.home() t.pendown()", "< vquad < 90: self.quadrante = 1 elif 90 <", "pixels = pixels + 1 for x in range(0, pixels//10):", "ANGULO self.grau = grau % 360 t.left(self.grau) t.forward(self.raio) self.reset() #", "# DEFINE O QUADRANTE DO ANGULO vquad = self.grau if", "- self.raio) t.right(self.grau - 90) t.forward(self.tangente * self.raio) print (self.tangente)", "else: print(\"Erro: angulo invalido\") self.reset() def csen(self): # DESENHA O", "t.pendown() t.circle(self.raio) self.reset() def 
eixos(self): # EIXO X t.penup() t.backward(self.raio", "t.forward(self.cosseno * self.raio * -1) print (self.cosseno) elif self.quadrante ==", "t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.right(self.grau - 90) t.forward(self.tangente", "self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.left(90 - self.grau) t.forward(self.tangente *", "angulo invalido\") self.reset() def csen(self): # DESENHA O COSSENO t.left(self.grau)", "< 90: self.quadrante = 1 elif 90 < vquad <", "ANGULO vquad = self.grau if 0 < vquad < 90:", "* self.raio) print (self.seno) else: print(\"Erro: angulo invalido\") self.reset() def", "90) self.linha(self.seno * self.raio) t.right(90) t.forward(self.cosseno * self.raio) print (self.cosseno)", "t.right(120) t.forward(5) t.right(90) def linha(self, pxls): # DESENHA UMA LINHA", "self.raio * -1) t.left(90) t.forward(self.seno * self.raio * -1) print", "import turtle as t import math class circTrigo: def __init__(self):", "== 360: # Quadrante 0 representa os angulos de resultados", "- 90) self.linha(self.seno * self.raio * -1) t.right(90) t.forward(self.cosseno *", "PONTILHADA pixels = int(pxls//1) if pixels % 2 == 0:", "t.left(90 - self.grau) t.forward(self.tangente * self.raio) print (self.tangente) elif self.quadrante", "t.forward(self.tangente * self.raio) print (self.tangente) elif self.quadrante == 4: t.forward(self.raio)", "= math.tan(math.radians(self.grau)) # DEFINE O QUADRANTE DO ANGULO vquad =", "= 0 self.seno = 0 self.cosseno = 0 self.tangente =", "= 4 if vquad == 0 or vquad == 90", "= self.grau if 0 < vquad < 90: self.quadrante =", "self.quadrante = 0 self.tema = '' t.bgcolor(\"black\") t.pencolor(\"white\") def seta(self):", "t.right(self.grau + 90) t.forward(self.tangente * self.raio) print (self.tangente) elif self.quadrante", "self.grau) t.forward(self.tangente * self.raio) print (self.tangente) else: print(\"Erro: angulo invalido\")", "270 < vquad < 
360: self.quadrante = 4 if vquad", "== 0 or vquad == 90 or vquad == 180", "== 2: t.right(self.grau + 90) self.linha(self.seno * self.raio) t.right(90) t.forward(self.cosseno", "t.pensize(2) t.pencolor(\"white\") def circulo(self, raio): # DESENHA O CIRCULO self.raio", "3: t.left(180) t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.right(self.grau", "self.raio) print (self.seno) elif self.quadrante == 3: t.right(self.grau) self.linha(self.cosseno *", "0 self.cosseno = 0 self.tangente = 0 self.quadrante = 0", "self.linha((self.raio*2)+100) self.seta() self.reset() def angulo(self, grau): # DESENHA O ANGULO", "t.right(self.grau - 90) t.forward(self.tangente * self.raio) print (self.tangente) elif self.quadrante", "math class circTrigo: def __init__(self): self.raio = 0 self.grau =", "t.right(self.grau) self.linha(self.cosseno * self.raio * -1) t.left(90) t.forward(self.seno * self.raio", "elif 270 < vquad < 360: self.quadrante = 4 if", "or vquad == 360: # Quadrante 0 representa os angulos", "self.quadrante == 3: t.right(self.grau - 90) self.linha(self.seno * self.raio *", "180: self.quadrante = 2 elif 180 < vquad < 270:", "raio): # DESENHA O CIRCULO self.raio = raio t.right(90) t.penup()", "DEFINE O QUADRANTE DO ANGULO vquad = self.grau if 0", "4: t.left(180 - self.grau) self.linha(self.cosseno * self.raio) t.left(90) t.forward(self.seno *", "- 90) self.linha(self.seno * self.raio * -1) t.left(90) t.forward(self.cosseno *", "t.pendown() t.forward(5) t.penup() t.forward(5) t.pendown() t.forward(pixels%10) def reset(self): # RETORNA", "t.right(90) t.forward(self.cosseno * self.raio * -1) print (self.cosseno) elif self.quadrante", "t.forward(5) t.right(90) def linha(self, pxls): # DESENHA UMA LINHA PONTILHADA", "print(\"Erro: angulo invalido\") self.reset() def tan(self): # DESENHA A TANGENTE", "< 360: self.quadrante = 4 if vquad == 0 or", "== 3: t.left(180) t.forward(self.raio) t.pendown() 
self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio)", "90) self.linha(self.seno * self.raio * -1) t.left(90) t.forward(self.cosseno * self.raio)", "# EIXO X t.penup() t.backward(self.raio + 50) t.pendown() self.linha((self.raio*2)+100) self.seta()", "self.linha(self.seno * self.raio) t.right(90) t.forward(self.cosseno * self.raio) print (self.cosseno) elif", "elif self.quadrante == 3: t.right(self.grau - 90) self.linha(self.seno * self.raio", "self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.right(self.grau - 90) t.forward(self.tangente *", "= 0 def sen(self): # DESENHA O SENO t.left(self.grau) t.forward(self.raio)", "print (self.tangente) elif self.quadrante == 3: t.left(180) t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2)", "t.left(180) t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.right(self.grau -", "self.raio) print (self.seno) elif self.quadrante == 2: t.right(self.grau) self.linha((self.cosseno *", "COSSENO t.left(self.grau) t.forward(self.raio) t.pencolor(\"green\") if self.quadrante == 1: t.right(self.grau +", "2 elif 180 < vquad < 270: self.quadrante = 3", "+ 1 for x in range(0, pixels//10): t.pendown() t.forward(5) t.penup()", "t.left(self.grau) t.penup() t.pencolor(\"blue\") if self.quadrante == 1: t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2)", "2: t.right(self.grau + 90) self.linha(self.seno * self.raio) t.right(90) t.forward(self.cosseno *", "t.forward(self.tangente * self.raio) print (self.tangente) elif self.quadrante == 2: t.left(180)", "t.forward(self.cosseno * self.raio) print (self.cosseno) else: print(\"Erro: angulo invalido\") self.reset()", "1: t.right(self.grau + 90) self.linha(self.seno * self.raio) t.right(90) t.forward(self.cosseno *", "self.quadrante == 4: t.forward(self.raio) t.pendown() 
self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio)", "90) t.forward(self.tangente * self.raio) print (self.tangente) elif self.quadrante == 2:", "2 == 0: pixels = pixels + 1 for x", "print (self.cosseno) elif self.quadrante == 3: t.right(self.grau - 90) self.linha(self.seno", "O COSSENO t.left(self.grau) t.forward(self.raio) t.pencolor(\"green\") if self.quadrante == 1: t.right(self.grau", "t.pencolor(\"blue\") if self.quadrante == 1: t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2))", "+ 90) t.forward(self.tangente * self.raio) print (self.tangente) elif self.quadrante ==", "t.right(90) t.penup() t.forward(self.raio) t.left(90) t.pendown() t.circle(self.raio) self.reset() def eixos(self): #", "t.home() t.pendown() t.speed(0) t.pensize(2) t.pencolor(\"white\") def circulo(self, raio): # DESENHA", "< 270: self.quadrante = 3 elif 270 < vquad <", "as t import math class circTrigo: def __init__(self): self.raio =", "self.raio) print (self.cosseno) elif self.quadrante == 3: t.right(self.grau - 90)", "t.backward(self.raio + 50) t.pendown() self.linha((self.raio*2)+100) self.seta() self.reset() # EIXO Y", "elif self.quadrante == 3: t.right(self.grau) self.linha(self.cosseno * self.raio * -1)", "t.right(self.grau - 90) self.linha(self.seno * self.raio * -1) t.right(90) t.forward(self.cosseno", "<reponame>ZezaoDev/Circtrigo import turtle as t import math class circTrigo: def", "0: pixels = pixels + 1 for x in range(0,", "DESENHA O COSSENO t.left(self.grau) t.forward(self.raio) t.pencolor(\"green\") if self.quadrante == 1:", "seta(self): # DESENHA UMA SETA t.left(90) t.forward(5) t.right(120) t.forward(10) t.right(120)", "self.raio = raio t.right(90) t.penup() t.forward(self.raio) t.left(90) t.pendown() t.circle(self.raio) self.reset()", "-1) t.right(90) t.forward(self.seno * self.raio) print (self.seno) elif self.quadrante ==", "- self.grau) self.linha(self.cosseno * self.raio) t.left(90) 
t.forward(self.seno * self.raio) print", "self.quadrante == 1: t.left(180 - self.grau) self.linha(self.cosseno * self.raio) t.left(90)", "self.reset() def eixos(self): # EIXO X t.penup() t.backward(self.raio + 50)", "360: self.quadrante = 4 if vquad == 0 or vquad", "t.left(180) t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.left(90 -", "360: # Quadrante 0 representa os angulos de resultados indefinidos", "self.grau = 0 self.seno = 0 self.cosseno = 0 self.tangente", "def seta(self): # DESENHA UMA SETA t.left(90) t.forward(5) t.right(120) t.forward(10)", "+ 50) t.pendown() self.linha((self.raio*2)+100) self.seta() self.reset() def angulo(self, grau): #", "t.backward(self.raio + 50) t.pendown() self.linha((self.raio*2)+100) self.seta() self.reset() def angulo(self, grau):", "(self.raio**2)) - self.raio) t.left(90 - self.grau) t.forward(self.tangente * self.raio) print", "t.left(90) t.pendown() t.circle(self.raio) self.reset() def eixos(self): # EIXO X t.penup()", "CIRCULO self.raio = raio t.right(90) t.penup() t.forward(self.raio) t.left(90) t.pendown() t.circle(self.raio)", "invalido\") self.reset() def tan(self): # DESENHA A TANGENTE t.left(self.grau) t.penup()", "de resultados indefinidos self.quadrante = 0 def sen(self): # DESENHA", "t.forward(self.raio) t.pencolor(\"red\") if self.quadrante == 1: t.left(180 - self.grau) self.linha(self.cosseno", "t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.right(90 + self.grau) t.forward(self.tangente", "# DESENHA UMA LINHA PONTILHADA pixels = int(pxls//1) if pixels", "self.reset() # DEFINE O VALOR DO SENO, COSSENO E TANGENTE.", "- 90) t.forward(self.tangente * self.raio) print (self.tangente) elif self.quadrante ==", "vquad == 360: # Quadrante 0 representa os angulos de", "1 elif 90 < vquad < 180: self.quadrante = 2", "(self.tangente) elif self.quadrante == 3: t.left(180) t.forward(self.raio) t.pendown() 
self.linha(math.sqrt(((self.tangente*self.raio)**2) +", "t.pencolor(\"white\") def seta(self): # DESENHA UMA SETA t.left(90) t.forward(5) t.right(120)", "vquad == 90 or vquad == 180 or vquad ==", "(self.tangente) elif self.quadrante == 2: t.left(180) t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) +", "linha(self, pxls): # DESENHA UMA LINHA PONTILHADA pixels = int(pxls//1)", "t.forward(self.cosseno * self.raio) print (self.cosseno) elif self.quadrante == 3: t.right(self.grau", "DEFINE O VALOR DO SENO, COSSENO E TANGENTE. self.seno =", "self.raio) print (self.cosseno) elif self.quadrante == 2: t.right(self.grau + 90)", "pixels//10): t.pendown() t.forward(5) t.penup() t.forward(5) t.pendown() t.forward(pixels%10) def reset(self): #", "self.grau) t.forward(self.tangente * self.raio) print (self.tangente) elif self.quadrante == 3:", "self.raio) t.right(90 + self.grau) t.forward(self.tangente * self.raio) print (self.tangente) else:", "# Quadrante 0 representa os angulos de resultados indefinidos self.quadrante", "-1) print (self.cosseno) elif self.quadrante == 4: t.right(self.grau - 90)", "# DESENHA O SENO t.left(self.grau) t.forward(self.raio) t.pencolor(\"red\") if self.quadrante ==", "t.right(90) def linha(self, pxls): # DESENHA UMA LINHA PONTILHADA pixels", "t.right(self.grau - 90) self.linha(self.seno * self.raio * -1) t.left(90) t.forward(self.cosseno", "1 for x in range(0, pixels//10): t.pendown() t.forward(5) t.penup() t.forward(5)", "SENO, COSSENO E TANGENTE. 
self.seno = math.sin(math.radians(self.grau)) self.cosseno = math.cos(math.radians(self.grau))", "self.raio) t.left(90) t.forward(self.seno * self.raio) print (self.seno) elif self.quadrante ==", "== 270 or vquad == 360: # Quadrante 0 representa", "print (self.seno) elif self.quadrante == 2: t.right(self.grau) self.linha((self.cosseno * self.raio)", "(self.raio**2)) - self.raio) t.right(self.grau - 90) t.forward(self.tangente * self.raio) print", "# DESENHA O COSSENO t.left(self.grau) t.forward(self.raio) t.pencolor(\"green\") if self.quadrante ==", "for x in range(0, pixels//10): t.pendown() t.forward(5) t.penup() t.forward(5) t.pendown()", "t.left(90) t.forward(self.seno * self.raio) print (self.seno) else: print(\"Erro: angulo invalido\")", "t.forward(10) t.right(120) t.forward(10) t.right(120) t.forward(5) t.right(90) def linha(self, pxls): #", "'' t.bgcolor(\"black\") t.pencolor(\"white\") def seta(self): # DESENHA UMA SETA t.left(90)", "self.raio) print (self.cosseno) else: print(\"Erro: angulo invalido\") self.reset() def tan(self):", "grau): # DESENHA O ANGULO self.grau = grau % 360", "- self.grau) t.forward(self.tangente * self.raio) print (self.tangente) elif self.quadrante ==", "def linha(self, pxls): # DESENHA UMA LINHA PONTILHADA pixels =", "self.grau) self.linha(self.cosseno * self.raio) t.left(90) t.forward(self.seno * self.raio) print (self.seno)", "pixels % 2 == 0: pixels = pixels + 1", "t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.right(90 + self.grau)", "circTrigo: def __init__(self): self.raio = 0 self.grau = 0 self.seno", "EIXO Y t.left(90) t.penup() t.backward(self.raio + 50) t.pendown() self.linha((self.raio*2)+100) self.seta()", "self.raio) t.left(90) t.forward(self.seno * self.raio) print (self.seno) else: print(\"Erro: angulo", "Quadrante 0 representa os angulos de resultados indefinidos self.quadrante =", "def csen(self): # DESENHA O COSSENO t.left(self.grau) 
t.forward(self.raio) t.pencolor(\"green\") if", "pxls): # DESENHA UMA LINHA PONTILHADA pixels = int(pxls//1) if", "self.quadrante == 2: t.left(180) t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) -", "t.forward(self.raio) self.reset() # DEFINE O VALOR DO SENO, COSSENO E", "print (self.tangente) elif self.quadrante == 2: t.left(180) t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2)", "< vquad < 180: self.quadrante = 2 elif 180 <", "circulo(self, raio): # DESENHA O CIRCULO self.raio = raio t.right(90)", "= 0 self.cosseno = 0 self.tangente = 0 self.quadrante =", "+ 90) self.linha(self.seno * self.raio) t.right(90) t.forward(self.cosseno * self.raio) print", "UMA LINHA PONTILHADA pixels = int(pxls//1) if pixels % 2", "* self.raio * -1) t.left(90) t.forward(self.cosseno * self.raio) print (self.cosseno)", "90) t.forward(self.tangente * self.raio) print (self.tangente) elif self.quadrante == 4:", "90 or vquad == 180 or vquad == 270 or", "-1) t.right(90) t.forward(self.cosseno * self.raio * -1) print (self.cosseno) elif", "2: t.left(180) t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.left(90", "t.speed(0) t.pensize(2) t.pencolor(\"white\") def circulo(self, raio): # DESENHA O CIRCULO", "range(0, pixels//10): t.pendown() t.forward(5) t.penup() t.forward(5) t.pendown() t.forward(pixels%10) def reset(self):", "* -1) print (self.seno) elif self.quadrante == 4: t.left(180 -", "DESENHA A TANGENTE t.left(self.grau) t.penup() t.pencolor(\"blue\") if self.quadrante == 1:", "t.left(90) t.forward(self.cosseno * self.raio) print (self.cosseno) else: print(\"Erro: angulo invalido\")", "else: print(\"Erro: angulo invalido\") self.reset() def tan(self): # DESENHA A", "class circTrigo: def __init__(self): self.raio = 0 self.grau = 0", "180 or vquad == 270 or vquad == 360: #", "self.grau = grau % 360 t.left(self.grau) 
t.forward(self.raio) self.reset() # DEFINE", "= 0 self.grau = 0 self.seno = 0 self.cosseno =", "t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.left(90 - self.grau)", "* self.raio) * -1) t.right(90) t.forward(self.seno * self.raio) print (self.seno)", "self.raio * -1) t.right(90) t.forward(self.cosseno * self.raio * -1) print", "* self.raio) print (self.cosseno) elif self.quadrante == 3: t.right(self.grau -", "0 < vquad < 90: self.quadrante = 1 elif 90", "elif self.quadrante == 2: t.right(self.grau + 90) self.linha(self.seno * self.raio)", "def circulo(self, raio): # DESENHA O CIRCULO self.raio = raio", "4 if vquad == 0 or vquad == 90 or", "t.bgcolor(\"black\") t.pencolor(\"white\") def seta(self): # DESENHA UMA SETA t.left(90) t.forward(5)", "t.forward(self.raio) t.pencolor(\"green\") if self.quadrante == 1: t.right(self.grau + 90) self.linha(self.seno", "# DESENHA A TANGENTE t.left(self.grau) t.penup() t.pencolor(\"blue\") if self.quadrante ==", "self.raio) t.right(self.grau - 90) t.forward(self.tangente * self.raio) print (self.tangente) elif", "def tan(self): # DESENHA A TANGENTE t.left(self.grau) t.penup() t.pencolor(\"blue\") if", "* self.raio) t.left(90) t.forward(self.seno * self.raio) print (self.seno) else: print(\"Erro:", "t.penup() t.backward(self.raio + 50) t.pendown() self.linha((self.raio*2)+100) self.seta() self.reset() # EIXO", "RETORNA PRA POSICAO INICIAL t.penup() t.home() t.pendown() t.speed(0) t.pensize(2) t.pencolor(\"white\")", "DO SENO, COSSENO E TANGENTE. 
self.seno = math.sin(math.radians(self.grau)) self.cosseno =", "== 4: t.left(180 - self.grau) self.linha(self.cosseno * self.raio) t.left(90) t.forward(self.seno", "3: t.right(self.grau) self.linha(self.cosseno * self.raio * -1) t.left(90) t.forward(self.seno *", "== 3: t.right(self.grau - 90) self.linha(self.seno * self.raio * -1)", "if self.quadrante == 1: t.right(self.grau + 90) self.linha(self.seno * self.raio)", "self.raio) print (self.seno) else: print(\"Erro: angulo invalido\") self.reset() def csen(self):", "# RETORNA PRA POSICAO INICIAL t.penup() t.home() t.pendown() t.speed(0) t.pensize(2)", "O QUADRANTE DO ANGULO vquad = self.grau if 0 <", "self.raio) print (self.tangente) elif self.quadrante == 3: t.left(180) t.forward(self.raio) t.pendown()", "elif self.quadrante == 4: t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) -", "self.raio) print (self.tangente) elif self.quadrante == 4: t.forward(self.raio) t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2)", "= 1 elif 90 < vquad < 180: self.quadrante =", "t.pendown() self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio) t.left(90 - self.grau) t.forward(self.tangente", "SETA t.left(90) t.forward(5) t.right(120) t.forward(10) t.right(120) t.forward(10) t.right(120) t.forward(5) t.right(90)", "POSICAO INICIAL t.penup() t.home() t.pendown() t.speed(0) t.pensize(2) t.pencolor(\"white\") def circulo(self,", "self.quadrante == 4: t.left(180 - self.grau) self.linha(self.cosseno * self.raio) t.left(90)", "0 def sen(self): # DESENHA O SENO t.left(self.grau) t.forward(self.raio) t.pencolor(\"red\")", "t import math class circTrigo: def __init__(self): self.raio = 0", "t.right(90) t.forward(self.cosseno * self.raio) print (self.cosseno) elif self.quadrante == 3:", "# DEFINE O VALOR DO SENO, COSSENO E TANGENTE. 
self.seno", "% 360 t.left(self.grau) t.forward(self.raio) self.reset() # DEFINE O VALOR DO", "UMA SETA t.left(90) t.forward(5) t.right(120) t.forward(10) t.right(120) t.forward(10) t.right(120) t.forward(5)", "-1) t.left(90) t.forward(self.seno * self.raio * -1) print (self.seno) elif", "t.left(self.grau) t.forward(self.raio) t.pencolor(\"green\") if self.quadrante == 1: t.right(self.grau + 90)", "A TANGENTE t.left(self.grau) t.penup() t.pencolor(\"blue\") if self.quadrante == 1: t.forward(self.raio)", "or vquad == 180 or vquad == 270 or vquad", "t.penup() t.forward(self.raio) t.left(90) t.pendown() t.circle(self.raio) self.reset() def eixos(self): # EIXO", "self.cosseno = 0 self.tangente = 0 self.quadrante = 0 self.tema", "t.forward(self.seno * self.raio) print (self.seno) else: print(\"Erro: angulo invalido\") self.reset()", "DESENHA O ANGULO self.grau = grau % 360 t.left(self.grau) t.forward(self.raio)", "pixels = int(pxls//1) if pixels % 2 == 0: pixels", "Y t.left(90) t.penup() t.backward(self.raio + 50) t.pendown() self.linha((self.raio*2)+100) self.seta() self.reset()", "self.quadrante == 2: t.right(self.grau + 90) self.linha(self.seno * self.raio) t.right(90)", "self.quadrante = 1 elif 90 < vquad < 180: self.quadrante", "t.forward(10) t.right(120) t.forward(5) t.right(90) def linha(self, pxls): # DESENHA UMA", "= '' t.bgcolor(\"black\") t.pencolor(\"white\") def seta(self): # DESENHA UMA SETA", "t.forward(self.tangente * self.raio) print (self.tangente) else: print(\"Erro: angulo invalido\") self.reset()", "pixels + 1 for x in range(0, pixels//10): t.pendown() t.forward(5)", "(self.raio**2)) - self.raio) t.right(self.grau + 90) t.forward(self.tangente * self.raio) print", "= raio t.right(90) t.penup() t.forward(self.raio) t.left(90) t.pendown() t.circle(self.raio) self.reset() def", "* -1) t.left(90) t.forward(self.cosseno * self.raio) print (self.cosseno) else: print(\"Erro:", "self.reset() def tan(self): # DESENHA A TANGENTE t.left(self.grau) t.penup() 
t.pencolor(\"blue\")", "t.right(120) t.forward(10) t.right(120) t.forward(5) t.right(90) def linha(self, pxls): # DESENHA", "t.left(90) t.forward(self.seno * self.raio * -1) print (self.seno) elif self.quadrante", "self.linha((self.raio*2)+100) self.seta() self.reset() # EIXO Y t.left(90) t.penup() t.backward(self.raio +", "self.quadrante == 2: t.right(self.grau) self.linha((self.cosseno * self.raio) * -1) t.right(90)", "self.quadrante = 0 def sen(self): # DESENHA O SENO t.left(self.grau)", "O CIRCULO self.raio = raio t.right(90) t.penup() t.forward(self.raio) t.left(90) t.pendown()", "4: t.right(self.grau - 90) self.linha(self.seno * self.raio * -1) t.left(90)", "x in range(0, pixels//10): t.pendown() t.forward(5) t.penup() t.forward(5) t.pendown() t.forward(pixels%10)", "* -1) t.left(90) t.forward(self.seno * self.raio * -1) print (self.seno)", "* self.raio) print (self.tangente) elif self.quadrante == 2: t.left(180) t.forward(self.raio)", "t.forward(self.seno * self.raio) print (self.seno) elif self.quadrante == 3: t.right(self.grau)", "PRA POSICAO INICIAL t.penup() t.home() t.pendown() t.speed(0) t.pensize(2) t.pencolor(\"white\") def", "print (self.cosseno) elif self.quadrante == 4: t.right(self.grau - 90) self.linha(self.seno", "# EIXO Y t.left(90) t.penup() t.backward(self.raio + 50) t.pendown() self.linha((self.raio*2)+100)", "(self.raio**2)) - self.raio) t.right(90 + self.grau) t.forward(self.tangente * self.raio) print", "+ (self.raio**2)) - self.raio) t.left(90 - self.grau) t.forward(self.tangente * self.raio)", "== 2: t.right(self.grau) self.linha((self.cosseno * self.raio) * -1) t.right(90) t.forward(self.seno", "X t.penup() t.backward(self.raio + 50) t.pendown() self.linha((self.raio*2)+100) self.seta() self.reset() #", "t.left(self.grau) t.forward(self.raio) self.reset() # DEFINE O VALOR DO SENO, COSSENO", "0 or vquad == 90 or vquad == 180 or", "* self.raio) print (self.seno) elif self.quadrante == 2: t.right(self.grau) self.linha((self.cosseno", 
"raio t.right(90) t.penup() t.forward(self.raio) t.left(90) t.pendown() t.circle(self.raio) self.reset() def eixos(self):" ]
[ "<gh_stars>1-10 def pick_food(name): if name == \"chima\": return \"chicken\" else:", "if name == \"chima\": return \"chicken\" else: return \"dry food\"", "def pick_food(name): if name == \"chima\": return \"chicken\" else: return", "pick_food(name): if name == \"chima\": return \"chicken\" else: return \"dry" ]
[ "attentions[..., :-1, :-1] # remove cls token attentions if tokens[:,", "= x.transpose(0, 1) if not padding_mask.any(): padding_mask = None for", "None, None, :, :] result[\"attentions\"] = attentions if return_contacts: contacts", "contacts return result def _predict_contacts_from_token_attentions(self, tokens, attentions): # remove eos", "attentions = attentions.view(batch_size, layers * heads, seqlen, seqlen) return self.contact_head(attentions)", "under the MIT license found in the # LICENSE file", "ContactPredictionHead(self.args.layers * self.args.attention_heads) def _init_submodules_esm1b(self): self._init_submodules_common() self.embed_scale = 1 self.embed_positions", "repr_layers: hidden_representations[layer_idx + 1] = x.transpose(0, 1) if need_head_weights: #", "_ in range(self.args.layers) ] ) self.contact_head = ContactPredictionHead(self.args.layers * self.args.attention_heads)", "an additional null-token for attention, which we remove attentions =", "= math.sqrt(self.args.embed_dim) self.embed_positions = SinusoidalPositionalEmbedding(self.args.embed_dim, self.padding_idx) self.embed_out = nn.Parameter( torch.zeros((self.alphabet_size,", "x * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None,", "LICENSE file in the root directory of this source tree.", "this source tree. 
import math import torch import torch.nn as", "None, :, :] attentions = attentions[..., :-1, :-1] # remove", "= 0.15 * 0.8 src_lengths = (~padding_mask).sum(-1) mask_ratio_observed = (tokens", "x C mask_ratio_train = 0.15 * 0.8 src_lengths = (~padding_mask).sum(-1)", "_init_submodules_esm1b(self): self._init_submodules_common() self.embed_scale = 1 self.embed_positions = LearnedPositionalEmbedding(self.args.max_positions, self.args.embed_dim, self.padding_idx)", "if tokens[:, -1].eq(self.eos_idx).any(): eos_mask = tokens.ne(self.eos_idx).to(attentions) eos_mask = eos_mask.unsqueeze(1) *", "cls token attentions if tokens[:, 0].eq(self.cls_idx).all(): attentions = attentions[..., 1:,", "attn_weights.append(attn.transpose(1, 0)) if self.model_version == 'ESM-1b': x = self.emb_layer_norm_after(x) x", "T x T attentions = torch.stack(attn_weights, 1) if self.model_version ==", "x * (1 - padding_mask.unsqueeze(-1).type_as(x)) repr_layers = set(repr_layers) hidden_representations =", "type=int, metavar=\"N\", help=\"embedding dimension for FFN\", ) parser.add_argument( \"--attention_heads\", default=20,", "def _init_submodules_esm1b(self): self._init_submodules_common() self.embed_scale = 1 self.embed_positions = LearnedPositionalEmbedding(self.args.max_positions, self.args.embed_dim,", "self.args.ffn_embed_dim, self.args.attention_heads, add_bias_kv=(self.model_version != 'ESM-1b'), use_esm1b_layer_norm=(self.model_version == 'ESM-1b'), ) for", "x: B x T x C mask_ratio_train = 0.15 *", "padding_mask.any(): padding_mask = None for layer_idx, layer in enumerate(self.layers): x,", "# x: B x T x C mask_ratio_train = 0.15", "C mask_ratio_train = 0.15 * 0.8 src_lengths = (~padding_mask).sum(-1) mask_ratio_observed", "(~padding_mask).sum(-1) mask_ratio_observed = (tokens == self.mask_idx).sum(-1).float() / src_lengths x =", "the MIT license found in the # LICENSE file in", "= SinusoidalPositionalEmbedding(self.args.embed_dim, self.padding_idx) self.embed_out = nn.Parameter( 
torch.zeros((self.alphabet_size, self.args.embed_dim)) ) self.embed_out_bias", "of this source tree. import math import torch import torch.nn", "if need_head_weights: # attentions: B x L x H x", "not None: attention_mask = (1 - padding_mask.type_as(attentions)) attention_mask = attention_mask.unsqueeze(1)", ":] result[\"attentions\"] = attentions if return_contacts: contacts = self._predict_contacts_from_token_attentions(tokens, attentions)", "/ src_lengths x = x * (1 - mask_ratio_train) /", "self.args.arch == 'roberta_large': self.model_version = 'ESM-1b' self._init_submodules_esm1b() else: self.model_version =", "= nn.Parameter( torch.zeros((self.alphabet_size, self.args.embed_dim)) ) self.embed_out_bias = None if self.args.final_bias:", "license found in the # LICENSE file in the root", "parser.add_argument( \"--logit_bias\", action=\"store_true\", help=\"whether to apply bias to logits\" )", "= attentions.view(batch_size, layers * heads, seqlen, seqlen) return self.contact_head(attentions) def", "x = F.linear(x, self.embed_out, bias=self.embed_out_bias) x = x.transpose(0, 1) #", "need_head_weights: # (H, B, T, T) => (B, H, T,", "help=\"number of layers\" ) parser.add_argument( \"--embed_dim\", default=1280, type=int, metavar=\"N\", help=\"embedding", "TransformerLayer( self.args.embed_dim, self.args.ffn_embed_dim, self.args.attention_heads, add_bias_kv=(self.model_version != 'ESM-1b'), use_esm1b_layer_norm=(self.model_version == 'ESM-1b'),", "attentions: B x L x H x T x T", "self.model_version == 'ESM-1b': x = self.emb_layer_norm_after(x) x = x.transpose(0, 1)", "1] = x.transpose(0, 1) if need_head_weights: # (H, B, T,", "attention heads\", ) def __init__(self, args, alphabet): super().__init__() self.args =", "1) # (T, B, E) => (B, T, E) #", "x T x C mask_ratio_train = 0.15 * 0.8 src_lengths", "tree. 
import math import torch import torch.nn as nn import", "nn.Embedding( self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx ) self.layers = nn.ModuleList( [ TransformerLayer(", "logits\" ) parser.add_argument( \"--ffn_embed_dim\", default=5120, type=int, metavar=\"N\", help=\"embedding dimension for", "apply bias to logits\" ) parser.add_argument( \"--ffn_embed_dim\", default=5120, type=int, metavar=\"N\",", "False): x.masked_fill_((tokens == self.mask_idx).unsqueeze(-1), 0.0) # x: B x T", "(T, B, E) x = x.transpose(0, 1) if not padding_mask.any():", "def add_args(cls, parser): parser.add_argument( \"--num_layers\", default=36, type=int, metavar=\"N\", help=\"number of", "repr_layers = set(repr_layers) hidden_representations = {} if 0 in repr_layers:", "layers * heads, seqlen, seqlen) return self.contact_head(attentions) def predict_contacts(self, tokens):", "= attention_mask.unsqueeze(1) * attention_mask.unsqueeze(2) attentions = attentions * attention_mask[:, None,", "output_dim=self.alphabet_size, weight=self.embed_tokens.weight ) def _init_submodules_esm1(self): self._init_submodules_common() self.embed_scale = math.sqrt(self.args.embed_dim) self.embed_positions", "additional null-token for attention, which we remove attentions = attentions[...,", "- padding_mask.type_as(attentions)) attention_mask = attention_mask.unsqueeze(1) * attention_mask.unsqueeze(2) attentions = attentions", "self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim) self.lm_head = RobertaLMHead( embed_dim=self.args.embed_dim, output_dim=self.alphabet_size, weight=self.embed_tokens.weight )", "0].eq(self.cls_idx).all(): attentions = attentions[..., 1:, 1:] batch_size, layers, heads, seqlen,", "metavar=\"N\", help=\"number of layers\" ) parser.add_argument( \"--embed_dim\", default=1280, type=int, metavar=\"N\",", "type=int, metavar=\"N\", help=\"number of layers\" ) parser.add_argument( \"--embed_dim\", default=1280, type=int,", ".modules import ( TransformerLayer, 
LearnedPositionalEmbedding, SinusoidalPositionalEmbedding, RobertaLMHead, ESM1bLayerNorm, ContactPredictionHead, )", "remove attentions = attentions[..., :-1] if padding_mask is not None:", "# remove cls token attentions if tokens[:, 0].eq(self.cls_idx).all(): attentions =", "if need_head_weights: # (H, B, T, T) => (B, H,", "remove cls token attentions if tokens[:, 0].eq(self.cls_idx).all(): attentions = attentions[...,", "B, E) => (B, T, E) # last hidden representation", "bias=self.embed_out_bias) x = x.transpose(0, 1) # (T, B, E) =>", "mask_ratio_train = 0.15 * 0.8 src_lengths = (~padding_mask).sum(-1) mask_ratio_observed =", "import torch import torch.nn as nn import torch.nn.functional as F", "import ( TransformerLayer, LearnedPositionalEmbedding, SinusoidalPositionalEmbedding, RobertaLMHead, ESM1bLayerNorm, ContactPredictionHead, ) class", "self.args.attention_heads, add_bias_kv=(self.model_version != 'ESM-1b'), use_esm1b_layer_norm=(self.model_version == 'ESM-1b'), ) for _", "ProteinBertModel(nn.Module): @classmethod def add_args(cls, parser): parser.add_argument( \"--num_layers\", default=36, type=int, metavar=\"N\",", "(T, B, E) => (B, T, E) # last hidden", "if not padding_mask.any(): padding_mask = None for layer_idx, layer in", "seqlen, seqlen) return self.contact_head(attentions) def predict_contacts(self, tokens): return self(tokens, return_contacts=True)[\"contacts\"]", "x T attentions = torch.stack(attn_weights, 1) if self.model_version == \"ESM-1\":", "set(repr_layers) hidden_representations = {} if 0 in repr_layers: hidden_representations[0] =", "root directory of this source tree. 
import math import torch", "args self.alphabet_size = len(alphabet) self.padding_idx = alphabet.padding_idx self.mask_idx = alphabet.mask_idx", "if self.args.final_bias: self.embed_out_bias = nn.Parameter(torch.zeros(self.alphabet_size)) def forward(self, tokens, repr_layers=[], need_head_weights=False,", "_predict_contacts_from_token_attentions(self, tokens, attentions): # remove eos token attentions if tokens[:,", "=> (B, H, T, T) attn_weights.append(attn.transpose(1, 0)) if self.model_version ==", "alphabet.cls_idx self.eos_idx = alphabet.eos_idx if self.args.arch == 'roberta_large': self.model_version =", "'ESM-1b': x = self.emb_layer_norm_after(x) x = x.transpose(0, 1) # (T,", "self.args.embed_dim)) ) self.embed_out_bias = None if self.args.final_bias: self.embed_out_bias = nn.Parameter(torch.zeros(self.alphabet_size))", "self.embed_out, bias=self.embed_out_bias) x = x.transpose(0, 1) # (T, B, E)", "x = self.lm_head(x) else: x = F.linear(x, self.embed_out, bias=self.embed_out_bias) x", "FFN\", ) parser.add_argument( \"--attention_heads\", default=20, type=int, metavar=\"N\", help=\"number of attention", ") self.embed_out_bias = None if self.args.final_bias: self.embed_out_bias = nn.Parameter(torch.zeros(self.alphabet_size)) def", "= x + self.embed_positions(tokens) if self.model_version == 'ESM-1b': x =", "hidden_representations = {} if 0 in repr_layers: hidden_representations[0] = x", "x.transpose(0, 1) # (T, B, E) => (B, T, E)", "if self.model_version == 'ESM-1b': x = self.emb_layer_norm_before(x) if padding_mask is", "None: attention_mask = (1 - padding_mask.type_as(attentions)) attention_mask = attention_mask.unsqueeze(1) *", "'ESM-1' self._init_submodules_esm1() def _init_submodules_common(self): self.embed_tokens = nn.Embedding( self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx", "return_contacts: contacts = self._predict_contacts_from_token_attentions(tokens, attentions) result[\"contacts\"] = contacts return result", "True assert 
tokens.ndim == 2 padding_mask = tokens.eq(self.padding_idx) # B,", "x + self.embed_positions(tokens) if self.model_version == 'ESM-1b': x = self.emb_layer_norm_before(x)", "padding_mask = None for layer_idx, layer in enumerate(self.layers): x, attn", "affiliates. # # This source code is licensed under the", "as F from .modules import ( TransformerLayer, LearnedPositionalEmbedding, SinusoidalPositionalEmbedding, RobertaLMHead,", "attentions if tokens[:, -1].eq(self.eos_idx).any(): eos_mask = tokens.ne(self.eos_idx).to(attentions) eos_mask = eos_mask.unsqueeze(1)", "self.embed_positions = LearnedPositionalEmbedding(self.args.max_positions, self.args.embed_dim, self.padding_idx) self.emb_layer_norm_before = ESM1bLayerNorm(self.args.embed_dim) self.emb_layer_norm_after =", "LearnedPositionalEmbedding(self.args.max_positions, self.args.embed_dim, self.padding_idx) self.emb_layer_norm_before = ESM1bLayerNorm(self.args.embed_dim) self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim) self.lm_head", "to logits\" ) parser.add_argument( \"--ffn_embed_dim\", default=5120, type=int, metavar=\"N\", help=\"embedding dimension", "parser.add_argument( \"--ffn_embed_dim\", default=5120, type=int, metavar=\"N\", help=\"embedding dimension for FFN\", )", "super().__init__() self.args = args self.alphabet_size = len(alphabet) self.padding_idx = alphabet.padding_idx", "hidden_representations[layer_idx + 1] = x.transpose(0, 1) if need_head_weights: # (H,", "representation should have layer norm applied if (layer_idx + 1)", "parser.add_argument( \"--num_layers\", default=36, type=int, metavar=\"N\", help=\"number of layers\" ) parser.add_argument(", "# last hidden representation should have layer norm applied if", "attentions if tokens[:, 0].eq(self.cls_idx).all(): attentions = attentions[..., 1:, 1:] batch_size,", "help=\"whether to apply bias to logits\" ) parser.add_argument( \"--ffn_embed_dim\", default=5120,", "return_contacts=False): if return_contacts: need_head_weights = 
True assert tokens.ndim == 2", "x, \"representations\": hidden_representations} if need_head_weights: # attentions: B x L", "ESM-1 models have an additional null-token for attention, which we", "tokens, repr_layers=[], need_head_weights=False, return_contacts=False): if return_contacts: need_head_weights = True assert", "(B, H, T, T) attn_weights.append(attn.transpose(1, 0)) if self.model_version == 'ESM-1b':", "0.15 * 0.8 src_lengths = (~padding_mask).sum(-1) mask_ratio_observed = (tokens ==", "F from .modules import ( TransformerLayer, LearnedPositionalEmbedding, SinusoidalPositionalEmbedding, RobertaLMHead, ESM1bLayerNorm,", "padding_mask.unsqueeze(-1).type_as(x)) repr_layers = set(repr_layers) hidden_representations = {} if 0 in", "'ESM-1b'), ) for _ in range(self.args.layers) ] ) self.contact_head =", "= self.lm_head(x) else: x = F.linear(x, self.embed_out, bias=self.embed_out_bias) x =", "attention_mask = (1 - padding_mask.type_as(attentions)) attention_mask = attention_mask.unsqueeze(1) * attention_mask.unsqueeze(2)", "if tokens[:, 0].eq(self.cls_idx).all(): attentions = attentions[..., 1:, 1:] batch_size, layers,", "1:, 1:] batch_size, layers, heads, seqlen, _ = attentions.size() attentions", "torch.nn as nn import torch.nn.functional as F from .modules import", "self.padding_idx = alphabet.padding_idx self.mask_idx = alphabet.mask_idx self.cls_idx = alphabet.cls_idx self.eos_idx", "and its affiliates. 
# # This source code is licensed", "layers\" ) parser.add_argument( \"--embed_dim\", default=1280, type=int, metavar=\"N\", help=\"embedding dimension\" )", "alphabet): super().__init__() self.args = args self.alphabet_size = len(alphabet) self.padding_idx =", "from .modules import ( TransformerLayer, LearnedPositionalEmbedding, SinusoidalPositionalEmbedding, RobertaLMHead, ESM1bLayerNorm, ContactPredictionHead,", "\"--ffn_embed_dim\", default=5120, type=int, metavar=\"N\", help=\"embedding dimension for FFN\", ) parser.add_argument(", "we remove attentions = attentions[..., :-1] if padding_mask is not", "# (T, B, E) => (B, T, E) # last", "attentions[..., 1:, 1:] batch_size, layers, heads, seqlen, _ = attentions.size()", "T attentions = torch.stack(attn_weights, 1) if self.model_version == \"ESM-1\": #", "Copyright (c) Facebook, Inc. and its affiliates. # # This", ") parser.add_argument( \"--attention_heads\", default=20, type=int, metavar=\"N\", help=\"number of attention heads\",", "+ 1] = x.transpose(0, 1) if need_head_weights: # (H, B,", "'roberta_large': self.model_version = 'ESM-1b' self._init_submodules_esm1b() else: self.model_version = 'ESM-1' self._init_submodules_esm1()", "self.embed_out_bias = None if self.args.final_bias: self.embed_out_bias = nn.Parameter(torch.zeros(self.alphabet_size)) def forward(self,", "None for layer_idx, layer in enumerate(self.layers): x, attn = layer(x,", "should have layer norm applied if (layer_idx + 1) in", "{\"logits\": x, \"representations\": hidden_representations} if need_head_weights: # attentions: B x", "== 'ESM-1b': x = self.emb_layer_norm_after(x) x = x.transpose(0, 1) #", "H x T x T attentions = torch.stack(attn_weights, 1) if", ") parser.add_argument( \"--embed_dim\", default=1280, type=int, metavar=\"N\", help=\"embedding dimension\" ) parser.add_argument(", "self.args.embed_dim, self.padding_idx) self.emb_layer_norm_before = ESM1bLayerNorm(self.args.embed_dim) self.emb_layer_norm_after = 
ESM1bLayerNorm(self.args.embed_dim) self.lm_head =", "help=\"number of attention heads\", ) def __init__(self, args, alphabet): super().__init__()", "x H x T x T attentions = torch.stack(attn_weights, 1)", "@classmethod def add_args(cls, parser): parser.add_argument( \"--num_layers\", default=36, type=int, metavar=\"N\", help=\"number", "# LICENSE file in the root directory of this source", "B, T x = self.embed_scale * self.embed_tokens(tokens) if getattr(self.args, 'token_dropout',", "to apply bias to logits\" ) parser.add_argument( \"--ffn_embed_dim\", default=5120, type=int,", "def predict_contacts(self, tokens): return self(tokens, return_contacts=True)[\"contacts\"] @property def num_layers(self): return", "\"--attention_heads\", default=20, type=int, metavar=\"N\", help=\"number of attention heads\", ) def", "def _predict_contacts_from_token_attentions(self, tokens, attentions): # remove eos token attentions if", "found in the # LICENSE file in the root directory", "alphabet.padding_idx self.mask_idx = alphabet.mask_idx self.cls_idx = alphabet.cls_idx self.eos_idx = alphabet.eos_idx", "= x * (1 - padding_mask.unsqueeze(-1).type_as(x)) repr_layers = set(repr_layers) hidden_representations", "nn import torch.nn.functional as F from .modules import ( TransformerLayer,", "attentions = attentions[..., :-1] if padding_mask is not None: attention_mask", "getattr(self.args, 'token_dropout', False): x.masked_fill_((tokens == self.mask_idx).unsqueeze(-1), 0.0) # x: B", "norm applied if (layer_idx + 1) in repr_layers: hidden_representations[layer_idx +", "forward(self, tokens, repr_layers=[], need_head_weights=False, return_contacts=False): if return_contacts: need_head_weights = True", "x, attn = layer(x, self_attn_padding_mask=padding_mask, need_head_weights=need_head_weights) if (layer_idx + 1)", "for _ in range(self.args.layers) ] ) self.contact_head = ContactPredictionHead(self.args.layers *", "= (tokens == self.mask_idx).sum(-1).float() / src_lengths x = x *", 
"self.contact_head(attentions) def predict_contacts(self, tokens): return self(tokens, return_contacts=True)[\"contacts\"] @property def num_layers(self):", "(1 - padding_mask.unsqueeze(-1).type_as(x)) repr_layers = set(repr_layers) hidden_representations = {} if", "E) # last hidden representation should have layer norm applied", "def _init_submodules_common(self): self.embed_tokens = nn.Embedding( self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx ) self.layers", "null-token for attention, which we remove attentions = attentions[..., :-1]", "* eos_mask[:, None, None, :, :] attentions = attentions[..., :-1,", ":-1] # remove cls token attentions if tokens[:, 0].eq(self.cls_idx).all(): attentions", "=> (B, T, E) result = {\"logits\": x, \"representations\": hidden_representations}", "padding_mask.type_as(attentions)) attention_mask = attention_mask.unsqueeze(1) * attention_mask.unsqueeze(2) attentions = attentions *", ") for _ in range(self.args.layers) ] ) self.contact_head = ContactPredictionHead(self.args.layers", "self._init_submodules_esm1() def _init_submodules_common(self): self.embed_tokens = nn.Embedding( self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx )", "= eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2) attentions = attentions * eos_mask[:, None,", "1] = x x = self.lm_head(x) else: x = F.linear(x,", "None, None] x = x + self.embed_positions(tokens) if self.model_version ==", "attentions) result[\"contacts\"] = contacts return result def _predict_contacts_from_token_attentions(self, tokens, attentions):", "self.layers = nn.ModuleList( [ TransformerLayer( self.args.embed_dim, self.args.ffn_embed_dim, self.args.attention_heads, add_bias_kv=(self.model_version !=", "T) attn_weights.append(attn.transpose(1, 0)) if self.model_version == 'ESM-1b': x = self.emb_layer_norm_after(x)", "use_esm1b_layer_norm=(self.model_version == 'ESM-1b'), ) for _ in range(self.args.layers) ] )", "attentions.size() attentions = 
attentions.view(batch_size, layers * heads, seqlen, seqlen) return", "- mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None] x =", "x = x + self.embed_positions(tokens) if self.model_version == 'ESM-1b': x", "= True assert tokens.ndim == 2 padding_mask = tokens.eq(self.padding_idx) #", "+ 1] = x x = self.lm_head(x) else: x =", "== 2 padding_mask = tokens.eq(self.padding_idx) # B, T x =", "heads\", ) def __init__(self, args, alphabet): super().__init__() self.args = args", "of layers\" ) parser.add_argument( \"--embed_dim\", default=1280, type=int, metavar=\"N\", help=\"embedding dimension\"", "self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx ) self.layers = nn.ModuleList( [ TransformerLayer( self.args.embed_dim,", "B, E) => (B, T, E) result = {\"logits\": x,", "eos token attentions if tokens[:, -1].eq(self.eos_idx).any(): eos_mask = tokens.ne(self.eos_idx).to(attentions) eos_mask", "padding_mask is not None: x = x * (1 -", "= None for layer_idx, layer in enumerate(self.layers): x, attn =", "(c) Facebook, Inc. and its affiliates. 
# # This source", "seqlen) return self.contact_head(attentions) def predict_contacts(self, tokens): return self(tokens, return_contacts=True)[\"contacts\"] @property", "= x.transpose(0, 1) # (T, B, E) => (B, T,", "== self.mask_idx).unsqueeze(-1), 0.0) # x: B x T x C", "in repr_layers: hidden_representations[layer_idx + 1] = x.transpose(0, 1) if need_head_weights:", "# (H, B, T, T) => (B, H, T, T)", "attention_mask.unsqueeze(1) * attention_mask.unsqueeze(2) attentions = attentions * attention_mask[:, None, None,", "not padding_mask.any(): padding_mask = None for layer_idx, layer in enumerate(self.layers):", "repr_layers=[], need_head_weights=False, return_contacts=False): if return_contacts: need_head_weights = True assert tokens.ndim", "self._init_submodules_common() self.embed_scale = math.sqrt(self.args.embed_dim) self.embed_positions = SinusoidalPositionalEmbedding(self.args.embed_dim, self.padding_idx) self.embed_out =", "nn.Parameter(torch.zeros(self.alphabet_size)) def forward(self, tokens, repr_layers=[], need_head_weights=False, return_contacts=False): if return_contacts: need_head_weights", "=> (B, T, E) # last hidden representation should have", "'ESM-1b': x = self.emb_layer_norm_before(x) if padding_mask is not None: x", "file in the root directory of this source tree. 
import", "'ESM-1b' self._init_submodules_esm1b() else: self.model_version = 'ESM-1' self._init_submodules_esm1() def _init_submodules_common(self): self.embed_tokens", "= torch.stack(attn_weights, 1) if self.model_version == \"ESM-1\": # ESM-1 models", "self.embed_out = nn.Parameter( torch.zeros((self.alphabet_size, self.args.embed_dim)) ) self.embed_out_bias = None if", "parser): parser.add_argument( \"--num_layers\", default=36, type=int, metavar=\"N\", help=\"number of layers\" )", "result def _predict_contacts_from_token_attentions(self, tokens, attentions): # remove eos token attentions", "models have an additional null-token for attention, which we remove", "hidden_representations[layer_idx + 1] = x x = self.lm_head(x) else: x", "This source code is licensed under the MIT license found", "attn_weights = [] # (B, T, E) => (T, B,", "* self.args.attention_heads) def _init_submodules_esm1b(self): self._init_submodules_common() self.embed_scale = 1 self.embed_positions =", "attentions * eos_mask[:, None, None, :, :] attentions = attentions[...,", "alphabet.eos_idx if self.args.arch == 'roberta_large': self.model_version = 'ESM-1b' self._init_submodules_esm1b() else:", "ContactPredictionHead, ) class ProteinBertModel(nn.Module): @classmethod def add_args(cls, parser): parser.add_argument( \"--num_layers\",", "- mask_ratio_observed)[:, None, None] x = x + self.embed_positions(tokens) if", "self.embed_scale = 1 self.embed_positions = LearnedPositionalEmbedding(self.args.max_positions, self.args.embed_dim, self.padding_idx) self.emb_layer_norm_before =", "tokens.eq(self.padding_idx) # B, T x = self.embed_scale * self.embed_tokens(tokens) if", "ESM1bLayerNorm(self.args.embed_dim) self.lm_head = RobertaLMHead( embed_dim=self.args.embed_dim, output_dim=self.alphabet_size, weight=self.embed_tokens.weight ) def _init_submodules_esm1(self):", "= 'ESM-1' self._init_submodules_esm1() def _init_submodules_common(self): self.embed_tokens = nn.Embedding( self.alphabet_size, 
self.args.embed_dim,", "self.contact_head = ContactPredictionHead(self.args.layers * self.args.attention_heads) def _init_submodules_esm1b(self): self._init_submodules_common() self.embed_scale =", "torch.nn.functional as F from .modules import ( TransformerLayer, LearnedPositionalEmbedding, SinusoidalPositionalEmbedding,", ") self.layers = nn.ModuleList( [ TransformerLayer( self.args.embed_dim, self.args.ffn_embed_dim, self.args.attention_heads, add_bias_kv=(self.model_version", "= x if need_head_weights: attn_weights = [] # (B, T,", "eos_mask = tokens.ne(self.eos_idx).to(attentions) eos_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2) attentions =", "* attention_mask.unsqueeze(2) attentions = attentions * attention_mask[:, None, None, :,", "self._init_submodules_common() self.embed_scale = 1 self.embed_positions = LearnedPositionalEmbedding(self.args.max_positions, self.args.embed_dim, self.padding_idx) self.emb_layer_norm_before", "licensed under the MIT license found in the # LICENSE", "self.embed_positions(tokens) if self.model_version == 'ESM-1b': x = self.emb_layer_norm_before(x) if padding_mask", "self.model_version == \"ESM-1\": # ESM-1 models have an additional null-token", "mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None] x = x", ") self.contact_head = ContactPredictionHead(self.args.layers * self.args.attention_heads) def _init_submodules_esm1b(self): self._init_submodules_common() self.embed_scale", "self.embed_out_bias = nn.Parameter(torch.zeros(self.alphabet_size)) def forward(self, tokens, repr_layers=[], need_head_weights=False, return_contacts=False): if", "is not None: x = x * (1 - padding_mask.unsqueeze(-1).type_as(x))", "layer norm applied if (layer_idx + 1) in repr_layers: hidden_representations[layer_idx", "MIT license found in the # LICENSE file in the", "None] x = x + self.embed_positions(tokens) if self.model_version == 'ESM-1b':", "need_head_weights: # attentions: B x L x H x T", 
"self._predict_contacts_from_token_attentions(tokens, attentions) result[\"contacts\"] = contacts return result def _predict_contacts_from_token_attentions(self, tokens,", "= nn.Parameter(torch.zeros(self.alphabet_size)) def forward(self, tokens, repr_layers=[], need_head_weights=False, return_contacts=False): if return_contacts:", "import torch.nn.functional as F from .modules import ( TransformerLayer, LearnedPositionalEmbedding,", "not None: x = x * (1 - padding_mask.unsqueeze(-1).type_as(x)) repr_layers", "\"representations\": hidden_representations} if need_head_weights: # attentions: B x L x", "E) => (B, T, E) result = {\"logits\": x, \"representations\":", "self._init_submodules_esm1b() else: self.model_version = 'ESM-1' self._init_submodules_esm1() def _init_submodules_common(self): self.embed_tokens =", "None, :, :] result[\"attentions\"] = attentions if return_contacts: contacts =", "add_bias_kv=(self.model_version != 'ESM-1b'), use_esm1b_layer_norm=(self.model_version == 'ESM-1b'), ) for _ in", ") def _init_submodules_esm1(self): self._init_submodules_common() self.embed_scale = math.sqrt(self.args.embed_dim) self.embed_positions = SinusoidalPositionalEmbedding(self.args.embed_dim,", "= args self.alphabet_size = len(alphabet) self.padding_idx = alphabet.padding_idx self.mask_idx =", "attentions = attentions[..., :-1, :-1] # remove cls token attentions", "self.mask_idx).sum(-1).float() / src_lengths x = x * (1 - mask_ratio_train)", "parser.add_argument( \"--embed_dim\", default=1280, type=int, metavar=\"N\", help=\"embedding dimension\" ) parser.add_argument( \"--logit_bias\",", "attentions = torch.stack(attn_weights, 1) if self.model_version == \"ESM-1\": # ESM-1", "weight=self.embed_tokens.weight ) def _init_submodules_esm1(self): self._init_submodules_common() self.embed_scale = math.sqrt(self.args.embed_dim) self.embed_positions =", "TransformerLayer, LearnedPositionalEmbedding, SinusoidalPositionalEmbedding, RobertaLMHead, ESM1bLayerNorm, 
ContactPredictionHead, ) class ProteinBertModel(nn.Module): @classmethod", "1) if need_head_weights: # (H, B, T, T) => (B,", "= self.emb_layer_norm_after(x) x = x.transpose(0, 1) # (T, B, E)", "of attention heads\", ) def __init__(self, args, alphabet): super().__init__() self.args", "* eos_mask.unsqueeze(2) attentions = attentions * eos_mask[:, None, None, :,", "\"--embed_dim\", default=1280, type=int, metavar=\"N\", help=\"embedding dimension\" ) parser.add_argument( \"--logit_bias\", action=\"store_true\",", "= x.transpose(0, 1) if need_head_weights: # (H, B, T, T)", "torch.zeros((self.alphabet_size, self.args.embed_dim)) ) self.embed_out_bias = None if self.args.final_bias: self.embed_out_bias =", "tokens.ndim == 2 padding_mask = tokens.eq(self.padding_idx) # B, T x", "if (layer_idx + 1) in repr_layers: hidden_representations[layer_idx + 1] =", "predict_contacts(self, tokens): return self(tokens, return_contacts=True)[\"contacts\"] @property def num_layers(self): return self.args.layers", "math import torch import torch.nn as nn import torch.nn.functional as", "self.emb_layer_norm_before = ESM1bLayerNorm(self.args.embed_dim) self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim) self.lm_head = RobertaLMHead( embed_dim=self.args.embed_dim,", "need_head_weights=False, return_contacts=False): if return_contacts: need_head_weights = True assert tokens.ndim ==", ") class ProteinBertModel(nn.Module): @classmethod def add_args(cls, parser): parser.add_argument( \"--num_layers\", default=36,", "= alphabet.cls_idx self.eos_idx = alphabet.eos_idx if self.args.arch == 'roberta_large': self.model_version", "(layer_idx + 1) in repr_layers: hidden_representations[layer_idx + 1] = x", "in the # LICENSE file in the root directory of", "= alphabet.padding_idx self.mask_idx = alphabet.mask_idx self.cls_idx = alphabet.cls_idx self.eos_idx =", "dimension for FFN\", ) parser.add_argument( \"--attention_heads\", default=20, type=int, metavar=\"N\", help=\"number", "x L x H 
x T x T attentions =", "= alphabet.mask_idx self.cls_idx = alphabet.cls_idx self.eos_idx = alphabet.eos_idx if self.args.arch", "x = x * (1 - padding_mask.unsqueeze(-1).type_as(x)) repr_layers = set(repr_layers)", ") def __init__(self, args, alphabet): super().__init__() self.args = args self.alphabet_size", "0.0) # x: B x T x C mask_ratio_train =", "_ = attentions.size() attentions = attentions.view(batch_size, layers * heads, seqlen,", "in range(self.args.layers) ] ) self.contact_head = ContactPredictionHead(self.args.layers * self.args.attention_heads) def", "self.args.final_bias: self.embed_out_bias = nn.Parameter(torch.zeros(self.alphabet_size)) def forward(self, tokens, repr_layers=[], need_head_weights=False, return_contacts=False):", "default=5120, type=int, metavar=\"N\", help=\"embedding dimension for FFN\", ) parser.add_argument( \"--attention_heads\",", "= set(repr_layers) hidden_representations = {} if 0 in repr_layers: hidden_representations[0]", "attentions = attentions * eos_mask[:, None, None, :, :] attentions", "= {} if 0 in repr_layers: hidden_representations[0] = x if", ") parser.add_argument( \"--ffn_embed_dim\", default=5120, type=int, metavar=\"N\", help=\"embedding dimension for FFN\",", "L x H x T x T attentions = torch.stack(attn_weights,", "have an additional null-token for attention, which we remove attentions", "self.args = args self.alphabet_size = len(alphabet) self.padding_idx = alphabet.padding_idx self.mask_idx", "+ 1) in repr_layers: hidden_representations[layer_idx + 1] = x.transpose(0, 1)", "self.model_version = 'ESM-1' self._init_submodules_esm1() def _init_submodules_common(self): self.embed_tokens = nn.Embedding( self.alphabet_size,", "= (1 - padding_mask.type_as(attentions)) attention_mask = attention_mask.unsqueeze(1) * attention_mask.unsqueeze(2) attentions", "LearnedPositionalEmbedding, SinusoidalPositionalEmbedding, RobertaLMHead, ESM1bLayerNorm, ContactPredictionHead, ) class ProteinBertModel(nn.Module): @classmethod 
def", "= attentions[..., :-1] if padding_mask is not None: attention_mask =", "in the root directory of this source tree. import math", "if return_contacts: contacts = self._predict_contacts_from_token_attentions(tokens, attentions) result[\"contacts\"] = contacts return", "return result def _predict_contacts_from_token_attentions(self, tokens, attentions): # remove eos token", "contacts = self._predict_contacts_from_token_attentions(tokens, attentions) result[\"contacts\"] = contacts return result def", "T x C mask_ratio_train = 0.15 * 0.8 src_lengths =", "T) => (B, H, T, T) attn_weights.append(attn.transpose(1, 0)) if self.model_version", "math.sqrt(self.args.embed_dim) self.embed_positions = SinusoidalPositionalEmbedding(self.args.embed_dim, self.padding_idx) self.embed_out = nn.Parameter( torch.zeros((self.alphabet_size, self.args.embed_dim))", "(B, T, E) result = {\"logits\": x, \"representations\": hidden_representations} if", "self.args.embed_dim, padding_idx=self.padding_idx ) self.layers = nn.ModuleList( [ TransformerLayer( self.args.embed_dim, self.args.ffn_embed_dim,", "] ) self.contact_head = ContactPredictionHead(self.args.layers * self.args.attention_heads) def _init_submodules_esm1b(self): self._init_submodules_common()", "default=36, type=int, metavar=\"N\", help=\"number of layers\" ) parser.add_argument( \"--embed_dim\", default=1280,", "attentions): # remove eos token attentions if tokens[:, -1].eq(self.eos_idx).any(): eos_mask", "self.mask_idx = alphabet.mask_idx self.cls_idx = alphabet.cls_idx self.eos_idx = alphabet.eos_idx if", "x = self.emb_layer_norm_after(x) x = x.transpose(0, 1) # (T, B,", "ESM1bLayerNorm, ContactPredictionHead, ) class ProteinBertModel(nn.Module): @classmethod def add_args(cls, parser): parser.add_argument(", "x x = self.lm_head(x) else: x = F.linear(x, self.embed_out, bias=self.embed_out_bias)", ":] attentions = attentions[..., :-1, :-1] # remove cls token", "self.lm_head = RobertaLMHead( embed_dim=self.args.embed_dim, 
output_dim=self.alphabet_size, weight=self.embed_tokens.weight ) def _init_submodules_esm1(self): self._init_submodules_common()", "self.mask_idx).unsqueeze(-1), 0.0) # x: B x T x C mask_ratio_train", "attentions * attention_mask[:, None, None, :, :] result[\"attentions\"] = attentions", "result[\"contacts\"] = contacts return result def _predict_contacts_from_token_attentions(self, tokens, attentions): #", "attentions = attentions[..., 1:, 1:] batch_size, layers, heads, seqlen, _", "= attentions * attention_mask[:, None, None, :, :] result[\"attentions\"] =", "args, alphabet): super().__init__() self.args = args self.alphabet_size = len(alphabet) self.padding_idx", "as nn import torch.nn.functional as F from .modules import (", "= LearnedPositionalEmbedding(self.args.max_positions, self.args.embed_dim, self.padding_idx) self.emb_layer_norm_before = ESM1bLayerNorm(self.args.embed_dim) self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim)", "B x T x C mask_ratio_train = 0.15 * 0.8", "attentions = attentions * attention_mask[:, None, None, :, :] result[\"attentions\"]", "1) in repr_layers: hidden_representations[layer_idx + 1] = x.transpose(0, 1) if", "= self._predict_contacts_from_token_attentions(tokens, attentions) result[\"contacts\"] = contacts return result def _predict_contacts_from_token_attentions(self,", "B x L x H x T x T attentions", "== \"ESM-1\": # ESM-1 models have an additional null-token for", "self.embed_positions = SinusoidalPositionalEmbedding(self.args.embed_dim, self.padding_idx) self.embed_out = nn.Parameter( torch.zeros((self.alphabet_size, self.args.embed_dim)) )", "2 padding_mask = tokens.eq(self.padding_idx) # B, T x = self.embed_scale", "self.padding_idx) self.embed_out = nn.Parameter( torch.zeros((self.alphabet_size, self.args.embed_dim)) ) self.embed_out_bias = None", "T x = self.embed_scale * self.embed_tokens(tokens) if getattr(self.args, 'token_dropout', False):", "padding_mask is not None: attention_mask = (1 - 
padding_mask.type_as(attentions)) attention_mask", "* self.embed_tokens(tokens) if getattr(self.args, 'token_dropout', False): x.masked_fill_((tokens == self.mask_idx).unsqueeze(-1), 0.0)", "self.embed_tokens(tokens) if getattr(self.args, 'token_dropout', False): x.masked_fill_((tokens == self.mask_idx).unsqueeze(-1), 0.0) #", "(H, B, T, T) => (B, H, T, T) attn_weights.append(attn.transpose(1,", "bias to logits\" ) parser.add_argument( \"--ffn_embed_dim\", default=5120, type=int, metavar=\"N\", help=\"embedding", "# (T, B, E) => (B, T, E) result =", "attention_mask = attention_mask.unsqueeze(1) * attention_mask.unsqueeze(2) attentions = attentions * attention_mask[:,", "repr_layers: hidden_representations[0] = x if need_head_weights: attn_weights = [] #", "= RobertaLMHead( embed_dim=self.args.embed_dim, output_dim=self.alphabet_size, weight=self.embed_tokens.weight ) def _init_submodules_esm1(self): self._init_submodules_common() self.embed_scale", "self.emb_layer_norm_after(x) x = x.transpose(0, 1) # (T, B, E) =>", "[] # (B, T, E) => (T, B, E) x", "= x x = self.lm_head(x) else: x = F.linear(x, self.embed_out,", "+ 1) in repr_layers: hidden_representations[layer_idx + 1] = x x", "== self.mask_idx).sum(-1).float() / src_lengths x = x * (1 -", "which we remove attentions = attentions[..., :-1] if padding_mask is", "source tree. 
import math import torch import torch.nn as nn", "parser.add_argument( \"--attention_heads\", default=20, type=int, metavar=\"N\", help=\"number of attention heads\", )", "src_lengths = (~padding_mask).sum(-1) mask_ratio_observed = (tokens == self.mask_idx).sum(-1).float() / src_lengths", "in enumerate(self.layers): x, attn = layer(x, self_attn_padding_mask=padding_mask, need_head_weights=need_head_weights) if (layer_idx", "* 0.8 src_lengths = (~padding_mask).sum(-1) mask_ratio_observed = (tokens == self.mask_idx).sum(-1).float()", "(layer_idx + 1) in repr_layers: hidden_representations[layer_idx + 1] = x.transpose(0,", "the # LICENSE file in the root directory of this", "= self.embed_scale * self.embed_tokens(tokens) if getattr(self.args, 'token_dropout', False): x.masked_fill_((tokens ==", "x.transpose(0, 1) if need_head_weights: # (H, B, T, T) =>", "src_lengths x = x * (1 - mask_ratio_train) / (1", "dimension\" ) parser.add_argument( \"--logit_bias\", action=\"store_true\", help=\"whether to apply bias to", ":, :] result[\"attentions\"] = attentions if return_contacts: contacts = self._predict_contacts_from_token_attentions(tokens,", "(B, T, E) # last hidden representation should have layer", "token attentions if tokens[:, -1].eq(self.eos_idx).any(): eos_mask = tokens.ne(self.eos_idx).to(attentions) eos_mask =", ":-1] if padding_mask is not None: attention_mask = (1 -", "self.alphabet_size = len(alphabet) self.padding_idx = alphabet.padding_idx self.mask_idx = alphabet.mask_idx self.cls_idx", "T, E) => (T, B, E) x = x.transpose(0, 1)", "* heads, seqlen, seqlen) return self.contact_head(attentions) def predict_contacts(self, tokens): return", "SinusoidalPositionalEmbedding, RobertaLMHead, ESM1bLayerNorm, ContactPredictionHead, ) class ProteinBertModel(nn.Module): @classmethod def add_args(cls,", "token attentions if tokens[:, 0].eq(self.cls_idx).all(): attentions = attentions[..., 1:, 1:]", "= layer(x, self_attn_padding_mask=padding_mask, 
need_head_weights=need_head_weights) if (layer_idx + 1) in repr_layers:", "if self.model_version == \"ESM-1\": # ESM-1 models have an additional", "self.args.attention_heads) def _init_submodules_esm1b(self): self._init_submodules_common() self.embed_scale = 1 self.embed_positions = LearnedPositionalEmbedding(self.args.max_positions,", "T, T) => (B, H, T, T) attn_weights.append(attn.transpose(1, 0)) if", "/ (1 - mask_ratio_observed)[:, None, None] x = x +", "E) x = x.transpose(0, 1) if not padding_mask.any(): padding_mask =", "= ESM1bLayerNorm(self.args.embed_dim) self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim) self.lm_head = RobertaLMHead( embed_dim=self.args.embed_dim, output_dim=self.alphabet_size,", "layer_idx, layer in enumerate(self.layers): x, attn = layer(x, self_attn_padding_mask=padding_mask, need_head_weights=need_head_weights)", "code is licensed under the MIT license found in the", "heads, seqlen, seqlen) return self.contact_head(attentions) def predict_contacts(self, tokens): return self(tokens,", ":, :] attentions = attentions[..., :-1, :-1] # remove cls", "= attentions.size() attentions = attentions.view(batch_size, layers * heads, seqlen, seqlen)", "layers, heads, seqlen, _ = attentions.size() attentions = attentions.view(batch_size, layers", "mask_ratio_observed)[:, None, None] x = x + self.embed_positions(tokens) if self.model_version", "source code is licensed under the MIT license found in", "Facebook, Inc. and its affiliates. 
# # This source code", "# remove eos token attentions if tokens[:, -1].eq(self.eos_idx).any(): eos_mask =", "\"--num_layers\", default=36, type=int, metavar=\"N\", help=\"number of layers\" ) parser.add_argument( \"--embed_dim\",", "{} if 0 in repr_layers: hidden_representations[0] = x if need_head_weights:", "result[\"attentions\"] = attentions if return_contacts: contacts = self._predict_contacts_from_token_attentions(tokens, attentions) result[\"contacts\"]", "x.transpose(0, 1) if not padding_mask.any(): padding_mask = None for layer_idx,", "add_args(cls, parser): parser.add_argument( \"--num_layers\", default=36, type=int, metavar=\"N\", help=\"number of layers\"", "return_contacts: need_head_weights = True assert tokens.ndim == 2 padding_mask =", "= self.emb_layer_norm_before(x) if padding_mask is not None: x = x", "# # This source code is licensed under the MIT", "= len(alphabet) self.padding_idx = alphabet.padding_idx self.mask_idx = alphabet.mask_idx self.cls_idx =", "(B, T, E) => (T, B, E) x = x.transpose(0,", "1) if not padding_mask.any(): padding_mask = None for layer_idx, layer", "( TransformerLayer, LearnedPositionalEmbedding, SinusoidalPositionalEmbedding, RobertaLMHead, ESM1bLayerNorm, ContactPredictionHead, ) class ProteinBertModel(nn.Module):", "T, E) # last hidden representation should have layer norm", "help=\"embedding dimension for FFN\", ) parser.add_argument( \"--attention_heads\", default=20, type=int, metavar=\"N\",", "assert tokens.ndim == 2 padding_mask = tokens.eq(self.padding_idx) # B, T", "range(self.args.layers) ] ) self.contact_head = ContactPredictionHead(self.args.layers * self.args.attention_heads) def _init_submodules_esm1b(self):", "# (B, T, E) => (T, B, E) x =", "self_attn_padding_mask=padding_mask, need_head_weights=need_head_weights) if (layer_idx + 1) in repr_layers: hidden_representations[layer_idx +", "attention_mask.unsqueeze(2) attentions = attentions * attention_mask[:, None, None, :, :]", "# This source code is 
licensed under the MIT license", "x = x.transpose(0, 1) if not padding_mask.any(): padding_mask = None", "the root directory of this source tree. import math import", "need_head_weights: attn_weights = [] # (B, T, E) => (T,", "self.padding_idx) self.emb_layer_norm_before = ESM1bLayerNorm(self.args.embed_dim) self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim) self.lm_head = RobertaLMHead(", "= {\"logits\": x, \"representations\": hidden_representations} if need_head_weights: # attentions: B", "= ContactPredictionHead(self.args.layers * self.args.attention_heads) def _init_submodules_esm1b(self): self._init_submodules_common() self.embed_scale = 1", "\"ESM-1\": # ESM-1 models have an additional null-token for attention,", "need_head_weights = True assert tokens.ndim == 2 padding_mask = tokens.eq(self.padding_idx)", "* (1 - padding_mask.unsqueeze(-1).type_as(x)) repr_layers = set(repr_layers) hidden_representations = {}", "x = self.emb_layer_norm_before(x) if padding_mask is not None: x =", "eos_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2) attentions = attentions * eos_mask[:,", "tokens, attentions): # remove eos token attentions if tokens[:, -1].eq(self.eos_idx).any():", "\"--logit_bias\", action=\"store_true\", help=\"whether to apply bias to logits\" ) parser.add_argument(", "x.masked_fill_((tokens == self.mask_idx).unsqueeze(-1), 0.0) # x: B x T x", "default=1280, type=int, metavar=\"N\", help=\"embedding dimension\" ) parser.add_argument( \"--logit_bias\", action=\"store_true\", help=\"whether", "metavar=\"N\", help=\"embedding dimension\" ) parser.add_argument( \"--logit_bias\", action=\"store_true\", help=\"whether to apply", "padding_mask = tokens.eq(self.padding_idx) # B, T x = self.embed_scale *", "in repr_layers: hidden_representations[layer_idx + 1] = x x = self.lm_head(x)", "tokens[:, -1].eq(self.eos_idx).any(): eos_mask = tokens.ne(self.eos_idx).to(attentions) eos_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2)", 
"tokens.ne(self.eos_idx).to(attentions) eos_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2) attentions = attentions *", "(1 - mask_ratio_observed)[:, None, None] x = x + self.embed_positions(tokens)", "layer(x, self_attn_padding_mask=padding_mask, need_head_weights=need_head_weights) if (layer_idx + 1) in repr_layers: hidden_representations[layer_idx", "0 in repr_layers: hidden_representations[0] = x if need_head_weights: attn_weights =", "= x * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:,", "(1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None] x", "None if self.args.final_bias: self.embed_out_bias = nn.Parameter(torch.zeros(self.alphabet_size)) def forward(self, tokens, repr_layers=[],", "applied if (layer_idx + 1) in repr_layers: hidden_representations[layer_idx + 1]", "self.model_version == 'ESM-1b': x = self.emb_layer_norm_before(x) if padding_mask is not", "help=\"embedding dimension\" ) parser.add_argument( \"--logit_bias\", action=\"store_true\", help=\"whether to apply bias", "== 'ESM-1b': x = self.emb_layer_norm_before(x) if padding_mask is not None:", "ESM1bLayerNorm(self.args.embed_dim) self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim) self.lm_head = RobertaLMHead( embed_dim=self.args.embed_dim, output_dim=self.alphabet_size, weight=self.embed_tokens.weight", "its affiliates. 
# # This source code is licensed under", "_init_submodules_common(self): self.embed_tokens = nn.Embedding( self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx ) self.layers =", "repr_layers: hidden_representations[layer_idx + 1] = x x = self.lm_head(x) else:", "hidden_representations[0] = x if need_head_weights: attn_weights = [] # (B,", "hidden representation should have layer norm applied if (layer_idx +", "# attentions: B x L x H x T x", "if getattr(self.args, 'token_dropout', False): x.masked_fill_((tokens == self.mask_idx).unsqueeze(-1), 0.0) # x:", "__init__(self, args, alphabet): super().__init__() self.args = args self.alphabet_size = len(alphabet)", "=> (T, B, E) x = x.transpose(0, 1) if not", "return self.contact_head(attentions) def predict_contacts(self, tokens): return self(tokens, return_contacts=True)[\"contacts\"] @property def", "else: x = F.linear(x, self.embed_out, bias=self.embed_out_bias) x = x.transpose(0, 1)", "for attention, which we remove attentions = attentions[..., :-1] if", "remove eos token attentions if tokens[:, -1].eq(self.eos_idx).any(): eos_mask = tokens.ne(self.eos_idx).to(attentions)", "embed_dim=self.args.embed_dim, output_dim=self.alphabet_size, weight=self.embed_tokens.weight ) def _init_submodules_esm1(self): self._init_submodules_common() self.embed_scale = math.sqrt(self.args.embed_dim)", "B, E) x = x.transpose(0, 1) if not padding_mask.any(): padding_mask", "None, None, :, :] attentions = attentions[..., :-1, :-1] #", "layer in enumerate(self.layers): x, attn = layer(x, self_attn_padding_mask=padding_mask, need_head_weights=need_head_weights) if", "attentions[..., :-1] if padding_mask is not None: attention_mask = (1", "x = self.embed_scale * self.embed_tokens(tokens) if getattr(self.args, 'token_dropout', False): x.masked_fill_((tokens", "self.embed_scale = math.sqrt(self.args.embed_dim) self.embed_positions = SinusoidalPositionalEmbedding(self.args.embed_dim, self.padding_idx) self.embed_out = 
nn.Parameter(", "# B, T x = self.embed_scale * self.embed_tokens(tokens) if getattr(self.args,", "* (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None]", "None: x = x * (1 - padding_mask.unsqueeze(-1).type_as(x)) repr_layers =", ":-1, :-1] # remove cls token attentions if tokens[:, 0].eq(self.cls_idx).all():", "T, E) result = {\"logits\": x, \"representations\": hidden_representations} if need_head_weights:", "eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2) attentions = attentions * eos_mask[:, None, None,", "if self.model_version == 'ESM-1b': x = self.emb_layer_norm_after(x) x = x.transpose(0,", "have layer norm applied if (layer_idx + 1) in repr_layers:", "for FFN\", ) parser.add_argument( \"--attention_heads\", default=20, type=int, metavar=\"N\", help=\"number of", "'token_dropout', False): x.masked_fill_((tokens == self.mask_idx).unsqueeze(-1), 0.0) # x: B x", "1 self.embed_positions = LearnedPositionalEmbedding(self.args.max_positions, self.args.embed_dim, self.padding_idx) self.emb_layer_norm_before = ESM1bLayerNorm(self.args.embed_dim) self.emb_layer_norm_after", "self.args.embed_dim, self.args.ffn_embed_dim, self.args.attention_heads, add_bias_kv=(self.model_version != 'ESM-1b'), use_esm1b_layer_norm=(self.model_version == 'ESM-1b'), )", "(tokens == self.mask_idx).sum(-1).float() / src_lengths x = x * (1", "attn = layer(x, self_attn_padding_mask=padding_mask, need_head_weights=need_head_weights) if (layer_idx + 1) in", "= 1 self.embed_positions = LearnedPositionalEmbedding(self.args.max_positions, self.args.embed_dim, self.padding_idx) self.emb_layer_norm_before = ESM1bLayerNorm(self.args.embed_dim)", "metavar=\"N\", help=\"embedding dimension for FFN\", ) parser.add_argument( \"--attention_heads\", default=20, type=int,", "0)) if self.model_version == 'ESM-1b': x = self.emb_layer_norm_after(x) x =", "if padding_mask is not None: attention_mask = (1 - padding_mask.type_as(attentions))", "-1].eq(self.eos_idx).any(): eos_mask = 
tokens.ne(self.eos_idx).to(attentions) eos_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2) attentions", "E) => (T, B, E) x = x.transpose(0, 1) if", ") parser.add_argument( \"--logit_bias\", action=\"store_true\", help=\"whether to apply bias to logits\"", "= (~padding_mask).sum(-1) mask_ratio_observed = (tokens == self.mask_idx).sum(-1).float() / src_lengths x", "padding_idx=self.padding_idx ) self.layers = nn.ModuleList( [ TransformerLayer( self.args.embed_dim, self.args.ffn_embed_dim, self.args.attention_heads,", "== 'ESM-1b'), ) for _ in range(self.args.layers) ] ) self.contact_head", "enumerate(self.layers): x, attn = layer(x, self_attn_padding_mask=padding_mask, need_head_weights=need_head_weights) if (layer_idx +", "= ESM1bLayerNorm(self.args.embed_dim) self.lm_head = RobertaLMHead( embed_dim=self.args.embed_dim, output_dim=self.alphabet_size, weight=self.embed_tokens.weight ) def", "= tokens.ne(self.eos_idx).to(attentions) eos_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2) attentions = attentions", "(1 - padding_mask.type_as(attentions)) attention_mask = attention_mask.unsqueeze(1) * attention_mask.unsqueeze(2) attentions =", "_init_submodules_esm1(self): self._init_submodules_common() self.embed_scale = math.sqrt(self.args.embed_dim) self.embed_positions = SinusoidalPositionalEmbedding(self.args.embed_dim, self.padding_idx) self.embed_out", "action=\"store_true\", help=\"whether to apply bias to logits\" ) parser.add_argument( \"--ffn_embed_dim\",", "RobertaLMHead, ESM1bLayerNorm, ContactPredictionHead, ) class ProteinBertModel(nn.Module): @classmethod def add_args(cls, parser):", "attention, which we remove attentions = attentions[..., :-1] if padding_mask", "mask_ratio_observed = (tokens == self.mask_idx).sum(-1).float() / src_lengths x = x", "self.cls_idx = alphabet.cls_idx self.eos_idx = alphabet.eos_idx if self.args.arch == 'roberta_large':", "= [] # (B, T, E) => (T, B, E)", "1:] batch_size, layers, heads, seqlen, _ = attentions.size() 
attentions =", "self.embed_scale * self.embed_tokens(tokens) if getattr(self.args, 'token_dropout', False): x.masked_fill_((tokens == self.mask_idx).unsqueeze(-1),", "default=20, type=int, metavar=\"N\", help=\"number of attention heads\", ) def __init__(self,", "= F.linear(x, self.embed_out, bias=self.embed_out_bias) x = x.transpose(0, 1) # (T,", "H, T, T) attn_weights.append(attn.transpose(1, 0)) if self.model_version == 'ESM-1b': x", "attentions if return_contacts: contacts = self._predict_contacts_from_token_attentions(tokens, attentions) result[\"contacts\"] = contacts", "= nn.Embedding( self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx ) self.layers = nn.ModuleList( [", "is licensed under the MIT license found in the #", "0.8 src_lengths = (~padding_mask).sum(-1) mask_ratio_observed = (tokens == self.mask_idx).sum(-1).float() /", "def _init_submodules_esm1(self): self._init_submodules_common() self.embed_scale = math.sqrt(self.args.embed_dim) self.embed_positions = SinusoidalPositionalEmbedding(self.args.embed_dim, self.padding_idx)", "= attentions[..., :-1, :-1] # remove cls token attentions if", "* attention_mask[:, None, None, :, :] result[\"attentions\"] = attentions if", "E) => (B, T, E) # last hidden representation should", "class ProteinBertModel(nn.Module): @classmethod def add_args(cls, parser): parser.add_argument( \"--num_layers\", default=36, type=int,", "= contacts return result def _predict_contacts_from_token_attentions(self, tokens, attentions): # remove", "self.lm_head(x) else: x = F.linear(x, self.embed_out, bias=self.embed_out_bias) x = x.transpose(0,", "if 0 in repr_layers: hidden_representations[0] = x if need_head_weights: attn_weights", "1) if self.model_version == \"ESM-1\": # ESM-1 models have an", "torch import torch.nn as nn import torch.nn.functional as F from", "[ TransformerLayer( self.args.embed_dim, self.args.ffn_embed_dim, self.args.attention_heads, add_bias_kv=(self.model_version != 'ESM-1b'), 
use_esm1b_layer_norm=(self.model_version ==", "attention_mask[:, None, None, :, :] result[\"attentions\"] = attentions if return_contacts:", "torch.stack(attn_weights, 1) if self.model_version == \"ESM-1\": # ESM-1 models have", "self.model_version = 'ESM-1b' self._init_submodules_esm1b() else: self.model_version = 'ESM-1' self._init_submodules_esm1() def", "SinusoidalPositionalEmbedding(self.args.embed_dim, self.padding_idx) self.embed_out = nn.Parameter( torch.zeros((self.alphabet_size, self.args.embed_dim)) ) self.embed_out_bias =", "attentions.view(batch_size, layers * heads, seqlen, seqlen) return self.contact_head(attentions) def predict_contacts(self,", "# Copyright (c) Facebook, Inc. and its affiliates. # #", "seqlen, _ = attentions.size() attentions = attentions.view(batch_size, layers * heads,", "type=int, metavar=\"N\", help=\"embedding dimension\" ) parser.add_argument( \"--logit_bias\", action=\"store_true\", help=\"whether to", "last hidden representation should have layer norm applied if (layer_idx", "for layer_idx, layer in enumerate(self.layers): x, attn = layer(x, self_attn_padding_mask=padding_mask,", "E) result = {\"logits\": x, \"representations\": hidden_representations} if need_head_weights: #", "x T x T attentions = torch.stack(attn_weights, 1) if self.model_version", "import math import torch import torch.nn as nn import torch.nn.functional", "= alphabet.eos_idx if self.args.arch == 'roberta_large': self.model_version = 'ESM-1b' self._init_submodules_esm1b()", "def forward(self, tokens, repr_layers=[], need_head_weights=False, return_contacts=False): if return_contacts: need_head_weights =", "RobertaLMHead( embed_dim=self.args.embed_dim, output_dim=self.alphabet_size, weight=self.embed_tokens.weight ) def _init_submodules_esm1(self): self._init_submodules_common() self.embed_scale =", "x = x * (1 - mask_ratio_train) / (1 -", "def __init__(self, args, alphabet): super().__init__() self.args = args self.alphabet_size =", "(T, B, E) => (B, T, 
E) result = {\"logits\":", "x if need_head_weights: attn_weights = [] # (B, T, E)", "need_head_weights=need_head_weights) if (layer_idx + 1) in repr_layers: hidden_representations[layer_idx + 1]", "self.embed_tokens = nn.Embedding( self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx ) self.layers = nn.ModuleList(", "if need_head_weights: attn_weights = [] # (B, T, E) =>", "x = x.transpose(0, 1) # (T, B, E) => (B,", "directory of this source tree. import math import torch import", "eos_mask.unsqueeze(2) attentions = attentions * eos_mask[:, None, None, :, :]", "hidden_representations} if need_head_weights: # attentions: B x L x H", "= attentions if return_contacts: contacts = self._predict_contacts_from_token_attentions(tokens, attentions) result[\"contacts\"] =", "in repr_layers: hidden_representations[0] = x if need_head_weights: attn_weights = []", "= None if self.args.final_bias: self.embed_out_bias = nn.Parameter(torch.zeros(self.alphabet_size)) def forward(self, tokens,", "batch_size, layers, heads, seqlen, _ = attentions.size() attentions = attentions.view(batch_size,", "T, T) attn_weights.append(attn.transpose(1, 0)) if self.model_version == 'ESM-1b': x =", "'ESM-1b'), use_esm1b_layer_norm=(self.model_version == 'ESM-1b'), ) for _ in range(self.args.layers) ]", "alphabet.mask_idx self.cls_idx = alphabet.cls_idx self.eos_idx = alphabet.eos_idx if self.args.arch ==", "= attentions * eos_mask[:, None, None, :, :] attentions =", "self.emb_layer_norm_before(x) if padding_mask is not None: x = x *", "== 'roberta_large': self.model_version = 'ESM-1b' self._init_submodules_esm1b() else: self.model_version = 'ESM-1'", "result = {\"logits\": x, \"representations\": hidden_representations} if need_head_weights: # attentions:", "Inc. and its affiliates. 
# # This source code is", "else: self.model_version = 'ESM-1' self._init_submodules_esm1() def _init_submodules_common(self): self.embed_tokens = nn.Embedding(", "if return_contacts: need_head_weights = True assert tokens.ndim == 2 padding_mask", "!= 'ESM-1b'), use_esm1b_layer_norm=(self.model_version == 'ESM-1b'), ) for _ in range(self.args.layers)", "+ self.embed_positions(tokens) if self.model_version == 'ESM-1b': x = self.emb_layer_norm_before(x) if", "- padding_mask.unsqueeze(-1).type_as(x)) repr_layers = set(repr_layers) hidden_representations = {} if 0", "type=int, metavar=\"N\", help=\"number of attention heads\", ) def __init__(self, args,", "tokens[:, 0].eq(self.cls_idx).all(): attentions = attentions[..., 1:, 1:] batch_size, layers, heads,", "metavar=\"N\", help=\"number of attention heads\", ) def __init__(self, args, alphabet):", "eos_mask[:, None, None, :, :] attentions = attentions[..., :-1, :-1]", "import torch.nn as nn import torch.nn.functional as F from .modules", "nn.ModuleList( [ TransformerLayer( self.args.embed_dim, self.args.ffn_embed_dim, self.args.attention_heads, add_bias_kv=(self.model_version != 'ESM-1b'), use_esm1b_layer_norm=(self.model_version", "is not None: attention_mask = (1 - padding_mask.type_as(attentions)) attention_mask =", "if padding_mask is not None: x = x * (1", "1) # (T, B, E) => (B, T, E) result", "= tokens.eq(self.padding_idx) # B, T x = self.embed_scale * self.embed_tokens(tokens)", "= attentions[..., 1:, 1:] batch_size, layers, heads, seqlen, _ =", "= 'ESM-1b' self._init_submodules_esm1b() else: self.model_version = 'ESM-1' self._init_submodules_esm1() def _init_submodules_common(self):", "heads, seqlen, _ = attentions.size() attentions = attentions.view(batch_size, layers *", "nn.Parameter( torch.zeros((self.alphabet_size, self.args.embed_dim)) ) self.embed_out_bias = None if self.args.final_bias: self.embed_out_bias", "= nn.ModuleList( [ TransformerLayer( self.args.embed_dim, self.args.ffn_embed_dim, 
self.args.attention_heads, add_bias_kv=(self.model_version != 'ESM-1b'),", "self.eos_idx = alphabet.eos_idx if self.args.arch == 'roberta_large': self.model_version = 'ESM-1b'", "if self.args.arch == 'roberta_large': self.model_version = 'ESM-1b' self._init_submodules_esm1b() else: self.model_version", "F.linear(x, self.embed_out, bias=self.embed_out_bias) x = x.transpose(0, 1) # (T, B,", "# ESM-1 models have an additional null-token for attention, which", "1) in repr_layers: hidden_representations[layer_idx + 1] = x x =", "B, T, T) => (B, H, T, T) attn_weights.append(attn.transpose(1, 0))", "len(alphabet) self.padding_idx = alphabet.padding_idx self.mask_idx = alphabet.mask_idx self.cls_idx = alphabet.cls_idx" ]
[ "on DEK dek = tink_pb2.KeyData() dek.type_url = self.key_template.type_url dek.value =", "2.0 (the \"License\"); # you may not use this file", "how many bytes the DEK length will be encoded. DEK_LEN_BYTES", "4 bytes (big endian) * Encrypted DEK: variable length, specified", "+ ciphertext def decrypt(self, ciphertext: bytes, associated_data: bytes) -> bytes:", "remote: aead.Aead): self.key_template = key_template self.remote_aead = remote def encrypt(self,", "< DEK_LEN_BYTES: raise core.TinkError dek_len = struct.unpack('>I', ciphertext[0:DEK_LEN_BYTES])[0] # Basic", "aead.Aead): self.key_template = key_template self.remote_aead = remote def encrypt(self, plaintext:", "(DEK) which is used to encrypt the payload. The DEK", "len(ciphertext) # Recover DEK length if ct_len < DEK_LEN_BYTES: raise", "Wrap DEK key values with remote encrypted_dek = self.remote_aead.encrypt(dek.value, b'')", "be decrypted by the KMS, and then the DEK can", "Envelope encryption generates a data encryption key (DEK) which is", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "import tink_pb2 from tink import aead from tink import core", "# Get AEAD primitive based on DEK dek = tink_pb2.KeyData()", "encrypt(self, plaintext: bytes, associated_data: bytes) -> bytes: # Get new", "DEK can be used to decrypt the ciphertext. For further", "Defines in how many bytes the DEK length will be", "< 0: raise core.TinkError # Decrypt DEK with remote AEAD", "dek = core.Registry.new_key_data(self.key_template) dek_aead = core.Registry.primitive(dek, aead.Aead) # Encrypt plaintext", "then send to a KMS to be encrypted and the", "encrypted DEK is attached to the ciphertext. In order to", "AEAD payload: variable length \"\"\" def __init__(self, key_template: tink_pb2.KeyTemplate, remote:", "use this file except in compliance with the License. 
#", "big endian enc_dek_len = struct.pack('>I', len(encrypted_dek)) return enc_dek_len + encrypted_dek", "with remote AEAD encrypted_dek_bytes = ciphertext[DEK_LEN_BYTES:DEK_LEN_BYTES + dek_len] dek_bytes =", "is used to encrypt the payload. The DEK is then", "import struct from tink.proto import tink_pb2 from tink import aead", "bytes) -> bytes: ct_len = len(ciphertext) # Recover DEK length", "__init__(self, key_template: tink_pb2.KeyTemplate, remote: aead.Aead): self.key_template = key_template self.remote_aead =", "= self.remote_aead.decrypt(encrypted_dek_bytes, b'') # Get AEAD primitive based on DEK", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "License. # You may obtain a copy of the License", "if dek_len > (ct_len - DEK_LEN_BYTES) or dek_len < 0:", "to encrypt the payload. The DEK is then send to", "# Recover DEK length if ct_len < DEK_LEN_BYTES: raise core.TinkError", "= tink_pb2.KeyData() dek.type_url = self.key_template.type_url dek.value = dek_bytes dek.key_material_type =", "in how many bytes the DEK length will be encoded.", "the encrypted DEK: 4 bytes (big endian) * Encrypted DEK:", "# Extract ciphertext payload and decrypt ct_bytes = ciphertext[DEK_LEN_BYTES +", "under the License is distributed on an \"AS IS\" BASIS,", "payload. The DEK is then send to a KMS to", "length \"\"\" def __init__(self, key_template: tink_pb2.KeyTemplate, remote: aead.Aead): self.key_template =", "DEK: 4 bytes (big endian) * Encrypted DEK: variable length,", "encoded as big endian enc_dek_len = struct.pack('>I', len(encrypted_dek)) return enc_dek_len", "License for the specific language governing permissions and # limitations", "which is used to encrypt the payload. The DEK is", "(ct_len - DEK_LEN_BYTES) or dek_len < 0: raise core.TinkError #", "the DEK first has to be decrypted by the KMS,", "DEK length if ct_len < DEK_LEN_BYTES: raise core.TinkError dek_len =", "the ciphertext. 
In order to decrypt the ciphertext, the DEK", "attached to the ciphertext. In order to decrypt the ciphertext,", "return enc_dek_len + encrypted_dek + ciphertext def decrypt(self, ciphertext: bytes,", "the ciphertext. For further information see https://cloud.google.com/kms/docs/envelope-encryption. The ciphertext structure", "<reponame>bfloch/tink # Copyright 2020 Google LLC. # # Licensed under", "DEK_LEN_BYTES: raise core.TinkError dek_len = struct.unpack('>I', ciphertext[0:DEK_LEN_BYTES])[0] # Basic check", "LLC. # # Licensed under the Apache License, Version 2.0", "b'') # Get AEAD primitive based on DEK dek =", "self.remote_aead = remote def encrypt(self, plaintext: bytes, associated_data: bytes) ->", "core.Registry.new_key_data(self.key_template) dek_aead = core.Registry.primitive(dek, aead.Aead) # Encrypt plaintext ciphertext =", "ciphertext def decrypt(self, ciphertext: bytes, associated_data: bytes) -> bytes: ct_len", "self.key_template = key_template self.remote_aead = remote def encrypt(self, plaintext: bytes,", "to be decrypted by the KMS, and then the DEK", "associated_data: bytes) -> bytes: # Get new key from template", "see https://cloud.google.com/kms/docs/envelope-encryption. The ciphertext structure is as follows: * Length", "key values with remote encrypted_dek = self.remote_aead.encrypt(dek.value, b'') # Construct", "decrypt(self, ciphertext: bytes, associated_data: bytes) -> bytes: ct_len = len(ciphertext)", "in compliance with the License. 
# You may obtain a", "software # distributed under the License is distributed on an", "encrypted_dek = self.remote_aead.encrypt(dek.value, b'') # Construct ciphertext, DEK length encoded", "import print_function import struct from tink.proto import tink_pb2 from tink", "encrypted_dek + ciphertext def decrypt(self, ciphertext: bytes, associated_data: bytes) ->", "DEK key values with remote encrypted_dek = self.remote_aead.encrypt(dek.value, b'') #", "= core.Registry.primitive(dek, aead.Aead) # Encrypt plaintext ciphertext = dek_aead.encrypt(plaintext, associated_data)", "annotations from __future__ import print_function import struct from tink.proto import", "tink import aead from tink import core # Defines in", "valid. if dek_len > (ct_len - DEK_LEN_BYTES) or dek_len <", "import for type annotations from __future__ import print_function import struct", "if ct_len < DEK_LEN_BYTES: raise core.TinkError dek_len = struct.unpack('>I', ciphertext[0:DEK_LEN_BYTES])[0]", "absolute_import from __future__ import division # Placeholder for import for", "permissions and # limitations under the License. \"\"\"Module for envelope", "-> bytes: # Get new key from template dek =", "data encryption key (DEK) which is used to encrypt the", "+ dek_len] dek_bytes = self.remote_aead.decrypt(encrypted_dek_bytes, b'') # Get AEAD primitive", "# Placeholder for import for type annotations from __future__ import", "AEAD encrypted_dek_bytes = ciphertext[DEK_LEN_BYTES:DEK_LEN_BYTES + dek_len] dek_bytes = self.remote_aead.decrypt(encrypted_dek_bytes, b'')", "KMS, and then the DEK can be used to decrypt", "previous 4 bytes * AEAD payload: variable length \"\"\" def", "for import for type annotations from __future__ import print_function import", "bytes: # Get new key from template dek = core.Registry.new_key_data(self.key_template)", "Google LLC. 
# # Licensed under the Apache License, Version", "dek_len = struct.unpack('>I', ciphertext[0:DEK_LEN_BYTES])[0] # Basic check if DEK length", "dek_len < 0: raise core.TinkError # Decrypt DEK with remote", "order to decrypt the ciphertext, the DEK first has to", "The ciphertext structure is as follows: * Length of the", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "tink_pb2 from tink import aead from tink import core #", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "associated_data: bytes) -> bytes: ct_len = len(ciphertext) # Recover DEK", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "encrypted DEK: 4 bytes (big endian) * Encrypted DEK: variable", "to in writing, software # distributed under the License is", "# Copyright 2020 Google LLC. # # Licensed under the", "# See the License for the specific language governing permissions", "\"\"\" def __init__(self, key_template: tink_pb2.KeyTemplate, remote: aead.Aead): self.key_template = key_template", "or agreed to in writing, software # distributed under the", "= ciphertext[DEK_LEN_BYTES:DEK_LEN_BYTES + dek_len] dek_bytes = self.remote_aead.decrypt(encrypted_dek_bytes, b'') # Get", "required by applicable law or agreed to in writing, software", "Encrypted DEK: variable length, specified by the previous 4 bytes", "tink import core # Defines in how many bytes the", "length encoded as big endian enc_dek_len = struct.pack('>I', len(encrypted_dek)) return", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "with the License. # You may obtain a copy of", "to decrypt the ciphertext, the DEK first has to be", "under the License. 
\"\"\"Module for envelope encryption with KMS.\"\"\" from", "core # Defines in how many bytes the DEK length", "information see https://cloud.google.com/kms/docs/envelope-encryption. The ciphertext structure is as follows: *", "can be valid. if dek_len > (ct_len - DEK_LEN_BYTES) or", "compliance with the License. # You may obtain a copy", "agreed to in writing, software # distributed under the License", "distributed under the License is distributed on an \"AS IS\"", "def decrypt(self, ciphertext: bytes, associated_data: bytes) -> bytes: ct_len =", "tink.proto import tink_pb2 from tink import aead from tink import", "remote AEAD encrypted_dek_bytes = ciphertext[DEK_LEN_BYTES:DEK_LEN_BYTES + dek_len] dek_bytes = self.remote_aead.decrypt(encrypted_dek_bytes,", "will be encoded. DEK_LEN_BYTES = 4 class KmsEnvelopeAead(aead.Aead): \"\"\"Implements envelope", "# Decrypt DEK with remote AEAD encrypted_dek_bytes = ciphertext[DEK_LEN_BYTES:DEK_LEN_BYTES +", "be valid. if dek_len > (ct_len - DEK_LEN_BYTES) or dek_len", "and the encrypted DEK is attached to the ciphertext. In", "express or implied. # See the License for the specific", "the ciphertext, the DEK first has to be decrypted by", "except in compliance with the License. # You may obtain", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "not use this file except in compliance with the License.", "is then send to a KMS to be encrypted and", "key (DEK) which is used to encrypt the payload. The", "writing, software # distributed under the License is distributed on", "new key from template dek = core.Registry.new_key_data(self.key_template) dek_aead = core.Registry.primitive(dek,", "4 bytes * AEAD payload: variable length \"\"\" def __init__(self,", "you may not use this file except in compliance with", "__future__ import absolute_import from __future__ import division # Placeholder for", "check if DEK length can be valid. 
if dek_len >", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "as big endian enc_dek_len = struct.pack('>I', len(encrypted_dek)) return enc_dek_len +", "License. \"\"\"Module for envelope encryption with KMS.\"\"\" from __future__ import", "the License. \"\"\"Module for envelope encryption with KMS.\"\"\" from __future__", "ciphertext: bytes, associated_data: bytes) -> bytes: ct_len = len(ciphertext) #", "# limitations under the License. \"\"\"Module for envelope encryption with", "bytes * AEAD payload: variable length \"\"\" def __init__(self, key_template:", "CONDITIONS OF ANY KIND, either express or implied. # See", "from __future__ import division # Placeholder for import for type", "many bytes the DEK length will be encoded. DEK_LEN_BYTES =", "template dek = core.Registry.new_key_data(self.key_template) dek_aead = core.Registry.primitive(dek, aead.Aead) # Encrypt", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "KMS.\"\"\" from __future__ import absolute_import from __future__ import division #", "variable length \"\"\" def __init__(self, key_template: tink_pb2.KeyTemplate, remote: aead.Aead): self.key_template", "encryption with KMS.\"\"\" from __future__ import absolute_import from __future__ import", "dek.type_url = self.key_template.type_url dek.value = dek_bytes dek.key_material_type = tink_pb2.KeyData.KeyMaterialType.SYMMETRIC dek_aead", "tink_pb2.KeyTemplate, remote: aead.Aead): self.key_template = key_template self.remote_aead = remote def", "# Construct ciphertext, DEK length encoded as big endian enc_dek_len", "variable length, specified by the previous 4 bytes * AEAD", "0: raise core.TinkError # Decrypt DEK with remote AEAD encrypted_dek_bytes", "ciphertext = dek_aead.encrypt(plaintext, associated_data) # Wrap DEK key values with", "from tink import aead from tink import core # Defines", "encryption. 
Envelope encryption generates a data encryption key (DEK) which", "enc_dek_len = struct.pack('>I', len(encrypted_dek)) return enc_dek_len + encrypted_dek + ciphertext", "to be encrypted and the encrypted DEK is attached to", "def __init__(self, key_template: tink_pb2.KeyTemplate, remote: aead.Aead): self.key_template = key_template self.remote_aead", "OR CONDITIONS OF ANY KIND, either express or implied. #", "can be used to decrypt the ciphertext. For further information", "endian) * Encrypted DEK: variable length, specified by the previous", "decrypt the ciphertext. For further information see https://cloud.google.com/kms/docs/envelope-encryption. The ciphertext", "the License is distributed on an \"AS IS\" BASIS, #", "# Encrypt plaintext ciphertext = dek_aead.encrypt(plaintext, associated_data) # Wrap DEK", "In order to decrypt the ciphertext, the DEK first has", "# Wrap DEK key values with remote encrypted_dek = self.remote_aead.encrypt(dek.value,", "first has to be decrypted by the KMS, and then", "aead.Aead) # Encrypt plaintext ciphertext = dek_aead.encrypt(plaintext, associated_data) # Wrap", "ciphertext[0:DEK_LEN_BYTES])[0] # Basic check if DEK length can be valid.", "bytes the DEK length will be encoded. 
DEK_LEN_BYTES = 4", "print_function import struct from tink.proto import tink_pb2 from tink import", "self.remote_aead.encrypt(dek.value, b'') # Construct ciphertext, DEK length encoded as big", "+ encrypted_dek + ciphertext def decrypt(self, ciphertext: bytes, associated_data: bytes)", "dek_aead = core.Registry.primitive(dek, aead.Aead) # Extract ciphertext payload and decrypt", "and decrypt ct_bytes = ciphertext[DEK_LEN_BYTES + dek_len:] return dek_aead.decrypt(ct_bytes, associated_data)", "struct.unpack('>I', ciphertext[0:DEK_LEN_BYTES])[0] # Basic check if DEK length can be", "law or agreed to in writing, software # distributed under", "has to be decrypted by the KMS, and then the", "be encrypted and the encrypted DEK is attached to the", "DEK length encoded as big endian enc_dek_len = struct.pack('>I', len(encrypted_dek))", "= len(ciphertext) # Recover DEK length if ct_len < DEK_LEN_BYTES:", "as follows: * Length of the encrypted DEK: 4 bytes", "ct_len < DEK_LEN_BYTES: raise core.TinkError dek_len = struct.unpack('>I', ciphertext[0:DEK_LEN_BYTES])[0] #", "may obtain a copy of the License at # #", "encoded. DEK_LEN_BYTES = 4 class KmsEnvelopeAead(aead.Aead): \"\"\"Implements envelope encryption. Envelope", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "payload: variable length \"\"\" def __init__(self, key_template: tink_pb2.KeyTemplate, remote: aead.Aead):", "* Encrypted DEK: variable length, specified by the previous 4", "class KmsEnvelopeAead(aead.Aead): \"\"\"Implements envelope encryption. 
Envelope encryption generates a data", "may not use this file except in compliance with the", "then the DEK can be used to decrypt the ciphertext.", "a data encryption key (DEK) which is used to encrypt", "encrypted and the encrypted DEK is attached to the ciphertext.", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "with KMS.\"\"\" from __future__ import absolute_import from __future__ import division", "DEK_LEN_BYTES = 4 class KmsEnvelopeAead(aead.Aead): \"\"\"Implements envelope encryption. Envelope encryption", "based on DEK dek = tink_pb2.KeyData() dek.type_url = self.key_template.type_url dek.value", "this file except in compliance with the License. # You", "ciphertext structure is as follows: * Length of the encrypted", "dek_aead = core.Registry.primitive(dek, aead.Aead) # Encrypt plaintext ciphertext = dek_aead.encrypt(plaintext,", "* Length of the encrypted DEK: 4 bytes (big endian)", "struct from tink.proto import tink_pb2 from tink import aead from", "the KMS, and then the DEK can be used to", "values with remote encrypted_dek = self.remote_aead.encrypt(dek.value, b'') # Construct ciphertext,", "ciphertext, the DEK first has to be decrypted by the", "# Defines in how many bytes the DEK length will", "DEK is attached to the ciphertext. In order to decrypt", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "limitations under the License. \"\"\"Module for envelope encryption with KMS.\"\"\"", "# # Licensed under the Apache License, Version 2.0 (the", "(big endian) * Encrypted DEK: variable length, specified by the", "raise core.TinkError dek_len = struct.unpack('>I', ciphertext[0:DEK_LEN_BYTES])[0] # Basic check if", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "https://cloud.google.com/kms/docs/envelope-encryption. 
The ciphertext structure is as follows: * Length of", "Construct ciphertext, DEK length encoded as big endian enc_dek_len =", "key_template self.remote_aead = remote def encrypt(self, plaintext: bytes, associated_data: bytes)", "by the KMS, and then the DEK can be used", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "further information see https://cloud.google.com/kms/docs/envelope-encryption. The ciphertext structure is as follows:", "2020 Google LLC. # # Licensed under the Apache License,", "dek_bytes = self.remote_aead.decrypt(encrypted_dek_bytes, b'') # Get AEAD primitive based on", "of the encrypted DEK: 4 bytes (big endian) * Encrypted", "= 4 class KmsEnvelopeAead(aead.Aead): \"\"\"Implements envelope encryption. Envelope encryption generates", "the DEK length will be encoded. DEK_LEN_BYTES = 4 class", "encrypted_dek_bytes = ciphertext[DEK_LEN_BYTES:DEK_LEN_BYTES + dek_len] dek_bytes = self.remote_aead.decrypt(encrypted_dek_bytes, b'') #", "DEK length can be valid. if dek_len > (ct_len -", "= core.Registry.primitive(dek, aead.Aead) # Extract ciphertext payload and decrypt ct_bytes", "DEK: variable length, specified by the previous 4 bytes *", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "dek = tink_pb2.KeyData() dek.type_url = self.key_template.type_url dek.value = dek_bytes dek.key_material_type", "= dek_aead.encrypt(plaintext, associated_data) # Wrap DEK key values with remote", "primitive based on DEK dek = tink_pb2.KeyData() dek.type_url = self.key_template.type_url", "dek_bytes dek.key_material_type = tink_pb2.KeyData.KeyMaterialType.SYMMETRIC dek_aead = core.Registry.primitive(dek, aead.Aead) # Extract", "ciphertext payload and decrypt ct_bytes = ciphertext[DEK_LEN_BYTES + dek_len:] return", "or implied. # See the License for the specific language", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "aead.Aead) # Extract ciphertext payload and decrypt ct_bytes = ciphertext[DEK_LEN_BYTES", "dek.value = dek_bytes dek.key_material_type = tink_pb2.KeyData.KeyMaterialType.SYMMETRIC dek_aead = core.Registry.primitive(dek, aead.Aead)", "type annotations from __future__ import print_function import struct from tink.proto", "* AEAD payload: variable length \"\"\" def __init__(self, key_template: tink_pb2.KeyTemplate,", "KMS to be encrypted and the encrypted DEK is attached", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "ct_len = len(ciphertext) # Recover DEK length if ct_len <", "send to a KMS to be encrypted and the encrypted", "to decrypt the ciphertext. For further information see https://cloud.google.com/kms/docs/envelope-encryption. The", "Placeholder for import for type annotations from __future__ import print_function", "language governing permissions and # limitations under the License. \"\"\"Module", "length will be encoded. DEK_LEN_BYTES = 4 class KmsEnvelopeAead(aead.Aead): \"\"\"Implements", "structure is as follows: * Length of the encrypted DEK:", "Length of the encrypted DEK: 4 bytes (big endian) *", "(the \"License\"); # you may not use this file except", "# you may not use this file except in compliance", "import core # Defines in how many bytes the DEK", "used to decrypt the ciphertext. For further information see https://cloud.google.com/kms/docs/envelope-encryption.", "For further information see https://cloud.google.com/kms/docs/envelope-encryption. The ciphertext structure is as", "with remote encrypted_dek = self.remote_aead.encrypt(dek.value, b'') # Construct ciphertext, DEK", "ciphertext. For further information see https://cloud.google.com/kms/docs/envelope-encryption. 
The ciphertext structure is", "and then the DEK can be used to decrypt the", "self.key_template.type_url dek.value = dek_bytes dek.key_material_type = tink_pb2.KeyData.KeyMaterialType.SYMMETRIC dek_aead = core.Registry.primitive(dek,", "the encrypted DEK is attached to the ciphertext. In order", "self.remote_aead.decrypt(encrypted_dek_bytes, b'') # Get AEAD primitive based on DEK dek", "b'') # Construct ciphertext, DEK length encoded as big endian", "# # Unless required by applicable law or agreed to", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "generates a data encryption key (DEK) which is used to", "= dek_bytes dek.key_material_type = tink_pb2.KeyData.KeyMaterialType.SYMMETRIC dek_aead = core.Registry.primitive(dek, aead.Aead) #", "__future__ import print_function import struct from tink.proto import tink_pb2 from", "Version 2.0 (the \"License\"); # you may not use this", "struct.pack('>I', len(encrypted_dek)) return enc_dek_len + encrypted_dek + ciphertext def decrypt(self,", "encryption generates a data encryption key (DEK) which is used", "by the previous 4 bytes * AEAD payload: variable length", "implied. # See the License for the specific language governing", "DEK length will be encoded. DEK_LEN_BYTES = 4 class KmsEnvelopeAead(aead.Aead):", "under the Apache License, Version 2.0 (the \"License\"); # you", "length, specified by the previous 4 bytes * AEAD payload:", "- DEK_LEN_BYTES) or dek_len < 0: raise core.TinkError # Decrypt", "DEK with remote AEAD encrypted_dek_bytes = ciphertext[DEK_LEN_BYTES:DEK_LEN_BYTES + dek_len] dek_bytes", "from tink.proto import tink_pb2 from tink import aead from tink", "import aead from tink import core # Defines in how", "by applicable law or agreed to in writing, software #", "is as follows: * Length of the encrypted DEK: 4", "remote def encrypt(self, plaintext: bytes, associated_data: bytes) -> bytes: #", "# Basic check if DEK length can be valid. if", "encrypt the payload. 
The DEK is then send to a", "remote encrypted_dek = self.remote_aead.encrypt(dek.value, b'') # Construct ciphertext, DEK length", "Copyright 2020 Google LLC. # # Licensed under the Apache", "to a KMS to be encrypted and the encrypted DEK", "envelope encryption with KMS.\"\"\" from __future__ import absolute_import from __future__", "plaintext: bytes, associated_data: bytes) -> bytes: # Get new key", "bytes, associated_data: bytes) -> bytes: ct_len = len(ciphertext) # Recover", "decrypted by the KMS, and then the DEK can be", "bytes: ct_len = len(ciphertext) # Recover DEK length if ct_len", "the previous 4 bytes * AEAD payload: variable length \"\"\"", "\"\"\"Implements envelope encryption. Envelope encryption generates a data encryption key", "length if ct_len < DEK_LEN_BYTES: raise core.TinkError dek_len = struct.unpack('>I',", "4 class KmsEnvelopeAead(aead.Aead): \"\"\"Implements envelope encryption. Envelope encryption generates a", "-> bytes: ct_len = len(ciphertext) # Recover DEK length if", "Encrypt plaintext ciphertext = dek_aead.encrypt(plaintext, associated_data) # Wrap DEK key", "specified by the previous 4 bytes * AEAD payload: variable", "decrypt the ciphertext, the DEK first has to be decrypted", "tink_pb2.KeyData() dek.type_url = self.key_template.type_url dek.value = dek_bytes dek.key_material_type = tink_pb2.KeyData.KeyMaterialType.SYMMETRIC", "key_template: tink_pb2.KeyTemplate, remote: aead.Aead): self.key_template = key_template self.remote_aead = remote", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "Unless required by applicable law or agreed to in writing,", "a KMS to be encrypted and the encrypted DEK is", "the specific language governing permissions and # limitations under the", "dek_len] dek_bytes = self.remote_aead.decrypt(encrypted_dek_bytes, b'') # Get AEAD primitive based", "ciphertext, DEK length encoded as big endian enc_dek_len = struct.pack('>I',", "associated_data) # Wrap DEK key values with remote encrypted_dek =", 
"applicable law or agreed to in writing, software # distributed", "encryption key (DEK) which is used to encrypt the payload.", "\"\"\"Module for envelope encryption with KMS.\"\"\" from __future__ import absolute_import", "ciphertext. In order to decrypt the ciphertext, the DEK first", "import absolute_import from __future__ import division # Placeholder for import", "from tink import core # Defines in how many bytes", "DEK is then send to a KMS to be encrypted", "enc_dek_len + encrypted_dek + ciphertext def decrypt(self, ciphertext: bytes, associated_data:", "or dek_len < 0: raise core.TinkError # Decrypt DEK with", "is attached to the ciphertext. In order to decrypt the", "in writing, software # distributed under the License is distributed", "Basic check if DEK length can be valid. if dek_len", "dek.key_material_type = tink_pb2.KeyData.KeyMaterialType.SYMMETRIC dek_aead = core.Registry.primitive(dek, aead.Aead) # Extract ciphertext", "= self.remote_aead.encrypt(dek.value, b'') # Construct ciphertext, DEK length encoded as", "payload and decrypt ct_bytes = ciphertext[DEK_LEN_BYTES + dek_len:] return dek_aead.decrypt(ct_bytes,", "length can be valid. if dek_len > (ct_len - DEK_LEN_BYTES)", "= struct.unpack('>I', ciphertext[0:DEK_LEN_BYTES])[0] # Basic check if DEK length can", "plaintext ciphertext = dek_aead.encrypt(plaintext, associated_data) # Wrap DEK key values", "envelope encryption. 
Envelope encryption generates a data encryption key (DEK)", "The DEK is then send to a KMS to be", "import division # Placeholder for import for type annotations from", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "AEAD primitive based on DEK dek = tink_pb2.KeyData() dek.type_url =", "# Get new key from template dek = core.Registry.new_key_data(self.key_template) dek_aead", "License, Version 2.0 (the \"License\"); # you may not use", "bytes) -> bytes: # Get new key from template dek", "# You may obtain a copy of the License at", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "from template dek = core.Registry.new_key_data(self.key_template) dek_aead = core.Registry.primitive(dek, aead.Aead) #", "ciphertext[DEK_LEN_BYTES:DEK_LEN_BYTES + dek_len] dek_bytes = self.remote_aead.decrypt(encrypted_dek_bytes, b'') # Get AEAD", "> (ct_len - DEK_LEN_BYTES) or dek_len < 0: raise core.TinkError", "the License for the specific language governing permissions and #", "core.TinkError dek_len = struct.unpack('>I', ciphertext[0:DEK_LEN_BYTES])[0] # Basic check if DEK", "__future__ import division # Placeholder for import for type annotations", "Apache License, Version 2.0 (the \"License\"); # you may not", "len(encrypted_dek)) return enc_dek_len + encrypted_dek + ciphertext def decrypt(self, ciphertext:", "either express or implied. 
# See the License for the", "DEK dek = tink_pb2.KeyData() dek.type_url = self.key_template.type_url dek.value = dek_bytes", "Get new key from template dek = core.Registry.new_key_data(self.key_template) dek_aead =", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "Recover DEK length if ct_len < DEK_LEN_BYTES: raise core.TinkError dek_len", "DEK_LEN_BYTES) or dek_len < 0: raise core.TinkError # Decrypt DEK", "core.TinkError # Decrypt DEK with remote AEAD encrypted_dek_bytes = ciphertext[DEK_LEN_BYTES:DEK_LEN_BYTES", "= core.Registry.new_key_data(self.key_template) dek_aead = core.Registry.primitive(dek, aead.Aead) # Encrypt plaintext ciphertext", "for envelope encryption with KMS.\"\"\" from __future__ import absolute_import from", "bytes, associated_data: bytes) -> bytes: # Get new key from", "used to encrypt the payload. The DEK is then send", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "division # Placeholder for import for type annotations from __future__", "KmsEnvelopeAead(aead.Aead): \"\"\"Implements envelope encryption. Envelope encryption generates a data encryption", "def encrypt(self, plaintext: bytes, associated_data: bytes) -> bytes: # Get", "raise core.TinkError # Decrypt DEK with remote AEAD encrypted_dek_bytes =", "the payload. The DEK is then send to a KMS", "dek_len > (ct_len - DEK_LEN_BYTES) or dek_len < 0: raise", "= tink_pb2.KeyData.KeyMaterialType.SYMMETRIC dek_aead = core.Registry.primitive(dek, aead.Aead) # Extract ciphertext payload", "= self.key_template.type_url dek.value = dek_bytes dek.key_material_type = tink_pb2.KeyData.KeyMaterialType.SYMMETRIC dek_aead =", "Decrypt DEK with remote AEAD encrypted_dek_bytes = ciphertext[DEK_LEN_BYTES:DEK_LEN_BYTES + dek_len]", "for type annotations from __future__ import print_function import struct from", "follows: * Length of the encrypted DEK: 4 bytes (big", "be encoded. 
DEK_LEN_BYTES = 4 class KmsEnvelopeAead(aead.Aead): \"\"\"Implements envelope encryption.", "DEK first has to be decrypted by the KMS, and", "\"License\"); # you may not use this file except in", "if DEK length can be valid. if dek_len > (ct_len", "Extract ciphertext payload and decrypt ct_bytes = ciphertext[DEK_LEN_BYTES + dek_len:]", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "from __future__ import absolute_import from __future__ import division # Placeholder", "# distributed under the License is distributed on an \"AS", "= remote def encrypt(self, plaintext: bytes, associated_data: bytes) -> bytes:", "core.Registry.primitive(dek, aead.Aead) # Encrypt plaintext ciphertext = dek_aead.encrypt(plaintext, associated_data) #", "# Unless required by applicable law or agreed to in", "to the ciphertext. In order to decrypt the ciphertext, the", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "the DEK can be used to decrypt the ciphertext. For", "governing permissions and # limitations under the License. \"\"\"Module for", "Get AEAD primitive based on DEK dek = tink_pb2.KeyData() dek.type_url", "aead from tink import core # Defines in how many", "dek_aead.encrypt(plaintext, associated_data) # Wrap DEK key values with remote encrypted_dek", "be used to decrypt the ciphertext. For further information see", "and # limitations under the License. 
\"\"\"Module for envelope encryption", "endian enc_dek_len = struct.pack('>I', len(encrypted_dek)) return enc_dek_len + encrypted_dek +", "You may obtain a copy of the License at #", "from __future__ import print_function import struct from tink.proto import tink_pb2", "tink_pb2.KeyData.KeyMaterialType.SYMMETRIC dek_aead = core.Registry.primitive(dek, aead.Aead) # Extract ciphertext payload and", "key from template dek = core.Registry.new_key_data(self.key_template) dek_aead = core.Registry.primitive(dek, aead.Aead)", "bytes (big endian) * Encrypted DEK: variable length, specified by", "the Apache License, Version 2.0 (the \"License\"); # you may", "= struct.pack('>I', len(encrypted_dek)) return enc_dek_len + encrypted_dek + ciphertext def", "core.Registry.primitive(dek, aead.Aead) # Extract ciphertext payload and decrypt ct_bytes =", "= key_template self.remote_aead = remote def encrypt(self, plaintext: bytes, associated_data:" ]
[ "else: r = can.recv(0) if r[0] == 0x7FF+1 and r[3]", "reason): print('cb1') if reason == 0: print('pending') if reason ==", "print(can) try: can.send('abcde', 0x7FF + 1) except ValueError: print('failed') else:", "can # Testing extended IDs can = CAN(1, CAN.LOOPBACK, extframe", "r[0] == 0x7FF+1 and r[3] == b'abcde': print('passed') else: print('failed,", "2: print('overflow') def cb1a(bus, reason): print('cb1a') if reason == 0:", "reason == 2: print('overflow') def cb1(bus, reason): print('cb1') if reason", "if reason == 0: print('pending') if reason == 1: print('full')", "0x7FF) except ValueError: print('passed') else: print('failed') del can # Testing", "can.setfilter(0, CAN.MASK32, 0, (0, 0)) print(can) try: can.send('abcde', 0x7FF +", "CAN.MASK32, 0, (0, 0)) print(can) try: can.send('abcde', 0x7FF + 1)", "can.send('77777777',7) can.rxcallback(1, cb1a) can.send('88888888',8) print(can.recv(0)) print(can.recv(0)) print(can.recv(0)) print(can.recv(1)) print(can.recv(1)) print(can.recv(1))", "1: print('full') if reason == 2: print('overflow') def cb1(bus, reason):", "can # Test RxCallbacks can = CAN(1, CAN.LOOPBACK) can.setfilter(0, CAN.LIST16,", "CAN.MASK16, 0, (0, 0, 0, 0)) can.send('abcd', 123) print(can.any(0)) print(can.recv(0))", "0x7FF + 1) print(can.recv(0)) # Test too long message try:", "2: print('overflow') def cb0a(bus, reason): print('cb0a') if reason == 0:", "can.send('88888888',8) print(can.recv(0)) print(can.recv(0)) print(can.recv(0)) print(can.recv(1)) print(can.recv(1)) print(can.recv(1)) can.send('11111111',1) can.send('55555555',5) print(can.recv(0))", "long message try: can.send('abcdefghi', 0x7FF) except ValueError: print('passed') else: print('failed')", "message try: can.send('abcdefghi', 0x7FF) except ValueError: print('passed') else: print('failed') del", "can.send('abcd', 123) print(can.any(0)) print(can.recv(0)) can.send('abcd', -1) print(can.recv(0)) can.send('abcd', 0x7FF +", "cb0a(bus, reason): print('cb0a') if reason == 0: 
print('pending') if reason", "if reason == 1: print('full') if reason == 2: print('overflow')", "print('cb0a') if reason == 0: print('pending') if reason == 1:", "if r[0] == 0x7FF+1 and r[3] == b'abcde': print('passed') else:", "can = CAN(1, CAN.LOOPBACK) can.setfilter(0, CAN.LIST16, 0, (1, 2, 3,", "== 1: print('full') if reason == 2: print('overflow') def cb1a(bus,", "0x7FF + 1) except ValueError: print('failed') else: r = can.recv(0)", "all filter can.setfilter(0, CAN.MASK16, 0, (0, 0, 0, 0)) can.send('abcd',", "cb0(bus, reason): print('cb0') if reason == 0: print('pending') if reason", "del can # Testing extended IDs can = CAN(1, CAN.LOOPBACK,", "if reason == 2: print('overflow') can.rxcallback(0, cb0) can.rxcallback(1, cb1) can.send('11111111',1)", "print('overflow') def cb1(bus, reason): print('cb1') if reason == 0: print('pending')", "7, 8)) def cb0(bus, reason): print('cb0') if reason == 0:", "print('full') if reason == 2: print('overflow') def cb1(bus, reason): print('cb1')", "0)) print(can) try: can.send('abcde', 0x7FF + 1) except ValueError: print('failed')", "(5, 6, 7, 8)) def cb0(bus, reason): print('cb0') if reason", "Catch all filter can.setfilter(0, CAN.MASK32, 0, (0, 0)) print(can) try:", "print('passed') else: print('failed') del can # Testing extended IDs can", "-1) print(can.recv(0)) can.send('abcd', 0x7FF + 1) print(can.recv(0)) # Test too", "print(can.recv(0)) can.send('abcd', 0x7FF + 1) print(can.recv(0)) # Test too long", "can.send('abcd', 0x7FF + 1) print(can.recv(0)) # Test too long message", "RxCallbacks can = CAN(1, CAN.LOOPBACK) can.setfilter(0, CAN.LIST16, 0, (1, 2,", "if reason == 2: print('overflow') def cb0a(bus, reason): print('cb0a') if", "can.rxcallback(1, cb1a) can.send('88888888',8) print(can.recv(0)) print(can.recv(0)) print(can.recv(0)) print(can.recv(1)) print(can.recv(1)) print(can.recv(1)) can.send('11111111',1)", "print('cb1') if reason == 0: print('pending') if reason == 1:", "4)) can.setfilter(1, CAN.LIST16, 1, (5, 6, 7, 
8)) def cb0(bus,", "print(can.recv(0)) # Test too long message try: can.send('abcdefghi', 0x7FF) except", "print(can.recv(0)) print(can.recv(0)) print(can.recv(0)) print(can.recv(1)) print(can.recv(1)) print(can.recv(1)) can.send('11111111',1) can.send('55555555',5) print(can.recv(0)) print(can.recv(1))", "print('full') if reason == 2: print('overflow') def cb1a(bus, reason): print('cb1a')", "1) except ValueError: print('failed') else: r = can.recv(0) if r[0]", "2, 3, 4)) can.setfilter(1, CAN.LIST16, 1, (5, 6, 7, 8))", "def cb0(bus, reason): print('cb0') if reason == 0: print('pending') if", "CAN CAN.initfilterbanks(14) can = CAN(1) print(can) can.init(CAN.LOOPBACK) print(can) print(can.any(0)) #", "CAN.LOOPBACK) can.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4)) can.setfilter(1, CAN.LIST16,", "= CAN(1, CAN.LOOPBACK) can.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4))", "print('full') if reason == 2: print('overflow') def cb0a(bus, reason): print('cb0a')", "+ 1) print(can.recv(0)) # Test too long message try: can.send('abcdefghi',", "print('pending') if reason == 1: print('full') if reason == 2:", "reason): print('cb0a') if reason == 0: print('pending') if reason ==", "can.send('44444444',4) can.send('55555555',5) can.send('66666666',6) can.send('77777777',7) can.rxcallback(1, cb1a) can.send('88888888',8) print(can.recv(0)) print(can.recv(0)) print(can.recv(0))", "r = can.recv(0) if r[0] == 0x7FF+1 and r[3] ==", "print('failed') del can # Testing extended IDs can = CAN(1,", "0, 0)) can.send('abcd', 123) print(can.any(0)) print(can.recv(0)) can.send('abcd', -1) print(can.recv(0)) can.send('abcd',", "print(can.any(0)) print(can.recv(0)) can.send('abcd', -1) print(can.recv(0)) can.send('abcd', 0x7FF + 1) print(can.recv(0))", "0, (0, 0, 0, 0)) can.send('abcd', 123) print(can.any(0)) print(can.recv(0)) can.send('abcd',", "Test RxCallbacks can = CAN(1, CAN.LOOPBACK) can.setfilter(0, CAN.LIST16, 0, (1,", "= can.recv(0) if r[0] == 0x7FF+1 and r[3] == b'abcde':", "0, 0, 0)) 
can.send('abcd', 123) print(can.any(0)) print(can.recv(0)) can.send('abcd', -1) print(can.recv(0))", "CAN.LOOPBACK, extframe = True) # Catch all filter can.setfilter(0, CAN.MASK32,", "cb1) can.send('11111111',1) can.send('22222222',2) can.send('33333333',3) can.rxcallback(0, cb0a) can.send('44444444',4) can.send('55555555',5) can.send('66666666',6) can.send('77777777',7)", "reason == 1: print('full') if reason == 2: print('overflow') can.rxcallback(0,", "IDs can = CAN(1, CAN.LOOPBACK, extframe = True) # Catch", "= CAN(1) print(can) can.init(CAN.LOOPBACK) print(can) print(can.any(0)) # Catch all filter", "r[3] == b'abcde': print('passed') else: print('failed, wrong data received') del", "= True) # Catch all filter can.setfilter(0, CAN.MASK32, 0, (0,", "True) # Catch all filter can.setfilter(0, CAN.MASK32, 0, (0, 0))", "can.rxcallback(1, cb1) can.send('11111111',1) can.send('22222222',2) can.send('33333333',3) can.rxcallback(0, cb0a) can.send('44444444',4) can.send('55555555',5) can.send('66666666',6)", "b'abcde': print('passed') else: print('failed, wrong data received') del can #", "can.send('abcd', -1) print(can.recv(0)) can.send('abcd', 0x7FF + 1) print(can.recv(0)) # Test", "ValueError: print('passed') else: print('failed') del can # Testing extended IDs", "print(can.any(0)) # Catch all filter can.setfilter(0, CAN.MASK16, 0, (0, 0,", "print(can.recv(0)) can.send('abcd', -1) print(can.recv(0)) can.send('abcd', 0x7FF + 1) print(can.recv(0)) #", "can.rxcallback(0, cb0a) can.send('44444444',4) can.send('55555555',5) can.send('66666666',6) can.send('77777777',7) can.rxcallback(1, cb1a) can.send('88888888',8) print(can.recv(0))", "== 1: print('full') if reason == 2: print('overflow') can.rxcallback(0, cb0)", "pyb import CAN CAN.initfilterbanks(14) can = CAN(1) print(can) can.init(CAN.LOOPBACK) print(can)", "cb0a) can.send('44444444',4) can.send('55555555',5) can.send('66666666',6) can.send('77777777',7) can.rxcallback(1, cb1a) can.send('88888888',8) print(can.recv(0)) 
print(can.recv(0))", "2: print('overflow') can.rxcallback(0, cb0) can.rxcallback(1, cb1) can.send('11111111',1) can.send('22222222',2) can.send('33333333',3) can.rxcallback(0,", "0, (0, 0)) print(can) try: can.send('abcde', 0x7FF + 1) except", "CAN(1, CAN.LOOPBACK, extframe = True) # Catch all filter can.setfilter(0,", "reason == 1: print('full') if reason == 2: print('overflow') def", "6, 7, 8)) def cb0(bus, reason): print('cb0') if reason ==", "== 2: print('overflow') def cb1a(bus, reason): print('cb1a') if reason ==", "# Test too long message try: can.send('abcdefghi', 0x7FF) except ValueError:", "CAN.LIST16, 1, (5, 6, 7, 8)) def cb0(bus, reason): print('cb0')", "== 0x7FF+1 and r[3] == b'abcde': print('passed') else: print('failed, wrong", "can.send('11111111',1) can.send('22222222',2) can.send('33333333',3) can.rxcallback(0, cb0a) can.send('44444444',4) can.send('55555555',5) can.send('66666666',6) can.send('77777777',7) can.rxcallback(1,", "cb1a(bus, reason): print('cb1a') if reason == 0: print('pending') if reason", "if reason == 2: print('overflow') def cb1(bus, reason): print('cb1') if", "can.setfilter(0, CAN.MASK16, 0, (0, 0, 0, 0)) can.send('abcd', 123) print(can.any(0))", "== 1: print('full') if reason == 2: print('overflow') def cb1(bus,", "reason): print('cb1a') if reason == 0: print('pending') if reason ==", "try: can.send('abcdefghi', 0x7FF) except ValueError: print('passed') else: print('failed') del can", "filter can.setfilter(0, CAN.MASK16, 0, (0, 0, 0, 0)) can.send('abcd', 123)", "def cb1a(bus, reason): print('cb1a') if reason == 0: print('pending') if", "from pyb import CAN CAN.initfilterbanks(14) can = CAN(1) print(can) can.init(CAN.LOOPBACK)", "print('passed') else: print('failed, wrong data received') del can # Test", "ValueError: print('failed') else: r = can.recv(0) if r[0] == 0x7FF+1", "print('cb0') if reason == 0: print('pending') if reason == 1:", "print('full') if reason == 2: print('overflow') can.rxcallback(0, cb0) can.rxcallback(1, 
cb1)", "cb0) can.rxcallback(1, cb1) can.send('11111111',1) can.send('22222222',2) can.send('33333333',3) can.rxcallback(0, cb0a) can.send('44444444',4) can.send('55555555',5)", "1) print(can.recv(0)) # Test too long message try: can.send('abcdefghi', 0x7FF)", "import CAN CAN.initfilterbanks(14) can = CAN(1) print(can) can.init(CAN.LOOPBACK) print(can) print(can.any(0))", "2: print('overflow') def cb1(bus, reason): print('cb1') if reason == 0:", "can.send('33333333',3) can.rxcallback(0, cb0a) can.send('44444444',4) can.send('55555555',5) can.send('66666666',6) can.send('77777777',7) can.rxcallback(1, cb1a) can.send('88888888',8)", "try: can.send('abcde', 0x7FF + 1) except ValueError: print('failed') else: r", "Test too long message try: can.send('abcdefghi', 0x7FF) except ValueError: print('passed')", "== 1: print('full') if reason == 2: print('overflow') def cb0a(bus,", "CAN.LIST16, 0, (1, 2, 3, 4)) can.setfilter(1, CAN.LIST16, 1, (5,", "can.rxcallback(0, cb0) can.rxcallback(1, cb1) can.send('11111111',1) can.send('22222222',2) can.send('33333333',3) can.rxcallback(0, cb0a) can.send('44444444',4)", "and r[3] == b'abcde': print('passed') else: print('failed, wrong data received')", "if reason == 2: print('overflow') def cb1a(bus, reason): print('cb1a') if", "print(can) print(can.any(0)) # Catch all filter can.setfilter(0, CAN.MASK16, 0, (0,", "else: print('failed') del can # Testing extended IDs can =", "except ValueError: print('failed') else: r = can.recv(0) if r[0] ==", "Catch all filter can.setfilter(0, CAN.MASK16, 0, (0, 0, 0, 0))", "# Test RxCallbacks can = CAN(1, CAN.LOOPBACK) can.setfilter(0, CAN.LIST16, 0,", "else: print('failed, wrong data received') del can # Test RxCallbacks", "can.send('55555555',5) can.send('66666666',6) can.send('77777777',7) can.rxcallback(1, cb1a) can.send('88888888',8) print(can.recv(0)) print(can.recv(0)) print(can.recv(0)) print(can.recv(1))", "filter can.setfilter(0, CAN.MASK32, 0, (0, 0)) print(can) try: can.send('abcde', 
0x7FF", "+ 1) except ValueError: print('failed') else: r = can.recv(0) if", "can.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4)) can.setfilter(1, CAN.LIST16, 1,", "can = CAN(1) print(can) can.init(CAN.LOOPBACK) print(can) print(can.any(0)) # Catch all", "== 0: print('pending') if reason == 1: print('full') if reason", "print('failed, wrong data received') del can # Test RxCallbacks can", "reason): print('cb0') if reason == 0: print('pending') if reason ==", "reason == 2: print('overflow') def cb0a(bus, reason): print('cb0a') if reason", "0)) can.send('abcd', 123) print(can.any(0)) print(can.recv(0)) can.send('abcd', -1) print(can.recv(0)) can.send('abcd', 0x7FF", "print(can) can.init(CAN.LOOPBACK) print(can) print(can.any(0)) # Catch all filter can.setfilter(0, CAN.MASK16,", "(0, 0)) print(can) try: can.send('abcde', 0x7FF + 1) except ValueError:", "= CAN(1, CAN.LOOPBACK, extframe = True) # Catch all filter", "Testing extended IDs can = CAN(1, CAN.LOOPBACK, extframe = True)", "1, (5, 6, 7, 8)) def cb0(bus, reason): print('cb0') if", "extended IDs can = CAN(1, CAN.LOOPBACK, extframe = True) #", "can.recv(0) if r[0] == 0x7FF+1 and r[3] == b'abcde': print('passed')", "received') del can # Test RxCallbacks can = CAN(1, CAN.LOOPBACK)", "can.send('66666666',6) can.send('77777777',7) can.rxcallback(1, cb1a) can.send('88888888',8) print(can.recv(0)) print(can.recv(0)) print(can.recv(0)) print(can.recv(1)) print(can.recv(1))", "0: print('pending') if reason == 1: print('full') if reason ==", "all filter can.setfilter(0, CAN.MASK32, 0, (0, 0)) print(can) try: can.send('abcde',", "extframe = True) # Catch all filter can.setfilter(0, CAN.MASK32, 0,", "reason == 2: print('overflow') def cb1a(bus, reason): print('cb1a') if reason", "CAN.initfilterbanks(14) can = CAN(1) print(can) can.init(CAN.LOOPBACK) print(can) print(can.any(0)) # Catch", "cb1a) can.send('88888888',8) print(can.recv(0)) print(can.recv(0)) print(can.recv(0)) print(can.recv(1)) print(can.recv(1)) print(can.recv(1)) 
can.send('11111111',1) can.send('55555555',5)", "123) print(can.any(0)) print(can.recv(0)) can.send('abcd', -1) print(can.recv(0)) can.send('abcd', 0x7FF + 1)", "(0, 0, 0, 0)) can.send('abcd', 123) print(can.any(0)) print(can.recv(0)) can.send('abcd', -1)", "can.init(CAN.LOOPBACK) print(can) print(can.any(0)) # Catch all filter can.setfilter(0, CAN.MASK16, 0,", "print('cb1a') if reason == 0: print('pending') if reason == 1:", "too long message try: can.send('abcdefghi', 0x7FF) except ValueError: print('passed') else:", "0, (1, 2, 3, 4)) can.setfilter(1, CAN.LIST16, 1, (5, 6,", "def cb1(bus, reason): print('cb1') if reason == 0: print('pending') if", "== 2: print('overflow') def cb0a(bus, reason): print('cb0a') if reason ==", "can.send('abcdefghi', 0x7FF) except ValueError: print('passed') else: print('failed') del can #", "print('overflow') def cb0a(bus, reason): print('cb0a') if reason == 0: print('pending')", "(1, 2, 3, 4)) can.setfilter(1, CAN.LIST16, 1, (5, 6, 7,", "# Catch all filter can.setfilter(0, CAN.MASK16, 0, (0, 0, 0,", "data received') del can # Test RxCallbacks can = CAN(1,", "del can # Test RxCallbacks can = CAN(1, CAN.LOOPBACK) can.setfilter(0,", "def cb0a(bus, reason): print('cb0a') if reason == 0: print('pending') if", "1: print('full') if reason == 2: print('overflow') can.rxcallback(0, cb0) can.rxcallback(1,", "print('overflow') can.rxcallback(0, cb0) can.rxcallback(1, cb1) can.send('11111111',1) can.send('22222222',2) can.send('33333333',3) can.rxcallback(0, cb0a)", "# Testing extended IDs can = CAN(1, CAN.LOOPBACK, extframe =", "can.setfilter(1, CAN.LIST16, 1, (5, 6, 7, 8)) def cb0(bus, reason):", "1: print('full') if reason == 2: print('overflow') def cb0a(bus, reason):", "1: print('full') if reason == 2: print('overflow') def cb1a(bus, reason):", "0x7FF+1 and r[3] == b'abcde': print('passed') else: print('failed, wrong data", "CAN(1, CAN.LOOPBACK) can.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4)) can.setfilter(1,", "print('overflow') def 
cb1a(bus, reason): print('cb1a') if reason == 0: print('pending')", "wrong data received') del can # Test RxCallbacks can =", "== 2: print('overflow') can.rxcallback(0, cb0) can.rxcallback(1, cb1) can.send('11111111',1) can.send('22222222',2) can.send('33333333',3)", "3, 4)) can.setfilter(1, CAN.LIST16, 1, (5, 6, 7, 8)) def", "# Catch all filter can.setfilter(0, CAN.MASK32, 0, (0, 0)) print(can)", "can.send('abcde', 0x7FF + 1) except ValueError: print('failed') else: r =", "8)) def cb0(bus, reason): print('cb0') if reason == 0: print('pending')", "cb1(bus, reason): print('cb1') if reason == 0: print('pending') if reason", "reason == 2: print('overflow') can.rxcallback(0, cb0) can.rxcallback(1, cb1) can.send('11111111',1) can.send('22222222',2)", "can.send('22222222',2) can.send('33333333',3) can.rxcallback(0, cb0a) can.send('44444444',4) can.send('55555555',5) can.send('66666666',6) can.send('77777777',7) can.rxcallback(1, cb1a)", "can = CAN(1, CAN.LOOPBACK, extframe = True) # Catch all", "except ValueError: print('passed') else: print('failed') del can # Testing extended", "reason == 0: print('pending') if reason == 1: print('full') if", "== 2: print('overflow') def cb1(bus, reason): print('cb1') if reason ==", "CAN(1) print(can) can.init(CAN.LOOPBACK) print(can) print(can.any(0)) # Catch all filter can.setfilter(0,", "print('failed') else: r = can.recv(0) if r[0] == 0x7FF+1 and", "== b'abcde': print('passed') else: print('failed, wrong data received') del can" ]
[ "return RootBlock(RootBlockHeader(create_time=now, extra_data=b\"{}\")) miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, create, None, remote=True) async", "self.assertEqual(len(self.added_blocks), 2) def test_sha3sha3(self): miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, None, None) block", "miner.get_work(now=now) self.assertEqual(work.hash, h) self.assertEqual(len(miner.work_map), 1) # new work if interval", "self.assertEqual(work.height, 0) # guardian: diff 1000 -> 1, any number", "= DoubleSHA256(work) sol = solver.mine(100, 200).nonce self.assertGreater(sol, 100) # ensure", "ConsensusType.POW_SHA3SHA3, create, add, remote=True, # fake pk, will succeed in", "then end loop = asyncio.get_event_loop() loop.run_until_complete(miner._mine_new_block_async()) self.assertEqual(len(self.added_blocks), 5) def test_simulate_mine_handle_block_exception(self):", "= self.miner_gen(ConsensusType.POW_QKCHASH, None, None) block = RootBlock( RootBlockHeader(create_time=42, difficulty=5), tracking_data=\"{}\".encode(\"utf-8\"),", "work = await miner.get_work(now=now) self.assertEqual(work.hash, h) self.assertEqual(len(miner.work_map), 1) # new", "self.added_blocks = [] @staticmethod def get_mining_params(rounds: Optional[int] = None): #", "self.assertTrue(res) self.assertEqual(miner.work_map, {}) self.assertEqual(len(self.added_blocks), 1) self.assertIsNone(miner.current_work) loop = asyncio.get_event_loop() loop.run_until_complete(go())", "42 block = RootBlock( RootBlockHeader(create_time=42, extra_data=b\"{}\", difficulty=1000) ) async def", "block.header.nonce = mined_res.nonce block.header.mixhash = mined_res.mixhash validate_seal(block.header, ConsensusType.POW_QKCHASH) def test_only_remote(self):", "def create(retry=True): if len(self.added_blocks) >= 5: return None # stop", "mined_res.mixhash validate_seal(block.header, ConsensusType.POW_QKCHASH) def test_only_remote(self): async def go(): miner =", "typing import Optional from quarkchain.cluster.miner import 
DoubleSHA256, Miner, MiningWork, validate_seal", "RootBlock, RootBlockHeader from quarkchain.p2p import ecies from quarkchain.utils import sha3_256", "invalid pow proof res = await miner.submit_work(work.hash, non_sol, sha3_256(b\"\")) self.assertFalse(res)", "added loop = asyncio.get_event_loop() loop.run_until_complete(miner._mine_new_block_async()) self.assertEqual(len(self.added_blocks), 2) def test_sha3sha3(self): miner", "await miner.get_work(now=now) self.assertEqual(len(miner.work_map), 2) self.assertNotEqual(work.hash, h) # work map cleaned", "= mined_res.mixhash validate_seal(block.header, ConsensusType.POW_QKCHASH) def test_only_remote(self): async def go(): miner", "1, any number should work res = await miner.submit_work(work.hash, i,", "the game return RootBlock( RootBlockHeader(create_time=int(time.time())), tracking_data=\"{}\".encode(\"utf-8\"), ) async def add(block):", "quarkchain.core import RootBlock, RootBlockHeader from quarkchain.p2p import ecies from quarkchain.utils", "validate_seal from quarkchain.config import ConsensusType from quarkchain.core import RootBlock, RootBlockHeader", "+= 10 work = await miner.get_work(now=now) self.assertEqual(len(miner.work_map), 2) self.assertNotEqual(work.hash, h)", "asyncio import time import unittest from typing import Optional from", "= 0 with self.assertRaises(ValueError): validate_seal(block.header, ConsensusType.POW_SHA3SHA3) # significantly lowering the", "0: raise Exception(\"(╯°□°)╯︵ ┻━┻\") else: self.added_blocks.append(block) finally: i += 1", "submitted block doesn't exist res = await miner.submit_work(b\"lolwut\", 0, sha3_256(b\"\"))", "add(block): nonlocal miner self.added_blocks.append(block) for consensus in ( ConsensusType.POW_SIMULATE, ConsensusType.POW_ETHASH,", "block.header.nonce = mined_res.nonce validate_seal(block.header, ConsensusType.POW_SHA3SHA3) def test_qkchash(self): miner = self.miner_gen(ConsensusType.POW_QKCHASH,", "target time is hit ret = {\"target_block_time\": 0.0, 
\"is_test\": True}", "test_sha3sha3(self): miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, None, None) block = RootBlock( RootBlockHeader(create_time=42,", "**kwargs): m = Miner( consensus, create_func, add_func, self.get_mining_params, **kwargs )", "from typing import Optional from quarkchain.cluster.miner import DoubleSHA256, Miner, MiningWork,", "in range(42, 100): work = await miner.get_work(now=now) self.assertEqual(work.height, 0) #", "def add(block): nonlocal miner self.added_blocks.append(block) for consensus in ( ConsensusType.POW_SIMULATE,", "0 with self.assertRaises(ValueError): validate_seal(block.header, ConsensusType.POW_SHA3SHA3) # significantly lowering the diff", "= RootBlock( RootBlockHeader(create_time=42, extra_data=b\"{}\", difficulty=1000) ) async def create(retry=True): return", "extra_data=b\"{}\", difficulty=5) ) async def create(retry=True): return block async def", "miner.input_q.put((None, {})) miner.mine_loop( work, {\"consensus_type\": ConsensusType.POW_QKCHASH}, miner.input_q, miner.output_q, ) mined_res", "RootBlockHeader from quarkchain.p2p import ecies from quarkchain.utils import sha3_256 class", "miner try: if i % 2 == 0: raise Exception(\"(╯°□°)╯︵", "doesn't exist res = await miner.submit_work(b\"lolwut\", 0, sha3_256(b\"\")) self.assertFalse(res) solver", "sha3_256(b\"\")) self.assertTrue(res) loop = asyncio.get_event_loop() loop.run_until_complete(go()) def test_validate_seal_with_adjusted_diff(self): diff =", "Miner( consensus, create_func, add_func, self.get_mining_params, **kwargs ) m.enabled = True", "from quarkchain.config import ConsensusType from quarkchain.core import RootBlock, RootBlockHeader from", "= await miner.get_work(now=now) self.assertEqual(work.height, 0) self.assertEqual(work.difficulty, 5) # submitted block", "{}) self.assertEqual(len(self.added_blocks), 1) self.assertIsNone(miner.current_work) loop = asyncio.get_event_loop() loop.run_until_complete(go()) def test_submit_work_with_guardian(self):", "5: 
return None # stop the game return RootBlock( RootBlockHeader(create_time=int(time.time())),", "for consensus in ( ConsensusType.POW_SIMULATE, ConsensusType.POW_ETHASH, ConsensusType.POW_SHA3SHA3, ): miner =", "def test_submit_work(self): now = 42 block = RootBlock( RootBlockHeader(create_time=42, extra_data=b\"{}\",", "list(miner.work_map.keys())[0] self.assertEqual(work.hash, h) # cache hit now += 1 work", "return ret def test_mine_new_block_normal_case(self): async def create(retry=True): if len(self.added_blocks) >=", "RootBlockHeader(create_time=int(time.time())), tracking_data=\"{}\".encode(\"utf-8\"), ) async def add(block): nonlocal i, miner try:", "res = await miner.submit_work(work.hash, sol, sha3_256(b\"\")) self.assertTrue(res) self.assertEqual(miner.work_map, {}) self.assertEqual(len(self.added_blocks),", "miner = self.miner_gen(ConsensusType.POW_QKCHASH, None, None) block = RootBlock( RootBlockHeader(create_time=42, difficulty=5),", "1 work = await miner.get_work(now=now) self.assertEqual(work.hash, h) self.assertEqual(len(miner.work_map), 1) #", "miner.submit_work(work.hash, i, sha3_256(b\"\")) self.assertTrue(res) loop = asyncio.get_event_loop() loop.run_until_complete(go()) def test_validate_seal_with_adjusted_diff(self):", "blocks and then end loop = asyncio.get_event_loop() loop.run_until_complete(miner._mine_new_block_async()) self.assertEqual(len(self.added_blocks), 5)", "= self.miner_gen(ConsensusType.POW_SHA3SHA3, None, None) block = RootBlock( RootBlockHeader(create_time=42, difficulty=5), tracking_data=\"{}\".encode(\"utf-8\"),", "mined_res = miner.output_q.get() block.header.nonce = mined_res.nonce block.header.mixhash = mined_res.mixhash validate_seal(block.header,", "= mined_res.nonce validate_seal(block.header, ConsensusType.POW_SHA3SHA3) def test_qkchash(self): miner = self.miner_gen(ConsensusType.POW_QKCHASH, None,", "sha3_256 class TestMiner(unittest.TestCase): def setUp(self): super().setUp() def miner_gen(consensus, create_func, 
add_func,", "Optional[int] = None): # guarantee target time is hit ret", ") async def create(retry=True): return block async def add(block_to_add): self.added_blocks.append(block_to_add)", ">= 5: return None # stop the game return RootBlock(", "loop = asyncio.get_event_loop() loop.run_until_complete(go()) def test_validate_seal_with_adjusted_diff(self): diff = 1000 block", "miner = self.miner_gen( ConsensusType.POW_SHA3SHA3, create, add, remote=True, # fake pk,", "add(block_to_add): self.added_blocks.append(block_to_add) miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, create, add, remote=True) async def", "return m self.miner_gen = miner_gen self.added_blocks = [] @staticmethod def", "test_get_work(self): now = 42 async def create(retry=True): nonlocal now return", "┻━┻\") else: self.added_blocks.append(block) finally: i += 1 miner = self.miner_gen(ConsensusType.POW_SIMULATE,", "block doesn't exist res = await miner.submit_work(b\"lolwut\", 0, sha3_256(b\"\")) self.assertFalse(res)", "self.assertGreater(sol, 100) # ensure non-solution is tried non_sol = sol", "self.assertRaises(ValueError): await miner.submit_work(b\"\", 42, b\"\") loop = asyncio.get_event_loop() loop.run_until_complete(go()) def", "return None return RootBlock( RootBlockHeader(create_time=int(time.time())), tracking_data=\"{}\".encode(\"utf-8\"), ) async def add(block):", "loop.run_until_complete(go()) def test_submit_work(self): now = 42 block = RootBlock( RootBlockHeader(create_time=42,", "also check internal state afterwards res = await miner.submit_work(work.hash, sol,", "up if too much time passed now += 100 await", "work if interval passed now += 10 work = await", "% 2 == 0: raise Exception(\"(╯°□°)╯︵ ┻━┻\") else: self.added_blocks.append(block) finally:", "now = 42 block = RootBlock( RootBlockHeader(create_time=42, extra_data=b\"{}\", difficulty=5) )", "but fail in real world when # adding the block", "self.assertFalse(res) # valid submission, also check internal state afterwards res", 
"1000 -> 1, any number should work res = await", "loop.run_until_complete(go()) def test_get_work(self): now = 42 async def create(retry=True): nonlocal", "create, None, remote=True) async def go(): nonlocal now # no", "miner.input_q, miner.output_q, ) mined_res = miner.output_q.get() block.header.nonce = mined_res.nonce validate_seal(block.header,", "mined_res.nonce block.header.mixhash = mined_res.mixhash validate_seal(block.header, ConsensusType.POW_QKCHASH) def test_only_remote(self): async def", "= Miner( consensus, create_func, add_func, self.get_mining_params, **kwargs ) m.enabled =", "work, {\"consensus_type\": ConsensusType.POW_SHA3SHA3}, miner.input_q, miner.output_q, ) mined_res = miner.output_q.get() block.header.nonce", "= RootBlock( RootBlockHeader(create_time=42, difficulty=5), tracking_data=\"{}\".encode(\"utf-8\"), ) work = MiningWork(block.header.get_hash_for_mining(), 42,", "= self.miner_gen(ConsensusType.POW_SHA3SHA3, None, None) with self.assertRaises(ValueError): await miner.get_work() with self.assertRaises(ValueError):", "= MiningWork(block.header.get_hash_for_mining(), 42, 5) # only process one block, which", "miner self.added_blocks.append(block) for consensus in ( ConsensusType.POW_SIMULATE, ConsensusType.POW_ETHASH, ConsensusType.POW_SHA3SHA3, ):", "# guardian: diff 1000 -> 1, any number should work", "miner = self.miner_gen(ConsensusType.POW_SIMULATE, create, add) # only 2 blocks can", "async def go(): miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, None, None) with self.assertRaises(ValueError):", "one block, which is passed in. 
`None` means termination right", "any number should work res = await miner.submit_work(work.hash, i, sha3_256(b\"\"))", "= 1000 block = RootBlock( RootBlockHeader(create_time=42, difficulty=diff), tracking_data=\"{}\".encode(\"utf-8\"), ) block.header.nonce", "= await miner.get_work(now=now) self.assertEqual(work.height, 0) # guardian: diff 1000 ->", "# stop the game return RootBlock( RootBlockHeader(create_time=int(time.time())), tracking_data=\"{}\".encode(\"utf-8\"), ) async", "ret = {\"target_block_time\": 0.0, \"is_test\": True} if rounds is not", "try: if i % 2 == 0: raise Exception(\"(╯°□°)╯︵ ┻━┻\")", "tracking_data=\"{}\".encode(\"utf-8\"), ) async def add(block): nonlocal i, miner try: if", "1000 block = RootBlock( RootBlockHeader(create_time=42, difficulty=diff), tracking_data=\"{}\".encode(\"utf-8\"), ) block.header.nonce =", "asyncio.get_event_loop() loop.run_until_complete(go()) def test_submit_work_with_guardian(self): now = 42 block = RootBlock(", "0, sha3_256(b\"\")) self.assertFalse(res) solver = DoubleSHA256(work) sol = solver.mine(100, 200).nonce", "5: return None return RootBlock( RootBlockHeader(create_time=int(time.time())), tracking_data=\"{}\".encode(\"utf-8\"), ) async def", "len(self.added_blocks) >= 5: return None # stop the game return", "pk, will succeed in test but fail in real world", "else: self.added_blocks.append(block) finally: i += 1 miner = self.miner_gen(ConsensusType.POW_SIMULATE, create,", "i, miner try: if i % 2 == 0: raise", "= 0 async def create(retry=True): nonlocal i if i >=", "= self.miner_gen(ConsensusType.POW_SIMULATE, create, add) # only 2 blocks can be", "non_sol = sol - 1 # invalid pow proof res", "def create(retry=True): nonlocal now return RootBlock(RootBlockHeader(create_time=now, extra_data=b\"{}\")) miner = self.miner_gen(ConsensusType.POW_SHA3SHA3,", "self.assertNotEqual(work.hash, h) # work map cleaned up if too much", "1 # invalid pow proof res = await miner.submit_work(work.hash, non_sol,", "100 await 
miner.get_work(now=now) self.assertEqual(len(miner.work_map), 1) # only new work itself", "await miner.get_work(now=now) self.assertEqual(work.hash, h) self.assertEqual(len(miner.work_map), 1) # new work if", "= 42 async def create(retry=True): nonlocal now return RootBlock(RootBlockHeader(create_time=now, extra_data=b\"{}\"))", "def go(): for i in range(42, 100): work = await", "self.assertFalse(res) solver = DoubleSHA256(work) sol = solver.mine(100, 200).nonce self.assertGreater(sol, 100)", "sha3_256(b\"\")) self.assertFalse(res) # valid submission, also check internal state afterwards", "loop = asyncio.get_event_loop() loop.run_until_complete(go()) def test_submit_work_with_guardian(self): now = 42 block", ") work = MiningWork(block.header.get_hash_for_mining(), 42, 5) # only process one", "self.assertEqual(len(self.added_blocks), 5) def test_simulate_mine_handle_block_exception(self): i = 0 async def create(retry=True):", "miner.get_work(now=now) self.assertEqual(len(miner.work_map), 1) # only new work itself loop =", "self.assertRaises(ValueError): validate_seal(block.header, ConsensusType.POW_SHA3SHA3) # significantly lowering the diff should pass", "with self.assertRaises(ValueError): await miner.submit_work(b\"\", 42, b\"\") loop = asyncio.get_event_loop() loop.run_until_complete(go())", "= await miner.get_work(now=now) self.assertEqual(len(miner.work_map), 2) self.assertNotEqual(work.hash, h) # work map", "remote=True) async def go(): work = await miner.get_work(now=now) self.assertEqual(work.height, 0)", "0) # guardian: diff 1000 -> 1, any number should", "= [] @staticmethod def get_mining_params(rounds: Optional[int] = None): # guarantee", "test_submit_work_with_guardian(self): now = 42 block = RootBlock( RootBlockHeader(create_time=42, extra_data=b\"{}\", difficulty=1000)", "succeed in test but fail in real world when #", "= list(miner.work_map.keys())[0] self.assertEqual(work.hash, h) # cache hit now += 1", "the root chain 
guardian_private_key=ecies.generate_privkey(), ) async def go(): for i", "RootBlock( RootBlockHeader(create_time=int(time.time())), tracking_data=\"{}\".encode(\"utf-8\"), ) async def add(block): nonlocal i, miner", "self.assertEqual(len(work), 3) self.assertEqual(len(miner.work_map), 1) h = list(miner.work_map.keys())[0] self.assertEqual(work.hash, h) #", "5) # submitted block doesn't exist res = await miner.submit_work(b\"lolwut\",", "sol, sha3_256(b\"\")) self.assertTrue(res) self.assertEqual(miner.work_map, {}) self.assertEqual(len(self.added_blocks), 1) self.assertIsNone(miner.current_work) loop =", "RootBlockHeader(create_time=42, difficulty=5), tracking_data=\"{}\".encode(\"utf-8\"), ) work = MiningWork(block.header.get_hash_for_mining(), 42, 5) #", ") async def add(block): nonlocal i, miner try: if i", "nonlocal i, miner try: if i % 2 == 0:", "RootBlock( RootBlockHeader(create_time=42, difficulty=diff), tracking_data=\"{}\".encode(\"utf-8\"), ) block.header.nonce = 0 with self.assertRaises(ValueError):", "import asyncio import time import unittest from typing import Optional", "5 blocks and then end loop = asyncio.get_event_loop() loop.run_until_complete(miner._mine_new_block_async()) self.assertEqual(len(self.added_blocks),", "too much time passed now += 100 await miner.get_work(now=now) self.assertEqual(len(miner.work_map),", "miner.output_q, ) mined_res = miner.output_q.get() block.header.nonce = mined_res.nonce validate_seal(block.header, ConsensusType.POW_SHA3SHA3)", "for i in range(42, 100): work = await miner.get_work(now=now) self.assertEqual(work.height,", "== 0: raise Exception(\"(╯°□°)╯︵ ┻━┻\") else: self.added_blocks.append(block) finally: i +=", ") async def create(retry=True): return block async def add(_): pass", "def test_mine_new_block_normal_case(self): async def create(retry=True): if len(self.added_blocks) >= 5: return", "setUp(self): super().setUp() def miner_gen(consensus, create_func, add_func, **kwargs): m = Miner(", ") async def 
add(block): nonlocal miner self.added_blocks.append(block) for consensus in", "self.added_blocks.append(block) finally: i += 1 miner = self.miner_gen(ConsensusType.POW_SIMULATE, create, add)", "def miner_gen(consensus, create_func, add_func, **kwargs): m = Miner( consensus, create_func,", "m = Miner( consensus, create_func, add_func, self.get_mining_params, **kwargs ) m.enabled", "difficulty=1000) ) async def create(retry=True): return block async def add(_):", "time import unittest from typing import Optional from quarkchain.cluster.miner import", "<filename>quarkchain/cluster/tests/test_miner.py import asyncio import time import unittest from typing import", "block.header.mixhash = mined_res.mixhash validate_seal(block.header, ConsensusType.POW_QKCHASH) def test_only_remote(self): async def go():", "remote=True) async def go(): nonlocal now # no current work,", "which is passed in. `None` means termination right after miner.input_q.put((None,", "# work map cleaned up if too much time passed", ") m.enabled = True return m self.miner_gen = miner_gen self.added_blocks", "await miner.get_work(now=now) self.assertEqual(work.height, 0) self.assertEqual(work.difficulty, 5) # submitted block doesn't", "passed now += 100 await miner.get_work(now=now) self.assertEqual(len(miner.work_map), 1) # only", "async def create(retry=True): return block async def add(block_to_add): self.added_blocks.append(block_to_add) miner", "miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, create, None, remote=True) async def go(): nonlocal", "None, remote=True) async def go(): nonlocal now # no current", "miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, create, add, remote=True) async def go(): work", "time is hit ret = {\"target_block_time\": 0.0, \"is_test\": True} if", "consensus in ( ConsensusType.POW_SIMULATE, ConsensusType.POW_ETHASH, ConsensusType.POW_SHA3SHA3, ): miner = self.miner_gen(consensus,", "nonlocal miner self.added_blocks.append(block) for consensus in ( 
ConsensusType.POW_SIMULATE, ConsensusType.POW_ETHASH, ConsensusType.POW_SHA3SHA3,", "now = 42 async def create(retry=True): nonlocal now return RootBlock(RootBlockHeader(create_time=now,", "{\"consensus_type\": ConsensusType.POW_QKCHASH}, miner.input_q, miner.output_q, ) mined_res = miner.output_q.get() block.header.nonce =", "passed now += 10 work = await miner.get_work(now=now) self.assertEqual(len(miner.work_map), 2)", "work = await miner.get_work(now=now) self.assertEqual(len(miner.work_map), 2) self.assertNotEqual(work.hash, h) # work", "self.miner_gen(ConsensusType.POW_SHA3SHA3, create, add, remote=True) async def go(): work = await", "await miner.submit_work(b\"\", 42, b\"\") loop = asyncio.get_event_loop() loop.run_until_complete(go()) def test_get_work(self):", "= miner.output_q.get() block.header.nonce = mined_res.nonce validate_seal(block.header, ConsensusType.POW_SHA3SHA3) def test_qkchash(self): miner", "from quarkchain.p2p import ecies from quarkchain.utils import sha3_256 class TestMiner(unittest.TestCase):", "now += 1 work = await miner.get_work(now=now) self.assertEqual(work.hash, h) self.assertEqual(len(miner.work_map),", "MiningWork(block.header.get_hash_for_mining(), 42, 5) # only process one block, which is", "import Optional from quarkchain.cluster.miner import DoubleSHA256, Miner, MiningWork, validate_seal from", "async def create(retry=True): return block async def add(_): pass miner", "miner.output_q.get() block.header.nonce = mined_res.nonce block.header.mixhash = mined_res.mixhash validate_seal(block.header, ConsensusType.POW_QKCHASH) def", "import unittest from typing import Optional from quarkchain.cluster.miner import DoubleSHA256,", "right after miner.input_q.put((None, {})) miner.mine_loop( work, {\"consensus_type\": ConsensusType.POW_QKCHASH}, miner.input_q, miner.output_q,", "def test_sha3sha3(self): miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, None, None) block = RootBlock(", "self.added_blocks.append(block_to_add) miner = 
class TestMiner(unittest.TestCase):
    """Tests for quarkchain.cluster.miner.

    Covers the local mining loop (``_mine_new_block_async``), the remote
    mining flow (``get_work`` / ``submit_work``), the raw PoW mine loops
    (SHA3SHA3 and QKCHASH), and ``validate_seal`` with an adjusted difficulty.
    """

    def setUp(self):
        super().setUp()

        def miner_gen(consensus, create_func, add_func, **kwargs):
            # Wire a Miner to this test's mining params and force-enable it
            # so mining proceeds without an external toggle.
            m = Miner(
                consensus, create_func, add_func, self.get_mining_params, **kwargs
            )
            m.enabled = True
            return m

        self.miner_gen = miner_gen
        # Blocks "added to the chain" by the add_func callbacks in the tests.
        self.added_blocks = []

    @staticmethod
    def get_mining_params(rounds: Optional[int] = None):
        """Return mining params that keep tests fast and deterministic.

        A ``target_block_time`` of 0.0 guarantees the target time is hit;
        ``rounds``, when given, caps PoW iterations per attempt.
        """
        ret = {"target_block_time": 0.0, "is_test": True}
        if rounds is not None:
            ret["rounds"] = rounds
        return ret

    def test_mine_new_block_normal_case(self):
        async def create(retry=True):
            # Stop the game once five blocks have been added.
            if len(self.added_blocks) >= 5:
                return None
            return RootBlock(
                RootBlockHeader(create_time=int(time.time())),
                tracking_data="{}".encode("utf-8"),
            )

        async def add(block):
            self.added_blocks.append(block)

        for consensus in (
            ConsensusType.POW_SIMULATE,
            ConsensusType.POW_ETHASH,
            ConsensusType.POW_SHA3SHA3,
        ):
            # Reset per consensus type; otherwise only the first iteration
            # actually mines (create() would return None right away and the
            # remaining consensus types would be asserted vacuously).
            self.added_blocks = []
            miner = self.miner_gen(consensus, create, add)
            # should generate 5 blocks and then end
            loop = asyncio.get_event_loop()
            loop.run_until_complete(miner._mine_new_block_async())
            self.assertEqual(len(self.added_blocks), 5)

    def test_simulate_mine_handle_block_exception(self):
        i = 0

        async def create(retry=True):
            nonlocal i
            if i >= 5:
                return None
            return RootBlock(
                RootBlockHeader(create_time=int(time.time())),
                tracking_data="{}".encode("utf-8"),
            )

        async def add(block):
            # Fail on every even attempt to exercise the miner's error
            # handling; the counter advances regardless of outcome.
            nonlocal i
            try:
                if i % 2 == 0:
                    raise Exception("(╯°□°)╯︵ ┻━┻")
                else:
                    self.added_blocks.append(block)
            finally:
                i += 1

        miner = self.miner_gen(ConsensusType.POW_SIMULATE, create, add)
        # only 2 blocks can be added (attempts 1 and 3 succeed; 0, 2, 4 raise)
        loop = asyncio.get_event_loop()
        loop.run_until_complete(miner._mine_new_block_async())
        self.assertEqual(len(self.added_blocks), 2)

    def test_sha3sha3(self):
        miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, None, None)
        block = RootBlock(
            RootBlockHeader(create_time=42, difficulty=5),
            tracking_data="{}".encode("utf-8"),
        )
        work = MiningWork(block.header.get_hash_for_mining(), 42, 5)
        # only process one block, which is passed in. `None` means termination
        # right after
        miner.input_q.put((None, {}))
        miner.mine_loop(
            work,
            {"consensus_type": ConsensusType.POW_SHA3SHA3},
            miner.input_q,
            miner.output_q,
        )
        mined_res = miner.output_q.get()
        block.header.nonce = mined_res.nonce
        # should not raise: the mined nonce satisfies difficulty 5
        validate_seal(block.header, ConsensusType.POW_SHA3SHA3)

    def test_qkchash(self):
        miner = self.miner_gen(ConsensusType.POW_QKCHASH, None, None)
        block = RootBlock(
            RootBlockHeader(create_time=42, difficulty=5),
            tracking_data="{}".encode("utf-8"),
        )
        work = MiningWork(block.header.get_hash_for_mining(), 42, 5)
        # only process one block, which is passed in. `None` means termination
        # right after
        miner.input_q.put((None, {}))
        miner.mine_loop(
            work,
            {"consensus_type": ConsensusType.POW_QKCHASH},
            miner.input_q,
            miner.output_q,
        )
        mined_res = miner.output_q.get()
        block.header.nonce = mined_res.nonce
        # QKCHASH also produces a mixhash that the seal check requires
        block.header.mixhash = mined_res.mixhash
        validate_seal(block.header, ConsensusType.POW_QKCHASH)

    def test_only_remote(self):
        async def go():
            # A miner created without remote=True must reject the remote
            # mining API on both the read and write paths.
            miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, None, None)
            with self.assertRaises(ValueError):
                await miner.get_work()
            with self.assertRaises(ValueError):
                await miner.submit_work(b"", 42, b"")

        loop = asyncio.get_event_loop()
        loop.run_until_complete(go())

    def test_get_work(self):
        now = 42

        async def create(retry=True):
            nonlocal now
            return RootBlock(RootBlockHeader(create_time=now, extra_data=b"{}"))

        miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, create, None, remote=True)

        async def go():
            nonlocal now
            # no current work, will generate a new one
            work = await miner.get_work(now=now)
            self.assertEqual(len(work), 3)
            self.assertEqual(len(miner.work_map), 1)
            h = list(miner.work_map.keys())[0]
            self.assertEqual(work.hash, h)

            # cache hit
            now += 1
            work = await miner.get_work(now=now)
            self.assertEqual(work.hash, h)
            self.assertEqual(len(miner.work_map), 1)

            # new work if interval passed
            now += 10
            work = await miner.get_work(now=now)
            self.assertEqual(len(miner.work_map), 2)
            self.assertNotEqual(work.hash, h)

            # work map cleaned up if too much time passed
            now += 100
            await miner.get_work(now=now)
            self.assertEqual(len(miner.work_map), 1)  # only new work itself

        loop = asyncio.get_event_loop()
        loop.run_until_complete(go())

    def test_submit_work(self):
        now = 42
        block = RootBlock(
            RootBlockHeader(create_time=42, extra_data=b"{}", difficulty=5)
        )

        async def create(retry=True):
            return block

        async def add(block_to_add):
            self.added_blocks.append(block_to_add)

        miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, create, add, remote=True)

        async def go():
            work = await miner.get_work(now=now)
            self.assertEqual(work.height, 0)
            self.assertEqual(work.difficulty, 5)
            # submitted block doesn't exist
            res = await miner.submit_work(b"lolwut", 0, sha3_256(b""))
            self.assertFalse(res)

            solver = DoubleSHA256(work)
            sol = solver.mine(100, 200).nonce
            self.assertGreater(sol, 100)  # ensure non-solution is tried
            non_sol = sol - 1
            # invalid pow proof
            res = await miner.submit_work(work.hash, non_sol, sha3_256(b""))
            self.assertFalse(res)
            # valid submission, also check internal state afterwards
            res = await miner.submit_work(work.hash, sol, sha3_256(b""))
            self.assertTrue(res)
            self.assertEqual(miner.work_map, {})
            self.assertEqual(len(self.added_blocks), 1)
            self.assertIsNone(miner.current_work)

        loop = asyncio.get_event_loop()
        loop.run_until_complete(go())

    def test_submit_work_with_guardian(self):
        now = 42
        block = RootBlock(
            RootBlockHeader(create_time=42, extra_data=b"{}", difficulty=1000)
        )

        async def create(retry=True):
            return block

        async def add(_):
            pass

        miner = self.miner_gen(
            ConsensusType.POW_SHA3SHA3,
            create,
            add,
            remote=True,
            # fake pk, will succeed in test but fail in real world when
            # adding the block to the root chain
            guardian_private_key=ecies.generate_privkey(),
        )

        async def go():
            for i in range(42, 100):
                work = await miner.get_work(now=now)
                self.assertEqual(work.height, 0)

                # guardian: diff 1000 -> 1, any number should work
                res = await miner.submit_work(work.hash, i, sha3_256(b""))
                self.assertTrue(res)

        loop = asyncio.get_event_loop()
        loop.run_until_complete(go())

    def test_validate_seal_with_adjusted_diff(self):
        diff = 1000
        block = RootBlock(
            RootBlockHeader(create_time=42, difficulty=diff),
            tracking_data="{}".encode("utf-8"),
        )
        block.header.nonce = 0
        # nonce 0 cannot satisfy difficulty 1000
        with self.assertRaises(ValueError):
            validate_seal(block.header, ConsensusType.POW_SHA3SHA3)

        # significantly lowering the diff should pass
        validate_seal(block.header, ConsensusType.POW_SHA3SHA3, adjusted_diff=1)
`None` means termination", "root chain guardian_private_key=ecies.generate_privkey(), ) async def go(): for i in", "block, which is passed in. `None` means termination right after", "self.assertRaises(ValueError): await miner.get_work() with self.assertRaises(ValueError): await miner.submit_work(b\"\", 42, b\"\") loop", "= asyncio.get_event_loop() loop.run_until_complete(go()) def test_submit_work(self): now = 42 block =", "= mined_res.nonce block.header.mixhash = mined_res.mixhash validate_seal(block.header, ConsensusType.POW_QKCHASH) def test_only_remote(self): async", "valid submission, also check internal state afterwards res = await", "only new work itself loop = asyncio.get_event_loop() loop.run_until_complete(go()) def test_submit_work(self):", "loop.run_until_complete(miner._mine_new_block_async()) self.assertEqual(len(self.added_blocks), 5) def test_simulate_mine_handle_block_exception(self): i = 0 async def", "validate_seal(block.header, ConsensusType.POW_SHA3SHA3) # significantly lowering the diff should pass validate_seal(block.header,", "work = await miner.get_work(now=now) self.assertEqual(len(work), 3) self.assertEqual(len(miner.work_map), 1) h =", "def test_get_work(self): now = 42 async def create(retry=True): nonlocal now", "None, None) with self.assertRaises(ValueError): await miner.get_work() with self.assertRaises(ValueError): await miner.submit_work(b\"\",", "create(retry=True): nonlocal now return RootBlock(RootBlockHeader(create_time=now, extra_data=b\"{}\")) miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, create,", "async def add(block): nonlocal i, miner try: if i %", "Optional from quarkchain.cluster.miner import DoubleSHA256, Miner, MiningWork, validate_seal from quarkchain.config", "loop = asyncio.get_event_loop() loop.run_until_complete(go()) def test_submit_work(self): now = 42 block", "def test_simulate_mine_handle_block_exception(self): i = 0 async def create(retry=True): nonlocal i", "None): # guarantee target time is hit ret = 
{\"target_block_time\":", "# guarantee target time is hit ret = {\"target_block_time\": 0.0,", "ConsensusType.POW_SHA3SHA3) # significantly lowering the diff should pass validate_seal(block.header, ConsensusType.POW_SHA3SHA3,", "ConsensusType.POW_SHA3SHA3, ): miner = self.miner_gen(consensus, create, add) # should generate", "quarkchain.cluster.miner import DoubleSHA256, Miner, MiningWork, validate_seal from quarkchain.config import ConsensusType", "loop = asyncio.get_event_loop() loop.run_until_complete(go()) def test_get_work(self): now = 42 async", "42, 5) # only process one block, which is passed", "create_func, add_func, **kwargs): m = Miner( consensus, create_func, add_func, self.get_mining_params,", ") mined_res = miner.output_q.get() block.header.nonce = mined_res.nonce block.header.mixhash = mined_res.mixhash", "2 blocks can be added loop = asyncio.get_event_loop() loop.run_until_complete(miner._mine_new_block_async()) self.assertEqual(len(self.added_blocks),", "one work = await miner.get_work(now=now) self.assertEqual(len(work), 3) self.assertEqual(len(miner.work_map), 1) h", "def test_submit_work_with_guardian(self): now = 42 block = RootBlock( RootBlockHeader(create_time=42, extra_data=b\"{}\",", "test but fail in real world when # adding the", "only 2 blocks can be added loop = asyncio.get_event_loop() loop.run_until_complete(miner._mine_new_block_async())", "add(_): pass miner = self.miner_gen( ConsensusType.POW_SHA3SHA3, create, add, remote=True, #", "time passed now += 100 await miner.get_work(now=now) self.assertEqual(len(miner.work_map), 1) #", "itself loop = asyncio.get_event_loop() loop.run_until_complete(go()) def test_submit_work(self): now = 42", "test_mine_new_block_normal_case(self): async def create(retry=True): if len(self.added_blocks) >= 5: return None", "from quarkchain.core import RootBlock, RootBlockHeader from quarkchain.p2p import ecies from", "RootBlockHeader(create_time=int(time.time())), tracking_data=\"{}\".encode(\"utf-8\"), ) 
async def add(block): nonlocal miner self.added_blocks.append(block) for", "def test_validate_seal_with_adjusted_diff(self): diff = 1000 block = RootBlock( RootBlockHeader(create_time=42, difficulty=diff),", ") block.header.nonce = 0 with self.assertRaises(ValueError): validate_seal(block.header, ConsensusType.POW_SHA3SHA3) # significantly", "def go(): work = await miner.get_work(now=now) self.assertEqual(work.height, 0) self.assertEqual(work.difficulty, 5)", "return RootBlock( RootBlockHeader(create_time=int(time.time())), tracking_data=\"{}\".encode(\"utf-8\"), ) async def add(block): nonlocal i,", "difficulty=5) ) async def create(retry=True): return block async def add(block_to_add):", "diff = 1000 block = RootBlock( RootBlockHeader(create_time=42, difficulty=diff), tracking_data=\"{}\".encode(\"utf-8\"), )", "loop = asyncio.get_event_loop() loop.run_until_complete(miner._mine_new_block_async()) self.assertEqual(len(self.added_blocks), 2) def test_sha3sha3(self): miner =", "asyncio.get_event_loop() loop.run_until_complete(miner._mine_new_block_async()) self.assertEqual(len(self.added_blocks), 2) def test_sha3sha3(self): miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, None,", "self.miner_gen(ConsensusType.POW_SIMULATE, create, add) # only 2 blocks can be added", "42 block = RootBlock( RootBlockHeader(create_time=42, extra_data=b\"{}\", difficulty=5) ) async def", "validate_seal(block.header, ConsensusType.POW_SHA3SHA3) def test_qkchash(self): miner = self.miner_gen(ConsensusType.POW_QKCHASH, None, None) block", "= self.miner_gen(ConsensusType.POW_SHA3SHA3, create, None, remote=True) async def go(): nonlocal now", "= rounds return ret def test_mine_new_block_normal_case(self): async def create(retry=True): if", ") mined_res = miner.output_q.get() block.header.nonce = mined_res.nonce validate_seal(block.header, ConsensusType.POW_SHA3SHA3) def", "+= 1 work = await miner.get_work(now=now) self.assertEqual(work.hash, h) self.assertEqual(len(miner.work_map), 1)", 
"termination right after miner.input_q.put((None, {})) miner.mine_loop( work, {\"consensus_type\": ConsensusType.POW_SHA3SHA3}, miner.input_q,", "should generate 5 blocks and then end loop = asyncio.get_event_loop()", "number should work res = await miner.submit_work(work.hash, i, sha3_256(b\"\")) self.assertTrue(res)", "self.assertEqual(miner.work_map, {}) self.assertEqual(len(self.added_blocks), 1) self.assertIsNone(miner.current_work) loop = asyncio.get_event_loop() loop.run_until_complete(go()) def", "def create(retry=True): nonlocal i if i >= 5: return None", "if rounds is not None: ret[\"rounds\"] = rounds return ret", "RootBlockHeader(create_time=42, difficulty=diff), tracking_data=\"{}\".encode(\"utf-8\"), ) block.header.nonce = 0 with self.assertRaises(ValueError): validate_seal(block.header,", "super().setUp() def miner_gen(consensus, create_func, add_func, **kwargs): m = Miner( consensus,", "add(block): nonlocal i, miner try: if i % 2 ==", "RootBlock(RootBlockHeader(create_time=now, extra_data=b\"{}\")) miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, create, None, remote=True) async def", "None) block = RootBlock( RootBlockHeader(create_time=42, difficulty=5), tracking_data=\"{}\".encode(\"utf-8\"), ) work =", "None # stop the game return RootBlock( RootBlockHeader(create_time=int(time.time())), tracking_data=\"{}\".encode(\"utf-8\"), )", "RootBlock( RootBlockHeader(create_time=42, difficulty=5), tracking_data=\"{}\".encode(\"utf-8\"), ) work = MiningWork(block.header.get_hash_for_mining(), 42, 5)", "sol - 1 # invalid pow proof res = await", "sha3_256(b\"\")) self.assertFalse(res) solver = DoubleSHA256(work) sol = solver.mine(100, 200).nonce self.assertGreater(sol,", "miner.mine_loop( work, {\"consensus_type\": ConsensusType.POW_SHA3SHA3}, miner.input_q, miner.output_q, ) mined_res = miner.output_q.get()", "test_validate_seal_with_adjusted_diff(self): diff = 1000 block = RootBlock( RootBlockHeader(create_time=42, difficulty=diff), 
tracking_data=\"{}\".encode(\"utf-8\"),", "def create(retry=True): return block async def add(block_to_add): self.added_blocks.append(block_to_add) miner =", "asyncio.get_event_loop() loop.run_until_complete(go()) def test_get_work(self): now = 42 async def create(retry=True):", "is hit ret = {\"target_block_time\": 0.0, \"is_test\": True} if rounds" ]