from __future__ import absolute_import, division, unicode_literals
import string
import gettext

# Translation hook for the error messages below; with no message
# catalogue installed, _() returns its argument unchanged.
_ = gettext.gettext

# Sentinel representing end-of-file in the input stream.
EOF = None

# Parse-error codes mapped to human-readable descriptions.  Values are
# %-format templates; callers interpolate names such as %(name)s or
# %(charAsInt)08x when reporting the error.  The keys are the stable
# API — do not rename them without checking every caller.
E = {
    "null-character":
        _("Null character in input stream, replaced with U+FFFD."),
    "invalid-codepoint":
        _("Invalid codepoint in stream."),
    "incorrectly-placed-solidus":
        _("Solidus (/) incorrectly placed in tag."),
    "incorrect-cr-newline-entity":
        _("Incorrect CR newline entity, replaced with LF."),
    "illegal-windows-1252-entity":
        _("Entity used with illegal number (windows-1252 reference)."),
    "cant-convert-numeric-entity":
        _("Numeric entity couldn't be converted to character "
          "(codepoint U+%(charAsInt)08x)."),
    "illegal-codepoint-for-numeric-entity":
        _("Numeric entity represents an illegal codepoint: "
          "U+%(charAsInt)08x."),
    "numeric-entity-without-semicolon":
        _("Numeric entity didn't end with ';'."),
    "expected-numeric-entity-but-got-eof":
        _("Numeric entity expected. Got end of file instead."),
    "expected-numeric-entity":
        _("Numeric entity expected but none found."),
    "named-entity-without-semicolon":
        _("Named entity didn't end with ';'."),
    "expected-named-entity":
        _("Named entity expected. Got none."),
    "attributes-in-end-tag":
        _("End tag contains unexpected attributes."),
    "self-closing-flag-on-end-tag":
        _("End tag contains unexpected self-closing flag."),
    "expected-tag-name-but-got-right-bracket":
        _("Expected tag name. Got '>' instead."),
    "expected-tag-name-but-got-question-mark":
        _("Expected tag name. Got '?' instead. (HTML doesn't "
          "support processing instructions.)"),
    "expected-tag-name":
        _("Expected tag name. Got something else instead"),
    "expected-closing-tag-but-got-right-bracket":
        _("Expected closing tag. Got '>' instead. Ignoring '</>'."),
    "expected-closing-tag-but-got-eof":
        _("Expected closing tag. Unexpected end of file."),
    "expected-closing-tag-but-got-char":
        _("Expected closing tag. Unexpected character '%(data)s' found."),
    "eof-in-tag-name":
        _("Unexpected end of file in the tag name."),
    "expected-attribute-name-but-got-eof":
        _("Unexpected end of file. Expected attribute name instead."),
    "eof-in-attribute-name":
        _("Unexpected end of file in attribute name."),
    # Fixed typo: was "Invalid chracter in attribute name"
    "invalid-character-in-attribute-name":
        _("Invalid character in attribute name."),
    "duplicate-attribute":
        _("Dropped duplicate attribute on tag."),
    "expected-end-of-tag-name-but-got-eof":
        _("Unexpected end of file. Expected = or end of tag."),
    "expected-attribute-value-but-got-eof":
        _("Unexpected end of file. Expected attribute value."),
    "expected-attribute-value-but-got-right-bracket":
        _("Expected attribute value. Got '>' instead."),
    "equals-in-unquoted-attribute-value":
        _("Unexpected = in unquoted attribute"),
    "unexpected-character-in-unquoted-attribute-value":
        _("Unexpected character in unquoted attribute"),
    "invalid-character-after-attribute-name":
        _("Unexpected character after attribute name."),
    "unexpected-character-after-attribute-value":
        _("Unexpected character after attribute value."),
    "eof-in-attribute-value-double-quote":
        _("Unexpected end of file in attribute value (\")."),
    "eof-in-attribute-value-single-quote":
        _("Unexpected end of file in attribute value (')."),
    "eof-in-attribute-value-no-quotes":
        _("Unexpected end of file in attribute value."),
    "unexpected-EOF-after-solidus-in-tag":
        _("Unexpected end of file in tag. Expected >"),
    # NOTE(review): key spelling "soldius" looks like a typo for
    # "solidus", but the key may be referenced verbatim elsewhere in the
    # parser — do not rename without checking callers.
    "unexpected-character-after-soldius-in-tag":
        _("Unexpected character after / in tag. Expected >"),
    "expected-dashes-or-doctype":
        _("Expected '--' or 'DOCTYPE'. Not found."),
    "unexpected-bang-after-double-dash-in-comment":
        _("Unexpected ! after -- in comment"),
    "unexpected-space-after-double-dash-in-comment":
        _("Unexpected space after -- in comment"),
    "incorrect-comment":
        _("Incorrect comment."),
    "eof-in-comment":
        _("Unexpected end of file in comment."),
    "eof-in-comment-end-dash":
        _("Unexpected end of file in comment (-)"),
    "unexpected-dash-after-double-dash-in-comment":
        _("Unexpected '-' after '--' found in comment."),
    "eof-in-comment-double-dash":
        _("Unexpected end of file in comment (--)."),
    "eof-in-comment-end-space-state":
        _("Unexpected end of file in comment."),
    "eof-in-comment-end-bang-state":
        _("Unexpected end of file in comment."),
    "unexpected-char-in-comment":
        _("Unexpected character in comment found."),
    "need-space-after-doctype":
        _("No space after literal string 'DOCTYPE'."),
    "expected-doctype-name-but-got-right-bracket":
        _("Unexpected > character. Expected DOCTYPE name."),
    "expected-doctype-name-but-got-eof":
        _("Unexpected end of file. Expected DOCTYPE name."),
    "eof-in-doctype-name":
        _("Unexpected end of file in DOCTYPE name."),
    "eof-in-doctype":
        _("Unexpected end of file in DOCTYPE."),
    "expected-space-or-right-bracket-in-doctype":
        _("Expected space or '>'. Got '%(data)s'"),
    "unexpected-end-of-doctype":
        _("Unexpected end of DOCTYPE."),
    "unexpected-char-in-doctype":
        _("Unexpected character in DOCTYPE."),
    "eof-in-innerhtml":
        _("XXX innerHTML EOF"),
    "unexpected-doctype":
        _("Unexpected DOCTYPE. Ignored."),
    "non-html-root":
        _("html needs to be the first start tag."),
    "expected-doctype-but-got-eof":
        _("Unexpected End of file. Expected DOCTYPE."),
    "unknown-doctype":
        _("Erroneous DOCTYPE."),
    "expected-doctype-but-got-chars":
        _("Unexpected non-space characters. Expected DOCTYPE."),
    "expected-doctype-but-got-start-tag":
        _("Unexpected start tag (%(name)s). Expected DOCTYPE."),
    "expected-doctype-but-got-end-tag":
        _("Unexpected end tag (%(name)s). Expected DOCTYPE."),
    "end-tag-after-implied-root":
        _("Unexpected end tag (%(name)s) after the (implied) root element."),
    "expected-named-closing-tag-but-got-eof":
        _("Unexpected end of file. Expected end tag (%(name)s)."),
    "two-heads-are-not-better-than-one":
        _("Unexpected start tag head in existing head. Ignored."),
    "unexpected-end-tag":
        _("Unexpected end tag (%(name)s). Ignored."),
    "unexpected-start-tag-out-of-my-head":
        _("Unexpected start tag (%(name)s) that can be in head. Moved."),
    "unexpected-start-tag":
        _("Unexpected start tag (%(name)s)."),
    "missing-end-tag":
        _("Missing end tag (%(name)s)."),
    "missing-end-tags":
        _("Missing end tags (%(name)s)."),
    "unexpected-start-tag-implies-end-tag":
        _("Unexpected start tag (%(startName)s) "
          "implies end tag (%(endName)s)."),
    "unexpected-start-tag-treated-as":
        _("Unexpected start tag (%(originalName)s). Treated as %(newName)s."),
    "deprecated-tag":
        _("Unexpected start tag %(name)s. Don't use it!"),
    "unexpected-start-tag-ignored":
        _("Unexpected start tag %(name)s. Ignored."),
    "expected-one-end-tag-but-got-another":
        _("Unexpected end tag (%(gotName)s). "
          "Missing end tag (%(expectedName)s)."),
    "end-tag-too-early":
        _("End tag (%(name)s) seen too early. Expected other end tag."),
    "end-tag-too-early-named":
        _("Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s)."),
    "end-tag-too-early-ignored":
        _("End tag (%(name)s) seen too early. Ignored."),
    "adoption-agency-1.1":
        _("End tag (%(name)s) violates step 1, "
          "paragraph 1 of the adoption agency algorithm."),
    "adoption-agency-1.2":
        _("End tag (%(name)s) violates step 1, "
          "paragraph 2 of the adoption agency algorithm."),
    "adoption-agency-1.3":
        _("End tag (%(name)s) violates step 1, "
          "paragraph 3 of the adoption agency algorithm."),
    "unexpected-end-tag-treated-as":
        _("Unexpected end tag (%(originalName)s). Treated as %(newName)s."),
    "no-end-tag":
        _("This element (%(name)s) has no end tag."),
    "unexpected-implied-end-tag-in-table":
        _("Unexpected implied end tag (%(name)s) in the table phase."),
    "unexpected-implied-end-tag-in-table-body":
        _("Unexpected implied end tag (%(name)s) in the table body phase."),
    "unexpected-char-implies-table-voodoo":
        _("Unexpected non-space characters in "
          "table context caused voodoo mode."),
    "unexpected-hidden-input-in-table":
        _("Unexpected input with type hidden in table context."),
    "unexpected-form-in-table":
        _("Unexpected form in table context."),
    "unexpected-start-tag-implies-table-voodoo":
        _("Unexpected start tag (%(name)s) in "
          "table context caused voodoo mode."),
    "unexpected-end-tag-implies-table-voodoo":
        _("Unexpected end tag (%(name)s) in "
          "table context caused voodoo mode."),
    "unexpected-cell-in-table-body":
        _("Unexpected table cell start tag (%(name)s) "
          "in the table body phase."),
    "unexpected-cell-end-tag":
        _("Got table cell end tag (%(name)s) "
          "while required end tags are missing."),
    "unexpected-end-tag-in-table-body":
        _("Unexpected end tag (%(name)s) in the table body phase. Ignored."),
    "unexpected-implied-end-tag-in-table-row":
        _("Unexpected implied end tag (%(name)s) in the table row phase."),
    "unexpected-end-tag-in-table-row":
        _("Unexpected end tag (%(name)s) in the table row phase. Ignored."),
    "unexpected-select-in-select":
        _("Unexpected select start tag in the select phase "
          "treated as select end tag."),
    "unexpected-input-in-select":
        _("Unexpected input start tag in the select phase."),
    # Fixed message: closing parenthesis after %(name)s was missing.
    "unexpected-start-tag-in-select":
        _("Unexpected start tag token (%(name)s) in the select phase. "
          "Ignored."),
    "unexpected-end-tag-in-select":
        _("Unexpected end tag (%(name)s) in the select phase. Ignored."),
    "unexpected-table-element-start-tag-in-select-in-table":
        _("Unexpected table element start tag (%(name)s) in the select in table phase."),
    "unexpected-table-element-end-tag-in-select-in-table":
        _("Unexpected table element end tag (%(name)s) in the select in table phase."),
    "unexpected-char-after-body":
        _("Unexpected non-space characters in the after body phase."),
    "unexpected-start-tag-after-body":
        _("Unexpected start tag token (%(name)s)"
          " in the after body phase."),
    "unexpected-end-tag-after-body":
        _("Unexpected end tag token (%(name)s)"
          " in the after body phase."),
    # Fixed typo: was "Unepxected characters ..."
    "unexpected-char-in-frameset":
        _("Unexpected characters in the frameset phase. Characters ignored."),
    "unexpected-start-tag-in-frameset":
        _("Unexpected start tag token (%(name)s)"
          " in the frameset phase. Ignored."),
    "unexpected-frameset-in-frameset-innerhtml":
        _("Unexpected end tag token (frameset) "
          "in the frameset phase (innerHTML)."),
    "unexpected-end-tag-in-frameset":
        _("Unexpected end tag token (%(name)s)"
          " in the frameset phase. Ignored."),
    "unexpected-char-after-frameset":
        _("Unexpected non-space characters in the "
          "after frameset phase. Ignored."),
    "unexpected-start-tag-after-frameset":
        _("Unexpected start tag (%(name)s)"
          " in the after frameset phase. Ignored."),
    "unexpected-end-tag-after-frameset":
        _("Unexpected end tag (%(name)s)"
          " in the after frameset phase. Ignored."),
    "unexpected-end-tag-after-body-innerhtml":
        _("Unexpected end tag after body(innerHtml)"),
    "expected-eof-but-got-char":
        _("Unexpected non-space characters. Expected end of file."),
    "expected-eof-but-got-start-tag":
        _("Unexpected start tag (%(name)s)"
          ". Expected end of file."),
    "expected-eof-but-got-end-tag":
        _("Unexpected end tag (%(name)s)"
          ". Expected end of file."),
    "eof-in-table":
        _("Unexpected end of file. Expected table content."),
    "eof-in-select":
        _("Unexpected end of file. Expected select content."),
    "eof-in-frameset":
        _("Unexpected end of file. Expected frameset content."),
    "eof-in-script-in-script":
        _("Unexpected end of file. Expected script content."),
    "eof-in-foreign-lands":
        _("Unexpected end of file. Expected foreign content"),
    "non-void-element-with-trailing-solidus":
        _("Trailing solidus not allowed on element %(name)s"),
    "unexpected-html-element-in-foreign-content":
        _("Element %(name)s not allowed in a non-html context"),
    "unexpected-end-tag-before-html":
        _("Unexpected end tag (%(name)s) before html."),
    # Wrapped in _() for consistency with every other entry.
    "XXX-undefined-error":
        _("Undefined error (this sucks and should be fixed)"),
}
# Canonical namespace URIs, keyed by their conventional prefix.
namespaces = dict(
    html="http://www.w3.org/1999/xhtml",
    mathml="http://www.w3.org/1998/Math/MathML",
    svg="http://www.w3.org/2000/svg",
    xlink="http://www.w3.org/1999/xlink",
    xml="http://www.w3.org/XML/1998/namespace",
    xmlns="http://www.w3.org/2000/xmlns/",
)
# (namespace, tag) pairs that establish a new scope boundary when
# searching the stack of open elements.
scopingElements = frozenset(
    [(namespaces["html"], tag)
     for tag in ("applet", "caption", "html", "marquee",
                 "object", "table", "td", "th")] +
    [(namespaces["mathml"], tag)
     for tag in ("mi", "mo", "mn", "ms", "mtext", "annotation-xml")] +
    [(namespaces["svg"], tag)
     for tag in ("foreignObject", "desc", "title")]
)
# (namespace, tag) pairs for the HTML formatting elements (the ones the
# adoption agency algorithm tracks in the list of active formatting
# elements).
formattingElements = frozenset(
    (namespaces["html"], tag)
    for tag in ("a", "b", "big", "code", "em", "font", "i", "nobr",
                "s", "small", "strike", "strong", "tt", "u")
)
# (namespace, tag) pairs in the "special" category of elements.
specialElements = frozenset(
    [(namespaces["html"], tag) for tag in (
        "address", "applet", "area", "article", "aside", "base",
        "basefont", "bgsound", "blockquote", "body", "br", "button",
        "caption", "center", "col", "colgroup", "command", "dd",
        "details", "dir", "div", "dl", "dt", "embed", "fieldset",
        "figure", "footer", "form", "frame", "frameset",
        "h1", "h2", "h3", "h4", "h5", "h6",
        "head", "header", "hr", "html", "iframe",
        # Note that image is commented out in the spec as "this isn't an
        # element that can end up on the stack, so it doesn't matter,"
        "image", "img", "input", "isindex", "li", "link", "listing",
        "marquee", "menu", "meta", "nav", "noembed", "noframes",
        "noscript", "object", "ol", "p", "param", "plaintext", "pre",
        "script", "section", "select", "style", "table", "tbody", "td",
        "textarea", "tfoot", "th", "thead", "title", "tr", "ul",
        "wbr", "xmp")] +
    [(namespaces["svg"], "foreignObject")]
)
# (namespace, tag) pairs that are HTML integration points in foreign
# (MathML/SVG) content.
# Bug fix: "annotaion-xml" was misspelled, so the MathML annotation-xml
# entry could never match any real element name.
# NOTE(review): per the HTML spec annotation-xml is an integration point
# only when its encoding attribute is text/html or application/xhtml+xml;
# confirm whether the parser performs that attribute check separately
# before consulting this set.
htmlIntegrationPointElements = frozenset((
    (namespaces["mathml"], "annotation-xml"),
    (namespaces["svg"], "foreignObject"),
    (namespaces["svg"], "desc"),
    (namespaces["svg"], "title")
))
# (namespace, tag) pairs that are MathML text integration points.
mathmlTextIntegrationPointElements = frozenset(
    (namespaces["mathml"], tag)
    for tag in ("mi", "mo", "mn", "ms", "mtext")
)
# The five characters HTML treats as whitespace:
# tab, line feed, form feed, space, carriage return.
spaceCharacters = frozenset("\t\n\u000C \r")
# Element names associated with the table-related insertion modes.
tableInsertModeElements = frozenset(
    "table tbody tfoot thead tr".split()
)
# Character-class sets for O(1) membership tests while tokenizing.
asciiLowercase = frozenset(string.ascii_lowercase)
asciiUppercase = frozenset(string.ascii_uppercase)
asciiLetters = frozenset(string.ascii_letters)
digits = frozenset(string.digits)
hexDigits = frozenset(string.hexdigits)

# Translation table mapping ASCII upper-case code points to lower-case,
# for use with str.translate(); str.maketrans builds the same
# {ord(upper): ord(lower)} dict the original comprehension produced.
asciiUpper2Lower = str.maketrans(string.ascii_uppercase,
                                 string.ascii_lowercase)
# Heading elements need to be ordered, hence a tuple rather than a set.
headingElements = tuple("h%d" % level for level in range(1, 7))
# Void elements: they never take content and have no end tag.
voidElements = frozenset((
    "area", "base", "br", "col", "command", "embed", "event-source",
    "hr", "img", "input", "link", "meta", "param", "source", "track",
))
# Elements whose contents are tokenized as text in which character
# references are still recognised (cf. rcdataElements below).
cdataElements = frozenset({"title", "textarea"})
# Elements whose contents are tokenized as raw text rather than markup.
rcdataElements = frozenset((
    "style", "script", "xmp", "iframe",
    "noembed", "noframes", "noscript",
))
# Boolean attributes, keyed by element name; the "" key applies to all
# elements.  Values are frozensets of attribute names.
booleanAttributes = {
    "": frozenset(("irrelevant",)),
    "style": frozenset(("scoped",)),
    "img": frozenset(("ismap",)),
    "audio": frozenset(("autoplay", "controls")),
    "video": frozenset(("autoplay", "controls")),
    "script": frozenset(("defer", "async")),
    "details": frozenset(("open",)),
    "datagrid": frozenset(("multiple", "disabled")),
    "command": frozenset(("hidden", "disabled", "checked", "default")),
    # Bug fix: ("noshade") is just a parenthesised string, so
    # frozenset(("noshade")) was the set of its *characters*
    # {'n','o','s','h','a','d','e'}; the trailing comma makes it a
    # one-element tuple, giving the intended {"noshade"}.
    "hr": frozenset(("noshade",)),
    "menu": frozenset(("autosubmit",)),
    "fieldset": frozenset(("disabled", "readonly")),
    "option": frozenset(("disabled", "readonly", "selected")),
    "optgroup": frozenset(("disabled", "readonly")),
    "button": frozenset(("disabled", "autofocus")),
    "input": frozenset(("disabled", "readonly", "required", "autofocus", "checked", "ismap")),
    "select": frozenset(("disabled", "readonly", "autofocus", "multiple")),
    "output": frozenset(("disabled", "readonly")),
}
# entitiesWindows1252 has to be _ordered_ and needs to have an index. It
# therefore can't be a frozenset.
# Index i gives the Unicode codepoint that byte 0x80 + i decodes to under
# windows-1252 (see the per-entry comments); bytes undefined in
# windows-1252 map to 65533, U+FFFD REPLACEMENT CHARACTER.
entitiesWindows1252 = (
8364, # 0x80 0x20AC EURO SIGN
65533, # 0x81 UNDEFINED
8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK
402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK
8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK
8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS
8224, # 0x86 0x2020 DAGGER
8225, # 0x87 0x2021 DOUBLE DAGGER
710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT
8240, # 0x89 0x2030 PER MILLE SIGN
352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON
8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK
338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE
65533, # 0x8D UNDEFINED
381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON
65533, # 0x8F UNDEFINED
65533, # 0x90 UNDEFINED
8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK
8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK
8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK
8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK
8226, # 0x95 0x2022 BULLET
8211, # 0x96 0x2013 EN DASH
8212, # 0x97 0x2014 EM DASH
732, # 0x98 0x02DC SMALL TILDE
8482, # 0x99 0x2122 TRADE MARK SIGN
353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON
8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE
65533, # 0x9D UNDEFINED
382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON
376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS
)
# The five entities predefined by XML itself (semicolons included).
xmlEntities = frozenset({"amp;", "apos;", "gt;", "lt;", "quot;"})
entities = {
"AElig": "\xc6",
"AElig;": "\xc6",
"AMP": "&",
"AMP;": "&",
"Aacute": "\xc1",
"Aacute;": "\xc1",
"Abreve;": "\u0102",
"Acirc": "\xc2",
"Acirc;": "\xc2",
"Acy;": "\u0410",
"Afr;": "\U0001d504",
"Agrave": "\xc0",
"Agrave;": "\xc0",
"Alpha;": "\u0391",
"Amacr;": "\u0100",
"And;": "\u2a53",
"Aogon;": "\u0104",
"Aopf;": "\U0001d538",
"ApplyFunction;": "\u2061",
"Aring": "\xc5",
"Aring;": "\xc5",
"Ascr;": "\U0001d49c",
"Assign;": "\u2254",
"Atilde": "\xc3",
"Atilde;": "\xc3",
"Auml": "\xc4",
"Auml;": "\xc4",
"Backslash;": "\u2216",
"Barv;": "\u2ae7",
"Barwed;": "\u2306",
"Bcy;": "\u0411",
"Because;": "\u2235",
"Bernoullis;": "\u212c",
"Beta;": "\u0392",
"Bfr;": "\U0001d505",
"Bopf;": "\U0001d539",
"Breve;": "\u02d8",
"Bscr;": "\u212c",
"Bumpeq;": "\u224e",
"CHcy;": "\u0427",
"COPY": "\xa9",
"COPY;": "\xa9",
"Cacute;": "\u0106",
"Cap;": "\u22d2",
"CapitalDifferentialD;": "\u2145",
"Cayleys;": "\u212d",
"Ccaron;": "\u010c",
"Ccedil": "\xc7",
"Ccedil;": "\xc7",
"Ccirc;": "\u0108",
"Cconint;": "\u2230",
"Cdot;": "\u010a",
"Cedilla;": "\xb8",
"CenterDot;": "\xb7",
"Cfr;": "\u212d",
"Chi;": "\u03a7",
"CircleDot;": "\u2299",
"CircleMinus;": "\u2296",
"CirclePlus;": "\u2295",
"CircleTimes;": "\u2297",
"ClockwiseContourIntegral;": "\u2232",
"CloseCurlyDoubleQuote;": "\u201d",
"CloseCurlyQuote;": "\u2019",
"Colon;": "\u2237",
"Colone;": "\u2a74",
"Congruent;": "\u2261",
"Conint;": "\u222f",
"ContourIntegral;": "\u222e",
"Copf;": "\u2102",
"Coproduct;": "\u2210",
"CounterClockwiseContourIntegral;": "\u2233",
"Cross;": "\u2a2f",
"Cscr;": "\U0001d49e",
"Cup;": "\u22d3",
"CupCap;": "\u224d",
"DD;": "\u2145",
"DDotrahd;": "\u2911",
"DJcy;": "\u0402",
"DScy;": "\u0405",
"DZcy;": "\u040f",
"Dagger;": "\u2021",
"Darr;": "\u21a1",
"Dashv;": "\u2ae4",
"Dcaron;": "\u010e",
"Dcy;": "\u0414",
"Del;": "\u2207",
"Delta;": "\u0394",
"Dfr;": "\U0001d507",
"DiacriticalAcute;": "\xb4",
"DiacriticalDot;": "\u02d9",
"DiacriticalDoubleAcute;": "\u02dd",
"DiacriticalGrave;": "`",
"DiacriticalTilde;": "\u02dc",
"Diamond;": "\u22c4",
"DifferentialD;": "\u2146",
"Dopf;": "\U0001d53b",
"Dot;": "\xa8",
"DotDot;": "\u20dc",
"DotEqual;": "\u2250",
"DoubleContourIntegral;": "\u222f",
"DoubleDot;": "\xa8",
"DoubleDownArrow;": "\u21d3",
"DoubleLeftArrow;": "\u21d0",
"DoubleLeftRightArrow;": "\u21d4",
"DoubleLeftTee;": "\u2ae4",
"DoubleLongLeftArrow;": "\u27f8",
"DoubleLongLeftRightArrow;": "\u27fa",
"DoubleLongRightArrow;": "\u27f9",
"DoubleRightArrow;": "\u21d2",
"DoubleRightTee;": "\u22a8",
"DoubleUpArrow;": "\u21d1",
"DoubleUpDownArrow;": "\u21d5",
"DoubleVerticalBar;": "\u2225",
"DownArrow;": "\u2193",
"DownArrowBar;": "\u2913",
"DownArrowUpArrow;": "\u21f5",
"DownBreve;": "\u0311",
"DownLeftRightVector;": "\u2950",
"DownLeftTeeVector;": "\u295e",
"DownLeftVector;": "\u21bd",
"DownLeftVectorBar;": "\u2956",
"DownRightTeeVector;": "\u295f",
"DownRightVector;": "\u21c1",
"DownRightVectorBar;": "\u2957",
"DownTee;": "\u22a4",
"DownTeeArrow;": "\u21a7",
"Downarrow;": "\u21d3",
"Dscr;": "\U0001d49f",
"Dstrok;": "\u0110",
"ENG;": "\u014a",
"ETH": "\xd0",
"ETH;": "\xd0",
"Eacute": "\xc9",
"Eacute;": "\xc9",
"Ecaron;": "\u011a",
"Ecirc": "\xca",
"Ecirc;": "\xca",
"Ecy;": "\u042d",
"Edot;": "\u0116",
"Efr;": "\U0001d508",
"Egrave": "\xc8",
"Egrave;": "\xc8",
"Element;": "\u2208",
"Emacr;": "\u0112",
"EmptySmallSquare;": "\u25fb",
"EmptyVerySmallSquare;": "\u25ab",
"Eogon;": "\u0118",
"Eopf;": "\U0001d53c",
"Epsilon;": "\u0395",
"Equal;": "\u2a75",
"EqualTilde;": "\u2242",
"Equilibrium;": "\u21cc",
"Escr;": "\u2130",
"Esim;": "\u2a73",
"Eta;": "\u0397",
"Euml": "\xcb",
"Euml;": "\xcb",
"Exists;": "\u2203",
"ExponentialE;": "\u2147",
"Fcy;": "\u0424",
"Ffr;": "\U0001d509",
"FilledSmallSquare;": "\u25fc",
"FilledVerySmallSquare;": "\u25aa",
"Fopf;": "\U0001d53d",
"ForAll;": "\u2200",
"Fouriertrf;": "\u2131",
"Fscr;": "\u2131",
"GJcy;": "\u0403",
"GT": ">",
"GT;": ">",
"Gamma;": "\u0393",
"Gammad;": "\u03dc",
"Gbreve;": "\u011e",
"Gcedil;": "\u0122",
"Gcirc;": "\u011c",
"Gcy;": "\u0413",
"Gdot;": "\u0120",
"Gfr;": "\U0001d50a",
"Gg;": "\u22d9",
"Gopf;": "\U0001d53e",
"GreaterEqual;": "\u2265",
"GreaterEqualLess;": "\u22db",
"GreaterFullEqual;": "\u2267",
"GreaterGreater;": "\u2aa2",
"GreaterLess;": "\u2277",
"GreaterSlantEqual;": "\u2a7e",
"GreaterTilde;": "\u2273",
"Gscr;": "\U0001d4a2",
"Gt;": "\u226b",
"HARDcy;": "\u042a",
"Hacek;": "\u02c7",
"Hat;": "^",
"Hcirc;": "\u0124",
"Hfr;": "\u210c",
"HilbertSpace;": "\u210b",
"Hopf;": "\u210d",
"HorizontalLine;": "\u2500",
"Hscr;": "\u210b",
"Hstrok;": "\u0126",
"HumpDownHump;": "\u224e",
"HumpEqual;": "\u224f",
"IEcy;": "\u0415",
"IJlig;": "\u0132",
"IOcy;": "\u0401",
"Iacute": "\xcd",
"Iacute;": "\xcd",
"Icirc": "\xce",
"Icirc;": "\xce",
"Icy;": "\u0418",
"Idot;": "\u0130",
"Ifr;": "\u2111",
"Igrave": "\xcc",
"Igrave;": "\xcc",
"Im;": "\u2111",
"Imacr;": "\u012a",
"ImaginaryI;": "\u2148",
"Implies;": "\u21d2",
"Int;": "\u222c",
"Integral;": "\u222b",
"Intersection;": "\u22c2",
"InvisibleComma;": "\u2063",
"InvisibleTimes;": "\u2062",
"Iogon;": "\u012e",
"Iopf;": "\U0001d540",
"Iota;": "\u0399",
"Iscr;": "\u2110",
"Itilde;": "\u0128",
"Iukcy;": "\u0406",
"Iuml": "\xcf",
"Iuml;": "\xcf",
"Jcirc;": "\u0134",
"Jcy;": "\u0419",
"Jfr;": "\U0001d50d",
"Jopf;": "\U0001d541",
"Jscr;": "\U0001d4a5",
"Jsercy;": "\u0408",
"Jukcy;": "\u0404",
"KHcy;": "\u0425",
"KJcy;": "\u040c",
"Kappa;": "\u039a",
"Kcedil;": "\u0136",
"Kcy;": "\u041a",
"Kfr;": "\U0001d50e",
"Kopf;": "\U0001d542",
"Kscr;": "\U0001d4a6",
"LJcy;": "\u0409",
"LT": "<",
"LT;": "<",
"Lacute;": "\u0139",
"Lambda;": "\u039b",
"Lang;": "\u27ea",
"Laplacetrf;": "\u2112",
"Larr;": "\u219e",
"Lcaron;": "\u013d",
"Lcedil;": "\u013b",
"Lcy;": "\u041b",
"LeftAngleBracket;": "\u27e8",
"LeftArrow;": "\u2190",
"LeftArrowBar;": "\u21e4",
"LeftArrowRightArrow;": "\u21c6",
"LeftCeiling;": "\u2308",
"LeftDoubleBracket;": "\u27e6",
"LeftDownTeeVector;": "\u2961",
"LeftDownVector;": "\u21c3",
"LeftDownVectorBar;": "\u2959",
"LeftFloor;": "\u230a",
"LeftRightArrow;": "\u2194",
"LeftRightVector;": "\u294e",
"LeftTee;": "\u22a3",
"LeftTeeArrow;": "\u21a4",
"LeftTeeVector;": "\u295a",
"LeftTriangle;": "\u22b2",
"LeftTriangleBar;": "\u29cf",
"LeftTriangleEqual;": "\u22b4",
"LeftUpDownVector;": "\u2951",
"LeftUpTeeVector;": "\u2960",
"LeftUpVector;": "\u21bf",
"LeftUpVectorBar;": "\u2958",
"LeftVector;": "\u21bc",
"LeftVectorBar;": "\u2952",
"Leftarrow;": "\u21d0",
"Leftrightarrow;": "\u21d4",
"LessEqualGreater;": "\u22da",
"LessFullEqual;": "\u2266",
"LessGreater;": "\u2276",
"LessLess;": "\u2aa1",
"LessSlantEqual;": "\u2a7d",
"LessTilde;": "\u2272",
"Lfr;": "\U0001d50f",
"Ll;": "\u22d8",
"Lleftarrow;": "\u21da",
"Lmidot;": "\u013f",
"LongLeftArrow;": "\u27f5",
"LongLeftRightArrow;": "\u27f7",
"LongRightArrow;": "\u27f6",
"Longleftarrow;": "\u27f8",
"Longleftrightarrow;": "\u27fa",
"Longrightarrow;": "\u27f9",
"Lopf;": "\U0001d543",
"LowerLeftArrow;": "\u2199",
"LowerRightArrow;": "\u2198",
"Lscr;": "\u2112",
"Lsh;": "\u21b0",
"Lstrok;": "\u0141",
"Lt;": "\u226a",
"Map;": "\u2905",
"Mcy;": "\u041c",
"MediumSpace;": "\u205f",
"Mellintrf;": "\u2133",
"Mfr;": "\U0001d510",
"MinusPlus;": "\u2213",
"Mopf;": "\U0001d544",
"Mscr;": "\u2133",
"Mu;": "\u039c",
"NJcy;": "\u040a",
"Nacute;": "\u0143",
"Ncaron;": "\u0147",
"Ncedil;": "\u0145",
"Ncy;": "\u041d",
"NegativeMediumSpace;": "\u200b",
"NegativeThickSpace;": "\u200b",
"NegativeThinSpace;": "\u200b",
"NegativeVeryThinSpace;": "\u200b",
"NestedGreaterGreater;": "\u226b",
"NestedLessLess;": "\u226a",
"NewLine;": "\n",
"Nfr;": "\U0001d511",
"NoBreak;": "\u2060",
"NonBreakingSpace;": "\xa0",
"Nopf;": "\u2115",
"Not;": "\u2aec",
"NotCongruent;": "\u2262",
"NotCupCap;": "\u226d",
"NotDoubleVerticalBar;": "\u2226",
"NotElement;": "\u2209",
"NotEqual;": "\u2260",
"NotEqualTilde;": "\u2242\u0338",
"NotExists;": "\u2204",
"NotGreater;": "\u226f",
"NotGreaterEqual;": "\u2271",
"NotGreaterFullEqual;": "\u2267\u0338",
"NotGreaterGreater;": "\u226b\u0338",
"NotGreaterLess;": "\u2279",
"NotGreaterSlantEqual;": "\u2a7e\u0338",
"NotGreaterTilde;": "\u2275",
"NotHumpDownHump;": "\u224e\u0338",
"NotHumpEqual;": "\u224f\u0338",
"NotLeftTriangle;": "\u22ea",
"NotLeftTriangleBar;": "\u29cf\u0338",
"NotLeftTriangleEqual;": "\u22ec",
"NotLess;": "\u226e",
"NotLessEqual;": "\u2270",
"NotLessGreater;": "\u2278",
"NotLessLess;": "\u226a\u0338",
"NotLessSlantEqual;": "\u2a7d\u0338",
"NotLessTilde;": "\u2274",
"NotNestedGreaterGreater;": "\u2aa2\u0338",
"NotNestedLessLess;": "\u2aa1\u0338",
"NotPrecedes;": "\u2280",
"NotPrecedesEqual;": "\u2aaf\u0338",
"NotPrecedesSlantEqual;": "\u22e0",
"NotReverseElement;": "\u220c",
"NotRightTriangle;": "\u22eb",
"NotRightTriangleBar;": "\u29d0\u0338",
"NotRightTriangleEqual;": "\u22ed",
"NotSquareSubset;": "\u228f\u0338",
"NotSquareSubsetEqual;": "\u22e2",
"NotSquareSuperset;": "\u2290\u0338",
"NotSquareSupersetEqual;": "\u22e3",
"NotSubset;": "\u2282\u20d2",
"NotSubsetEqual;": "\u2288",
"NotSucceeds;": "\u2281",
"NotSucceedsEqual;": "\u2ab0\u0338",
"NotSucceedsSlantEqual;": "\u22e1",
"NotSucceedsTilde;": "\u227f\u0338",
"NotSuperset;": "\u2283\u20d2",
"NotSupersetEqual;": "\u2289",
"NotTilde;": "\u2241",
"NotTildeEqual;": "\u2244",
"NotTildeFullEqual;": "\u2247",
"NotTildeTilde;": "\u2249",
"NotVerticalBar;": "\u2224",
"Nscr;": "\U0001d4a9",
"Ntilde": "\xd1",
"Ntilde;": "\xd1",
"Nu;": "\u039d",
"OElig;": "\u0152",
"Oacute": "\xd3",
"Oacute;": "\xd3",
"Ocirc": "\xd4",
"Ocirc;": "\xd4",
"Ocy;": "\u041e",
"Odblac;": "\u0150",
"Ofr;": "\U0001d512",
"Ograve": "\xd2",
"Ograve;": "\xd2",
"Omacr;": "\u014c",
"Omega;": "\u03a9",
"Omicron;": "\u039f",
"Oopf;": "\U0001d546",
"OpenCurlyDoubleQuote;": "\u201c",
"OpenCurlyQuote;": "\u2018",
"Or;": "\u2a54",
"Oscr;": "\U0001d4aa",
"Oslash": "\xd8",
"Oslash;": "\xd8",
"Otilde": "\xd5",
"Otilde;": "\xd5",
"Otimes;": "\u2a37",
"Ouml": "\xd6",
"Ouml;": "\xd6",
"OverBar;": "\u203e",
"OverBrace;": "\u23de",
"OverBracket;": "\u23b4",
"OverParenthesis;": "\u23dc",
"PartialD;": "\u2202",
"Pcy;": "\u041f",
"Pfr;": "\U0001d513",
"Phi;": "\u03a6",
"Pi;": "\u03a0",
"PlusMinus;": "\xb1",
"Poincareplane;": "\u210c",
"Popf;": "\u2119",
"Pr;": "\u2abb",
"Precedes;": "\u227a",
"PrecedesEqual;": "\u2aaf",
"PrecedesSlantEqual;": "\u227c",
"PrecedesTilde;": "\u227e",
"Prime;": "\u2033",
"Product;": "\u220f",
"Proportion;": "\u2237",
"Proportional;": "\u221d",
"Pscr;": "\U0001d4ab",
"Psi;": "\u03a8",
"QUOT": "\"",
"QUOT;": "\"",
"Qfr;": "\U0001d514",
"Qopf;": "\u211a",
"Qscr;": "\U0001d4ac",
"RBarr;": "\u2910",
"REG": "\xae",
"REG;": "\xae",
"Racute;": "\u0154",
"Rang;": "\u27eb",
"Rarr;": "\u21a0",
"Rarrtl;": "\u2916",
"Rcaron;": "\u0158",
"Rcedil;": "\u0156",
"Rcy;": "\u0420",
"Re;": "\u211c",
"ReverseElement;": "\u220b",
"ReverseEquilibrium;": "\u21cb",
"ReverseUpEquilibrium;": "\u296f",
"Rfr;": "\u211c",
"Rho;": "\u03a1",
"RightAngleBracket;": "\u27e9",
"RightArrow;": "\u2192",
"RightArrowBar;": "\u21e5",
"RightArrowLeftArrow;": "\u21c4",
"RightCeiling;": "\u2309",
"RightDoubleBracket;": "\u27e7",
"RightDownTeeVector;": "\u295d",
"RightDownVector;": "\u21c2",
"RightDownVectorBar;": "\u2955",
"RightFloor;": "\u230b",
"RightTee;": "\u22a2",
"RightTeeArrow;": "\u21a6",
"RightTeeVector;": "\u295b",
"RightTriangle;": "\u22b3",
"RightTriangleBar;": "\u29d0",
"RightTriangleEqual;": "\u22b5",
"RightUpDownVector;": "\u294f",
"RightUpTeeVector;": "\u295c",
"RightUpVector;": "\u21be",
"RightUpVectorBar;": "\u2954",
"RightVector;": "\u21c0",
"RightVectorBar;": "\u2953",
"Rightarrow;": "\u21d2",
"Ropf;": "\u211d",
"RoundImplies;": "\u2970",
"Rrightarrow;": "\u21db",
"Rscr;": "\u211b",
"Rsh;": "\u21b1",
"RuleDelayed;": "\u29f4",
"SHCHcy;": "\u0429",
"SHcy;": "\u0428",
"SOFTcy;": "\u042c",
"Sacute;": "\u015a",
"Sc;": "\u2abc",
"Scaron;": "\u0160",
"Scedil;": "\u015e",
"Scirc;": "\u015c",
"Scy;": "\u0421",
"Sfr;": "\U0001d516",
"ShortDownArrow;": "\u2193",
"ShortLeftArrow;": "\u2190",
"ShortRightArrow;": "\u2192",
"ShortUpArrow;": "\u2191",
"Sigma;": "\u03a3",
"SmallCircle;": "\u2218",
"Sopf;": "\U0001d54a",
"Sqrt;": "\u221a",
"Square;": "\u25a1",
"SquareIntersection;": "\u2293",
"SquareSubset;": "\u228f",
"SquareSubsetEqual;": "\u2291",
"SquareSuperset;": "\u2290",
"SquareSupersetEqual;": "\u2292",
"SquareUnion;": "\u2294",
"Sscr;": "\U0001d4ae",
"Star;": "\u22c6",
"Sub;": "\u22d0",
"Subset;": "\u22d0",
"SubsetEqual;": "\u2286",
"Succeeds;": "\u227b",
"SucceedsEqual;": "\u2ab0",
"SucceedsSlantEqual;": "\u227d",
"SucceedsTilde;": "\u227f",
"SuchThat;": "\u220b",
"Sum;": "\u2211",
"Sup;": "\u22d1",
"Superset;": "\u2283",
"SupersetEqual;": "\u2287",
"Supset;": "\u22d1",
"THORN": "\xde",
"THORN;": "\xde",
"TRADE;": "\u2122",
"TSHcy;": "\u040b",
"TScy;": "\u0426",
"Tab;": "\t",
"Tau;": "\u03a4",
"Tcaron;": "\u0164",
"Tcedil;": "\u0162",
"Tcy;": "\u0422",
"Tfr;": "\U0001d517",
"Therefore;": "\u2234",
"Theta;": "\u0398",
"ThickSpace;": "\u205f\u200a",
"ThinSpace;": "\u2009",
"Tilde;": "\u223c",
"TildeEqual;": "\u2243",
"TildeFullEqual;": "\u2245",
"TildeTilde;": "\u2248",
"Topf;": "\U0001d54b",
"TripleDot;": "\u20db",
"Tscr;": "\U0001d4af",
"Tstrok;": "\u0166",
"Uacute": "\xda",
"Uacute;": "\xda",
"Uarr;": "\u219f",
"Uarrocir;": "\u2949",
"Ubrcy;": "\u040e",
"Ubreve;": "\u016c",
"Ucirc": "\xdb",
"Ucirc;": "\xdb",
"Ucy;": "\u0423",
"Udblac;": "\u0170",
"Ufr;": "\U0001d518",
"Ugrave": "\xd9",
"Ugrave;": "\xd9",
"Umacr;": "\u016a",
"UnderBar;": "_",
"UnderBrace;": "\u23df",
"UnderBracket;": "\u23b5",
"UnderParenthesis;": "\u23dd",
"Union;": "\u22c3",
"UnionPlus;": "\u228e",
"Uogon;": "\u0172",
"Uopf;": "\U0001d54c",
"UpArrow;": "\u2191",
"UpArrowBar;": "\u2912",
"UpArrowDownArrow;": "\u21c5",
"UpDownArrow;": "\u2195",
"UpEquilibrium;": "\u296e",
"UpTee;": "\u22a5",
"UpTeeArrow;": "\u21a5",
"Uparrow;": "\u21d1",
"Updownarrow;": "\u21d5",
"UpperLeftArrow;": "\u2196",
"UpperRightArrow;": "\u2197",
"Upsi;": "\u03d2",
"Upsilon;": "\u03a5",
"Uring;": "\u016e",
"Uscr;": "\U0001d4b0",
"Utilde;": "\u0168",
"Uuml": "\xdc",
"Uuml;": "\xdc",
"VDash;": "\u22ab",
"Vbar;": "\u2aeb",
"Vcy;": "\u0412",
"Vdash;": "\u22a9",
"Vdashl;": "\u2ae6",
"Vee;": "\u22c1",
"Verbar;": "\u2016",
"Vert;": "\u2016",
"VerticalBar;": "\u2223",
"VerticalLine;": "|",
"VerticalSeparator;": "\u2758",
"VerticalTilde;": "\u2240",
"VeryThinSpace;": "\u200a",
"Vfr;": "\U0001d519",
"Vopf;": "\U0001d54d",
"Vscr;": "\U0001d4b1",
"Vvdash;": "\u22aa",
"Wcirc;": "\u0174",
"Wedge;": "\u22c0",
"Wfr;": "\U0001d51a",
"Wopf;": "\U0001d54e",
"Wscr;": "\U0001d4b2",
"Xfr;": "\U0001d51b",
"Xi;": "\u039e",
"Xopf;": "\U0001d54f",
"Xscr;": "\U0001d4b3",
"YAcy;": "\u042f",
"YIcy;": "\u0407",
"YUcy;": "\u042e",
"Yacute": "\xdd",
"Yacute;": "\xdd",
"Ycirc;": "\u0176",
"Ycy;": "\u042b",
"Yfr;": "\U0001d51c",
"Yopf;": "\U0001d550",
"Yscr;": "\U0001d4b4",
"Yuml;": "\u0178",
"ZHcy;": "\u0416",
"Zacute;": "\u0179",
"Zcaron;": "\u017d",
"Zcy;": "\u0417",
"Zdot;": "\u017b",
"ZeroWidthSpace;": "\u200b",
"Zeta;": "\u0396",
"Zfr;": "\u2128",
"Zopf;": "\u2124",
"Zscr;": "\U0001d4b5",
"aacute": "\xe1",
"aacute;": "\xe1",
"abreve;": "\u0103",
"ac;": "\u223e",
"acE;": "\u223e\u0333",
"acd;": "\u223f",
"acirc": "\xe2",
"acirc;": "\xe2",
"acute": "\xb4",
"acute;": "\xb4",
"acy;": "\u0430",
"aelig": "\xe6",
"aelig;": "\xe6",
"af;": "\u2061",
"afr;": "\U0001d51e",
"agrave": "\xe0",
"agrave;": "\xe0",
"alefsym;": "\u2135",
"aleph;": "\u2135",
"alpha;": "\u03b1",
"amacr;": "\u0101",
"amalg;": "\u2a3f",
"amp": "&",
"amp;": "&",
"and;": "\u2227",
"andand;": "\u2a55",
"andd;": "\u2a5c",
"andslope;": "\u2a58",
"andv;": "\u2a5a",
"ang;": "\u2220",
"ange;": "\u29a4",
"angle;": "\u2220",
"angmsd;": "\u2221",
"angmsdaa;": "\u29a8",
"angmsdab;": "\u29a9",
"angmsdac;": "\u29aa",
"angmsdad;": "\u29ab",
"angmsdae;": "\u29ac",
"angmsdaf;": "\u29ad",
"angmsdag;": "\u29ae",
"angmsdah;": "\u29af",
"angrt;": "\u221f",
"angrtvb;": "\u22be",
"angrtvbd;": "\u299d",
"angsph;": "\u2222",
"angst;": "\xc5",
"angzarr;": "\u237c",
"aogon;": "\u0105",
"aopf;": "\U0001d552",
"ap;": "\u2248",
"apE;": "\u2a70",
"apacir;": "\u2a6f",
"ape;": "\u224a",
"apid;": "\u224b",
"apos;": "'",
"approx;": "\u2248",
"approxeq;": "\u224a",
"aring": "\xe5",
"aring;": "\xe5",
"ascr;": "\U0001d4b6",
"ast;": "*",
"asymp;": "\u2248",
"asympeq;": "\u224d",
"atilde": "\xe3",
"atilde;": "\xe3",
"auml": "\xe4",
"auml;": "\xe4",
"awconint;": "\u2233",
"awint;": "\u2a11",
"bNot;": "\u2aed",
"backcong;": "\u224c",
"backepsilon;": "\u03f6",
"backprime;": "\u2035",
"backsim;": "\u223d",
"backsimeq;": "\u22cd",
"barvee;": "\u22bd",
"barwed;": "\u2305",
"barwedge;": "\u2305",
"bbrk;": "\u23b5",
"bbrktbrk;": "\u23b6",
"bcong;": "\u224c",
"bcy;": "\u0431",
"bdquo;": "\u201e",
"becaus;": "\u2235",
"because;": "\u2235",
"bemptyv;": "\u29b0",
"bepsi;": "\u03f6",
"bernou;": "\u212c",
"beta;": "\u03b2",
"beth;": "\u2136",
"between;": "\u226c",
"bfr;": "\U0001d51f",
"bigcap;": "\u22c2",
"bigcirc;": "\u25ef",
"bigcup;": "\u22c3",
"bigodot;": "\u2a00",
"bigoplus;": "\u2a01",
"bigotimes;": "\u2a02",
"bigsqcup;": "\u2a06",
"bigstar;": "\u2605",
"bigtriangledown;": "\u25bd",
"bigtriangleup;": "\u25b3",
"biguplus;": "\u2a04",
"bigvee;": "\u22c1",
"bigwedge;": "\u22c0",
"bkarow;": "\u290d",
"blacklozenge;": "\u29eb",
"blacksquare;": "\u25aa",
"blacktriangle;": "\u25b4",
"blacktriangledown;": "\u25be",
"blacktriangleleft;": "\u25c2",
"blacktriangleright;": "\u25b8",
"blank;": "\u2423",
"blk12;": "\u2592",
"blk14;": "\u2591",
"blk34;": "\u2593",
"block;": "\u2588",
"bne;": "=\u20e5",
"bnequiv;": "\u2261\u20e5",
"bnot;": "\u2310",
"bopf;": "\U0001d553",
"bot;": "\u22a5",
"bottom;": "\u22a5",
"bowtie;": "\u22c8",
"boxDL;": "\u2557",
"boxDR;": "\u2554",
"boxDl;": "\u2556",
"boxDr;": "\u2553",
"boxH;": "\u2550",
"boxHD;": "\u2566",
"boxHU;": "\u2569",
"boxHd;": "\u2564",
"boxHu;": "\u2567",
"boxUL;": "\u255d",
"boxUR;": "\u255a",
"boxUl;": "\u255c",
"boxUr;": "\u2559",
"boxV;": "\u2551",
"boxVH;": "\u256c",
"boxVL;": "\u2563",
"boxVR;": "\u2560",
"boxVh;": "\u256b",
"boxVl;": "\u2562",
"boxVr;": "\u255f",
"boxbox;": "\u29c9",
"boxdL;": "\u2555",
"boxdR;": "\u2552",
"boxdl;": "\u2510",
"boxdr;": "\u250c",
"boxh;": "\u2500",
"boxhD;": "\u2565",
"boxhU;": "\u2568",
"boxhd;": "\u252c",
"boxhu;": "\u2534",
"boxminus;": "\u229f",
"boxplus;": "\u229e",
"boxtimes;": "\u22a0",
"boxuL;": "\u255b",
"boxuR;": "\u2558",
"boxul;": "\u2518",
"boxur;": "\u2514",
"boxv;": "\u2502",
"boxvH;": "\u256a",
"boxvL;": "\u2561",
"boxvR;": "\u255e",
"boxvh;": "\u253c",
"boxvl;": "\u2524",
"boxvr;": "\u251c",
"bprime;": "\u2035",
"breve;": "\u02d8",
"brvbar": "\xa6",
"brvbar;": "\xa6",
"bscr;": "\U0001d4b7",
"bsemi;": "\u204f",
"bsim;": "\u223d",
"bsime;": "\u22cd",
"bsol;": "\\",
"bsolb;": "\u29c5",
"bsolhsub;": "\u27c8",
"bull;": "\u2022",
"bullet;": "\u2022",
"bump;": "\u224e",
"bumpE;": "\u2aae",
"bumpe;": "\u224f",
"bumpeq;": "\u224f",
"cacute;": "\u0107",
"cap;": "\u2229",
"capand;": "\u2a44",
"capbrcup;": "\u2a49",
"capcap;": "\u2a4b",
"capcup;": "\u2a47",
"capdot;": "\u2a40",
"caps;": "\u2229\ufe00",
"caret;": "\u2041",
"caron;": "\u02c7",
"ccaps;": "\u2a4d",
"ccaron;": "\u010d",
"ccedil": "\xe7",
"ccedil;": "\xe7",
"ccirc;": "\u0109",
"ccups;": "\u2a4c",
"ccupssm;": "\u2a50",
"cdot;": "\u010b",
"cedil": "\xb8",
"cedil;": "\xb8",
"cemptyv;": "\u29b2",
"cent": "\xa2",
"cent;": "\xa2",
"centerdot;": "\xb7",
"cfr;": "\U0001d520",
"chcy;": "\u0447",
"check;": "\u2713",
"checkmark;": "\u2713",
"chi;": "\u03c7",
"cir;": "\u25cb",
"cirE;": "\u29c3",
"circ;": "\u02c6",
"circeq;": "\u2257",
"circlearrowleft;": "\u21ba",
"circlearrowright;": "\u21bb",
"circledR;": "\xae",
"circledS;": "\u24c8",
"circledast;": "\u229b",
"circledcirc;": "\u229a",
"circleddash;": "\u229d",
"cire;": "\u2257",
"cirfnint;": "\u2a10",
"cirmid;": "\u2aef",
"cirscir;": "\u29c2",
"clubs;": "\u2663",
"clubsuit;": "\u2663",
"colon;": ":",
"colone;": "\u2254",
"coloneq;": "\u2254",
"comma;": ",",
"commat;": "@",
"comp;": "\u2201",
"compfn;": "\u2218",
"complement;": "\u2201",
"complexes;": "\u2102",
"cong;": "\u2245",
"congdot;": "\u2a6d",
"conint;": "\u222e",
"copf;": "\U0001d554",
"coprod;": "\u2210",
"copy": "\xa9",
"copy;": "\xa9",
"copysr;": "\u2117",
"crarr;": "\u21b5",
"cross;": "\u2717",
"cscr;": "\U0001d4b8",
"csub;": "\u2acf",
"csube;": "\u2ad1",
"csup;": "\u2ad0",
"csupe;": "\u2ad2",
"ctdot;": "\u22ef",
"cudarrl;": "\u2938",
"cudarrr;": "\u2935",
"cuepr;": "\u22de",
"cuesc;": "\u22df",
"cularr;": "\u21b6",
"cularrp;": "\u293d",
"cup;": "\u222a",
"cupbrcap;": "\u2a48",
"cupcap;": "\u2a46",
"cupcup;": "\u2a4a",
"cupdot;": "\u228d",
"cupor;": "\u2a45",
"cups;": "\u222a\ufe00",
"curarr;": "\u21b7",
"curarrm;": "\u293c",
"curlyeqprec;": "\u22de",
"curlyeqsucc;": "\u22df",
"curlyvee;": "\u22ce",
"curlywedge;": "\u22cf",
"curren": "\xa4",
"curren;": "\xa4",
"curvearrowleft;": "\u21b6",
"curvearrowright;": "\u21b7",
"cuvee;": "\u22ce",
"cuwed;": "\u22cf",
"cwconint;": "\u2232",
"cwint;": "\u2231",
"cylcty;": "\u232d",
"dArr;": "\u21d3",
"dHar;": "\u2965",
"dagger;": "\u2020",
"daleth;": "\u2138",
"darr;": "\u2193",
"dash;": "\u2010",
"dashv;": "\u22a3",
"dbkarow;": "\u290f",
"dblac;": "\u02dd",
"dcaron;": "\u010f",
"dcy;": "\u0434",
"dd;": "\u2146",
"ddagger;": "\u2021",
"ddarr;": "\u21ca",
"ddotseq;": "\u2a77",
"deg": "\xb0",
"deg;": "\xb0",
"delta;": "\u03b4",
"demptyv;": "\u29b1",
"dfisht;": "\u297f",
"dfr;": "\U0001d521",
"dharl;": "\u21c3",
"dharr;": "\u21c2",
"diam;": "\u22c4",
"diamond;": "\u22c4",
"diamondsuit;": "\u2666",
"diams;": "\u2666",
"die;": "\xa8",
"digamma;": "\u03dd",
"disin;": "\u22f2",
"div;": "\xf7",
"divide": "\xf7",
"divide;": "\xf7",
"divideontimes;": "\u22c7",
"divonx;": "\u22c7",
"djcy;": "\u0452",
"dlcorn;": "\u231e",
"dlcrop;": "\u230d",
"dollar;": "$",
"dopf;": "\U0001d555",
"dot;": "\u02d9",
"doteq;": "\u2250",
"doteqdot;": "\u2251",
"dotminus;": "\u2238",
"dotplus;": "\u2214",
"dotsquare;": "\u22a1",
"doublebarwedge;": "\u2306",
"downarrow;": "\u2193",
"downdownarrows;": "\u21ca",
"downharpoonleft;": "\u21c3",
"downharpoonright;": "\u21c2",
"drbkarow;": "\u2910",
"drcorn;": "\u231f",
"drcrop;": "\u230c",
"dscr;": "\U0001d4b9",
"dscy;": "\u0455",
"dsol;": "\u29f6",
"dstrok;": "\u0111",
"dtdot;": "\u22f1",
"dtri;": "\u25bf",
"dtrif;": "\u25be",
"duarr;": "\u21f5",
"duhar;": "\u296f",
"dwangle;": "\u29a6",
"dzcy;": "\u045f",
"dzigrarr;": "\u27ff",
"eDDot;": "\u2a77",
"eDot;": "\u2251",
"eacute": "\xe9",
"eacute;": "\xe9",
"easter;": "\u2a6e",
"ecaron;": "\u011b",
"ecir;": "\u2256",
"ecirc": "\xea",
"ecirc;": "\xea",
"ecolon;": "\u2255",
"ecy;": "\u044d",
"edot;": "\u0117",
"ee;": "\u2147",
"efDot;": "\u2252",
"efr;": "\U0001d522",
"eg;": "\u2a9a",
"egrave": "\xe8",
"egrave;": "\xe8",
"egs;": "\u2a96",
"egsdot;": "\u2a98",
"el;": "\u2a99",
"elinters;": "\u23e7",
"ell;": "\u2113",
"els;": "\u2a95",
"elsdot;": "\u2a97",
"emacr;": "\u0113",
"empty;": "\u2205",
"emptyset;": "\u2205",
"emptyv;": "\u2205",
"emsp13;": "\u2004",
"emsp14;": "\u2005",
"emsp;": "\u2003",
"eng;": "\u014b",
"ensp;": "\u2002",
"eogon;": "\u0119",
"eopf;": "\U0001d556",
"epar;": "\u22d5",
"eparsl;": "\u29e3",
"eplus;": "\u2a71",
"epsi;": "\u03b5",
"epsilon;": "\u03b5",
"epsiv;": "\u03f5",
"eqcirc;": "\u2256",
"eqcolon;": "\u2255",
"eqsim;": "\u2242",
"eqslantgtr;": "\u2a96",
"eqslantless;": "\u2a95",
"equals;": "=",
"equest;": "\u225f",
"equiv;": "\u2261",
"equivDD;": "\u2a78",
"eqvparsl;": "\u29e5",
"erDot;": "\u2253",
"erarr;": "\u2971",
"escr;": "\u212f",
"esdot;": "\u2250",
"esim;": "\u2242",
"eta;": "\u03b7",
"eth": "\xf0",
"eth;": "\xf0",
"euml": "\xeb",
"euml;": "\xeb",
"euro;": "\u20ac",
"excl;": "!",
"exist;": "\u2203",
"expectation;": "\u2130",
"exponentiale;": "\u2147",
"fallingdotseq;": "\u2252",
"fcy;": "\u0444",
"female;": "\u2640",
"ffilig;": "\ufb03",
"fflig;": "\ufb00",
"ffllig;": "\ufb04",
"ffr;": "\U0001d523",
"filig;": "\ufb01",
"fjlig;": "fj",
"flat;": "\u266d",
"fllig;": "\ufb02",
"fltns;": "\u25b1",
"fnof;": "\u0192",
"fopf;": "\U0001d557",
"forall;": "\u2200",
"fork;": "\u22d4",
"forkv;": "\u2ad9",
"fpartint;": "\u2a0d",
"frac12": "\xbd",
"frac12;": "\xbd",
"frac13;": "\u2153",
"frac14": "\xbc",
"frac14;": "\xbc",
"frac15;": "\u2155",
"frac16;": "\u2159",
"frac18;": "\u215b",
"frac23;": "\u2154",
"frac25;": "\u2156",
"frac34": "\xbe",
"frac34;": "\xbe",
"frac35;": "\u2157",
"frac38;": "\u215c",
"frac45;": "\u2158",
"frac56;": "\u215a",
"frac58;": "\u215d",
"frac78;": "\u215e",
"frasl;": "\u2044",
"frown;": "\u2322",
"fscr;": "\U0001d4bb",
"gE;": "\u2267",
"gEl;": "\u2a8c",
"gacute;": "\u01f5",
"gamma;": "\u03b3",
"gammad;": "\u03dd",
"gap;": "\u2a86",
"gbreve;": "\u011f",
"gcirc;": "\u011d",
"gcy;": "\u0433",
"gdot;": "\u0121",
"ge;": "\u2265",
"gel;": "\u22db",
"geq;": "\u2265",
"geqq;": "\u2267",
"geqslant;": "\u2a7e",
"ges;": "\u2a7e",
"gescc;": "\u2aa9",
"gesdot;": "\u2a80",
"gesdoto;": "\u2a82",
"gesdotol;": "\u2a84",
"gesl;": "\u22db\ufe00",
"gesles;": "\u2a94",
"gfr;": "\U0001d524",
"gg;": "\u226b",
"ggg;": "\u22d9",
"gimel;": "\u2137",
"gjcy;": "\u0453",
"gl;": "\u2277",
"glE;": "\u2a92",
"gla;": "\u2aa5",
"glj;": "\u2aa4",
"gnE;": "\u2269",
"gnap;": "\u2a8a",
"gnapprox;": "\u2a8a",
"gne;": "\u2a88",
"gneq;": "\u2a88",
"gneqq;": "\u2269",
"gnsim;": "\u22e7",
"gopf;": "\U0001d558",
"grave;": "`",
"gscr;": "\u210a",
"gsim;": "\u2273",
"gsime;": "\u2a8e",
"gsiml;": "\u2a90",
"gt": ">",
"gt;": ">",
"gtcc;": "\u2aa7",
"gtcir;": "\u2a7a",
"gtdot;": "\u22d7",
"gtlPar;": "\u2995",
"gtquest;": "\u2a7c",
"gtrapprox;": "\u2a86",
"gtrarr;": "\u2978",
"gtrdot;": "\u22d7",
"gtreqless;": "\u22db",
"gtreqqless;": "\u2a8c",
"gtrless;": "\u2277",
"gtrsim;": "\u2273",
"gvertneqq;": "\u2269\ufe00",
"gvnE;": "\u2269\ufe00",
"hArr;": "\u21d4",
"hairsp;": "\u200a",
"half;": "\xbd",
"hamilt;": "\u210b",
"hardcy;": "\u044a",
"harr;": "\u2194",
"harrcir;": "\u2948",
"harrw;": "\u21ad",
"hbar;": "\u210f",
"hcirc;": "\u0125",
"hearts;": "\u2665",
"heartsuit;": "\u2665",
"hellip;": "\u2026",
"hercon;": "\u22b9",
"hfr;": "\U0001d525",
"hksearow;": "\u2925",
"hkswarow;": "\u2926",
"hoarr;": "\u21ff",
"homtht;": "\u223b",
"hookleftarrow;": "\u21a9",
"hookrightarrow;": "\u21aa",
"hopf;": "\U0001d559",
"horbar;": "\u2015",
"hscr;": "\U0001d4bd",
"hslash;": "\u210f",
"hstrok;": "\u0127",
"hybull;": "\u2043",
"hyphen;": "\u2010",
"iacute": "\xed",
"iacute;": "\xed",
"ic;": "\u2063",
"icirc": "\xee",
"icirc;": "\xee",
"icy;": "\u0438",
"iecy;": "\u0435",
"iexcl": "\xa1",
"iexcl;": "\xa1",
"iff;": "\u21d4",
"ifr;": "\U0001d526",
"igrave": "\xec",
"igrave;": "\xec",
"ii;": "\u2148",
"iiiint;": "\u2a0c",
"iiint;": "\u222d",
"iinfin;": "\u29dc",
"iiota;": "\u2129",
"ijlig;": "\u0133",
"imacr;": "\u012b",
"image;": "\u2111",
"imagline;": "\u2110",
"imagpart;": "\u2111",
"imath;": "\u0131",
"imof;": "\u22b7",
"imped;": "\u01b5",
"in;": "\u2208",
"incare;": "\u2105",
"infin;": "\u221e",
"infintie;": "\u29dd",
"inodot;": "\u0131",
"int;": "\u222b",
"intcal;": "\u22ba",
"integers;": "\u2124",
"intercal;": "\u22ba",
"intlarhk;": "\u2a17",
"intprod;": "\u2a3c",
"iocy;": "\u0451",
"iogon;": "\u012f",
"iopf;": "\U0001d55a",
"iota;": "\u03b9",
"iprod;": "\u2a3c",
"iquest": "\xbf",
"iquest;": "\xbf",
"iscr;": "\U0001d4be",
"isin;": "\u2208",
"isinE;": "\u22f9",
"isindot;": "\u22f5",
"isins;": "\u22f4",
"isinsv;": "\u22f3",
"isinv;": "\u2208",
"it;": "\u2062",
"itilde;": "\u0129",
"iukcy;": "\u0456",
"iuml": "\xef",
"iuml;": "\xef",
"jcirc;": "\u0135",
"jcy;": "\u0439",
"jfr;": "\U0001d527",
"jmath;": "\u0237",
"jopf;": "\U0001d55b",
"jscr;": "\U0001d4bf",
"jsercy;": "\u0458",
"jukcy;": "\u0454",
"kappa;": "\u03ba",
"kappav;": "\u03f0",
"kcedil;": "\u0137",
"kcy;": "\u043a",
"kfr;": "\U0001d528",
"kgreen;": "\u0138",
"khcy;": "\u0445",
"kjcy;": "\u045c",
"kopf;": "\U0001d55c",
"kscr;": "\U0001d4c0",
"lAarr;": "\u21da",
"lArr;": "\u21d0",
"lAtail;": "\u291b",
"lBarr;": "\u290e",
"lE;": "\u2266",
"lEg;": "\u2a8b",
"lHar;": "\u2962",
"lacute;": "\u013a",
"laemptyv;": "\u29b4",
"lagran;": "\u2112",
"lambda;": "\u03bb",
"lang;": "\u27e8",
"langd;": "\u2991",
"langle;": "\u27e8",
"lap;": "\u2a85",
"laquo": "\xab",
"laquo;": "\xab",
"larr;": "\u2190",
"larrb;": "\u21e4",
"larrbfs;": "\u291f",
"larrfs;": "\u291d",
"larrhk;": "\u21a9",
"larrlp;": "\u21ab",
"larrpl;": "\u2939",
"larrsim;": "\u2973",
"larrtl;": "\u21a2",
"lat;": "\u2aab",
"latail;": "\u2919",
"late;": "\u2aad",
"lates;": "\u2aad\ufe00",
"lbarr;": "\u290c",
"lbbrk;": "\u2772",
"lbrace;": "{",
"lbrack;": "[",
"lbrke;": "\u298b",
"lbrksld;": "\u298f",
"lbrkslu;": "\u298d",
"lcaron;": "\u013e",
"lcedil;": "\u013c",
"lceil;": "\u2308",
"lcub;": "{",
"lcy;": "\u043b",
"ldca;": "\u2936",
"ldquo;": "\u201c",
"ldquor;": "\u201e",
"ldrdhar;": "\u2967",
"ldrushar;": "\u294b",
"ldsh;": "\u21b2",
"le;": "\u2264",
"leftarrow;": "\u2190",
"leftarrowtail;": "\u21a2",
"leftharpoondown;": "\u21bd",
"leftharpoonup;": "\u21bc",
"leftleftarrows;": "\u21c7",
"leftrightarrow;": "\u2194",
"leftrightarrows;": "\u21c6",
"leftrightharpoons;": "\u21cb",
"leftrightsquigarrow;": "\u21ad",
"leftthreetimes;": "\u22cb",
"leg;": "\u22da",
"leq;": "\u2264",
"leqq;": "\u2266",
"leqslant;": "\u2a7d",
"les;": "\u2a7d",
"lescc;": "\u2aa8",
"lesdot;": "\u2a7f",
"lesdoto;": "\u2a81",
"lesdotor;": "\u2a83",
"lesg;": "\u22da\ufe00",
"lesges;": "\u2a93",
"lessapprox;": "\u2a85",
"lessdot;": "\u22d6",
"lesseqgtr;": "\u22da",
"lesseqqgtr;": "\u2a8b",
"lessgtr;": "\u2276",
"lesssim;": "\u2272",
"lfisht;": "\u297c",
"lfloor;": "\u230a",
"lfr;": "\U0001d529",
"lg;": "\u2276",
"lgE;": "\u2a91",
"lhard;": "\u21bd",
"lharu;": "\u21bc",
"lharul;": "\u296a",
"lhblk;": "\u2584",
"ljcy;": "\u0459",
"ll;": "\u226a",
"llarr;": "\u21c7",
"llcorner;": "\u231e",
"llhard;": "\u296b",
"lltri;": "\u25fa",
"lmidot;": "\u0140",
"lmoust;": "\u23b0",
"lmoustache;": "\u23b0",
"lnE;": "\u2268",
"lnap;": "\u2a89",
"lnapprox;": "\u2a89",
"lne;": "\u2a87",
"lneq;": "\u2a87",
"lneqq;": "\u2268",
"lnsim;": "\u22e6",
"loang;": "\u27ec",
"loarr;": "\u21fd",
"lobrk;": "\u27e6",
"longleftarrow;": "\u27f5",
"longleftrightarrow;": "\u27f7",
"longmapsto;": "\u27fc",
"longrightarrow;": "\u27f6",
"looparrowleft;": "\u21ab",
"looparrowright;": "\u21ac",
"lopar;": "\u2985",
"lopf;": "\U0001d55d",
"loplus;": "\u2a2d",
"lotimes;": "\u2a34",
"lowast;": "\u2217",
"lowbar;": "_",
"loz;": "\u25ca",
"lozenge;": "\u25ca",
"lozf;": "\u29eb",
"lpar;": "(",
"lparlt;": "\u2993",
"lrarr;": "\u21c6",
"lrcorner;": "\u231f",
"lrhar;": "\u21cb",
"lrhard;": "\u296d",
"lrm;": "\u200e",
"lrtri;": "\u22bf",
"lsaquo;": "\u2039",
"lscr;": "\U0001d4c1",
"lsh;": "\u21b0",
"lsim;": "\u2272",
"lsime;": "\u2a8d",
"lsimg;": "\u2a8f",
"lsqb;": "[",
"lsquo;": "\u2018",
"lsquor;": "\u201a",
"lstrok;": "\u0142",
"lt": "<",
"lt;": "<",
"ltcc;": "\u2aa6",
"ltcir;": "\u2a79",
"ltdot;": "\u22d6",
"lthree;": "\u22cb",
"ltimes;": "\u22c9",
"ltlarr;": "\u2976",
"ltquest;": "\u2a7b",
"ltrPar;": "\u2996",
"ltri;": "\u25c3",
"ltrie;": "\u22b4",
"ltrif;": "\u25c2",
"lurdshar;": "\u294a",
"luruhar;": "\u2966",
"lvertneqq;": "\u2268\ufe00",
"lvnE;": "\u2268\ufe00",
"mDDot;": "\u223a",
"macr": "\xaf",
"macr;": "\xaf",
"male;": "\u2642",
"malt;": "\u2720",
"maltese;": "\u2720",
"map;": "\u21a6",
"mapsto;": "\u21a6",
"mapstodown;": "\u21a7",
"mapstoleft;": "\u21a4",
"mapstoup;": "\u21a5",
"marker;": "\u25ae",
"mcomma;": "\u2a29",
"mcy;": "\u043c",
"mdash;": "\u2014",
"measuredangle;": "\u2221",
"mfr;": "\U0001d52a",
"mho;": "\u2127",
"micro": "\xb5",
"micro;": "\xb5",
"mid;": "\u2223",
"midast;": "*",
"midcir;": "\u2af0",
"middot": "\xb7",
"middot;": "\xb7",
"minus;": "\u2212",
"minusb;": "\u229f",
"minusd;": "\u2238",
"minusdu;": "\u2a2a",
"mlcp;": "\u2adb",
"mldr;": "\u2026",
"mnplus;": "\u2213",
"models;": "\u22a7",
"mopf;": "\U0001d55e",
"mp;": "\u2213",
"mscr;": "\U0001d4c2",
"mstpos;": "\u223e",
"mu;": "\u03bc",
"multimap;": "\u22b8",
"mumap;": "\u22b8",
"nGg;": "\u22d9\u0338",
"nGt;": "\u226b\u20d2",
"nGtv;": "\u226b\u0338",
"nLeftarrow;": "\u21cd",
"nLeftrightarrow;": "\u21ce",
"nLl;": "\u22d8\u0338",
"nLt;": "\u226a\u20d2",
"nLtv;": "\u226a\u0338",
"nRightarrow;": "\u21cf",
"nVDash;": "\u22af",
"nVdash;": "\u22ae",
"nabla;": "\u2207",
"nacute;": "\u0144",
"nang;": "\u2220\u20d2",
"nap;": "\u2249",
"napE;": "\u2a70\u0338",
"napid;": "\u224b\u0338",
"napos;": "\u0149",
"napprox;": "\u2249",
"natur;": "\u266e",
"natural;": "\u266e",
"naturals;": "\u2115",
"nbsp": "\xa0",
"nbsp;": "\xa0",
"nbump;": "\u224e\u0338",
"nbumpe;": "\u224f\u0338",
"ncap;": "\u2a43",
"ncaron;": "\u0148",
"ncedil;": "\u0146",
"ncong;": "\u2247",
"ncongdot;": "\u2a6d\u0338",
"ncup;": "\u2a42",
"ncy;": "\u043d",
"ndash;": "\u2013",
"ne;": "\u2260",
"neArr;": "\u21d7",
"nearhk;": "\u2924",
"nearr;": "\u2197",
"nearrow;": "\u2197",
"nedot;": "\u2250\u0338",
"nequiv;": "\u2262",
"nesear;": "\u2928",
"nesim;": "\u2242\u0338",
"nexist;": "\u2204",
"nexists;": "\u2204",
"nfr;": "\U0001d52b",
"ngE;": "\u2267\u0338",
"nge;": "\u2271",
"ngeq;": "\u2271",
"ngeqq;": "\u2267\u0338",
"ngeqslant;": "\u2a7e\u0338",
"nges;": "\u2a7e\u0338",
"ngsim;": "\u2275",
"ngt;": "\u226f",
"ngtr;": "\u226f",
"nhArr;": "\u21ce",
"nharr;": "\u21ae",
"nhpar;": "\u2af2",
"ni;": "\u220b",
"nis;": "\u22fc",
"nisd;": "\u22fa",
"niv;": "\u220b",
"njcy;": "\u045a",
"nlArr;": "\u21cd",
"nlE;": "\u2266\u0338",
"nlarr;": "\u219a",
"nldr;": "\u2025",
"nle;": "\u2270",
"nleftarrow;": "\u219a",
"nleftrightarrow;": "\u21ae",
"nleq;": "\u2270",
"nleqq;": "\u2266\u0338",
"nleqslant;": "\u2a7d\u0338",
"nles;": "\u2a7d\u0338",
"nless;": "\u226e",
"nlsim;": "\u2274",
"nlt;": "\u226e",
"nltri;": "\u22ea",
"nltrie;": "\u22ec",
"nmid;": "\u2224",
"nopf;": "\U0001d55f",
"not": "\xac",
"not;": "\xac",
"notin;": "\u2209",
"notinE;": "\u22f9\u0338",
"notindot;": "\u22f5\u0338",
"notinva;": "\u2209",
"notinvb;": "\u22f7",
"notinvc;": "\u22f6",
"notni;": "\u220c",
"notniva;": "\u220c",
"notnivb;": "\u22fe",
"notnivc;": "\u22fd",
"npar;": "\u2226",
"nparallel;": "\u2226",
"nparsl;": "\u2afd\u20e5",
"npart;": "\u2202\u0338",
"npolint;": "\u2a14",
"npr;": "\u2280",
"nprcue;": "\u22e0",
"npre;": "\u2aaf\u0338",
"nprec;": "\u2280",
"npreceq;": "\u2aaf\u0338",
"nrArr;": "\u21cf",
"nrarr;": "\u219b",
"nrarrc;": "\u2933\u0338",
"nrarrw;": "\u219d\u0338",
"nrightarrow;": "\u219b",
"nrtri;": "\u22eb",
"nrtrie;": "\u22ed",
"nsc;": "\u2281",
"nsccue;": "\u22e1",
"nsce;": "\u2ab0\u0338",
"nscr;": "\U0001d4c3",
"nshortmid;": "\u2224",
"nshortparallel;": "\u2226",
"nsim;": "\u2241",
"nsime;": "\u2244",
"nsimeq;": "\u2244",
"nsmid;": "\u2224",
"nspar;": "\u2226",
"nsqsube;": "\u22e2",
"nsqsupe;": "\u22e3",
"nsub;": "\u2284",
"nsubE;": "\u2ac5\u0338",
"nsube;": "\u2288",
"nsubset;": "\u2282\u20d2",
"nsubseteq;": "\u2288",
"nsubseteqq;": "\u2ac5\u0338",
"nsucc;": "\u2281",
"nsucceq;": "\u2ab0\u0338",
"nsup;": "\u2285",
"nsupE;": "\u2ac6\u0338",
"nsupe;": "\u2289",
"nsupset;": "\u2283\u20d2",
"nsupseteq;": "\u2289",
"nsupseteqq;": "\u2ac6\u0338",
"ntgl;": "\u2279",
"ntilde": "\xf1",
"ntilde;": "\xf1",
"ntlg;": "\u2278",
"ntriangleleft;": "\u22ea",
"ntrianglelefteq;": "\u22ec",
"ntriangleright;": "\u22eb",
"ntrianglerighteq;": "\u22ed",
"nu;": "\u03bd",
"num;": "#",
"numero;": "\u2116",
"numsp;": "\u2007",
"nvDash;": "\u22ad",
"nvHarr;": "\u2904",
"nvap;": "\u224d\u20d2",
"nvdash;": "\u22ac",
"nvge;": "\u2265\u20d2",
"nvgt;": ">\u20d2",
"nvinfin;": "\u29de",
"nvlArr;": "\u2902",
"nvle;": "\u2264\u20d2",
"nvlt;": "<\u20d2",
"nvltrie;": "\u22b4\u20d2",
"nvrArr;": "\u2903",
"nvrtrie;": "\u22b5\u20d2",
"nvsim;": "\u223c\u20d2",
"nwArr;": "\u21d6",
"nwarhk;": "\u2923",
"nwarr;": "\u2196",
"nwarrow;": "\u2196",
"nwnear;": "\u2927",
"oS;": "\u24c8",
"oacute": "\xf3",
"oacute;": "\xf3",
"oast;": "\u229b",
"ocir;": "\u229a",
"ocirc": "\xf4",
"ocirc;": "\xf4",
"ocy;": "\u043e",
"odash;": "\u229d",
"odblac;": "\u0151",
"odiv;": "\u2a38",
"odot;": "\u2299",
"odsold;": "\u29bc",
"oelig;": "\u0153",
"ofcir;": "\u29bf",
"ofr;": "\U0001d52c",
"ogon;": "\u02db",
"ograve": "\xf2",
"ograve;": "\xf2",
"ogt;": "\u29c1",
"ohbar;": "\u29b5",
"ohm;": "\u03a9",
"oint;": "\u222e",
"olarr;": "\u21ba",
"olcir;": "\u29be",
"olcross;": "\u29bb",
"oline;": "\u203e",
"olt;": "\u29c0",
"omacr;": "\u014d",
"omega;": "\u03c9",
"omicron;": "\u03bf",
"omid;": "\u29b6",
"ominus;": "\u2296",
"oopf;": "\U0001d560",
"opar;": "\u29b7",
"operp;": "\u29b9",
"oplus;": "\u2295",
"or;": "\u2228",
"orarr;": "\u21bb",
"ord;": "\u2a5d",
"order;": "\u2134",
"orderof;": "\u2134",
"ordf": "\xaa",
"ordf;": "\xaa",
"ordm": "\xba",
"ordm;": "\xba",
"origof;": "\u22b6",
"oror;": "\u2a56",
"orslope;": "\u2a57",
"orv;": "\u2a5b",
"oscr;": "\u2134",
"oslash": "\xf8",
"oslash;": "\xf8",
"osol;": "\u2298",
"otilde": "\xf5",
"otilde;": "\xf5",
"otimes;": "\u2297",
"otimesas;": "\u2a36",
"ouml": "\xf6",
"ouml;": "\xf6",
"ovbar;": "\u233d",
"par;": "\u2225",
"para": "\xb6",
"para;": "\xb6",
"parallel;": "\u2225",
"parsim;": "\u2af3",
"parsl;": "\u2afd",
"part;": "\u2202",
"pcy;": "\u043f",
"percnt;": "%",
"period;": ".",
"permil;": "\u2030",
"perp;": "\u22a5",
"pertenk;": "\u2031",
"pfr;": "\U0001d52d",
"phi;": "\u03c6",
"phiv;": "\u03d5",
"phmmat;": "\u2133",
"phone;": "\u260e",
"pi;": "\u03c0",
"pitchfork;": "\u22d4",
"piv;": "\u03d6",
"planck;": "\u210f",
"planckh;": "\u210e",
"plankv;": "\u210f",
"plus;": "+",
"plusacir;": "\u2a23",
"plusb;": "\u229e",
"pluscir;": "\u2a22",
"plusdo;": "\u2214",
"plusdu;": "\u2a25",
"pluse;": "\u2a72",
"plusmn": "\xb1",
"plusmn;": "\xb1",
"plussim;": "\u2a26",
"plustwo;": "\u2a27",
"pm;": "\xb1",
"pointint;": "\u2a15",
"popf;": "\U0001d561",
"pound": "\xa3",
"pound;": "\xa3",
"pr;": "\u227a",
"prE;": "\u2ab3",
"prap;": "\u2ab7",
"prcue;": "\u227c",
"pre;": "\u2aaf",
"prec;": "\u227a",
"precapprox;": "\u2ab7",
"preccurlyeq;": "\u227c",
"preceq;": "\u2aaf",
"precnapprox;": "\u2ab9",
"precneqq;": "\u2ab5",
"precnsim;": "\u22e8",
"precsim;": "\u227e",
"prime;": "\u2032",
"primes;": "\u2119",
"prnE;": "\u2ab5",
"prnap;": "\u2ab9",
"prnsim;": "\u22e8",
"prod;": "\u220f",
"profalar;": "\u232e",
"profline;": "\u2312",
"profsurf;": "\u2313",
"prop;": "\u221d",
"propto;": "\u221d",
"prsim;": "\u227e",
"prurel;": "\u22b0",
"pscr;": "\U0001d4c5",
"psi;": "\u03c8",
"puncsp;": "\u2008",
"qfr;": "\U0001d52e",
"qint;": "\u2a0c",
"qopf;": "\U0001d562",
"qprime;": "\u2057",
"qscr;": "\U0001d4c6",
"quaternions;": "\u210d",
"quatint;": "\u2a16",
"quest;": "?",
"questeq;": "\u225f",
"quot": "\"",
"quot;": "\"",
"rAarr;": "\u21db",
"rArr;": "\u21d2",
"rAtail;": "\u291c",
"rBarr;": "\u290f",
"rHar;": "\u2964",
"race;": "\u223d\u0331",
"racute;": "\u0155",
"radic;": "\u221a",
"raemptyv;": "\u29b3",
"rang;": "\u27e9",
"rangd;": "\u2992",
"range;": "\u29a5",
"rangle;": "\u27e9",
"raquo": "\xbb",
"raquo;": "\xbb",
"rarr;": "\u2192",
"rarrap;": "\u2975",
"rarrb;": "\u21e5",
"rarrbfs;": "\u2920",
"rarrc;": "\u2933",
"rarrfs;": "\u291e",
"rarrhk;": "\u21aa",
"rarrlp;": "\u21ac",
"rarrpl;": "\u2945",
"rarrsim;": "\u2974",
"rarrtl;": "\u21a3",
"rarrw;": "\u219d",
"ratail;": "\u291a",
"ratio;": "\u2236",
"rationals;": "\u211a",
"rbarr;": "\u290d",
"rbbrk;": "\u2773",
"rbrace;": "}",
"rbrack;": "]",
"rbrke;": "\u298c",
"rbrksld;": "\u298e",
"rbrkslu;": "\u2990",
"rcaron;": "\u0159",
"rcedil;": "\u0157",
"rceil;": "\u2309",
"rcub;": "}",
"rcy;": "\u0440",
"rdca;": "\u2937",
"rdldhar;": "\u2969",
"rdquo;": "\u201d",
"rdquor;": "\u201d",
"rdsh;": "\u21b3",
"real;": "\u211c",
"realine;": "\u211b",
"realpart;": "\u211c",
"reals;": "\u211d",
"rect;": "\u25ad",
"reg": "\xae",
"reg;": "\xae",
"rfisht;": "\u297d",
"rfloor;": "\u230b",
"rfr;": "\U0001d52f",
"rhard;": "\u21c1",
"rharu;": "\u21c0",
"rharul;": "\u296c",
"rho;": "\u03c1",
"rhov;": "\u03f1",
"rightarrow;": "\u2192",
"rightarrowtail;": "\u21a3",
"rightharpoondown;": "\u21c1",
"rightharpoonup;": "\u21c0",
"rightleftarrows;": "\u21c4",
"rightleftharpoons;": "\u21cc",
"rightrightarrows;": "\u21c9",
"rightsquigarrow;": "\u219d",
"rightthreetimes;": "\u22cc",
"ring;": "\u02da",
"risingdotseq;": "\u2253",
"rlarr;": "\u21c4",
"rlhar;": "\u21cc",
"rlm;": "\u200f",
"rmoust;": "\u23b1",
"rmoustache;": "\u23b1",
"rnmid;": "\u2aee",
"roang;": "\u27ed",
"roarr;": "\u21fe",
"robrk;": "\u27e7",
"ropar;": "\u2986",
"ropf;": "\U0001d563",
"roplus;": "\u2a2e",
"rotimes;": "\u2a35",
"rpar;": ")",
"rpargt;": "\u2994",
"rppolint;": "\u2a12",
"rrarr;": "\u21c9",
"rsaquo;": "\u203a",
"rscr;": "\U0001d4c7",
"rsh;": "\u21b1",
"rsqb;": "]",
"rsquo;": "\u2019",
"rsquor;": "\u2019",
"rthree;": "\u22cc",
"rtimes;": "\u22ca",
"rtri;": "\u25b9",
"rtrie;": "\u22b5",
"rtrif;": "\u25b8",
"rtriltri;": "\u29ce",
"ruluhar;": "\u2968",
"rx;": "\u211e",
"sacute;": "\u015b",
"sbquo;": "\u201a",
"sc;": "\u227b",
"scE;": "\u2ab4",
"scap;": "\u2ab8",
"scaron;": "\u0161",
"sccue;": "\u227d",
"sce;": "\u2ab0",
"scedil;": "\u015f",
"scirc;": "\u015d",
"scnE;": "\u2ab6",
"scnap;": "\u2aba",
"scnsim;": "\u22e9",
"scpolint;": "\u2a13",
"scsim;": "\u227f",
"scy;": "\u0441",
"sdot;": "\u22c5",
"sdotb;": "\u22a1",
"sdote;": "\u2a66",
"seArr;": "\u21d8",
"searhk;": "\u2925",
"searr;": "\u2198",
"searrow;": "\u2198",
"sect": "\xa7",
"sect;": "\xa7",
"semi;": ";",
"seswar;": "\u2929",
"setminus;": "\u2216",
"setmn;": "\u2216",
"sext;": "\u2736",
"sfr;": "\U0001d530",
"sfrown;": "\u2322",
"sharp;": "\u266f",
"shchcy;": "\u0449",
"shcy;": "\u0448",
"shortmid;": "\u2223",
"shortparallel;": "\u2225",
"shy": "\xad",
"shy;": "\xad",
"sigma;": "\u03c3",
"sigmaf;": "\u03c2",
"sigmav;": "\u03c2",
"sim;": "\u223c",
"simdot;": "\u2a6a",
"sime;": "\u2243",
"simeq;": "\u2243",
"simg;": "\u2a9e",
"simgE;": "\u2aa0",
"siml;": "\u2a9d",
"simlE;": "\u2a9f",
"simne;": "\u2246",
"simplus;": "\u2a24",
"simrarr;": "\u2972",
"slarr;": "\u2190",
"smallsetminus;": "\u2216",
"smashp;": "\u2a33",
"smeparsl;": "\u29e4",
"smid;": "\u2223",
"smile;": "\u2323",
"smt;": "\u2aaa",
"smte;": "\u2aac",
"smtes;": "\u2aac\ufe00",
"softcy;": "\u044c",
"sol;": "/",
"solb;": "\u29c4",
"solbar;": "\u233f",
"sopf;": "\U0001d564",
"spades;": "\u2660",
"spadesuit;": "\u2660",
"spar;": "\u2225",
"sqcap;": "\u2293",
"sqcaps;": "\u2293\ufe00",
"sqcup;": "\u2294",
"sqcups;": "\u2294\ufe00",
"sqsub;": "\u228f",
"sqsube;": "\u2291",
"sqsubset;": "\u228f",
"sqsubseteq;": "\u2291",
"sqsup;": "\u2290",
"sqsupe;": "\u2292",
"sqsupset;": "\u2290",
"sqsupseteq;": "\u2292",
"squ;": "\u25a1",
"square;": "\u25a1",
"squarf;": "\u25aa",
"squf;": "\u25aa",
"srarr;": "\u2192",
"sscr;": "\U0001d4c8",
"ssetmn;": "\u2216",
"ssmile;": "\u2323",
"sstarf;": "\u22c6",
"star;": "\u2606",
"starf;": "\u2605",
"straightepsilon;": "\u03f5",
"straightphi;": "\u03d5",
"strns;": "\xaf",
"sub;": "\u2282",
"subE;": "\u2ac5",
"subdot;": "\u2abd",
"sube;": "\u2286",
"subedot;": "\u2ac3",
"submult;": "\u2ac1",
"subnE;": "\u2acb",
"subne;": "\u228a",
"subplus;": "\u2abf",
"subrarr;": "\u2979",
"subset;": "\u2282",
"subseteq;": "\u2286",
"subseteqq;": "\u2ac5",
"subsetneq;": "\u228a",
"subsetneqq;": "\u2acb",
"subsim;": "\u2ac7",
"subsub;": "\u2ad5",
"subsup;": "\u2ad3",
"succ;": "\u227b",
"succapprox;": "\u2ab8",
"succcurlyeq;": "\u227d",
"succeq;": "\u2ab0",
"succnapprox;": "\u2aba",
"succneqq;": "\u2ab6",
"succnsim;": "\u22e9",
"succsim;": "\u227f",
"sum;": "\u2211",
"sung;": "\u266a",
"sup1": "\xb9",
"sup1;": "\xb9",
"sup2": "\xb2",
"sup2;": "\xb2",
"sup3": "\xb3",
"sup3;": "\xb3",
"sup;": "\u2283",
"supE;": "\u2ac6",
"supdot;": "\u2abe",
"supdsub;": "\u2ad8",
"supe;": "\u2287",
"supedot;": "\u2ac4",
"suphsol;": "\u27c9",
"suphsub;": "\u2ad7",
"suplarr;": "\u297b",
"supmult;": "\u2ac2",
"supnE;": "\u2acc",
"supne;": "\u228b",
"supplus;": "\u2ac0",
"supset;": "\u2283",
"supseteq;": "\u2287",
"supseteqq;": "\u2ac6",
"supsetneq;": "\u228b",
"supsetneqq;": "\u2acc",
"supsim;": "\u2ac8",
"supsub;": "\u2ad4",
"supsup;": "\u2ad6",
"swArr;": "\u21d9",
"swarhk;": "\u2926",
"swarr;": "\u2199",
"swarrow;": "\u2199",
"swnwar;": "\u292a",
"szlig": "\xdf",
"szlig;": "\xdf",
"target;": "\u2316",
"tau;": "\u03c4",
"tbrk;": "\u23b4",
"tcaron;": "\u0165",
"tcedil;": "\u0163",
"tcy;": "\u0442",
"tdot;": "\u20db",
"telrec;": "\u2315",
"tfr;": "\U0001d531",
"there4;": "\u2234",
"therefore;": "\u2234",
"theta;": "\u03b8",
"thetasym;": "\u03d1",
"thetav;": "\u03d1",
"thickapprox;": "\u2248",
"thicksim;": "\u223c",
"thinsp;": "\u2009",
"thkap;": "\u2248",
"thksim;": "\u223c",
"thorn": "\xfe",
"thorn;": "\xfe",
"tilde;": "\u02dc",
"times": "\xd7",
"times;": "\xd7",
"timesb;": "\u22a0",
"timesbar;": "\u2a31",
"timesd;": "\u2a30",
"tint;": "\u222d",
"toea;": "\u2928",
"top;": "\u22a4",
"topbot;": "\u2336",
"topcir;": "\u2af1",
"topf;": "\U0001d565",
"topfork;": "\u2ada",
"tosa;": "\u2929",
"tprime;": "\u2034",
"trade;": "\u2122",
"triangle;": "\u25b5",
"triangledown;": "\u25bf",
"triangleleft;": "\u25c3",
"trianglelefteq;": "\u22b4",
"triangleq;": "\u225c",
"triangleright;": "\u25b9",
"trianglerighteq;": "\u22b5",
"tridot;": "\u25ec",
"trie;": "\u225c",
"triminus;": "\u2a3a",
"triplus;": "\u2a39",
"trisb;": "\u29cd",
"tritime;": "\u2a3b",
"trpezium;": "\u23e2",
"tscr;": "\U0001d4c9",
"tscy;": "\u0446",
"tshcy;": "\u045b",
"tstrok;": "\u0167",
"twixt;": "\u226c",
"twoheadleftarrow;": "\u219e",
"twoheadrightarrow;": "\u21a0",
"uArr;": "\u21d1",
"uHar;": "\u2963",
"uacute": "\xfa",
"uacute;": "\xfa",
"uarr;": "\u2191",
"ubrcy;": "\u045e",
"ubreve;": "\u016d",
"ucirc": "\xfb",
"ucirc;": "\xfb",
"ucy;": "\u0443",
"udarr;": "\u21c5",
"udblac;": "\u0171",
"udhar;": "\u296e",
"ufisht;": "\u297e",
"ufr;": "\U0001d532",
"ugrave": "\xf9",
"ugrave;": "\xf9",
"uharl;": "\u21bf",
"uharr;": "\u21be",
"uhblk;": "\u2580",
"ulcorn;": "\u231c",
"ulcorner;": "\u231c",
"ulcrop;": "\u230f",
"ultri;": "\u25f8",
"umacr;": "\u016b",
"uml": "\xa8",
"uml;": "\xa8",
"uogon;": "\u0173",
"uopf;": "\U0001d566",
"uparrow;": "\u2191",
"updownarrow;": "\u2195",
"upharpoonleft;": "\u21bf",
"upharpoonright;": "\u21be",
"uplus;": "\u228e",
"upsi;": "\u03c5",
"upsih;": "\u03d2",
"upsilon;": "\u03c5",
"upuparrows;": "\u21c8",
"urcorn;": "\u231d",
"urcorner;": "\u231d",
"urcrop;": "\u230e",
"uring;": "\u016f",
"urtri;": "\u25f9",
"uscr;": "\U0001d4ca",
"utdot;": "\u22f0",
"utilde;": "\u0169",
"utri;": "\u25b5",
"utrif;": "\u25b4",
"uuarr;": "\u21c8",
"uuml": "\xfc",
"uuml;": "\xfc",
"uwangle;": "\u29a7",
"vArr;": "\u21d5",
"vBar;": "\u2ae8",
"vBarv;": "\u2ae9",
"vDash;": "\u22a8",
"vangrt;": "\u299c",
"varepsilon;": "\u03f5",
"varkappa;": "\u03f0",
"varnothing;": "\u2205",
"varphi;": "\u03d5",
"varpi;": "\u03d6",
"varpropto;": "\u221d",
"varr;": "\u2195",
"varrho;": "\u03f1",
"varsigma;": "\u03c2",
"varsubsetneq;": "\u228a\ufe00",
"varsubsetneqq;": "\u2acb\ufe00",
"varsupsetneq;": "\u228b\ufe00",
"varsupsetneqq;": "\u2acc\ufe00",
"vartheta;": "\u03d1",
"vartriangleleft;": "\u22b2",
"vartriangleright;": "\u22b3",
"vcy;": "\u0432",
"vdash;": "\u22a2",
"vee;": "\u2228",
"veebar;": "\u22bb",
"veeeq;": "\u225a",
"vellip;": "\u22ee",
"verbar;": "|",
"vert;": "|",
"vfr;": "\U0001d533",
"vltri;": "\u22b2",
"vnsub;": "\u2282\u20d2",
"vnsup;": "\u2283\u20d2",
"vopf;": "\U0001d567",
"vprop;": "\u221d",
"vrtri;": "\u22b3",
"vscr;": "\U0001d4cb",
"vsubnE;": "\u2acb\ufe00",
"vsubne;": "\u228a\ufe00",
"vsupnE;": "\u2acc\ufe00",
"vsupne;": "\u228b\ufe00",
"vzigzag;": "\u299a",
"wcirc;": "\u0175",
"wedbar;": "\u2a5f",
"wedge;": "\u2227",
"wedgeq;": "\u2259",
"weierp;": "\u2118",
"wfr;": "\U0001d534",
"wopf;": "\U0001d568",
"wp;": "\u2118",
"wr;": "\u2240",
"wreath;": "\u2240",
"wscr;": "\U0001d4cc",
"xcap;": "\u22c2",
"xcirc;": "\u25ef",
"xcup;": "\u22c3",
"xdtri;": "\u25bd",
"xfr;": "\U0001d535",
"xhArr;": "\u27fa",
"xharr;": "\u27f7",
"xi;": "\u03be",
"xlArr;": "\u27f8",
"xlarr;": "\u27f5",
"xmap;": "\u27fc",
"xnis;": "\u22fb",
"xodot;": "\u2a00",
"xopf;": "\U0001d569",
"xoplus;": "\u2a01",
"xotime;": "\u2a02",
"xrArr;": "\u27f9",
"xrarr;": "\u27f6",
"xscr;": "\U0001d4cd",
"xsqcup;": "\u2a06",
"xuplus;": "\u2a04",
"xutri;": "\u25b3",
"xvee;": "\u22c1",
"xwedge;": "\u22c0",
"yacute": "\xfd",
"yacute;": "\xfd",
"yacy;": "\u044f",
"ycirc;": "\u0177",
"ycy;": "\u044b",
"yen": "\xa5",
"yen;": "\xa5",
"yfr;": "\U0001d536",
"yicy;": "\u0457",
"yopf;": "\U0001d56a",
"yscr;": "\U0001d4ce",
"yucy;": "\u044e",
"yuml": "\xff",
"yuml;": "\xff",
"zacute;": "\u017a",
"zcaron;": "\u017e",
"zcy;": "\u0437",
"zdot;": "\u017c",
"zeetrf;": "\u2128",
"zeta;": "\u03b6",
"zfr;": "\U0001d537",
"zhcy;": "\u0436",
"zigrarr;": "\u21dd",
"zopf;": "\U0001d56b",
"zscr;": "\U0001d4cf",
"zwj;": "\u200d",
"zwnj;": "\u200c",
}
# Replacements for bytes that are invalid in numeric character references:
# 0x00 becomes U+FFFD, and the 0x80-0x9F range maps through Windows-1252
# (visible from the values: 0x80 -> U+20AC, 0x82 -> U+201A, ...).
# Fix: the original table listed the 0x81 entry twice; duplicate removed.
replacementCharacters = {
    0x0:"\uFFFD",
    0x0d:"\u000D",
    0x80:"\u20AC",
    0x81:"\u0081",
    0x82:"\u201A",
    0x83:"\u0192",
    0x84:"\u201E",
    0x85:"\u2026",
    0x86:"\u2020",
    0x87:"\u2021",
    0x88:"\u02C6",
    0x89:"\u2030",
    0x8A:"\u0160",
    0x8B:"\u2039",
    0x8C:"\u0152",
    0x8D:"\u008D",
    0x8E:"\u017D",
    0x8F:"\u008F",
    0x90:"\u0090",
    0x91:"\u2018",
    0x92:"\u2019",
    0x93:"\u201C",
    0x94:"\u201D",
    0x95:"\u2022",
    0x96:"\u2013",
    0x97:"\u2014",
    0x98:"\u02DC",
    0x99:"\u2122",
    0x9A:"\u0161",
    0x9B:"\u203A",
    0x9C:"\u0153",
    0x9D:"\u009D",
    0x9E:"\u017E",
    0x9F:"\u0178",
}
encodings = {
'437': 'cp437',
'850': 'cp850',
'852': 'cp852',
'855': 'cp855',
'857': 'cp857',
'860': 'cp860',
'861': 'cp861',
'862': 'cp862',
'863': 'cp863',
'865': 'cp865',
'866': 'cp866',
'869': 'cp869',
'ansix341968': 'ascii',
'ansix341986': 'ascii',
'arabic': 'iso8859-6',
'ascii': 'ascii',
'asmo708': 'iso8859-6',
'big5': 'big5',
'big5hkscs': 'big5hkscs',
'chinese': 'gbk',
'cp037': 'cp037',
'cp1026': 'cp1026',
'cp154': 'ptcp154',
'cp367': 'ascii',
'cp424': 'cp424',
'cp437': 'cp437',
'cp500': 'cp500',
'cp775': 'cp775',
'cp819': 'windows-1252',
'cp850': 'cp850',
'cp852': 'cp852',
'cp855': 'cp855',
'cp857': 'cp857',
'cp860': 'cp860',
'cp861': 'cp861',
'cp862': 'cp862',
'cp863': 'cp863',
'cp864': 'cp864',
'cp865': 'cp865',
'cp866': 'cp866',
'cp869': 'cp869',
'cp936': 'gbk',
'cpgr': 'cp869',
'cpis': 'cp861',
'csascii': 'ascii',
'csbig5': 'big5',
'cseuckr': 'cp949',
'cseucpkdfmtjapanese': 'euc_jp',
'csgb2312': 'gbk',
'cshproman8': 'hp-roman8',
'csibm037': 'cp037',
'csibm1026': 'cp1026',
'csibm424': 'cp424',
'csibm500': 'cp500',
'csibm855': 'cp855',
'csibm857': 'cp857',
'csibm860': 'cp860',
'csibm861': 'cp861',
'csibm863': 'cp863',
'csibm864': 'cp864',
'csibm865': 'cp865',
'csibm866': 'cp866',
'csibm869': 'cp869',
'csiso2022jp': 'iso2022_jp',
'csiso2022jp2': 'iso2022_jp_2',
'csiso2022kr': 'iso2022_kr',
'csiso58gb231280': 'gbk',
'csisolatin1': 'windows-1252',
'csisolatin2': 'iso8859-2',
'csisolatin3': 'iso8859-3',
'csisolatin4': 'iso8859-4',
'csisolatin5': 'windows-1254',
'csisolatin6': 'iso8859-10',
'csisolatinarabic': 'iso8859-6',
'csisolatincyrillic': 'iso8859-5',
'csisolatingreek': 'iso8859-7',
'csisolatinhebrew': 'iso8859-8',
'cskoi8r': 'koi8-r',
'csksc56011987': 'cp949',
'cspc775baltic': 'cp775',
'cspc850multilingual': 'cp850',
'cspc862latinhebrew': 'cp862',
'cspc8codepage437': 'cp437',
'cspcp852': 'cp852',
'csptcp154': 'ptcp154',
'csshiftjis': 'shift_jis',
'csunicode11utf7': 'utf-7',
'cyrillic': 'iso8859-5',
'cyrillicasian': 'ptcp154',
'ebcdiccpbe': 'cp500',
'ebcdiccpca': 'cp037',
'ebcdiccpch': 'cp500',
'ebcdiccphe': 'cp424',
'ebcdiccpnl': 'cp037',
'ebcdiccpus': 'cp037',
'ebcdiccpwt': 'cp037',
'ecma114': 'iso8859-6',
'ecma118': 'iso8859-7',
'elot928': 'iso8859-7',
'eucjp': 'euc_jp',
'euckr': 'cp949',
'extendedunixcodepackedformatforjapanese': 'euc_jp',
'gb18030': 'gb18030',
'gb2312': 'gbk',
'gb231280': 'gbk',
'gbk': 'gbk',
'greek': 'iso8859-7',
'greek8': 'iso8859-7',
'hebrew': 'iso8859-8',
'hproman8': 'hp-roman8',
'hzgb2312': 'hz',
'ibm037': 'cp037',
'ibm1026': 'cp1026',
'ibm367': 'ascii',
'ibm424': 'cp424',
'ibm437': 'cp437',
'ibm500': 'cp500',
'ibm775': 'cp775',
'ibm819': 'windows-1252',
'ibm850': 'cp850',
'ibm852': 'cp852',
'ibm855': 'cp855',
'ibm857': 'cp857',
'ibm860': 'cp860',
'ibm861': 'cp861',
'ibm862': 'cp862',
'ibm863': 'cp863',
'ibm864': 'cp864',
'ibm865': 'cp865',
'ibm866': 'cp866',
'ibm869': 'cp869',
'iso2022jp': 'iso2022_jp',
'iso2022jp2': 'iso2022_jp_2',
'iso2022kr': 'iso2022_kr',
'iso646irv1991': 'ascii',
'iso646us': 'ascii',
'iso88591': 'windows-1252',
'iso885910': 'iso8859-10',
'iso8859101992': 'iso8859-10',
'iso885911987': 'windows-1252',
'iso885913': 'iso8859-13',
'iso885914': 'iso8859-14',
'iso8859141998': 'iso8859-14',
'iso885915': 'iso8859-15',
'iso885916': 'iso8859-16',
'iso8859162001': 'iso8859-16',
'iso88592': 'iso8859-2',
'iso885921987': 'iso8859-2',
'iso88593': 'iso8859-3',
'iso885931988': 'iso8859-3',
'iso88594': 'iso8859-4',
'iso885941988': 'iso8859-4',
'iso88595': 'iso8859-5',
'iso885951988': 'iso8859-5',
'iso88596': 'iso8859-6',
'iso885961987': 'iso8859-6',
'iso88597': 'iso8859-7',
'iso885971987': 'iso8859-7',
'iso88598': 'iso8859-8',
'iso885981988': 'iso8859-8',
'iso88599': 'windows-1254',
'iso885991989': 'windows-1254',
'isoceltic': 'iso8859-14',
'isoir100': 'windows-1252',
'isoir101': 'iso8859-2',
'isoir109': 'iso8859-3',
'isoir110': 'iso8859-4',
'isoir126': 'iso8859-7',
'isoir127': 'iso8859-6',
'isoir138': 'iso8859-8',
'isoir144': 'iso8859-5',
'isoir148': 'windows-1254',
'isoir149': 'cp949',
'isoir157': 'iso8859-10',
'isoir199': 'iso8859-14',
'isoir226': 'iso8859-16',
'isoir58': 'gbk',
'isoir6': 'ascii',
'koi8r': 'koi8-r',
'koi8u': 'koi8-u',
'korean': 'cp949',
'ksc5601': 'cp949',
'ksc56011987': 'cp949',
'ksc56011989': 'cp949',
'l1': 'windows-1252',
'l10': 'iso8859-16',
'l2': 'iso8859-2',
'l3': 'iso8859-3',
'l4': 'iso8859-4',
'l5': 'windows-1254',
'l6': 'iso8859-10',
'l8': 'iso8859-14',
'latin1': 'windows-1252',
'latin10': 'iso8859-16',
'latin2': 'iso8859-2',
'latin3': 'iso8859-3',
'latin4': 'iso8859-4',
'latin5': 'windows-1254',
'latin6': 'iso8859-10',
'latin8': 'iso8859-14',
'latin9': 'iso8859-15',
'ms936': 'gbk',
'mskanji': 'shift_jis',
'pt154': 'ptcp154',
'ptcp154': 'ptcp154',
'r8': 'hp-roman8',
'roman8': 'hp-roman8',
'shiftjis': 'shift_jis',
'tis620': 'cp874',
'unicode11utf7': 'utf-7',
'us': 'ascii',
'usascii': 'ascii',
'utf16': 'utf-16',
'utf16be': 'utf-16-be',
'utf16le': 'utf-16-le',
'utf8': 'utf-8',
'windows1250': 'cp1250',
'windows1251': 'cp1251',
'windows1252': 'cp1252',
'windows1253': 'cp1253',
'windows1254': 'cp1254',
'windows1255': 'cp1255',
'windows1256': 'cp1256',
'windows1257': 'cp1257',
'windows1258': 'cp1258',
'windows936': 'gbk',
'x-x-big5': 'big5'}
# Integer codes for the token types produced by the tokenizer; the code is
# simply the position of the name in this canonical ordering.
tokenTypes = {name: code for code, name in enumerate([
    "Doctype",
    "Characters",
    "SpaceCharacters",
    "StartTag",
    "EndTag",
    "EmptyTag",
    "Comment",
    "ParseError",
])}
# The subset of token-type codes that represent tags (start, end, empty).
tagTokenTypes = frozenset(
    tokenTypes[kind] for kind in ("StartTag", "EndTag", "EmptyTag")
)
# Reverse lookup: namespace URI -> prefix; MathML's canonical prefix is "math".
prefixes = {uri: prefix for prefix, uri in namespaces.items()}
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
class DataLossWarning(UserWarning):
    """Warning category for operations that may drop information.

    NOTE(review): purpose inferred from the name; the emitting sites are
    outside this module -- confirm against the rest of the package.
    """
    pass
class ReparseException(Exception):
    """Exception signalling that the input should be parsed again.

    NOTE(review): purpose inferred from the name; the raise/except sites are
    outside this module -- confirm against the parser before relying on it.
    """
    pass
| 27.938656 | 94 | 0.500854 |
795b4589aeb6804e3122d17892260955d8405ad2 | 8,755 | py | Python | deltaframe/core.py | tkanngiesser/deltaframe | 3311bef620a4142a6557eea16594926fa95f7ef6 | [
"Apache-2.0"
] | null | null | null | deltaframe/core.py | tkanngiesser/deltaframe | 3311bef620a4142a6557eea16594926fa95f7ef6 | [
"Apache-2.0"
] | 2 | 2021-09-28T05:45:27.000Z | 2022-02-26T10:21:18.000Z | deltaframe/core.py | tkanngiesser/deltaframe | 3311bef620a4142a6557eea16594926fa95f7ef6 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['get_added', 'get_deleted', 'get_modified', 'get_delta', 'log_delta', 'build_latest']
# Cell
import pandas as pd
import numpy as np
# Cell
def get_added(df_old, df_new, unique_id, trans_col="transaction", trans_vaL="added"):
    """Return the rows of ``df_new`` whose ``unique_id`` is absent from ``df_old``.

    Parameters
    ----------
    df_old : pd.DataFrame
        previous snapshot
    df_new : pd.DataFrame
        new snapshot
    unique_id : str or list
        column(s) that uniquely identify a row
    trans_col : str
        name of the column recording the transaction (default "transaction")
    trans_vaL : str
        value written into ``trans_col`` (default "added"); the keyword is
        spelled with a capital L upstream -- kept for backward compatibility
        with existing keyword callers.

    Returns
    -------
    pd.DataFrame
        the added rows, restricted to ``df_old``'s columns plus ``trans_col``
    """
    original_cols = list(df_old.columns)
    # Outer merge with an indicator column; "right_only" rows exist only in df_new.
    merged = df_old.merge(
        df_new, how="outer", on=unique_id, indicator=True, suffixes=("_foo", "")
    )
    added = merged[merged["_merge"] == "right_only"]
    added = added[original_cols]
    added[trans_col] = trans_vaL
    return added
# Cell
def get_deleted(df_old, df_new, unique_id, trans_col="transaction", trans_val="deleted"):
    """Return the rows of ``df_old`` whose ``unique_id`` no longer occurs in ``df_new``.

    Parameters
    ----------
    df_old : pd.DataFrame
        previous snapshot
    df_new : pd.DataFrame
        new snapshot
    unique_id : str or list
        column(s) that uniquely identify a row
    trans_col : str
        name of the column recording the transaction (default "transaction")
    trans_val : str
        value written into ``trans_col`` (default "deleted")

    Returns
    -------
    pd.DataFrame
        the deleted rows, restricted to ``df_old``'s columns plus ``trans_col``
    """
    original_cols = list(df_old.columns)
    # Merge in the opposite direction of get_added: rows only on the df_old
    # side ("right_only" here) are the ones that disappeared.
    merged = df_new.merge(
        df_old, how="outer", on=unique_id, indicator=True, suffixes=("_foo", "")
    )
    deleted = merged[merged["_merge"] == "right_only"]
    deleted = deleted[original_cols]
    deleted[trans_col] = trans_val
    return deleted
# Cell
def get_modified(df_old, df_new, unique_id, added_rows=None, trans_col="transaction", trans_val="modified"):
    """Return the rows of ``df_new`` that existed in ``df_old`` but changed.

    Parameters
    ----------
    df_old : pd.DataFrame
        previous snapshot
    df_new : pd.DataFrame
        new snapshot
    unique_id : str or list
        column(s) that uniquely identify a row
    added_rows : pd.DataFrame, optional
        output of :func:`get_added`; these rows are excluded so that brand-new
        rows are not also reported as modified
    trans_col : str
        name of the column recording the transaction (default "transaction")
    trans_val : str
        value written into ``trans_col`` (default "modified")

    Returns
    -------
    pd.DataFrame
        the modified rows plus ``trans_col``
    """
    cols = list(df_new.columns)
    if added_rows is not None and len(added_rows):
        # BUGFIX: the previous implementation used ``df_new.isin(...)`` over
        # *all* columns followed by ``dropna()``, which also discarded rows
        # whose non-key values happened to equal an added id (and any rows
        # that already contained NaN).  Match on the key column(s) only.
        key_cols = unique_id if isinstance(unique_id, list) else [unique_id]
        added_keys = added_rows[key_cols].drop_duplicates()
        flags = df_new[key_cols].merge(added_keys, on=key_cols, how="left", indicator=True)
        df_new = df_new[(flags["_merge"] == "left_only").values]
    # Full-row outer merge: rows present only on the df_new side changed.
    modified_rows = df_old.merge(df_new, indicator=True, how='outer')
    modified_rows = modified_rows[modified_rows['_merge'] == 'right_only']
    modified_rows = modified_rows[cols]
    modified_rows[trans_col] = trans_val
    return modified_rows
# Cell
def get_delta(df_old, df_new, unique_id, sort_by=None, trans_col="transaction", trans_val_added="added", trans_val_deleted="deleted", trans_val_modified="modified"):
    """Return the full delta (added, deleted, modified rows) between two snapshots.

    Parameters
    ----------
    df_old : pd.DataFrame
        previous snapshot
    df_new : pd.DataFrame
        new snapshot
    unique_id : str or list
        column(s) that uniquely identify a row
    sort_by : str or list, optional
        column(s) to sort the result by
    trans_col : str
        name of the column recording the transaction (default "transaction")
    trans_val_added, trans_val_deleted, trans_val_modified : str
        values written into ``trans_col`` for the respective change kinds

    Returns
    -------
    pd.DataFrame
        concatenation of added, deleted and modified rows
    """
    # NOTE: get_added's keyword is spelled ``trans_vaL`` (sic) upstream.
    added_rows = get_added(df_old=df_old, df_new=df_new, unique_id=unique_id, trans_col=trans_col, trans_vaL=trans_val_added)
    deleted_rows = get_deleted(df_old=df_old, df_new=df_new, unique_id=unique_id, trans_col=trans_col, trans_val=trans_val_deleted)
    modified_rows = get_modified(df_old=df_old, df_new=df_new, unique_id=unique_id, added_rows=added_rows, trans_col=trans_col, trans_val=trans_val_modified)
    # FIX: DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat() is the supported equivalent and preserves the same row order.
    df = pd.concat([added_rows, deleted_rows, modified_rows])
    if sort_by:
        df = df.sort_values(by=sort_by)
    return df
# Cell
def log_delta(df_log, df_old, df_new, unique_id, sort_by=None, trans_col="transaction", trans_val_added="added", trans_val_deleted="deleted", trans_val_modified="modified"):
    """Append the delta between two snapshots to a running change log.

    Parameters
    ----------
    df_log : pd.DataFrame or None
        existing log; when ``None`` the log is seeded from ``df_old`` with
        every row marked ``trans_val_added`` (note: this mutates the caller's
        ``df_old`` by adding ``trans_col`` -- preserved original behaviour)
    df_old : pd.DataFrame
        previous snapshot
    df_new : pd.DataFrame
        new snapshot
    unique_id : str or list
        column(s) that uniquely identify a row
    sort_by : str or list, optional
        column(s) to sort the result by
    trans_col : str
        name of the column recording the transaction (default "transaction")
    trans_val_added, trans_val_deleted, trans_val_modified : str
        values written into ``trans_col`` for the respective change kinds

    Returns
    -------
    pd.DataFrame
        the updated log
    """
    if df_log is None:
        df_log = df_old
        df_log[trans_col] = trans_val_added
    else:
        # Rows are considered duplicates when every column except the
        # transaction marker matches.
        subset = list(df_log.columns)
        subset.remove(trans_col)
        added_rows = get_added(df_old=df_old, df_new=df_new, unique_id=unique_id, trans_col=trans_col, trans_vaL=trans_val_added)
        deleted_rows = get_deleted(df_old=df_old, df_new=df_new, unique_id=unique_id, trans_col=trans_col, trans_val=trans_val_deleted)
        modified_rows = get_modified(df_new=df_new, df_old=df_old, unique_id=unique_id, added_rows=added_rows, trans_col=trans_col, trans_val=trans_val_modified)
        # FIX: DataFrame.append() was removed in pandas 2.0; pd.concat() with
        # ignore_index=True reproduces the original append semantics.
        df_log = pd.concat([df_log, modified_rows], ignore_index=True)
        df_log = df_log.drop_duplicates(subset=subset, keep="first")
        df_log = pd.concat([df_log, added_rows, deleted_rows], ignore_index=True)
    if sort_by:
        df_log = df_log.sort_values(by=sort_by)
    return df_log
# Cell
def build_latest(df_old, df_new, unique_id, sort_by=None, trans_col="transaction"):
    """Build the latest state by applying added/deleted/modified rows to ``df_old``.

    Parameters
    ----------
    df_old : pd.DataFrame
        previous snapshot
    df_new : pd.DataFrame
        new snapshot
    unique_id : str
        column that uniquely identifies a row (like the original, the
        filtering below assumes a single key column)
    sort_by : str or list, optional
        column(s) to sort the result by
    trans_col : str
        name of the transaction-marker column to strip from the result

    Returns
    -------
    pd.DataFrame
        the reconciled latest state, without ``trans_col``
    """
    added_rows = get_added(df_old=df_old, df_new=df_new, unique_id=unique_id)
    deleted_rows = get_deleted(df_old=df_old, df_new=df_new, unique_id=unique_id)
    modified_rows = get_modified(df_old=df_old, df_new=df_new, unique_id=unique_id, added_rows=added_rows)
    # Drop rows that were deleted or superseded (idiomatic ``~mask`` instead
    # of ``== False``), then splice in the added and modified rows.
    df_old = df_old[~df_old[unique_id].isin(deleted_rows[unique_id])]
    df_old = df_old[~df_old[unique_id].isin(modified_rows[unique_id])]
    # FIX: DataFrame.append() was removed in pandas 2.0; pd.concat() keeps the
    # same row order (old rows, then added, then modified).
    df_latest = pd.concat([df_old, added_rows, modified_rows])
    df_latest.drop(columns=[trans_col], inplace=True)
    if sort_by:
        df_latest = df_latest.sort_values(by=sort_by)
    return df_latest
795b465bcb51c2910c5308939543f4972d14743b | 1,628 | py | Python | tests/test_prometheus_server.py | MetaRed/qualified | 5045316000b1009dfb6d76e495c190b68a0902fa | [
"MIT"
] | 1 | 2018-03-20T15:04:23.000Z | 2018-03-20T15:04:23.000Z | tests/test_prometheus_server.py | MetaRed/qualified | 5045316000b1009dfb6d76e495c190b68a0902fa | [
"MIT"
] | null | null | null | tests/test_prometheus_server.py | MetaRed/qualified | 5045316000b1009dfb6d76e495c190b68a0902fa | [
"MIT"
] | null | null | null | import testinfra.utils.ansible_runner
# Limit testinfra to the hosts listed in the Molecule-generated Ansible inventory.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    '.molecule/ansible_inventory').get_hosts('all')
def test_prometheus_user(User):
    """The 'prometheus' service account exists with the expected group, shell and home."""
    u = User("prometheus")
    assert u.exists
    assert u.group == 'prometheus'
    assert u.shell == '/sbin/nologin'
    assert u.home == '/home/prometheus'
def test_prometheus_install_path(File):
    """The Prometheus install directory /opt/prometheus exists."""
    d = File('/opt/prometheus')
    assert d.exists
    assert d.is_directory
def test_prometheus_config_path(File):
    """The configuration directory /etc/prometheus exists."""
    d = File('/etc/prometheus')
    assert d.exists
    assert d.is_directory
def test_prometheus_log_path(File):
    """The log directory /var/log/prometheus exists."""
    d = File('/var/log/prometheus')
    assert d.exists
    assert d.is_directory
def test_prometheus_pid_path(File):
    """The runtime/PID directory /var/run/prometheus exists."""
    d = File('/var/run/prometheus')
    assert d.exists
    assert d.is_directory
def test_prometheus_db_path(File):
    """The data directory /var/lib/prometheus exists."""
    d = File('/var/lib/prometheus')
    assert d.exists
    assert d.is_directory
def test_prometheus_startup_script(File):
    """The SysV init script exists and is owned by root:root."""
    f = File('/etc/init.d/prometheus')
    assert f.exists
    assert f.user == 'root'
    assert f.group == 'root'
def test_prometheus_conf_file(File):
    """prometheus.yml exists and is owned by the prometheus user/group."""
    f = File('/etc/prometheus/prometheus.yml')
    assert f.exists
    assert f.user == 'prometheus'
    assert f.group == 'prometheus'
def test_wget_is_installed(Package):
    """The wget package is installed (presumably used to fetch releases -- confirm in the role)."""
    pack = Package("wget")
    assert pack.is_installed
def test_prometheus_socket_is_listening(Socket):
    """Prometheus is listening on TCP port 9090 (IPv6 any-address)."""
    sock = Socket("tcp://:::9090")
    assert sock.is_listening
def test_prometheus_service(Service):
    """The prometheus service is running and enabled at boot."""
    service = Service("prometheus")
    assert service.is_running
    assert service.is_enabled
| 22.30137 | 63 | 0.706388 |
795b46f82c68b034858c2f67679c2dce2d022082 | 567 | py | Python | examples/pyboost_dev/crc_example.py | electronicvisions/pyplusplus | 4d88bb8754d22654a61202ae8adc222807953e38 | [
"BSL-1.0"
] | 9 | 2016-06-07T19:14:53.000Z | 2020-02-28T09:06:19.000Z | examples/pyboost_dev/crc_example.py | electronicvisions/pyplusplus | 4d88bb8754d22654a61202ae8adc222807953e38 | [
"BSL-1.0"
] | 1 | 2018-08-15T11:33:40.000Z | 2018-08-15T11:33:40.000Z | examples/pyboost_dev/crc_example.py | electronicvisions/pyplusplus | 4d88bb8754d22654a61202ae8adc222807953e38 | [
"BSL-1.0"
] | 5 | 2016-06-23T09:37:00.000Z | 2019-12-18T13:51:29.000Z | import os
import sys
from pyboost import crc
if __name__ == '__main__':
    # NOTE(review): sys.argv is never empty (argv[0] is the script path), so
    # the else branch is dead code and the script checksums *itself* along
    # with any arguments -- sys.argv[1:] was probably intended; confirm.
    if sys.argv:
        files = sys.argv
    else:
        files = [ sys.executable ]
    try:
        # CRC-32 accumulator from the pyboost wrapper.
        result = crc.crc_32_type()
        for file_name in files:
            # Python 2 idioms throughout: built-in file(), print statement.
            # The handle is never closed -- tolerable for a short example.
            ifs = file( file_name, 'rb' )
            for line in ifs:
                result.process_bytes( line )
            # NOTE(review): `result` is not reset per file, so each printed
            # value is the running checksum over all files so far -- confirm
            # whether per-file CRCs were intended.
            print hex( result.checksum() ).upper()
    except Exception, error:
        sys.stderr.write( "Found an exception with'%s'%s" %( str( error ), os.linesep ) )
sys.exit( 1 ) | 28.35 | 90 | 0.530864 |
795b4853a025e9da889ea4061504b5850c414b7f | 1,726 | py | Python | tests/test_vcs_show.py | scottwittenburg/vcs | 5b9f17fb78f7ab186fc0132ab81ada043a7ba348 | [
"BSD-3-Clause"
] | 11 | 2018-10-10T03:14:33.000Z | 2022-01-05T14:18:15.000Z | tests/test_vcs_show.py | scottwittenburg/vcs | 5b9f17fb78f7ab186fc0132ab81ada043a7ba348 | [
"BSD-3-Clause"
] | 196 | 2018-03-21T19:44:56.000Z | 2021-12-21T21:56:24.000Z | tests/test_vcs_show.py | scottwittenburg/vcs | 5b9f17fb78f7ab186fc0132ab81ada043a7ba348 | [
"BSD-3-Clause"
] | 5 | 2019-12-09T21:54:45.000Z | 2022-03-20T04:22:14.000Z | import basevcstest
class TestVCS(basevcstest.VCSBaseTest):
    """Integration test for the VCS element registry.

    ``self.x`` is presumably a canvas supplied by VCSBaseTest's setup --
    confirm against basevcstest.
    """
    def testShowElements(self):
        """show()/listelements() report the expected element categories, and
        creating a text element registers it under "textcombined"."""
        self.x.show("taylordiagram")
        assert(self.x.listelements("taylordiagram") == ["default"])
        # The full, ordered set of element categories.
        assert(
            self.x.listelements() == [
                '1d',
                '3d_dual_scalar',
                '3d_scalar',
                '3d_vector',
                'boxfill',
                'colormap',
                'display',
                'fillarea',
                'font',
                'fontNumber',
                'format',
                'isofill',
                'isoline',
                'line',
                'list',
                'marker',
                'meshfill',
                'projection',
                'scatter',
                'streamline',
                'taylordiagram',
                'template',
                'textcombined',
                'textorientation',
                'texttable',
                'vector',
                'xvsy',
                'xyvsy',
                'yxvsx'])
        # Font numbers are the integers 1..25.
        assert(
            self.x.listelements("fontNumber") == [
                1,
                2,
                3,
                4,
                5,
                6,
                7,
                8,
                9,
                10,
                11,
                12,
                13,
                14,
                15,
                16,
                17, 18, 19, 20, 21, 22, 23, 24, 25])
        self.x.show("textcombined")
        # Creating a text element must grow the "textcombined" listing.
        before = self.x.listelements("textcombined")
        t = self.x.createtext()
        after = self.x.listelements("textcombined")
        assert(before != after)
        assert(t.name in after)
| 26.96875 | 67 | 0.348204 |
795b488460817bcb551f236154dfa3e3feb94742 | 27,797 | py | Python | plotly_study/validators/barpolar/__init__.py | lucasiscovici/plotly_py | 42ab769febb45fbbe0a3c677dc4306a4f59cea36 | [
"MIT"
] | null | null | null | plotly_study/validators/barpolar/__init__.py | lucasiscovici/plotly_py | 42ab769febb45fbbe0a3c677dc4306a4f59cea36 | [
"MIT"
] | null | null | null | plotly_study/validators/barpolar/__init__.py | lucasiscovici/plotly_py | 42ab769febb45fbbe0a3c677dc4306a4f59cea36 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class WidthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for `barpolar.widthsrc` (SrcValidator; edit_type "none", role "info")."""
    def __init__(self, plotly_name="widthsrc", parent_name="barpolar", **kwargs):
        super(WidthsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for `barpolar.width` (NumberValidator; array_ok, min 0, edit_type "calc")."""
    def __init__(self, plotly_name="width", parent_name="barpolar", **kwargs):
        super(WidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "calc"),
            min=kwargs.pop("min", 0),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for `barpolar.visible` (enumerated: True, False, "legendonly")."""
    def __init__(self, plotly_name="visible", parent_name="barpolar", **kwargs):
        super(VisibleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "info"),
            values=kwargs.pop("values", [True, False, "legendonly"]),
            **kwargs
        )
import _plotly_utils.basevalidators
class UnselectedValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for `barpolar.unselected` (CompoundValidator wrapping the Unselected data class)."""
    def __init__(self, plotly_name="unselected", parent_name="barpolar", **kwargs):
        super(UnselectedValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Unselected"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            marker
                plotly_study.graph_objects.barpolar.unselected.Marker
                instance or dict with compatible properties
            textfont
                plotly_study.graph_objects.barpolar.unselected.Textfo
                nt instance or dict with compatible properties
""",
            ),
            **kwargs
        )
import _plotly_utils.basevalidators
class UirevisionValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for `barpolar.uirevision` (AnyValidator; edit_type "none")."""
    def __init__(self, plotly_name="uirevision", parent_name="barpolar", **kwargs):
        super(UirevisionValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for `barpolar.uid` (StringValidator; edit_type "plot")."""
    def __init__(self, plotly_name="uid", parent_name="barpolar", **kwargs):
        super(UidValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
import _plotly_utils.basevalidators
class ThetaunitValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for `barpolar.thetaunit` (enumerated: radians, degrees, gradians)."""
    def __init__(self, plotly_name="thetaunit", parent_name="barpolar", **kwargs):
        super(ThetaunitValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
            role=kwargs.pop("role", "info"),
            values=kwargs.pop("values", ["radians", "degrees", "gradians"]),
            **kwargs
        )
import _plotly_utils.basevalidators
class ThetasrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for `barpolar.thetasrc` (SrcValidator; edit_type "none", role "info")."""
    def __init__(self, plotly_name="thetasrc", parent_name="barpolar", **kwargs):
        super(ThetasrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
import _plotly_utils.basevalidators
class Theta0Validator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for `barpolar.theta0` (AnyValidator; edit_type "calc+clearAxisTypes")."""
    def __init__(self, plotly_name="theta0", parent_name="barpolar", **kwargs):
        super(Theta0Validator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
import _plotly_utils.basevalidators
class ThetaValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for `barpolar.theta` (DataArrayValidator; role "data")."""
    def __init__(self, plotly_name="theta", parent_name="barpolar", **kwargs):
        super(ThetaValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
            role=kwargs.pop("role", "data"),
            **kwargs
        )
import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for `barpolar.textsrc` (SrcValidator; edit_type "none", role "info")."""
    def __init__(self, plotly_name="textsrc", parent_name="barpolar", **kwargs):
        super(TextsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for `barpolar.text` (StringValidator; array_ok, edit_type "calc")."""
    def __init__(self, plotly_name="text", parent_name="barpolar", **kwargs):
        super(TextValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
import _plotly_utils.basevalidators
class SubplotValidator(_plotly_utils.basevalidators.SubplotidValidator):
    """Validator for `barpolar.subplot` (SubplotidValidator; default subplot id "polar")."""
    def __init__(self, plotly_name="subplot", parent_name="barpolar", **kwargs):
        super(SubplotValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            dflt=kwargs.pop("dflt", "polar"),
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
import _plotly_utils.basevalidators
class StreamValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for `barpolar.stream` (CompoundValidator wrapping the Stream data class)."""
    def __init__(self, plotly_name="stream", parent_name="barpolar", **kwargs):
        super(StreamValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Stream"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            maxpoints
                Sets the maximum number of points to keep on
                the plots from an incoming stream. If
                `maxpoints` is set to 50, only the newest 50
                points will be displayed on the plot.
            token
                The stream id number links a data trace on a
                plot with a stream. See
                https://plot.ly/settings for more details.
""",
            ),
            **kwargs
        )
import _plotly_utils.basevalidators
class ShowlegendValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for `barpolar.showlegend` (BooleanValidator; edit_type "style")."""
    def __init__(self, plotly_name="showlegend", parent_name="barpolar", **kwargs):
        super(ShowlegendValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "style"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
import _plotly_utils.basevalidators
class SelectedpointsValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for `barpolar.selectedpoints` (AnyValidator; edit_type "calc")."""
    def __init__(self, plotly_name="selectedpoints", parent_name="barpolar", **kwargs):
        super(SelectedpointsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
import _plotly_utils.basevalidators
class SelectedValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for `barpolar.selected` (CompoundValidator wrapping the Selected data class)."""
    def __init__(self, plotly_name="selected", parent_name="barpolar", **kwargs):
        super(SelectedValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Selected"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            marker
                plotly_study.graph_objects.barpolar.selected.Marker
                instance or dict with compatible properties
            textfont
                plotly_study.graph_objects.barpolar.selected.Textfont
                instance or dict with compatible properties
""",
            ),
            **kwargs
        )
import _plotly_utils.basevalidators
class RsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for `barpolar.rsrc` (SrcValidator; edit_type "none", role "info")."""
    def __init__(self, plotly_name="rsrc", parent_name="barpolar", **kwargs):
        super(RsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
import _plotly_utils.basevalidators
class R0Validator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for `barpolar.r0` (AnyValidator; edit_type "calc+clearAxisTypes")."""
    def __init__(self, plotly_name="r0", parent_name="barpolar", **kwargs):
        super(R0Validator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
import _plotly_utils.basevalidators
class RValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Data-array validator for the "r" property of "barpolar" traces."""

    def __init__(self, plotly_name="r", parent_name="barpolar", **kwargs):
        kwargs.setdefault("edit_type", "calc+clearAxisTypes")
        kwargs.setdefault("role", "data")
        super(RValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class OpacityValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the "opacity" property of "barpolar" traces (0..1)."""

    def __init__(self, plotly_name="opacity", parent_name="barpolar", **kwargs):
        kwargs.setdefault("edit_type", "style")
        kwargs.setdefault("max", 1)
        kwargs.setdefault("min", 0)
        kwargs.setdefault("role", "style")
        super(OpacityValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class OffsetsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Source-reference validator for "offsetsrc" on "barpolar" traces."""

    def __init__(self, plotly_name="offsetsrc", parent_name="barpolar", **kwargs):
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("role", "info")
        super(OffsetsrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class OffsetValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the "offset" property of "barpolar" traces."""

    def __init__(self, plotly_name="offset", parent_name="barpolar", **kwargs):
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("role", "info")
        super(OffsetValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the "name" property of "barpolar" traces."""

    def __init__(self, plotly_name="name", parent_name="barpolar", **kwargs):
        kwargs.setdefault("edit_type", "style")
        kwargs.setdefault("role", "info")
        super(NameValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class MetasrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Source-reference validator for "metasrc" on "barpolar" traces."""

    def __init__(self, plotly_name="metasrc", parent_name="barpolar", **kwargs):
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("role", "info")
        super(MetasrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class MetaValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the "meta" property of "barpolar" traces."""

    def __init__(self, plotly_name="meta", parent_name="barpolar", **kwargs):
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault("role", "info")
        super(MetaValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Compound validator for the "marker" property of "barpolar" traces.
    # `data_docs` is the human-readable property help text handed to the
    # base CompoundValidator for the nested "Marker" component.
    def __init__(self, plotly_name="marker", parent_name="barpolar", **kwargs):
        """Initialize with defaults; any kwarg popped below may be overridden."""
        super(MarkerValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Marker"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            autocolorscale
                Determines whether the colorscale is a default
                palette (`autocolorscale: true`) or the palette
                determined by `marker.colorscale`. Has an
                effect only if in `marker.color`is set to a
                numerical array. In case `colorscale` is
                unspecified or `autocolorscale` is true, the
                default palette will be chosen according to
                whether numbers in the `color` array are all
                positive, all negative or mixed.
            cauto
                Determines whether or not the color domain is
                computed with respect to the input data (here
                in `marker.color`) or the bounds set in
                `marker.cmin` and `marker.cmax` Has an effect
                only if in `marker.color`is set to a numerical
                array. Defaults to `false` when `marker.cmin`
                and `marker.cmax` are set by the user.
            cmax
                Sets the upper bound of the color domain. Has
                an effect only if in `marker.color`is set to a
                numerical array. Value should have the same
                units as in `marker.color` and if set,
                `marker.cmin` must be set as well.
            cmid
                Sets the mid-point of the color domain by
                scaling `marker.cmin` and/or `marker.cmax` to
                be equidistant to this point. Has an effect
                only if in `marker.color`is set to a numerical
                array. Value should have the same units as in
                `marker.color`. Has no effect when
                `marker.cauto` is `false`.
            cmin
                Sets the lower bound of the color domain. Has
                an effect only if in `marker.color`is set to a
                numerical array. Value should have the same
                units as in `marker.color` and if set,
                `marker.cmax` must be set as well.
            color
                Sets themarkercolor. It accepts either a
                specific color or an array of numbers that are
                mapped to the colorscale relative to the max
                and min values of the array or relative to
                `marker.cmin` and `marker.cmax` if set.
            coloraxis
                Sets a reference to a shared color axis.
                References to these shared color axes are
                "coloraxis", "coloraxis2", "coloraxis3", etc.
                Settings for these shared color axes are set in
                the layout, under `layout.coloraxis`,
                `layout.coloraxis2`, etc. Note that multiple
                color scales can be linked to the same color
                axis.
            colorbar
                plotly_study.graph_objects.barpolar.marker.ColorBar
                instance or dict with compatible properties
            colorscale
                Sets the colorscale. Has an effect only if in
                `marker.color`is set to a numerical array. The
                colorscale must be an array containing arrays
                mapping a normalized value to an rgb, rgba,
                hex, hsl, hsv, or named color string. At
                minimum, a mapping for the lowest (0) and
                highest (1) values are required. For example,
                `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
                To control the bounds of the colorscale in
                color space, use`marker.cmin` and
                `marker.cmax`. Alternatively, `colorscale` may
                be a palette name string of the following list:
                Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
                ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
                arth,Electric,Viridis,Cividis.
            colorsrc
                Sets the source reference on plot.ly for  color
                .
            line
                plotly_study.graph_objects.barpolar.marker.Line
                instance or dict with compatible properties
            opacity
                Sets the opacity of the bars.
            opacitysrc
                Sets the source reference on plot.ly for
                opacity .
            reversescale
                Reverses the color mapping if true. Has an
                effect only if in `marker.color`is set to a
                numerical array. If true, `marker.cmin` will
                correspond to the last color in the array and
                `marker.cmax` will correspond to the first
                color.
            showscale
                Determines whether or not a colorbar is
                displayed for this trace. Has an effect only if
                in `marker.color`is set to a numerical array.
""",
            ),
            **kwargs
        )
import _plotly_utils.basevalidators
class LegendgroupValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the "legendgroup" property of "barpolar" traces."""

    def __init__(self, plotly_name="legendgroup", parent_name="barpolar", **kwargs):
        kwargs.setdefault("edit_type", "style")
        kwargs.setdefault("role", "info")
        super(LegendgroupValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class IdssrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Source-reference validator for "idssrc" on "barpolar" traces."""

    def __init__(self, plotly_name="idssrc", parent_name="barpolar", **kwargs):
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("role", "info")
        super(IdssrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class IdsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Data-array validator for the "ids" property of "barpolar" traces."""

    def __init__(self, plotly_name="ids", parent_name="barpolar", **kwargs):
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("role", "data")
        super(IdsValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class HovertextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Source-reference validator for "hovertextsrc" on "barpolar" traces."""

    def __init__(self, plotly_name="hovertextsrc", parent_name="barpolar", **kwargs):
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("role", "info")
        super(HovertextsrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class HovertextValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the "hovertext" property of "barpolar" traces."""

    def __init__(self, plotly_name="hovertext", parent_name="barpolar", **kwargs):
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "style")
        kwargs.setdefault("role", "info")
        super(HovertextValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class HovertemplatesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Source-reference validator for "hovertemplatesrc" on "barpolar" traces."""

    def __init__(
        self, plotly_name="hovertemplatesrc", parent_name="barpolar", **kwargs
    ):
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("role", "info")
        super(HovertemplatesrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class HovertemplateValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the "hovertemplate" property of "barpolar" traces."""

    def __init__(self, plotly_name="hovertemplate", parent_name="barpolar", **kwargs):
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("role", "info")
        super(HovertemplateValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Compound validator for the "hoverlabel" property of "barpolar" traces.
    # `data_docs` is the human-readable property help text handed to the
    # base CompoundValidator for the nested "Hoverlabel" component.
    def __init__(self, plotly_name="hoverlabel", parent_name="barpolar", **kwargs):
        """Initialize with defaults; any kwarg popped below may be overridden."""
        super(HoverlabelValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            align
                Sets the horizontal alignment of the text
                content within hover label box. Has an effect
                only if the hover label text spans more two or
                more lines
            alignsrc
                Sets the source reference on plot.ly for  align
                .
            bgcolor
                Sets the background color of the hover labels
                for this trace
            bgcolorsrc
                Sets the source reference on plot.ly for
                bgcolor .
            bordercolor
                Sets the border color of the hover labels for
                this trace.
            bordercolorsrc
                Sets the source reference on plot.ly for
                bordercolor .
            font
                Sets the font used in hover labels.
            namelength
                Sets the default length (in number of
                characters) of the trace name in the hover
                labels for all traces. -1 shows the whole name
                regardless of length. 0-3 shows the first 0-3
                characters, and an integer >3 will show the
                whole name if it is less than that many
                characters, but if it is longer, will truncate
                to `namelength - 3` characters and add an
                ellipsis.
            namelengthsrc
                Sets the source reference on plot.ly for
                namelength .
""",
            ),
            **kwargs
        )
import _plotly_utils.basevalidators
class HoverinfosrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Source-reference validator for "hoverinfosrc" on "barpolar" traces."""

    def __init__(self, plotly_name="hoverinfosrc", parent_name="barpolar", **kwargs):
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("role", "info")
        super(HoverinfosrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class HoverinfoValidator(_plotly_utils.basevalidators.FlaglistValidator):
    """Flaglist validator for the "hoverinfo" property of "barpolar" traces."""

    def __init__(self, plotly_name="hoverinfo", parent_name="barpolar", **kwargs):
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("extras", ["all", "none", "skip"])
        kwargs.setdefault("flags", ["r", "theta", "text", "name"])
        kwargs.setdefault("role", "info")
        super(HoverinfoValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class DthetaValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the "dtheta" property of "barpolar" traces."""

    def __init__(self, plotly_name="dtheta", parent_name="barpolar", **kwargs):
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("role", "info")
        super(DthetaValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class DrValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the "dr" property of "barpolar" traces."""

    def __init__(self, plotly_name="dr", parent_name="barpolar", **kwargs):
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("role", "info")
        super(DrValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class CustomdatasrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Source-reference validator for "customdatasrc" on "barpolar" traces."""

    def __init__(self, plotly_name="customdatasrc", parent_name="barpolar", **kwargs):
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("role", "info")
        super(CustomdatasrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class CustomdataValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Data-array validator for the "customdata" property of "barpolar" traces."""

    def __init__(self, plotly_name="customdata", parent_name="barpolar", **kwargs):
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("role", "data")
        super(CustomdataValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class BasesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Source-reference validator for "basesrc" on "barpolar" traces."""

    def __init__(self, plotly_name="basesrc", parent_name="barpolar", **kwargs):
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("role", "info")
        super(BasesrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
import _plotly_utils.basevalidators
class BaseValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the "base" property of "barpolar" traces."""

    def __init__(self, plotly_name="base", parent_name="barpolar", **kwargs):
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("role", "info")
        super(BaseValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| 35.230672 | 87 | 0.615894 |
795b492ec09fefe907b0105451569890dc0511f9 | 10,776 | py | Python | q2_phylogenomics/tests/test_assemble.py | gregcaporaso/q2-phylogenomics | 2a9f953f6142b110dde2a7b26f34c42fe7d2f8ed | [
"BSD-3-Clause"
] | null | null | null | q2_phylogenomics/tests/test_assemble.py | gregcaporaso/q2-phylogenomics | 2a9f953f6142b110dde2a7b26f34c42fe7d2f8ed | [
"BSD-3-Clause"
] | 4 | 2020-04-10T18:17:39.000Z | 2020-04-15T14:40:09.000Z | q2_phylogenomics/tests/test_assemble.py | gregcaporaso/q2-phylogenomics | 2a9f953f6142b110dde2a7b26f34c42fe7d2f8ed | [
"BSD-3-Clause"
] | 3 | 2020-03-24T16:31:49.000Z | 2020-04-06T21:12:17.000Z | # ----------------------------------------------------------------------------
# Copyright (c) 2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import hashlib
import qiime2
from qiime2.plugin.testing import TestPluginBase
import pandas as pd
from q2_phylogenomics._format import (
SAMFormat, SAMFilesDirFmt, PileUpFilesDirFmt, PileUpTSVFormat)
class TestMapPaired(TestPluginBase):
    """Integration tests for this plugin's paired-end read-mapping actions.

    Each test loads pre-built .qza fixtures from the package's test data
    directory and invokes a registered plugin method on them.
    """
    package = 'q2_phylogenomics.tests'
    def setUp(self):
        """Load the shared artifact fixtures used across the tests below."""
        super().setUp()
        self.demuxed_art = qiime2.Artifact.load(
            self.get_data_path('paired-end.qza'))
        self.paired_mapped_unsorted = qiime2.Artifact.load(
            self.get_data_path('paired-end-mapped-unsorted.qza'))
        self.indexed_genome = qiime2.Artifact.load(
            self.get_data_path('sars2-indexed.qza'))
        self.sorted_alignment_maps = qiime2.Artifact.load(
            self.get_data_path('sorted-alignment-maps.qza'))
        self.pileups = qiime2.Artifact.load(
            self.get_data_path('pileups.qza')
        )
    def test_map_paired_mapped_only(self):
        """Default mapping keeps only mapped reads (10 records per sample)."""
        obs_art, = self.plugin.methods['map_paired_reads'](
            self.demuxed_art, self.indexed_genome)
        obs = obs_art.view(SAMFilesDirFmt)
        # (filename, record count, ids expected present, ids expected absent)
        exp = [('sample_a.sam', 10,
                ('SARS2:6:73:567:7631#0', 'SARS2:6:73:233:3421#0'),
                ('SARS2:6:73:356:9806#0', 'SARS2:6:73:356:9806#0')),
               ('sample_b.sam', 10,
                ('SARS2:6:73:941:1973#0', 'SARS2:6:73:552:2457#0'),
                ('SARS2:6:73:356:9806#0', 'SARS2:6:73:356:9806#0')),
               ('sample_c.sam', 10,
                ('SARS2:6:73:231:3321#0', 'SARS2:6:73:552:2457#0'),
                ('SARS2:6:73:356:9806#0', 'SARS2:6:73:356:9806#0'))]
        obs_sams = obs.sams.iter_views(SAMFormat)
        for (obs_fn, obs_sam), (exp_fn, num_records, mapped_ids, unmapped_ids)\
                in zip(obs_sams, exp):
            self.assertEqual(str(obs_fn), exp_fn)
            with open(str(obs_sam)) as sam_f:
                # first tab-separated column of non-header SAM lines is QNAME
                obs_mapped_ids = [line.split('\t')[0]
                                  for line in sam_f
                                  if not line.startswith('@')]
            self.assertEqual(len(obs_mapped_ids), num_records)
            for e in mapped_ids:
                self.assertIn(e, obs_mapped_ids)
            for e in unmapped_ids:
                self.assertNotIn(e, obs_mapped_ids)
    def test_map_paired_not_mapped_only(self):
        """With mapped_only=False the unmapped reads are retained too."""
        obs_art, = self.plugin.methods['map_paired_reads'](
            self.demuxed_art, self.indexed_genome, mapped_only=False)
        obs = obs_art.view(SAMFilesDirFmt)
        # record counts rise to 12 and the "unmapped" ids are now present
        exp = [('sample_a.sam', 12,
                ('SARS2:6:73:567:7631#0', 'SARS2:6:73:233:3421#0'),
                ('SARS2:6:73:356:9806#0', 'SARS2:6:73:356:9806#0')),
               ('sample_b.sam', 12,
                ('SARS2:6:73:941:1973#0', 'SARS2:6:73:552:2457#0'),
                ('SARS2:6:73:356:9806#0', 'SARS2:6:73:356:9806#0')),
               ('sample_c.sam', 12,
                ('SARS2:6:73:231:3321#0', 'SARS2:6:73:552:2457#0'),
                ('SARS2:6:73:356:9806#0', 'SARS2:6:73:356:9806#0'))]
        obs_sams = obs.sams.iter_views(SAMFormat)
        for (obs_fn, obs_sam), (exp_fn, num_records, mapped_ids, unmapped_ids)\
                in zip(obs_sams, exp):
            self.assertEqual(str(obs_fn), exp_fn)
            with open(str(obs_sam)) as sam_f:
                obs_mapped_ids = [line.split('\t')[0]
                                  for line in sam_f
                                  if not line.startswith('@')]
            self.assertEqual(len(obs_mapped_ids), num_records)
            for e in mapped_ids:
                self.assertIn(e, obs_mapped_ids)
            for e in unmapped_ids:
                self.assertIn(e, obs_mapped_ids)
    def test_map_paired_alt_ceil_coefficient(self):
        """ceil_coefficient is passed through to the mapper's command line."""
        obs_art, = self.plugin.methods['map_paired_reads'](
            self.demuxed_art, self.indexed_genome, ceil_coefficient=0.5)
        obs = obs_art.view(SAMFilesDirFmt)
        obs_sams = obs.sams.iter_views(SAMFormat)
        for _, obs_sam in obs_sams:
            with open(str(obs_sam)) as sam_f:
                # the invocation is recorded in the SAM header/content
                self.assertIn('--n-ceil 0,0.5', sam_f.read())
    def test_sort_alignment_maps(self):
        """Sorting orders records by reference position (SAM column 4)."""
        obs_art, = self.plugin.methods['sort_alignment_maps'](
            self.paired_mapped_unsorted)
        obs = obs_art.view(SAMFilesDirFmt)
        exp_mapped_positions = \
            [1, 1, 192, 211, 402, 421, 612, 631, 823, 841]
        obs_sams = obs.sams.iter_views(SAMFormat)
        for _, obs_sam in obs_sams:
            with open(str(obs_sam)) as sam_f:
                obs_mapped_positions = \
                    [int(line.split('\t')[3])
                     for line in sam_f
                     if not line.startswith('@')]
                self.assertEqual(obs_mapped_positions,
                                 exp_mapped_positions)
    def test_remove_duplicates(self):
        """Duplicate alignments are collapsed to a single representative."""
        obs_art, = self.plugin.methods['remove_duplicates'](
            self.sorted_alignment_maps)
        obs = obs_art.view(SAMFilesDirFmt)
        obs_sams = obs.sams.iter_views(SAMFormat)
        for _, obs_sam in obs_sams:
            with open(str(obs_sam)) as sam_f:
                obs_mapped_ids = [line.split('\t')[0]
                                  for line in sam_f
                                  if not line.startswith('@')]
            # one occurrence of the duplicate alignment is retained...
            self.assertIn('NB501727:157:HFWHJBGXF:2:22105:18312:6802',
                          obs_mapped_ids)
            # ...and the other isn't
            self.assertNotIn('NB501727:157:HFWHJBGXF:3:23610:2922:9731',
                             obs_mapped_ids)
    def test_make_pileups(self):
        """Pileup output has one row per covered reference position."""
        obs_art, = self.plugin.methods['make_pileups'](
            self.sorted_alignment_maps, self.indexed_genome
        )
        obs = obs_art.view(PileUpFilesDirFmt)
        obs_pileups = obs.pileups.iter_views(PileUpTSVFormat)
        for _, obs_pileup in obs_pileups:
            obs_df = pd.read_csv(str(obs_pileup), header=None, sep='\t', )
            # expected values are derived from running samtools
            # directly on these input files
            self.assertEqual(obs_df.shape, (345, 6))
            self.assertEqual(list(obs_df.iloc[:, 0]), ['NC_045512.2'] * 345)
    def test_consensus_sequence_min_depth_1(self):
        """At min_depth=1 real consensus sequences are produced."""
        # this min depth value gets actual sequence with the test data
        obs_table_art, obs_seq_art = self.plugin.methods['consensus_sequence'](
            self.pileups, min_depth=1,
        )
        obs_table = obs_table_art.view(pd.DataFrame)
        # table tests
        # two different genomes across four samples
        self.assertEqual(obs_table.shape, (4, 2))
        # expected sample ids
        self.assertEqual(
            set(['sample_1', 'sample_2', 'sample_3', 'empty']),
            set(obs_table.index))
        # expected feature ids
        seq1_md5 = 'e8b3172acb8547d54deb27e85b596233'  # in samples 1 & 2
        seq2_md5 = '940f1f1bb24a34601d20afbac3147543'  # in sample 3
        self.assertEqual(set([seq1_md5, seq2_md5]), set(obs_table.columns))
        # expected total counts
        self.assertEqual(obs_table.loc['sample_1'].sum(), 1)
        self.assertEqual(obs_table.loc['sample_2'].sum(), 1)
        self.assertEqual(obs_table.loc['sample_3'].sum(), 1)
        self.assertEqual(obs_table.loc['empty'].sum(), 0)
        self.assertEqual(
            obs_table.loc[:, seq1_md5].sum(), 2)
        self.assertEqual(
            obs_table.loc[:, seq2_md5].sum(), 1)
        self.assertTrue(obs_table[seq1_md5]['sample_1'])
        self.assertFalse(obs_table[seq2_md5]['sample_1'])
        self.assertTrue(obs_table[seq1_md5]['sample_2'])
        self.assertFalse(obs_table[seq2_md5]['sample_2'])
        self.assertFalse(obs_table[seq1_md5]['sample_3'])
        self.assertTrue(obs_table[seq2_md5]['sample_3'])
        self.assertFalse(obs_table[seq1_md5]['empty'])
        self.assertFalse(obs_table[seq2_md5]['empty'])
        # sequence collection tests
        self.obs_seq = obs_seq_art.view(pd.Series)
        self.assertEqual(len(self.obs_seq), 2)
        # confirm expected sequences by computing their md5 here
        # and comparing to the expected sequence ids (which were
        # determined independently)
        self.assertEqual(
            hashlib.md5(str(self.obs_seq[seq1_md5]).
                        encode('utf-8')).hexdigest(),
            seq1_md5)
        self.assertEqual(
            hashlib.md5(str(self.obs_seq[seq2_md5]).
                        encode('utf-8')).hexdigest(),
            seq2_md5)
    def test_consensus_sequence_min_depth_default(self):
        """At the default min_depth the low-coverage test data yields all-N."""
        # this min depth value results in sequences that are
        # 100% N with the test data
        obs_table_art, obs_seq_art = self.plugin.methods['consensus_sequence'](
            self.pileups,
        )
        obs_table = obs_table_art.view(pd.DataFrame)
        # table tests
        # one genome across four samples
        self.assertEqual(obs_table.shape, (4, 1))
        # expected sample ids
        self.assertEqual(
            set(['sample_1', 'sample_2', 'sample_3', 'empty']),
            set(obs_table.index))
        # expected feature ids
        seq1_md5 = '53b4ce3236e629aebc69f8a2b5abc96b'  # in samples 1-3 (all N)
        self.assertEqual([seq1_md5], obs_table.columns)
        # expected total counts
        self.assertEqual(obs_table.loc['sample_1'].sum(), 1)
        self.assertEqual(obs_table.loc['sample_2'].sum(), 1)
        self.assertEqual(obs_table.loc['sample_3'].sum(), 1)
        self.assertEqual(obs_table.loc['empty'].sum(), 0)
        self.assertEqual(
            obs_table.loc[:, seq1_md5].sum(), 3)
        self.assertTrue(obs_table[seq1_md5]['sample_1'])
        self.assertTrue(obs_table[seq1_md5]['sample_2'])
        self.assertTrue(obs_table[seq1_md5]['sample_3'])
        self.assertFalse(obs_table[seq1_md5]['empty'])
        # sequence collection tests
        self.obs_seq = obs_seq_art.view(pd.Series)
        self.assertEqual(len(self.obs_seq), 1)
        # confirm expected sequences by computing their md5 here
        # and comparing to the expected sequence ids (which were
        # determined independently)
        self.assertEqual(
            hashlib.md5(str(self.obs_seq[seq1_md5]).
                        encode('utf-8')).hexdigest(),
            seq1_md5)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 42.592885 | 79 | 0.58454 |
795b49fc9a8c3749f2052b1b30f93998884e9321 | 495 | py | Python | setup.py | aurieh/ircproto | 2053e1795a6937ee2a780bddd31ec73e5d5fefc6 | [
"BSD-3-Clause"
] | 2 | 2018-04-05T17:35:24.000Z | 2018-04-06T03:28:34.000Z | setup.py | aurieh/ircproto | 2053e1795a6937ee2a780bddd31ec73e5d5fefc6 | [
"BSD-3-Clause"
] | null | null | null | setup.py | aurieh/ircproto | 2053e1795a6937ee2a780bddd31ec73e5d5fefc6 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup
setup(
name='ircproto',
description='ircproto - protocol-only IRC library',
long_description='Protocol-only, bring your own IO IRC library for Python.',
license='BSD',
author='Aurieh',
url='https://github.com/aurieh/ircproto',
packages=['ircproto'],
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: BSD License',
'Topic :: Communications :: Chat :: Internet Relay Chat'
])
| 30.9375 | 80 | 0.642424 |
795b4a3e83912e109adfc86e6793b2291c92c1c9 | 13,312 | py | Python | tests/test_fields.py | kkszysiu/restea | 34ef2ee683a259a05e530514e743a9f69fa5e663 | [
"MIT"
] | null | null | null | tests/test_fields.py | kkszysiu/restea | 34ef2ee683a259a05e530514e743a9f69fa5e663 | [
"MIT"
] | null | null | null | tests/test_fields.py | kkszysiu/restea | 34ef2ee683a259a05e530514e743a9f69fa5e663 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import mock
import pytest
import datetime
from restea.fields import (
Boolean,
Dict,
Field,
FieldSet,
Integer,
List,
String,
Regex,
URL,
Email,
DateTime,
CommaSeparatedListField,
)
def create_field_set_helper(no_fields=False):
    """Build a FieldSet for tests.

    Returns a ``(fieldset, mock_field1, mock_field2)`` triple; the mocks are
    ``None`` when ``no_fields`` is true.
    """
    if no_fields:
        return FieldSet(), None, None
    first = mock.Mock(spec=Field())
    second = mock.Mock(spec=Field())
    return FieldSet(field1=first, field2=second), first, second
def test_field_set_init():
    """FieldSet stores its fields and names each one after its kwarg."""
    fs, f1, f2 = create_field_set_helper()
    assert fs.fields == {'field1': f1, 'field2': f2}
    f1.set_name.assert_called_with('field1')
    f2.set_name.assert_called_with('field2')


def test_feild_set_field_names():
    fs = create_field_set_helper()[0]
    assert fs.field_names == {'field1', 'field2'}


def test_feild_set_field_names_empty():
    fs = create_field_set_helper(no_fields=True)[0]
    assert fs.field_names == set()
def test_field_set_required_fields():
    """Only fields flagged required=True are reported."""
    fs, f1, f2 = create_field_set_helper()
    f1.required = True
    f2.required = False
    assert fs.get_required_field_names({}) == {'field1'}


def test_field_set_required_fields_callable():
    """`required` may be a callable (or lambda) evaluated against the data."""
    fs, f1, f2 = create_field_set_helper()
    f2.required = False

    def is_required(data):
        return data.get('field2') == 0

    # Exercise both a named function and a lambda as the predicate.
    for predicate in (is_required, lambda data: data.get('field2') == 0):
        f1.required = predicate
        assert fs.get_required_field_names({'field2': 0}) == {'field1'}
        assert fs.get_required_field_names({}) == set()
def test_field_set_validate():
    """validate() runs each field's validator and drops unknown keys."""
    fs, f1, f2 = create_field_set_helper()
    f1.validate.return_value = 1
    f2.validate.return_value = 2
    payload = {'field1': '1', 'field2': '2', 'field3': 'wrong!'}
    assert fs.validate(payload) == {'field1': 1, 'field2': 2}
    f1.validate.assert_called_with('1')
    f2.validate.assert_called_with('2')
def test_feild_set_validate_requred_fields_missing():
    """validate() raises FieldSet.Error when a required field is absent."""
    fs, f1, _ = create_field_set_helper()
    # Fixed typo: the original set `f1.requred`, which on a Mock silently
    # creates an unrelated attribute (a no-op); the intended flag is
    # `required`.
    f1.required = True
    with pytest.raises(FieldSet.Error) as e:
        fs.validate({'field2': '2'})
    assert 'Field "field1" is missing' in str(e)
def test_field_init():
    """Field() captures settings, starts unnamed, and honours required."""
    fld = Field(setting1=1, setting2=2, required=True)
    assert fld._name is None
    assert fld._settings == {'setting1': 1, 'setting2': 2}
    assert fld.required


def test_feld_init_not_required():
    assert not Field(setting1=1).required


def test_field_set_name():
    fld = Field()
    fld.set_name('test')
    assert fld._name == 'test'
def test_field_validate_field_base_should_be_abstract():
    """The base Field class must not implement _validate_field."""
    with pytest.raises(NotImplementedError):
        Field()._validate_field('test')


def test_field_get_settings_validator():
    fld = Field()
    fld._validate_my_setting = mock.Mock()
    assert fld._get_setting_validator('my_setting') == fld._validate_my_setting


def test_field_get_settings_validator_raise_configration_error():
    fld = Field()
    fld.set_name('test')
    with pytest.raises(FieldSet.ConfigurationError) as e:
        fld._get_setting_validator('my_setting')
    assert 'Setting "my_setting" is not supported for field "test"' in str(e)
def test_field_validate():
    """validate() runs the field validator, then each setting validator."""
    fld = Field(my_setting=1)
    fld.set_name('test')
    fld._validate_field = mock.Mock(return_value='value')
    fld._validate_my_setting = mock.Mock(return_value='value')
    assert fld.validate('value') == 'value'
    fld._validate_field.assert_called_with('value')
    fld._validate_my_setting.assert_called_with(1, 'value')


def test_field_validate_raises_on_field_validation():
    """A field-level failure short-circuits the setting validators."""
    message = 'Field error message'
    fld = Field(my_setting=1)
    fld.set_name('test')
    fld._validate_field = mock.Mock(side_effect=FieldSet.Error(message))
    fld._validate_my_setting = mock.Mock()
    with pytest.raises(FieldSet.Error) as e:
        fld.validate('value')
    assert message in str(e)
    assert not fld._validate_my_setting.called


def test_field_validate_raises_on_setting_validation():
    """A setting-level failure still runs the field validator first."""
    message = 'my setting error message'
    fld = Field(my_setting=1)
    fld.set_name('test')
    fld._validate_field = mock.Mock()
    fld._validate_my_setting = mock.Mock(side_effect=FieldSet.Error(message))
    with pytest.raises(FieldSet.Error) as e:
        fld.validate('value')
    assert message in str(e)
    fld._validate_field.assert_called_with('value')
def test_integer_field_validate():
    assert Integer()._validate_field(1000) == 1000


def test_integer_field_validate_decimal():
    # floats are truncated to int
    assert Integer()._validate_field(10.10) == 10


def test_integer_field_validate_numberic_str():
    assert Integer()._validate_field('10') == 10


def test_integer_field_validate_non_acceptable_value():
    fld = Integer()
    for bad in ('should not work', None, '10.10'):
        with pytest.raises(FieldSet.Error) as e:
            fld._validate_field(bad)
        assert 'Field "{}" is not a number'.format(fld._name) in str(e)


def test_integer_field_range_success():
    fld = Integer()
    # lower bound, interior value, upper bound
    for val in (1, 5, 10):
        assert fld._validate_range((1, 10), val) == val


def test_integer_field_range_fail():
    fld = Integer()
    for bad in (100, 0, -5):
        with pytest.raises(FieldSet.Error):
            fld._validate_range((1, 10), bad)
def test_string_validate_max_length():
    # exactly at the limit: must not raise
    String()._validate_max_length(4, 'text')


def test_string_validate_max_length_fail():
    fld = String()
    with pytest.raises(FieldSet.Error) as e:
        fld._validate_max_length(4, 'text1')
    assert 'Field "{}" is longer than expected'.format(fld._name) in str(e)


def test_string_validate():
    assert String()._validate_field('test') == 'test'


def test_string_validate_not_acceptable_value():
    fld = String()
    for bad in (10, None, list):
        with pytest.raises(FieldSet.Error) as e:
            fld._validate_field(bad)
        assert 'Field "{}" is not a string'.format(fld._name) in str(e)
def test_regex_validate_pattern():
    """By default matches are returned as a sequence; index 0 is the match."""
    pattern = r'\d{1,3}'
    fld = Regex(pattern=pattern)
    for candidate in ('123', '0', '10'):
        assert fld._validate_pattern(pattern, candidate)[0] == candidate


def test_regex_validate_pattern_use_first_found():
    """With use_first_found=True the bare match is returned."""
    pattern = r'\d{1,3}'
    fld = Regex(use_first_found=True, pattern=pattern)
    for candidate in ('123', '0', '10'):
        assert fld._validate_pattern(pattern, candidate) == candidate


def test_regex_validate_pattern_list_patterns():
    """A list of patterns accepts a value matching any of them."""
    patterns = [r'\d{1,3}', r'[a-z]{2,3}']
    fld = Regex(pattern=patterns)
    for candidate in ('100', '0', 'te', 'tes'):
        assert fld._validate_pattern(patterns, candidate)[0] == candidate
def test_regex_validate_pattern_fail():
    """Values that don't match the pattern must raise FieldSet.Error."""
    p = r'\d{3}'
    f = Regex(pattern=p)
    for value in ('not_a_number', 'other12thing'):
        with pytest.raises(FieldSet.Error):
            # Fixed argument order: every other call in this module passes
            # (pattern, value); the original passed (value, p), which still
            # raised, but only because the swapped "pattern" never matched.
            f._validate_pattern(p, value)
def test_regex_validate_pattern_list_patterns_fails():
    """A value matching none of the listed patterns raises FieldSet.Error."""
    patterns = [r'\d{3}', r'[a-z]{100}']
    fld = Regex(pattern=patterns)
    for candidate in ('not_a_number', 'other12thing'):
        with pytest.raises(FieldSet.Error):
            fld._validate_pattern(patterns, candidate)
def test_url_validate_pattern():
    """Well-formed http(s) URLs pass the URL field's built-in regex."""
    fld = URL()
    for url in ('http://google.com/ncr', 'https://www.rebelmouse.com'):
        assert fld._validate_pattern(fld.regex, url)[0] == url


def test_url_validate_pattern_use_first_found():
    fld = URL(use_first_found=True)
    for url in ('http://google.com/ncr', 'https://www.rebelmouse.com'):
        assert fld._validate_pattern(fld.regex, url) == url


def test_url_validate_fail():
    fld = URL()
    for bad in ('not_a_url', 'otherthing'):
        with pytest.raises(FieldSet.Error):
            fld._validate_pattern(fld.regex, bad)
def test_valid_email():
    assert Email().validate('t@r.co') == 't@r.co'


def test_invalid_email():
    """Each malformed address raises with a message naming the bad value."""
    email = Email()
    bad_addresses = ('foo_bar.com', 'foo@ bar.com', 'foo@barcom', 'foo@bar.c')
    for address in bad_addresses:
        with pytest.raises(FieldSet.Error) as error:
            email.validate(address)
        assert str(error.value) == '"{}" is not a valid email'.format(address)
def test_boolean_validate_true():
    assert Boolean()._validate_field(True) is True


def test_boolean_validate_false():
    assert Boolean()._validate_field(False) is False


def test_boolean_validate_non_acceptable_value():
    """Truthy/falsy non-bool values are rejected — only real bools pass."""
    fld = Boolean()
    fld.set_name('foo')
    for bad in (10, None, [], {}, 'bar'):
        with pytest.raises(FieldSet.Error) as e:
            fld._validate_field(bad)
        assert 'Field "foo" is not a boolean' in str(e)
def test_datetime_validate_acceptable_value():
f = DateTime()
expected_date = datetime.datetime(2015, 10, 6, 16, 29, 19, 776000)
res = f._validate_field(1444148959776)
assert res == expected_date
def test_datetime_validate_non_acceptable_value():
f = DateTime()
f.set_name('foo')
for fail_val in (None, 'foobar', []):
with pytest.raises(FieldSet.Error) as e:
f._validate_field(fail_val)
assert 'Field "foo" can\'t be parsed' in str(e)
def test_datetime_validate_acceptable_value_not_ms_precision():
f = DateTime(ms_precision=False)
expected_date = datetime.datetime(2015, 10, 6, 16, 29, 19)
res = f._validate_field(1444148959)
assert res == expected_date
def test_list_validate_empty():
    """Validating an empty list returns [] without touching the element field."""
    element_field = mock.Mock()
    f = List(element_field=element_field)
    assert f.validate([]) == []
    assert element_field.mock_calls == []

def test_list_validate():
    """Each element is passed through element_field.validate in order."""
    element_field = mock.Mock()
    element_field.validate.side_effect = lambda x: x
    f = List(element_field=element_field)
    assert f.validate(['foo', 'bar', 'baz']) == ['foo', 'bar', 'baz']
    assert element_field.mock_calls == [
        mock.call.validate('foo'),
        mock.call.validate('bar'),
        mock.call.validate('baz'),
    ]

def test_list_validate_fail():
    """Validation stops at the first failing element."""
    def mock_validate(value):
        # First call succeeds but re-arms the mock to raise on the next call.
        element_field.validate.side_effect = FieldSet.Error
        return value
    element_field = mock.Mock()
    element_field.validate.side_effect = mock_validate
    f = List(element_field=element_field)
    f.set_name('foo')
    with pytest.raises(FieldSet.Error) as e:
        f.validate(['bar', 7, 'baz'])
    assert 'One of the elements on field "foo" failed to validate' in str(e)
    # 'baz' was never validated -- iteration stopped at the failure on 7.
    assert element_field.mock_calls == [
        mock.call.validate('bar'),
        mock.call.validate(7),
    ]

def test_list_non_acceptable_value():
    """Non-list values are rejected before any element validation happens."""
    element_field = mock.Mock()
    f = List(element_field=element_field)
    f.set_name('foo')
    for fail_val in (10, None, {}, 'bar', True):
        with pytest.raises(FieldSet.Error) as e:
            f._validate_field(fail_val)
        assert 'Field "foo" is not a list' in str(e)
    assert element_field.mock_calls == []

def test_dict_validate():
    """Dict values pass through _validate_field unchanged."""
    f = Dict()
    di = {
        'foo': 'bar',
        4: True
    }
    assert f._validate_field(di.copy()) == di

def test_dict_validate_non_acceptable_value():
    """Non-dict values (lists included) are rejected."""
    f = Dict()
    f.set_name('foo')
    for fail_val in (10, None, [], 'bar', True):
        with pytest.raises(FieldSet.Error) as e:
            f._validate_field(fail_val)
        assert 'Field "foo" is not a dict' in str(e)
def test_comma_separated_list_does_not_allow_more_requests():
    """Lists longer than limit_per_request are rejected."""
    limit_per_request = 10
    field = CommaSeparatedListField(
        limit_per_request=limit_per_request, cast_func=int
    )
    field.set_name('foo')
    # Build limit_per_request + 1 items -- one more than allowed.
    value_list = range(0, limit_per_request + 1)
    string_list = field.separator.join(map(str, value_list))
    with pytest.raises(FieldSet.Error) as e:
        field.validate(string_list)
    assert 'Field "foo" has more items than allowed in the settings' in str(e)

def test_comma_separated_list_with_string_values_and_integer_type_cast():
    """Items that cannot be cast (letters with cast_func=int) fail to parse."""
    field = CommaSeparatedListField(cast_func=int)
    field.set_name('foo')
    with pytest.raises(FieldSet.Error) as e:
        field.validate('a;b;c;d')
    assert 'Field "foo" can\'t be parsed as a list' in str(e)

def test_comma_separated_list_custom_separator_not_informed_on_constructor():
    """Without separator=',' the string '1,2,3,4' stays one uncastable item."""
    field = CommaSeparatedListField(cast_func=int)
    field.set_name('foo')
    with pytest.raises(FieldSet.Error) as e:
        field.validate('1,2,3,4')
    assert 'Field "foo" can\'t be parsed as a list' in str(e)

def test_constructor_with_custom_separator():
    """A custom separator splits the raw string into string items."""
    field = CommaSeparatedListField(separator=',')
    field.set_name('foo')
    assert field.validate('a,b,c,d') == ['a', 'b', 'c', 'd']

def test_constructor_with_custom_cast_and_separator():
    """Separator plus cast_func yields a list of cast values."""
    field = CommaSeparatedListField(cast_func=int, separator=',')
    field.set_name('foo')
    assert field.validate('1,2,3,4') == [1, 2, 3, 4]
| 27.849372 | 78 | 0.668645 |
795b4aff0a0fc4f3af01aa38c585c7745e6fe11b | 5,350 | py | Python | test/Deprecated/Options/help.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | [
"MIT"
] | 1 | 2019-09-18T06:37:02.000Z | 2019-09-18T06:37:02.000Z | test/Deprecated/Options/help.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | [
"MIT"
] | null | null | null | test/Deprecated/Options/help.py | EmanueleCannizzaro/scons | 6baa4e65cdf4df6951473545b69435711864e509 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Deprecated/Options/help.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Test the Options help messages.
"""
import os
import re
import TestSCons
str_True = str(True)
str_False = str(False)
test = TestSCons.TestSCons(match = TestSCons.match_re_dotall)
workpath = test.workpath()
qtpath = os.path.join(workpath, 'qt')
libpath = os.path.join(qtpath, 'lib')
libdirvar = os.path.join('$qtdir', 'lib')
qtpath_re = re.escape(qtpath)
libpath_re = re.escape(libpath)
libdirvar_re = re.escape(libdirvar)
test.subdir(qtpath)
test.subdir(libpath)
test.write('SConstruct', """
from SCons.Options import BoolOption, EnumOption, ListOption, \
PackageOption, PathOption
list_of_libs = Split('x11 gl qt ical')
qtdir = r'%(qtpath)s'
opts = Options(args=ARGUMENTS)
opts.AddOptions(
BoolOption('warnings', 'compilation with -Wall and similiar', 1),
BoolOption('profile', 'create profiling informations', 0),
EnumOption('debug', 'debug output and symbols', 'no',
allowed_values=('yes', 'no', 'full'),
map={}, ignorecase=0), # case sensitive
EnumOption('guilib', 'gui lib to use', 'gtk',
allowed_values=('motif', 'gtk', 'kde'),
map={}, ignorecase=1), # case insensitive
EnumOption('some', 'some option', 'xaver',
allowed_values=('xaver', 'eins'),
map={}, ignorecase=2), # make lowercase
ListOption('shared',
'libraries to build as shared libraries',
'all',
names = list_of_libs),
PackageOption('x11',
'use X11 installed here (yes = search some places)',
'yes'),
PathOption('qtdir', 'where the root of Qt is installed', qtdir),
PathOption('qt_libraries',
'where the Qt library is installed',
r'%(libdirvar)s'),
)
env = Environment(options=opts)
Help(opts.GenerateHelpText(env))
print env['warnings']
print env['profile']
Default(env.Alias('dummy', None))
""" % locals())
expected_stdout = """\
scons: Reading SConscript files ...
%(str_True)s
%(str_False)s
scons: done reading SConscript files.
warnings: compilation with -Wall and similiar \\(yes|no\\)
default: 1
actual: %(str_True)s
profile: create profiling informations \\(yes|no\\)
default: 0
actual: %(str_False)s
debug: debug output and symbols \\(yes|no|full\\)
default: no
actual: no
guilib: gui lib to use \\(motif|gtk|kde\\)
default: gtk
actual: gtk
some: some option \\(xaver|eins\\)
default: xaver
actual: xaver
shared: libraries to build as shared libraries
\\(all|none|comma-separated list of names\\)
allowed names: x11 gl qt ical
default: all
actual: x11 gl qt ical
x11: use X11 installed here \\(yes = search some places\\)
\\( yes | no | /path/to/x11 \\)
default: yes
actual: %(str_True)s
qtdir: where the root of Qt is installed \\( /path/to/qtdir \\)
default: %(qtpath_re)s
actual: %(qtpath_re)s
qt_libraries: where the Qt library is installed \\( /path/to/qt_libraries \\)
default: %(libdirvar_re)s
actual: %(libpath_re)s
Use scons -H for help about command-line options.
""" % locals()
file_expr = TestSCons.file_expr
expected_stderr = """
scons: warning: The Options class is deprecated; use the Variables class instead.
%(file_expr)s
scons: warning: The BoolOption\\(\\) function is deprecated; use the BoolVariable\\(\\) function instead.
%(file_expr)s
scons: warning: The EnumOption\\(\\) function is deprecated; use the EnumVariable\\(\\) function instead.
%(file_expr)s
scons: warning: The ListOption\\(\\) function is deprecated; use the ListVariable\\(\\) function instead.
%(file_expr)s
scons: warning: The PackageOption\\(\\) function is deprecated; use the PackageVariable\\(\\) function instead.
%(file_expr)s
scons: warning: The PathOption\\(\\) function is deprecated; use the PathVariable\\(\\) function instead.
%(file_expr)s""" % locals()
test.run(arguments='-h', stdout=expected_stdout, stderr=expected_stderr)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 31.28655 | 111 | 0.68729 |
795b4c298d996e5d00615cd780e95a48e81bf9be | 2,096 | py | Python | tests/config/test_generate.py | cleveritcz/synapse | caead3e45968a9f753da7bc11ee588ab4efda858 | [
"Apache-2.0"
] | 1 | 2019-05-01T11:05:51.000Z | 2019-05-01T11:05:51.000Z | tests/config/test_generate.py | cleveritcz/synapse | caead3e45968a9f753da7bc11ee588ab4efda858 | [
"Apache-2.0"
] | null | null | null | tests/config/test_generate.py | cleveritcz/synapse | caead3e45968a9f753da7bc11ee588ab4efda858 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import re
import shutil
import tempfile
from synapse.config.homeserver import HomeServerConfig
from tests import unittest
class ConfigGenerationTestCase(unittest.TestCase):
    """Tests for ``--generate-config``: it must emit the expected files."""

    def setUp(self):
        # Fresh scratch directory per test; removed again in tearDown.
        self.dir = tempfile.mkdtemp()
        self.file = os.path.join(self.dir, "homeserver.yaml")

    def tearDown(self):
        shutil.rmtree(self.dir)

    def test_generate_config_generates_files(self):
        """Generating a config writes the yaml, log config and signing key."""
        HomeServerConfig.load_or_generate_config(
            "",
            [
                "--generate-config",
                "-c",
                self.file,
                "--report-stats=yes",
                "-H",
                "lemurs.win",
            ],
        )
        # Exactly these three files, named after the server name, appear.
        self.assertSetEqual(
            set(
                [
                    "homeserver.yaml",
                    "lemurs.win.log.config",
                    "lemurs.win.signing.key",
                ]
            ),
            set(os.listdir(self.dir)),
        )
        self.assert_log_filename_is(
            os.path.join(self.dir, "lemurs.win.log.config"),
            os.path.join(os.getcwd(), "homeserver.log"),
        )

    def assert_log_filename_is(self, log_config_file, expected):
        """Assert the generated log config points logging at *expected*."""
        with open(log_config_file) as f:
            config = f.read()
        # find the 'filename' line
        matches = re.findall(r"^\s*filename:\s*(.*)$", config, re.M)
        self.assertEqual(1, len(matches))
        self.assertEqual(matches[0], expected)
| 29.942857 | 74 | 0.589218 |
795b4c95cc9451afdaef545a8359749eec195110 | 9,520 | py | Python | r2r_src/param.py | ZhuFengdaaa/MG-AuxRN | 3dee0b6c1293dd521a53e95b0cbed696ceaa93f9 | [
"MIT-0",
"MIT"
] | 22 | 2020-09-20T02:54:11.000Z | 2022-01-18T16:24:59.000Z | r2r_src/param.py | ZhuFengdaaa/MG-AuxRN | 3dee0b6c1293dd521a53e95b0cbed696ceaa93f9 | [
"MIT-0",
"MIT"
] | 3 | 2020-10-04T03:50:15.000Z | 2021-12-13T05:15:22.000Z | r2r_src/param.py | ZhuFengdaaa/MG-AuxRN | 3dee0b6c1293dd521a53e95b0cbed696ceaa93f9 | [
"MIT-0",
"MIT"
] | 3 | 2021-02-06T06:47:19.000Z | 2021-12-10T19:27:23.000Z | import argparse
import os
import torch
class Param:
    """Command-line configuration for the R2R agent.

    Builds an argparse parser covering data, model, training, auxiliary-loss
    and object-feature options, parses ``sys.argv`` immediately on
    construction, and resolves the ``--optim`` string into a torch optimizer
    class stored on ``self.args.optimizer``.
    """
    def __init__(self):
        self.parser = argparse.ArgumentParser(description="")

        # General
        self.parser.add_argument('--iters', type=int, default=80000)
        self.parser.add_argument('--name', type=str, default='default')
        self.parser.add_argument('--train', type=str, default='listener')

        # Data preparation
        self.parser.add_argument('--maxInput', type=int, default=80, help="max input instruction")
        self.parser.add_argument('--maxDecode', type=int, default=120, help="max input instruction")
        self.parser.add_argument('--maxAction', type=int, default=35, help='Max Action sequence')
        self.parser.add_argument('--batchSize', type=int, default=64)
        self.parser.add_argument('--ignoreid', type=int, default=-100)
        self.parser.add_argument('--feature_size', type=int, default=2048)
        self.parser.add_argument("--loadOptim", action="store_const", default=False, const=True)

        # Load the model from
        self.parser.add_argument("--speaker", default=None)
        self.parser.add_argument("--listener", default=None)
        self.parser.add_argument("--load", type=str, default=None)

        # More Paths from
        self.parser.add_argument("--aug", default=None)

        # Listener Model Config
        self.parser.add_argument("--zeroInit", dest='zero_init', action='store_const', default=False, const=True)
        self.parser.add_argument("--mlWeight", dest='ml_weight', type=float, default=0.2)
        self.parser.add_argument("--teacherWeight", dest='teacher_weight', type=float, default=1.)
        self.parser.add_argument("--accumulateGrad", dest='accumulate_grad', action='store_const', default=False, const=True)
        self.parser.add_argument("--features", type=str, default='imagenet')

        # Env Dropout Param
        self.parser.add_argument('--featdropout', type=float, default=0.3)

        # SSL configuration
        self.parser.add_argument("--selfTrain", dest='self_train', action='store_const', default=False, const=True)

        # Submision configuration
        self.parser.add_argument("--candidates", type=int, default=1)
        self.parser.add_argument("--paramSearch", dest='param_search', action='store_const', default=False, const=True)
        self.parser.add_argument("--submit", action='store_const', default=False, const=True)
        self.parser.add_argument("--beam", action="store_const", default=False, const=True)
        self.parser.add_argument("--alpha", type=float, default=0.5)

        # aux loss
        self.parser.add_argument("--aux_option", action='store_const', default=False, const=True)
        self.parser.add_argument('--speWeight', type=float, default=0)
        self.parser.add_argument('--proWeight', type=float, default=0)
        self.parser.add_argument('--matWeight', type=float, default=0)
        self.parser.add_argument('--feaWeight', type=float, default=0)
        self.parser.add_argument('--angWeight', type=float, default=0)
        self.parser.add_argument("--modspe", action='store_const', default=False, const=True)
        self.parser.add_argument("--modpro", action='store_const', default=False, const=True)
        self.parser.add_argument("--modmat", action='store_const', default=False, const=True)
        self.parser.add_argument("--modfea", action='store_const', default=False, const=True)
        self.parser.add_argument("--mask_fea", action='store_const', default=False, const=True)
        self.parser.add_argument("--no_train_rl", action='store_const', default=False, const=True)
        self.parser.add_argument("--mat_mask", action='store_const', default=False, const=True)
        self.parser.add_argument("--mat_detach", action='store_const', default=False, const=True)
        self.parser.add_argument("--mat_norm", action='store_const', default=False, const=True)
        self.parser.add_argument("--mat_mul", action='store_const', default=False, const=True)

        # Training Configurations
        self.parser.add_argument('--optim', type=str, default='rms')    # rms, adam
        self.parser.add_argument('--lr', type=float, default=0.0001, help="The learning rate")
        self.parser.add_argument('--decay', dest='weight_decay', type=float, default=0.)
        self.parser.add_argument('--dropout', type=float, default=0.5)
        self.parser.add_argument('--feedback', type=str, default='sample',
                                 help='How to choose next position, one of ``teacher``, ``sample`` and ``argmax``')
        self.parser.add_argument('--teacher', type=str, default='final',
                                 help="How to get supervision. one of ``next`` and ``final`` ")
        self.parser.add_argument('--epsilon', type=float, default=0.1)
        self.parser.add_argument('--save_iter', type=int, default=40000)

        # Model hyper params:
        self.parser.add_argument('--rnnDim', dest="rnn_dim", type=int, default=512)
        self.parser.add_argument('--wemb', type=int, default=256)
        self.parser.add_argument('--aemb', type=int, default=64)
        self.parser.add_argument('--proj', type=int, default=512)
        self.parser.add_argument("--fast", dest="fast_train", action="store_const", default=False, const=True)
        self.parser.add_argument("--valid", action="store_const", default=False, const=True)
        self.parser.add_argument("--candidate", dest="candidate_mask",
                                 action="store_const", default=False, const=True)
        # NOTE(review): argparse type=bool treats any non-empty string as
        # True, so "--bidir False" still yields True; only the default is
        # reliable here.
        self.parser.add_argument("--bidir", type=bool, default=True)    # This is not full option
        self.parser.add_argument("--encode", type=str, default="word")  # sub, word, sub_ctx
        self.parser.add_argument("--subout", dest="sub_out", type=str, default="max")  # tanh, max
        self.parser.add_argument("--attn", type=str, default="soft")    # soft, mono, shift, dis_shift
        self.parser.add_argument("--angleFeatSize", dest="angle_feat_size", type=int, default=128)
        # self.parser.add_argument("--views", dest='views',type=int,default=36)

        # A2C
        self.parser.add_argument("--gamma", default=0.9, type=float)
        self.parser.add_argument("--normalize", dest="normalize_loss", default="total", type=str, help='batch or total')

        # polyaxon
        self.parser.add_argument("--upload", action='store_const', default=False, const=True)

        # Multi_head
        self.parser.add_argument("--headNum", dest="headNum", type=int, default=2)
        self.parser.add_argument("--multiMode", type=str, default="")  # vis,can,ins

        # object
        self.parser.add_argument('--objdir', type=str, default='0_8')
        self.parser.add_argument("--objthr", dest='objthr', type=float, default=0.99)
        self.parser.add_argument("--angleObjSize", dest="angle_bbox_size", type=int, default=8)
        self.parser.add_argument("--insEmb", dest="instEmb", type=int, default=300)
        self.parser.add_argument("--insHE", dest="instHE", type=int, default=16)
        self.parser.add_argument("--sparseObj", dest='sparseObj', action='store_const', default=False, const=True)
        self.parser.add_argument("--catRN", dest='catRN', action='store_const', default=False, const=True)
        self.parser.add_argument("--addRN", dest='addRN', action='store_const', default=False, const=True)
        self.parser.add_argument("--denseObj", dest='denseObj', action='store_const', default=False, const=True)
        self.parser.add_argument("--longCat", dest='longCat', action='store_const', default=False, const=True)
        self.parser.add_argument("--objInputMode", type=str, default="sg")  # tanh,sg,sm
        # self.parser.add_argument("--objType", type=str, default="dense") # dense, denseRN, sparseRN, denSpaRN,
        # self.parser.add_argument("--catAngleBbox", dest='catAngleBbox', action='store_const', default=False, const=True)
        self.parser.add_argument("--catfeat", type=str, default="none")  # none,he,bbox,angle,bboxAngle

        self.args = self.parser.parse_args()

        # Map the --optim string onto the matching torch optimizer class.
        if self.args.optim == 'rms':
            print("Optimizer: Using RMSProp")
            self.args.optimizer = torch.optim.RMSprop
        elif self.args.optim == 'adam':
            print("Optimizer: Using Adam")
            self.args.optimizer = torch.optim.Adam
        elif self.args.optim == 'sgd':
            print("Optimizer: sgd")
            self.args.optimizer = torch.optim.SGD
        else:
            # NOTE(review): assert is stripped under python -O; raising
            # ValueError would be a more robust way to reject bad --optim.
            assert False

# Module-level singleton: parsing happens at import time.
param = Param()
args = param.args

# Static dataset / feature paths attached to the parsed namespace.
args.TRAIN_VOCAB = 'tasks/R2R/data/train_vocab.txt'
args.TRAINVAL_VOCAB = 'tasks/R2R/data/trainval_vocab.txt'
args.IMAGENET_FEATURES = 'img_features/ResNet-152-imagenet.tsv'
args.CANDIDATE_FEATURES = 'img_features/ResNet-152-candidate.tsv'
args.features_fast = 'img_features/ResNet-152-imagenet-fast.tsv'
args.SPARSE_OBJ_FEATURES = 'obj_features/%s/panorama_objs_Features_nms_%s.npy' % (args.objdir, args.objdir)
args.DENSE_OBJ_FEATURES1 = 'obj_features/%s/panorama_objs_DenseFeatures_nms1_%s.npy' % (args.objdir, args.objdir)
args.DENSE_OBJ_FEATURES2 = 'obj_features/%s/panorama_objs_DenseFeatures_nms2_%s.npy' % (args.objdir, args.objdir)
args.BBOX_FEATURES = 'obj_features/%s/panorama_objs_bbox_%s.npy' % (args.objdir, args.objdir)
args.log_dir = 'snap/%s' % args.name
args.R2R_Aux_path = '.'
args.upload_path = 'lyx'

# Side effects at import: create the snapshot dir and open a debug log.
if not os.path.exists(args.log_dir):
    os.makedirs(args.log_dir)
DEBUG_FILE = open(os.path.join('snap', args.name, "debug.log"), 'w')
| 59.130435 | 125 | 0.673004 |
795b4d0fd4efa5d1fc51341c288f83324f613c2f | 314 | py | Python | setup.py | LuckyScott/pg-es-fdw | 8d0ecd6b3cade0e985718057868c0de970b107d3 | [
"MIT"
] | 102 | 2015-07-08T09:53:22.000Z | 2021-03-19T02:20:42.000Z | setup.py | LuckyScott/pg-es-fdw | 8d0ecd6b3cade0e985718057868c0de970b107d3 | [
"MIT"
] | 6 | 2016-06-17T20:42:10.000Z | 2018-02-07T13:02:55.000Z | setup.py | LuckyScott/pg-es-fdw | 8d0ecd6b3cade0e985718057868c0de970b107d3 | [
"MIT"
] | 34 | 2015-09-29T15:03:23.000Z | 2019-11-22T06:33:00.000Z | ###
### Author:
### Time-stamp: <2010-03-04 23:04:21 dwa>
from setuptools import setup
if __name__ == '__main__':
setup(
name='dite',
version='0.0.1',
author='Mikulas Dite',
license='Postgresql',
packages=['dite']
)
## Local Variables: ***
## mode:python ***
## coding: utf-8 ***
## End: ***
| 16.526316 | 41 | 0.576433 |
795b4d1d687858ccb68f5f6d5375fe2a2c077080 | 6,445 | py | Python | molecule/common/tests/test_instances.py | tarantool/ansible-cartridge | 14d86752d582f43f0d8efb27dbaa1175ff6e8ac2 | [
"BSD-2-Clause"
] | 17 | 2019-09-02T15:31:56.000Z | 2022-03-29T18:49:59.000Z | molecule/common/tests/test_instances.py | tarantool/ansible-cartridge | 14d86752d582f43f0d8efb27dbaa1175ff6e8ac2 | [
"BSD-2-Clause"
] | 171 | 2019-10-24T15:34:34.000Z | 2022-03-29T09:18:46.000Z | molecule/common/tests/test_instances.py | tarantool/ansible-cartridge | 14d86752d582f43f0d8efb27dbaa1175ff6e8ac2 | [
"BSD-2-Clause"
] | 14 | 2019-12-23T08:27:06.000Z | 2021-07-06T15:53:49.000Z | import os
from yaml import CLoader as Loader
import utils
testinfra_hosts = utils.get_testinfra_hosts()
def check_conf_file(conf_file, instance_id, conf):
    """Assert the YAML conf file exists, is owned by tarantool, and contains
    exactly *conf* under the *instance_id* key."""
    assert conf_file is not None, 'Config file should exists'
    assert conf_file.exists
    assert conf_file.user == 'tarantool'
    assert conf_file.group == 'tarantool'
    loader = Loader(conf_file.content_string)
    conf_file_dict = loader.get_data()
    assert instance_id in conf_file_dict
    assert conf_file_dict[instance_id] == conf

def test_systemd_services(host):
    """Every configured instance has a matching systemd unit: running and
    enabled unless the instance was expelled."""
    app_name = utils.get_app_name()
    machine_instances = utils.get_machine_instances(host)
    assert machine_instances
    for instance in machine_instances:
        instance_vars = utils.get_instance_vars(instance)
        instance_name = instance_vars['inventory_hostname']
        # Regular instances use the templated unit '<app>@<instance>';
        # the stateboard uses the dedicated '<app>-stateboard' unit.
        if not utils.instance_is_stateboard(instance_vars):
            service_name = '%s@%s' % (app_name, instance_name)
        else:
            service_name = '%s-stateboard' % app_name
        service = host.service(service_name)
        if utils.instance_is_expelled(instance_vars):
            assert not service.is_running
            assert not service.is_enabled
        else:
            assert service.is_running
            assert service.is_enabled

def test_dirs(host):
    """Check the on-disk layout (dist, conf, run, data dirs) per instance."""
    app_name = utils.get_app_name()
    machine_instances = utils.get_machine_instances(host)
    assert machine_instances
    for instance in machine_instances:
        instance_vars = utils.get_instance_vars(instance)
        instance_id = utils.get_instance_id(app_name, instance_vars)
        conf_dir = instance_vars.get('cartridge_conf_dir', '/etc/tarantool/conf.d')
        run_dir = instance_vars.get('cartridge_run_dir', '/var/run/tarantool')
        data_dir = instance_vars.get('cartridge_data_dir', '/var/lib/tarantool')
        install_dir = instance_vars.get('cartridge_app_install_dir', '/usr/share/tarantool')
        instances_dir = instance_vars.get('cartridge_app_instances_dir', '/usr/share/tarantool')
        multiversion = instance_vars.get('cartridge_multiversion', False)
        if not multiversion:
            # Single-version layout: one dist dir named after the app.
            dist_dir_path = os.path.join(install_dir, app_name)
        else:
            # Multiversion layout: dist dir named after the package
            # (strip the archive extension, including '.tar.gz').
            package_path = instance_vars.get('cartridge_package_path')
            package_basename = os.path.basename(package_path)
            package_name_version, ext = os.path.splitext(package_basename)
            if ext == '.gz' and package_name_version.endswith('.tar'):
                package_name_version, _ = os.path.splitext(package_name_version)
            dist_dir_path = os.path.join(install_dir, package_name_version)
        dist_dir = host.file(dist_dir_path)
        assert dist_dir.exists
        if multiversion:
            # Each instance gets a symlink pointing at its dist version.
            instance_dist_dir = host.file(os.path.join(instances_dir, instance_id))
            assert instance_dist_dir.exists
            assert instance_dist_dir.is_symlink
            assert instance_dist_dir.linked_to == dist_dir_path
        conf_file = host.file(os.path.join(conf_dir, '%s.yml' % instance_id))
        default_conf_file = host.file(os.path.join(conf_dir, '%s.yml' % app_name))
        pid_file = host.file(os.path.join(run_dir, '%s.pid' % instance_id))
        console_sock_file = host.file(os.path.join(run_dir, '%s.control' % instance_id))
        work_dir_file = host.file(os.path.join(data_dir, instance_id))
        if not utils.instance_is_expelled(instance_vars):
            assert conf_file.exists
            assert default_conf_file.exists
            assert console_sock_file.exists
            assert work_dir_file.exists
        else:
            # Expelled instances must have been cleaned up entirely.
            assert not conf_file.exists
            assert not pid_file.exists
            assert not console_sock_file.exists
            assert not work_dir_file.exists

def test_configs(host):
    """Per-instance and app-default YAML configs hold the expected content."""
    app_name = utils.get_app_name()
    machine_instances = utils.get_machine_instances(host)
    assert machine_instances
    default_conf = utils.get_cluster_var('cartridge_defaults', default={})
    not_save_cookie_in_app_config = utils.get_cluster_var('cartridge_not_save_cookie_in_app_config', False)
    if not not_save_cookie_in_app_config:
        # The cluster cookie is written into the app-wide defaults file.
        default_conf.update(cluster_cookie=utils.get_cluster_cookie())
    default_conf_file = None
    for instance in machine_instances:
        instance_vars = utils.get_instance_vars(instance)
        instance_id = utils.get_instance_id(app_name, instance_vars)
        instance_conf = instance_vars['config']
        if instance_conf.get('memtx_memory') == '{{ common_memtx_memory }}':
            # Resolve the templated memory var to its rendered value.
            instance_conf['memtx_memory'] = 268436000
        conf_dir = instance_vars.get('cartridge_conf_dir', '/etc/tarantool/conf.d')
        conf_file = host.file(os.path.join(conf_dir, '%s.yml' % instance_id))
        default_conf_file = host.file(os.path.join(conf_dir, '%s.yml' % app_name))
        if not utils.instance_is_expelled(instance_vars):
            check_conf_file(conf_file, instance_id, instance_conf)
    # The app-level defaults file (last one resolved) is checked once.
    check_conf_file(default_conf_file, app_name, default_conf)

def test_instances():
    """All configured (non-expelled, non-stateboard) instances are up and
    report the expected advertise URI and zone via the admin API."""
    configured_instances = utils.get_configured_instances()
    # Select one instance to be control
    admin_api_url = utils.get_admin_api_url()
    # Get all started instances
    query = '''
        query {
            servers {
                uri
                alias
                zone
            }
        }
    '''
    session = utils.get_authorized_session()
    response = session.post(admin_api_url, json={'query': query})
    data = utils.get_response_data(response)
    started_instances = data['servers']
    started_instances = {i['alias']: i for i in started_instances}
    # filter out expelled instances and stateboard
    configured_instances = {
        i: instance_vars for i, instance_vars in configured_instances.items()
        if not utils.instance_is_expelled(instance_vars) and not utils.instance_is_stateboard(instance_vars)
    }
    # Check if all configured instances are started and available
    assert len(configured_instances) == len(started_instances)
    assert set(configured_instances.keys()) == set(started_instances.keys())
    assert all([
        configured_instances[i]['config']['advertise_uri'] == started_instances[i]['uri']
        for i in configured_instances
    ])
    assert all([
        configured_instances[i].get('zone') == started_instances[i]['zone']
        for i in configured_instances
    ])
| 36.005587 | 108 | 0.691699 |
795b4f8386af37e5e59146042a94c0fee32016bc | 1,277 | py | Python | tests/test_collection_crs.py | hobu/Fiona | d6ec15f1c2e7a860bc57d35a51e36aeb48377a1a | [
"BSD-3-Clause"
] | null | null | null | tests/test_collection_crs.py | hobu/Fiona | d6ec15f1c2e7a860bc57d35a51e36aeb48377a1a | [
"BSD-3-Clause"
] | null | null | null | tests/test_collection_crs.py | hobu/Fiona | d6ec15f1c2e7a860bc57d35a51e36aeb48377a1a | [
"BSD-3-Clause"
] | null | null | null | import os
import re
import fiona
import fiona.crs
from .conftest import WGS84PATTERN, requires_gdal2
def test_collection_crs_wkt(path_coutwildrnp_shp):
    """The test shapefile's crs_wkt matches the WGS84 pattern."""
    with fiona.open(path_coutwildrnp_shp) as src:
        assert re.match(WGS84PATTERN, src.crs_wkt)

def test_collection_no_crs_wkt(tmpdir, path_coutwildrnp_shp):
    """crs members of a dataset with no crs can be accessed safely."""
    filename = str(tmpdir.join("test.shp"))
    with fiona.open(path_coutwildrnp_shp) as src:
        profile = src.meta
    # Strip the CRS from the copied profile before writing a new dataset.
    del profile['crs']
    del profile['crs_wkt']
    with fiona.open(filename, 'w', **profile) as dst:
        assert dst.crs_wkt == ""
        assert dst.crs == {}

@requires_gdal2
def test_collection_create_crs_wkt(tmpdir):
    """A collection can be created using crs_wkt"""
    filename = str(tmpdir.join("test.shp"))
    wkt = 'GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295],AUTHORITY["EPSG","4326"]]'
    with fiona.open(filename, 'w', schema={'geometry': 'Point', 'properties': {'foo': 'int'}}, crs_wkt=wkt, driver='GeoJSON') as dst:
        assert dst.crs_wkt == wkt
    with fiona.open(filename) as col:
        # The driver normalizes the WKT on write, so only check the prefix.
        assert col.crs_wkt.startswith('GEOGCS["WGS 84')
| 34.513514 | 176 | 0.697729 |
795b4fb75e8cd8b10fadb9bf387c63ba3cbf2c1a | 442 | py | Python | angr/procedures/libc/vsnprintf.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | [
"BSD-2-Clause"
] | 2 | 2018-05-02T17:41:36.000Z | 2020-05-18T02:49:16.000Z | angr/procedures/libc/vsnprintf.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | [
"BSD-2-Clause"
] | null | null | null | angr/procedures/libc/vsnprintf.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | [
"BSD-2-Clause"
] | 1 | 2022-02-10T02:29:38.000Z | 2022-02-10T02:29:38.000Z | import angr
######################################
# __vsnprintf
######################################
class vsnprintf(angr.SimProcedure):
    """Summary-only stub for libc ``__vsnprintf``.

    Does not emulate formatting at all: it just NUL-terminates the output
    buffer and reports ``size - 1`` characters written (the maximum a real
    vsnprintf could claim for this buffer size).  The format string and
    va_list arguments are accepted but ignored.
    """
    #pylint:disable=arguments-differ
    def run(self, str_ptr, size, fmt, ap): #pylint:disable=unused-argument
        # Write a terminating NUL at the start of the destination buffer;
        # the formatted content itself is left unmodeled.
        self.state.memory.store(str_ptr, "\x00")
        return size - 1
| 26 | 79 | 0.558824 |
795b506687de7bde60ac9026adfa7d5cbb35cd58 | 2,334 | py | Python | products/views.py | pythonmentor/teiva-p11 | 4be26edbad6e490c3991ca1ce5680e889b2ab75a | [
"MIT",
"Unlicense"
] | null | null | null | products/views.py | pythonmentor/teiva-p11 | 4be26edbad6e490c3991ca1ce5680e889b2ab75a | [
"MIT",
"Unlicense"
] | null | null | null | products/views.py | pythonmentor/teiva-p11 | 4be26edbad6e490c3991ca1ce5680e889b2ab75a | [
"MIT",
"Unlicense"
] | null | null | null | from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.shortcuts import render
from products.forms import SearchForm
from products.managers import ProductManager
from favoritecart.helpers import redirect_to_login
from favoritecart.cart import FavoriteCart
def sub_list(request):
    """Display the list of substitute for one selected product."""
    if request.method != "POST":
        # Plain GET: show an empty search form.
        return render(request, "products/sub_list.html", {"form": SearchForm()})
    form = SearchForm(request.POST)
    if not form.is_valid():
        print("form is not valid !")
        raise Http404
    # Resolve the searched product and its substitute from the user input.
    substitute, selected_product = ProductManager.search_from_user_input(
        form.cleaned_data
    )
    context = {"product": substitute, "searched": selected_product}
    return render(request, "products/sub_list.html", context)
def save(request):
    """Save the product into favorite."""
    if request.method == "POST":
        data = request.POST
        if not request.user.is_authenticated:
            # Anonymous user: stash the product/substitute pair in the
            # session cart, then send them through the login flow.
            cart = FavoriteCart(request)
            cart.add(
                {
                    "product": data["product-searched-id"],
                    "substitute": data["substitute-searched-id"],
                    "user": "user",
                }
            )
            return redirect_to_login(
                request, "pages/myfood.html", login_url="/accounts/login/"
            )
        # Authenticated user: persist immediately and show the updated list.
        ProductManager.save_product(request, data)
        favs = ProductManager.get_fav(request)
        return render(request, "pages/myfood.html", {"favorites": favs})
    else:
        raise Http404
@login_required(login_url="/accounts/login/")
def fav(request):
    """Render the "my food" page listing the current user's favorites."""
    saved = ProductManager.get_fav(request)
    context = {"favorites": saved}
    return render(request, "pages/myfood.html", context)
def detail(request):
    """Render the detail page for the product posted by the user.

    Only POST is accepted; any other method raises Http404.
    """
    if request.method != "POST":
        raise Http404
    posted = request.POST
    found = ProductManager.get_detail(posted)
    return render(request, "pages/detail.html", {"product": found})
| 31.540541 | 74 | 0.600257 |
795b50d5f50cec669b015ad0f905961876d8082c | 5,094 | py | Python | RESTful_Face_Web/RESTful_Face_Web/runtime_db/runtime_database.py | luzhoutao/face | ae5910f57ae86412e8a56adbefe4f260dde216f0 | [
"MIT"
] | null | null | null | RESTful_Face_Web/RESTful_Face_Web/runtime_db/runtime_database.py | luzhoutao/face | ae5910f57ae86412e8a56adbefe4f260dde216f0 | [
"MIT"
] | null | null | null | RESTful_Face_Web/RESTful_Face_Web/runtime_db/runtime_database.py | luzhoutao/face | ae5910f57ae86412e8a56adbefe4f260dde216f0 | [
"MIT"
] | null | null | null | # retrieve project path settings
# database connections
from django.db import connections
# system operation
import os
# logging
import logging, traceback
log = logging.getLogger(__name__)
# abstract class
from abc import ABCMeta, abstractmethod
from RESTful_Face_Web import settings
# database manager to create on the fly and initialize it !
# https://stackoverflow.com/questions/6585373/django-multiple-and-dynamic-databases
class BaseDBManager(metaclass=ABCMeta):
    """Abstract interface for on-the-fly, per-company database managers.

    Concrete subclasses (the sqlite and mysql managers) must implement
    all three operations below.

    Bug fix: the original assigned the Python-2 ``__metaclass__ = ABCMeta``
    class attribute, which Python 3 silently ignores -- the class was
    therefore never actually abstract and the ``@abstractmethod`` markers
    were not enforced.  Declaring ``metaclass=ABCMeta`` restores the
    intended behavior: instantiating this base class (or a subclass that
    misses an override) now raises TypeError.
    """
    @abstractmethod
    def create_database(self, name):
        """Create and register a new database identified by ``name``."""
        raise NotImplementedError
    @abstractmethod
    def create_table(self, db_name, Model, table_name):
        """Create ``table_name`` for ``Model`` inside database ``db_name``."""
        raise NotImplementedError
    @abstractmethod
    def drop_database(self, name):
        """Remove the database identified by ``name`` and its settings."""
        raise NotImplementedError
class SQLiteManager(BaseDBManager):
    # Manages one SQLite database file per company; the database is
    # registered with Django at runtime by exec()-ing a generated
    # settings.DATABASES snippet (also persisted to disk for restarts).
    def create_database(self, name):
        """Register a new sqlite database named after ``name`` and create
        its empty file under settings.BASE_DIR."""
        from RESTful_Face_Web import settings
        name = str(name)
        filename = os.path.join(settings.BASE_DIR, 'db_'+name+'.sqlite3')
        # tell Django there is a new database
        setting_str = '''settings.DATABASES['%s'] = {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': '%s',
        }'''% (name, filename)
        # NOTE(review): exec of a generated settings snippet -- safe only
        # while ``name`` is an internally generated identifier.
        exec(setting_str)
        save_db_settings_to_file(setting_str, name)
        # create database file
        file = open(filename, 'w+')
        file.close()
    def create_table(self, db_name, Model, table_name):
        """Run Model's generated CREATE TABLE statements against the
        sqlite file for ``db_name``."""
        # initialize the database file
        import sqlite3
        from RESTful_Face_Web import settings
        filename = os.path.join(settings.BASE_DIR, 'db_'+db_name+'.sqlite3')
        conn = sqlite3.connect(filename)
        [conn.execute(sql) for sql in Model.generate_sqlite()]
        conn.close()
        log.info("Created sqlite database %s and initialize a table '%s'!"%(db_name, table_name))
    def drop_database(self, name):
        """Unregister the database and delete both its settings file and
        its sqlite file; tolerates already-removed state."""
        try:
            connections.databases.pop(name)
            os.remove(os.path.join(settings.DB_SETTINGS_BASE_DIR, name + '.dbconf'))
            os.remove(os.path.join(settings.BASE_DIR, 'db_' + name + '.sqlite3'))
            log.info("Database %s is moved!" % (name))
        except FileNotFoundError:
            log.warning("DB %s has already been moved!" % (name))
            log.error(traceback.format_exc())
        except KeyError:
            log.warning("Database %s is not in use!"% (name))
class MySQLManager(BaseDBManager):
    # Same contract as SQLiteManager but backed by a MySQL server; one
    # schema named "company<name>" is created per company.
    def create_database(self, name):
        """Register and create the MySQL schema ``company<name>``."""
        from RESTful_Face_Web import settings
        name = str(name)
        # tell Django there is a new mysql database
        setting_str = '''settings.DATABASES['%s'] = {
            'ENGINE': 'django.db.backends.mysql',
            'USER': '%s',
            'PASSWORD': '%s',
            'NAME': 'company%s',
            'HOST': '%s',
            'PORT': '3306' ,
        }'''% (name, settings.MYSQL_USER, settings.MYSQL_PASSWORD, name, settings.MYSQL_HOST)
        # NOTE(review): exec of a generated settings snippet -- safe only
        # while ``name`` is an internally generated identifier.
        exec(setting_str)
        save_db_settings_to_file(setting_str, name)
        # create database
        import pymysql
        conn = pymysql.connect(host=settings.MYSQL_HOST, user=settings.MYSQL_USER, password=settings.MYSQL_PASSWORD)
        cursor = conn.cursor()
        cursor.execute("create database company%s" % (name))
        conn.commit()
        conn.close()
    def create_table(self, db_name, Model, table_name):
        """Run Model's generated CREATE TABLE statements inside schema
        ``company<db_name>``."""
        import pymysql
        from RESTful_Face_Web import settings
        # initializer database
        conn = pymysql.connect(host=settings.MYSQL_HOST, user=settings.MYSQL_USER, password=settings.MYSQL_PASSWORD,
                               db='company' + db_name)
        cursor = conn.cursor()
        [cursor.execute(sql) for sql in Model.generate_mysql()]
        conn.commit()
        conn.close()
        log.info("Created mysql database %s and initialize a table '%s'!" % (db_name, table_name))
    def drop_database(self, name):
        """Unregister the database, delete its settings file, then drop
        the MySQL schema; tolerates already-removed settings state."""
        from RESTful_Face_Web import settings
        print("dropping database, ", name)
        # delete setting files
        try:
            connections.databases.pop(name)
            os.remove(os.path.join(settings.DB_SETTINGS_BASE_DIR, name + '.dbconf'))
            log.info("DB setting file %s.dbconf is moved!" % (name))
        except FileNotFoundError:
            log.warning("DB setting file %s.dbconf has already been moved!" % (name))
            log.error(traceback.format_exc())
        except KeyError:
            log.warning('Database %s is not in use!' % (name))
        # delete database
        import pymysql
        conn = pymysql.connect(host=settings.MYSQL_HOST, user=settings.MYSQL_USER, password=settings.MYSQL_PASSWORD)
        cursor = conn.cursor()
        cursor.execute('drop database company%s;' % (name))
        conn.commit()
        conn.close()
        log.info("Database company%s has been dropped !" % (name))
def save_db_settings_to_file(setting_str, name):
    """Persist a generated DATABASES settings snippet to ``<name>.dbconf``.

    The snippet is re-executed on startup to re-register per-company
    databases with Django.

    Bug fix: the original opened the file without a ``with`` block, so the
    handle leaked if ``write`` raised; it also shadowed the legacy builtin
    name ``file``.  A context manager guarantees the handle is closed on
    every path.
    """
    from RESTful_Face_Web import settings
    filename = os.path.join(settings.DB_SETTINGS_BASE_DIR, name + '.dbconf')
    with open(filename, 'w+') as conf_file:
        conf_file.write(setting_str)
    log.info('Database settings file %s.dbconf saved!'%(name))
| 34.653061 | 116 | 0.647821 |
795b522afd082ec065eaf754b6cc531182ba2051 | 9,669 | py | Python | neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | null | null | null | neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | 3 | 2015-02-27T00:48:55.000Z | 2015-04-21T05:29:37.000Z | neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | 3 | 2015-02-26T00:55:17.000Z | 2020-03-01T17:05:40.000Z | # Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib import constants
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from neutron._i18n import _LE, _LW
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as n_const
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import manager
LOG = logging.getLogger(__name__)
class DhcpAgentNotifyAPI(object):
    """API for plugin to notify DHCP agent.
    This class implements the client side of an rpc interface.  The server side
    is neutron.agent.dhcp.agent.DhcpAgent. For more information about changing
    rpc interfaces, please see doc/source/devref/rpc_api.rst.
    """
    # It seems dhcp agent does not support bulk operation
    VALID_RESOURCES = ['network', 'subnet', 'port']
    VALID_METHOD_NAMES = ['network.create.end',
                          'network.update.end',
                          'network.delete.end',
                          'subnet.create.end',
                          'subnet.update.end',
                          'subnet.delete.end',
                          'port.create.end',
                          'port.update.end',
                          'port.delete.end']
    def __init__(self, topic=topics.DHCP_AGENT, plugin=None):
        # Plugin may be injected (tests) or lazily resolved via the
        # ``plugin`` property below.
        self._plugin = plugin
        target = oslo_messaging.Target(topic=topic, version='1.0')
        self.client = n_rpc.get_client(target)
        # register callbacks for router interface changes
        registry.subscribe(self._after_router_interface_created,
                           resources.ROUTER_INTERFACE, events.AFTER_CREATE)
        registry.subscribe(self._after_router_interface_deleted,
                           resources.ROUTER_INTERFACE, events.AFTER_DELETE)
    @property
    def plugin(self):
        # Lazy lookup so the notifier can be constructed before the core
        # plugin is loaded.
        if self._plugin is None:
            self._plugin = manager.NeutronManager.get_plugin()
        return self._plugin
    def _schedule_network(self, context, network, existing_agents):
        """Schedule the network to new agents
        :return: all agents associated with the network
        """
        new_agents = self.plugin.schedule_network(context, network) or []
        if new_agents:
            # Tell each newly scheduled agent about the network it now hosts.
            for agent in new_agents:
                self._cast_message(
                    context, 'network_create_end',
                    {'network': {'id': network['id']}}, agent['host'])
        elif not existing_agents:
            LOG.warning(_LW('Unable to schedule network %s: no agents '
                            'available; will retry on subsequent port '
                            'and subnet creation events.'),
                        network['id'])
        return new_agents + existing_agents
    def _get_enabled_agents(self, context, network, agents, method, payload):
        """Get the list of agents who can provide services."""
        if not agents:
            return []
        network_id = network['id']
        enabled_agents = agents
        if not cfg.CONF.enable_services_on_agents_with_admin_state_down:
            enabled_agents = [x for x in agents if x.admin_state_up]
        active_agents = [x for x in agents if x.is_active]
        len_enabled_agents = len(enabled_agents)
        len_active_agents = len(active_agents)
        if len_active_agents < len_enabled_agents:
            LOG.warning(_LW("Only %(active)d of %(total)d DHCP agents "
                            "associated with network '%(net_id)s' "
                            "are marked as active, so notifications "
                            "may be sent to inactive agents."),
                        {'active': len_active_agents,
                         'total': len_enabled_agents,
                         'net_id': network_id})
        if not enabled_agents:
            # No usable agent: only worth an error if the network actually
            # has ports and at least one subnet to serve.
            num_ports = self.plugin.get_ports_count(
                context, {'network_id': [network_id]})
            notification_required = (
                num_ports > 0 and len(network['subnets']) >= 1)
            if notification_required:
                LOG.error(_LE("Will not send event %(method)s for network "
                              "%(net_id)s: no agent available. Payload: "
                              "%(payload)s"),
                          {'method': method,
                           'net_id': network_id,
                           'payload': payload})
        return enabled_agents
    def _is_reserved_dhcp_port(self, port):
        """Return True if the port is a placeholder reserved for DHCP."""
        return port.get('device_id') == n_const.DEVICE_ID_RESERVED_DHCP_PORT
    def _notify_agents(self, context, method, payload, network_id):
        """Notify all the agents that are hosting the network."""
        # fanout is required as we do not know who is "listening"
        no_agents = not utils.is_extension_supported(
            self.plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS)
        fanout_required = method == 'network_delete_end' or no_agents
        # we do nothing on network creation because we want to give the
        # admin the chance to associate an agent to the network manually
        cast_required = method != 'network_create_end'
        if fanout_required:
            self._fanout_message(context, method, payload)
        elif cast_required:
            admin_ctx = (context if context.is_admin else context.elevated())
            network = self.plugin.get_network(admin_ctx, network_id)
            agents = self.plugin.get_dhcp_agents_hosting_networks(
                context, [network_id])
            # schedule the network first, if needed
            schedule_required = (
                method == 'subnet_create_end' or
                method == 'port_create_end' and
                not self._is_reserved_dhcp_port(payload['port']))
            if schedule_required:
                agents = self._schedule_network(admin_ctx, network, agents)
            if not agents:
                LOG.debug("Network %s is not hosted by any dhcp agent",
                          network_id)
                return
            enabled_agents = self._get_enabled_agents(
                context, network, agents, method, payload)
            for agent in enabled_agents:
                self._cast_message(
                    context, method, payload, agent.host, agent.topic)
    def _cast_message(self, context, method, payload, host,
                      topic=topics.DHCP_AGENT):
        """Cast the payload to the dhcp agent running on the host."""
        cctxt = self.client.prepare(topic=topic, server=host)
        cctxt.cast(context, method, payload=payload)
    def _fanout_message(self, context, method, payload):
        """Fanout the payload to all dhcp agents."""
        cctxt = self.client.prepare(fanout=True)
        cctxt.cast(context, method, payload=payload)
    def network_removed_from_agent(self, context, network_id, host):
        """Tell the agent on ``host`` it no longer serves the network."""
        self._cast_message(context, 'network_delete_end',
                           {'network_id': network_id}, host)
    def network_added_to_agent(self, context, network_id, host):
        """Tell the agent on ``host`` it now serves the network."""
        self._cast_message(context, 'network_create_end',
                           {'network': {'id': network_id}}, host)
    def agent_updated(self, context, admin_state_up, host):
        """Push an admin-state change to the agent on ``host``."""
        self._cast_message(context, 'agent_updated',
                           {'admin_state_up': admin_state_up}, host)
    def _after_router_interface_created(self, resource, event, trigger,
                                        **kwargs):
        # Registry callback: a router interface port appeared.
        self._notify_agents(kwargs['context'], 'port_create_end',
                            {'port': kwargs['port']},
                            kwargs['port']['network_id'])
    def _after_router_interface_deleted(self, resource, event, trigger,
                                        **kwargs):
        # Registry callback: a router interface port was removed.
        self._notify_agents(kwargs['context'], 'port_delete_end',
                            {'port_id': kwargs['port']['id']},
                            kwargs['port']['network_id'])
    def notify(self, context, data, method_name):
        """Entry point for resource events; filters and dispatches them.

        Silently ignores unknown methods/resources and payloads from
        which no network id can be extracted.
        """
        # data is {'key' : 'value'} with only one key
        if method_name not in self.VALID_METHOD_NAMES:
            return
        obj_type = list(data.keys())[0]
        if obj_type not in self.VALID_RESOURCES:
            return
        obj_value = data[obj_type]
        network_id = None
        if obj_type == 'network' and 'id' in obj_value:
            network_id = obj_value['id']
        elif obj_type in ['port', 'subnet'] and 'network_id' in obj_value:
            network_id = obj_value['network_id']
        if not network_id:
            return
        method_name = method_name.replace(".", "_")
        if method_name.endswith("_delete_end"):
            if 'id' in obj_value:
                self._notify_agents(context, method_name,
                                    {obj_type + '_id': obj_value['id']},
                                    network_id)
        else:
            self._notify_agents(context, method_name, data, network_id)
795b536ba1f9fda7e28b8e0347967f98446cd5f2 | 6,831 | py | Python | bnpy/viz/ProposalViz.py | zhaottcrystal/bnpy | 0195a0228e9e698799e52a6dfa1d051e82b43fd0 | [
"BSD-3-Clause"
] | 1 | 2019-05-14T19:56:53.000Z | 2019-05-14T19:56:53.000Z | bnpy/viz/ProposalViz.py | zhaottcrystal/bnpy | 0195a0228e9e698799e52a6dfa1d051e82b43fd0 | [
"BSD-3-Clause"
] | null | null | null | bnpy/viz/ProposalViz.py | zhaottcrystal/bnpy | 0195a0228e9e698799e52a6dfa1d051e82b43fd0 | [
"BSD-3-Clause"
] | 1 | 2020-09-01T13:21:18.000Z | 2020-09-01T13:21:18.000Z | import numpy as np
import os
import sys
import glob
from birthmove import BLogger
from viz.PlotUtil import pylab
# Pixel widths used when laying out the proposal-debug HTML report:
# table cells, square trace plots, and wide component images.
CELL_WIDTH = 200
SQIMG_WIDTH = 200
WIDEIMG_WIDTH = 600
htmlstart = """
<html>
<style>
td.comment {
border: 0px;
width: %dpx;
text-align: center;
padding-bottom: 10px;
padding-left: 10px;
}
td.png {
border: 0px;
text-align: left;
padding-bottom: 10px;
padding-left: 10px;
}
tr {
outline: thin solid black;
}
</style>
<body>
<div align=center>
""" % (CELL_WIDTH)
htmlend = """
</div>
</body>
</html>
"""
def plotELBOtermsForProposal(
        curLdict, propLdictList,
        xs=None,
        ymin=-0.5,
        ymax=0.5,
        savefilename=None,
        **kwargs):
    ''' Create trace plot of ELBO gain/loss relative to current model.

    Each top-level ELBO term (keys without underscores) is plotted as
    (proposal value - current value) across refinement steps; the total
    gets a heavier solid line.  If savefilename is given the figure is
    written to disk and all figures are closed.

    NOTE(review): ymin/ymax are accepted but never applied to the axes --
    confirm whether a pylab.ylim call was intended.
    '''
    pylab.figure()
    L = len(propLdictList)
    if xs is None:
        xs = np.arange(0, L)
    # Only plot top-level terms (keys without underscores).
    legendKeys = []
    for key in curLdict:
        if key.count('_') == 0:
            legendKeys.append(key)
    for key in legendKeys:
        if key.count('total'):
            linewidth= 4
            alpha = 1
            style = '-'
        else:
            linewidth = 3
            alpha = 0.5
            style = '--'
        ys = np.asarray([propLdictList[i][key] for i in range(L)])
        ys -= curLdict[key]
        pylab.plot(xs, ys, style,
                   color=_getLineColorFromELBOKey(key),
                   linewidth=linewidth,
                   alpha=alpha,
                   label=key)
    L = L + 1
    xlims = np.asarray([-0.75*L, L-0.5])
    pylab.xlim(xlims)
    pylab.xticks(xs)
    # Dotted zero line: points above it are ELBO gains.
    pylab.plot(xlims, np.zeros_like(xlims), 'k:')
    pylab.xlabel('num proposal steps')
    pylab.ylabel('L gain (prop - current)')
    pylab.legend(loc='lower left', fontsize=12)
    pylab.subplots_adjust(left=0.2)
    if savefilename is not None:
        pylab.savefig(savefilename, pad_inches=0)
        pylab.close('all')
def plotDocUsageForProposal(docUsageByUID, savefilename=None, **kwargs):
    ''' Make trace plot of doc usage for each component.

    docUsageByUID maps each component uid to a sequence of document
    counts, one per refinement step.  If savefilename is given the
    figure is written to disk and all figures are closed.
    '''
    pylab.figure()
    L = 0
    maxVal = 0
    for k, uid in enumerate(docUsageByUID):
        ys = np.asarray(docUsageByUID[uid])
        xs = np.arange(0, ys.size)
        if k < 6: # only a few labels fit well on a legend
            pylab.plot(xs, ys, label=uid)
        else:
            pylab.plot(xs, ys)
        L = np.maximum(L, ys.size)
        maxVal = np.maximum(maxVal, ys.max())
    # Use big chunk of left-hand side of plot for legend display
    xlims = np.asarray([-0.75*L, L-0.5])
    pylab.xlim(xlims)
    pylab.xticks(np.arange(1, L))
    pylab.ylim([0, 1.1*maxVal])
    pylab.xlabel('num proposal steps')
    pylab.ylabel('num docs using each comp')
    pylab.legend(loc='upper left', fontsize=12)
    pylab.subplots_adjust(left=0.2)
    if savefilename is not None:
        pylab.savefig(savefilename, pad_inches=0)
        pylab.close('all')
def makeSingleProposalHTMLStr(DebugInfo, b_debugOutputDir='', **kwargs):
    ''' Create string representing complete HTML page for one proposal.

    Builds a table with one row per debug artifact found in
    b_debugOutputDir (original comps, log messages, ELBO/doc-usage
    traces, per-step refinement images, merge-cleanup images).

    Returns
    -------
    s : string
        hold plain-text HTML content
    '''
    htmlstr = htmlstart
    htmlstr += "<table>"
    # Row 1: original comps
    htmlstr += "<tr>"
    htmlstr += '<td class="comment">Original model.<br />Before proposal.</td>'
    htmlstr += '<td class="png">%s</td>' % (
        makeImgTag("OrigComps.png"))
    htmlstr += "</tr>\n"
    assert BLogger.RecentMessages is not None
    # Row : status report
    htmlstr += "<tr>"
    htmlstr += '<td class="comment">Proposal summary:</td>'
    htmlstr += '<td><pre>%s</pre></td>' % ('\n'.join(BLogger.RecentMessages))
    htmlstr += "</tr>\n"
    if os.path.exists(
            os.path.join(b_debugOutputDir, "ProposalTrace_ELBO.png")):
        htmlstr += "<tr>"
        htmlstr += '<td class="comment">ELBO gain at each refine step.</td>'
        htmlstr += '<td class="png">%s</td>' % (
            makeImgTag("ProposalTrace_ELBO.png"))
        htmlstr += "</tr>\n"
    if os.path.exists(
            os.path.join(b_debugOutputDir, "ProposalTrace_DocUsage.png")):
        htmlstr += "<tr>"
        htmlstr += '<td class="comment">Number of documents used by each' + \
            " new comp at each refinement step</td>"
        htmlstr += '<td class="png">%s</td>' % (
            makeImgTag("ProposalTrace_DocUsage.png"))
        htmlstr += "</tr>\n"
    if os.path.exists(os.path.join(b_debugOutputDir, "NewComps_Init.png")):
        htmlstr += "<tr>"
        htmlstr += '<td class="comment">Proposed initial topics.</td>'
        htmlstr += '<td class="png">%s</td>' % (
            makeImgTag("NewComps_Init.png"))
        htmlstr += "</tr>\n"
    # One row per refinement step image; the merge-cleanup row is
    # interleaved right after the step recorded in DebugInfo['mergestep'].
    fnames = glob.glob(os.path.join(b_debugOutputDir,"NewComps_Step*.png"))
    mnames = glob.glob(os.path.join(b_debugOutputDir,"NewComps_AfterM*.png"))
    for stepID in range(len(fnames)):
        basenameWithPNG = "NewComps_Step%d.png" % (stepID+1)
        htmlstr += "<tr>"
        htmlstr += '<td class="comment">After refinement step %d.</td>' % (
            stepID+1)
        htmlstr += '<td class="png">%s</td>' % (
            makeImgTag(basenameWithPNG))
        htmlstr += "</tr>\n"
        if len(mnames) > 0 and (stepID+1) == DebugInfo['mergestep']:
            basenameWithPNG = "NewComps_AfterMerge.png"
            htmlstr += "<tr>"
            htmlstr += '<td class="comment">After merge cleanup.</td>'
            htmlstr += '<td class="png">%s</td>' % (
                makeImgTag(basenameWithPNG))
            htmlstr += "</tr>\n"
    mnames = glob.glob(os.path.join(b_debugOutputDir,"MergeComps_*.png"))
    for mergeID in range(len(mnames)):
        basenameWithPNG = "MergeComps_%d.png" % (mergeID+1)
        htmlstr += "<tr>"
        htmlstr += '<td class="comment">Cleanup Phase: Merged Pair %d</td>' %(
            mergeID+1)
        htmlstr += '<td class="png">%s</td>' % (
            makeImgTag(basenameWithPNG))
        htmlstr += "</tr>\n"
    htmlstr += "</table>"
    htmlstr += htmlend
    return htmlstr
def makeImgTag(imgbasename="ELBOGain"):
    """Return an HTML <img> tag for the given image basename.

    Trace plots (basename contains "Trace") get the narrow square
    width; every other image gets the wide width.
    """
    is_trace_plot = "Trace" in imgbasename
    chosen_width = SQIMG_WIDTH if is_trace_plot else WIDEIMG_WIDTH
    return "<img src=%s width=%d>" % (imgbasename, chosen_width)
def _getLineColorFromELBOKey(key):
''' Helper method to assign line colors by ELBO term name
Returns
-------
s : str representing a color value to matplotlib
'''
if key.count('total'):
return 'k'
elif key.count('data'):
return 'b'
elif key.count('entrop'):
return 'r'
elif key.count('alloc'):
return 'c'
else:
return 'm'
| 29.7 | 79 | 0.563168 |
795b5380472dd5d180ef97d5cb9cfc6952e5fec9 | 24,485 | py | Python | lib/sqlalchemy/testing/assertions.py | lvillis/sqlalchemy | 889d05c444264bf1b6d11386459d3360cc529d27 | [
"MIT"
] | null | null | null | lib/sqlalchemy/testing/assertions.py | lvillis/sqlalchemy | 889d05c444264bf1b6d11386459d3360cc529d27 | [
"MIT"
] | null | null | null | lib/sqlalchemy/testing/assertions.py | lvillis/sqlalchemy | 889d05c444264bf1b6d11386459d3360cc529d27 | [
"MIT"
] | null | null | null | # testing/assertions.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
import contextlib
from itertools import filterfalse
import re
import sys
import warnings
from . import assertsql
from . import config
from . import engines
from . import mock
from .exclusions import db_spec
from .util import fail
from .. import exc as sa_exc
from .. import schema
from .. import sql
from .. import types as sqltypes
from .. import util
from ..engine import default
from ..engine import url
from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
from ..util import decorator
def expect_warnings(*messages, **kw):
    """Context manager which expects one or more warnings.
    With no arguments, squelches all SAWarning and RemovedIn20Warning emitted via
    sqlalchemy.util.warn and sqlalchemy.util.warn_limited.   Otherwise
    pass string expressions that will match selected warnings via regex;
    all non-matching warnings are sent through.
    The expect version **asserts** that the warnings were in fact seen.
    Note that the test suite sets SAWarning warnings to raise exceptions.
    """ # noqa
    # Delegates to _expect_warnings with the two warning categories the
    # test suite treats as fatal.  kw passes through regex=/assert_= etc.
    return _expect_warnings(
        (sa_exc.RemovedIn20Warning, sa_exc.SAWarning), messages, **kw
    )
@contextlib.contextmanager
def expect_warnings_on(db, *messages, **kw):
    """Context manager which expects one or more warnings on specific
    dialects.
    The expect version **asserts** that the warnings were in fact seen.
    """
    spec = db_spec(db)
    # When the current backend does not match the given spec, the
    # warnings are not expected: run the body unfiltered.
    if isinstance(db, str) and not spec(config._current):
        yield
    else:
        with expect_warnings(*messages, **kw):
            yield
def emits_warning(*messages):
    """Decorator form of expect_warnings().
    Note that emits_warning does **not** assert that the warnings
    were in fact seen.
    """
    @decorator
    def decorate(fn, *args, **kw):
        # assert_=False: suppress matching warnings without requiring them.
        with expect_warnings(assert_=False, *messages):
            return fn(*args, **kw)
    return decorate
# Deprecation-specific variants of expect_warnings(): same semantics,
# but filtering SADeprecationWarning / Base20DeprecationWarning.
def expect_deprecated(*messages, **kw):
    return _expect_warnings(sa_exc.SADeprecationWarning, messages, **kw)
def expect_deprecated_20(*messages, **kw):
    return _expect_warnings(sa_exc.Base20DeprecationWarning, messages, **kw)
def emits_warning_on(db, *messages):
    """Mark a test as emitting a warning on a specific dialect.
    With no arguments, squelches all SAWarning failures.  Or pass one or more
    strings; these will be matched to the root of the warning description by
    warnings.filterwarnings().
    Note that emits_warning_on does **not** assert that the warnings
    were in fact seen.
    """
    @decorator
    def decorate(fn, *args, **kw):
        # assert_=False: suppress matching warnings without requiring them.
        with expect_warnings_on(db, assert_=False, *messages):
            return fn(*args, **kw)
    return decorate
def uses_deprecated(*messages):
    """Mark a test as immune from fatal deprecation warnings.
    With no arguments, squelches all SADeprecationWarning failures.
    Or pass one or more strings; these will be matched to the root
    of the warning description by warnings.filterwarnings().
    As a special case, you may pass a function name prefixed with //
    and it will be re-written as needed to match the standard warning
    verbiage emitted by the sqlalchemy.util.deprecated decorator.
    Note that uses_deprecated does **not** assert that the warnings
    were in fact seen.
    """
    @decorator
    def decorate(fn, *args, **kw):
        # assert_=False: suppress matching warnings without requiring them.
        with expect_deprecated(*messages, assert_=False):
            return fn(*args, **kw)
    return decorate
# Module-level state shared by nested _expect_warnings() invocations:
# the active filter patterns, the patterns not yet matched, and the
# warning classes currently intercepted.  None means "no active block".
_FILTERS = None
_SEEN = None
_EXC_CLS = None
@contextlib.contextmanager
def _expect_warnings(
    exc_cls,
    messages,
    regex=True,
    assert_=True,
    raise_on_any_unexpected=False,
):
    """Core implementation behind expect_warnings()/expect_deprecated().

    Patches warnings.warn so that warnings of class ``exc_cls`` matching
    ``messages`` are swallowed (and recorded as seen) while everything
    else passes through.  Nested invocations merge their filters into
    the outermost block, which performs the final "all seen" assertion.
    """
    global _FILTERS, _SEEN, _EXC_CLS
    if regex:
        filters = [re.compile(msg, re.I | re.S) for msg in messages]
    else:
        filters = list(messages)
    if _FILTERS is not None:
        # nested call; update _FILTERS and _SEEN, return.  outer
        # block will assert our messages
        assert _SEEN is not None
        assert _EXC_CLS is not None
        _FILTERS.extend(filters)
        _SEEN.update(filters)
        _EXC_CLS += (exc_cls,)
        yield
    else:
        seen = _SEEN = set(filters)
        _FILTERS = filters
        _EXC_CLS = (exc_cls,)
        if raise_on_any_unexpected:
            # Strict mode: any warning that slips past the filters fails.
            def real_warn(msg, *arg, **kw):
                raise AssertionError("Got unexpected warning: %r" % msg)
        else:
            real_warn = warnings.warn
        def our_warn(msg, *arg, **kw):
            # warnings.warn may be called with a Warning instance or with
            # (message, category); normalize to (str, class).
            if isinstance(msg, _EXC_CLS):
                exception = type(msg)
                msg = str(msg)
            elif arg:
                exception = arg[0]
            else:
                exception = None
            if not exception or not issubclass(exception, _EXC_CLS):
                return real_warn(msg, *arg, **kw)
            if not filters and not raise_on_any_unexpected:
                return
            for filter_ in filters:
                if (regex and filter_.match(msg)) or (
                    not regex and filter_ == msg
                ):
                    seen.discard(filter_)
                    break
            else:
                real_warn(msg, *arg, **kw)
        with mock.patch("warnings.warn", our_warn), mock.patch(
            "sqlalchemy.util.SQLALCHEMY_WARN_20", True
        ), mock.patch("sqlalchemy.util.deprecations.SQLALCHEMY_WARN_20", True):
            try:
                yield
            finally:
                _SEEN = _FILTERS = _EXC_CLS = None
                if assert_:
                    assert not seen, "Warnings were not seen: %s" % ", ".join(
                        "%r" % (s.pattern if regex else s) for s in seen
                    )
def global_cleanup_assertions():
    """Check things that have to be finalized at the end of a test suite.
    Hardcoded at the moment, a modular system can be built here
    to support things like PG prepared transactions, tables all
    dropped, etc.
    """
    # Currently the only global check: no connections left checked out.
    _assert_no_stray_pool_connections()
def _assert_no_stray_pool_connections():
    # Fails the suite if the testing reaper finds unreturned connections.
    engines.testing_reaper.assert_all_closed()
def eq_regex(a, b, msg=None):
    """Assert that string ``a`` matches regex pattern ``b`` at its start."""
    pattern_matched = re.match(b, a)
    assert pattern_matched, msg or "%r !~ %r" % (a, b)
def eq_(a, b, msg=None):
    """Assert a == b, with repr messaging on failure."""
    assert a == b, msg or "{!r} != {!r}".format(a, b)
def ne_(a, b, msg=None):
    """Assert a != b, with repr messaging on failure."""
    assert a != b, msg or "{!r} == {!r}".format(a, b)
def le_(a, b, msg=None):
    """Assert a <= b, with repr messaging on failure.

    Bug fix: the default failure message previously read ``"%r != %r"``,
    copy-pasted from eq_(); it now describes the ordering that was
    actually violated.
    """
    assert a <= b, msg or "%r is not <= %r" % (a, b)
def is_instance_of(a, b, msg=None):
    """Assert isinstance(a, b), with repr messaging on failure."""
    assert isinstance(a, b), msg or "{!r} is not an instance of {!r}".format(
        a, b
    )
# Convenience wrappers around is_()/is_not() for the common None and
# truthiness identity checks.
def is_none(a, msg=None):
    is_(a, None, msg=msg)
def is_not_none(a, msg=None):
    is_not(a, None, msg=msg)
def is_true(a, msg=None):
    is_(bool(a), True, msg=msg)
def is_false(a, msg=None):
    is_(bool(a), False, msg=msg)
def is_(a, b, msg=None):
    """Assert a is b, with repr messaging on failure."""
    assert a is b, msg or "{!r} is not {!r}".format(a, b)
def is_not(a, b, msg=None):
    """Assert a is not b, with repr messaging on failure."""
    assert a is not b, msg or "{!r} is {!r}".format(a, b)
# deprecated. See #5429
# Legacy alias kept for backwards compatibility; prefer is_not().
is_not_ = is_not
def in_(a, b, msg=None):
    """Assert a in b, with repr messaging on failure."""
    assert a in b, msg or "{!r} not in {!r}".format(a, b)
def not_in(a, b, msg=None):
    """Assert a not in b, with repr messaging on failure."""
    assert a not in b, msg or "{!r} is in {!r}".format(a, b)
# deprecated. See #5429
# Legacy alias kept for backwards compatibility; prefer not_in().
not_in_ = not_in
def startswith_(a, fragment, msg=None):
    """Assert a.startswith(fragment), with repr messaging on failure."""
    has_prefix = a.startswith(fragment)
    assert has_prefix, msg or "%r does not start with %r" % (a, fragment)
def eq_ignore_whitespace(a, b, msg=None):
    """Assert a == b after stripping one leading whitespace character,
    removing newlines, and collapsing runs of spaces."""
    def _normalize(text):
        text = re.sub(r"^\s+?|\n", "", text)
        return re.sub(r" {2,}", " ", text)
    norm_a = _normalize(a)
    norm_b = _normalize(b)
    assert norm_a == norm_b, msg or "%r != %r" % (norm_a, norm_b)
def _assert_proper_exception_context(exception):
"""assert that any exception we're catching does not have a __context__
without a __cause__, and that __suppress_context__ is never set.
Python 3 will report nested as exceptions as "during the handling of
error X, error Y occurred". That's not what we want to do. we want
these exceptions in a cause chain.
"""
if (
exception.__context__ is not exception.__cause__
and not exception.__suppress_context__
):
assert False, (
"Exception %r was correctly raised but did not set a cause, "
"within context %r as its cause."
% (exception, exception.__context__)
)
# Public assert_raises variants; all delegate to _assert_raises.  The
# "_context_ok" versions skip the __cause__ chain check performed by
# _assert_proper_exception_context.
def assert_raises(except_cls, callable_, *args, **kw):
    return _assert_raises(except_cls, callable_, args, kw, check_context=True)
def assert_raises_context_ok(except_cls, callable_, *args, **kw):
    return _assert_raises(except_cls, callable_, args, kw)
def assert_raises_message(except_cls, msg, callable_, *args, **kwargs):
    return _assert_raises(
        except_cls, callable_, args, kwargs, msg=msg, check_context=True
    )
def assert_raises_message_context_ok(
    except_cls, msg, callable_, *args, **kwargs
):
    return _assert_raises(except_cls, callable_, args, kwargs, msg=msg)
def _assert_raises(
    except_cls, callable_, args, kwargs, msg=None, check_context=False
):
    # Invoke the callable inside the _expect_raises context manager and
    # return the exception instance that was captured.
    with _expect_raises(except_cls, msg, check_context) as ec:
        callable_(*args, **kwargs)
    return ec.error
class _ErrorContainer:
    # Mutable holder yielded by _expect_raises so callers can inspect
    # the captured exception after the block exits.
    error = None
@contextlib.contextmanager
def _expect_raises(except_cls, msg=None, check_context=False):
    """Context manager asserting the block raises ``except_cls``.

    Optionally checks the message against regex ``msg`` and, when
    ``check_context`` is set and we are not already inside an exception
    handler, that the raised exception used a proper cause chain.
    """
    ec = _ErrorContainer()
    if check_context:
        are_we_already_in_a_traceback = sys.exc_info()[0]
    try:
        yield ec
        success = False
    except except_cls as err:
        ec.error = err
        success = True
        if msg is not None:
            # I'm often pdbing here, and "err" above isn't
            # in scope, so assign the string explicitly
            error_as_string = str(err)
            assert re.search(msg, error_as_string, re.UNICODE), "%r !~ %s" % (
                msg,
                error_as_string,
            )
        if check_context and not are_we_already_in_a_traceback:
            _assert_proper_exception_context(err)
        print(str(err).encode("utf-8"))
    # it's generally a good idea to not carry traceback objects outside
    # of the except: block, but in this case especially we seem to have
    # hit some bug in either python 3.10.0b2 or greenlet or both which
    # this seems to fix:
    # https://github.com/python-greenlet/greenlet/issues/242
    del ec
    # assert outside the block so it works for AssertionError too !
    assert success, "Callable did not raise an exception"
# Context-manager forms of assert_raises / assert_raises_message.
def expect_raises(except_cls, check_context=True):
    return _expect_raises(except_cls, check_context=check_context)
def expect_raises_message(except_cls, msg, check_context=True):
    return _expect_raises(except_cls, msg=msg, check_context=check_context)
class AssertsCompiledSQL:
def assert_compile(
self,
clause,
result,
params=None,
checkparams=None,
for_executemany=False,
check_literal_execute=None,
check_post_param=None,
dialect=None,
checkpositional=None,
check_prefetch=None,
use_default_dialect=False,
allow_dialect_select=False,
supports_default_values=True,
supports_default_metavalue=True,
literal_binds=False,
render_postcompile=False,
schema_translate_map=None,
render_schema_translate=False,
default_schema_name=None,
from_linting=False,
):
if use_default_dialect:
dialect = default.DefaultDialect()
dialect.supports_default_values = supports_default_values
dialect.supports_default_metavalue = supports_default_metavalue
elif allow_dialect_select:
dialect = None
else:
if dialect is None:
dialect = getattr(self, "__dialect__", None)
if dialect is None:
dialect = config.db.dialect
elif dialect == "default":
dialect = default.DefaultDialect()
dialect.supports_default_values = supports_default_values
dialect.supports_default_metavalue = supports_default_metavalue
elif dialect == "default_enhanced":
dialect = default.StrCompileDialect()
elif isinstance(dialect, str):
dialect = url.URL.create(dialect).get_dialect()()
if default_schema_name:
dialect.default_schema_name = default_schema_name
kw = {}
compile_kwargs = {}
if schema_translate_map:
kw["schema_translate_map"] = schema_translate_map
if params is not None:
kw["column_keys"] = list(params)
if literal_binds:
compile_kwargs["literal_binds"] = True
if render_postcompile:
compile_kwargs["render_postcompile"] = True
if for_executemany:
kw["for_executemany"] = True
if render_schema_translate:
kw["render_schema_translate"] = True
if from_linting or getattr(self, "assert_from_linting", False):
kw["linting"] = sql.FROM_LINTING
from sqlalchemy import orm
if isinstance(clause, orm.Query):
stmt = clause._statement_20()
stmt._label_style = LABEL_STYLE_TABLENAME_PLUS_COL
clause = stmt
if compile_kwargs:
kw["compile_kwargs"] = compile_kwargs
class DontAccess:
def __getattribute__(self, key):
raise NotImplementedError(
"compiler accessed .statement; use "
"compiler.current_executable"
)
class CheckCompilerAccess:
def __init__(self, test_statement):
self.test_statement = test_statement
self._annotations = {}
self.supports_execution = getattr(
test_statement, "supports_execution", False
)
if self.supports_execution:
self._execution_options = test_statement._execution_options
if hasattr(test_statement, "_returning"):
self._returning = test_statement._returning
if hasattr(test_statement, "_inline"):
self._inline = test_statement._inline
if hasattr(test_statement, "_return_defaults"):
self._return_defaults = test_statement._return_defaults
@property
def _variant_mapping(self):
return self.test_statement._variant_mapping
def _default_dialect(self):
return self.test_statement._default_dialect()
def compile(self, dialect, **kw):
return self.test_statement.compile.__func__(
self, dialect=dialect, **kw
)
def _compiler(self, dialect, **kw):
return self.test_statement._compiler.__func__(
self, dialect, **kw
)
def _compiler_dispatch(self, compiler, **kwargs):
if hasattr(compiler, "statement"):
with mock.patch.object(
compiler, "statement", DontAccess()
):
return self.test_statement._compiler_dispatch(
compiler, **kwargs
)
else:
return self.test_statement._compiler_dispatch(
compiler, **kwargs
)
# no construct can assume it's the "top level" construct in all cases
# as anything can be nested. ensure constructs don't assume they
# are the "self.statement" element
c = CheckCompilerAccess(clause).compile(dialect=dialect, **kw)
if isinstance(clause, sqltypes.TypeEngine):
cache_key_no_warnings = clause._static_cache_key
if cache_key_no_warnings:
hash(cache_key_no_warnings)
else:
cache_key_no_warnings = clause._generate_cache_key()
if cache_key_no_warnings:
hash(cache_key_no_warnings[0])
param_str = repr(getattr(c, "params", {}))
param_str = param_str.encode("utf-8").decode("ascii", "ignore")
print(("\nSQL String:\n" + str(c) + param_str).encode("utf-8"))
cc = re.sub(r"[\n\t]", "", str(c))
eq_(cc, result, "%r != %r on dialect %r" % (cc, result, dialect))
if checkparams is not None:
eq_(c.construct_params(params), checkparams)
if checkpositional is not None:
p = c.construct_params(params)
eq_(tuple([p[x] for x in c.positiontup]), checkpositional)
if check_prefetch is not None:
eq_(c.prefetch, check_prefetch)
if check_literal_execute is not None:
eq_(
{
c.bind_names[b]: b.effective_value
for b in c.literal_execute_params
},
check_literal_execute,
)
if check_post_param is not None:
eq_(
{
c.bind_names[b]: b.effective_value
for b in c.post_compile_params
},
check_post_param,
)
class ComparesTables:
    """Mixin asserting that a reflected table matches its original definition."""

    def assert_tables_equal(self, table, reflected_table, strict_types=False):
        """Compare *table* and *reflected_table* column by column.

        With ``strict_types=True`` the reflected column's type must be an
        instance of the original column's exact type class; otherwise only
        type affinity is compared (see :meth:`assert_types_base`).
        """
        assert len(table.c) == len(reflected_table.c)
        for col, r_col in zip(table.c, reflected_table.c):
            eq_(col.name, r_col.name)
            assert r_col is reflected_table.c[col.name]
            eq_(col.primary_key, r_col.primary_key)
            eq_(col.nullable, r_col.nullable)

            if not strict_types:
                self.assert_types_base(r_col, col)
            else:
                msg = "Type '%s' doesn't correspond to type '%s'"
                assert isinstance(r_col.type, type(col.type)), msg % (
                    r_col.type,
                    col.type,
                )

            # String lengths must round-trip through reflection.
            if isinstance(col.type, sqltypes.String):
                eq_(col.type.length, r_col.type.length)

            fk_targets = {f.column.name for f in col.foreign_keys}
            r_fk_targets = {f.column.name for f in r_col.foreign_keys}
            eq_(fk_targets, r_fk_targets)

            if col.server_default:
                assert isinstance(
                    r_col.server_default, schema.FetchedValue
                )

        assert len(table.primary_key) == len(reflected_table.primary_key)
        for col in table.primary_key:
            assert reflected_table.primary_key.columns[col.name] is not None

    def assert_types_base(self, c1, c2):
        """Assert the two columns' types share the same type affinity."""
        assert c1.type._compare_type_affinity(
            c2.type
        ), "On column %r, type '%s' doesn't correspond to type '%s'" % (
            c1.name,
            c1.type,
            c2.type,
        )
class AssertsExecutionResults:
    """Mixin with assertion helpers for ORM/Core result rows and for the SQL
    statements emitted while a callable runs."""

    def assert_result(self, result, class_, *objects):
        """Assert *result* yields instances of *class_* matching *objects* in order."""
        result = list(result)
        print(repr(result))
        self.assert_list(result, class_, objects)

    def assert_list(self, result, class_, list_):
        """Assert *result* and *list_* have equal length and matching rows pairwise."""
        self.assert_(
            len(result) == len(list_),
            "result list is not the same size as test list, "
            + "for class "
            + class_.__name__,
        )
        for i in range(0, len(list_)):
            self.assert_row(class_, result[i], list_[i])

    def assert_row(self, class_, rowobj, desc):
        """Assert a single object matches the attribute spec *desc*.

        Tuple values recurse: ``(cls, [spec, ...])`` checks a collection via
        assert_list; ``(cls, spec)`` checks a nested object via assert_row.
        """
        self.assert_(
            rowobj.__class__ is class_, "item class is not " + repr(class_)
        )
        for key, value in desc.items():
            if isinstance(value, tuple):
                if isinstance(value[1], list):
                    self.assert_list(getattr(rowobj, key), value[0], value[1])
                else:
                    self.assert_row(value[0], getattr(rowobj, key), value[1])
            else:
                self.assert_(
                    getattr(rowobj, key) == value,
                    "attribute %s value %s does not match %s"
                    % (key, getattr(rowobj, key), value),
                )

    def assert_unordered_result(self, result, cls, *expected):
        """As assert_result, but the order of objects is not considered.
        The algorithm is very expensive but not a big deal for the small
        numbers of rows that the test suite manipulates.
        """

        class immutabledict(dict):
            # Hash by identity so plain-dict specs can live inside a set.
            def __hash__(self):
                return id(self)

        found = util.IdentitySet(result)
        expected = {immutabledict(e) for e in expected}

        # Every result object must be of the expected class.
        for wrong in filterfalse(lambda o: isinstance(o, cls), found):
            fail(
                'Unexpected type "%s", expected "%s"'
                % (type(wrong).__name__, cls.__name__)
            )

        if len(found) != len(expected):
            fail(
                'Unexpected object count "%s", expected "%s"'
                % (len(found), len(expected))
            )

        NOVALUE = object()  # sentinel: attribute absent on the object

        def _compare_item(obj, spec):
            # True when *obj* satisfies every key/value pair in *spec*.
            for key, value in spec.items():
                if isinstance(value, tuple):
                    try:
                        self.assert_unordered_result(
                            getattr(obj, key), value[0], *value[1]
                        )
                    except AssertionError:
                        return False
                else:
                    if getattr(obj, key, NOVALUE) != value:
                        return False
            return True

        # Greedy matching: remove each found object as soon as it matches
        # one expected spec; any unmatched spec is a failure.
        for expected_item in expected:
            for found_item in found:
                if _compare_item(found_item, expected_item):
                    found.remove(found_item)
                    break
            else:
                fail(
                    "Expected %s instance with attributes %s not found."
                    % (cls.__name__, repr(expected_item))
                )
        return True

    def sql_execution_asserter(self, db=None):
        """Return a context manager recording SQL issued against *db*."""
        if db is None:
            from . import db as db

        return assertsql.assert_engine(db)

    def assert_sql_execution(self, db, callable_, *rules):
        """Run *callable_* and assert its emitted SQL satisfies *rules*."""
        with self.sql_execution_asserter(db) as asserter:
            result = callable_()
        asserter.assert_(*rules)
        return result

    def assert_sql(self, db, callable_, rules):
        """Like assert_sql_execution, accepting dicts as AllOf shorthand."""
        newrules = []
        for rule in rules:
            if isinstance(rule, dict):
                newrule = assertsql.AllOf(
                    *[assertsql.CompiledSQL(k, v) for k, v in rule.items()]
                )
            else:
                newrule = assertsql.CompiledSQL(*rule)
            newrules.append(newrule)

        return self.assert_sql_execution(db, callable_, *newrules)

    def assert_sql_count(self, db, callable_, count):
        """Assert *callable_* emits exactly *count* statements against *db*."""
        self.assert_sql_execution(
            db, callable_, assertsql.CountStatements(count)
        )

    def assert_multiple_sql_count(self, dbs, callable_, counts):
        """Assert per-engine statement counts for one invocation of *callable_*.

        All asserters are entered before the call and exited (with their
        counts checked) in the finally block, even if *callable_* raises.
        """
        recs = [
            (self.sql_execution_asserter(db), db, count)
            for (db, count) in zip(dbs, counts)
        ]
        asserters = []
        for ctx, db, count in recs:
            asserters.append(ctx.__enter__())
        try:
            return callable_()
        finally:
            for asserter, (ctx, db, count) in zip(asserters, recs):
                ctx.__exit__(None, None, None)
                asserter.assert_(assertsql.CountStatements(count))

    @contextlib.contextmanager
    def assert_execution(self, db, *rules):
        """Context-manager form of assert_sql_execution."""
        with self.sql_execution_asserter(db) as asserter:
            yield
        asserter.assert_(*rules)

    def assert_statement_count(self, db, count):
        """Context manager asserting *count* statements are emitted inside it."""
        return self.assert_execution(db, assertsql.CountStatements(count))
| 31.391026 | 81 | 0.592526 |
795b53c65f620b1767c05c6b1c48a7f42790874a | 1,993 | py | Python | tests/test_config.py | infrascloudy/acme | 5e0ffe29f8fc8008662f511de3e6cfaf9049b74e | [
"MIT"
] | null | null | null | tests/test_config.py | infrascloudy/acme | 5e0ffe29f8fc8008662f511de3e6cfaf9049b74e | [
"MIT"
] | null | null | null | tests/test_config.py | infrascloudy/acme | 5e0ffe29f8fc8008662f511de3e6cfaf9049b74e | [
"MIT"
] | null | null | null | from unittest import TestCase
import os
import sys
import ConfigParser
import cStringIO
import tempfile
import shutil
import acme
from acme import metadata
__author__ = metadata.authors
__copyright__ = metadata.copyright
__license__ = metadata.license
def setenv(name, value):
    """Set the environment variable *name* to *value* for this process."""
    os.environ.update({name: value})
def unsetenv(name):
    """Remove *name* from the process environment.

    The original assigned an empty string, which leaves the variable
    present (``name in os.environ`` stayed true) — not "unset".  ``pop``
    actually deletes it and is a no-op when the variable is absent.
    """
    os.environ.pop(name, None)
class TestConfigFilenames(TestCase):
    """Placeholder tests for config-file name resolution under XDG variables.

    NOTE(review): every case below is an unimplemented stub (``pass``).
    """

    def test_get_config_filenames_no_env(self):
        # TODO: implement — expected filenames with no XDG variables set.
        pass

    def test_get_config_filenames_with_XDG_CONFIG_HOME_set(self):
        # TODO: implement.
        pass

    def test_get_config_filenames_with_XDG_CONFIG_DIRS_set(self):
        # TODO: implement.
        pass

    def test_get_config_filenames_with_XDG_vars_set(self):
        # TODO: implement — both XDG variables set at once.
        pass
ANSIBLE_DEFAULTS = {'ansible_managed':
'This file is managed remotely, all changes will be lost'}
class TestReadConfig(TestCase):
    """Placeholder tests for reading config files (precedence, project root).

    NOTE(review): all methods, including the fixture helpers, are
    unimplemented stubs (``pass``).
    """

    def setUp(self):
        # TODO: create temp config directories for the test.
        pass

    def tearDown(self):
        # TODO: remove temp directories created in setUp.
        pass

    def _make_configfile(self, dir, sect, *data):
        # TODO: helper — write a config file with section *sect* and *data*.
        pass

    def _read_config(self, project_dir):
        # TODO: helper — read configuration rooted at *project_dir*.
        pass

    def test_read_config_files_simple(self):
        # TODO: implement.
        pass

    def test_read_config_files_precedence(self):
        # TODO: implement — later files should override earlier ones.
        pass

    def test_read_config_files_with_project_root(self):
        # TODO: implement.
        pass

    def test_read_config_files_with_project_root_precedence(self):
        # TODO: implement.
        pass
class TestReadConfig2(TestCase):
    """Second placeholder suite for config reading (defaults handling).

    NOTE(review): all methods are unimplemented stubs (``pass``).
    """

    def setUp(self):
        # TODO: create fixtures.
        pass

    def tearDown(self):
        # TODO: clean up fixtures.
        pass

    def _make_configfile(self, dir, sect, *data):
        # TODO: helper — write a config file.
        pass

    def _read_config(self, project_dir):
        # TODO: helper — read configuration rooted at *project_dir*.
        pass

    def test_defaults(self):
        # TODO: implement — presumably checks ANSIBLE_DEFAULTS; verify.
        pass

    def test_read_config_files_simple(self):
        # TODO: implement.
        pass
class TestReadConfigDefaultsForPlattforms(TestCase):
    """Placeholder tests for per-platform default config locations.

    NOTE(review): class name typo ("Plattforms") kept as-is since external
    test discovery may reference it; all methods are stubs (``pass``).
    """

    def setUp(self):
        # TODO: implement.
        pass

    def tearDown(self):
        # TODO: implement.
        pass

    def test_defaults_linux(self):
        # TODO: implement.
        pass

    def test_defaults_windows_without_APPDATA(self):
        # TODO: implement.
        pass

    def test_defaults_windows_with_APPDATA(self):
        # TODO: implement.
        pass

    def test_defaults_os_x(self):
        # TODO: implement.
        pass
| 18.453704 | 78 | 0.687908 |
795b54755163de077676dd4aba3eb5a3457ea329 | 4,677 | py | Python | tests/snappy_pipeline/workflows/test_workflows_somatic_msi_calling.py | PotatoThrone/snappy-pipeline | 31200eba84bff8e459e9e210d6d95e2984627f5c | [
"MIT"
] | 5 | 2021-02-26T10:39:56.000Z | 2021-12-23T07:53:26.000Z | tests/snappy_pipeline/workflows/test_workflows_somatic_msi_calling.py | PotatoThrone/snappy-pipeline | 31200eba84bff8e459e9e210d6d95e2984627f5c | [
"MIT"
] | 93 | 2021-02-22T11:23:59.000Z | 2022-03-31T09:58:39.000Z | tests/snappy_pipeline/workflows/test_workflows_somatic_msi_calling.py | PotatoThrone/snappy-pipeline | 31200eba84bff8e459e9e210d6d95e2984627f5c | [
"MIT"
] | 3 | 2021-02-22T11:44:59.000Z | 2021-06-21T19:33:53.000Z | # -*- coding: utf-8 -*-
"""Tests for the somatic_msi_calling workflow module code"""
import textwrap
import pytest
import ruamel.yaml as yaml
from snakemake.io import Wildcards
from snappy_pipeline.workflows.somatic_msi_calling import SomaticMsiCallingWorkflow
from .conftest import patch_module_fs
__author__ = "Clemens Messerschmidt"
@pytest.fixture(scope="module")  # otherwise: performance issues
def minimal_config():
    """Return YAML parsing result for configuration"""
    # Smallest configuration the somatic_msi_calling step accepts: reference
    # genome, an ngs_mapping step with bwa, the mantis tool config, and one
    # matched-cancer data set.
    return yaml.round_trip_load(
        textwrap.dedent(
            r"""
        static_data_config:
          reference:
            path: /path/to/ref.fa
        step_config:
          ngs_mapping:
            tools:
              dna: ['bwa']
            bwa:
              path_index: /path/to/bwa/index.fasta
          somatic_msi_calling:
            tools: ['mantis']
            path_ngs_mapping: ../ngs_mapping   # REQUIRED
            loci_bed: /path/to/hg19/loci.bed   # REQUIRED
        data_sets:
          first_batch:
            file: sheet.tsv
            search_patterns:
            - {'left': '*/*/*_R1.fastq.gz', 'right': '*/*/*_R2.fastq.gz'}
            search_paths: ['/path']
            type: matched_cancer
            naming_scheme: only_secondary_id
        """
        ).lstrip()
    )
@pytest.fixture
def somatic_msi_calling_workflow(
    dummy_workflow,
    minimal_config,
    dummy_cluster_config,
    config_lookup_paths,
    work_dir,
    config_paths,
    cancer_sheet_fake_fs,
    mocker,
):
    """Return SomaticMsiCallingWorkflow object pre-configured with cancer sheet"""
    # Patch out file-system related things in abstract (the crawling link in step is defined there)
    patch_module_fs("snappy_pipeline.workflows.abstract", cancer_sheet_fake_fs, mocker)
    # Stand-in for the upstream ngs_mapping step: prefix any requested path
    # with its (fake) output directory.
    dummy_workflow.globals = {"ngs_mapping": lambda x: "NGS_MAPPING/" + x}
    # Construct the workflow object
    return SomaticMsiCallingWorkflow(
        dummy_workflow,
        minimal_config,
        dummy_cluster_config,
        config_lookup_paths,
        config_paths,
        work_dir,
    )
# Tests for FeatureCountsStepPart ------------------------------------------------------------------
def test_mantis_step_part_get_input_files(somatic_msi_calling_workflow):
    """Mantis 'run' receives the tumor/normal BAM+BAI pairs from ngs_mapping."""
    wildcards = Wildcards(fromdict={"library_name": "P001-T1-DNA1-WGS1", "mapper": "bwa"})
    expected = {
        "normal_bai": "NGS_MAPPING/output/bwa.P001-N1-DNA1-WGS1/out/bwa.P001-N1-DNA1-WGS1.bam.bai",
        "normal_bam": "NGS_MAPPING/output/bwa.P001-N1-DNA1-WGS1/out/bwa.P001-N1-DNA1-WGS1.bam",
        "tumor_bai": "NGS_MAPPING/output/bwa.P001-T1-DNA1-WGS1/out/bwa.P001-T1-DNA1-WGS1.bam.bai",
        "tumor_bam": "NGS_MAPPING/output/bwa.P001-T1-DNA1-WGS1/out/bwa.P001-T1-DNA1-WGS1.bam",
    }
    # get_input_files returns a function of wildcards; call it with the tumor library.
    assert somatic_msi_calling_workflow.get_input_files("mantis", "run")(wildcards) == expected
def test_mantis_step_part_get_output_files(somatic_msi_calling_workflow):
    """Mantis 'run' must declare the results file and its status marker."""
    prefix = "work/mantis.{mapper}.{library_name}/out/mantis.{mapper}.{library_name}_results"
    wanted = {
        "result": prefix + ".txt",
        "status": prefix + ".txt.status",
    }
    produced = somatic_msi_calling_workflow.get_output_files("mantis", "run")
    assert produced == wanted
def test_mantis_step_part_get_log_file(somatic_msi_calling_workflow):
    """Mantis 'run' log path follows the snakemake.<tool>_<action>.log scheme."""
    wanted = "work/mantis.{mapper}.{library_name}/log/snakemake.mantis_run.log"
    produced = somatic_msi_calling_workflow.get_log_file("mantis", "run")
    assert produced == wanted
# Tests for SomaticMsiCallingWorkflow --------------------------------------------------------------
def test_somatic_msi_calling_workflow(somatic_msi_calling_workflow):
    """Test simple functionality of the workflow"""
    # Check created sub steps
    expected = ["link_out", "mantis"]
    assert list(sorted(somatic_msi_calling_workflow.sub_steps.keys())) == expected
    # Check result file construction: one results + one status file per
    # tumor library in the fake cancer sheet (P001-T1, P002-T1, P002-T2).
    expected = [
        "output/mantis.bwa.P001-T1-DNA1-WGS1/out/mantis.bwa.P001-T1-DNA1-WGS1_results.txt",
        "output/mantis.bwa.P001-T1-DNA1-WGS1/out/mantis.bwa.P001-T1-DNA1-WGS1_results.txt.status",
        "output/mantis.bwa.P002-T1-DNA1-WGS1/out/mantis.bwa.P002-T1-DNA1-WGS1_results.txt",
        "output/mantis.bwa.P002-T1-DNA1-WGS1/out/mantis.bwa.P002-T1-DNA1-WGS1_results.txt.status",
        "output/mantis.bwa.P002-T2-DNA1-WGS1/out/mantis.bwa.P002-T2-DNA1-WGS1_results.txt",
        "output/mantis.bwa.P002-T2-DNA1-WGS1/out/mantis.bwa.P002-T2-DNA1-WGS1_results.txt.status",
    ]
    # Compare as sets — result file order is not significant.
    actual = set(somatic_msi_calling_workflow.get_result_files())
    expected = set(expected)
    assert actual == expected
| 36.255814 | 100 | 0.668805 |
795b548761b1c07cb92bc8835aaaa00ac9a7d887 | 12,529 | py | Python | artemis/core/algo.py | artemis-analytics/artemis | 3e1eebdd4628145ee7d8923567b5e6f53a2e5244 | [
"Apache-2.0"
] | 4 | 2020-02-29T15:02:05.000Z | 2021-05-13T18:50:58.000Z | artemis/core/algo.py | artemis-analytics/artemis | 3e1eebdd4628145ee7d8923567b5e6f53a2e5244 | [
"Apache-2.0"
] | 25 | 2020-02-25T19:29:21.000Z | 2020-04-03T15:06:59.000Z | artemis/core/algo.py | ryanmwhitephd/artemis | 3e1eebdd4628145ee7d8923567b5e6f53a2e5244 | [
"Apache-2.0"
] | 2 | 2021-08-12T09:40:51.000Z | 2021-08-12T09:42:09.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © Her Majesty the Queen in Right of Canada, as represented
# by the Minister of Statistics Canada, 2019.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Algorithms
"""
from collections import OrderedDict
import importlib
from pprint import pformat
from artemis.logger import Logger
from artemis.core.abcalgo import AbcAlgoBase
from artemis.core.properties import Properties
from artemis.core.gate import ArtemisGateSvc
from artemis.io.protobuf.configuration_pb2 import Module as Algo_pb
from artemis.core.gate import IOMetaMixin, MetaMixin
# TODO Create an interface class to AlgoBase to expose the run,
# finalize methods to framework
# Interface IAlgoBase class to expose the methods to the framework
# (apparently, I should not write a framework, see Fluent Python ...
# I am bored but probably getting paid)
# Concrete implementation of interface with AlgoBase
# Concrete base class provides the mixins or other ABCs
# Likely we want to provide the Job class instance to retrieve
# job.histbook
# job.timers
# job.objectstore
# Inherited classes for user-defined methods MyAlgo
class AlgoBase(MetaMixin, metaclass=AbcAlgoBase):
    """Base class for user-defined Artemis algorithms.

    Provides logging configuration, a ``Properties`` bag filled from keyword
    arguments, access to the shared ``ArtemisGateSvc``, and round-tripping of
    the algorithm configuration through dicts and protobuf messages.
    Subclasses implement the framework hooks (``initialize``, ``book``,
    ``rebook``, ``execute``, ``finalize``).
    """

    def __init__(self, name, **kwargs):
        """
        Access the Base logger directly through
        self.__logger
        Derived class use the classmethods for info, debug, warn, error
        All formatting, loglevel checks, etc...
        can be done through the classmethods
        Can we use staticmethods in artemis to make uniform
        formatting of info, debug, warn, error?
        """
        # Configure logging
        Logger.configure(self, **kwargs)
        self.__logger.debug("__init__ AlgoBase")
        # name will be mangled to _AlgoBase__name
        self.__name = name
        self.properties = Properties()
        # Every keyword argument becomes a user-visible algorithm property.
        for key in kwargs:
            self.properties.add_property(key, kwargs[key])
        self.gate = ArtemisGateSvc()

    def __init_subclass__(cls, **kwargs):
        """
        See PEP 487
        Essentially acts as a class method decorator
        """
        super().__init_subclass__(**kwargs)

    @property
    def name(self):
        """
        Algorithm name
        """
        return self.__name

    @staticmethod
    def load(logger, **kwargs):
        """
        Returns the class instance from a dictionary

        Expected keys: ``name``, ``module``, ``class`` and ``properties``.
        """
        logger.info("Loading Algo %s" % kwargs["name"])
        try:
            module = importlib.import_module(kwargs["module"])
        except ImportError:
            logger.error("Unable to load module %s" % kwargs["module"])
            raise
        except Exception as e:
            logger.error("Unknow error loading module: %s" % e)
            raise
        try:
            class_ = getattr(module, kwargs["class"])
        except AttributeError:
            logger.error("%s: missing attribute %s" % (kwargs["name"], kwargs["class"]))
            raise
        except Exception as e:
            logger.error("Reason: %s" % e)
            raise
        logger.debug(pformat(kwargs["properties"]))
        # Update the logging level of
        # algorithms if loglevel not set
        # Ensures user-defined algos get the artemis level logging
        if "loglevel" not in kwargs["properties"]:
            kwargs["properties"]["loglevel"] = logger.getEffectiveLevel()
        try:
            instance = class_(kwargs["name"], **kwargs["properties"])
        except AttributeError:
            logger.error("%s: missing attribute %s" % (kwargs["name"], "properties"))
            raise
        except Exception as e:
            # Bug fix: the original '"%s: Cannot initialize %s" % e' supplied a
            # single value to a two-placeholder format string, which raised a
            # TypeError inside this handler and masked the real failure.
            logger.error("%s: Cannot initialize: %s" % (kwargs["name"], e))
            raise
        return instance

    def to_dict(self):
        """
        Create json-serialize class
        to create the algorithm from all properties

        name - instance name as found in menu
        module - where the class algo resides
        class - concrete class name
        properties - all the user-defined properties
        """
        _dict = OrderedDict()
        _dict["name"] = self.name
        _dict["class"] = self.__class__.__name__
        _dict["module"] = self.__module__
        _dict["properties"] = self.properties.to_dict()
        return _dict

    def to_msg(self):
        """Serialize this algorithm's configuration to a protobuf Module."""
        message = Algo_pb()
        message.name = self.name
        message.klass = self.__class__.__name__
        message.module = self.__module__
        message.properties.CopyFrom(self.properties.to_msg())
        return message

    @staticmethod
    def from_msg(logger, msg):
        """Instantiate an algorithm from a protobuf Module message."""
        logger.info("Loading Algo from msg %s", msg.name)
        try:
            module = importlib.import_module(msg.module)
        except ImportError:
            logger.error("Unable to load module %s", msg.module)
            raise
        except Exception as e:
            logger.error("Unknow error loading module: %s" % e)
            raise
        try:
            class_ = getattr(module, msg.klass)
        except AttributeError:
            logger.error("%s: missing attribute %s" % (msg.name, msg.klass))
            raise
        except Exception as e:
            logger.error("Reason: %s" % e)
            raise
        properties = Properties.from_msg(msg.properties)
        logger.debug(pformat(properties))
        # Update the logging level of
        # algorithms if loglevel not set
        # Ensures user-defined algos get the artemis level logging
        if "loglevel" not in properties:
            properties["loglevel"] = logger.getEffectiveLevel()
        try:
            instance = class_(msg.name, **properties)
        except AttributeError:
            logger.error("%s: missing attribute %s" % (msg.name, "properties"))
            raise
        except Exception as e:
            # Bug fix: format string has two placeholders; supply both values.
            logger.error("%s: Cannot initialize: %s" % (msg.name, e))
            raise
        return instance

    def lock(self):
        """
        Lock all properties for algorithm
        """
        self.properties.lock = True

    def initialize(self):
        """
        Framework initialize
        """
        raise NotImplementedError

    def book(self):
        """
        Book histograms
        """
        raise NotImplementedError

    def rebook(self):
        """
        Rebook with new binnings
        """
        raise NotImplementedError

    def execute(self, payload):
        """
        Algo always accepts the output Node on a graph
        Data is accessed via the Parent.payload
        """
        raise NotImplementedError

    def finalize(self):
        """
        report timings, counters, etc..
        """
        raise NotImplementedError
class IOAlgoBase(MetaMixin, IOMetaMixin, metaclass=AbcAlgoBase):
    """Base class for I/O-capable Artemis algorithms.

    NOTE(review): this class duplicates AlgoBase almost verbatim (it adds the
    IOMetaMixin and drops ``__init_subclass__``); a shared base would remove
    the duplication, but the hierarchy is kept as-is to avoid changing the
    metaclass/mixin resolution order callers may rely on.
    """

    def __init__(self, name, **kwargs):
        """
        Access the Base logger directly through
        self.__logger
        Derived class use the classmethods for info, debug, warn, error
        All formatting, loglevel checks, etc...
        can be done through the classmethods
        Can we use staticmethods in artemis to make uniform
        formatting of info, debug, warn, error?
        """
        # Configure logging
        Logger.configure(self, **kwargs)
        self.__logger.debug("__init__ AlgoBase")
        # name will be mangled to _IOAlgoBase__name
        self.__name = name
        self.properties = Properties()
        # Every keyword argument becomes a user-visible algorithm property.
        for key in kwargs:
            self.properties.add_property(key, kwargs[key])
        self.gate = ArtemisGateSvc()

    @property
    def name(self):
        """
        Algorithm name
        """
        return self.__name

    @staticmethod
    def load(logger, **kwargs):
        """
        Returns the class instance from a dictionary

        Expected keys: ``name``, ``module``, ``class`` and ``properties``.
        """
        logger.info("Loading Algo %s" % kwargs["name"])
        try:
            module = importlib.import_module(kwargs["module"])
        except ImportError:
            logger.error("Unable to load module %s" % kwargs["module"])
            raise
        except Exception as e:
            logger.error("Unknow error loading module: %s" % e)
            raise
        try:
            class_ = getattr(module, kwargs["class"])
        except AttributeError:
            logger.error("%s: missing attribute %s" % (kwargs["name"], kwargs["class"]))
            raise
        except Exception as e:
            logger.error("Reason: %s" % e)
            raise
        logger.debug(pformat(kwargs["properties"]))
        # Update the logging level of
        # algorithms if loglevel not set
        # Ensures user-defined algos get the artemis level logging
        if "loglevel" not in kwargs["properties"]:
            kwargs["properties"]["loglevel"] = logger.getEffectiveLevel()
        try:
            instance = class_(kwargs["name"], **kwargs["properties"])
        except AttributeError:
            logger.error("%s: missing attribute %s" % (kwargs["name"], "properties"))
            raise
        except Exception as e:
            # Bug fix: the original '"%s: Cannot initialize %s" % e' supplied a
            # single value to a two-placeholder format string, which raised a
            # TypeError inside this handler and masked the real failure.
            logger.error("%s: Cannot initialize: %s" % (kwargs["name"], e))
            raise
        return instance

    def to_dict(self):
        """
        Create json-serialize class
        to create the algorithm from all properties

        name - instance name as found in menu
        module - where the class algo resides
        class - concrete class name
        properties - all the user-defined properties
        """
        _dict = OrderedDict()
        _dict["name"] = self.name
        _dict["class"] = self.__class__.__name__
        _dict["module"] = self.__module__
        _dict["properties"] = self.properties.to_dict()
        return _dict

    def to_msg(self):
        """Serialize this algorithm's configuration to a protobuf Module."""
        message = Algo_pb()
        message.name = self.name
        message.klass = self.__class__.__name__
        message.module = self.__module__
        message.properties.CopyFrom(self.properties.to_msg())
        return message

    @staticmethod
    def from_msg(logger, msg):
        """Instantiate an algorithm from a protobuf Module message."""
        logger.info("Loading Algo from msg %s", msg.name)
        try:
            module = importlib.import_module(msg.module)
        except ImportError:
            logger.error("Unable to load module %s", msg.module)
            raise
        except Exception as e:
            logger.error("Unknow error loading module: %s" % e)
            raise
        try:
            class_ = getattr(module, msg.klass)
        except AttributeError:
            logger.error("%s: missing attribute %s" % (msg.name, msg.klass))
            raise
        except Exception as e:
            logger.error("Reason: %s" % e)
            raise
        properties = Properties.from_msg(msg.properties)
        logger.debug(pformat(properties))
        # Update the logging level of
        # algorithms if loglevel not set
        # Ensures user-defined algos get the artemis level logging
        if "loglevel" not in properties:
            properties["loglevel"] = logger.getEffectiveLevel()
        try:
            instance = class_(msg.name, **properties)
        except AttributeError:
            logger.error("%s: missing attribute %s" % (msg.name, "properties"))
            raise
        except Exception as e:
            # Bug fix: format string has two placeholders; supply both values.
            logger.error("%s: Cannot initialize: %s" % (msg.name, e))
            raise
        return instance

    def lock(self):
        """
        Lock all properties for algorithm
        """
        self.properties.lock = True

    def initialize(self):
        """
        Framework initialize
        """
        raise NotImplementedError

    def book(self):
        """
        Book histograms
        """
        raise NotImplementedError

    def rebook(self):
        """
        Rebook with new binnings
        """
        raise NotImplementedError

    def execute(self, payload):
        """
        Algo always accepts the output Node on a graph
        Data is accessed via the Parent.payload
        """
        raise NotImplementedError

    def finalize(self):
        """
        report timings, counters, etc..
        """
        raise NotImplementedError
| 30.708333 | 88 | 0.600447 |
795b54b528f702318074d84677f9fed5ea4117d0 | 526 | py | Python | ci_testbed/project_template/leetlib/checkVersion.py | monsdar/docker-testbed | 618a1d69b857ec9afed0dbb6849a4cd4f4140115 | [
"MIT"
] | null | null | null | ci_testbed/project_template/leetlib/checkVersion.py | monsdar/docker-testbed | 618a1d69b857ec9afed0dbb6849a4cd4f4140115 | [
"MIT"
] | null | null | null | ci_testbed/project_template/leetlib/checkVersion.py | monsdar/docker-testbed | 618a1d69b857ec9afed0dbb6849a4cd4f4140115 | [
"MIT"
] | null | null | null |
import re
import subprocess
import sys

# TODO: Use --raw (or whatever #3913 comes up with)
INSPECTCMD = "conan inspect conanfile.py -a version"

# SemVer patterns: MAJOR.MINOR.PATCH plus an optional "-prerelease" tag.
# Fixes over the original:
#   * raw strings, so "\." is a real regex escape rather than a Python
#     string-literal DeprecationWarning;
#   * '+' instead of '*', so each component requires at least one digit
#     (the old patterns matched degenerate strings such as "..").
# NOTE: the short pattern also matches the numeric prefix of a tagged
# version, so the long pattern only widens acceptance, never narrows it.
SEMVER_SHORT_REGEX = re.compile(r"[0-9]+\.[0-9]+\.[0-9]+")
SEMVER_LONG_REGEX = re.compile(r"[0-9]+\.[0-9]+\.[0-9]+-[a-zA-Z0-9]+")

# Ask conan for the package version; fail the CI job if it is not SemVer.
output = subprocess.check_output(INSPECTCMD, shell=True).decode("utf-8")
if not (SEMVER_SHORT_REGEX.search(output) or SEMVER_LONG_REGEX.search(output)):
    print("ERROR: No valid SemVer found")
    sys.exit(1)
print("Valid SemVer found: " + output)
| 30.941176 | 103 | 0.695817 |
795b554bca5a81e6db64269c6ec43391198555c5 | 2,299 | py | Python | pixivpy_async/client.py | LXY1226/pixivpy-async | c417cd838d198ce5c360ea63a6248844dbecd542 | [
"Unlicense"
] | 106 | 2019-09-15T02:58:37.000Z | 2022-03-22T00:52:05.000Z | pixivpy_async/client.py | ethpch/pixivpy-async | 0df6755c4167d30ce189c4c4385e1de80b72dcf4 | [
"Unlicense"
] | 23 | 2020-01-07T23:29:11.000Z | 2022-03-02T11:42:05.000Z | pixivpy_async/client.py | ethpch/pixivpy-async | 0df6755c4167d30ce189c4c4385e1de80b72dcf4 | [
"Unlicense"
] | 21 | 2019-10-29T19:02:50.000Z | 2022-03-01T01:09:59.000Z | import asyncio
import aiohttp
class PixivClient:
    """Factory around an ``aiohttp.ClientSession`` with optional proxy and
    SNI-bypass support; usable directly (``start``/``close``) or as an
    async context manager."""

    def __init__(self, limit=30, timeout=10, env=False, internal=False, proxy=None, bypass=False):
        """
        When 'env' is True and 'proxy' is None, possible proxies will be
        obtained automatically (wrong proxy may be obtained).

        When 'proxy' is not None, it will force the proxy to be used and
        'env' will have no effect.

        proxy <str> is used for a single proxy with a url:
        'socks5://user:password@127.0.0.1:1080'

        If you want to use proxy chaining, read https://github.com/romis2012/aiohttp-socks.
        """
        kwargs = {'limit_per_host': limit}

        if bypass:
            # Disable certificate verification and plug in a custom resolver
            # to bypass SNI-based blocking.
            import ssl
            from .bypass_sni import ByPassResolver
            ssl_ctx = ssl.SSLContext()
            ssl_ctx.check_hostname = False
            ssl_ctx.verify_mode = ssl.CERT_NONE
            kwargs.update({'ssl': ssl_ctx, 'resolver': ByPassResolver()})

        if proxy:
            try:
                from aiohttp_socks import ProxyConnector
                self.conn = ProxyConnector.from_url(proxy, **kwargs)
                _flag = False
            except ModuleNotFoundError as e:
                # aiohttp-socks is mandatory for socks proxies; http(s)
                # proxies can fall back to aiohttp's per-request 'proxy='.
                if proxy.startswith('socks'):
                    raise e
                else:
                    self.conn = aiohttp.TCPConnector(**kwargs)
                    _flag = True
        else:
            self.conn = aiohttp.TCPConnector(**kwargs)

        self.internal = internal

        self.client = aiohttp.ClientSession(
            connector=self.conn,
            timeout=aiohttp.ClientTimeout(total=timeout),
            trust_env=env,
        )

        if proxy and _flag:
            # Route every request of this session through the http(s) proxy.
            from functools import partial
            self.client.head = partial(self.client.head, proxy=proxy)
            self.client.get = partial(self.client.get, proxy=proxy)
            self.client.post = partial(self.client.post, proxy=proxy)

    def start(self):
        """Return the underlying aiohttp ClientSession."""
        return self.client

    async def close(self):
        # Yield to the event loop once before closing so pending callbacks run.
        await asyncio.sleep(0)
        await self.client.close()

    async def __aenter__(self):
        return self.client

    async def __aexit__(self, exc_type, exc, tb):
        # Fix: the original duplicated close()'s body here; delegating keeps
        # the shutdown logic in a single place.
        await self.close()
795b55e771d5e833b8c0b9adcaeb971a0a3363ab | 4,898 | py | Python | tests/apollo/test_skvbc_auto_view_change.py | evdzhurov/concord-bft | 2e4fdabe0228b51d4d43398158e97d4e36ff974a | [
"Apache-2.0"
] | null | null | null | tests/apollo/test_skvbc_auto_view_change.py | evdzhurov/concord-bft | 2e4fdabe0228b51d4d43398158e97d4e36ff974a | [
"Apache-2.0"
] | null | null | null | tests/apollo/test_skvbc_auto_view_change.py | evdzhurov/concord-bft | 2e4fdabe0228b51d4d43398158e97d4e36ff974a | [
"Apache-2.0"
] | null | null | null | # Concord
#
# Copyright (c) 2019 VMware, Inc. All Rights Reserved.
#
# This product is licensed to you under the Apache 2.0 license (the "License").
# You may not use this product except in compliance with the Apache 2.0 License.
#
# This product may include a number of subcomponents with separate copyright
# notices and license terms. Your use of these subcomponents is subject to the
# terms and conditions of the subcomponent's license, as noted in the LICENSE
# file.
import os.path
import random
import unittest
from os import environ
from util.test_base import ApolloTest
from util import skvbc as kvbc
from util.bft import with_trio, with_bft_network, KEY_FILE_PREFIX
from util.skvbc_history_tracker import verify_linearizability
def start_replica_cmd(builddir, replica_id):
    """Build the argv list that launches an skvbc replica via subprocess.Popen.

    Timeout values are strings because they are passed straight through as
    command-line arguments.
    """
    status_timer_ms = "500"
    view_change_timeout_ms = "1000"
    auto_primary_rotation_timeout_ms = "5000"
    binary = os.path.join(
        builddir, "tests", "simpleKVBC", "TesterReplica", "skvbc_replica"
    )
    cmd = [binary]
    cmd += ["-k", KEY_FILE_PREFIX]
    cmd += ["-i", str(replica_id)]
    cmd += ["-s", status_timer_ms]
    cmd += ["-v", view_change_timeout_ms]
    cmd += ["-a", auto_primary_rotation_timeout_ms]
    return cmd
class SkvbcAutoViewChangeTest(ApolloTest):
    """End-to-end checks that timer-driven (automatic) view change fires."""

    @with_trio
    @with_bft_network(start_replica_cmd)
    @verify_linearizability()
    async def test_auto_vc_all_nodes_up_no_requests(self, bft_network, tracker):
        """
        This test aims to validate automatic view change
        in the absence of any client messages:
        1) Start a full BFT network
        2) Do nothing (wait for automatic view change to kick-in)
        3) Check that view change has occurred (necessarily, automatic view change)
        4) Perform a "read-your-writes" check in the new view
        """
        bft_network.start_all_replicas()
        skvbc = kvbc.SimpleKVBCProtocol(bft_network, tracker)
        initial_primary = 0
        # do nothing - just wait for an automatic view change
        await bft_network.wait_for_view(
            replica_id=random.choice(
                bft_network.all_replicas(without={initial_primary})),
            expected=lambda v: v > initial_primary,
            err_msg="Make sure automatic view change has occurred."
        )
        await skvbc.read_your_writes()

    @with_trio
    @with_bft_network(start_replica_cmd)
    @verify_linearizability()
    async def test_auto_vc_when_primary_down(self, bft_network, tracker):
        """
        This test aims to validate automatic view change
        when the primary is down
        1) Start a full BFT network
        2) Stop the initial primary replica
        3) Do nothing (wait for automatic view change to kick-in)
        4) Check that view change has occurred (necessarily, automatic view change)
        5) Perform a "read-your-writes" check in the new view
        """
        bft_network.start_all_replicas()
        skvbc = kvbc.SimpleKVBCProtocol(bft_network, tracker)
        initial_primary = 0
        bft_network.stop_replica(initial_primary)
        # do nothing - just wait for an automatic view change
        await bft_network.wait_for_view(
            replica_id=random.choice(
                bft_network.all_replicas(without={initial_primary})),
            expected=lambda v: v > initial_primary,
            err_msg="Make sure automatic view change has occurred."
        )
        await skvbc.read_your_writes()

    @unittest.skip("Unstable because of BC-5101")
    @with_trio
    @with_bft_network(start_replica_cmd)
    @verify_linearizability()
    async def test_auto_vc_all_nodes_up_fast_path(self, bft_network, tracker):
        """
        This test aims to validate automatic view change
        while messages are being processed on the fast path
        1) Start a full BFT network
        2) Send a batch of write commands
        3) Make sure view change occurred at some point while processing the writes
        4) Check that all writes have been processed on the fast commit path
        5) Perform a "read-your-writes" check in the new view
        """
        bft_network.start_all_replicas()
        skvbc = kvbc.SimpleKVBCProtocol(bft_network, tracker)
        initial_primary = 0
        for _ in range(150):
            await skvbc.send_write_kv_set()
        await bft_network.wait_for_view(
            replica_id=random.choice(
                bft_network.all_replicas(without={initial_primary})),
            expected=lambda v: v > initial_primary,
            err_msg="Make sure automatic view change has occurred."
        )
        # NOTE(review): `key` and `val` are never defined in this test, so the
        # next line would raise NameError if the @unittest.skip above were
        # removed; the written pair should be captured from
        # send_write_kv_set() (or equivalent) before asserting — TODO confirm
        # that helper's return contract and fix.
        await skvbc.assert_kv_write_executed(key, val)
        await bft_network.assert_fast_path_prevalent()
        await skvbc.read_your_writes()
| 37.106061 | 90 | 0.679869 |
795b565ebc3e8bb41454c6c140a653fbd5978669 | 4,520 | py | Python | code/save_specific/covid19_data.py | ajoer/Newswork-on-Wikipedia | cba1aeffade8cccb0782c856128719ca904a6779 | [
"MIT"
] | 1 | 2020-02-21T10:14:04.000Z | 2020-02-21T10:14:04.000Z | code/save_specific/covid19_data.py | ankajor/Newswork-on-Wikipedia | cba1aeffade8cccb0782c856128719ca904a6779 | [
"MIT"
] | null | null | null | code/save_specific/covid19_data.py | ankajor/Newswork-on-Wikipedia | cba1aeffade8cccb0782c856128719ca904a6779 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Calls the WikiRevParser and extracts information per revision.
This file does *not* save the entire revisions, but rather specific informations per revision:
- timestamp
- size of content
- editor (name or IP adress)
- edit type (e.g. content or style edit)
Output: JSON file where each entry is a revision.
"""
import argparse
import os
import re
import revision_analysis
import string
import time
import utils_io as uio
import utils_visualization as uviz
from collections import Counter, defaultdict, OrderedDict
from WikiRevParser.wikirevparser import wikirevparser
# Command-line interface.
# - positional 'event' selects the input file resources/events/<event>.tsv
# - --language restricts processing to a single language code (debugging aid)
# - --check_os ("y"/"n") controls skipping of already-processed output files
parser = argparse.ArgumentParser(description='''Extracts specific information per revision of a Wikipedia page. To overcome data storage issues, the revision histories are not saved, only the extracted information. Used for COVID19 analysis''')
parser.add_argument("event", help="e.g. 'covid19'.")
parser.add_argument("--language", help="e.g. 'nl' (for debugging).")
parser.add_argument("--check_os", default="y")
args = parser.parse_args()
def get_language_titles():
	""" Extract language/title pairs from the event input file.

	Reads resources/events/<event>.tsv, where each line is a tab-separated
	(falling back to comma-separated) pair of language code and page title.
	Header lines ("lang"), '%'-prefixed languages (which can't be scraped)
	and - when --language was given - all other languages are skipped.

	Returns a dict mapping language code -> page title.
	"""
	language_titles = {}
	# use a context manager so the file handle is closed deterministically
	# (the original open(...).readlines() leaked the handle until GC)
	with open("resources/events/%s.tsv" % args.event) as event_file:
		input_file = event_file.readlines()
	for line in sorted(input_file):
		try:
			language, title = line.split('\t')[0], line.split('\t')[1].strip()
		except IndexError:
			# no tab present - fall back to comma-separated format
			language, title = line.split(',')[0], line.split(',')[1].strip()
		if args.language:
			if language != args.language: continue
		if language == "lang": continue
		if language.startswith("%"): continue # languages with % in front of them can't be scraped.
		language_titles[language] = title
	return language_titles
def get_revisions(language, title):
	""" Extract and parse the Wikipedia revision history for a page.

	Keyword Parameters:
	language -- Wikipedia language code, e.g. 'nl'
	title -- page title in that language edition

	Returns the parsed revisions (presumably a timestamp-keyed mapping,
	newest first, since main() reverses the keys - confirm against
	wikirevparser), or None when the page does not exist or its
	revisions cannot be parsed.
	"""
	time.sleep(3)  # throttle requests between pages to be polite to the API
	parser_instance = wikirevparser.ProcessRevisions(language, title)
	page = parser_instance.wikipedia_page()
	if page is None:
		return None
	# parse_revisions() already yields None on failure, so return it directly
	return parser_instance.parse_revisions()
def determine_edit_type(values, previous_values):
	"""Classify an edit as "content" or "editorial".

	Compares the element counts of the current revision against the previous
	revision. It is only a content contribution if:
	1) images are added, or
	2) both text and links/urls are added (not text in isolation).
	Everything else counts as editorial:
	- words only
	- categories
	- any deletions

	Keyword Parameters:
	values -- dict of element counts for the current revision (see get_values)
	previous_values -- dict of element counts for the preceding revision;
		missing keys are treated as 0 (e.g. for the very first revision).

	Returns the string "content" or "editorial".
	"""
	changes = {key: values[key] - previous_values.get(key, 0) for key in values}
	if changes["images"] > 0:
		return "content"
	if changes["words"] > 0 and changes["links"] + changes["urls"] > 0:
		return "content"
	return "editorial"
def get_values(revision):
	"""Count the tracked elements of a single revision.

	Returns a dict mapping element name ("words", "images", "links",
	"urls", "categories") to its count in *revision*. Single-character
	punctuation tokens are excluded from the word count.
	(A "sections" count existed historically but is not emitted.)
	"""
	tokens = revision["content"].split()
	word_total = len([token for token in tokens if token not in string.punctuation])
	return {
		"words": word_total,
		"images": len(revision["images"]),
		"links": len(revision["links"]),
		"urls": len(revision["urls"]),
		"categories": len(revision["categories"]),
	}
def main():
	"""
	Get revision histories and use the size changes of the different elements to determine edit type.
	Output dictionary where each key is a timestamp and the value a dictionary with the following:
	1) size of content
	2) wikipedian
	3) edit type

	One JSON file is written per language under data/<event>/.
	"""
	language_titles = get_language_titles()
	for language in language_titles.keys():
		title = language_titles[language]
		if args.check_os == "y":
			# skip languages whose output file already exists on disk
			if os.path.isfile("data/%s/%s.json" % (args.event, language)):
				print("%s has already been processed, moving on..." % language)
				continue
		print("\nLanguage:\t", language)
		print("Title:\t\t", title)
		revisions = get_revisions(language, title)
		if revisions is None: continue
		# reverse so deltas run oldest -> newest (assumes the parser returns
		# revisions newest-first - confirm against wikirevparser)
		timestamps = list(revisions.keys())
		timestamps.reverse()
		output_dict = OrderedDict()
		# the first revision is compared against an all-zero baseline
		previous_values = {
			"words": 0,
			"images": 0,
			"links": 0,
			"urls": 0,
			"categories": 0
		}
		# n is unused; enumerate kept from the original iteration
		for n,timestamp in enumerate(timestamps):
			values = get_values(revisions[timestamp])
			timestamp_output = {}
			timestamp_output["wikipedian"] = revisions[timestamp]["user"]
			timestamp_output["words"] = values["words"]
			timestamp_output["edit_type"] = determine_edit_type(values, previous_values)
			previous_values = values
			output_dict[timestamp] = timestamp_output
		# persist this language's per-revision summary
		uio.save_to_json("%s/" % args.event, language, output_dict)
if __name__ == "__main__":
	main()
| 29.542484 | 244 | 0.715487 |
795b569a8dce9b5452193d865310227756c358cd | 9,101 | py | Python | server/api/resources/source/warehouse/support/discover.py | nwfsc-fram/Warehouse | c1a06ab7bacd6c15ab5fe2bb6076e3ea3c95757d | [
"BSD-3-Clause"
] | 5 | 2019-02-27T03:06:02.000Z | 2021-11-15T20:12:50.000Z | server/api/resources/source/warehouse/support/discover.py | nwfsc-fram/Warehouse | c1a06ab7bacd6c15ab5fe2bb6076e3ea3c95757d | [
"BSD-3-Clause"
] | 48 | 2019-02-14T21:15:18.000Z | 2021-10-02T01:18:49.000Z | server/api/resources/source/warehouse/support/discover.py | nwfsc-fram/Warehouse | c1a06ab7bacd6c15ab5fe2bb6076e3ea3c95757d | [
"BSD-3-Clause"
] | 1 | 2021-03-22T23:47:54.000Z | 2021-03-22T23:47:54.000Z | """
Module providing Data Access Object api to discover Warehouse metadata.
Metadata describes the structure and contents of the datawarehouse, enabling
access to the contained data.
Copyright (C) 2016 ERT Inc.
"""
from api.resources.source.warehouse.support.dto import (
association
,table
)
from api.resources.source.warehouse.support import dto_util
SCHEMA = 'dw'
# Name of the database schema containing the Warehouse tables. (Not referenced
# within this module's functions; presumably kept for importers that reference
# discover.SCHEMA - confirm before removing.)
def lookup_tables( table_names, table_type='fact', lookup_type='dimension'
                  , db_url=None, connection_func=None):
    """
    Utility function,returning table dictionaries associated with named tables.

    Keyword Parameters:
    table_names -- A collection of Strings representing tables for which
      lists of associated tables are to be retrieved.
    table_type -- String type of the named tables (currently unused; retained
      for interface compatibility).
    lookup_type -- String 'type' value assigned to every returned table dict.
    db_url -- String, representing a SQLAlchemy connection (Required, if
      parameter 'connection' is not provided.
    connection_func -- function returning SQLAlchemy connections
      (Optional, if provided, will override db_url)

    Exceptions:
    ConnectionMissingArguments -- raised if neither connection or db_url
      parameter is specified.

    >>> any_list = ['any_thing']
    >>> lookup_tables( any_list)
    Traceback (most recent call last):
    ...
    api.resources.source.warehouse.support.dto_util.ConnectionMissingArgument
    """
    # get db connection
    connection = dto_util.get_connection(db_url, connection_func)
    # introspect FOREIGN KEY constraints from the PostgreSQL system catalogs,
    # joining back to information_schema for readable table/column names
    select_statement = """
      SELECT
         t_base.table_name as "table" --fact table name
        --,c.conkey
        ,a_base.attname as "table_field" --fact table field containing keys to be looked up
        ,t_ref.table_schema as "ref_table_schema" --schema of referenced dimension table
        ,t_ref.table_name as "ref_table" --referenced dimension table name
        --,c.confkey
        ,a_ref.attname as "ref_table_field" --dimension column containing the keys
        ,pg_catalog.pg_get_constraintdef(c.oid, true) as condef --pretty constraint text
      FROM pg_catalog.pg_constraint c
        inner join information_schema.tables t_base
          on c.conrelid = (t_base.table_schema||'."'||t_base.table_name||'"')::regclass
        inner join pg_attribute a_base
          on c.conrelid = a_base.attrelid
          AND a_base.attnum = ANY(c.conkey)
        inner join information_schema.tables t_ref
          on c.confrelid = (t_ref.table_schema||'."'||t_ref.table_name||'"')::regclass
        inner join pg_attribute a_ref
          on c.confrelid = a_ref.attrelid
          AND a_ref.attnum = ANY(c.confkey)
      WHERE c.contype = 'f' --Get only FOREIGN key constraints
        and t_base.table_name = %s
    """
    try:
        # build list of table dicts
        tables = []
        for name in table_names:
            result = connection.execute( select_statement, name)
            # track referenced tables already added (set: O(1) membership)
            ref_table_encountered = set()
            for row in result:
                ref_table = row['ref_table']
                if ref_table not in ref_table_encountered:
                    new_table = {'name': ref_table, 'type': lookup_type
                                 ,'updated':None, 'rows':None, 'years':None
                                 ,'project': None, 'contact': None
                                }
                    table.validate( new_table)
                    tables.append( new_table)
                    ref_table_encountered.add( ref_table) #only build 1x dict ea
        # check for Dimensional aliases (Roles)
        table_associations = lookup_associations(
            table_names
            ,db_url
            ,connection_func=connection_func
            ,lookup_roles=False)
        roles_tuple = dto_util.get_roles( table_associations)
        role_tables, replacement_associations, role_associations = roles_tuple
        if replacement_associations:
            # include Dimension "roles" as tables, upon detection of "role" assoc.
            tables.extend( role_tables)
        return tables
    finally:
        # exceptions propagate naturally (the former 'except: raise' was a
        # no-op); the connection is always closed here.
        connection.close()
def lookup_associations( table_names, db_url=None, connection_func=None
        ,default_type='fact dimension', lookup_roles=True):
    """
    Utility function,returning association dictionaries associated with named tables.
    Keyword Parameters:
    table_names -- A collection of table names, for which the table
      associations are to be retrieved.
    db_url -- String, representing a SQLAlchemy connection (Required, if
      parameter 'connection' is not provided.
    connection_func -- function returning SQLAlchemy connections
      (Optional, if provided, will override db_url)
    default_type -- String representing the association_type to be
      used for items found to be associated with one of the input tables
    lookup_roles -- Boolean flag, indicating if the detected associations should
      be inspected for Dimensional aliases (Default: True)
    Exceptions:
    ConnectionMissingArguments -- raised if neither connection or db_url
      parameter is specified.
    >>> any_list = ['any_thing']
    >>> lookup_associations( any_list)
    Traceback (most recent call last):
    ...
    api.resources.source.warehouse.support.dto_util.ConnectionMissingArgument
    """
    # get db connection
    connection = dto_util.get_connection(db_url, connection_func)
    # retrieve associations: introspect FOREIGN KEY constraints from the
    # PostgreSQL system catalogs for each named table
    select_statement = ('SELECT \n'
        ' t_base.table_name as "table" --table name \n'
        ' --,c.conkey \n'
        ' ,a_base.attname as "table_field" --table field containing keys to be looked up \n'
        ' ,t_ref.table_schema as "ref_table_schema" --schema of referenced table \n'
        ' ,t_ref.table_name as "ref_table" --referenced table name \n'
        ' --,c.confkey \n'
        ' ,a_ref.attname as "ref_table_field" --referenced table column containing the keys \n'
        ' ,pg_catalog.pg_get_constraintdef(c.oid, true) as condef --pretty constraint text \n'
        'FROM pg_catalog.pg_constraint c \n'
        ' inner join information_schema.tables t_base \n'
        ''' on c.conrelid = (t_base.table_schema||'."'||t_base.table_name||'"')::regclass \n'''
        ' inner join pg_attribute a_base \n'
        ' on c.conrelid = a_base.attrelid \n'
        ' AND a_base.attnum = ANY(c.conkey) \n'
        ' inner join information_schema.tables t_ref \n'
        ''' on c.confrelid = (t_ref.table_schema||'."'||t_ref.table_name||'"')::regclass \n'''
        ' inner join pg_attribute a_ref \n'
        ' on c.confrelid = a_ref.attrelid \n'
        ' AND a_ref.attnum = ANY(c.confkey) \n'
        '''WHERE c.contype = 'f' --Get only FOREIGN key constraints \n'''
        ' and t_base.table_name = %s \n')
    try:
        # build list of association dicts
        associations = []
        for name in table_names:
            result = connection.execute( select_statement, name)
            for row in result:
                ref_table, ref_field = row['ref_table'], row['ref_table_field']
                table, field = row['table'], row['table_field']
                new_association = { 'parent':ref_table
                                    ,'parent_column': ref_field
                                    ,'table': table, 'column': field
                                    ,'type': default_type}
                association.validate( new_association)
                associations.append( new_association)
        if lookup_roles: # check for Dimensional aliases (Roles)
            roles_tuple = dto_util.get_roles( associations)
            role_tables, replacement_associations, role_associations = roles_tuple
            if replacement_associations:
                # prepare a map,to replace detected assoc w/new role-aware versions
                # keyed on (table, column) so each naive association can be
                # overwritten in place by its role-aware counterpart
                detected_assocs_by_table_column_tuple = {}
                for detected_association in associations:
                    detected_table = detected_association['table']
                    detected_column = detected_association['column']
                    key = (detected_table,detected_column)
                    detected_assocs_by_table_column_tuple[key]=detected_association
                for key in replacement_associations.keys():
                    # replace naive assoc.s with Dimension "role"-aware versions
                    replacement = replacement_associations[key]
                    detected_assocs_by_table_column_tuple[key] = replacement
                associations = list(detected_assocs_by_table_column_tuple.values())
                # add additional associations,relating the detected Dimension
                #"roles" back to their base dimensions.
                associations.extend( role_associations)
        return associations
    except:
        raise
    finally:
        connection.close()
795b5784db8e6ed6f85dc75068a1ac73d7cbafeb | 89 | py | Python | mainweb/apps.py | gregbuaa/influxweb | 16c7ace0c8f9cbb4fc92204181dd8e46665f76ce | [
"MIT"
] | null | null | null | mainweb/apps.py | gregbuaa/influxweb | 16c7ace0c8f9cbb4fc92204181dd8e46665f76ce | [
"MIT"
] | null | null | null | mainweb/apps.py | gregbuaa/influxweb | 16c7ace0c8f9cbb4fc92204181dd8e46665f76ce | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class MainwebConfig(AppConfig):
    """Django application configuration for the 'mainweb' app."""
    name = 'mainweb'
| 14.833333 | 33 | 0.752809 |
795b5ac12c7ee05d34d84c3333e44b16ad746467 | 783 | py | Python | weblog/blog/adminforms.py | Family-TreeSY/WeBlog | cc26d604caa39f7237788873e8460bae8dff197e | [
"MIT"
] | null | null | null | weblog/blog/adminforms.py | Family-TreeSY/WeBlog | cc26d604caa39f7237788873e8460bae8dff197e | [
"MIT"
] | null | null | null | weblog/blog/adminforms.py | Family-TreeSY/WeBlog | cc26d604caa39f7237788873e8460bae8dff197e | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
from dal import autocomplete
from django import forms
from ckeditor_uploader.widgets import CKEditorUploadingWidget
from .models import Category, Tag
class PostAdminForm(forms.ModelForm):
    """Admin form for blog posts.

    Overrides the default widgets: the summary field ('摘要') uses a plain
    textarea, and the body field ('内容') uses the CKEditor rich-text widget
    with upload support. Labels are intentionally in Chinese (user-facing).
    """
    # Optional post summary, edited as a plain textarea.
    desc = forms.CharField(widget=forms.Textarea, label='摘要', required=False)
    # Post body, edited with CKEditor (supports image uploads).
    content = forms.CharField(widget=CKEditorUploadingWidget(), label='内容')
    # NOTE(review): the django-autocomplete-light fields below are disabled -
    # presumably pending the 'category-autocomplete'/'tag-autocomplete' URL
    # routes; confirm before re-enabling.
    # category = forms.ModelChoiceField(
    #     queryset=Category.objects.all(),
    #     widget=autocomplete.ModelSelect2(url='category-autocomplete'),
    #     label='分类',
    # )
    # tag = forms.ModelMultipleChoiceField(
    #     queryset=Tag.objects.all(),
    #     widget=autocomplete.ModelSelect2Multiple(url='tag-autocomplete'),
    #     label='标签',
    # )
795b5b93e3b3f8a093889743436344a342ebba39 | 7,046 | py | Python | timesketch/app.py | ramo-j/timesketch | 5d4d8d1b42944f9cb41a40eb642a9d0b2339a9a6 | [
"Apache-2.0"
] | 1 | 2022-01-14T13:38:02.000Z | 2022-01-14T13:38:02.000Z | timesketch/app.py | ramo-j/timesketch | 5d4d8d1b42944f9cb41a40eb642a9d0b2339a9a6 | [
"Apache-2.0"
] | null | null | null | timesketch/app.py | ramo-j/timesketch | 5d4d8d1b42944f9cb41a40eb642a9d0b2339a9a6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry point for the application."""
from __future__ import unicode_literals
import logging
import os
import sys
import six
from flask import Flask
from celery import Celery
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_restful import Api
from flask_wtf import CSRFProtect
from timesketch.api.v1.routes import API_ROUTES as V1_API_ROUTES
from timesketch.lib.errors import ApiHTTPError
from timesketch.models import configure_engine
from timesketch.models import init_db
from timesketch.models.user import User
from timesketch.views.auth import auth_views
from timesketch.views.spa import spa_views
def create_app(config=None):
    """Create the Flask app instance that is used throughout the application.

    Args:
        config: Path to configuration file as a string or an object with config
            directives.

    Returns:
        Application object (instance of flask.Flask).
    """
    # The built Vue frontend doubles as both template and static root.
    template_folder = 'frontend/dist'
    static_folder = 'frontend/dist'
    app = Flask(
        __name__,
        template_folder=template_folder,
        static_folder=static_folder
    )
    if not config:
        # Where to find the config file
        default_path = '/etc/timesketch/timesketch.conf'
        # Fall back to legacy location of the config file
        legacy_path = '/etc/timesketch.conf'
        if os.path.isfile(default_path):
            config = default_path
        else:
            config = legacy_path
    # A string config is treated as a file path; anything else as a config
    # object for Flask's from_object().
    if isinstance(config, six.text_type):
        os.environ['TIMESKETCH_SETTINGS'] = config
        try:
            app.config.from_envvar('TIMESKETCH_SETTINGS')
            if 'EMAIL_USER_WHITELIST' in app.config:
                sys.stderr.write(
                    'Warning, EMAIL_USER_WHITELIST has been deprecated. '
                    'Please update timesketch.conf.')
        except IOError:
            sys.stderr.write(
                'Config file {0} does not exist.\n'.format(config))
            sys.exit()
    else:
        app.config.from_object(config)
    # Make sure that SECRET_KEY is configured.
    if not app.config['SECRET_KEY']:
        sys.stderr.write('ERROR: Secret key not present. '
                         'Please update your configuration.\n'
                         'To generate a key you can use openssl:\n\n'
                         '$ openssl rand -base64 32\n\n')
        sys.exit()
    # Support old style config using Elasticsearch as backend.
    # TODO: Deprecate the old ELASTIC_* config in 2023.
    if not app.config.get('OPENSEARCH_HOST'):
        sys.stderr.write('Deprecated config field found: ELASTIC_HOST. '
                         'Update your config to use OPENSEARCH_HOST.\n')
        app.config['OPENSEARCH_HOST'] = app.config.get('ELASTIC_HOST')
    if not app.config.get('OPENSEARCH_PORT'):
        sys.stderr.write('Deprecated config field found: ELASTIC_PORT. '
                         'Update your config to use OPENSEARCH_PORT.\n')
        app.config['OPENSEARCH_PORT'] = app.config.get('ELASTIC_PORT')
    # Plaso version that we support (only relevant when uploads are enabled;
    # plaso is an optional dependency, hence the guarded import).
    if app.config['UPLOAD_ENABLED']:
        try:
            # pylint: disable=import-outside-toplevel
            from plaso import __version__ as plaso_version
            app.config['PLASO_VERSION'] = plaso_version
        except ImportError:
            pass
    # Setup the database.
    configure_engine(app.config['SQLALCHEMY_DATABASE_URI'])
    db = init_db()
    # Alembic migration support:
    # http://alembic.zzzcomputing.com/en/latest/
    migrate = Migrate()
    migrate.init_app(app, db)
    # Register blueprints. Blueprints are a way to organize your Flask
    # Flask application. See this for more information:
    # http://flask.pocoo.org/docs/latest/blueprints/
    app.register_blueprint(spa_views)
    app.register_blueprint(auth_views)
    # Setup URL routes for the API.
    api_v1 = Api(app, prefix='/api/v1')
    for route in V1_API_ROUTES:
        api_v1.add_resource(*route)
    # Register error handlers
    # pylint: disable=unused-variable
    @app.errorhandler(ApiHTTPError)
    def handle_api_http_error(error):
        """Error handler for API HTTP errors.

        Returns:
            HTTP response object (instance of flask.wrappers.Response)
        """
        return error.build_response()
    # Setup the login manager.
    login_manager = LoginManager()
    login_manager.init_app(app)
    login_manager.login_view = 'user_views.login'
    # This is used by the flask_login extension.
    # pylint: disable=unused-variable
    @login_manager.user_loader
    def load_user(user_id):
        """Based on a user_id (database primary key for a user) this function
        loads a user from the database. It is used by the Flask-Login extension
        to setup up the session for the user.

        Args:
            user_id: Integer primary key for the user.

        Returns:
            A user object (Instance of timesketch.models.user.User).
        """
        return User.query.get(user_id)
    # Setup CSRF protection for the whole application
    CSRFProtect(app)
    return app
def configure_logger():
    """Configure the logger.

    Attaches a uniform formatter to every handler on the parent (root)
    logger of the 'timesketch' logger and installs a filter that drops
    records emitted by the (noisy) 'opensearch' logger.
    """
    class NoESFilter(logging.Filter):
        """Custom filter to filter out ES logs"""
        def filter(self, record):
            """Filter out records.

            Returns True to keep the record, False to drop it.
            """
            # '!=' reads more directly than the original 'not ... =='
            return record.name.lower() != 'opensearch'
    logger_formatter = logging.Formatter(
        '[%(asctime)s] %(name)s/%(levelname)s %(message)s')
    logger_filter = NoESFilter()
    logger_object = logging.getLogger('timesketch')
    # Handlers live on the parent logger; decorate each one.
    for handler in logger_object.parent.handlers:
        handler.setFormatter(logger_formatter)
        handler.addFilter(logger_filter)
def create_celery_app():
    """Create a Celery app instance.

    Builds the Flask app first, configures Celery from the Flask config
    (broker URL etc.) and swaps in a Task base class that runs every task
    inside a Flask application context, so tasks can use the database and
    other app-bound extensions.
    """
    app = create_app()
    celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL'])
    celery.conf.update(app.config)
    TaskBase = celery.Task
    # pylint: disable=no-init
    class ContextTask(TaskBase):
        """Add Flask context to the Celery tasks created."""
        abstract = True
        def __call__(self, *args, **kwargs):
            """Return Task within a Flask app context.

            Returns:
                A Task (instance of Celery.celery.Task)
            """
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)
    celery.Task = ContextTask
    return celery
| 32.925234 | 79 | 0.661652 |
795b5d3943531996bcd1d77480d4b31cc6d6ef77 | 771 | py | Python | agro_site/sales_backend/migrations/0003_auto_20220216_2328.py | Mortjke/agro_site | c517979220f805b1c4527bbf40575b45acad4758 | [
"MIT"
] | null | null | null | agro_site/sales_backend/migrations/0003_auto_20220216_2328.py | Mortjke/agro_site | c517979220f805b1c4527bbf40575b45acad4758 | [
"MIT"
] | null | null | null | agro_site/sales_backend/migrations/0003_auto_20220216_2328.py | Mortjke/agro_site | c517979220f805b1c4527bbf40575b45acad4758 | [
"MIT"
] | 2 | 2022-03-11T19:05:36.000Z | 2022-03-11T19:07:14.000Z | # Generated by Django 2.2.16 on 2022-02-16 20:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (2022-02-16).

    Replaces Product.seller with Product.product_seller, a ForeignKey to the
    swappable user model. default=1 is a one-off default for existing rows;
    preserve_default=False keeps it out of the model definition afterwards.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('sales_backend', '0002_product_seller'),
    ]
    operations = [
        # Drop the old field before adding its replacement.
        migrations.RemoveField(
            model_name='product',
            name='seller',
        ),
        migrations.AddField(
            model_name='product',
            name='product_seller',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='product_seller', to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
    ]
795b5e1a10d4f0fb26bf97a3d98212df515f90fa | 19,388 | py | Python | tests/test_table_layout.py | radjkarl/reportlab | 48cafb6d64ff92fd9d4f9a4dd888be6f7d55b765 | [
"BSD-3-Clause"
] | 51 | 2015-01-20T19:50:34.000Z | 2022-03-05T21:23:32.000Z | tests/test_table_layout.py | radjkarl/reportlab | 48cafb6d64ff92fd9d4f9a4dd888be6f7d55b765 | [
"BSD-3-Clause"
] | 16 | 2015-11-15T04:23:43.000Z | 2021-09-27T14:14:20.000Z | tests/test_table_layout.py | radjkarl/reportlab | 48cafb6d64ff92fd9d4f9a4dd888be6f7d55b765 | [
"BSD-3-Clause"
] | 46 | 2015-03-28T10:18:14.000Z | 2021-12-16T15:57:47.000Z | from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import operator, string
from reportlab.platypus import *
#from reportlab import rl_config
from reportlab.lib.styles import PropertySet, getSampleStyleSheet, ParagraphStyle
from reportlab.lib import colors
from reportlab.platypus.paragraph import Paragraph
#from reportlab.lib.utils import fp_str
#from reportlab.pdfbase import pdfmetrics
from reportlab.platypus.flowables import PageBreak
import os
import unittest
class TableTestCase(unittest.TestCase):
def getDataBlock(self):
"Helper - data for our spanned table"
return [
# two rows are for headers
['Region','Product','Period',None,None,None,'Total'],
[None,None,'Q1','Q2','Q3','Q4',None],
# now for data
['North','Spam',100,110,120,130,460],
['North','Eggs',101,111,121,131,464],
['North','Guinness',102,112,122,132,468],
['South','Spam',100,110,120,130,460],
['South','Eggs',101,111,121,131,464],
['South','Guinness',102,112,122,132,468],
]
def test_document(self):
rowheights = (24, 16, 16, 16, 16)
rowheights2 = (24, 16, 16, 16, 30)
colwidths = (50, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32)
GRID_STYLE = TableStyle(
[('GRID', (0,0), (-1,-1), 0.25, colors.black),
('ALIGN', (1,1), (-1,-1), 'RIGHT')]
)
styleSheet = getSampleStyleSheet()
styNormal = styleSheet['Normal']
styNormal.spaceBefore = 6
styNormal.spaceAfter = 6
data = (
('', 'Jan', 'Feb', 'Mar','Apr','May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'),
('Mugs', 0, 4, 17, 3, 21, 47, 12, 33, 2, -2, 44, 89),
('T-Shirts', 0, 42, 9, -3, 16, 4, 72, 89, 3, 19, 32, 119),
('Miscellaneous accessories', 0,0,0,0,0,0,1,0,0,0,2,13),
('Hats', 893, 912, '1,212', 643, 789, 159, 888, '1,298', 832, 453, '1,344','2,843')
)
data2 = (
('', 'Jan', 'Feb', 'Mar','Apr','May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'),
('Mugs', 0, 4, 17, 3, 21, 47, 12, 33, 2, -2, 44, 89),
('T-Shirts', 0, 42, 9, -3, 16, 4, 72, 89, 3, 19, 32, 119),
('Key Ring', 0,0,0,0,0,0,1,0,0,0,2,13),
('Hats\nLarge', 893, 912, '1,212', 643, 789, 159, 888, '1,298', 832, 453, '1,344','2,843')
)
data3 = (
('', 'Jan', 'Feb', 'Mar','Apr','May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'),
('Mugs', 0, 4, 17, 3, 21, 47, 12, 33, 2, -2, 44, 89),
('T-Shirts', 0, 42, 9, -3, 16, 4, 72, 89, 3, 19, 32, 119),
('Key Ring', 0,0,0,0,0,0,1,0,0,0,2,13),
(Paragraph("Let's <b>really mess things up with a <i>paragraph</i></b>",styNormal),
893, 912, '1,212', 643, 789, 159, 888, '1,298', 832, 453, '1,344','2,843')
)
lst = []
lst.append(Paragraph("""Basics about column sizing and cell contents""", styleSheet['Heading1']))
t1 = Table(data, colwidths, rowheights)
t1.setStyle(GRID_STYLE)
lst.append(Paragraph("This is GRID_STYLE with explicit column widths. Each cell contains a string or number\n", styleSheet['BodyText']))
lst.append(t1)
lst.append(Spacer(18,18))
t2 = Table(data, None, None)
t2.setStyle(GRID_STYLE)
lst.append(Paragraph("""This is GRID_STYLE with no size info. It
does the sizes itself, measuring each text string
and computing the space it needs. If the text is
too wide for the frame, the table will overflow
as seen here.""",
styNormal))
lst.append(t2)
lst.append(Spacer(18,18))
t3 = Table(data2, None, None)
t3.setStyle(GRID_STYLE)
lst.append(Paragraph("""This demonstrates the effect of adding text strings with
newlines to a cell. It breaks where you specify, and if rowHeights is None (i.e
automatic) then you'll see the effect. See bottom left cell.""",
styNormal))
lst.append(t3)
lst.append(Spacer(18,18))
colWidths = (None, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32)
t3 = Table(data3, colWidths, None)
t3.setStyle(GRID_STYLE)
lst.append(Paragraph("""This table does not specify the size of the first column,
so should work out a sane one. In this case the element
at bottom left is a paragraph, which has no intrinsic size
(the height and width are a function of each other). So,
it tots up the extra space in the frame and divides it
between any such unsizeable columns. As a result the
table fills the width of the frame (except for the
6 point padding on either size).""",
styNormal))
lst.append(t3)
lst.append(PageBreak())
lst.append(Paragraph("""Row and Column spanning""", styleSheet['Heading1']))
lst.append(Paragraph("""This shows a very basic table. We do a faint pink grid
to show what's behind it - imagine this is not printed, as we'll overlay it later
with some black lines. We're going to "span" some cells, and have put a
value of None in the data to signify the cells we don't care about.
(In real life if you want an empty cell, put '' in it rather than None). """, styNormal))
sty = TableStyle([
#very faint grid to show what's where
('GRID', (0,0), (-1,-1), 0.25, colors.pink),
])
t = Table(self.getDataBlock(), colWidths=None, rowHeights=None, style=sty)
lst.append(t)
lst.append(Paragraph("""We now center the text for the "period"
across the four cells for each quarter. To do this we add a 'span'
command to the style to make the cell at row 1 column 3 cover 4 cells,
and a 'center' command for all cells in the top row. The spanning
is not immediately evident but trust us, it's happening - the word
'Period' is centered across the 4 columns. Note also that the
underlying grid shows through. All line drawing commands apply
to the underlying grid, so you have to take care what you put
grids through.""", styNormal))
sty = TableStyle([
#
('GRID', (0,0), (-1,-1), 0.25, colors.pink),
('ALIGN', (0,0), (-1,0), 'CENTER'),
('SPAN', (2,0), (5,0)),
])
t = Table(self.getDataBlock(), colWidths=None, rowHeights=None, style=sty)
lst.append(t)
lst.append(Paragraph("""We repeat this for the words 'Region', Product'
and 'Total', which each span the top 2 rows; and for 'Nprth' and 'South'
which span 3 rows. At the moment each cell's alignment is the default
(bottom), so these words appear to have "dropped down"; in fact they
are sitting on the bottom of their allocated ranges. You will just see that
all the 'None' values vanished, as those cells are not drawn any more.""", styNormal))
sty = TableStyle([
#
('GRID', (0,0), (-1,-1), 0.25, colors.pink),
('ALIGN', (0,0), (-1,0), 'CENTER'),
('SPAN', (2,0), (5,0)),
#span the other column heads down 2 rows
('SPAN', (0,0), (0,1)),
('SPAN', (1,0), (1,1)),
('SPAN', (6,0), (6,1)),
#span the 'north' and 'south' down 3 rows each
('SPAN', (0,2), (0,4)),
('SPAN', (0,5), (0,7)),
])
t = Table(self.getDataBlock(), colWidths=None, rowHeights=None, style=sty)
lst.append(t)
lst.append(PageBreak())
lst.append(Paragraph("""Now we'll tart things up a bit. First,
we set the vertical alignment of each spanned cell to 'middle'.
Next we add in some line drawing commands which do not slash across
the spanned cells (this needs a bit of work).
Finally we'll add some thicker lines to divide it up, and hide the pink. Voila!
""", styNormal))
sty = TableStyle([
#
# ('GRID', (0,0), (-1,-1), 0.25, colors.pink),
('TOPPADDING', (0,0), (-1,-1), 3),
#span the 'period'
('SPAN', (2,0), (5,0)),
#span the other column heads down 2 rows
('SPAN', (0,0), (0,1)),
('SPAN', (1,0), (1,1)),
('SPAN', (6,0), (6,1)),
#span the 'north' and 'south' down 3 rows each
('SPAN', (0,2), (0,4)),
('SPAN', (0,5), (0,7)),
#top row headings are centred
('ALIGN', (0,0), (-1,0), 'CENTER'),
#everything we span is vertically centred
#span the other column heads down 2 rows
('VALIGN', (0,0), (0,1), 'MIDDLE'),
('VALIGN', (1,0), (1,1), 'MIDDLE'),
('VALIGN', (6,0), (6,1), 'MIDDLE'),
#span the 'north' and 'south' down 3 rows each
('VALIGN', (0,2), (0,4), 'MIDDLE'),
('VALIGN', (0,5), (0,7), 'MIDDLE'),
#numeric stuff right aligned
('ALIGN', (2,1), (-1,-1), 'RIGHT'),
#draw lines carefully so as not to swipe through
#any of the 'spanned' cells
('GRID', (1,2), (-1,-1), 1.0, colors.black),
('BOX', (0,2), (0,4), 1.0, colors.black),
('BOX', (0,5), (0,7), 1.0, colors.black),
('BOX', (0,0), (0,1), 1.0, colors.black),
('BOX', (1,0), (1,1), 1.0, colors.black),
('BOX', (2,0), (5,0), 1.0, colors.black),
('GRID', (2,1), (5,1), 1.0, colors.black),
('BOX', (6,0), (6,1), 1.0, colors.black),
# do fatter boxes around some cells
('BOX', (0,0), (-1,1), 2.0, colors.black),
('BOX', (0,2), (-1,4), 2.0, colors.black),
('BOX', (0,5), (-1,7), 2.0, colors.black),
('BOX', (-1,0), (-1,-1), 2.0, colors.black),
])
t = Table(self.getDataBlock(), colWidths=None, rowHeights=None, style=sty)
lst.append(t)
lst.append(Paragraph("""How cells get sized""", styleSheet['Heading1']))
lst.append(Paragraph("""So far the table has been auto-sized. This can be
computationally expensive, and can lead to yucky effects. Imagine a lot of
numbers, one of which goes to 4 figures - tha numeric column will be wider.
The best approach is to specify the column
widths where you know them, and let the system do the heights. Here we set some
widths - an inch for the text columns and half an inch for the numeric ones.
""", styNormal))
t = Table(self.getDataBlock(),
colWidths=(72,72,36,36,36,36,56),
rowHeights=None,
style=sty)
lst.append(t)
lst.append(Paragraph("""The auto-sized example 2 steps back demonstrates
one advanced feature of the sizing algorithm. In the table below,
the columns for Q1-Q4 should all be the same width. We've made
the text above it a bit longer than "Period". Note that this text
is technically in the 3rd column; on our first implementation this
was sized and column 3 was therefore quite wide. To get it right,
we ensure that any cells which span columns, or which are 'overwritten'
by cells which span columns, are assigned zero width in the cell
sizing. Thus, only the string 'Q1' and the numbers below it are
calculated in estimating the width of column 3, and the phrase
"What time of year?" is not used. However, row-spanned cells are
taken into account. ALL the cells in the leftmost column
have a vertical span (or are occluded by others which do)
but it can still work out a sane width for them.
""", styNormal))
data = self.getDataBlock()
data[0][2] = "Which time of year?"
#data[7][0] = Paragraph("Let's <b>really mess things up with a <i>paragraph</i>",styNormal)
t = Table(data,
#colWidths=(72,72,36,36,36,36,56),
rowHeights=None,
style=sty)
lst.append(t)
lst.append(Paragraph("""Paragraphs and unsizeable objects in table cells.""", styleSheet['Heading1']))
lst.append(Paragraph("""Paragraphs and other flowable objects make table
sizing much harder. In general the height of a paragraph is a function
of its width so you can't ask it how wide it wants to be - and the
REALLY wide all-on-one-line solution is rarely what is wanted. We
refer to Paragraphs and their kin as "unsizeable objects". In this example
we have set the widths of all but the first column. As you can see
it uses all the available space across the page for the first column.
Note also that this fairly large cell does NOT contribute to the
height calculation for its 'row'. Under the hood it is in the
same row as the second Spam, but this row gets a height based on
its own contents and not the cell with the paragraph.
""", styNormal))
data = self.getDataBlock()
data[5][0] = Paragraph("Let's <b>really mess things up</b> with a <i>paragraph</i>, whose height is a function of the width you give it.",styNormal)
t = Table(data,
colWidths=(None,72,36,36,36,36,56),
rowHeights=None,
style=sty)
lst.append(t)
lst.append(Paragraph("""This one demonstrates that our current algorithm
does not cover all cases :-( The height of row 0 is being driven by
the width of the para, which thinks it should fit in 1 column and not 4.
To really get this right would involve multiple passes through all the cells
applying rules until everything which can be sized is sized (possibly
backtracking), applying increasingly dumb and brutal
rules on each pass.
""", styNormal))
data = self.getDataBlock()
data[0][2] = Paragraph("Let's <b>really mess things up</b> with a <i>paragraph</i>.",styNormal)
data[5][0] = Paragraph("Let's <b>really mess things up</b> with a <i>paragraph</i>, whose height is a function of the width you give it.",styNormal)
t = Table(data,
colWidths=(None,72,36,36,36,36,56),
rowHeights=None,
style=sty)
lst.append(t)
lst.append(Paragraph("""To avoid these problems remember the golden rule
of ReportLab tables: (1) fix the widths if you can, (2) don't use
a paragraph when a string will do.
""", styNormal))
lst.append(Paragraph("""Unsized columns that contain flowables without
precise widths, such as paragraphs and nested tables,
still need to try and keep their content within borders and ideally
even honor percentage requests. This can be tricky--and expensive.
But sometimes you can't follow the golden rules.
""", styNormal))
lst.append(Paragraph("""The code first calculates the minimum width
for each unsized column by iterating over every flowable in each column
and remembering the largest minimum width. It then allocates
available space to accomodate the minimum widths. Any remaining space
is divided up, treating a width of '*' as greedy, a width of None as
non-greedy, and a percentage as a weight. If a column is already
wider than its percentage warrants, it is not further expanded, and
the other widths accomodate it.
""", styNormal))
lst.append(Paragraph("""For instance, consider this tortured table.
It contains four columns, with widths of None, None, 60%, and 20%,
respectively, and a single row. The first cell contains a paragraph.
The second cell contains a table with fixed column widths that total
about 50% of the total available table width. The third cell contains
a string. The last cell contains a table with no set widths but a
single cell containing a paragraph.
""", styNormal))
ministy = TableStyle([
('GRID', (0,0), (-1,-1), 1.0, colors.black),
])
nested1 = [Paragraph(
'This is a paragraph. The column has a width of None.',
styNormal)]
nested2 = [Table(
[[Paragraph(
'This table is set to take up two and a half inches. The '
'column that holds it has a width of None.', styNormal)]],
colWidths=(180,),
rowHeights=None,
style=ministy)]
nested3 = '60% width'
nested4 = [Table(
[[[Paragraph(
"This is a table with a paragraph in it but no width set. "
"The column width in the containing table is 20%.",
styNormal)]]],
colWidths=(None,),
rowHeights=None,
style=ministy)]
t = Table([[nested1, nested2, nested3, nested4]],
colWidths=(None, None, '60%', '20%'),
rowHeights=None,
style=ministy)
lst.append(t)
lst.append(Paragraph("""Notice that the second column does expand to
account for the minimum size of its contents; and that the remaining
space goes to the third column, in an attempt to honor the '60%'
request as much as possible. This is reminiscent of the typical HTML
browser approach to tables.""", styNormal))
lst.append(Paragraph("""To get an idea of how potentially expensive
this is, consider the case of the last column: the table gets the
minimum width of every flowable of every cell in the column. In this
case one of the flowables is a table with a column without a set
width, so the nested table must itself iterate over its flowables.
The contained paragraph then calculates the width of every word in it
to see what the biggest word is, given the set font face and size. It
is easy to imagine creating a structure of this sort that took an
unacceptably large amount of time to calculate. Remember the golden
rule, if you can. """, styNormal))
lst.append(Paragraph("""This code does not yet handle spans well.""",
styNormal))
SimpleDocTemplate(outputfile('test_table_layout.pdf'), showBoundary=1).build(lst)
def makeSuite():
    # Build the unittest suite for this module; makeSuiteForClasses is a
    # reportlab test-utility helper that wraps each test class in a TestSuite.
    return makeSuiteForClasses(TableTestCase)
#noruntests
# Script entry point: run the table-layout tests directly and report where
# the generated PDF was written.
if __name__ == "__main__":
    unittest.TextTestRunner().run(makeSuite())
    print('saved '+outputfile('test_table_layout.pdf'))
    printLocation()
| 46.272076 | 156 | 0.569012 |
795b5e67f482b28e7d2caf13015d3ae2c574099c | 1,014 | py | Python | valence/scripts/feature_extraction/tfidf_extractor.py | gizemsogancioglu/elderly-emotion-SC | b8f371e0df6e4aa8b680d59995cd18d52f591466 | [
"MIT"
] | 2 | 2020-08-05T11:44:30.000Z | 2021-02-19T15:46:10.000Z | valence/scripts/feature_extraction/tfidf_extractor.py | gizemsogancioglu/elderly-emotion-SC | b8f371e0df6e4aa8b680d59995cd18d52f591466 | [
"MIT"
] | null | null | null | valence/scripts/feature_extraction/tfidf_extractor.py | gizemsogancioglu/elderly-emotion-SC | b8f371e0df6e4aa8b680d59995cd18d52f591466 | [
"MIT"
] | 2 | 2020-08-31T02:42:15.000Z | 2020-10-29T15:39:48.000Z | from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
import nltk
from nltk.stem.porter import PorterStemmer
nltk.download('punkt')
def set_vectorizer(text_f):
    """Return a TF-IDF vectorizer fitted on the corpus *text_f* (thin wrapper)."""
    return get_tfidf_vector(text_f)
def get_tfidf_vector(X_all):
    """Build and fit a word/bigram TF-IDF vectorizer on the corpus *X_all*.

    Returns the fitted TfidfVectorizer; callers transform documents with it
    separately. The original called ``fit_transform`` and discarded the
    resulting document matrix — ``fit`` performs the identical fitting work
    without materialising that matrix.
    """
    vectorizer = TfidfVectorizer(stop_words="english", ngram_range=(1, 2), tokenizer=tokenize_text)
    vectorizer.fit(X_all)
    return vectorizer
def tokenize_text(text):
    """Sentence- and word-tokenize *text*, drop 1-character tokens,
    lowercase the rest and return their Porter stems."""
    stemmer = PorterStemmer()
    lowered = [
        token.lower()
        for sentence in nltk.sent_tokenize(text)
        for token in nltk.word_tokenize(sentence)
        if len(token) >= 2
    ]
    return [stemmer.stem(token) for token in lowered]
def normalize_text_data(X):
    """Fit a TF-IDF vectorizer on *X* and return the transformed matrix."""
    fitted_vectorizer = set_vectorizer(X)
    return fitted_vectorizer.transform(X)
def write_TFIDF_features(df, file_path):
    """Compute TF-IDF features for *df*, write them to *file_path* as CSV
    (no index column) and return the resulting DataFrame."""
    features = pd.DataFrame(normalize_text_data(df).toarray())
    features.to_csv(file_path, index=False)
    return features
| 28.971429 | 99 | 0.714004 |
795b60eb47b9c4b232b637d26972189d72a87438 | 1,937 | py | Python | scrapy/spidermiddlewares/depth.py | michaelgilmore/scrapy | 5a2a6bf6fc8861f00c0875659db11ba4d72406cd | [
"BSD-3-Clause"
] | 2 | 2015-05-27T02:06:18.000Z | 2015-05-27T02:06:37.000Z | scrapy/spidermiddlewares/depth.py | michaelgilmore/scrapy | 5a2a6bf6fc8861f00c0875659db11ba4d72406cd | [
"BSD-3-Clause"
] | 10 | 2020-02-11T23:34:28.000Z | 2022-03-11T23:16:12.000Z | scrapy/spidermiddlewares/depth.py | michaelgilmore/scrapy | 5a2a6bf6fc8861f00c0875659db11ba4d72406cd | [
"BSD-3-Clause"
] | 3 | 2018-08-05T14:54:25.000Z | 2021-06-07T01:49:59.000Z | """
Depth Spider Middleware
See documentation in docs/topics/spider-middleware.rst
"""
import logging
from scrapy.http import Request
logger = logging.getLogger(__name__)
class DepthMiddleware(object):
    """Track and limit the crawl depth of requests produced by spiders.

    Requests deeper than ``DEPTH_LIMIT`` are dropped, ``DEPTH_PRIORITY``
    lowers the priority of deeper requests, and depth statistics are
    recorded when a stats collector is attached.
    """

    def __init__(self, maxdepth, stats=None, verbose_stats=False, prio=1):
        self.maxdepth = maxdepth
        self.stats = stats
        self.verbose_stats = verbose_stats
        self.prio = prio

    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware from the crawler's settings."""
        settings = crawler.settings
        return cls(
            settings.getint('DEPTH_LIMIT'),
            crawler.stats,
            settings.getbool('DEPTH_STATS_VERBOSE'),
            settings.getint('DEPTH_PRIORITY'),
        )

    def process_spider_output(self, response, result, spider):
        def _keep(entry):
            # Non-request items (e.g. scraped items) always pass through.
            if not isinstance(entry, Request):
                return True
            depth = response.meta['depth'] + 1
            entry.meta['depth'] = depth
            if self.prio:
                entry.priority -= depth * self.prio
            if self.maxdepth and depth > self.maxdepth:
                logger.debug("Ignoring link (depth > %(maxdepth)d): %(requrl)s ",
                             {'maxdepth': self.maxdepth, 'requrl': entry.url},
                             extra={'spider': spider})
                return False
            if self.stats:
                if self.verbose_stats:
                    self.stats.inc_value('request_depth_count/%s' % depth, spider=spider)
                self.stats.max_value('request_depth_max', depth, spider=spider)
            return True

        # base case (depth=0): seed the response depth before the lazy
        # generator below is consumed.
        if self.stats and 'depth' not in response.meta:
            response.meta['depth'] = 0
            if self.verbose_stats:
                self.stats.inc_value('request_depth_count/0', spider=spider)

        return (entry for entry in result or () if _keep(entry))
| 35.218182 | 93 | 0.584409 |
795b627ba1d6d23d842abfd36a889157e0442bde | 395 | py | Python | parcellearning/utilities/__init__.py | kristianeschenburg/parcellearning | 93811f7d11c1c5583d8f541c7629dbbaa1785304 | [
"BSD-3-Clause"
] | 6 | 2017-07-03T23:11:29.000Z | 2022-01-04T16:41:57.000Z | parcellearning/utilities/__init__.py | kristianeschenburg/parcellearning | 93811f7d11c1c5583d8f541c7629dbbaa1785304 | [
"BSD-3-Clause"
] | null | null | null | parcellearning/utilities/__init__.py | kristianeschenburg/parcellearning | 93811f7d11c1c5583d8f541c7629dbbaa1785304 | [
"BSD-3-Clause"
] | 1 | 2018-04-24T18:01:19.000Z | 2018-04-24T18:01:19.000Z | from .batch import partition_graphs
from .downsample import GraphSampler
from .early_stop import EarlyStopping
from .gnnio import (GCNData, standardize, dataset)
from .load import (load_model, load_schema)
# Public names exported by `from parcellearning.utilities import *`.
# NOTE(review): load_model is imported in this module but absent from
# __all__, so star-imports will not expose it -- confirm this is intended.
__all__ = ['partition_graphs',
        'GraphSampler',
        'EarlyStopping',
        'GCNData',
        'standardize',
        'load_schema',
        'dataset']
| 28.214286 | 50 | 0.64557 |
795b62dfaa665a4e6e53e517776aeb0a215344ce | 959 | py | Python | examples/simple_client/user/src/user/__init__.py | mardiros/pyramid-blacksmith | ff338776fc8802c9f46475fba619aa1f9413aba0 | [
"BSD-3-Clause"
] | 15 | 2022-01-16T15:23:23.000Z | 2022-01-20T21:42:53.000Z | examples/consul_template_sd/user/src/user/__init__.py | mardiros/blacksmith | c86a870da04b0d916f243cb51f8861529284337d | [
"BSD-3-Clause"
] | 9 | 2022-01-11T19:42:42.000Z | 2022-01-26T20:24:23.000Z | examples/unittesting/user/src/user/__init__.py | mardiros/blacksmith | c86a870da04b0d916f243cb51f8861529284337d | [
"BSD-3-Clause"
] | null | null | null | from typing import cast
import uvicorn
from asgiref.typing import ASGI3Application
from starlette.applications import Starlette
from starlette.responses import JSONResponse
# ASGI application exposing a tiny read-only user API (debug mode on).
app = Starlette(debug=True)
# In-memory fixture data keyed by username; stands in for a real user store.
USERS = {
    "naruto": {
        "username": "naruto",
        "firstname": "Naruto",
        "lastname": "Uzumaki",
        "email": "naruto@konoa.city",
        "roles": ["genin"],
    },
    "hinata": {
        "username": "hinata",
        "firstname": "Hinata",
        "lastname": "Hyûga",
        "email": "hinata@konoa.city",
        "roles": ["chunin"],
    },
}
@app.route("/v1/users/{username}", methods=["GET"])
async def show_user(request):
    """Return the stored record for the requested username, or a 404 payload."""
    user = USERS.get(request.path_params["username"])
    if user is None:
        return JSONResponse({"detail": "user not found"}, status_code=404)
    return JSONResponse(user)
if __name__ == "__main__":
    # Serve on all interfaces; the cast satisfies uvicorn's ASGI3Application hint.
    uvicorn.run(cast(ASGI3Application, app), host="0.0.0.0", port=8000)
| 24.589744 | 74 | 0.618352 |
795b63217586ae3e49ef3775e7261d0488e95a25 | 3,755 | py | Python | console_gateway_sdk/model/metadata_center/stream_translate_states_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | console_gateway_sdk/model/metadata_center/stream_translate_states_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | console_gateway_sdk/model/metadata_center/stream_translate_states_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: stream_translate_states.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from console_gateway_sdk.model.metadata_center import stream_translate_package_pb2 as console__gateway__sdk_dot_model_dot_metadata__center_dot_stream__translate__package__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='stream_translate_states.proto',
package='metadata_center',
syntax='proto3',
serialized_options=_b('ZIgo.easyops.local/contracts/protorepo-models/easyops/model/metadata_center'),
serialized_pb=_b('\n\x1dstream_translate_states.proto\x12\x0fmetadata_center\x1aHconsole_gateway_sdk/model/metadata_center/stream_translate_package.proto\"o\n\x15StreamTranslateStates\x12\x0b\n\x03org\x18\x01 \x01(\x05\x12\x0f\n\x07\x63ommand\x18\x02 \x01(\t\x12\x38\n\x07payload\x18\x03 \x03(\x0b\x32\'.metadata_center.StreamTranslatePackageBKZIgo.easyops.local/contracts/protorepo-models/easyops/model/metadata_centerb\x06proto3')
,
dependencies=[console__gateway__sdk_dot_model_dot_metadata__center_dot_stream__translate__package__pb2.DESCRIPTOR,])
_STREAMTRANSLATESTATES = _descriptor.Descriptor(
name='StreamTranslateStates',
full_name='metadata_center.StreamTranslateStates',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='org', full_name='metadata_center.StreamTranslateStates.org', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='command', full_name='metadata_center.StreamTranslateStates.command', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload', full_name='metadata_center.StreamTranslateStates.payload', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=124,
serialized_end=235,
)
_STREAMTRANSLATESTATES.fields_by_name['payload'].message_type = console__gateway__sdk_dot_model_dot_metadata__center_dot_stream__translate__package__pb2._STREAMTRANSLATEPACKAGE
DESCRIPTOR.message_types_by_name['StreamTranslateStates'] = _STREAMTRANSLATESTATES
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StreamTranslateStates = _reflection.GeneratedProtocolMessageType('StreamTranslateStates', (_message.Message,), {
'DESCRIPTOR' : _STREAMTRANSLATESTATES,
'__module__' : 'stream_translate_states_pb2'
# @@protoc_insertion_point(class_scope:metadata_center.StreamTranslateStates)
})
_sym_db.RegisterMessage(StreamTranslateStates)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 42.191011 | 434 | 0.796272 |
795b64b87d35bf39a72b12cedb3ff5862b104852 | 3,176 | py | Python | SysInfo.py | dawidePl/Linux-SysInfo | bcacd6a9c64e81f2e708ae3d9385a10bdce7af72 | [
"MIT"
] | null | null | null | SysInfo.py | dawidePl/Linux-SysInfo | bcacd6a9c64e81f2e708ae3d9385a10bdce7af72 | [
"MIT"
] | null | null | null | SysInfo.py | dawidePl/Linux-SysInfo | bcacd6a9c64e81f2e708ae3d9385a10bdce7af72 | [
"MIT"
] | null | null | null | import psutil
import platform
from datetime import datetime
class SysInfo(object):
    """Collect and pretty-print basic host information.

    Hardware metrics (CPU, RAM, disk, boot time) come from ``psutil``;
    OS identification comes from the ``platform`` module.
    """

    def __init__(self):
        # Binary (1024-based) unit prefixes consumed by get_size().
        self.units = ["", "K", "M", "G", "T", "P"]
        self.factor = 1024
        # Dispatch table: CLI argument name -> bound getter method.
        self.func_dict = {
            'system': self.get_system,
            'uptime': self.get_uptime,
            'cpu': self.get_cpu_data,
            'ram': self.get_ram_data,
            'disk': self.get_disk_data
        }
        self.sys_args = ['system', 'uptime', 'cpu', 'ram', 'disk']

    def get_size(self, bytes: int, suffix="B") -> str:
        """Format a byte count with a binary prefix, e.g. 1536 -> '1.50KB'."""
        for unit in self.units:
            if bytes < self.factor:
                return f"{bytes:.2f}{unit}{suffix}"
            bytes /= self.factor
        # Fallback beyond the largest listed prefix (exbi); the original
        # implicitly returned None here.
        return f"{bytes:.2f}E{suffix}"

    def percentage(self, part, whole, precision=1) -> str:
        """Return part/whole as a percentage string, e.g. '50.0%'."""
        return f"{float(part)/float(whole):.{int(precision)}%}"

    def get_system(self) -> str:
        """Describe the operating system and distribution."""
        try:
            distro = ' '.join(platform.linux_distribution())
        except AttributeError:
            # platform.linux_distribution() was removed in Python 3.8;
            # fall back to the generic platform string.
            distro = platform.platform()
        return f"{platform.system()} {distro}"

    def get_uptime(self) -> str:
        """Return the elapsed time since boot.

        Bug fix: the original formatted the boot *timestamp's* calendar
        fields (day-of-month, wall-clock hour/minute/second) as if they
        were a duration; this computes the actual delta from now.
        """
        uptime = datetime.now() - datetime.fromtimestamp(psutil.boot_time())
        hours, remainder = divmod(uptime.seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
        return f"{uptime.days} days {hours}h {minutes}m {seconds}s"

    def get_cpu_data(self) -> str:
        """Report current CPU usage (%) and frequency (MHz)."""
        usage = f"{psutil.cpu_percent()}%"
        frequency = f"{psutil.cpu_freq().current:.2f}Mhz"
        return f"CPU usage: {usage}\nCPU Frequency: {frequency}"

    def get_ram_data(self) -> str:
        """Report used/total RAM with a percentage."""
        ram = psutil.virtual_memory()
        total = self.get_size(ram.total)
        used = self.get_size(ram.used)
        used_percent = self.percentage(ram.used, ram.total)
        return f"{used} of {total} ( {used_percent} ) of RAM is used."

    def get_disk_data(self, show_partitions: bool = False) -> str:
        """Report disk usage, per partition or summed across all partitions.

        Partitions whose mountpoints cannot be read are skipped.
        """
        partitions = psutil.disk_partitions()
        if show_partitions:
            partition_info = []
            for partition in partitions:
                try:
                    partition_usage = psutil.disk_usage(partition.mountpoint)
                except PermissionError:
                    continue
                total = self.get_size(partition_usage.total)
                used = self.get_size(partition_usage.used)
                # Bug fix: .percent is already a percentage, not a byte
                # count, so it must not be run through get_size().
                used_percentage = f"{partition_usage.percent}%"
                partition_info.append(f"{used} of {total} ( {used_percentage} ) of disk space is used.")
            return "\n".join(partition_info)
        else:
            sum_total = 0
            sum_used = 0
            for partition in partitions:
                try:
                    partition_usage = psutil.disk_usage(partition.mountpoint)
                except PermissionError:
                    continue
                sum_total += partition_usage.total
                sum_used += partition_usage.used
            sum_used_percent = self.percentage(sum_used, sum_total)
            sum_total = self.get_size(sum_total)
            sum_used = self.get_size(sum_used)
            return f"{sum_used} of {sum_total} ( {sum_used_percent} ) of disk space is used."

    # ----------------------------------------------------
    def data(self) -> str:
        """Return every metric concatenated into one multi-section report."""
        system = self.get_system()
        uptime = self.get_uptime()
        cpu_data = self.get_cpu_data()
        ram_data = self.get_ram_data()
        disk_data = self.get_disk_data()
        return f"{system}\n\nUptime: {uptime}\n\n{cpu_data}\n\nRAM data:\n{ram_data}\n\nDisk data:\n{disk_data}"

    def get_data(self, sys_arg: str) -> str:
        """Dispatch a single metric by name.

        'help' lists the valid argument names; any unrecognized argument
        falls back to the full report.
        """
        if sys_arg == "help":
            return "Available arguments:\n{}".format('\n'.join(self.func_dict))
        elif sys_arg in self.sys_args:
            return self.func_dict[sys_arg]()
        else:
            return self.data()
795b64d969b1a3dded532cf0509ed7d8de4b0812 | 1,787 | py | Python | setup.py | yanorestes/crypyto | 612e494e2f9e35a4d54f5f961e10edf1d9f407ae | [
"MIT"
] | 2 | 2018-07-26T18:20:40.000Z | 2019-08-31T22:23:31.000Z | setup.py | yyyyyyyyyyan/crypyto | 612e494e2f9e35a4d54f5f961e10edf1d9f407ae | [
"MIT"
] | 1 | 2018-12-02T13:45:24.000Z | 2019-05-02T19:59:46.000Z | setup.py | yyyyyyyyyyan/crypyto | 612e494e2f9e35a4d54f5f961e10edf1d9f407ae | [
"MIT"
] | 1 | 2018-07-26T18:20:48.000Z | 2018-07-26T18:20:48.000Z | import os
from setuptools import setup

base_dir = os.path.dirname(os.path.realpath(__file__))

# Collect every bundled static image twice: as package_data (installed inside
# the package) and as data_files (installed alongside it).
data_files = []
package_data = []
for d in os.listdir(os.path.join(base_dir, 'crypyto/static')):
    real_dir = os.path.join(base_dir, 'crypyto/static', d)
    dir_name = 'static/{}'.format(d)
    file_list = []
    for image_file in os.listdir(real_dir):
        file_list.append(os.path.join(real_dir, image_file))
        package_data.append(os.path.join(real_dir, image_file))
    data_files.append((dir_name, file_list))

# Read the long description with an explicit encoding so the build does not
# depend on the machine's locale (the original used the default encoding).
with open('README.rst', encoding='utf-8') as readme:
    long_description = readme.read()

setup(
    name='crypyto',
    version='0.3.0',
    author='Yan Orestes',
    author_email='yan.orestes@alura.com.br',
    packages=['crypyto'],
    package_data={'': package_data},
    data_files=data_files,
    description='crypyto is a Python package that provides simple usage of cryptography tools and ciphers on your programs.',
    long_description=long_description,
    url='https://github.com/yanorestes/crypyto',
    download_url='https://github.com/yanorestes/crypyto/archive/0.3.0.zip',
    license='MIT',
    keywords='crypto cryptography cipher',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Security :: Cryptography',
    ],
    install_requires=[
        'unidecode',
        'Pillow',
    ],
)
795b6616ca390dde8cde40f1e47e50a81e46b28d | 1,351 | bzl | Python | libs/barista-components/config.bzl | dt-mafe/barista | d70b2ee7fb9f666668ac5d975a3bd7672459242e | [
"Apache-2.0"
] | null | null | null | libs/barista-components/config.bzl | dt-mafe/barista | d70b2ee7fb9f666668ac5d975a3bd7672459242e | [
"Apache-2.0"
] | null | null | null | libs/barista-components/config.bzl | dt-mafe/barista | d70b2ee7fb9f666668ac5d975a3bd7672459242e | [
"Apache-2.0"
COMPONENTS = [
    "alert",
    "assets",
    "autocomplete",
    "bar-indicator",
    "breadcrumbs",
    "button",
    "button-group",
    "card",
    "chart",
    "checkbox",
    "confirmation-dialog",
    "consumption",
    "container-breakpoint-observer",
    "context-dialog",
    "copy-to-clipboard",
    "core",
    "drawer",
    "empty-state",
    "event-chart",
    "expandable-panel",
    "expandable-section",
    "expandable-text",
    "experimental/combobox",
    "experimental/drawer-table",
    "experimental/quick-filter",
    "filter-field",
    "form-field",
    "formatters",
    "highlight",
    "icon",
    "indicator",
    "info-group",
    "inline-editor",
    "input",
    "key-value-list",
    "legend",
    "loading-distractor",
    "menu",
    "micro-chart",
    "overlay",
    "pagination",
    "progress-bar",
    "progress-circle",
    "radial-chart",
    "radio",
    "schematics",
    "secondary-nav",
    "select",
    "show-more",
    "slider",
    "stepper",
    "style",
    "sunburst-chart",
    "switch",
    "table",
    "tabs",
    "tag",
    "theming",
    "tile",
    "timeline-chart",
    "toast",
    "toggle-button-group",
    "top-bar-navigation",
    "tree-table",
]

# Bazel label for the entry-point target of every barista component.
COMPONENT_TARGETS = ["//libs/barista-components/{}".format(c) for c in COMPONENTS]
| 19.3 | 76 | 0.562546 |
795b66b3c9ef3917b32288cdaa8e7b514d39685c | 28,357 | py | Python | modi_firmware_updater/core/esp32_updater.py | LUXROBO/modi-firmware-updater | f6f5412cd5eed54aa90bdaa4216a4d932fd2a1fd | [
"MIT"
] | 1 | 2021-04-22T12:03:48.000Z | 2021-04-22T12:03:48.000Z | modi_firmware_updater/core/esp32_updater.py | LUXROBO/modi-firmware-updater | f6f5412cd5eed54aa90bdaa4216a4d932fd2a1fd | [
"MIT"
] | 6 | 2021-05-06T12:25:58.000Z | 2022-03-31T07:18:37.000Z | modi_firmware_updater/core/esp32_updater.py | LUXROBO/modi-firmware-updater | f6f5412cd5eed54aa90bdaa4216a4d932fd2a1fd | [
"MIT"
] | 1 | 2021-04-07T02:14:34.000Z | 2021-04-07T02:14:34.000Z | import io
import json
import pathlib
import sys
import threading as th
import time
from base64 import b64decode, b64encode
from io import open
from os import path
import serial
import serial.tools.list_ports as stl
from modi_firmware_updater.util.connection_util import list_modi_ports
from modi_firmware_updater.util.message_util import (decode_message,
parse_message,
unpack_data)
from modi_firmware_updater.util.module_util import (Module,
get_module_type_from_uuid)
def retry(exception_to_catch):
    """Decorator factory: retry the wrapped call indefinitely while it
    raises *exception_to_catch* (a single exception type or a tuple).

    The original implementation retried by calling the wrapper recursively,
    which grows the call stack on every failure; this loop keeps the same
    retry-forever behavior with constant stack depth, and preserves the
    wrapped function's metadata via functools.wraps.
    """
    def decorator(func):
        from functools import wraps

        @wraps(func)
        def wrapper(*args, **kwargs):
            while True:
                try:
                    return func(*args, **kwargs)
                except exception_to_catch:
                    continue
        return wrapper
    return decorator
class ESP32FirmwareUpdater(serial.Serial):
    """Serial-port driver that flashes firmware onto the ESP32 of a MODI
    network module, speaking an esptool-style SLIP/serial protocol.
    """
    # Protocol command opcodes and flash geometry constants used by the
    # packet-building methods below.
    DEVICE_READY = 0x2B
    DEVICE_SYNC = 0x08
    SPI_ATTACH_REQ = 0xD
    SPI_FLASH_SET = 0xB
    ESP_FLASH_BEGIN = 0x02
    ESP_FLASH_DATA = 0x03
    ESP_FLASH_END = 0x04
    ESP_FLASH_BLOCK = 0x200
    ESP_FLASH_CHUNK = 0x4000
    ESP_CHECKSUM_MAGIC = 0xEF
    def __init__(self, device=None):
        """Open a 921600-baud serial connection to a MODI network module.

        device: explicit serial port name; when None, every detected MODI
        port is tried in turn and the first one that opens is used.
        """
        # NOTE(review): stores a flag under the builtin name `print`;
        # toggled via set_print() to silence progress output.
        self.print = True
        if device != None:
            super().__init__(
                device, timeout = 0.1, baudrate = 921600
            )
        else:
            modi_ports = list_modi_ports()
            if not modi_ports:
                raise serial.SerialException("No MODI port is connected")
            # Try each candidate port; keep the first that opens successfully.
            for modi_port in modi_ports:
                try:
                    super().__init__(
                        modi_port.device, timeout=0.1, baudrate=921600
                    )
                except Exception:
                    self.__print('Next network module')
                    continue
                else:
                    break
            self.__print(f"Connecting to MODI network module at {modi_port.device}")
        # Presumably the flash offsets matching file_path entry-for-entry
        # (bootloader, partition table, OTA data, OTA factory, app) --
        # confirm against __write_binary_firmware.
        self.__address = [0x1000, 0x8000, 0xD000, 0x10000, 0xD0000]
        self.file_path = [
            "bootloader.bin",
            "partitions.bin",
            "ota_data_initial.bin",
            "modi_ota_factory.bin",
            "esp32.bin",
        ]
        self.version = None
        self.__version_to_update = None
        # Progress/state fields read by the GUI while an update runs.
        self.update_in_progress = False
        self.ui = None
        self.current_sequence = 0
        self.total_sequence = 0
        self.raise_error_message = True
        self.update_error = 0
        self.update_error_message = ""
        self.network_uuid = None
    def set_ui(self, ui):
        """Attach the GUI object used for progress and button updates."""
        self.ui = ui
    def set_print(self, print):
        """Enable/disable console progress output (parameter shadows the builtin)."""
        self.print = print
    def set_raise_error(self, raise_error_message):
        """Set the raise-on-error flag; presumably consumed by error
        handling elsewhere in this class -- confirm against callers."""
        self.raise_error_message = raise_error_message
    def update_firmware(self, update_interpreter=False, force=False):
        """Update the ESP32 on the connected MODI network module.

        update_interpreter: when True, only send the interpreter-reset
            packet instead of flashing new firmware.
        force: skip the interactive "already up to date" confirmation
            (only asked when no UI is attached).
        """
        if update_interpreter:
            self.current_sequence = 0
            self.total_sequence = 1
            self.__print("get network uuid")
            self.network_uuid = self.get_network_uuid()
            self.__print("Reset interpreter...")
            self.update_in_progress = True
            # MODI JSON packet asking the module to reset its interpreter.
            self.write(b'{"c":160,"s":0,"d":18,"b":"AAMAAAAA","l":6}')
            self.__print("ESP interpreter reset is complete!!")
            self.current_sequence = 1
            self.total_sequence = 1
            time.sleep(1)
            self.update_in_progress = False
            self.flushInput()
            self.flushOutput()
            self.close()
            self.update_error = 1
            # Restore the GUI buttons that were disabled during the update.
            if self.ui:
                self.ui.update_stm32_modules.setStyleSheet(f"border-image: url({self.ui.active_path}); font-size: 16px")
                self.ui.update_stm32_modules.setEnabled(True)
                self.ui.update_network_stm32.setStyleSheet(f"border-image: url({self.ui.active_path}); font-size: 16px")
                self.ui.update_network_stm32.setEnabled(True)
                self.ui.update_network_esp32.setStyleSheet(f"border-image: url({self.ui.active_path}); font-size: 16px")
                self.ui.update_network_esp32.setEnabled(True)
                self.ui.update_network_stm32_bootloader.setStyleSheet(f"border-image: url({self.ui.active_path}); font-size: 16px")
                self.ui.update_network_stm32_bootloader.setEnabled(True)
                if self.ui.is_english:
                    self.ui.update_network_esp32_interpreter.setText("Update Network ESP32 Interpreter")
                else:
                    self.ui.update_network_esp32_interpreter.setText("네트워크 모듈 인터프리터 초기화")
        else:
            self.__print("get network uuid")
            self.network_uuid = self.get_network_uuid()
            self.__print("Turning interpreter off...")
            self.write(b'{"c":160,"s":0,"d":18,"b":"AAMAAAAA","l":6}')
            self.update_in_progress = True
            self.__boot_to_app()
            self.__version_to_update = self.__get_latest_version()
            self.version = self.__get_esp_version()
            if self.version and self.version == self.__version_to_update:
                if not force and not self.ui:
                    response = input(f"ESP version already up to date (v{self.version}). Do you still want to proceed? [y/n]: ")
                    if "y" not in response:
                        return
            self.__print(f"Updating v{self.version} to v{self.__version_to_update}")
            # Compose the binary images, then drive the flashing sequence:
            # ready -> sync -> attach flash -> set parameters -> write.
            firmware_buffer = self.__compose_binary_firmware()
            self.__device_ready()
            self.__device_sync()
            self.__flash_attach()
            self.__set_flash_param()
            manager = None
            self.__write_binary_firmware(firmware_buffer, manager)
            self.__print("Booting to application...")
            self.__wait_for_json()
            self.__boot_to_app()
            time.sleep(1)
            self.__set_esp_version(self.__version_to_update)
            self.__print("ESP firmware update is complete!!")
            self.current_sequence = 100
            self.total_sequence = 100
            if self.ui:
                if self.ui.is_english:
                    self.ui.update_network_esp32.setText("Network ESP32 update is in progress. (100%)")
                else:
                    self.ui.update_network_esp32.setText("네트워크 모듈 업데이트가 진행중입니다. (100%)")
            time.sleep(1.5)
            self.flushInput()
            self.flushOutput()
            self.close()
            self.update_in_progress = False
            self.update_error = 1
            # Restore the GUI buttons that were disabled during the update.
            if self.ui:
                self.ui.update_stm32_modules.setStyleSheet(f"border-image: url({self.ui.active_path}); font-size: 16px")
                self.ui.update_stm32_modules.setEnabled(True)
                self.ui.update_network_stm32.setStyleSheet(f"border-image: url({self.ui.active_path}); font-size: 16px")
                self.ui.update_network_stm32.setEnabled(True)
                self.ui.update_network_esp32_interpreter.setStyleSheet(f"border-image: url({self.ui.active_path}); font-size: 16px")
                self.ui.update_network_esp32_interpreter.setEnabled(True)
                self.ui.update_network_stm32_bootloader.setStyleSheet(f"border-image: url({self.ui.active_path}); font-size: 16px")
                self.ui.update_network_stm32_bootloader.setEnabled(True)
                if self.ui.is_english:
                    self.ui.update_network_esp32.setText("Update Network ESP32")
                else:
                    self.ui.update_network_esp32.setText("네트워크 모듈 업데이트")
    def get_network_uuid(self):
        """Poll the bus for the network module's UUID.

        Broadcasts a UUID request, scans incoming JSON messages for a
        module whose UUID decodes to type "network", and returns that
        UUID. Returns None after ~5 seconds without a match.
        """
        init_time = time.time()
        while True:
            get_uuid_pkt = b'{"c":40,"s":4095,"d":4095,"b":"//8AAAAAAAA=","l":8}'
            self.write(get_uuid_pkt)
            try:
                json_msg = json.loads(self.__wait_for_json())
                # 0x05 / 0x0A are presumably the reply opcodes that carry a
                # module UUID -- confirm against the MODI protocol spec.
                if json_msg["c"] == 0x05 or json_msg["c"] == 0x0A:
                    module_uuid = unpack_data(json_msg["b"], (6, 2))[0]
                    module_type = get_module_type_from_uuid(module_uuid)
                    if module_type == "network":
                        return module_uuid
            except json.decoder.JSONDecodeError as jde:
                self.__print("json parse error: " + str(jde))
            if time.time() - init_time > 5:
                return None
            time.sleep(0.2)
def __device_ready(self):
self.__print("Redirecting connection to esp device...")
self.write(b'{"c":43,"s":0,"d":4095,"b":"AA==","l":1}')
def __device_sync(self):
self.__print("Syncing the esp device...")
sync_pkt = self.__parse_pkt(
[0x0, self.DEVICE_SYNC, 0x24, 0, 0, 0, 0, 0, 0x7, 0x7, 0x12, 0x20]
+ 32 * [0x55]
)
self.__send_pkt(sync_pkt, timeout=10, continuous=True)
self.__print("Sync Complete")
def __flash_attach(self):
self.__print("Attaching flash to esp device..")
attach_pkt = self.__parse_pkt(
[0x0, self.SPI_ATTACH_REQ, 0x8] + 13 * [0]
)
self.__send_pkt(attach_pkt, timeout=10)
self.__print("Flash attach Complete")
def __set_flash_param(self):
self.__print("Setting esp flash parameter...")
param_data = [0] * 32
fl_id, total_size, block_size, sector_size, page_size, status_mask = (
0,
2 * 1024 * 1024,
64 * 1024,
4 * 1024,
256,
0xFFFF,
)
param_data[1] = self.SPI_FLASH_SET
param_data[2] = 0x18
param_data[8:12] = int.to_bytes(fl_id, length=4, byteorder="little")
param_data[12:16] = int.to_bytes(total_size, length=4, byteorder="little")
param_data[16:20] = int.to_bytes(block_size, length=4, byteorder="little")
param_data[20:24] = int.to_bytes(sector_size, length=4, byteorder="little")
param_data[24:28] = int.to_bytes(page_size, length=4, byteorder="little")
param_data[28:32] = int.to_bytes(status_mask, length=4, byteorder="little")
param_pkt = self.__parse_pkt(param_data)
self.__send_pkt(param_pkt, timeout=10)
self.__print("Parameter set complete")
@staticmethod
def __parse_pkt(data):
pkt = bytes(data)
pkt = pkt.replace(b"\xdb", b"\xdb\xdd").replace(b"\xc0", b"\xdb\xdc")
pkt = b"\xc0" + pkt + b"\xc0"
return pkt
    @retry(Exception)
    def __send_pkt(self, pkt, wait=True, timeout=None, continuous=False):
        """Write a SLIP frame and optionally wait for the matching reply.

        The reply is matched by command byte (index 2 of the frame); byte 1
        is the ROM loader's direction/status byte and must be 0x01 for
        success.  With continuous=True the same frame is re-sent (without
        waiting) whenever no reply arrives, which is how the SYNC handshake
        works.  On mismatch of the status byte or on timeout, either raises
        (when self.raise_error_message) or records self.update_error = -1.
        Decorated with @retry(Exception), so raised errors trigger a retry.
        """
        self.write(pkt)
        self.reset_input_buffer()
        if wait:
            # Command id of the frame we sent; replies echo it at index 2.
            cmd = bytearray(pkt)[2]
            init_time = time.time()
            # timeout=None means wait forever (the condition is always true).
            while not timeout or time.time() - init_time < timeout:
                if continuous:
                    time.sleep(0.1)
                else:
                    time.sleep(0.01)
                recv_pkt = self.__read_slip()
                if not recv_pkt:
                    if continuous:
                        # No reply yet: fire the frame again without waiting.
                        self.__send_pkt(pkt, wait=False)
                    continue
                recv_cmd = bytearray(recv_pkt)[2]
                if cmd == recv_cmd:
                    if bytearray(recv_pkt)[1] != 0x01:
                        self.update_error_message = "Packet error"
                        if self.raise_error_message:
                            raise Exception(self.update_error_message)
                        else:
                            self.update_error = -1
                    return True
                elif continuous:
                    self.__send_pkt(pkt, wait=False)
                    self.__print("Sending Again...")
        # Only reached when the wait loop expired without a matching reply.
        self.update_error_message = "Timeout Expired!"
        if self.raise_error_message:
            raise Exception(self.update_error_message)
        else:
            self.update_error = -1
def __read_slip(self):
slip_pkt = b""
while slip_pkt != b"\xc0":
slip_pkt = self.read()
if slip_pkt == b"":
return b""
slip_pkt += self.read_until(b"\xc0")
return slip_pkt
def __read_json(self):
json_pkt = b""
while json_pkt != b"{":
json_pkt = self.read()
if json_pkt == b"":
return ""
time.sleep(0.1)
json_pkt += self.read_until(b"}")
return json_pkt
def __wait_for_json(self):
json_msg = self.__read_json()
while not json_msg:
json_msg = self.__read_json()
time.sleep(0.1)
return json_msg
def __get_esp_version(self):
init_time = time.time()
while True:
get_version_pkt = b'{"c":160,"s":25,"d":4095,"b":"AAAAAAAAAA==","l":8}'
self.write(get_version_pkt)
try:
json_msg = json.loads(self.__wait_for_json())
if json_msg["c"] == 0xA1:
break
except json.decoder.JSONDecodeError as jde:
self.__print("json parse error: " + str(jde))
if time.time() - init_time > 1:
return None
ver = b64decode(json_msg["b"]).lstrip(b"\x00")
return ver.decode("ascii")
    def __set_esp_version(self, version_text: str) -> None:
        """Write the new version string into the ESP32's version slot (s=24).

        The version is NUL-padded to 8 bytes, base64-encoded, and sent in a
        JSON command; the write is retried every 0.5 s until a 0xA1 ack
        arrives.  Note this loop has no timeout, so it blocks until acked.
        """
        self.__print(f"Writing version info (v{version_text})")
        version_byte = version_text.encode("ascii")
        # Left-pad with NUL bytes to exactly 8 bytes before encoding.
        version_byte = b"\x00" * (8 - len(version_byte)) + version_byte
        version_text = b64encode(version_byte).decode("utf8")
        version_msg = (
            "{" + f'"c":160,"s":24,"d":4095,'
            f'"b":"{version_text}","l":8' + "}"
        )
        version_msg_enc = version_msg.encode("utf8")
        while True:
            self.write(version_msg_enc)
            try:
                json_msg = json.loads(self.__wait_for_json())
                if json_msg["c"] == 0xA1:
                    break
                # Any other reply: kick the module back toward the app and retry.
                self.__boot_to_app()
            except json.decoder.JSONDecodeError as jde:
                self.__print("json parse error: " + str(jde))
            time.sleep(0.5)
        self.__print("The version info has been set!!")
def __compose_binary_firmware(self):
binary_firmware = b""
for i, bin_path in enumerate(self.file_path):
if self.ui:
if sys.platform.startswith("win"):
root_path = pathlib.PurePosixPath(pathlib.PurePath(__file__),"..", "..", "assets", "firmware", "latest", "esp32")
else:
root_path = path.join(path.dirname(__file__), "..", "assets", "firmware", "latest", "esp32")
if sys.platform.startswith("win"):
firmware_path = pathlib.PurePosixPath(root_path, bin_path)
else:
firmware_path = path.join(root_path, bin_path)
with open(firmware_path, "rb") as bin_file:
bin_data = bin_file.read()
else:
root_path = path.join(path.dirname(__file__), "..", "assets", "firmware", "latest", "esp32")
firmware_path = path.join(root_path, bin_path)
with open(firmware_path, "rb") as bin_file:
bin_data = bin_file.read()
binary_firmware += bin_data
if i < len(self.__address) - 1:
binary_firmware += b"\xFF" * (self.__address[i + 1] - self.__address[i] - len(bin_data))
return binary_firmware
def __get_latest_version(self):
root_path = path.join(path.dirname(__file__), "..", "assets", "firmware", "latest", "esp32")
version_path = path.join(root_path, "esp_version.txt")
with open(version_path, "r") as version_file:
version_info = version_file.readline().lstrip("v").rstrip("\n")
return version_info
def __erase_chunk(self, size, offset):
num_blocks = size // self.ESP_FLASH_BLOCK + 1
erase_data = [0] * 24
erase_data[1] = self.ESP_FLASH_BEGIN
erase_data[2] = 0x10
erase_data[8:12] = int.to_bytes(size, length=4, byteorder="little")
erase_data[12:16] = int.to_bytes(num_blocks, length=4, byteorder="little")
erase_data[16:20] = int.to_bytes(self.ESP_FLASH_BLOCK, length=4, byteorder="little")
erase_data[20:24] = int.to_bytes(offset, length=4, byteorder="little")
erase_pkt = self.__parse_pkt(erase_data)
self.__send_pkt(erase_pkt, timeout=10)
def __write_flash_block(self, data, seq_block):
size = len(data)
block_data = [0] * (size + 24)
checksum = self.ESP_CHECKSUM_MAGIC
block_data[1] = self.ESP_FLASH_DATA
block_data[2:4] = int.to_bytes(size + 16, length=2, byteorder="little")
block_data[8:12] = int.to_bytes(size, length=4, byteorder="little")
block_data[12:16] = int.to_bytes(seq_block, length=4, byteorder="little")
for i in range(size):
block_data[24 + i] = data[i]
checksum ^= 0xFF & data[i]
block_data[4:8] = int.to_bytes(checksum, length=4, byteorder="little")
block_pkt = self.__parse_pkt(block_data)
self.__send_pkt(block_pkt)
    def __write_binary_firmware(self, binary_firmware: bytes, manager):
        """Flash the composed firmware blob to the ESP32, chunk by chunk.

        The blob is split into ESP_FLASH_CHUNK-sized chunks; each chunk is
        erased at its flash offset and then written block by block via
        __write_chunk.  Progress is mirrored to self.current_sequence /
        self.total_sequence, the optional GUI, and `manager` (a progress
        reporter with .status and .quit(); may be None).
        """
        chunk_queue = []
        # Total number of ESP_FLASH_BLOCK-sized blocks (progress denominator).
        self.total_sequence = len(binary_firmware) // self.ESP_FLASH_BLOCK + 1
        while binary_firmware:
            if self.ESP_FLASH_CHUNK < len(binary_firmware):
                chunk_queue.append(binary_firmware[: self.ESP_FLASH_CHUNK])
                binary_firmware = binary_firmware[self.ESP_FLASH_CHUNK :]
            else:
                chunk_queue.append(binary_firmware[:])
                binary_firmware = b""
        blocks_downloaded = 0
        self.current_sequence = blocks_downloaded
        self.__print("Start uploading firmware data...")
        for seq, chunk in enumerate(chunk_queue):
            self.__erase_chunk(len(chunk), self.__address[0] + seq * self.ESP_FLASH_CHUNK)
            blocks_downloaded += self.__write_chunk(chunk, blocks_downloaded, self.total_sequence, manager)
        if manager:
            manager.quit()
        # Progress is pinned to 99% here; the caller reports 100% after the
        # post-flash steps (version write, reboot) complete.
        if self.ui:
            if self.ui.is_english:
                self.ui.update_network_esp32.setText("Network ESP32 update is in progress. (99%)")
            else:
                self.ui.update_network_esp32.setText("네트워크 모듈 업데이트가 진행중입니다. (99%)")
        self.current_sequence = 99
        self.total_sequence = 100
        self.__print(f"\r{self.__progress_bar(99, 100)}")
        self.__print("Firmware Upload Complete")
    def __write_chunk(self, chunk, curr_seq, total_seq, manager):
        """Write one chunk to flash as a sequence of ESP_FLASH_BLOCK blocks.

        `curr_seq` is the number of blocks already written (for progress),
        `total_seq` the overall block count.  Returns the number of blocks
        written from this chunk so the caller can advance its counter.
        """
        block_queue = []
        while chunk:
            if self.ESP_FLASH_BLOCK < len(chunk):
                block_queue.append(chunk[: self.ESP_FLASH_BLOCK])
                chunk = chunk[self.ESP_FLASH_BLOCK :]
            else:
                block_queue.append(chunk[:])
                chunk = b""
        for seq, block in enumerate(block_queue):
            self.current_sequence = curr_seq + seq
            if manager:
                manager.status = self.__progress_bar(curr_seq + seq, total_seq)
            if self.ui:
                if self.ui.is_english:
                    self.ui.update_network_esp32.setText(f"Network ESP32 update is in progress. ({int((curr_seq+seq)/total_seq*100)}%)")
                else:
                    self.ui.update_network_esp32.setText(f"네트워크 모듈 업데이트가 진행중입니다. ({int((curr_seq+seq)/total_seq*100)}%)")
            self.__print(
                f"\r{self.__progress_bar(curr_seq + seq, total_seq)}", end=""
            )
            # Block sequence number restarts at 0 for every chunk, matching
            # the FLASH_BEGIN issued per chunk by the caller.
            self.__write_flash_block(block, seq)
        return len(block_queue)
def __boot_to_app(self):
self.write(b'{"c":160,"s":0,"d":174,"b":"AAAAAAAAAA==","l":8}')
def __print(self, data, end="\n"):
if self.print:
print(data, end)
@staticmethod
def __progress_bar(current: int, total: int) -> str:
curr_bar = 50 * current // total
rest_bar = 50 - curr_bar
return (
f"Firmware Upload: [{'=' * curr_bar}>{'.' * rest_bar}] "
f"{100 * current / total:3.1f}%"
)
class ESP32FirmwareMultiUpdater():
    """Runs ESP32 firmware updates on several network modules concurrently.

    One ESP32FirmwareUpdater worker thread is started per serial port
    (capped at 10 ports) and a polling loop aggregates their per-device
    state into the optional main window (self.ui) and the per-device list
    view (self.list_ui).
    """
    def __init__(self):
        self.update_in_progress = False
        self.ui = None        # main window handle, set via set_ui (optional)
        self.list_ui = None   # per-device list view handle (optional)
    def set_ui(self, ui, list_ui):
        """Attach the GUI handles used for progress reporting."""
        self.ui = ui
        self.list_ui = list_ui
    def update_firmware(self, modi_ports, update_interpreter=False, force=True):
        """Update firmware on every port in `modi_ports` and block until done.

        Per-device state machine (self.state[i]):
          0 = waiting for the device's network UUID,
          1 = firmware update running (progress forwarded),
          2 = finished (success or error) — report result once,
          3 = terminal; counted as a fixed 100 units of total progress.
        """
        self.esp32_updaters = []
        self.network_uuid = []
        self.state = []
        for i, modi_port in enumerate(modi_ports):
            # At most 10 simultaneous devices are supported.
            if i > 9:
                break
            try:
                esp32_updater = ESP32FirmwareUpdater(modi_port.device)
                esp32_updater.set_print(False)
                esp32_updater.set_raise_error(False)
            except Exception as e:
                print(e)
            else:
                self.esp32_updaters.append(esp32_updater)
                self.state.append(0)
                self.network_uuid.append('')
        if self.list_ui:
            self.list_ui.set_device_num(len(self.esp32_updaters))
            self.list_ui.ui.close_button.setEnabled(False)
        self.update_in_progress = True
        # One daemon worker thread per device.
        for index, esp32_updater in enumerate(self.esp32_updaters):
            th.Thread(
                target=esp32_updater.update_firmware,
                args=(update_interpreter, force),
                daemon=True
            ).start()
        delay = 0.1
        # Polling loop: aggregate worker progress until every device is done.
        while True:
            is_done = True
            current_sequence = 0
            total_sequence = 0
            for index, esp32_updater in enumerate(self.esp32_updaters):
                if self.state[index] == 0:
                    # wait for network uuid
                    is_done = False
                    if esp32_updater.update_in_progress:
                        if esp32_updater.network_uuid:
                            self.network_uuid[index] = f'0x{esp32_updater.network_uuid:X}'
                            self.state[index] = 1
                            if self.list_ui:
                                self.list_ui.network_uuid_signal.emit(index, self.network_uuid[index])
                    else:
                        # Worker stopped before reporting a UUID: mark failed.
                        self.state[index] = 2
                        esp32_updater.update_error = -1
                        esp32_updater.update_error_message = "Not response network uuid"
                elif self.state[index] == 1:
                    # update modules
                    if esp32_updater.update_error == 0:
                        is_done = is_done & False
                        current = esp32_updater.current_sequence
                        total = esp32_updater.total_sequence
                        value = 0 if total == 0 else current / total * 100.0
                        current_sequence += current
                        total_sequence += total
                        if self.list_ui:
                            self.list_ui.progress_signal.emit(index, value)
                    else:
                        # update_error became 1 (success) or -1 (failure).
                        self.state[index] = 2
                elif self.state[index] == 2:
                    # end: report the final result exactly once, then go terminal.
                    current_sequence += esp32_updater.total_sequence
                    total_sequence += esp32_updater.total_sequence
                    if esp32_updater.update_error == 1:
                        if self.list_ui:
                            self.list_ui.network_state_signal.emit(index, 0)
                            self.list_ui.progress_signal.emit(index, 100)
                    else:
                        if self.list_ui:
                            self.list_ui.network_state_signal.emit(index, -1)
                            self.list_ui.error_message_signal.emit(index, esp32_updater.update_error_message)
                    self.state[index] = 3
                elif self.state[index] == 3:
                    # NOTE(review): only the denominator grows here — finished
                    # devices add nothing to current_sequence, so the overall
                    # percentage dips after a device completes.  Looks like a
                    # bug (current_sequence += 100 seems intended) — confirm.
                    total_sequence += 100
            if total_sequence != 0:
                if self.ui:
                    if update_interpreter:
                        if self.ui.is_english:
                            self.ui.update_network_esp32_interpreter.setText(
                                f"Network ESP32 Interpreter reset is in progress. "
                                f"({int(current_sequence/total_sequence*100)}%)"
                            )
                        else:
                            self.ui.update_network_esp32_interpreter.setText(
                                f"네트워크 모듈 인터프리터 초기화가 진행중입니다. "
                                f"({int(current_sequence/total_sequence*100)}%)"
                            )
                    else:
                        if self.ui.is_english:
                            self.ui.update_network_esp32.setText(
                                f"Network ESP32 update is in progress. "
                                f"({int(current_sequence/total_sequence*100)}%)"
                            )
                        else:
                            self.ui.update_network_esp32.setText(
                                f"네트워크 모듈 업데이트가 진행중입니다. "
                                f"({int(current_sequence/total_sequence*100)}%)"
                            )
                if self.list_ui:
                    self.list_ui.total_progress_signal.emit(current_sequence / total_sequence * 100.0)
                    self.list_ui.total_status_signal.emit("Uploading...")
                print(f"\r{self.__progress_bar(current_sequence, total_sequence)}", end="")
            if is_done:
                break
            time.sleep(delay)
        self.update_in_progress = False
        # Re-enable the other update buttons and restore the button label.
        if self.list_ui:
            self.list_ui.ui.close_button.setEnabled(True)
            self.list_ui.total_status_signal.emit("Complete")
        if update_interpreter:
            if self.ui:
                self.ui.update_stm32_modules.setStyleSheet(f"border-image: url({self.ui.active_path}); font-size: 16px")
                self.ui.update_stm32_modules.setEnabled(True)
                self.ui.update_network_stm32.setStyleSheet(f"border-image: url({self.ui.active_path}); font-size: 16px")
                self.ui.update_network_stm32.setEnabled(True)
                self.ui.update_network_stm32_bootloader.setStyleSheet(f"border-image: url({self.ui.active_path}); font-size: 16px")
                self.ui.update_network_stm32_bootloader.setEnabled(True)
                self.ui.update_network_esp32.setStyleSheet(f"border-image: url({self.ui.active_path}); font-size: 16px")
                self.ui.update_network_esp32.setEnabled(True)
                if self.ui.is_english:
                    self.ui.update_network_esp32_interpreter.setText("Update Network ESP32 Interpreter")
                else:
                    self.ui.update_network_esp32_interpreter.setText("네트워크 모듈 인터프리터 초기화")
        else:
            if self.ui:
                self.ui.update_stm32_modules.setStyleSheet(f"border-image: url({self.ui.active_path}); font-size: 16px")
                self.ui.update_stm32_modules.setEnabled(True)
                self.ui.update_network_stm32.setStyleSheet(f"border-image: url({self.ui.active_path}); font-size: 16px")
                self.ui.update_network_stm32.setEnabled(True)
                self.ui.update_network_stm32_bootloader.setStyleSheet(f"border-image: url({self.ui.active_path}); font-size: 16px")
                self.ui.update_network_stm32_bootloader.setEnabled(True)
                self.ui.update_network_esp32_interpreter.setStyleSheet(f"border-image: url({self.ui.active_path}); font-size: 16px")
                self.ui.update_network_esp32_interpreter.setEnabled(True)
                if self.ui.is_english:
                    self.ui.update_network_esp32.setText("Update Network ESP32")
                else:
                    self.ui.update_network_esp32.setText("네트워크 모듈 업데이트")
        print("\nESP firmware update is complete!!")
    @staticmethod
    def __progress_bar(current: int, total: int) -> str:
        """Render a 50-character progress bar with a one-decimal percentage."""
        curr_bar = int(50 * current // total)
        rest_bar = int(50 - curr_bar)
        return (
            f"Firmware Upload: [{'=' * curr_bar}>{'.' * rest_bar}] "
            f"{100 * current / total:3.1f}%"
        )
795b676c15059daf2abcf751df728f684412fee5 | 456 | py | Python | __init__.py | aphi/pyflip | 20472e2dbc5e8d0b0c84167e83c80b41b45dc74e | [
"MIT"
] | 9 | 2019-08-26T21:19:02.000Z | 2022-01-10T18:26:43.000Z | __init__.py | aphi/PyFlip | 20472e2dbc5e8d0b0c84167e83c80b41b45dc74e | [
"MIT"
] | null | null | null | __init__.py | aphi/PyFlip | 20472e2dbc5e8d0b0c84167e83c80b41b45dc74e | [
"MIT"
] | 1 | 2021-10-01T16:29:57.000Z | 2021-10-01T16:29:57.000Z | """
Modern library for Linear and Integer Programming with Python 3
"""
# import into root namespace
from .src.model import *
from .src.solution import *
from .src.file_io import *
from .src.run import *
from .src.expression import *
from .src.parameter import *
# keep relative namespace
from .src import variable
from .src import solver
from .src import util
from .test import stress_test
from .test import unit_test
from .definitions import ROOT_DIR | 21.714286 | 63 | 0.77193 |
795b67a29dccd5d228023b232596755503d6e707 | 2,542 | py | Python | guided_model/guided_parts.py | naivete5656/BFP | 74c5604a9ba4eaa3ec3e2c76ef5e1282d7d10f18 | [
"MIT"
] | 8 | 2020-07-31T15:20:01.000Z | 2021-09-18T08:42:07.000Z | guided_model/guided_parts.py | naivete5656/BFP | 74c5604a9ba4eaa3ec3e2c76ef5e1282d7d10f18 | [
"MIT"
] | null | null | null | guided_model/guided_parts.py | naivete5656/BFP | 74c5604a9ba4eaa3ec3e2c76ef5e1282d7d10f18 | [
"MIT"
] | 5 | 2020-10-04T02:02:13.000Z | 2021-11-14T23:37:08.000Z | import torch
from torch.autograd import Function
import numpy as np
class GuidedBackpropReLU(Function):
    """ReLU autograd Function with guided-backpropagation gradients.

    Forward is a standard ReLU (input masked by input > 0).  Backward
    additionally masks the gradient by grad_output > 0, so only positive
    gradients flowing to positive activations survive.
    """
    @staticmethod
    def forward(ctx, input):
        # Mask of positive inputs, cast to input's dtype (1.0 / 0.0).
        positive_mask = (input > 0).type_as(input)
        # ReLU via addcmul: 0 + input * mask.
        output = torch.addcmul(
            torch.zeros(input.size()).type_as(input), input, positive_mask
        )
        ctx.save_for_backward(input, output)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        input, output = ctx.saved_variables
        positive_mask_1 = (input > 0).type_as(grad_output)
        positive_mask_2 = (grad_output > 0).type_as(grad_output)
        # grad_input = grad_output * (input > 0) * (grad_output > 0).
        grad_input = torch.addcmul(
            torch.zeros(input.size()).type_as(input),
            torch.addcmul(
                torch.zeros(input.size()).type_as(input), grad_output, positive_mask_1
            ),
            positive_mask_2,
        )
        return grad_input
class GuidedBackpropReLUSave(Function):
    """Guided-backprop ReLU variant that can apply a stored forward mask.

    Forward behaves like GuidedBackpropReLU, but when the flag stored in
    "test.npy" is positive it additionally multiplies the output by
    mask[count], loaded from "test.pt" / "count.npy" on disk.
    NOTE(review): the file names suggest an experiment-specific side
    channel between runs — confirm these paths before reuse.
    """
    @staticmethod
    def forward(ctx, input):
        positive_mask = (input > 0).type_as(input)
        output = torch.addcmul(
            torch.zeros(input.size()).type_as(input), input, positive_mask
        )
        ctx.save_for_backward(input, output)
        # Flag > 0 enables masking the activation with a precomputed mask.
        flag = np.load("test.npy")
        if flag > 0:
            mask = torch.load("test.pt")
            count = np.load("count.npy")
            output = torch.addcmul(
                torch.zeros(input.size()).type_as(input), output, mask[count]
            )
        return output
    @staticmethod
    def backward(ctx, grad_output):
        input, output = ctx.saved_variables
        positive_mask_1 = (input > 0).type_as(grad_output)
        positive_mask_2 = (grad_output > 0).type_as(grad_output)
        # Same guided-backprop rule as GuidedBackpropReLU.backward.
        grad_input = torch.addcmul(
            torch.zeros(input.size()).type_as(input),
            torch.addcmul(
                torch.zeros(input.size()).type_as(input), grad_output, positive_mask_1
            ),
            positive_mask_2,
        )
        # masks = torch.load("test.pt")
        # index = np.load("test.npy")
        # index += 1
        # # np.save("test.npy", index)
        # # if masks.nelement() == 0:
        # #     masks = grad_input.unsqueeze(0)
        # # else:
        # #     masks = torch.cat([grad_input.unsqueeze(0), masks])
        # # torch.save(masks, "test.pt")
        return grad_input
def guide_relu(self, input):
    """Drop-in ReLU forward that routes through GuidedBackpropReLU.

    Intended to replace a module's relu method (hence the `self` parameter,
    which is unused).
    """
    return GuidedBackpropReLU.apply(input)
def guide_relu_save(self, input):
    """Drop-in ReLU forward that routes through GuidedBackpropReLUSave.

    Intended to replace a module's relu method (hence the `self` parameter,
    which is unused).
    """
    return GuidedBackpropReLUSave.apply(input)
| 31 | 86 | 0.59284 |
795b67af80523221a035b02afc8c26f5af32d550 | 44,526 | py | Python | src/oci/core/models/instance_configuration_launch_instance_details.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/core/models/instance_configuration_launch_instance_details.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/core/models/instance_configuration_launch_instance_details.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class InstanceConfigurationLaunchInstanceDetails(object):
"""
Instance launch details for creating an instance from an instance configuration. Use the `sourceDetails`
parameter to specify whether a boot volume or an image should be used to launch a new instance.
See :class:`LaunchInstanceDetails` for more information.
"""
#: A constant which can be used with the launch_mode property of a InstanceConfigurationLaunchInstanceDetails.
#: This constant has a value of "NATIVE"
LAUNCH_MODE_NATIVE = "NATIVE"
#: A constant which can be used with the launch_mode property of a InstanceConfigurationLaunchInstanceDetails.
#: This constant has a value of "EMULATED"
LAUNCH_MODE_EMULATED = "EMULATED"
#: A constant which can be used with the launch_mode property of a InstanceConfigurationLaunchInstanceDetails.
#: This constant has a value of "PARAVIRTUALIZED"
LAUNCH_MODE_PARAVIRTUALIZED = "PARAVIRTUALIZED"
#: A constant which can be used with the launch_mode property of a InstanceConfigurationLaunchInstanceDetails.
#: This constant has a value of "CUSTOM"
LAUNCH_MODE_CUSTOM = "CUSTOM"
#: A constant which can be used with the preferred_maintenance_action property of a InstanceConfigurationLaunchInstanceDetails.
#: This constant has a value of "LIVE_MIGRATE"
PREFERRED_MAINTENANCE_ACTION_LIVE_MIGRATE = "LIVE_MIGRATE"
#: A constant which can be used with the preferred_maintenance_action property of a InstanceConfigurationLaunchInstanceDetails.
#: This constant has a value of "REBOOT"
PREFERRED_MAINTENANCE_ACTION_REBOOT = "REBOOT"
def __init__(self, **kwargs):
"""
Initializes a new InstanceConfigurationLaunchInstanceDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param availability_domain:
The value to assign to the availability_domain property of this InstanceConfigurationLaunchInstanceDetails.
:type availability_domain: str
:param capacity_reservation_id:
The value to assign to the capacity_reservation_id property of this InstanceConfigurationLaunchInstanceDetails.
:type capacity_reservation_id: str
:param compartment_id:
The value to assign to the compartment_id property of this InstanceConfigurationLaunchInstanceDetails.
:type compartment_id: str
:param create_vnic_details:
The value to assign to the create_vnic_details property of this InstanceConfigurationLaunchInstanceDetails.
:type create_vnic_details: oci.core.models.InstanceConfigurationCreateVnicDetails
:param defined_tags:
The value to assign to the defined_tags property of this InstanceConfigurationLaunchInstanceDetails.
:type defined_tags: dict(str, dict(str, object))
:param display_name:
The value to assign to the display_name property of this InstanceConfigurationLaunchInstanceDetails.
:type display_name: str
:param extended_metadata:
The value to assign to the extended_metadata property of this InstanceConfigurationLaunchInstanceDetails.
:type extended_metadata: dict(str, object)
:param freeform_tags:
The value to assign to the freeform_tags property of this InstanceConfigurationLaunchInstanceDetails.
:type freeform_tags: dict(str, str)
:param ipxe_script:
The value to assign to the ipxe_script property of this InstanceConfigurationLaunchInstanceDetails.
:type ipxe_script: str
:param metadata:
The value to assign to the metadata property of this InstanceConfigurationLaunchInstanceDetails.
:type metadata: dict(str, str)
:param shape:
The value to assign to the shape property of this InstanceConfigurationLaunchInstanceDetails.
:type shape: str
:param shape_config:
The value to assign to the shape_config property of this InstanceConfigurationLaunchInstanceDetails.
:type shape_config: oci.core.models.InstanceConfigurationLaunchInstanceShapeConfigDetails
:param platform_config:
The value to assign to the platform_config property of this InstanceConfigurationLaunchInstanceDetails.
:type platform_config: oci.core.models.InstanceConfigurationLaunchInstancePlatformConfig
:param source_details:
The value to assign to the source_details property of this InstanceConfigurationLaunchInstanceDetails.
:type source_details: oci.core.models.InstanceConfigurationInstanceSourceDetails
:param fault_domain:
The value to assign to the fault_domain property of this InstanceConfigurationLaunchInstanceDetails.
:type fault_domain: str
:param dedicated_vm_host_id:
The value to assign to the dedicated_vm_host_id property of this InstanceConfigurationLaunchInstanceDetails.
:type dedicated_vm_host_id: str
:param launch_mode:
The value to assign to the launch_mode property of this InstanceConfigurationLaunchInstanceDetails.
Allowed values for this property are: "NATIVE", "EMULATED", "PARAVIRTUALIZED", "CUSTOM", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type launch_mode: str
:param launch_options:
The value to assign to the launch_options property of this InstanceConfigurationLaunchInstanceDetails.
:type launch_options: oci.core.models.InstanceConfigurationLaunchOptions
:param agent_config:
The value to assign to the agent_config property of this InstanceConfigurationLaunchInstanceDetails.
:type agent_config: oci.core.models.InstanceConfigurationLaunchInstanceAgentConfigDetails
:param is_pv_encryption_in_transit_enabled:
The value to assign to the is_pv_encryption_in_transit_enabled property of this InstanceConfigurationLaunchInstanceDetails.
:type is_pv_encryption_in_transit_enabled: bool
:param preferred_maintenance_action:
The value to assign to the preferred_maintenance_action property of this InstanceConfigurationLaunchInstanceDetails.
Allowed values for this property are: "LIVE_MIGRATE", "REBOOT", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type preferred_maintenance_action: str
:param instance_options:
The value to assign to the instance_options property of this InstanceConfigurationLaunchInstanceDetails.
:type instance_options: oci.core.models.InstanceConfigurationInstanceOptions
:param availability_config:
The value to assign to the availability_config property of this InstanceConfigurationLaunchInstanceDetails.
:type availability_config: oci.core.models.InstanceConfigurationAvailabilityConfig
:param preemptible_instance_config:
The value to assign to the preemptible_instance_config property of this InstanceConfigurationLaunchInstanceDetails.
:type preemptible_instance_config: oci.core.models.PreemptibleInstanceConfigDetails
"""
self.swagger_types = {
'availability_domain': 'str',
'capacity_reservation_id': 'str',
'compartment_id': 'str',
'create_vnic_details': 'InstanceConfigurationCreateVnicDetails',
'defined_tags': 'dict(str, dict(str, object))',
'display_name': 'str',
'extended_metadata': 'dict(str, object)',
'freeform_tags': 'dict(str, str)',
'ipxe_script': 'str',
'metadata': 'dict(str, str)',
'shape': 'str',
'shape_config': 'InstanceConfigurationLaunchInstanceShapeConfigDetails',
'platform_config': 'InstanceConfigurationLaunchInstancePlatformConfig',
'source_details': 'InstanceConfigurationInstanceSourceDetails',
'fault_domain': 'str',
'dedicated_vm_host_id': 'str',
'launch_mode': 'str',
'launch_options': 'InstanceConfigurationLaunchOptions',
'agent_config': 'InstanceConfigurationLaunchInstanceAgentConfigDetails',
'is_pv_encryption_in_transit_enabled': 'bool',
'preferred_maintenance_action': 'str',
'instance_options': 'InstanceConfigurationInstanceOptions',
'availability_config': 'InstanceConfigurationAvailabilityConfig',
'preemptible_instance_config': 'PreemptibleInstanceConfigDetails'
}
self.attribute_map = {
'availability_domain': 'availabilityDomain',
'capacity_reservation_id': 'capacityReservationId',
'compartment_id': 'compartmentId',
'create_vnic_details': 'createVnicDetails',
'defined_tags': 'definedTags',
'display_name': 'displayName',
'extended_metadata': 'extendedMetadata',
'freeform_tags': 'freeformTags',
'ipxe_script': 'ipxeScript',
'metadata': 'metadata',
'shape': 'shape',
'shape_config': 'shapeConfig',
'platform_config': 'platformConfig',
'source_details': 'sourceDetails',
'fault_domain': 'faultDomain',
'dedicated_vm_host_id': 'dedicatedVmHostId',
'launch_mode': 'launchMode',
'launch_options': 'launchOptions',
'agent_config': 'agentConfig',
'is_pv_encryption_in_transit_enabled': 'isPvEncryptionInTransitEnabled',
'preferred_maintenance_action': 'preferredMaintenanceAction',
'instance_options': 'instanceOptions',
'availability_config': 'availabilityConfig',
'preemptible_instance_config': 'preemptibleInstanceConfig'
}
self._availability_domain = None
self._capacity_reservation_id = None
self._compartment_id = None
self._create_vnic_details = None
self._defined_tags = None
self._display_name = None
self._extended_metadata = None
self._freeform_tags = None
self._ipxe_script = None
self._metadata = None
self._shape = None
self._shape_config = None
self._platform_config = None
self._source_details = None
self._fault_domain = None
self._dedicated_vm_host_id = None
self._launch_mode = None
self._launch_options = None
self._agent_config = None
self._is_pv_encryption_in_transit_enabled = None
self._preferred_maintenance_action = None
self._instance_options = None
self._availability_config = None
self._preemptible_instance_config = None
@property
def availability_domain(self):
"""
Gets the availability_domain of this InstanceConfigurationLaunchInstanceDetails.
The availability domain of the instance.
Example: `Uocm:PHX-AD-1`
:return: The availability_domain of this InstanceConfigurationLaunchInstanceDetails.
:rtype: str
"""
return self._availability_domain
@availability_domain.setter
def availability_domain(self, availability_domain):
"""
Sets the availability_domain of this InstanceConfigurationLaunchInstanceDetails.
The availability domain of the instance.
Example: `Uocm:PHX-AD-1`
:param availability_domain: The availability_domain of this InstanceConfigurationLaunchInstanceDetails.
:type: str
"""
self._availability_domain = availability_domain
@property
def capacity_reservation_id(self):
"""
Gets the capacity_reservation_id of this InstanceConfigurationLaunchInstanceDetails.
The OCID of the compute capacity reservation this instance is launched under.
:return: The capacity_reservation_id of this InstanceConfigurationLaunchInstanceDetails.
:rtype: str
"""
return self._capacity_reservation_id
@capacity_reservation_id.setter
def capacity_reservation_id(self, capacity_reservation_id):
"""
Sets the capacity_reservation_id of this InstanceConfigurationLaunchInstanceDetails.
The OCID of the compute capacity reservation this instance is launched under.
:param capacity_reservation_id: The capacity_reservation_id of this InstanceConfigurationLaunchInstanceDetails.
:type: str
"""
self._capacity_reservation_id = capacity_reservation_id
@property
def compartment_id(self):
"""
Gets the compartment_id of this InstanceConfigurationLaunchInstanceDetails.
The OCID of the compartment.
:return: The compartment_id of this InstanceConfigurationLaunchInstanceDetails.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this InstanceConfigurationLaunchInstanceDetails.
The OCID of the compartment.
:param compartment_id: The compartment_id of this InstanceConfigurationLaunchInstanceDetails.
:type: str
"""
self._compartment_id = compartment_id
@property
def create_vnic_details(self):
"""
Gets the create_vnic_details of this InstanceConfigurationLaunchInstanceDetails.
:return: The create_vnic_details of this InstanceConfigurationLaunchInstanceDetails.
:rtype: oci.core.models.InstanceConfigurationCreateVnicDetails
"""
return self._create_vnic_details
@create_vnic_details.setter
def create_vnic_details(self, create_vnic_details):
"""
Sets the create_vnic_details of this InstanceConfigurationLaunchInstanceDetails.
:param create_vnic_details: The create_vnic_details of this InstanceConfigurationLaunchInstanceDetails.
:type: oci.core.models.InstanceConfigurationCreateVnicDetails
"""
self._create_vnic_details = create_vnic_details
@property
def defined_tags(self):
"""
Gets the defined_tags of this InstanceConfigurationLaunchInstanceDetails.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this InstanceConfigurationLaunchInstanceDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this InstanceConfigurationLaunchInstanceDetails.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this InstanceConfigurationLaunchInstanceDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
@property
def display_name(self):
"""
Gets the display_name of this InstanceConfigurationLaunchInstanceDetails.
A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
Example: `My bare metal instance`
:return: The display_name of this InstanceConfigurationLaunchInstanceDetails.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this InstanceConfigurationLaunchInstanceDetails.
A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
Example: `My bare metal instance`
:param display_name: The display_name of this InstanceConfigurationLaunchInstanceDetails.
:type: str
"""
self._display_name = display_name
@property
def extended_metadata(self):
"""
Gets the extended_metadata of this InstanceConfigurationLaunchInstanceDetails.
Additional metadata key/value pairs that you provide. They serve the same purpose and
functionality as fields in the `metadata` object.
They are distinguished from `metadata` fields in that these can be nested JSON objects
(whereas `metadata` fields are string/string maps only).
The combined size of the `metadata` and `extendedMetadata` objects can be a maximum of
32,000 bytes.
:return: The extended_metadata of this InstanceConfigurationLaunchInstanceDetails.
:rtype: dict(str, object)
"""
return self._extended_metadata
@extended_metadata.setter
def extended_metadata(self, extended_metadata):
"""
Sets the extended_metadata of this InstanceConfigurationLaunchInstanceDetails.
Additional metadata key/value pairs that you provide. They serve the same purpose and
functionality as fields in the `metadata` object.
They are distinguished from `metadata` fields in that these can be nested JSON objects
(whereas `metadata` fields are string/string maps only).
The combined size of the `metadata` and `extendedMetadata` objects can be a maximum of
32,000 bytes.
:param extended_metadata: The extended_metadata of this InstanceConfigurationLaunchInstanceDetails.
:type: dict(str, object)
"""
self._extended_metadata = extended_metadata
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this InstanceConfigurationLaunchInstanceDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this InstanceConfigurationLaunchInstanceDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this InstanceConfigurationLaunchInstanceDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this InstanceConfigurationLaunchInstanceDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def ipxe_script(self):
"""
Gets the ipxe_script of this InstanceConfigurationLaunchInstanceDetails.
This is an advanced option.
When a bare metal or virtual machine
instance boots, the iPXE firmware that runs on the instance is
configured to run an iPXE script to continue the boot process.
If you want more control over the boot process, you can provide
your own custom iPXE script that will run when the instance boots;
however, you should be aware that the same iPXE script will run
every time an instance boots; not only after the initial
LaunchInstance call.
The default iPXE script connects to the instance's local boot
volume over iSCSI and performs a network boot. If you use a custom iPXE
script and want to network-boot from the instance's local boot volume
over iSCSI the same way as the default iPXE script, you should use the
following iSCSI IP address: 169.254.0.2, and boot volume IQN:
iqn.2015-02.oracle.boot.
For more information about the Bring Your Own Image feature of
Oracle Cloud Infrastructure, see
`Bring Your Own Image`__.
For more information about iPXE, see http://ipxe.org.
__ https://docs.cloud.oracle.com/iaas/Content/Compute/References/bringyourownimage.htm
:return: The ipxe_script of this InstanceConfigurationLaunchInstanceDetails.
:rtype: str
"""
return self._ipxe_script
@ipxe_script.setter
def ipxe_script(self, ipxe_script):
"""
Sets the ipxe_script of this InstanceConfigurationLaunchInstanceDetails.
This is an advanced option.
When a bare metal or virtual machine
instance boots, the iPXE firmware that runs on the instance is
configured to run an iPXE script to continue the boot process.
If you want more control over the boot process, you can provide
your own custom iPXE script that will run when the instance boots;
however, you should be aware that the same iPXE script will run
every time an instance boots; not only after the initial
LaunchInstance call.
The default iPXE script connects to the instance's local boot
volume over iSCSI and performs a network boot. If you use a custom iPXE
script and want to network-boot from the instance's local boot volume
over iSCSI the same way as the default iPXE script, you should use the
following iSCSI IP address: 169.254.0.2, and boot volume IQN:
iqn.2015-02.oracle.boot.
For more information about the Bring Your Own Image feature of
Oracle Cloud Infrastructure, see
`Bring Your Own Image`__.
For more information about iPXE, see http://ipxe.org.
__ https://docs.cloud.oracle.com/iaas/Content/Compute/References/bringyourownimage.htm
:param ipxe_script: The ipxe_script of this InstanceConfigurationLaunchInstanceDetails.
:type: str
"""
self._ipxe_script = ipxe_script
@property
def metadata(self):
"""
Gets the metadata of this InstanceConfigurationLaunchInstanceDetails.
Custom metadata key/value pairs that you provide, such as the SSH public key
required to connect to the instance.
A metadata service runs on every launched instance. The service is an HTTP
endpoint listening on 169.254.169.254. You can use the service to:
* Provide information to `Cloud-Init`__
to be used for various system initialization tasks.
* Get information about the instance, including the custom metadata that you
provide when you launch the instance.
**Providing Cloud-Init Metadata**
You can use the following metadata key names to provide information to
Cloud-Init:
**\"ssh_authorized_keys\"** - Provide one or more public SSH keys to be
included in the `~/.ssh/authorized_keys` file for the default user on the
instance. Use a newline character to separate multiple keys. The SSH
keys must be in the format necessary for the `authorized_keys` file, as shown
in the example below.
**\"user_data\"** - Provide your own base64-encoded data to be used by
Cloud-Init to run custom scripts or provide custom Cloud-Init configuration. For
information about how to take advantage of user data, see the
`Cloud-Init Documentation`__.
**Metadata Example**
\"metadata\" : {
\"quake_bot_level\" : \"Severe\",
\"ssh_authorized_keys\" : \"ssh-rsa <your_public_SSH_key>== rsa-key-20160227\",
\"user_data\" : \"<your_public_SSH_key>==\"
}
**Getting Metadata on the Instance**
To get information about your instance, connect to the instance using SSH and issue any of the
following GET requests:
curl -H \"Authorization: Bearer Oracle\" http://169.254.169.254/opc/v2/instance/
curl -H \"Authorization: Bearer Oracle\" http://169.254.169.254/opc/v2/instance/metadata/
curl -H \"Authorization: Bearer Oracle\" http://169.254.169.254/opc/v2/instance/metadata/<any-key-name>
You'll get back a response that includes all the instance information; only the metadata information; or
the metadata information for the specified key name, respectively.
The combined size of the `metadata` and `extendedMetadata` objects can be a maximum of 32,000 bytes.
__ https://cloudinit.readthedocs.org/en/latest/
__ http://cloudinit.readthedocs.org/en/latest/topics/format.html
:return: The metadata of this InstanceConfigurationLaunchInstanceDetails.
:rtype: dict(str, str)
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this InstanceConfigurationLaunchInstanceDetails.
Custom metadata key/value pairs that you provide, such as the SSH public key
required to connect to the instance.
A metadata service runs on every launched instance. The service is an HTTP
endpoint listening on 169.254.169.254. You can use the service to:
* Provide information to `Cloud-Init`__
to be used for various system initialization tasks.
* Get information about the instance, including the custom metadata that you
provide when you launch the instance.
**Providing Cloud-Init Metadata**
You can use the following metadata key names to provide information to
Cloud-Init:
**\"ssh_authorized_keys\"** - Provide one or more public SSH keys to be
included in the `~/.ssh/authorized_keys` file for the default user on the
instance. Use a newline character to separate multiple keys. The SSH
keys must be in the format necessary for the `authorized_keys` file, as shown
in the example below.
**\"user_data\"** - Provide your own base64-encoded data to be used by
Cloud-Init to run custom scripts or provide custom Cloud-Init configuration. For
information about how to take advantage of user data, see the
`Cloud-Init Documentation`__.
**Metadata Example**
\"metadata\" : {
\"quake_bot_level\" : \"Severe\",
\"ssh_authorized_keys\" : \"ssh-rsa <your_public_SSH_key>== rsa-key-20160227\",
\"user_data\" : \"<your_public_SSH_key>==\"
}
**Getting Metadata on the Instance**
To get information about your instance, connect to the instance using SSH and issue any of the
following GET requests:
curl -H \"Authorization: Bearer Oracle\" http://169.254.169.254/opc/v2/instance/
curl -H \"Authorization: Bearer Oracle\" http://169.254.169.254/opc/v2/instance/metadata/
curl -H \"Authorization: Bearer Oracle\" http://169.254.169.254/opc/v2/instance/metadata/<any-key-name>
You'll get back a response that includes all the instance information; only the metadata information; or
the metadata information for the specified key name, respectively.
The combined size of the `metadata` and `extendedMetadata` objects can be a maximum of 32,000 bytes.
__ https://cloudinit.readthedocs.org/en/latest/
__ http://cloudinit.readthedocs.org/en/latest/topics/format.html
:param metadata: The metadata of this InstanceConfigurationLaunchInstanceDetails.
:type: dict(str, str)
"""
self._metadata = metadata
@property
def shape(self):
"""
Gets the shape of this InstanceConfigurationLaunchInstanceDetails.
The shape of an instance. The shape determines the number of CPUs, amount of memory,
and other resources allocated to the instance.
You can enumerate all available shapes by calling :func:`list_shapes`.
:return: The shape of this InstanceConfigurationLaunchInstanceDetails.
:rtype: str
"""
return self._shape
@shape.setter
def shape(self, shape):
"""
Sets the shape of this InstanceConfigurationLaunchInstanceDetails.
The shape of an instance. The shape determines the number of CPUs, amount of memory,
and other resources allocated to the instance.
You can enumerate all available shapes by calling :func:`list_shapes`.
:param shape: The shape of this InstanceConfigurationLaunchInstanceDetails.
:type: str
"""
self._shape = shape
@property
def shape_config(self):
"""
Gets the shape_config of this InstanceConfigurationLaunchInstanceDetails.
:return: The shape_config of this InstanceConfigurationLaunchInstanceDetails.
:rtype: oci.core.models.InstanceConfigurationLaunchInstanceShapeConfigDetails
"""
return self._shape_config
@shape_config.setter
def shape_config(self, shape_config):
"""
Sets the shape_config of this InstanceConfigurationLaunchInstanceDetails.
:param shape_config: The shape_config of this InstanceConfigurationLaunchInstanceDetails.
:type: oci.core.models.InstanceConfigurationLaunchInstanceShapeConfigDetails
"""
self._shape_config = shape_config
@property
def platform_config(self):
"""
Gets the platform_config of this InstanceConfigurationLaunchInstanceDetails.
:return: The platform_config of this InstanceConfigurationLaunchInstanceDetails.
:rtype: oci.core.models.InstanceConfigurationLaunchInstancePlatformConfig
"""
return self._platform_config
@platform_config.setter
def platform_config(self, platform_config):
"""
Sets the platform_config of this InstanceConfigurationLaunchInstanceDetails.
:param platform_config: The platform_config of this InstanceConfigurationLaunchInstanceDetails.
:type: oci.core.models.InstanceConfigurationLaunchInstancePlatformConfig
"""
self._platform_config = platform_config
@property
def source_details(self):
"""
Gets the source_details of this InstanceConfigurationLaunchInstanceDetails.
:return: The source_details of this InstanceConfigurationLaunchInstanceDetails.
:rtype: oci.core.models.InstanceConfigurationInstanceSourceDetails
"""
return self._source_details
@source_details.setter
def source_details(self, source_details):
"""
Sets the source_details of this InstanceConfigurationLaunchInstanceDetails.
:param source_details: The source_details of this InstanceConfigurationLaunchInstanceDetails.
:type: oci.core.models.InstanceConfigurationInstanceSourceDetails
"""
self._source_details = source_details
@property
def fault_domain(self):
"""
Gets the fault_domain of this InstanceConfigurationLaunchInstanceDetails.
A fault domain is a grouping of hardware and infrastructure within an availability domain.
Each availability domain contains three fault domains. Fault domains let you distribute your
instances so that they are not on the same physical hardware within a single availability domain.
A hardware failure or Compute hardware maintenance that affects one fault domain does not affect
instances in other fault domains.
If you do not specify the fault domain, the system selects one for you.
To get a list of fault domains, use the
:func:`list_fault_domains` operation in the
Identity and Access Management Service API.
Example: `FAULT-DOMAIN-1`
:return: The fault_domain of this InstanceConfigurationLaunchInstanceDetails.
:rtype: str
"""
return self._fault_domain
@fault_domain.setter
def fault_domain(self, fault_domain):
"""
Sets the fault_domain of this InstanceConfigurationLaunchInstanceDetails.
A fault domain is a grouping of hardware and infrastructure within an availability domain.
Each availability domain contains three fault domains. Fault domains let you distribute your
instances so that they are not on the same physical hardware within a single availability domain.
A hardware failure or Compute hardware maintenance that affects one fault domain does not affect
instances in other fault domains.
If you do not specify the fault domain, the system selects one for you.
To get a list of fault domains, use the
:func:`list_fault_domains` operation in the
Identity and Access Management Service API.
Example: `FAULT-DOMAIN-1`
:param fault_domain: The fault_domain of this InstanceConfigurationLaunchInstanceDetails.
:type: str
"""
self._fault_domain = fault_domain
@property
def dedicated_vm_host_id(self):
"""
Gets the dedicated_vm_host_id of this InstanceConfigurationLaunchInstanceDetails.
The OCID of dedicated VM host.
Dedicated VM hosts can be used when launching individual instances from an instance configuration. They
cannot be used to launch instance pools.
:return: The dedicated_vm_host_id of this InstanceConfigurationLaunchInstanceDetails.
:rtype: str
"""
return self._dedicated_vm_host_id
@dedicated_vm_host_id.setter
def dedicated_vm_host_id(self, dedicated_vm_host_id):
"""
Sets the dedicated_vm_host_id of this InstanceConfigurationLaunchInstanceDetails.
The OCID of dedicated VM host.
Dedicated VM hosts can be used when launching individual instances from an instance configuration. They
cannot be used to launch instance pools.
:param dedicated_vm_host_id: The dedicated_vm_host_id of this InstanceConfigurationLaunchInstanceDetails.
:type: str
"""
self._dedicated_vm_host_id = dedicated_vm_host_id
@property
def launch_mode(self):
"""
Gets the launch_mode of this InstanceConfigurationLaunchInstanceDetails.
Specifies the configuration mode for launching virtual machine (VM) instances. The configuration modes are:
* `NATIVE` - VM instances launch with iSCSI boot and VFIO devices. The default value for platform images.
* `EMULATED` - VM instances launch with emulated devices, such as the E1000 network driver and emulated SCSI disk controller.
* `PARAVIRTUALIZED` - VM instances launch with paravirtualized devices using VirtIO drivers.
* `CUSTOM` - VM instances launch with custom configuration settings specified in the `LaunchOptions` parameter.
Allowed values for this property are: "NATIVE", "EMULATED", "PARAVIRTUALIZED", "CUSTOM", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The launch_mode of this InstanceConfigurationLaunchInstanceDetails.
:rtype: str
"""
return self._launch_mode
@launch_mode.setter
def launch_mode(self, launch_mode):
    """
    Sets the launch_mode of this InstanceConfigurationLaunchInstanceDetails.
    Specifies the configuration mode for launching virtual machine (VM) instances. The configuration modes are:
    * `NATIVE` - VM instances launch with iSCSI boot and VFIO devices. The default value for platform images.
    * `EMULATED` - VM instances launch with emulated devices, such as the E1000 network driver and emulated SCSI disk controller.
    * `PARAVIRTUALIZED` - VM instances launch with paravirtualized devices using VirtIO drivers.
    * `CUSTOM` - VM instances launch with custom configuration settings specified in the `LaunchOptions` parameter.
    :param launch_mode: The launch_mode of this InstanceConfigurationLaunchInstanceDetails.
    :type: str
    """
    # Values outside the service enum are coerced to the sentinel rather than
    # rejected, mirroring how unrecognized service responses are handled.
    if value_allowed_none_or_none_sentinel(
            launch_mode, ["NATIVE", "EMULATED", "PARAVIRTUALIZED", "CUSTOM"]):
        self._launch_mode = launch_mode
    else:
        self._launch_mode = 'UNKNOWN_ENUM_VALUE'
@property
def launch_options(self):
"""
Gets the launch_options of this InstanceConfigurationLaunchInstanceDetails.
:return: The launch_options of this InstanceConfigurationLaunchInstanceDetails.
:rtype: oci.core.models.InstanceConfigurationLaunchOptions
"""
return self._launch_options
@launch_options.setter
def launch_options(self, launch_options):
"""
Sets the launch_options of this InstanceConfigurationLaunchInstanceDetails.
:param launch_options: The launch_options of this InstanceConfigurationLaunchInstanceDetails.
:type: oci.core.models.InstanceConfigurationLaunchOptions
"""
self._launch_options = launch_options
@property
def agent_config(self):
"""
Gets the agent_config of this InstanceConfigurationLaunchInstanceDetails.
:return: The agent_config of this InstanceConfigurationLaunchInstanceDetails.
:rtype: oci.core.models.InstanceConfigurationLaunchInstanceAgentConfigDetails
"""
return self._agent_config
@agent_config.setter
def agent_config(self, agent_config):
"""
Sets the agent_config of this InstanceConfigurationLaunchInstanceDetails.
:param agent_config: The agent_config of this InstanceConfigurationLaunchInstanceDetails.
:type: oci.core.models.InstanceConfigurationLaunchInstanceAgentConfigDetails
"""
self._agent_config = agent_config
@property
def is_pv_encryption_in_transit_enabled(self):
"""
Gets the is_pv_encryption_in_transit_enabled of this InstanceConfigurationLaunchInstanceDetails.
Whether to enable in-transit encryption for the data volume's paravirtualized attachment. The default value is false.
:return: The is_pv_encryption_in_transit_enabled of this InstanceConfigurationLaunchInstanceDetails.
:rtype: bool
"""
return self._is_pv_encryption_in_transit_enabled
@is_pv_encryption_in_transit_enabled.setter
def is_pv_encryption_in_transit_enabled(self, is_pv_encryption_in_transit_enabled):
"""
Sets the is_pv_encryption_in_transit_enabled of this InstanceConfigurationLaunchInstanceDetails.
Whether to enable in-transit encryption for the data volume's paravirtualized attachment. The default value is false.
:param is_pv_encryption_in_transit_enabled: The is_pv_encryption_in_transit_enabled of this InstanceConfigurationLaunchInstanceDetails.
:type: bool
"""
self._is_pv_encryption_in_transit_enabled = is_pv_encryption_in_transit_enabled
@property
def preferred_maintenance_action(self):
"""
Gets the preferred_maintenance_action of this InstanceConfigurationLaunchInstanceDetails.
The preferred maintenance action for an instance. The default is LIVE_MIGRATE, if live migration is supported.
* `LIVE_MIGRATE` - Run maintenance using a live migration.
* `REBOOT` - Run maintenance using a reboot.
Allowed values for this property are: "LIVE_MIGRATE", "REBOOT", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The preferred_maintenance_action of this InstanceConfigurationLaunchInstanceDetails.
:rtype: str
"""
return self._preferred_maintenance_action
@preferred_maintenance_action.setter
def preferred_maintenance_action(self, preferred_maintenance_action):
    """
    Sets the preferred_maintenance_action of this InstanceConfigurationLaunchInstanceDetails.
    The preferred maintenance action for an instance. The default is LIVE_MIGRATE, if live migration is supported.
    * `LIVE_MIGRATE` - Run maintenance using a live migration.
    * `REBOOT` - Run maintenance using a reboot.
    :param preferred_maintenance_action: The preferred_maintenance_action of this InstanceConfigurationLaunchInstanceDetails.
    :type: str
    """
    # Values outside the service enum are coerced to the sentinel rather than
    # rejected, mirroring how unrecognized service responses are handled.
    if value_allowed_none_or_none_sentinel(
            preferred_maintenance_action, ["LIVE_MIGRATE", "REBOOT"]):
        self._preferred_maintenance_action = preferred_maintenance_action
    else:
        self._preferred_maintenance_action = 'UNKNOWN_ENUM_VALUE'
@property
def instance_options(self):
"""
Gets the instance_options of this InstanceConfigurationLaunchInstanceDetails.
:return: The instance_options of this InstanceConfigurationLaunchInstanceDetails.
:rtype: oci.core.models.InstanceConfigurationInstanceOptions
"""
return self._instance_options
@instance_options.setter
def instance_options(self, instance_options):
"""
Sets the instance_options of this InstanceConfigurationLaunchInstanceDetails.
:param instance_options: The instance_options of this InstanceConfigurationLaunchInstanceDetails.
:type: oci.core.models.InstanceConfigurationInstanceOptions
"""
self._instance_options = instance_options
@property
def availability_config(self):
"""
Gets the availability_config of this InstanceConfigurationLaunchInstanceDetails.
:return: The availability_config of this InstanceConfigurationLaunchInstanceDetails.
:rtype: oci.core.models.InstanceConfigurationAvailabilityConfig
"""
return self._availability_config
@availability_config.setter
def availability_config(self, availability_config):
"""
Sets the availability_config of this InstanceConfigurationLaunchInstanceDetails.
:param availability_config: The availability_config of this InstanceConfigurationLaunchInstanceDetails.
:type: oci.core.models.InstanceConfigurationAvailabilityConfig
"""
self._availability_config = availability_config
@property
def preemptible_instance_config(self):
"""
Gets the preemptible_instance_config of this InstanceConfigurationLaunchInstanceDetails.
:return: The preemptible_instance_config of this InstanceConfigurationLaunchInstanceDetails.
:rtype: oci.core.models.PreemptibleInstanceConfigDetails
"""
return self._preemptible_instance_config
@preemptible_instance_config.setter
def preemptible_instance_config(self, preemptible_instance_config):
"""
Sets the preemptible_instance_config of this InstanceConfigurationLaunchInstanceDetails.
:param preemptible_instance_config: The preemptible_instance_config of this InstanceConfigurationLaunchInstanceDetails.
:type: oci.core.models.PreemptibleInstanceConfigDetails
"""
self._preemptible_instance_config = preemptible_instance_config
def __repr__(self):
    # Render the model through the SDK's standard pretty-printer helper.
    return formatted_flat_dict(self)
def __eq__(self, other):
    """Two model instances are equal when all of their attributes match."""
    return other is not None and self.__dict__ == other.__dict__
def __ne__(self, other):
    # Defined in terms of __eq__ so the two comparisons always stay consistent.
    return not self == other
| 43.02029 | 245 | 0.708013 |
795b68083931d595ec6a4ff8b5164bf60a023a58 | 4,339 | py | Python | NNet_Core/NN_Analyzer.py | mingrui/Deep_MRI_brain_extraction | 96690eaddef28de5a2d0e896d2a524f17245c16e | [
"MIT"
] | 1 | 2021-07-17T08:19:42.000Z | 2021-07-17T08:19:42.000Z | NNet_Core/NN_Analyzer.py | neurofractal/Deep_MRI_brain_extraction | 96690eaddef28de5a2d0e896d2a524f17245c16e | [
"MIT"
] | null | null | null | NNet_Core/NN_Analyzer.py | neurofractal/Deep_MRI_brain_extraction | 96690eaddef28de5a2d0e896d2a524f17245c16e | [
"MIT"
] | 1 | 2019-10-22T20:21:10.000Z | 2019-10-22T20:21:10.000Z | """
This software is an implementation of
Deep MRI brain extraction: A 3D convolutional neural network for skull stripping
You can download the paper at http://dx.doi.org/10.1016/j.neuroimage.2016.01.024
If you use this software for your projects please cite:
Kleesiek and Urban et al, Deep MRI brain extraction: A 3D convolutional neural network for skull stripping,
NeuroImage, Volume 129, April 2016, Pages 460-469.
The MIT License (MIT)
Copyright (c) 2016 Gregor Urban, Jens Kleesiek
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import theano
import theano.tensor as T
import numpy as np
class Analyzer(object):
def __init__(self, cnn):
self._cnn = cnn
self._ranonce = False
self._ranonce2 = False
####################
def _runonce(self):
if self._ranonce:
return
print self,'compiling...'
self._output_function = theano.function([self._cnn.layers[0].input], [lay.output for lay in self._cnn.layers])
self._ranonce=True
####################
def _runonce2(self):
if self._ranonce2:
return
print self,'compiling...'
output_layer_Gradients = T.grad(self._cnn.output_layer_Loss, self._cnn.params, disconnected_inputs="warn")
self._output_function2 = theano.function([self._cnn.x, self._cnn.y], [x for x in output_layer_Gradients], on_unused_input='warn')
# = theano.function([self._cnn.layers[0].input, self._cnn.y], [lay.output for lay in self._cnn.layers])
self._ranonce2=True
####################
def analyze_forward_pass(self, *input):
""" input should be a list of all inputs. ((DO NOT INCLUDE labels/targets!))"""
self._runonce()
outputs = self._output_function(*input)
print
print 'Analyzing internal outputs of network',self._cnn,' (I am',self,') ... '
for lay,out in zip(self._cnn.layers, outputs):
mi,ma = np.min(out), np.max(out)
mea,med = np.mean(out),np.median(out)
std = np.std(out)
print '{:^100}: {:^30}, min/max = [{:9.5f}, {:9.5f}], mean/median = ({:9.5f}, {:9.5f}), std = {:9.5f}'.format(lay,out.shape,mi,ma,mea,med,std)
print
return outputs
####################
def analyze_gradients(self, *input):
""" input should be a list of all inputs and labels/targets"""
self._runonce2()
outputs = self._output_function2(*input)
print
print 'Analyzing internal gradients of network',self._cnn,' (I am',self,') ... '
i = 0
j = 0
for lay in self._cnn.layers:
try:
j = len(lay.params)
except:
j = 0
if j:
for out in outputs[i:i+j]:
mi,ma = np.min(out), np.max(out)
mea,med = np.mean(out),np.median(out)
std = np.std(out)
print '{:^100}: {:^30}, min/max = [{:9.5f}, {:9.5f}], mean/median = ({:9.5f}, {:9.5f}), std = {:9.5f}'.format(lay,out.shape,mi,ma,mea,med,std)
else:
print '{:^100}: no parameters'.format(lay)
i+=j
print
return outputs
####################
| 37.730435 | 162 | 0.608666 |
795b68f81bf3aadbdcd53d9aa1794185f7218f31 | 2,037 | py | Python | saleor/graphql/shop/schema.py | eanknd/saleor | 08aa724176be00d7aaf654f14e9ae99dd4327f97 | [
"CC-BY-4.0"
] | 1,392 | 2021-10-06T15:54:28.000Z | 2022-03-31T20:50:55.000Z | saleor/graphql/shop/schema.py | eanknd/saleor | 08aa724176be00d7aaf654f14e9ae99dd4327f97 | [
"CC-BY-4.0"
] | 888 | 2021-10-06T10:48:54.000Z | 2022-03-31T11:00:30.000Z | saleor/graphql/shop/schema.py | gustavoarmoa/saleor | f81b2f347e4c7a624cd68a1eca3b0a5611498f6e | [
"CC-BY-4.0"
] | 538 | 2021-10-07T16:21:27.000Z | 2022-03-31T22:58:57.000Z | import graphene
from ...core.permissions import GiftcardPermissions, OrderPermissions
from ..core.fields import PermissionsField
from ..translations.mutations import ShopSettingsTranslate
from .mutations import (
GiftCardSettingsUpdate,
OrderSettingsUpdate,
ShopAddressUpdate,
ShopDomainUpdate,
ShopFetchTaxRates,
ShopSettingsUpdate,
StaffNotificationRecipientCreate,
StaffNotificationRecipientDelete,
StaffNotificationRecipientUpdate,
)
from .types import GiftCardSettings, OrderSettings, Shop
class ShopQueries(graphene.ObjectType):
    # Read-only queries for shop-wide configuration.  Comments only -- a class
    # docstring would be exposed by graphene as the GraphQL type description.
    shop = graphene.Field(
        Shop,
        description="Return information about the shop.",
        required=True,
    )
    # Staff-only: gated on MANAGE_ORDERS.
    order_settings = PermissionsField(
        OrderSettings,
        description="Order related settings from site settings.",
        permissions=[OrderPermissions.MANAGE_ORDERS],
    )
    # Staff-only: gated on MANAGE_GIFT_CARD.
    gift_card_settings = PermissionsField(
        GiftCardSettings,
        description="Gift card related settings from site settings.",
        required=True,
        permissions=[GiftcardPermissions.MANAGE_GIFT_CARD],
    )

    def resolve_shop(self, _info):
        # Shop is a stateless facade type; nothing to look up.
        return Shop()

    def resolve_order_settings(self, info):
        # Both settings queries resolve to the Django site settings record.
        return info.context.site.settings

    def resolve_gift_card_settings(self, info):
        return info.context.site.settings
class ShopMutations(graphene.ObjectType):
    # Registry of shop-level mutations.  Comments only -- a class docstring
    # would be exposed by graphene as the GraphQL type description.
    # Staff notification recipient CRUD.
    staff_notification_recipient_create = StaffNotificationRecipientCreate.Field()
    staff_notification_recipient_update = StaffNotificationRecipientUpdate.Field()
    staff_notification_recipient_delete = StaffNotificationRecipientDelete.Field()
    # Core shop settings.
    shop_domain_update = ShopDomainUpdate.Field()
    shop_settings_update = ShopSettingsUpdate.Field()
    shop_fetch_tax_rates = ShopFetchTaxRates.Field()
    shop_settings_translate = ShopSettingsTranslate.Field()
    shop_address_update = ShopAddressUpdate.Field()
    # Order and gift-card related site settings.
    order_settings_update = OrderSettingsUpdate.Field()
    gift_card_settings_update = GiftCardSettingsUpdate.Field()
795b693640b8ecd8555953c652b9f9b700e5c9dc | 14,181 | py | Python | servertools/camera.py | barretobrock/server-tools | 2f2b899994df90817686b2232d3d65e53defd8c2 | [
"MIT"
] | null | null | null | servertools/camera.py | barretobrock/server-tools | 2f2b899994df90817686b2232d3d65e53defd8c2 | [
"MIT"
] | null | null | null | servertools/camera.py | barretobrock/server-tools | 2f2b899994df90817686b2232d3d65e53defd8c2 | [
"MIT"
] | null | null | null | import os
import re
from datetime import (
datetime as dt,
timedelta
)
import requests
from requests.auth import HTTPDigestAuth
from requests.exceptions import ConnectionError
from typing import (
Optional,
List,
Dict,
Union
)
import amcrest
from loguru import logger
from reolink_api import Camera
from kavalkilu import Keys
# TODO:
# - add get_dimensions (sub or main stream) methods to both classes
# - add means of drawing motion on captured frames inside each class
# - include an option to clip only frames that have motion with a bit of buffer
class Reolink(Camera):
    """Wrapper for Reolink's Camera class.

    Injects credentials from the local key store and binds a child logger.
    """

    def __init__(self, ip: str, parent_log: logger = None):
        """
        Args:
            ip: IP address of the camera.
            parent_log: parent loguru logger to bind a child from. Fix: the
                default is None, but the original called .bind() on it
                unconditionally, which raised AttributeError; fall back to the
                module-level logger instead.
        """
        self.ip = ip
        base_log = parent_log if parent_log is not None else logger
        self.logg = base_log.bind(child_name=self.__class__.__name__)
        creds = Keys().get_key('webcam')
        super().__init__(self.ip, username=creds['un'], password=creds['pw'])

    def snapshot(self, filepath: str) -> bool:
        """Takes a snapshot - mirrors the similar method in Amcrest,
        though these POE cameras seem to be more stable with regards to connectivity"""
        self.logg.debug('Taking snapshot...')
        img = self.get_snap()
        img.save(filepath)
        return True

    def get_dimensions(self, stream: str = 'sub') -> List[int]:
        """Gets the video dimensions of the camera's stream.

        Args:
            stream: which stream to inspect ('sub' or 'main').

        Returns:
            [width, height] as ints, parsed from a 'W*H' string.
        """
        dims = self.get_recording_encoding()[0]['initial']['Enc'][f'{stream.lower()}Stream']['size']
        # Split by '*', convert to int (e.g. '640*480' -> [640, 480])
        dims = [int(x) for x in dims.split('*')]
        return dims
class Amcrest:
    """Amcrest camera-related methods (motion arming, PTZ, snapshots, logs)."""
    # Maps the device "class" string reported by the camera to a friendly type.
    camera_types = {
        'DB': 'doorbell',
        'IPC': 'ip_cam'
    }

    def __init__(self, ip: str, port: int = 80, parent_log: logger = None):
        """
        Args:
            ip: IP address of the camera.
            port: HTTP port (default 80).
            parent_log: parent loguru logger. Fix: the default is None, but the
                original called .bind() on it unconditionally (AttributeError);
                fall back to the module-level logger.
        """
        self.ip = ip
        base_log = parent_log if parent_log is not None else logger
        self.logg = base_log.bind(child_name=self.__class__.__name__)
        self.creds = Keys().get_key('webcam')
        self.base_url = f'http://{ip}/cgi-bin'
        self.base_url_with_cred = f'http://{self.creds["un"]}:{self.creds["pw"]}@{ip}/cgi-bin'
        self.config_url = f'{self.base_url}/configManager.cgi?action=setConfig'
        try:
            self.camera = amcrest.AmcrestCamera(ip, port, self.creds['un'], self.creds['pw']).camera
            self.is_connected = True
            name = re.search(r'(?<=Name=).*(?=\r)', self.camera.video_channel_title).group()
            model = re.search(r'(?<=type=).*(?=\r)', self.camera.device_type).group()
            camera_type = re.search(r'(?<=class=).*(?=\r)', self.camera.device_class).group()
        except (ConnectionError, amcrest.exceptions.CommError):
            # Camera unreachable: record the disconnected state and carry on.
            self.camera = None
            self.is_connected = False
            name = model = camera_type = 'unknown'

        if self.camera is not None:
            if camera_type in self.camera_types.keys():
                self.camera_type = self.camera_types[camera_type]
            else:
                self.camera_type = 'other'
            self.is_armed = self.camera.is_motion_detector_on()
            self.is_ptz_enabled = self._check_for_ptz()
        else:
            self.camera_type = 'other'
        self.name = name.lower()
        self.model = model.lower()

    def _check_for_ptz(self) -> bool:
        """Checks if camera is capable of ptz actions"""
        try:
            # A non-empty presets list implies PTZ support.
            return self.camera.ptz_presets_list() != ''
        except amcrest.exceptions.CommError:
            return False

    def _send_request(self, req_str: str):
        """Sends an HTTP GET request with digest auth; raises on non-200."""
        result = requests.get(req_str, auth=HTTPDigestAuth(self.creds['un'], self.creds['pw']))
        if result.status_code != 200:
            raise Exception('Error in HTTP GET response. Status code: '
                            f'{result.status_code}, Message: {result.text}')

    def toggle_motion(self, set_motion: bool = True):
        """Sets motion detection"""
        if self.camera is None or not self.is_connected:
            return None

        if self.is_armed == set_motion:
            # State is already where we wanted it to be; no need to change
            return None
        motion_val = 'true' if set_motion else 'false'

        motion_url = f'{self.config_url}&MotionDetect[0].Enable={motion_val}'
        self._send_request(motion_url)

    def set_ptz_flag(self, armed: bool):
        """Orients PTZ-enabled cameras either to armed position (1) or disarmed (2)"""
        if self.is_ptz_enabled:
            # This is likely PTZ-enabled
            # Set to target flag
            preset_pt = 1 if armed else 2
            resp = self.camera.go_to_preset(action='start', preset_point_number=preset_pt)
            if resp[:2] != 'OK':
                # Something went wrong. Raise exception so it gets logged
                raise Exception(f'Camera "{self.name}" PTZ call '
                                f'saw unexpected response: "{resp}"')

    def get_current_ptz_coordinates(self) -> Optional[str]:
        """Gets the current xyz coordinates for a PTZ-enabled camera"""
        if self.is_ptz_enabled:
            ptz_list = self.camera.ptz_status().split('\r\n')[2:5]
            return ','.join([x.split('=')[1] for x in ptz_list])

    def arm_camera(self, armed: bool = True):
        """Wrapper method that both arms the motion detection setting
        as well as orients a PTZ enabled camera to the ARMED position"""
        if self.camera is None:
            return None

        self.toggle_motion(armed)
        if self.is_ptz_enabled:
            self.set_ptz_flag(armed)

    def snapshot(self, filepath: str) -> bool:
        """Takes a snapshot using the main stream (0)"""
        self.logg.debug('Getting snapshot...')
        res = self.camera.snapshot(0, filepath)
        self.logg.debug(f'Response from snapshot: {res.status}')
        if res.status != 200:
            return False
        return True

    @staticmethod
    def _consolidate_events(events: List[Dict[str, Union[str, dt]]], limit_s: int = 60,
                            default_s: int = 60) -> Optional[List[Dict[str, dt]]]:
        """Takes in a list of motion events and consolidates them if they're within range of each other

        Args:
            limit_s: limit in seconds, after which two events are actually considered separate
            default_s: if no start/end time provided, the end with be this amount of seconds
                added to the missing start/end time
        """
        # First step is to pair event starts and ends
        new_event = {}
        new_events = []
        for event in events:
            if all([x in new_event.keys() for x in ['start', 'end']]):
                # All keys have been added. Append to the list
                new_events.append(new_event)
                new_event = {}
            if len(new_event.keys()) == 0:
                # New dict
                if event['type'] == 'Event End':
                    # Event end before begin; this likely means a motion event started
                    # before our time range. Use default lookbehind to estimate the event start
                    new_event['start'] = event['time'] - timedelta(seconds=default_s)
            start_or_end = 'start' if 'Begin' in event['type'] else 'end'
            # Populate common parts of event info
            new_event.update({
                start_or_end: event['time'],
                'region': event['detail.region-name'].lower(),
                'channel': int(event['detail.channel-no.']),
                'event-type': event['detail.event-type'].lower()
            })
        if len(new_event) != 0:
            # Make sure we also have an end to this last event
            if 'end' not in new_event.keys():
                new_event['end'] = new_event['start'] + timedelta(seconds=default_s)
            new_events.append(new_event)

        # Now combine individual events if they occur within {limit_s} to each other
        combi_event = {'event-list': []}
        combi_events = []
        prev_event_end = None
        if len(new_events) == 0:
            return []
        for event in new_events:
            # Calculate the diff
            if prev_event_end is not None:
                diff = (event['start'] - prev_event_end).seconds
            else:
                # First event
                diff = 0
            # Compare diff; determine whether to combine
            if diff <= limit_s:
                # Combine current start and previous end
                combi_event['event-list'].append(event)
            else:
                # diff exceeds limit; split into another combi event
                combi_event.update({
                    'start': min([x['start'] for x in combi_event['event-list']]),
                    'end': max([x['end'] for x in combi_event['event-list']])
                })
                combi_events.append(combi_event)
                # Reset dict
                combi_event = {
                    'event-list': [event]
                }
            prev_event_end = event['end']
        if len(combi_event['event-list']) > 0:
            # Info remaining in combi_event
            combi_event.update({
                'start': min([x['start'] for x in combi_event['event-list']]),
                'end': max([x['end'] for x in combi_event['event-list']])
            })
            combi_events.append(combi_event)
        return combi_events

    def get_motion_log(self, start_dt: dt, end_dt: dt) -> List[dict]:
        """Returns log of motion detection events between two timestamps"""
        # Get logs for given range
        # Amcrest does a kind of tokenization that allows us to grab
        # logs in batches of 100. Tokens seem to be just sequential ints
        # and are not page numbers! Once the end of the log is reached,
        # the 'found' variable will be 0.
        raw_token = self.camera.log_find_start(start_dt, end_dt)
        token = re.search(r'(?!token=)\d+', raw_token).group(0)

        events = []
        item_dict = {}
        cur_item_no = 0
        while True:
            log_batch = self.camera.log_find_next(token, count=100)
            raw_logs = log_batch.split('\r\n')
            batch_size = int(re.search(r'(?!found=)\d+', log_batch).group(0))
            if batch_size == 0:
                break
            # Sift through logs, build out events
            for logstr in raw_logs:
                # Make sure we're getting an item and not any other info
                if re.search(r'(?<=items\[)\d+', logstr):
                    # Get item number
                    item_no = int(re.search(r'(?<=items)\[(\d+)]', logstr).group(1))
                    # Get & clean the name of the item
                    item_name = re.search(r'(?<=]\.).*(?==)', logstr).group(0).lower().replace(' ', '-')
                    item_name = re.sub(r'\[\d+]', '', item_name)
                    # The value after the item name
                    item_value = re.search(r'(?<==).*', logstr).group(0)
                    if item_name == 'time':
                        # Convert to datetime
                        item_value = dt.strptime(item_value, '%Y-%m-%d %H:%M:%S')
                    if item_no != cur_item_no:
                        # New item - move item dict to events and initiate new one
                        if item_dict:
                            # Guard: skip the initial empty placeholder dict.
                            events.append(item_dict)
                        item_dict = {item_name: item_value}
                        cur_item_no = item_no
                    else:
                        # Same item - add to existing dict
                        item_dict[item_name] = item_value
        if item_dict:
            # Fix: flush the final parsed item; the loop above only appended an
            # item when the NEXT item began, so the last one was always dropped.
            events.append(item_dict)
        # Of the events that are motion related
        mevents = [x for x in events if x.get('detail.event-type', '') == 'Motion Detect']
        # Reverse the order of events so they're chronological
        mevents.reverse()
        return self._consolidate_events(mevents)

    @staticmethod
    def extract_timestamp(fpath: str) -> str:
        """Extracts a timestamp from the filepath"""
        final = []
        regex = [
            r'(?<=\/sd\/)\d{4}(-\d{2}){2}',
            r'(?<=\/dav\/\d{2}\/)\d{2}(\.\d{2}){2}-\d{2}(\.\d{2}){2}'
        ]
        for rgx in regex:
            match = re.search(rgx, fpath)
            if match is not None:
                final.append(match.group())

        return '_'.join(final).replace('.', ':')

    def download_files_from_range(self, start_dt: dt, end_dt: dt,
                                  temp_dir: str) -> List[dict]:
        """Downloads mp4 files from a set datetime range"""
        file_dicts = []
        for text in self.camera.find_files(start_dt, end_dt):
            for line in text.split('\r\n'):
                key, value = list(line.split('=', 1) + [None])[:2]
                if key.endswith('.FilePath'):
                    if value.endswith('.mp4'):
                        ts = self.extract_timestamp(value)
                        dt_objs = []
                        date_dt = dt.strptime(ts.split('_')[0], '%Y-%m-%d')
                        for t in ts.split('_')[1].split('-'):
                            dt_objs.append(dt.combine(date_dt, dt.strptime(t, '%H:%M:%S').time()))
                        new_filename = f'{ts}.mp4'
                        fpath = os.path.join(temp_dir, new_filename)
                        file_dicts.append({
                            'start': dt_objs[0],
                            'end': dt_objs[1],
                            'path': fpath
                        })
                        with open(fpath, 'wb') as f:
                            f.write(self.camera.download_file(value))
        return file_dicts

    def get_video_stream(self, channel: int = 0, subtype: int = 1) -> str:
        """
        Outputs the video stream url
        Args:
            channel: int, which channel to use. default = 0
            subtype: int, stream type to use. 0 = main, 1 = extra_1, etc
                default = 1

        Returns:
            str, the url to the stream
        """
        return f'{self.base_url_with_cred}/mjpg/video.cgi?channel={channel}&subtype={subtype}'
| 42.713855 | 104 | 0.549044 |
795b698a88fe41e8570294c07c5926f21800b84d | 4,770 | py | Python | euler54.py | dchourasia/euler-solutions | e20cbf016a9ea601fcce928d9690930c9a498837 | [
"Apache-2.0"
] | null | null | null | euler54.py | dchourasia/euler-solutions | e20cbf016a9ea601fcce928d9690930c9a498837 | [
"Apache-2.0"
] | null | null | null | euler54.py | dchourasia/euler-solutions | e20cbf016a9ea601fcce928d9690930c9a498837 | [
"Apache-2.0"
] | null | null | null | '''
The file, poker.txt, contains one-thousand random hands dealt to two players. Each line of the file contains ten cards (separated by a single space): the first five are Player 1's cards and the last five are Player 2's cards. You can assume that all hands are valid (no invalid characters or repeated cards), each player's hand is in no specific order, and in each hand there is a clear winner.
High Card: Highest value card.
One Pair: Two cards of the same value.
Two Pairs: Two different pairs.
Three of a Kind: Three cards of the same value.
Straight: All cards are consecutive values.
Flush: All cards of the same suit.
Full House: Three of a kind and a pair.
Four of a Kind: Four cards of the same value.
Straight Flush: All cards are consecutive values of same suit.
Royal Flush: Ten, Jack, Queen, King, Ace, in same suit.
How many hands does Player 1 win?
'''
# Map card face characters to comparable numeric ranks (2..14, ace high).
values = {}
for x in range(2, 10):
    values[str(x)] = x
values['T'] = 10
values['J'] = 11
# Fix: queen must rank BELOW king. The original had K=12, Q=13, which
# inverted their order and mis-scored any hand decided between Q and K.
values['Q'] = 12
values['K'] = 13
values['A'] = 14
# Load the dealt hands (one "P1 cards + P2 cards" pair per line) from the
# Project Euler 54 input file; print the line count as a sanity check.
data = open('p054_poker.txt').readlines()
print(len(data))
def get_cards(line):
    """Parse one input line into two 5-card hands of (rank, suit) tuples.

    The first five space-separated tokens belong to player 1, the rest to
    player 2. Ranks are mapped through the module-level `values` table.
    """
    tokens = line.strip().split(' ')

    def as_card(token):
        return (values[token[0]], token[1])

    return [[as_card(t) for t in tokens[:5]],
            [as_card(t) for t in tokens[5:]]]
def get_winner(cards):
    """Sort both hands ascending by rank and return the winner.

    Returns 1 or 2 for the winning player, 0 for a dead tie.
    """
    c1 = sorted(cards[0], key=lambda x: x[0])
    c2 = sorted(cards[1], key=lambda x: x[0])
    print(c1, c2)
    # Dead locals removed: `winner = 0` and `tie = False` were assigned and
    # immediately overwritten / never read.
    return eval_winner(c1, c2)
def is_1_pair(c):
    """Return (found, pair_ranks): found is True when any rank occurs exactly
    twice; pair_ranks lists those ranks in ascending order."""
    ranks = [card[0] for card in c]
    freq = {rank: ranks.count(rank) for rank in set(ranks)}
    pair_ranks = sorted(rank for rank, count in freq.items() if count == 2)
    return any(count == 2 for count in freq.values()), pair_ranks
def get_rank(c):
    """Return (rank_index, helper_ranks) for the best combination in hand `c`.

    Lower index = stronger hand (0 = royal flush ... 8 = one pair);
    100 means no combination matched (high-card only).
    """
    # Predicates ordered strongest-first; the first match wins.
    defs = [is_royal_flush, is_straight_flush, is_4_of_a_kind, is_full_house, is_flush, is_straight, is_3_of_a_kind, is_2_pairs, is_1_pair]
    rank=100
    for i, func in enumerate(defs):
        r, h = func(c)
        if r:
            print('setting rank', i, func.__name__)
            # NOTE(review): raising when any of the four strongest hands is
            # found aborts the whole run; looks like leftover debug guarding —
            # verify against the intended behavior before relying on this.
            if i < 4:
                raise Exception('lower ranks found')
            rank=i
            break
    # NOTE(review): when nothing matched, `h` is whatever is_1_pair returned
    # last (normally []); rank stays 100.
    return rank, h
def is_2_pairs(c):
    """Return (found, pair_ranks): found is True only for exactly two distinct
    pairs plus one kicker; pair_ranks lists ranks occurring twice, ascending."""
    vals = [card[0] for card in c]
    freq = {rank: vals.count(rank) for rank in set(vals)}
    cts = [freq[rank] for rank in set(vals)]
    h = sorted(rank for rank, count in freq.items() if count == 2)
    print('cts', cts)
    print('vals', vals)
    return sorted(cts) == [1, 2, 2], h
def is_3_of_a_kind(c):
    """Return (found, triple_ranks): found is True when some rank occurs
    three times; triple_ranks lists those ranks in ascending order."""
    ranks = [card[0] for card in c]
    freq = {rank: ranks.count(rank) for rank in set(ranks)}
    triples = sorted(rank for rank, count in freq.items() if count == 3)
    return 3 in freq.values(), triples
def is_straight(c):
    """Return (found, high_rank): found is True when the (pre-sorted) hand's
    ranks are strictly consecutive. high_rank is the last card's rank."""
    base = c[0][0]
    expected = [base + step for step in range(1, len(c))]
    actual = [card[0] for card in c[1:]]
    return actual == expected, c[4][0]
def is_flush(c):
    """Return (found, [high_rank]): found is True when all cards share a suit."""
    distinct_suits = {card[1] for card in c}
    return len(distinct_suits) == 1, [c[4][0]]
def is_full_house(c):
    """Return (found, [triple_rank, pair_rank]): found is True when the hand
    is exactly a triple plus a pair."""
    vals = [x[0] for x in c]
    cts = [vals.count(x) for x in set(vals)]
    cts_dict = {x: vals.count(x) for x in set(vals)}
    h1 = [x for x, y in cts_dict.items() if y == 3]
    h2 = [x for x, y in cts_dict.items() if y == 2]
    # Fix: the original `[2, 3] == sorted(cts) in cts` is a chained comparison
    # — `sorted(cts) in cts` tests a list for membership among ints and is
    # always False, so full houses were never detected.
    return sorted(cts) == [2, 3], h1 + h2
def get_winner_on_highest_card(c1, c2):
    """Break a tie by comparing cards from highest to lowest.

    Returns 1 or 2 for the player holding the first strictly higher card,
    or 0 when every compared pair is equal.
    """
    print('deciding on higher card')
    h1 = sorted(c1, key=lambda card: card[0], reverse=True)
    h2 = sorted(c2, key=lambda card: card[0], reverse=True)
    for a, b in zip(h1, h2):
        print(a[0], b[0])
        if a[0] != b[0]:
            return 2 if b[0] > a[0] else 1
    return 0
def eval_winner( c1, c2):
    """Decide the winner of two rank-sorted hands.

    Returns 1 or 2 for the winning player, 0 for a dead tie.
    """
    tie=False
    winner = 0
    # NOTE(review): winner is always 0 here, so this guard is always true.
    if not winner:
        r1, h1 = get_rank(c1)
        r2, h2 = get_rank(c2)
        print(r1, h1)
        print(r2, h2)
        if r1 == r2 and r1 != -1:
            print('rank tie', r1)
            # Same combination: compare the helper ranks (pair/triple values).
            for x, y in zip(h1, h2):
                if x != y:
                    winner = int(y > x) + 1
                    break
            if not winner:
                tie=True
        elif r1 == r2 == -1:
            # NOTE(review): get_rank returns 100 (not -1) when nothing
            # matched, so this branch looks unreachable — confirm intent.
            print('no rank tie')
            tie=True
        else:
            # Different combinations: lower rank index wins.
            winner = int(r2 < r1) + 1
        if tie:
            # Fall back to highest-card comparison across the full hands.
            winner = get_winner_on_highest_card(c1, c2)
    return winner
def is_4_of_a_kind(c):
    """Return (found, quad_ranks): found is True when some rank occurs four
    times; quad_ranks lists those ranks in ascending order."""
    ranks = [card[0] for card in c]
    freq = {rank: ranks.count(rank) for rank in set(ranks)}
    quads = sorted(rank for rank, count in freq.items() if count == 4)
    return 4 in freq.values(), quads
def is_straight_flush(c):
    """Return (found, [high_rank]): found is True when all cards share one
    suit AND the (pre-sorted) ranks run consecutively."""
    high = [c[4][0]]
    if len({card[1] for card in c}) != 1:
        return False, high
    base = c[0][0]
    if [card[0] for card in c] != [base + step for step in range(len(c))]:
        return False, high
    return True, high
def is_royal_flush(c):
    """Return (found, [high_rank]): found is True for a single-suit run of
    ten, jack, queen, king, ace (ranks 10..14 in order)."""
    high = [c[4][0]]
    if len({card[1] for card in c}) != 1:
        return False, high
    if [card[0] for card in c] != [10, 11, 12, 13, 14]:
        return False, high
    return True, high
# Score every dealt line and tally player 1's wins — the puzzle's answer.
winners = []
for line in data:
    winner = get_winner(get_cards(line))
    print(winner)
    winners.append(winner)
#print(int(False))
print(winners.count(1))
# Leftover interactive scratch/debug values; not used by the solution above.
a = [2, 2, 3, 1]
b = [2, 1, 2]
#x = a.sort()
#print(2 in set(a))
#print(a+b)
#print(all(x in a for x in b))
| 24.587629 | 394 | 0.622642 |
795b69bed0b2d43cbae29793112fbf19ca9b8c92 | 416 | py | Python | sponsors/migrations/0002_auto_20190726_1556.py | pyladiesghana/PyLadies-Website | 558f436f20a6de6e6b5947432f888212d05e1ab6 | [
"MIT"
] | 2 | 2019-08-02T09:39:27.000Z | 2019-12-15T18:49:26.000Z | sponsors/migrations/0002_auto_20190726_1556.py | pyladiesghana/PyLadies-Website | 558f436f20a6de6e6b5947432f888212d05e1ab6 | [
"MIT"
] | 10 | 2020-05-03T07:41:56.000Z | 2022-03-12T00:34:34.000Z | sponsors/migrations/0002_auto_20190726_1556.py | pyladiesghana/PyLadies-Website | 558f436f20a6de6e6b5947432f888212d05e1ab6 | [
"MIT"
] | 1 | 2019-12-27T15:24:24.000Z | 2019-12-27T15:24:24.000Z | # Generated by Django 2.2 on 2019-07-26 15:56
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: makes Sponsor.sponsor_logo optional
    # (blank/null allowed) and sets its upload directory.

    dependencies = [
        ('sponsors', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='sponsor',
            name='sponsor_logo',
            field=models.ImageField(blank=True, null=True, upload_to='sponsor_pics'),
        ),
    ]
| 21.894737 | 85 | 0.610577 |
795b6acc7da0caa16448fbb541c11a70f057703e | 11,604 | py | Python | kickass/tests/functional_tests/proofs.py | trublud/KickAss-Gui | ddbce9cea11a73594b12d020bf4c2935f01e9d88 | [
"BSD-3-Clause"
] | null | null | null | kickass/tests/functional_tests/proofs.py | trublud/KickAss-Gui | ddbce9cea11a73594b12d020bf4c2935f01e9d88 | [
"BSD-3-Clause"
] | null | null | null | kickass/tests/functional_tests/proofs.py | trublud/KickAss-Gui | ddbce9cea11a73594b12d020bf4c2935f01e9d88 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2019 The KickAssCoin Project
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
"""Test misc proofs (tx key, send, receive, reserve)
"""
from framework.daemon import Daemon
from framework.wallet import Wallet
class ProofsTest():
    """End-to-end test of tx key / tx proof / reserve proof RPCs against a
    regtest daemon and two wallet RPC instances."""

    def run_test(self):
        # Fixed regtest address mined to below; wallet[0] restores its seed.
        self.reset()
        self.mine('42ey1afDFnn4886T7196doS9GPMzexD9gXpsZJDwVjeRVdFCSoHnv7KPbBeGpzJBzHRCAs9UxqeoyFQMYbqSWYTfJJQAWDm', 80)
        self.create_wallets()
        txid, tx_key, amount = self.transfer()
        self.check_tx_key(txid, tx_key, amount)
        self.check_tx_proof(txid, amount)
        self.check_reserve_proof()

    def reset(self):
        """Roll the chain back and clear the mempool for a clean run."""
        print('Resetting blockchain')
        daemon = Daemon()
        daemon.pop_blocks(1000)
        daemon.flush_txpool()

    def mine(self, address, blocks):
        """Mine `blocks` blocks paying to `address`."""
        print("Mining some blocks")
        daemon = Daemon()
        daemon.generateblocks(address, blocks)

    def transfer(self):
        """Send a fixed amount from wallet 0 to wallet 1 and mine one
        confirmation. Returns (txid, tx_key, amount)."""
        print('Creating transaction')
        self.wallet[0].refresh()
        dst = {'address': '44Kbx4sJ7JDRDV5aAhLJzQCjDz2ViLRduE3ijDZu3osWKBjMGkV1XPk4pfDUMqt1Aiezvephdqm6YD19GKFD9ZcXVUTp6BW', 'amount':123456789000}
        res = self.wallet[0].transfer([dst], get_tx_key = True)
        assert len(res.tx_hash) == 64
        assert len(res.tx_key) == 64
        daemon = Daemon()
        daemon.generateblocks('42ey1afDFnn4886T7196doS9GPMzexD9gXpsZJDwVjeRVdFCSoHnv7KPbBeGpzJBzHRCAs9UxqeoyFQMYbqSWYTfJJQAWDm', 1)
        return (res.tx_hash, res.tx_key, 123456789000)

    def create_wallets(self):
        """Restore the two deterministic test wallets from fixed seeds."""
        print('Creating wallets')
        seeds = [
          'velvet lymph giddy number token physics poetry unquoted nibs useful sabotage limits benches lifestyle eden nitrogen anvil fewest avoid batch vials washing fences goat unquoted',
          'peeled mixture ionic radar utopia puddle buying illness nuns gadget river spout cavernous bounced paradise drunk looking cottage jump tequila melting went winter adjust spout',
        ]
        self.wallet = [None, None]
        for i in range(2):
            self.wallet[i] = Wallet(idx = i)
            # Best-effort close; the wallet RPC may not have one open yet.
            try: self.wallet[i].close_wallet()
            except: pass
            res = self.wallet[i].restore_deterministic_wallet(seed = seeds[i])

    def check_tx_key(self, txid, tx_key, amount):
        """Verify check_tx_key sees the right amount from both wallets,
        and behaves sanely for bad txids/keys."""
        daemon = Daemon()

        print('Checking tx key')

        self.wallet[0].refresh()
        self.wallet[1].refresh()

        sending_address = '42ey1afDFnn4886T7196doS9GPMzexD9gXpsZJDwVjeRVdFCSoHnv7KPbBeGpzJBzHRCAs9UxqeoyFQMYbqSWYTfJJQAWDm'
        receiving_address = '44Kbx4sJ7JDRDV5aAhLJzQCjDz2ViLRduE3ijDZu3osWKBjMGkV1XPk4pfDUMqt1Aiezvephdqm6YD19GKFD9ZcXVUTp6BW'
        res = self.wallet[0].get_tx_key(txid)
        assert res.tx_key == tx_key
        res = self.wallet[0].check_tx_key(txid = txid, tx_key = tx_key, address = receiving_address)
        assert res.received == amount
        assert not res.in_pool
        assert res.confirmations == 1
        res = self.wallet[1].check_tx_key(txid = txid, tx_key = tx_key, address = receiving_address)
        assert res.received == amount
        assert not res.in_pool
        assert res.confirmations == 1

        self.wallet[1].check_tx_key(txid = txid, tx_key = tx_key, address = sending_address)
        assert res.received >= 0 # might be change
        assert not res.in_pool
        assert res.confirmations == 1

        # Unknown txid must be rejected outright.
        ok = False
        try: self.wallet[1].check_tx_key(txid = '0' * 64, tx_key = tx_key, address = receiving_address)
        except: ok = True
        assert ok

        # Wrong key yields zero received rather than an error.
        res = self.wallet[1].check_tx_key(txid = txid, tx_key = '0' * 64, address = receiving_address)
        assert res.received == 0
        assert not res.in_pool
        assert res.confirmations == 1

    def check_tx_proof(self, txid, amount):
        """Verify in/out tx proofs validate correctly and every mismatched
        txid/address/message/signature combination fails."""
        daemon = Daemon()

        print('Checking tx proof')

        self.wallet[0].refresh()
        self.wallet[1].refresh()

        sending_address = '42ey1afDFnn4886T7196doS9GPMzexD9gXpsZJDwVjeRVdFCSoHnv7KPbBeGpzJBzHRCAs9UxqeoyFQMYbqSWYTfJJQAWDm'
        receiving_address = '44Kbx4sJ7JDRDV5aAhLJzQCjDz2ViLRduE3ijDZu3osWKBjMGkV1XPk4pfDUMqt1Aiezvephdqm6YD19GKFD9ZcXVUTp6BW'
        res = self.wallet[0].get_tx_proof(txid, sending_address, 'foo');
        assert res.signature.startswith('InProof');
        signature0i = res.signature
        res = self.wallet[0].get_tx_proof(txid, receiving_address, 'bar');
        assert res.signature.startswith('OutProof');
        signature0o = res.signature
        res = self.wallet[1].get_tx_proof(txid, receiving_address, 'baz');
        assert res.signature.startswith('InProof');
        signature1 = res.signature

        res = self.wallet[0].check_tx_proof(txid, sending_address, 'foo', signature0i);
        assert res.good
        assert res.received > 0 # likely change
        assert not res.in_pool
        assert res.confirmations == 1

        # The `ok or not res.good` pattern accepts either a raised error or a
        # returned bad-proof result (res still holds the last good call's
        # value when the RPC raised).
        ok = False
        try: res = self.wallet[0].check_tx_proof('0' * 64, sending_address, 'foo', signature0i);
        except: ok = True
        assert ok or not res.good

        ok = False
        try: res = self.wallet[0].check_tx_proof(txid, receiving_address, 'foo', signature0i);
        except: ok = True
        assert ok or not res.good

        ok = False
        try: res = self.wallet[0].check_tx_proof(txid, sending_address, '', signature0i);
        except: ok = True
        assert ok or not res.good

        ok = False
        try: res = self.wallet[0].check_tx_proof(txid, sending_address, 'foo', signature1);
        except: ok = True
        assert ok or not res.good

        res = self.wallet[0].check_tx_proof(txid, receiving_address, 'bar', signature0o);
        assert res.good
        assert res.received == amount
        assert not res.in_pool
        assert res.confirmations == 1

        ok = False
        try: res = self.wallet[0].check_tx_proof('0' * 64, receiving_address, 'bar', signature0o);
        except: ok = True
        assert ok or not res.good

        ok = False
        try: res = self.wallet[0].check_tx_proof(txid, sending_address, 'bar', signature0o);
        except: ok = True
        assert ok or not res.good

        ok = False
        try: res = self.wallet[0].check_tx_proof(txid, receiving_address, '', signature0o);
        except: ok = True
        assert ok or not res.good

        ok = False
        try: res = self.wallet[0].check_tx_proof(txid, receiving_address, 'bar', signature0i);
        except: ok = True
        assert ok or not res.good

        res = self.wallet[1].check_tx_proof(txid, receiving_address, 'baz', signature1);
        assert res.good
        assert res.received == amount
        assert not res.in_pool
        assert res.confirmations == 1

        ok = False
        try: res = self.wallet[1].check_tx_proof('0' * 64, receiving_address, 'baz', signature1);
        except: ok = True
        assert ok or not res.good

        ok = False
        try: res = self.wallet[1].check_tx_proof(txid, sending_address, 'baz', signature1);
        except: ok = True
        assert ok or not res.good

        ok = False
        try: res = self.wallet[1].check_tx_proof(txid, receiving_address, '', signature1);
        except: ok = True
        assert ok or not res.good

        ok = False
        try: res = self.wallet[1].check_tx_proof(txid, receiving_address, 'baz', signature0o);
        except: ok = True
        assert ok or not res.good

    def check_reserve_proof(self):
        """Verify full and partial reserve proofs validate for the signing
        address only, and that over-balance proofs are rejected."""
        daemon = Daemon()

        print('Checking reserve proof')

        address0 = '42ey1afDFnn4886T7196doS9GPMzexD9gXpsZJDwVjeRVdFCSoHnv7KPbBeGpzJBzHRCAs9UxqeoyFQMYbqSWYTfJJQAWDm'
        address1 = '44Kbx4sJ7JDRDV5aAhLJzQCjDz2ViLRduE3ijDZu3osWKBjMGkV1XPk4pfDUMqt1Aiezvephdqm6YD19GKFD9ZcXVUTp6BW'

        self.wallet[0].refresh()
        res = self.wallet[0].get_balance()
        balance0 = res.balance
        self.wallet[1].refresh()
        res = self.wallet[1].get_balance()
        balance1 = res.balance

        # Full-balance proof.
        res = self.wallet[0].get_reserve_proof(all_ = True, message = 'foo')
        assert res.signature.startswith('ReserveProof')
        signature = res.signature
        for i in range(2):
            res = self.wallet[i].check_reserve_proof(address = address0, message = 'foo', signature = signature)
            assert res.good
            assert res.total == balance0

            ok = False
            try: res = self.wallet[i].check_reserve_proof(address = address0, message = 'bar', signature = signature)
            except: ok = True
            assert ok or not res.good

            ok = False
            try: res = self.wallet[i].check_reserve_proof(address = address1, message = 'foo', signature = signature)
            except: ok = True
            assert ok or not res.good

        # Partial proof for a tenth of the balance.
        amount = int(balance0 / 10)
        res = self.wallet[0].get_reserve_proof(all_ = False, amount = amount, message = 'foo')
        assert res.signature.startswith('ReserveProof')
        signature = res.signature
        for i in range(2):
            res = self.wallet[i].check_reserve_proof(address = address0, message = 'foo', signature = signature)
            assert res.good
            assert res.total >= amount and res.total <= balance0

            ok = False
            try: res = self.wallet[i].check_reserve_proof(address = address0, message = 'bar', signature = signature)
            except: ok = True
            assert ok or not res.good

            ok = False
            try: res = self.wallet[i].check_reserve_proof(address = address1, message = 'foo', signature = signature)
            except: ok = True
            assert ok or not res.good

        # Requesting a proof for more than the balance must fail.
        ok = False
        try: self.wallet[0].get_reserve_proof(all_ = False, amount = balance0 + 1, message = 'foo')
        except: ok = True
        assert ok
class Guard:
    """Context manager that pauses auto-refresh on all four wallet RPC
    instances for the duration of the test, restoring it on exit."""

    def _set_auto_refresh(self, enabled):
        # Apply the flag to every wallet slot used by the test harness.
        for idx in range(4):
            Wallet(idx=idx).auto_refresh(enabled)

    def __enter__(self):
        self._set_auto_refresh(False)

    def __exit__(self, exc_type, exc_value, traceback):
        self._set_auto_refresh(True)
if __name__ == '__main__':
    # Pause wallet auto-refresh for the whole run so refreshes happen only
    # where the test triggers them explicitly.
    with Guard() as guard:
        ProofsTest().run_test()
| 40.013793 | 186 | 0.663909 |
795b6b62f9d9f7a6733f199a7ad298536ecc5d7b | 33,877 | py | Python | src/wormhole/test/test_wormhole.py | kneufeld/magic-wormhole | 0977ef02c4e47f278b1d89b12fe36f327da366e0 | [
"MIT"
] | 1 | 2021-06-28T08:12:43.000Z | 2021-06-28T08:12:43.000Z | src/wormhole/test/test_wormhole.py | kneufeld/magic-wormhole | 0977ef02c4e47f278b1d89b12fe36f327da366e0 | [
"MIT"
] | null | null | null | src/wormhole/test/test_wormhole.py | kneufeld/magic-wormhole | 0977ef02c4e47f278b1d89b12fe36f327da366e0 | [
"MIT"
] | null | null | null | from __future__ import print_function
import os, json, re, gc
from binascii import hexlify, unhexlify
import mock
from twisted.trial import unittest
from twisted.internet import reactor
from twisted.internet.defer import Deferred, gatherResults, inlineCallbacks
from .common import ServerBase
from .. import wormhole
from ..errors import (WrongPasswordError, WelcomeError, UsageError,
KeyFormatError)
from spake2 import SPAKE2_Symmetric
from ..timing import DebugTiming
from ..util import (bytes_to_dict, dict_to_bytes,
hexstr_to_bytes, bytes_to_hexstr)
from nacl.secret import SecretBox
APPID = u"appid"
class MockWebSocket:
    """Test double for a websocket: captures sent text frames and lets the
    test drain them as decoded JSON objects."""

    def __init__(self):
        self._payloads = []

    def sendMessage(self, payload, is_binary):
        # The wormhole client only ever sends text (JSON) frames.
        assert not is_binary
        self._payloads.append(payload)

    def outbound(self):
        """Decode and return all queued payloads, clearing the queue."""
        drained, self._payloads = self._payloads, []
        return [json.loads(frame.decode("utf-8")) for frame in drained]
def response(w, **kwargs):
    """Feed a fake server response (kwargs serialized as JSON) into the
    wormhole's websocket dispatch path."""
    encoded = json.dumps(kwargs).encode("utf-8")
    w._ws_dispatch_response(encoded)
class Welcome(unittest.TestCase):
    """Unit tests for wormhole._WelcomeHandler's handling of the server
    'welcome' message (motd, version warnings, error signaling)."""

    def test_tolerate_no_current_version(self):
        # An empty welcome dict must not raise.
        w = wormhole._WelcomeHandler(u"relay_url", u"current_cli_version", None)
        w.handle_welcome({})

    def test_print_motd(self):
        w = wormhole._WelcomeHandler(u"relay_url", u"current_cli_version", None)
        with mock.patch("sys.stderr") as stderr:
            w.handle_welcome({u"motd": u"message of\nthe day"})
            self.assertEqual(stderr.method_calls,
                             [mock.call.write(u"Server (at relay_url) says:\n"
                                              " message of\n the day"),
                              mock.call.write(u"\n")])
        # motd can be displayed multiple times
        with mock.patch("sys.stderr") as stderr2:
            w.handle_welcome({u"motd": u"second message"})
            self.assertEqual(stderr2.method_calls,
                             [mock.call.write(u"Server (at relay_url) says:\n"
                                              " second message"),
                              mock.call.write(u"\n")])

    def test_current_version(self):
        w = wormhole._WelcomeHandler(u"relay_url", u"2.0", None)
        # Matching versions: no warning written.
        with mock.patch("sys.stderr") as stderr:
            w.handle_welcome({u"current_cli_version": u"2.0"})
            self.assertEqual(stderr.method_calls, [])

        # Mismatched versions: two warning lines written.
        with mock.patch("sys.stderr") as stderr:
            w.handle_welcome({u"current_cli_version": u"3.0"})
            exp1 = (u"Warning: errors may occur unless both sides are"
                    " running the same version")
            exp2 = (u"Server claims 3.0 is current, but ours is 2.0")
            self.assertEqual(stderr.method_calls,
                             [mock.call.write(exp1),
                              mock.call.write(u"\n"),
                              mock.call.write(exp2),
                              mock.call.write(u"\n"),
                              ])

        # warning is only displayed once
        with mock.patch("sys.stderr") as stderr:
            w.handle_welcome({u"current_cli_version": u"3.0"})
            self.assertEqual(stderr.method_calls, [])

    def test_non_release_version(self):
        # A dev/dirty local version suppresses the mismatch warning.
        w = wormhole._WelcomeHandler(u"relay_url", u"2.0-dirty", None)
        with mock.patch("sys.stderr") as stderr:
            w.handle_welcome({u"current_cli_version": u"3.0"})
            self.assertEqual(stderr.method_calls, [])

    def test_signal_error(self):
        # A server-provided 'error' field is forwarded to the signal_error
        # callback as a WelcomeError plus the 'unwelcome' mood.
        se = mock.Mock()
        w = wormhole._WelcomeHandler(u"relay_url", u"2.0", se)
        w.handle_welcome({})
        self.assertEqual(se.mock_calls, [])

        w.handle_welcome({u"error": u"oops"})
        self.assertEqual(len(se.mock_calls), 1)
        self.assertEqual(len(se.mock_calls[0][1]), 2) # posargs
        we = se.mock_calls[0][1][0]
        self.assertIsInstance(we, WelcomeError)
        self.assertEqual(we.args, (u"oops",))
        mood = se.mock_calls[0][1][1]
        self.assertEqual(mood, u"unwelcome")
        # alas WelcomeError instances don't compare against each other
        #self.assertEqual(se.mock_calls, [mock.call(WelcomeError(u"oops"))])
class InputCode(unittest.TestCase):
    """Unit test for wormhole._InputCode's nameplate listing."""

    def test_list(self):
        send_command = mock.Mock()
        ic = wormhole._InputCode(None, u"prompt", 2, send_command,
                                 DebugTiming())
        # _list() fires only once the server's 'nameplates' response arrives.
        d = ic._list()
        self.assertNoResult(d)
        self.assertEqual(send_command.mock_calls, [mock.call(u"list")])
        ic._response_handle_nameplates({u"type": u"nameplates",
                                        u"nameplates": [{u"id": u"123"}]})
        res = self.successResultOf(d)
        self.assertEqual(res, [u"123"])
class GetCode(unittest.TestCase):
    """Unit test for wormhole._GetCode's nameplate allocation flow."""

    def test_get(self):
        send_command = mock.Mock()
        # Renamed from `gc` to `g`: the old local shadowed the module-level
        # `gc` (garbage collector) import used elsewhere in this file.
        g = wormhole._GetCode(2, send_command, DebugTiming())
        # go() fires only once the server's 'allocated' response arrives.
        d = g.go()
        self.assertNoResult(d)
        self.assertEqual(send_command.mock_calls, [mock.call(u"allocate")])
        # TODO: nameplate attributes get added and checked here
        g._response_handle_allocated({u"type": u"allocated",
                                      u"nameplate": u"123"})
        code = self.successResultOf(d)
        self.assertIsInstance(code, type(u""))
        self.assert_(code.startswith(u"123-"))
        pieces = code.split(u"-")
        self.assertEqual(len(pieces), 3) # nameplate plus two words
        self.assert_(re.search(r'^\d+-\w+-\w+$', code), code)
class Basic(unittest.TestCase):
    def tearDown(self):
        # flush out any errorful Deferreds left dangling in cycles
        # (forcing a collection here surfaces them inside this test's scope)
        gc.collect()
    def check_out(self, out, **kwargs):
        """Assert each kwarg key/value appears in the message dict `out`."""
        # Assert that each kwarg is present in the 'out' dict. Ignore other
        # keys ('msgid' in particular)
        for key, value in kwargs.items():
            self.assertIn(key, out)
            self.assertEqual(out[key], value, (out, key, value))
def check_outbound(self, ws, types):
out = ws.outbound()
self.assertEqual(len(out), len(types), (out, types))
for i,t in enumerate(types):
self.assertEqual(out[i][u"type"], t, (i,t,out))
return out
def make_pake(self, code, side, msg1):
sp2 = SPAKE2_Symmetric(wormhole.to_bytes(code),
idSymmetric=wormhole.to_bytes(APPID))
msg2 = sp2.start()
key = sp2.finish(msg1)
return key, msg2
def test_create(self):
wormhole._Wormhole(APPID, u"relay_url", reactor, None, None)
def test_basic(self):
# We don't call w._start(), so this doesn't create a WebSocket
# connection. We provide a mock connection instead. If we wanted to
# exercise _connect, we'd mock out WSFactory.
# w._connect = lambda self: None
# w._event_connected(mock_ws)
# w._event_ws_opened()
# w._ws_dispatch_response(payload)
timing = DebugTiming()
with mock.patch("wormhole.wormhole._WelcomeHandler") as wh_c:
w = wormhole._Wormhole(APPID, u"relay_url", reactor, None, timing)
wh = wh_c.return_value
self.assertEqual(w._ws_url, u"relay_url")
self.assertTrue(w._flag_need_nameplate)
self.assertTrue(w._flag_need_to_build_msg1)
self.assertTrue(w._flag_need_to_send_PAKE)
v = w.verify()
w._drop_connection = mock.Mock()
ws = MockWebSocket()
w._event_connected(ws)
out = ws.outbound()
self.assertEqual(len(out), 0)
w._event_ws_opened(None)
out = ws.outbound()
self.assertEqual(len(out), 1)
self.check_out(out[0], type=u"bind", appid=APPID, side=w._side)
self.assertIn(u"id", out[0])
# WelcomeHandler should get called upon 'welcome' response. Its full
# behavior is exercised in 'Welcome' above.
WELCOME = {u"foo": u"bar"}
response(w, type="welcome", welcome=WELCOME)
self.assertEqual(wh.mock_calls, [mock.call.handle_welcome(WELCOME)])
# because we're connected, setting the code also claims the mailbox
CODE = u"123-foo-bar"
w.set_code(CODE)
self.assertFalse(w._flag_need_to_build_msg1)
out = ws.outbound()
self.assertEqual(len(out), 1)
self.check_out(out[0], type=u"claim", nameplate=u"123")
# the server reveals the linked mailbox
response(w, type=u"claimed", mailbox=u"mb456")
# that triggers event_learned_mailbox, which should send open() and
# PAKE
self.assertEqual(w._mailbox_state, wormhole.OPEN)
out = ws.outbound()
self.assertEqual(len(out), 2)
self.check_out(out[0], type=u"open", mailbox=u"mb456")
self.check_out(out[1], type=u"add", phase=u"pake")
self.assertNoResult(v)
# server echoes back all "add" messages
response(w, type=u"message", phase=u"pake", body=out[1][u"body"],
side=w._side)
self.assertNoResult(v)
# extract our outbound PAKE message
body = bytes_to_dict(hexstr_to_bytes(out[1][u"body"]))
msg1 = hexstr_to_bytes(body[u"pake_v1"])
# next we build the simulated peer's PAKE operation
side2 = w._side + u"other"
key, msg2 = self.make_pake(CODE, side2, msg1)
payload = {u"pake_v1": bytes_to_hexstr(msg2)}
body_hex = bytes_to_hexstr(dict_to_bytes(payload))
response(w, type=u"message", phase=u"pake", body=body_hex, side=side2)
# hearing the peer's PAKE (msg2) makes us release the nameplate, send
# the confirmation message, and sends any queued phase messages. It
# doesn't deliver the verifier because we're still waiting on the
# confirmation message.
self.assertFalse(w._flag_need_to_see_mailbox_used)
self.assertEqual(w._key, key)
out = ws.outbound()
self.assertEqual(len(out), 2, out)
self.check_out(out[0], type=u"release")
self.check_out(out[1], type=u"add", phase=u"version")
self.assertNoResult(v)
# hearing a valid confirmation message doesn't throw an error
plaintext = json.dumps({}).encode("utf-8")
data_key = w._derive_phase_key(side2, u"version")
confmsg = w._encrypt_data(data_key, plaintext)
version2_hex = hexlify(confmsg).decode("ascii")
response(w, type=u"message", phase=u"version", body=version2_hex,
side=side2)
# and it releases the verifier
verifier = self.successResultOf(v)
self.assertEqual(verifier,
w.derive_key(u"wormhole:verifier", SecretBox.KEY_SIZE))
# an outbound message can now be sent immediately
w.send(b"phase0-outbound")
out = ws.outbound()
self.assertEqual(len(out), 1)
self.check_out(out[0], type=u"add", phase=u"0")
# decrypt+check the outbound message
p0_outbound = unhexlify(out[0][u"body"].encode("ascii"))
msgkey0 = w._derive_phase_key(w._side, u"0")
p0_plaintext = w._decrypt_data(msgkey0, p0_outbound)
self.assertEqual(p0_plaintext, b"phase0-outbound")
# get() waits for the inbound message to arrive
md = w.get()
self.assertNoResult(md)
self.assertIn(u"0", w._receive_waiters)
self.assertNotIn(u"0", w._received_messages)
msgkey1 = w._derive_phase_key(side2, u"0")
p0_inbound = w._encrypt_data(msgkey1, b"phase0-inbound")
p0_inbound_hex = hexlify(p0_inbound).decode("ascii")
response(w, type=u"message", phase=u"0", body=p0_inbound_hex,
side=side2)
p0_in = self.successResultOf(md)
self.assertEqual(p0_in, b"phase0-inbound")
self.assertNotIn(u"0", w._receive_waiters)
self.assertIn(u"0", w._received_messages)
# receiving an inbound message will queue it until get() is called
msgkey2 = w._derive_phase_key(side2, u"1")
p1_inbound = w._encrypt_data(msgkey2, b"phase1-inbound")
p1_inbound_hex = hexlify(p1_inbound).decode("ascii")
response(w, type=u"message", phase=u"1", body=p1_inbound_hex,
side=side2)
self.assertIn(u"1", w._received_messages)
self.assertNotIn(u"1", w._receive_waiters)
p1_in = self.successResultOf(w.get())
self.assertEqual(p1_in, b"phase1-inbound")
self.assertIn(u"1", w._received_messages)
self.assertNotIn(u"1", w._receive_waiters)
d = w.close()
self.assertNoResult(d)
out = ws.outbound()
self.assertEqual(len(out), 1)
self.check_out(out[0], type=u"close", mood=u"happy")
self.assertEqual(w._drop_connection.mock_calls, [])
response(w, type=u"released")
self.assertEqual(w._drop_connection.mock_calls, [])
response(w, type=u"closed")
self.assertEqual(w._drop_connection.mock_calls, [mock.call()])
w._ws_closed(True, None, None)
self.assertEqual(self.successResultOf(d), None)
def test_close_wait_0(self):
# Close before the connection is established. The connection still
# gets established, but it is then torn down before sending anything.
timing = DebugTiming()
w = wormhole._Wormhole(APPID, u"relay_url", reactor, None, timing)
w._drop_connection = mock.Mock()
d = w.close()
self.assertNoResult(d)
ws = MockWebSocket()
w._event_connected(ws)
w._event_ws_opened(None)
self.assertEqual(w._drop_connection.mock_calls, [mock.call()])
self.assertNoResult(d)
w._ws_closed(True, None, None)
self.successResultOf(d)
def test_close_wait_1(self):
# close before even claiming the nameplate
timing = DebugTiming()
w = wormhole._Wormhole(APPID, u"relay_url", reactor, None, timing)
w._drop_connection = mock.Mock()
ws = MockWebSocket()
w._event_connected(ws)
w._event_ws_opened(None)
d = w.close()
self.check_outbound(ws, [u"bind"])
self.assertNoResult(d)
self.assertEqual(w._drop_connection.mock_calls, [mock.call()])
self.assertNoResult(d)
w._ws_closed(True, None, None)
self.successResultOf(d)
def test_close_wait_2(self):
# Close after claiming the nameplate, but before opening the mailbox.
# The 'claimed' response arrives before we close.
timing = DebugTiming()
w = wormhole._Wormhole(APPID, u"relay_url", reactor, None, timing)
w._drop_connection = mock.Mock()
ws = MockWebSocket()
w._event_connected(ws)
w._event_ws_opened(None)
CODE = u"123-foo-bar"
w.set_code(CODE)
self.check_outbound(ws, [u"bind", u"claim"])
response(w, type=u"claimed", mailbox=u"mb123")
d = w.close()
self.check_outbound(ws, [u"open", u"add", u"release", u"close"])
self.assertNoResult(d)
self.assertEqual(w._drop_connection.mock_calls, [])
response(w, type=u"released")
self.assertNoResult(d)
self.assertEqual(w._drop_connection.mock_calls, [])
response(w, type=u"closed")
self.assertEqual(w._drop_connection.mock_calls, [mock.call()])
self.assertNoResult(d)
w._ws_closed(True, None, None)
self.successResultOf(d)
def test_close_wait_3(self):
# close after claiming the nameplate, but before opening the mailbox
# The 'claimed' response arrives after we start to close.
timing = DebugTiming()
w = wormhole._Wormhole(APPID, u"relay_url", reactor, None, timing)
w._drop_connection = mock.Mock()
ws = MockWebSocket()
w._event_connected(ws)
w._event_ws_opened(None)
CODE = u"123-foo-bar"
w.set_code(CODE)
self.check_outbound(ws, [u"bind", u"claim"])
d = w.close()
response(w, type=u"claimed", mailbox=u"mb123")
self.check_outbound(ws, [u"release"])
self.assertNoResult(d)
self.assertEqual(w._drop_connection.mock_calls, [])
response(w, type=u"released")
self.assertEqual(w._drop_connection.mock_calls, [mock.call()])
self.assertNoResult(d)
w._ws_closed(True, None, None)
self.successResultOf(d)
def test_close_wait_4(self):
# close after both claiming the nameplate and opening the mailbox
timing = DebugTiming()
w = wormhole._Wormhole(APPID, u"relay_url", reactor, None, timing)
w._drop_connection = mock.Mock()
ws = MockWebSocket()
w._event_connected(ws)
w._event_ws_opened(None)
CODE = u"123-foo-bar"
w.set_code(CODE)
response(w, type=u"claimed", mailbox=u"mb456")
self.check_outbound(ws, [u"bind", u"claim", u"open", u"add"])
d = w.close()
self.check_outbound(ws, [u"release", u"close"])
self.assertNoResult(d)
self.assertEqual(w._drop_connection.mock_calls, [])
response(w, type=u"released")
self.assertNoResult(d)
self.assertEqual(w._drop_connection.mock_calls, [])
response(w, type=u"closed")
self.assertNoResult(d)
self.assertEqual(w._drop_connection.mock_calls, [mock.call()])
w._ws_closed(True, None, None)
self.successResultOf(d)
def test_close_wait_5(self):
# close after claiming the nameplate, opening the mailbox, then
# releasing the nameplate
timing = DebugTiming()
w = wormhole._Wormhole(APPID, u"relay_url", reactor, None, timing)
w._drop_connection = mock.Mock()
ws = MockWebSocket()
w._event_connected(ws)
w._event_ws_opened(None)
CODE = u"123-foo-bar"
w.set_code(CODE)
response(w, type=u"claimed", mailbox=u"mb456")
w._key = b""
msgkey = w._derive_phase_key(u"side2", u"misc")
p1_inbound = w._encrypt_data(msgkey, b"")
p1_inbound_hex = hexlify(p1_inbound).decode("ascii")
response(w, type=u"message", phase=u"misc", side=u"side2",
body=p1_inbound_hex)
self.check_outbound(ws, [u"bind", u"claim", u"open", u"add",
u"release"])
d = w.close()
self.check_outbound(ws, [u"close"])
self.assertNoResult(d)
self.assertEqual(w._drop_connection.mock_calls, [])
response(w, type=u"released")
self.assertNoResult(d)
self.assertEqual(w._drop_connection.mock_calls, [])
response(w, type=u"closed")
self.assertNoResult(d)
self.assertEqual(w._drop_connection.mock_calls, [mock.call()])
w._ws_closed(True, None, None)
self.successResultOf(d)
def test_close_errbacks(self):
# make sure the Deferreds returned by verify() and get() are properly
# errbacked upon close
pass
def test_get_code_mock(self):
timing = DebugTiming()
w = wormhole._Wormhole(APPID, u"relay_url", reactor, None, timing)
ws = MockWebSocket() # TODO: mock w._ws_send_command instead
w._event_connected(ws)
w._event_ws_opened(None)
self.check_outbound(ws, [u"bind"])
gc_c = mock.Mock()
gc = gc_c.return_value = mock.Mock()
gc_d = gc.go.return_value = Deferred()
with mock.patch("wormhole.wormhole._GetCode", gc_c):
d = w.get_code()
self.assertNoResult(d)
gc_d.callback(u"123-foo-bar")
code = self.successResultOf(d)
self.assertEqual(code, u"123-foo-bar")
def test_get_code_real(self):
timing = DebugTiming()
w = wormhole._Wormhole(APPID, u"relay_url", reactor, None, timing)
ws = MockWebSocket()
w._event_connected(ws)
w._event_ws_opened(None)
self.check_outbound(ws, [u"bind"])
d = w.get_code()
out = ws.outbound()
self.assertEqual(len(out), 1)
self.check_out(out[0], type=u"allocate")
# TODO: nameplate attributes go here
self.assertNoResult(d)
response(w, type=u"allocated", nameplate=u"123")
code = self.successResultOf(d)
self.assertIsInstance(code, type(u""))
self.assert_(code.startswith(u"123-"))
pieces = code.split(u"-")
self.assertEqual(len(pieces), 3) # nameplate plus two words
self.assert_(re.search(r'^\d+-\w+-\w+$', code), code)
# make sure verify() can be called both before and after the verifier is
# computed
def _test_verifier(self, when, order, success):
assert when in ("early", "middle", "late")
assert order in ("key-then-version", "version-then-key")
assert isinstance(success, bool)
#print(when, order, success)
timing = DebugTiming()
w = wormhole._Wormhole(APPID, u"relay_url", reactor, None, timing)
w._drop_connection = mock.Mock()
w._ws_send_command = mock.Mock()
w._mailbox_state = wormhole.OPEN
side2 = u"side2"
d = None
if success:
w._key = b"key"
else:
w._key = b"wrongkey"
plaintext = json.dumps({}).encode("utf-8")
data_key = w._derive_phase_key(side2, u"version")
confmsg = w._encrypt_data(data_key, plaintext)
w._key = None
if when == "early":
d = w.verify()
self.assertNoResult(d)
if order == "key-then-version":
w._key = b"key"
w._event_established_key()
else:
w._event_received_version(side2, confmsg)
if when == "middle":
d = w.verify()
if d:
self.assertNoResult(d) # still waiting for other msg
if order == "version-then-key":
w._key = b"key"
w._event_established_key()
else:
w._event_received_version(side2, confmsg)
if when == "late":
d = w.verify()
if success:
self.successResultOf(d)
else:
self.assertFailure(d, wormhole.WrongPasswordError)
self.flushLoggedErrors(WrongPasswordError)
def test_verifier(self):
for when in ("early", "middle", "late"):
for order in ("key-then-version", "version-then-key"):
for success in (False, True):
self._test_verifier(when, order, success)
def test_api_errors(self):
# doing things you're not supposed to do
pass
def test_welcome_error(self):
# A welcome message could arrive at any time, with an [error] key
# that should make us halt. In practice, though, this gets sent as
# soon as the connection is established, which limits the possible
# states in which we might see it.
timing = DebugTiming()
w = wormhole._Wormhole(APPID, u"relay_url", reactor, None, timing)
w._drop_connection = mock.Mock()
ws = MockWebSocket()
w._event_connected(ws)
w._event_ws_opened(None)
self.check_outbound(ws, [u"bind"])
d1 = w.get()
d2 = w.verify()
d3 = w.get_code()
# TODO (tricky): test w.input_code
self.assertNoResult(d1)
self.assertNoResult(d2)
self.assertNoResult(d3)
w._signal_error(WelcomeError(u"you are not actually welcome"), u"pouty")
self.failureResultOf(d1, WelcomeError)
self.failureResultOf(d2, WelcomeError)
self.failureResultOf(d3, WelcomeError)
# once the error is signalled, all API calls should fail
self.assertRaises(WelcomeError, w.send, u"foo")
self.assertRaises(WelcomeError,
w.derive_key, u"foo", SecretBox.KEY_SIZE)
self.failureResultOf(w.get(), WelcomeError)
self.failureResultOf(w.verify(), WelcomeError)
def test_version_error(self):
# we should only receive the "version" message after we receive the
# PAKE message, by which point we should know the key. If the
# confirmation message doesn't decrypt, we signal an error.
timing = DebugTiming()
w = wormhole._Wormhole(APPID, u"relay_url", reactor, None, timing)
w._drop_connection = mock.Mock()
ws = MockWebSocket()
w._event_connected(ws)
w._event_ws_opened(None)
w.set_code(u"123-foo-bar")
response(w, type=u"claimed", mailbox=u"mb456")
d1 = w.get()
d2 = w.verify()
self.assertNoResult(d1)
self.assertNoResult(d2)
out = ws.outbound()
# [u"bind", u"claim", u"open", u"add"]
self.assertEqual(len(out), 4)
self.assertEqual(out[3][u"type"], u"add")
sp2 = SPAKE2_Symmetric(b"", idSymmetric=wormhole.to_bytes(APPID))
msg2 = sp2.start()
payload = {u"pake_v1": bytes_to_hexstr(msg2)}
body_hex = bytes_to_hexstr(dict_to_bytes(payload))
response(w, type=u"message", phase=u"pake", body=body_hex, side=u"s2")
self.assertNoResult(d1)
self.assertNoResult(d2) # verify() waits for confirmation
# sending a random version message will cause a confirmation error
confkey = w.derive_key(u"WRONG", SecretBox.KEY_SIZE)
nonce = os.urandom(wormhole.CONFMSG_NONCE_LENGTH)
badversion = wormhole.make_confmsg(confkey, nonce)
badversion_hex = hexlify(badversion).decode("ascii")
response(w, type=u"message", phase=u"version", body=badversion_hex,
side=u"s2")
self.failureResultOf(d1, WrongPasswordError)
self.failureResultOf(d2, WrongPasswordError)
# once the error is signalled, all API calls should fail
self.assertRaises(WrongPasswordError, w.send, u"foo")
self.assertRaises(WrongPasswordError,
w.derive_key, u"foo", SecretBox.KEY_SIZE)
self.failureResultOf(w.get(), WrongPasswordError)
self.failureResultOf(w.verify(), WrongPasswordError)
# event orderings to exercise:
#
# * normal sender: set_code, send_phase1, connected, claimed, learn_msg2,
# learn_phase1
# * normal receiver (argv[2]=code): set_code, connected, learn_msg1,
# learn_phase1, send_phase1,
# * normal receiver (readline): connected, input_code
# *
# * set_code, then connected
# * connected, receive_pake, send_phase, set_code
class Wormholes(ServerBase, unittest.TestCase):
    # integration test, with a real server
    """End-to-end tests: two wormhole() instances talk to each other through
    the real relay server started by ServerBase."""

    def doBoth(self, d1, d2):
        # wait for both Deferreds; consumeErrors=True so a failure on one
        # side doesn't leave an unhandled error on the other
        return gatherResults([d1, d2], True)

    @inlineCallbacks
    def test_basic(self):
        """Allocate a code on one side, set it on the other, exchange one
        message in each direction."""
        w1 = wormhole.wormhole(APPID, self.relayurl, reactor)
        w2 = wormhole.wormhole(APPID, self.relayurl, reactor)
        code = yield w1.get_code()
        w2.set_code(code)
        w1.send(b"data1")
        w2.send(b"data2")
        dataX = yield w1.get()
        dataY = yield w2.get()
        self.assertEqual(dataX, b"data2")
        self.assertEqual(dataY, b"data1")
        yield w1.close()
        yield w2.close()

    @inlineCallbacks
    def test_same_message(self):
        # the two sides use random nonces for their messages, so it's ok for
        # both to try and send the same body: they'll result in distinct
        # encrypted messages
        w1 = wormhole.wormhole(APPID, self.relayurl, reactor)
        w2 = wormhole.wormhole(APPID, self.relayurl, reactor)
        code = yield w1.get_code()
        w2.set_code(code)
        w1.send(b"data")
        w2.send(b"data")
        dataX = yield w1.get()
        dataY = yield w2.get()
        self.assertEqual(dataX, b"data")
        self.assertEqual(dataY, b"data")
        yield w1.close()
        yield w2.close()

    @inlineCallbacks
    def test_interleaved(self):
        """get() may be called before the corresponding send() happens."""
        w1 = wormhole.wormhole(APPID, self.relayurl, reactor)
        w2 = wormhole.wormhole(APPID, self.relayurl, reactor)
        code = yield w1.get_code()
        w2.set_code(code)
        w1.send(b"data1")
        dataY = yield w2.get()
        self.assertEqual(dataY, b"data1")
        d = w1.get()
        w2.send(b"data2")
        dataX = yield d
        self.assertEqual(dataX, b"data2")
        yield w1.close()
        yield w2.close()

    @inlineCallbacks
    def test_unidirectional(self):
        """One side only sends, the other only receives."""
        w1 = wormhole.wormhole(APPID, self.relayurl, reactor)
        w2 = wormhole.wormhole(APPID, self.relayurl, reactor)
        code = yield w1.get_code()
        w2.set_code(code)
        w1.send(b"data1")
        dataY = yield w2.get()
        self.assertEqual(dataY, b"data1")
        yield w1.close()
        yield w2.close()

    @inlineCallbacks
    def test_early(self):
        """send() and get() may be called before set_code()."""
        w1 = wormhole.wormhole(APPID, self.relayurl, reactor)
        w1.send(b"data1")
        w2 = wormhole.wormhole(APPID, self.relayurl, reactor)
        d = w2.get()
        w1.set_code(u"123-abc-def")
        w2.set_code(u"123-abc-def")
        dataY = yield d
        self.assertEqual(dataY, b"data1")
        yield w1.close()
        yield w2.close()

    @inlineCallbacks
    def test_fixed_code(self):
        """Both sides agree on a pre-arranged code; neither allocates."""
        w1 = wormhole.wormhole(APPID, self.relayurl, reactor)
        w2 = wormhole.wormhole(APPID, self.relayurl, reactor)
        w1.set_code(u"123-purple-elephant")
        w2.set_code(u"123-purple-elephant")
        w1.send(b"data1"), w2.send(b"data2")
        dl = yield self.doBoth(w1.get(), w2.get())
        (dataX, dataY) = dl
        self.assertEqual(dataX, b"data2")
        self.assertEqual(dataY, b"data1")
        yield w1.close()
        yield w2.close()

    @inlineCallbacks
    def test_multiple_messages(self):
        """Several phase messages per side arrive in send order."""
        w1 = wormhole.wormhole(APPID, self.relayurl, reactor)
        w2 = wormhole.wormhole(APPID, self.relayurl, reactor)
        w1.set_code(u"123-purple-elephant")
        w2.set_code(u"123-purple-elephant")
        w1.send(b"data1"), w2.send(b"data2")
        w1.send(b"data3"), w2.send(b"data4")
        dl = yield self.doBoth(w1.get(), w2.get())
        (dataX, dataY) = dl
        self.assertEqual(dataX, b"data2")
        self.assertEqual(dataY, b"data1")
        dl = yield self.doBoth(w1.get(), w2.get())
        (dataX, dataY) = dl
        self.assertEqual(dataX, b"data4")
        self.assertEqual(dataY, b"data3")
        yield w1.close()
        yield w2.close()

    @inlineCallbacks
    def test_wrong_password(self):
        """Mismatched codes surface as WrongPasswordError on get()."""
        w1 = wormhole.wormhole(APPID, self.relayurl, reactor)
        w2 = wormhole.wormhole(APPID, self.relayurl, reactor)
        code = yield w1.get_code()
        w2.set_code(code+"not")
        # That's enough to allow both sides to discover the mismatch, but
        # only after the confirmation message gets through. API calls that
        # don't wait will appear to work until the mismatched confirmation
        # message arrives.
        w1.send(b"should still work")
        w2.send(b"should still work")
        # API calls that wait (i.e. get) will errback
        yield self.assertFailure(w2.get(), WrongPasswordError)
        yield self.assertFailure(w1.get(), WrongPasswordError)
        yield w1.close()
        yield w2.close()
        self.flushLoggedErrors(WrongPasswordError)

    @inlineCallbacks
    def test_wrong_password_with_spaces(self):
        """A code containing spaces instead of dashes is rejected with
        KeyFormatError before any network exchange."""
        w1 = wormhole.wormhole(APPID, self.relayurl, reactor)
        w2 = wormhole.wormhole(APPID, self.relayurl, reactor)
        code = yield w1.get_code()
        code_no_dashes = code.replace('-', ' ')

        with self.assertRaises(KeyFormatError) as ex:
            w2.set_code(code_no_dashes)

        expected_msg = "code (%s) contains spaces." % (code_no_dashes,)
        self.assertEqual(expected_msg, str(ex.exception))
        yield w1.close()
        yield w2.close()
        self.flushLoggedErrors(KeyFormatError)

    @inlineCallbacks
    def test_verifier(self):
        """Both sides compute the same bytes-typed verifier."""
        w1 = wormhole.wormhole(APPID, self.relayurl, reactor)
        w2 = wormhole.wormhole(APPID, self.relayurl, reactor)
        code = yield w1.get_code()
        w2.set_code(code)
        v1 = yield w1.verify()
        v2 = yield w2.verify()
        self.failUnlessEqual(type(v1), type(b""))
        self.failUnlessEqual(v1, v2)
        w1.send(b"data1")
        w2.send(b"data2")
        dataX = yield w1.get()
        dataY = yield w2.get()
        self.assertEqual(dataX, b"data2")
        self.assertEqual(dataY, b"data1")
        yield w1.close()
        yield w2.close()

    @inlineCallbacks
    def test_versions(self):
        # there's no API for this yet, but make sure the internals work
        w1 = wormhole.wormhole(APPID, self.relayurl, reactor)
        w1._my_versions = {u"w1": 123}
        w2 = wormhole.wormhole(APPID, self.relayurl, reactor)
        w2._my_versions = {u"w2": 456}
        code = yield w1.get_code()
        w2.set_code(code)
        yield w1.verify()
        self.assertEqual(w1._their_versions, {u"w2": 456})
        yield w2.verify()
        self.assertEqual(w2._their_versions, {u"w1": 123})
        yield w1.close()
        yield w2.close()
class Errors(ServerBase, unittest.TestCase):
    """API-misuse checks: once a code has been set (or allocated), further
    code-related calls must raise UsageError."""

    @inlineCallbacks
    def test_codes_1(self):
        wh = wormhole.wormhole(APPID, self.relayurl, reactor)
        # derive_key() before any key exists is definitely too early
        self.assertRaises(UsageError, wh.derive_key, u"purpose", 12)
        wh.set_code(u"123-purple-elephant")
        # the code may only ever be set once
        self.assertRaises(UsageError, wh.set_code, u"123-nope")
        yield self.assertFailure(wh.get_code(), UsageError)
        yield self.assertFailure(wh.input_code(), UsageError)
        yield wh.close()

    @inlineCallbacks
    def test_codes_2(self):
        wh = wormhole.wormhole(APPID, self.relayurl, reactor)
        yield wh.get_code()
        # once allocated, neither set_code nor another get/input is legal
        self.assertRaises(UsageError, wh.set_code, u"123-nope")
        yield self.assertFailure(wh.get_code(), UsageError)
        yield self.assertFailure(wh.input_code(), UsageError)
        yield wh.close()
| 37.809152 | 80 | 0.614458 |
795b6bb7249b3b27ae2a19005ca4c04fbff28e19 | 2,260 | py | Python | mldp/tests/transformers/test_chunk_sorter.py | prashantlv/mltoolkit | acc192bafc66b7661d541ef4f604b5e5ab7df5ca | [
"MIT"
] | 1 | 2020-10-03T05:23:31.000Z | 2020-10-03T05:23:31.000Z | mldp/tests/transformers/test_chunk_sorter.py | prashantlv/mltoolkit | acc192bafc66b7661d541ef4f604b5e5ab7df5ca | [
"MIT"
] | null | null | null | mldp/tests/transformers/test_chunk_sorter.py | prashantlv/mltoolkit | acc192bafc66b7661d541ef4f604b5e5ab7df5ca | [
"MIT"
] | null | null | null | import unittest
from mldp.steps.transformers.general import ChunkSorter
from mldp.utils.tools import DataChunk
import numpy as np
class TestChunkSorter(unittest.TestCase):
def setUp(self):
self.ints_fn = "ints"
self.strings_fn = "strings"
self.floats_fn = "floats"
def test_sorting_by_ints_descending(self):
expected_dc = DataChunk(**{
self.ints_fn: np.array([123, 10, 0]),
self.strings_fn: np.array(["d", "a", "c"]),
self.floats_fn: np.array([15., -1, -10.])
})
actual_dc = self._run_sorter(fn=self.ints_fn, order='descending')
self.assertTrue(expected_dc == actual_dc)
def test_sorting_by_ints_ascending(self):
expected_dc = DataChunk(**{
self.ints_fn: np.array([0, 10, 123]),
self.strings_fn: np.array(["c", "a", "d"]),
self.floats_fn: np.array([-10., -1., 15.])
})
actual_dc = self._run_sorter(fn=self.ints_fn, order='ascending')
self.assertTrue(expected_dc == actual_dc)
def test_sorting_by_strings_descending(self):
expected_dc = DataChunk(**{
self.ints_fn: np.array([123, 0, 10]),
self.strings_fn: np.array(["d", "c", "a"]),
self.floats_fn: np.array([15., -10., -1.])
})
actual_dc = self._run_sorter(fn=self.strings_fn, order='descending')
self.assertTrue(expected_dc == actual_dc)
def test_sorting_by_string_ascending(self):
expected_dc = DataChunk(**{
self.ints_fn: np.array([10, 0, 123]),
self.strings_fn: np.array(["a", "c", "d"]),
self.floats_fn: np.array([-1., -10, 15.])
})
actual_dc = self._run_sorter(fn=self.strings_fn, order='ascending')
self.assertTrue(expected_dc == actual_dc)
def _get_dc(self):
return DataChunk(**{
self.ints_fn: np.array([10, 0, 123]),
self.strings_fn: np.array(["a", "c", "d"]),
self.floats_fn: np.array([-1., -10., 15.])
})
def _run_sorter(self, fn, order):
dc = self._get_dc()
sorter = ChunkSorter(field_name=fn, order=order)
dc = sorter(dc)
return dc
if __name__ == '__main__':
unittest.main()
| 34.769231 | 76 | 0.582301 |
795b6cf71c2304801047cc848c17f9b578d0e72c | 9,242 | py | Python | projects/nerf/train_nerf.py | shubham-goel/pytorch3d | e5e6e90af6f81b3eccb35bbdfdc7e64ec6a4df21 | [
"BSD-3-Clause"
] | 1 | 2021-07-22T08:42:09.000Z | 2021-07-22T08:42:09.000Z | projects/nerf/train_nerf.py | shubham-goel/pytorch3d | e5e6e90af6f81b3eccb35bbdfdc7e64ec6a4df21 | [
"BSD-3-Clause"
] | null | null | null | projects/nerf/train_nerf.py | shubham-goel/pytorch3d | e5e6e90af6f81b3eccb35bbdfdc7e64ec6a4df21 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import collections
import os
import pickle
import warnings
import hydra
import numpy as np
import torch
from nerf.dataset import get_nerf_datasets, trivial_collate
from nerf.nerf_renderer import RadianceFieldRenderer, visualize_nerf_outputs
from nerf.stats import Stats
from omegaconf import DictConfig
from visdom import Visdom
CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs")
@hydra.main(config_path=CONFIG_DIR, config_name="lego")
def main(cfg: DictConfig):
# Set the relevant seeds for reproducibility.
np.random.seed(cfg.seed)
torch.manual_seed(cfg.seed)
# Device on which to run.
if torch.cuda.is_available():
device = "cuda"
else:
warnings.warn(
"Please note that although executing on CPU is supported,"
+ "the training is unlikely to finish in resonable time."
)
device = "cpu"
# Initialize the Radiance Field model.
model = RadianceFieldRenderer(
image_size=cfg.data.image_size,
n_pts_per_ray=cfg.raysampler.n_pts_per_ray,
n_pts_per_ray_fine=cfg.raysampler.n_pts_per_ray,
n_rays_per_image=cfg.raysampler.n_rays_per_image,
min_depth=cfg.raysampler.min_depth,
max_depth=cfg.raysampler.max_depth,
stratified=cfg.raysampler.stratified,
stratified_test=cfg.raysampler.stratified_test,
chunk_size_test=cfg.raysampler.chunk_size_test,
n_harmonic_functions_xyz=cfg.implicit_function.n_harmonic_functions_xyz,
n_harmonic_functions_dir=cfg.implicit_function.n_harmonic_functions_dir,
n_hidden_neurons_xyz=cfg.implicit_function.n_hidden_neurons_xyz,
n_hidden_neurons_dir=cfg.implicit_function.n_hidden_neurons_dir,
n_layers_xyz=cfg.implicit_function.n_layers_xyz,
density_noise_std=cfg.implicit_function.density_noise_std,
)
# Move the model to the relevant device.
model.to(device)
# Init stats to None before loading.
stats = None
optimizer_state_dict = None
start_epoch = 0
checkpoint_path = os.path.join(hydra.utils.get_original_cwd(), cfg.checkpoint_path)
if len(cfg.checkpoint_path) > 0:
# Make the root of the experiment directory.
checkpoint_dir = os.path.split(checkpoint_path)[0]
os.makedirs(checkpoint_dir, exist_ok=True)
# Resume training if requested.
if cfg.resume and os.path.isfile(checkpoint_path):
print(f"Resuming from checkpoint {checkpoint_path}.")
loaded_data = torch.load(checkpoint_path)
model.load_state_dict(loaded_data["model"])
stats = pickle.loads(loaded_data["stats"])
print(f" => resuming from epoch {stats.epoch}.")
optimizer_state_dict = loaded_data["optimizer"]
start_epoch = stats.epoch
# Initialize the optimizer.
optimizer = torch.optim.Adam(
model.parameters(),
lr=cfg.optimizer.lr,
)
# Load the optimizer state dict in case we are resuming.
if optimizer_state_dict is not None:
optimizer.load_state_dict(optimizer_state_dict)
optimizer.last_epoch = start_epoch
# Init the stats object.
if stats is None:
stats = Stats(
["loss", "mse_coarse", "mse_fine", "psnr_coarse", "psnr_fine", "sec/it"],
)
# Learning rate scheduler setup.
# Following the original code, we use exponential decay of the
# learning rate: current_lr = base_lr * gamma ** (epoch / step_size)
def lr_lambda(epoch):
return cfg.optimizer.lr_scheduler_gamma ** (
epoch / cfg.optimizer.lr_scheduler_step_size
)
# The learning rate scheduling is implemented with LambdaLR PyTorch scheduler.
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer, lr_lambda, last_epoch=start_epoch - 1, verbose=False
)
# Initialize the cache for storing variables needed for visulization.
visuals_cache = collections.deque(maxlen=cfg.visualization.history_size)
# Init the visualization visdom env.
if cfg.visualization.visdom:
viz = Visdom(
server=cfg.visualization.visdom_server,
port=cfg.visualization.visdom_port,
use_incoming_socket=False,
)
else:
viz = None
# Load the training/validation data.
train_dataset, val_dataset, _ = get_nerf_datasets(
dataset_name=cfg.data.dataset_name,
image_size=cfg.data.image_size,
)
if cfg.data.precache_rays:
# Precache the projection rays.
model.eval()
with torch.no_grad():
for dataset in (train_dataset, val_dataset):
cache_cameras = [e["camera"].to(device) for e in dataset]
cache_camera_hashes = [e["camera_idx"] for e in dataset]
model.precache_rays(cache_cameras, cache_camera_hashes)
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=1,
shuffle=True,
num_workers=0,
collate_fn=trivial_collate,
)
# The validation dataloader is just an endless stream of random samples.
val_dataloader = torch.utils.data.DataLoader(
val_dataset,
batch_size=1,
num_workers=0,
collate_fn=trivial_collate,
sampler=torch.utils.data.RandomSampler(
val_dataset,
replacement=True,
num_samples=cfg.optimizer.max_epochs,
),
)
# Set the model to the training mode.
model.train()
# Run the main training loop.
for epoch in range(start_epoch, cfg.optimizer.max_epochs):
stats.new_epoch() # Init a new epoch.
for iteration, batch in enumerate(train_dataloader):
image, camera, camera_idx = batch[0].values()
image = image.to(device)
camera = camera.to(device)
optimizer.zero_grad()
# Run the forward pass of the model.
nerf_out, metrics = model(
camera_idx if cfg.data.precache_rays else None,
camera,
image,
)
# The loss is a sum of coarse and fine MSEs
loss = metrics["mse_coarse"] + metrics["mse_fine"]
# Take the training step.
loss.backward()
optimizer.step()
# Update stats with the current metrics.
stats.update(
{"loss": float(loss), **metrics},
stat_set="train",
)
if iteration % cfg.stats_print_interval == 0:
stats.print(stat_set="train")
# Update the visualisatioon cache.
visuals_cache.append(
{
"camera": camera.cpu(),
"camera_idx": camera_idx,
"image": image.cpu().detach(),
"rgb_fine": nerf_out["rgb_fine"].cpu().detach(),
"rgb_coarse": nerf_out["rgb_coarse"].cpu().detach(),
"rgb_gt": nerf_out["rgb_gt"].cpu().detach(),
"coarse_ray_bundle": nerf_out["coarse_ray_bundle"],
}
)
# Adjust the learning rate.
lr_scheduler.step()
# Validation
if epoch % cfg.validation_epoch_interval == 0 and epoch > 0:
# Sample a validation camera/image.
val_batch = next(val_dataloader.__iter__())
val_image, val_camera, camera_idx = val_batch[0].values()
val_image = val_image.to(device)
val_camera = val_camera.to(device)
# Activate eval mode of the model (allows to do a full rendering pass).
model.eval()
with torch.no_grad():
val_nerf_out, val_metrics = model(
camera_idx if cfg.data.precache_rays else None,
val_camera,
val_image,
)
# Update stats with the validation metrics.
stats.update(val_metrics, stat_set="val")
stats.print(stat_set="val")
if viz is not None:
# Plot that loss curves into visdom.
stats.plot_stats(
viz=viz,
visdom_env=cfg.visualization.visdom_env,
plot_file=None,
)
# Visualize the intermediate results.
visualize_nerf_outputs(
val_nerf_out, visuals_cache, viz, cfg.visualization.visdom_env
)
# Set the model back to train mode.
model.train()
# Checkpoint.
if (
epoch % cfg.checkpoint_epoch_interval == 0
and len(cfg.checkpoint_path) > 0
and epoch > 0
):
print(f"Storing checkpoint {checkpoint_path}.")
data_to_store = {
"model": model.state_dict(),
"optimizer": optimizer.state_dict(),
"stats": pickle.dumps(stats),
}
torch.save(data_to_store, checkpoint_path)
if __name__ == "__main__":
main()
| 34.614232 | 87 | 0.615235 |
795b6e7173a24724f844d5e22b324258df0d8872 | 5,811 | py | Python | util.py | blackoutjack/ExTRA | b9179eaa667cc2bd11d002b7b4ae9cdca37feac5 | [
"BSD-3-Clause"
] | null | null | null | util.py | blackoutjack/ExTRA | b9179eaa667cc2bd11d002b7b4ae9cdca37feac5 | [
"BSD-3-Clause"
] | null | null | null | util.py | blackoutjack/ExTRA | b9179eaa667cc2bd11d002b7b4ae9cdca37feac5 | [
"BSD-3-Clause"
] | null | null | null |
#
# This file contains some general-purpose utility functions that can be
# used by other Python scripts. It is not meant to be run on its own.
#
import sys
MAJOR = sys.version_info[0]
if MAJOR >= 3:
import urllib.parse as urlparse
else:
import urlparse
from config import *
def get_unique_filename(origpath):
  """Return a path based on |origpath| that does not collide with an
  existing file or directory, appending '-N' suffixes as needed.

  If a component of the directory portion of |origpath| is an existing
  regular file, that component is renamed with a numeric suffix and the
  check is re-run recursively on the rebuilt path.
  """
  dirpath, filename = os.path.split(origpath)
  if not os.path.exists(dirpath):
    # Easy case: the original path is available.
    # NOTE(review): when |origpath| has no directory part, |dirpath| is ''
    # and os.path.exists('') is False, so the filename itself is never
    # checked against the current directory -- confirm this is intended.
    return origpath
  if not os.path.isdir(dirpath):
    dirparts = []
    # Climb the directory structure to see if there are conflicts.
    dirpart = dirpath
    while dirpart != '':
      # %%% Maybe need special handling for symlinks?
      if os.path.isdir(dirpart):
        break
      if os.path.isfile(dirpart):
        # Rename the directory.
        pathprefix = dirpart
        dirpart, lastpart = os.path.split(dirpart)
        dirparts.insert(0, lastpart)
        idx = 0
        while os.path.isfile(pathprefix):
          # Make an altered directory name.
          newlastpart = lastpart + '-' + str(idx)
          pathprefix = os.path.join(dirpart, newlastpart)
          idx += 1
        # Rebuild the full path from the renamed component plus the
        # components peeled off while climbing.
        newfilepath = pathprefix
        for suffixpart in dirparts:
          newfilepath = os.path.join(newfilepath, suffixpart)
        if os.path.isdir(pathprefix):
          # Need to make sure the new path is available.
          return get_unique_filename(newfilepath)
        origpath = newfilepath
        break
      dirpart, lastpart = os.path.split(dirpart)
      dirparts.insert(0, lastpart)
    # Getting here means the original path is available.
    return origpath
  # The directory exists: find a free filename inside it.
  filebase, fileext = os.path.splitext(filename)
  newfilepath = origpath
  idx = 0
  while os.path.exists(newfilepath):
    # Make an altered filename.
    newfilebase = filebase + '-' + str(idx)
    newfilename = newfilebase + fileext
    newfilepath = os.path.join(dirpath, newfilename)
    idx += 1
  return newfilepath
# /get_unique_filename
def get_output_dir(top, base):
  """Compute a fresh, numbered output directory path beneath |top|.

  |base| is a '/'-separated relative path; the final component receives a
  '-N' suffix where N is one greater than the largest numeric suffix
  already present in the parent directory (0 when none exist).  The
  directory itself is not created.
  """
  components = base.split('/')
  leading = components[:-1]
  final = components[-1]
  parent = os.path.join(top, *leading)
  prefix = final + '-'
  next_id = 0
  if os.path.isdir(parent):
    # Scan existing siblings for the highest used numeric suffix.
    for entry in os.listdir(parent):
      if not entry.startswith(prefix):
        continue
      suffix = entry[len(prefix):]
      try:
        next_id = max(next_id, int(suffix) + 1)
      except:
        warn('Non-numeric suffix, ignoring: %s' % suffix)
  return os.path.join(top, *(leading + [prefix + str(next_id)]))
# /get_output_dir
def fatal(txt, code=1):
  """Write a FATAL-tagged message to stderr and terminate with |code|."""
  message = "FATAL: %s\n" % txt
  sys.stderr.write(message)
  sys.exit(code)
# /fatal
def err(txt):
  """Write an ERROR-tagged line to stderr and flush immediately."""
  stream = sys.stderr
  stream.write("ERROR: %s\n" % txt)
  stream.flush()
# /err
def out(txt):
  """Write an INFO-tagged line to stderr and flush immediately."""
  stream = sys.stderr
  stream.write("INFO: %s\n" % txt)
  stream.flush()
# /out
def warn(txt):
  """Write a WARNING-tagged line to stderr and flush immediately."""
  stream = sys.stderr
  stream.write("WARNING: %s\n" % txt)
  stream.flush()
# /warn
def get_lines(filename, comment=None):
  """Return the stripped lines of |filename| as a list.

  When |comment| is given, lines whose stripped form starts with it are
  skipped.  The file is closed deterministically via a context manager
  (the original leaked the handle until garbage collection).
  """
  ret = []
  with open(filename, 'r') as fl:
    for line in fl:
      line = line.strip()
      if comment is not None and line.startswith(comment):
        continue
      ret.append(line)
  return ret
# /get_lines
def get_file_info(filepath):
  """Bundle the app name, descriptor list, and extension of |filepath|."""
  info = {}
  info['app'] = get_base(filepath)
  info['desc'] = get_descriptors(filepath)
  info['ext'] = get_ext(filepath)
  return info
# /get_file_info
def get_ext(filepath):
  """Return the final extension of |filepath| with the dot ('' if none)."""
  extension = os.path.splitext(filepath)[1]
  return extension
# /get_ext
# Get the internal dot-separated components of the filepath.
# E.g. "path/app.jam.more.extra.js" => ['jam', 'more', 'extra'].
def get_descriptors(filepath):
  """Collect descriptors from a 'source-<desc>' parent directory (if any)
  followed by the interior dot-separated pieces of the filename."""
  dirpath, filename = os.path.split(filepath)
  dirname = os.path.basename(dirpath)
  prefix = 'source-'
  if dirname.startswith(prefix):
    # Directory descriptors come from the 'source-' suffix.
    desc = dirname[len(prefix):].split('.')
  else:
    desc = []
  pieces = filename.split('.')
  # Interior pieces exclude the base name and the extension.
  desc.extend(pieces[1:-1])
  return desc
# /get_descriptors
# Get the first dot-separated component of a filename or filepath.
def get_base(filepath):
  """Return the part of the file's name before the first dot."""
  filename = os.path.basename(filepath)
  return filename.split('.', 1)[0]
# /get_base
def symlink(srcpath, linkdir, linkname=None, relative=False):
  """Create a symbolic link to |srcpath| inside |linkdir| and return the
  link path.

  The link is named |linkname| (defaulting to the source's basename).  An
  existing link is left untouched.  With |relative|, the link target is
  expressed relative to |linkdir|; otherwise it is absolute.
  """
  name = os.path.split(srcpath)[1] if linkname is None else linkname
  linkpath = os.path.join(linkdir, name)
  # lexists is also true for broken symbolic links, so even a stale link
  # suppresses creation.
  # %%% Should check to see if the link is correct or needs updating.
  if not os.path.lexists(linkpath):
    if relative:
      # Express the target relative to the directory holding the link.
      target = os.path.relpath(srcpath, linkdir)
    else:
      target = os.path.abspath(srcpath)
    os.symlink(target, linkpath)
  return linkpath
# /symlink
def is_url(uri):
  """Return True when |uri| carries an http or https scheme."""
  protocol = get_protocol(uri)
  return protocol == 'http' or protocol == 'https'
# /is_url
def get_protocol(url):
  """Return the scheme component of |url| (e.g. 'http'); '' when absent."""
  parsed = urlparse.urlparse(url)
  return parsed.scheme
# /get_protocol
def get_relative_path(url, usedomain=False, referer=None):
  """Derive a slash-trimmed relative file path from |url|.

  With |usedomain| (and an http/https URL) the domain is prepended.  With
  |referer|, the result is expressed relative to the referer's directory
  (the referer is assumed to name a file).
  """
  parsed = urlparse.urlparse(url)
  path = parsed.path.lstrip('/')
  if usedomain and is_url(url):
    # Prepend the domain component.
    path = os.path.join(parsed.netloc, path)
  if referer is not None:
    ref_parsed = urlparse.urlparse(referer)
    # Assume the referer is a file, and keep only its directory.
    ref_dir = os.path.split(ref_parsed.path)[0]
    if ref_dir.startswith('/'):
      ref_dir = ref_dir[1:]
    path = os.path.relpath(path, ref_dir)
  # Remove beginning and ending slashes.
  return path.strip('/')
# /get_relative_path
| 26.294118 | 71 | 0.657546 |
795b6fe54b68282d1af73d66ad0864345852f2f0 | 1,660 | py | Python | examples/Ch02/ex_triple_barrier.py | cw-jang/adv_finance | 240ce03e53fc6eead469a1ce7a220510a78c437e | [
"BSD-3-Clause"
] | 15 | 2019-05-20T04:28:38.000Z | 2021-12-11T06:50:52.000Z | examples/Ch02/ex_triple_barrier.py | cw-jang/adv_finance | 240ce03e53fc6eead469a1ce7a220510a78c437e | [
"BSD-3-Clause"
] | null | null | null | examples/Ch02/ex_triple_barrier.py | cw-jang/adv_finance | 240ce03e53fc6eead469a1ce7a220510a78c437e | [
"BSD-3-Clause"
] | 2 | 2020-05-16T13:23:30.000Z | 2020-08-13T22:58:08.000Z | import pandas as pd
import numpy as np
from datetime import datetime
from adv_finance import stats, labeling
if __name__ == "__main__":
    print("main started")
    # Load tick/bar data and parse timestamps into a DatetimeIndex.
    df = pd.read_csv("..\\TRADE_A233740_2019_DV.csv")
    df.timestamp = df.timestamp.apply(lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f'))
    df = df.set_index('timestamp')
    # Keep OHLCV columns and drop duplicate rows/timestamps.
    df = df[['open', 'high', 'low', 'close', 'vol']].drop_duplicates()
    df = df.loc[~df.index.duplicated(keep='first')]
    close = df['close']
    # daily_vol = stats.get_daily_vol(close, 20)
    # threshold = daily_vol.ewm(20).mean() * 0.5
    # side_events = labeling.cusum_filter(close, threshold)
    # ===
    # Sample events with a CUSUM filter thresholded at half the mean
    # daily volatility, then attach a 1-day vertical barrier to each event.
    daily_vol = stats.get_daily_vol(close)
    threshold = daily_vol.mean() * 0.5
    cusum_events = labeling.cusum_filter(df['close'], threshold)
    vertical_barriers = labeling.add_vertical_barrier(t_events=cusum_events, close=df['close'], num_days=1)
    # Horizontal barriers: profit-take at 1x target, stop-loss at 2x target.
    pt_sl = [1, 2]
    min_ret = 0.005
    # NOTE(review): daily_vol comes from a Series, so daily_vol['close']
    # looks like label-based indexing into a Series -- verify this matches
    # the adv_finance.stats.get_daily_vol return type.
    triple_barrier_events = labeling.get_events(close=df['close'],
                                                t_events=cusum_events,
                                                pt_sl=pt_sl,
                                                target=daily_vol['close'],
                                                min_ret=min_ret,
                                                num_threads=2,
                                                vertical_barrier_times=vertical_barriers,
                                                side_prediction=None)
    # Label each event by which barrier was touched first.
    labels = labeling.get_bins(triple_barrier_events, df['close'])
    # print(labels.side.value_counts())
    print("main finished")
| 37.727273 | 107 | 0.553614 |
795b7016b905f45fabd05c9ada18567120914ecf | 1,474 | py | Python | Code/batch_feature_extraction.py | RickyMexx/3D-Sound-Localization | 43fd4d2f341fea91ba48f82cd519a65c82ec1cb2 | [
"Apache-2.0"
] | 13 | 2020-03-27T10:33:17.000Z | 2022-02-05T09:44:07.000Z | batch_feature_extraction.py | FedericaCoppa/Neural-Network-Project-QUATERNION-SELD-TCN | 8ba9a05e52fecce68864919464f1d495aed31896 | [
"RSA-MD"
] | 4 | 2020-09-25T22:38:22.000Z | 2022-02-09T23:37:53.000Z | batch_feature_extraction.py | FedericaCoppa/Neural-Network-Project-QUATERNION-SELD-TCN | 8ba9a05e52fecce68864919464f1d495aed31896 | [
"RSA-MD"
] | 1 | 2020-03-28T10:52:24.000Z | 2020-03-28T10:52:24.000Z | # Extracts the features, labels, and normalizes the training and test split features. Make sure you update the location
# of the downloaded datasets before in the cls_feature_class.py
import cls_feature_class
import cls_feature_extr
import parameter
# Load the experiment configuration (parameter set '1').
params = parameter.get_params('1')
dataset_name = params['dataset']
dataset_dir = params['dataset_dir']
feat_label_dir = params['feat_label_dir']
if(dataset_name == "foa"):
    # -------------- Extract features and labels for development set -----------------------------
    dev_feat_cls = cls_feature_extr.FeatureClass(dataset=dataset_name, dataset_dir=dataset_dir, feat_label_dir=feat_label_dir)
    # Extract features and normalize them
    dev_feat_cls.extract_all_feature()
    dev_feat_cls.preprocess_features()
    # Extract labels in regression mode
    dev_feat_cls.extract_all_labels()
else:
    # Extracts feature and labels for all overlap and splits
    for ovo in [2]:  # SE overlap
        for splito in [1]:  # all splits. Use [1, 8, 9] for 'real' dataset
            for nffto in [512]:  # FFT size used for feature extraction
                feat_cls = cls_feature_class.FeatureClass(ov=ovo, split=splito, nfft=nffto, dataset=dataset_name)
                # Extract features and normalize them
                feat_cls.extract_all_feature()
                feat_cls.preprocess_features()
                # # Extract labels in regression mode
                feat_cls.extract_all_labels('regr', 0)
| 38.789474 | 127 | 0.671642 |
795b706059574c57faeac17c987bb50f517947fa | 1,415 | py | Python | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/vpc/models/ModifySecurityGroupRules.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/vpc/models/ModifySecurityGroupRules.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/vpc/models/ModifySecurityGroupRules.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class ModifySecurityGroupRules(object):
    def __init__(self, ruleId, protocol=None, fromPort=None, toPort=None, addressPrefix=None, description=None):
        """
        :param ruleId: ID of the security group rule.
        :param protocol: (Optional) protocol restricted by the rule. 300: All; 6: TCP; 17: UDP; 1: ICMP
        :param fromPort: (Optional) start port of the rule; range: 1-65535
        :param toPort: (Optional) end port of the rule; range: 1-65535
        :param addressPrefix: (Optional) address prefix for the rule; must be a valid CIDR
        :param description: (Optional) description of the rule; 0-256 UTF-8 characters
        """
        # Store every constructor argument verbatim on the instance.
        for attr_name, attr_value in (
            ("ruleId", ruleId),
            ("protocol", protocol),
            ("fromPort", fromPort),
            ("toPort", toPort),
            ("addressPrefix", addressPrefix),
            ("description", description),
        ):
            setattr(self, attr_name, attr_value)
| 37.236842 | 112 | 0.70742 |
795b70613496a6f922d59700fb35f60ee5441ef7 | 1,050 | py | Python | src/billing/migrations/0001_initial.py | HuyNguyen260398/python-ecommerce | 609600058bf4268f4dbe00e179bf3fd75e9a3a79 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | src/billing/migrations/0001_initial.py | HuyNguyen260398/python-ecommerce | 609600058bf4268f4dbe00e179bf3fd75e9a3a79 | [
"bzip2-1.0.6",
"MIT"
] | 6 | 2021-03-19T02:41:46.000Z | 2022-01-13T01:31:12.000Z | src/billing/migrations/0001_initial.py | HuyNguyen260398/python-ecommerce | 609600058bf4268f4dbe00e179bf3fd75e9a3a79 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.21 on 2019-07-22 13:25
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the BillingProfile table."""
    initial = True
    dependencies = [
        # Must run after the (possibly swapped) user model exists.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='BillingProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=254)),
                ('active', models.BooleanField(default=True)),
                ('update', models.DateTimeField(auto_now=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                # NOTE(review): ForeignKey(..., unique=True) is equivalent to a
                # OneToOneField and newer Django versions warn about it
                # (fields.W342) -- confirm the intent before altering, since
                # editing an applied migration changes schema history.
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, unique=True)),
            ],
        ),
    ]
| 33.870968 | 154 | 0.64 |
795b717ee02811cf932cfbaf45686cd081fb32fb | 3,020 | py | Python | src/OTLMOW/OTLModel/Datatypes/KlExternedetectieAangeslotentoestel.py | davidvlaminck/OTLClassPython | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | 2 | 2022-02-01T08:58:11.000Z | 2022-02-08T13:35:17.000Z | src/OTLMOW/OTLModel/Datatypes/KlExternedetectieAangeslotentoestel.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | src/OTLMOW/OTLModel/Datatypes/KlExternedetectieAangeslotentoestel.py | davidvlaminck/OTLMOW | 71330afeb37c3ea6d9981f521ff8f4a3f8b946fc | [
"MIT"
] | null | null | null | # coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlExternedetectieAangeslotentoestel(KeuzelijstField):
    """Code list of device types (train, bridge, FCD) that can be connected to an
    external detection. (Original Dutch: keuzelijst met de voorkomende types van
    aangesloten toestellen (trein, brug, FCD) aan een externe detectie.)"""
    # Machine-readable metadata mirroring the OTL code list; this class is
    # generated (see the module header) -- extend rather than edit by hand.
    naam = 'KlExternedetectieAangeslotentoestel'
    label = 'Externedetectie aangeslotentoestel'
    objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlExternedetectieAangeslotentoestel'
    definition = 'Keuzelijst met de voorkomende types van aangesloten toestellen (trein, brug, FCD) aan een externe detectie.'
    codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlExternedetectieAangeslotentoestel'
    # Allowed values: each entry maps the stored value to its display label
    # and concept URI.
    options = {
        'MIVB': KeuzelijstWaarde(invulwaarde='MIVB',
                                 label='MIVB',
                                 objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlExternedetectieAangeslotentoestel/MIVB'),
        'brug': KeuzelijstWaarde(invulwaarde='brug',
                                 label='brug',
                                 objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlExternedetectieAangeslotentoestel/brug'),
        'de-Lijn': KeuzelijstWaarde(invulwaarde='de-Lijn',
                                    label='de Lijn',
                                    objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlExternedetectieAangeslotentoestel/de-Lijn'),
        'hulpdiensten': KeuzelijstWaarde(invulwaarde='hulpdiensten',
                                         label='hulpdiensten',
                                         objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlExternedetectieAangeslotentoestel/hulpdiensten'),
        'luchthaven': KeuzelijstWaarde(invulwaarde='luchthaven',
                                       label='luchthaven',
                                       objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlExternedetectieAangeslotentoestel/luchthaven'),
        'militaire-kazerne': KeuzelijstWaarde(invulwaarde='militaire-kazerne',
                                              label='militaire kazerne',
                                              objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlExternedetectieAangeslotentoestel/militaire-kazerne'),
        'spoorweg': KeuzelijstWaarde(invulwaarde='spoorweg',
                                     label='spoorweg',
                                     objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlExternedetectieAangeslotentoestel/spoorweg'),
        'tunnel': KeuzelijstWaarde(invulwaarde='tunnel',
                                   label='tunnel',
                                   objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlExternedetectieAangeslotentoestel/tunnel')
    }
| 73.658537 | 166 | 0.638079 |
795b71cf209564842f3640eae90c1d3482afa3fb | 1,079 | py | Python | ghidra_plugins/FullAnalysis.py | ogre2007/Ghidraaas | f3af11f2c605cb2f8d0594d786f91c6a4618f868 | [
"Apache-2.0"
] | 2 | 2021-12-30T06:41:30.000Z | 2022-03-12T09:19:10.000Z | ghidra_plugins/FullAnalysis.py | ogre2007/Ghidraaas | f3af11f2c605cb2f8d0594d786f91c6a4618f868 | [
"Apache-2.0"
] | null | null | null | ghidra_plugins/FullAnalysis.py | ogre2007/Ghidraaas | f3af11f2c605cb2f8d0594d786f91c6a4618f868 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import sys
# Collect the external symbols of the current Ghidra program, grouped by
# parent namespace, and dump them as JSON to the path given as the first
# script argument.  On failure, an error JSON is printed instead.
response_dict = dict()  # created before the try so the handler can always use it
try:
    args = getScriptArgs()
    if len(args) < 1:
        print("usage: ./FullAnalysis.py output_path")
        sys.exit(0)  # SystemExit is not an Exception, so it propagates
    # output_path of the json file (should terminate with ".json")
    output_path = args[0]
    # Group external symbol names by their parent namespace.
    external = dict()
    sm = currentProgram.getSymbolTable()
    count = 0
    for s in sm.getExternalSymbols():
        namespace = s.getParentNamespace().getName()
        external.setdefault(namespace, []).append(s.getName())
        count += 1
    response_dict['External Symbols'] = external
    print("Found %d external symbols" % (count))
    with open(output_path, "w") as f_out:
        json.dump(response_dict, f_out)
    print("Json saved to %s" % output_path)
except Exception as e:
    # Report the failure as JSON on stdout instead of crashing silently.
    # (The original referenced response_dict before it was guaranteed to be
    # bound, and discarded the error detail.)
    response_dict['status'] = "error"
    response_dict['message'] = str(e)
    print(json.dumps(response_dict))
795b71f3c7c859148863477bfdfd49ff79ceeba9 | 6,773 | py | Python | docs/conf.py | vishalbelsare/pyjanitor | 9c5ff2c4ad5969ee4bc683ba82010b55b55fd2bb | [
"MIT"
] | 674 | 2018-03-04T22:59:51.000Z | 2021-03-22T10:08:58.000Z | docs/conf.py | vishalbelsare/pyjanitor | 9c5ff2c4ad5969ee4bc683ba82010b55b55fd2bb | [
"MIT"
] | 737 | 2018-03-05T01:03:15.000Z | 2021-03-22T23:55:27.000Z | docs/conf.py | vishalbelsare/pyjanitor | 9c5ff2c4ad5969ee4bc683ba82010b55b55fd2bb | [
"MIT"
] | 176 | 2018-03-14T23:00:56.000Z | 2021-03-24T19:32:34.000Z | """Sphinx configuration."""
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
import datetime
import os
import platform
import sys
from pathlib import Path
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../examples"))
# Make a symlink in our sphinx source directory to the top-level
# examples/notebooks directory so we can include notebooks in the doc
notebooks = Path("./notebooks")
if platform.system() == "Windows":
    # Only for windows
    # NOTE(review): "mklink /J" creates a directory junction; the return
    # value of os.system is ignored, so a failure here is silent -- confirm
    # that is acceptable for the Windows doc build.
    os.system("mklink /J notebooks ..\\examples\\notebooks")
else:
    try:
        print("Making symlink to ../examples/notebooks")
        notebooks.symlink_to("../examples/notebooks")
    except FileExistsError as e:  # noqa: F841
        # A pre-existing link (e.g. from an earlier build) is fine.
        print(f"{notebooks} directory already exists. Not creating..")
# -- Project information -----------------------------------------------------
project = "pyjanitor"
now = datetime.datetime.now()
CurrentYear = str(now.year)
# Copyright year is computed at build time so it never goes stale.
copyright = CurrentYear + ", PyJanitor devs"
author = "pyjanitor devs"
# The short X.Y version
version = "0.1.0"
# The full version, including alpha/beta/rc tags
release = ""
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.intersphinx",
    "sphinx.ext.todo",
    "sphinx.ext.coverage",
    "sphinx.ext.viewcode",
    "sphinx.ext.githubpages",
    "sphinxcontrib.fulltoc",
    "nbsphinx",
    "sphinx.ext.autosummary",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = [".md", ".rst", ".ipynb"]
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Sphinx >= 5 warns on language = None; "en" is the modern
# equivalent -- confirm before upgrading Sphinx.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [
    "_build",
    "Thumbs.db",
    ".DS_Store",
    "**.ipynb_checkpoints",
    "inflating_converting_currency*",  # not working on PR #828.
]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {"logo": "logo_title.svg"}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
html_sidebars = {
    "**": ["about.html", "navigation.html", "relations.html", "searchbox.html"]
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "pyjanitordoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        "pyjanitor.tex",
        "pyjanitor Documentation",
        "Eric J. Ma",
        "manual",
    )
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pyjanitor", "pyjanitor Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "pyjanitor",
        "pyjanitor Documentation",
        author,
        "pyjanitor",
        "One line description of project.",
        "Miscellaneous",
    )
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    "https://docs.python.org/": None,
    "https://pandas.pydata.org/pandas-docs/stable": None,
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Other options -----------------------------------------------------------
autosummary_generate = True  # Make _autosummary files and include them
nbsphinx_execute = "never"
| 30.372197 | 79 | 0.637827 |
795b7274d82484140a514a069d0fb2f53083f877 | 12,816 | py | Python | model/rnn.py | frankxu2004/tacred-relation-cotrain | 005dd0cf27d6a68fcf3cbef736de3fe9759ce6b4 | [
"Apache-2.0"
] | null | null | null | model/rnn.py | frankxu2004/tacred-relation-cotrain | 005dd0cf27d6a68fcf3cbef736de3fe9759ce6b4 | [
"Apache-2.0"
] | null | null | null | model/rnn.py | frankxu2004/tacred-relation-cotrain | 005dd0cf27d6a68fcf3cbef736de3fe9759ce6b4 | [
"Apache-2.0"
] | null | null | null | """
A rnn model for relation extraction, written in pytorch.
"""
import math
import numpy as np
import torch
from torch import nn
from torch.nn import init
from torch.autograd import Variable
import torch.nn.functional as F
from utils import constant, torch_utils
from model import layers
class RelationModel(object):
    """ A wrapper class for the training and evaluation of models. """
    def __init__(self, opt, emb_matrix=None):
        # opt: configuration dict; keys used here and below include 'cuda',
        # 'optim', 'lr', 'max_grad_norm', and 'qa_weight'.
        # emb_matrix: optional pretrained word-embedding matrix forwarded to
        # PositionAwareRNN.
        self.opt = opt
        self.model = PositionAwareRNN(opt, emb_matrix)
        self.criterion = nn.CrossEntropyLoss()
        # Only trainable parameters go to the optimizer; embeddings may be
        # frozen by PositionAwareRNN.init_weights depending on opt['topn'].
        self.parameters = [p for p in self.model.parameters() if p.requires_grad]
        if opt['cuda']:
            self.model.cuda()
            self.criterion.cuda()
        self.optimizer = torch_utils.get_optimizer(opt['optim'], self.parameters, opt['lr'])
    def joint_update(self, re_batch, qa_batch):
        """ Run one joint update on a relation-extraction (RE) batch and a
        reading-comprehension (RC) batch; the RC loss is scaled by
        opt['qa_weight'] before being added to the RE loss.
        re_batch[:7] are the RE inputs (unpacked as words, masks, pos, ner,
        deprel, subj_pos, obj_pos by PositionAwareRNN.forward) and
        re_batch[7] the relation labels; qa_batch[:8] are the RC inputs and
        qa_batch[8]/qa_batch[9] the answer start/end targets.
        Returns the scalar loss value.
        NOTE: written against the pre-0.4 PyTorch API (Variable wrappers,
        loss.data[0], clip_grad_norm without trailing underscore).
        """
        if self.opt['cuda']:
            re_inputs = [Variable(b.cuda()) for b in re_batch[:7]]
            re_labels = Variable(re_batch[7].cuda())
            qa_inputs = [Variable(b.cuda()) for b in qa_batch[:8]]
            target_s = Variable(qa_batch[8].cuda())
            target_e = Variable(qa_batch[9].cuda())
        else:
            re_inputs = [Variable(b) for b in re_batch[:7]]
            re_labels = Variable(re_batch[7])
            qa_inputs = [Variable(b) for b in qa_batch[:8]]
            target_s = Variable(qa_batch[8])
            target_e = Variable(qa_batch[9])
        # step forward
        self.model.train()
        self.optimizer.zero_grad()
        logits, _ = self.model(re_inputs, data_type='RE')
        score_s, score_e = self.model(qa_inputs, data_type='RC')
        loss = self.opt['qa_weight'] * (self.criterion(score_s, target_s) + self.criterion(score_e, target_e)) + \
               self.criterion(logits, re_labels)
        # backward
        loss.backward()
        torch.nn.utils.clip_grad_norm(self.model.parameters(), self.opt['max_grad_norm'])
        self.optimizer.step()
        loss_val = loss.data[0]
        return loss_val
    def update(self, batch):
        """ Run a step of forward and backward model update. """
        # batch[:7] are the RE inputs, batch[7] the relation labels.
        if self.opt['cuda']:
            inputs = [Variable(b.cuda()) for b in batch[:7]]
            labels = Variable(batch[7].cuda())
        else:
            inputs = [Variable(b) for b in batch[:7]]
            labels = Variable(batch[7])
        # step forward
        self.model.train()
        self.optimizer.zero_grad()
        logits, _ = self.model(inputs, data_type='RE')
        loss = self.criterion(logits, labels)
        # backward
        loss.backward()
        torch.nn.utils.clip_grad_norm(self.model.parameters(), self.opt['max_grad_norm'])
        self.optimizer.step()
        loss_val = loss.data[0]
        return loss_val
    def update_qa(self, qa_batch):
        """ Run a step of forward and backward update on an RC batch only.
        qa_batch[:8] are the RC inputs; qa_batch[8]/qa_batch[9] the answer
        start/end targets. Returns the scalar loss value. """
        if self.opt['cuda']:
            inputs = [Variable(b.cuda()) for b in qa_batch[:8]]
            target_s = Variable(qa_batch[8].cuda())
            target_e = Variable(qa_batch[9].cuda())
        else:
            inputs = [Variable(b) for b in qa_batch[:8]]
            target_s = Variable(qa_batch[8])
            target_e = Variable(qa_batch[9])
        # step forward
        self.model.train()
        self.optimizer.zero_grad()
        score_s, score_e = self.model(inputs, data_type='RC')
        loss = self.criterion(score_s, target_s) + self.criterion(score_e, target_e)
        # backward
        loss.backward()
        torch.nn.utils.clip_grad_norm(self.model.parameters(), self.opt['max_grad_norm'])
        self.optimizer.step()
        loss_val = loss.data[0]
        return loss_val
    def predict(self, batch, unsort=True):
        """ Run forward prediction. If unsort is True, recover the original order of the batch. """
        # batch[8] holds the permutation applied when the batch was sorted
        # (by length); it is used below to restore the original order.
        if self.opt['cuda']:
            inputs = [Variable(b.cuda()) for b in batch[:7]]
            labels = Variable(batch[7].cuda())
        else:
            inputs = [Variable(b) for b in batch[:7]]
            labels = Variable(batch[7])
        orig_idx = batch[8]
        # forward
        self.model.eval()
        # data_type defaults to 'RE' in PositionAwareRNN.forward.
        logits, _ = self.model(inputs)
        loss = self.criterion(logits, labels)
        probs = F.softmax(logits).data.cpu().numpy().tolist()
        predictions = np.argmax(logits.data.cpu().numpy(), axis=1).tolist()
        if unsort:
            _, predictions, probs = [list(t) for t in zip(*sorted(zip(orig_idx, \
                predictions, probs)))]
        return predictions, probs, loss.data[0]
    def update_lr(self, new_lr):
        # Set a new learning rate on the wrapped optimizer.
        torch_utils.change_lr(self.optimizer, new_lr)
    def save(self, filename, epoch):
        # Persist model weights, config, and epoch number; save failures are
        # reported but deliberately non-fatal.
        params = {
            'model': self.model.state_dict(),
            'config': self.opt,
            'epoch': epoch
        }
        try:
            torch.save(params, filename)
            print("model saved to {}".format(filename))
        except BaseException:
            print("[Warning: Saving failed... continuing anyway.]")
    def load(self, filename):
        # Restore model weights and config written by save(); exits the
        # process when the checkpoint cannot be read.
        try:
            checkpoint = torch.load(filename)
        except BaseException:
            print("Cannot load model from {}".format(filename))
            exit()
        self.model.load_state_dict(checkpoint['model'])
        self.opt = checkpoint['config']
class PositionAwareRNN(nn.Module):
""" A sequence model for relation extraction. """
def __init__(self, opt, emb_matrix=None):
super(PositionAwareRNN, self).__init__()
self.drop = nn.Dropout(opt['dropout'])
self.emb = nn.Embedding(opt['vocab_size'], opt['emb_dim'], padding_idx=constant.PAD_ID)
if opt['pos_dim'] > 0:
self.pos_emb = nn.Embedding(len(constant.POS_TO_ID), opt['pos_dim'],
padding_idx=constant.PAD_ID)
if opt['ner_dim'] > 0:
self.ner_emb = nn.Embedding(len(constant.NER_TO_ID), opt['ner_dim'],
padding_idx=constant.PAD_ID)
input_size = opt['emb_dim'] + opt['pos_dim'] + opt['ner_dim']
q_input_size = opt['emb_dim']
self.rnn = nn.LSTM(input_size, opt['hidden_dim'], opt['num_layers'], batch_first=True,
dropout=opt['dropout'])
self.linear = nn.Linear(opt['hidden_dim'], opt['num_class'])
self.qrnn = nn.LSTM(q_input_size, opt['hidden_dim'], opt['num_layers'], batch_first=True,
dropout=opt['dropout'])
self.self_attn = layers.LinearSeqAttn(opt['hidden_dim'])
# Bilinear attention for span start/end
self.start_attn = layers.BilinearSeqAttn(
opt['hidden_dim'],
opt['hidden_dim'],
)
self.end_attn = layers.BilinearSeqAttn(
opt['hidden_dim'],
opt['hidden_dim'],
)
if opt['attn']:
self.attn_layer = layers.PositionAwareAttention(opt['hidden_dim'],
opt['hidden_dim'], 2 * opt['pe_dim'], opt['attn_dim'])
self.pe_emb = nn.Embedding(constant.MAX_LEN * 2 + 1, opt['pe_dim'])
self.opt = opt
self.topn = self.opt.get('topn', 1e10)
self.use_cuda = opt['cuda']
self.emb_matrix = emb_matrix
self.init_weights()
def init_weights(self):
if self.emb_matrix is None:
self.emb.weight.data[1:, :].uniform_(-1.0, 1.0) # keep padding dimension to be 0
else:
self.emb_matrix = torch.from_numpy(self.emb_matrix)
self.emb.weight.data.copy_(self.emb_matrix)
if self.opt['pos_dim'] > 0:
self.pos_emb.weight.data[1:, :].uniform_(-1.0, 1.0)
if self.opt['ner_dim'] > 0:
self.ner_emb.weight.data[1:, :].uniform_(-1.0, 1.0)
self.linear.bias.data.fill_(0)
init.xavier_uniform(self.linear.weight, gain=1) # initialize linear layer
if self.opt['attn']:
self.pe_emb.weight.data.uniform_(-1.0, 1.0)
# decide finetuning
if self.topn <= 0:
print("Do not finetune word embedding layer.")
self.emb.weight.requires_grad = False
elif self.topn < self.opt['vocab_size']:
print("Finetune top {} word embeddings.".format(self.topn))
self.emb.weight.register_hook(lambda x: \
torch_utils.keep_partial_grad(x, self.topn))
else:
print("Finetune all embeddings.")
def zero_state(self, batch_size):
state_shape = (self.opt['num_layers'], batch_size, self.opt['hidden_dim'])
h0 = c0 = Variable(torch.zeros(*state_shape), requires_grad=False)
if self.use_cuda:
return h0.cuda(), c0.cuda()
else:
return h0, c0
def forward(self, inputs, data_type='RE'):
    """Run one forward pass, in one of two modes.

    Args:
        inputs: a tuple of tensors whose layout depends on data_type
            (see the unpack lines below).
        data_type: 'RE' for relation extraction (classification over
            opt['num_class']) or 'RC' for reading comprehension
            (span start/end scoring). Any other value returns None.

    Returns:
        For 'RE': (logits, final_hidden).
        For 'RC': (start_scores, end_scores).
    """
    if data_type == 'RE':
        words, masks, pos, ner, deprel, subj_pos, obj_pos = inputs # unpack
        # seq_lens: per-example token counts, derived by counting mask
        # positions equal to PAD_ID (assumes masks is 1 at PAD positions
        # under this convention — TODO confirm against the data loader).
        seq_lens = list(masks.data.eq(constant.PAD_ID).long().sum(1).squeeze())
        batch_size = words.size()[0]
        # embedding lookup
        word_inputs = self.emb(words)
        inputs = [word_inputs]
        if self.opt['pos_dim'] > 0:
            inputs += [self.pos_emb(pos)]
        if self.opt['ner_dim'] > 0:
            inputs += [self.ner_emb(ner)]
        inputs = self.drop(torch.cat(inputs, dim=2)) # add dropout to input
        # rnn (requires the batch to be sorted by decreasing seq_lens
        # for pack_padded_sequence)
        h0, c0 = self.zero_state(batch_size)
        inputs = nn.utils.rnn.pack_padded_sequence(inputs, seq_lens, batch_first=True)
        outputs, (ht, ct) = self.rnn(inputs, (h0, c0))
        outputs, output_lens = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)
        hidden = self.drop(ht[-1, :, :]) # get the outmost layer h_n
        outputs = self.drop(outputs)
        # attention
        if self.opt['attn']:
            # convert all negative PE numbers to positive indices
            # e.g., -2 -1 0 1 will be mapped to 98 99 100 101
            subj_pe_inputs = self.pe_emb(subj_pos + constant.MAX_LEN)
            obj_pe_inputs = self.pe_emb(obj_pos + constant.MAX_LEN)
            pe_features = torch.cat((subj_pe_inputs, obj_pe_inputs), dim=2)
            final_hidden = self.attn_layer(outputs, masks, hidden, pe_features)
        else:
            final_hidden = hidden
        logits = self.linear(final_hidden)
        return logits, final_hidden
    elif data_type == 'RC':
        context_words, context_pos, context_ner, context_mask, orig_idx, question_words, question_mask, q_orig_idx = inputs # unpack
        seq_lens = list(context_mask.data.eq(constant.PAD_ID).long().sum(1).squeeze())
        q_seq_lens = list(question_mask.data.eq(constant.PAD_ID).long().sum(1).squeeze())
        batch_size = context_words.size()[0]
        # embedding lookup (question gets word embeddings only; context
        # also gets POS/NER features when enabled)
        word_inputs = self.emb(context_words)
        q_word_inputs = self.drop(self.emb(question_words))
        inputs = [word_inputs]
        if self.opt['pos_dim'] > 0:
            inputs += [self.pos_emb(context_pos)]
        if self.opt['ner_dim'] > 0:
            inputs += [self.ner_emb(context_ner)]
        inputs = self.drop(torch.cat(inputs, dim=2)) # add dropout to input
        # rnn over the context
        h0, c0 = self.zero_state(batch_size)
        inputs = nn.utils.rnn.pack_padded_sequence(inputs, seq_lens, batch_first=True)
        outputs, _ = self.rnn(inputs, (h0, c0))
        outputs, output_lens = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)
        doc_hiddens = self.drop(outputs)
        # separate rnn over the question, then collapse it to a single
        # vector with self-attention
        qh0, qc0 = self.zero_state(batch_size)
        q_word_inputs = nn.utils.rnn.pack_padded_sequence(q_word_inputs, q_seq_lens, batch_first=True)
        q_outputs, _ = self.qrnn(q_word_inputs, (qh0, qc0))
        q_outputs, q_output_lens = nn.utils.rnn.pad_packed_sequence(q_outputs, batch_first=True)
        q_hiddens = self.drop(q_outputs)
        q_merge_weights = self.self_attn(q_hiddens, question_mask)
        question_hidden = layers.weighted_avg(q_hiddens, q_merge_weights)
        # unsort both doc and question to original ordering
        doc_hiddens = doc_hiddens.index_select(0, orig_idx)
        question_hidden = question_hidden.index_select(0, q_orig_idx)
        context_mask = context_mask.index_select(0, orig_idx)
        # Predict start and end positions
        start_scores = self.start_attn(doc_hiddens, question_hidden, context_mask)
        end_scores = self.end_attn(doc_hiddens, question_hidden, context_mask)
        return start_scores, end_scores
| 41.341935 | 137 | 0.584582 |
795b72b52e689067995b55b47b45f3f4a613a7c4 | 11,394 | py | Python | django/contrib/auth/tests/test_management.py | pegler/django | e9c6d0422422d7ea5ff3be8992ca73b5ba73bd0c | [
"BSD-3-Clause"
] | 1 | 2017-02-21T09:06:04.000Z | 2017-02-21T09:06:04.000Z | django/contrib/auth/tests/test_management.py | rogerhu/django | 317fd13c7ac25db94d3dabf8ee115acbfbd3e5a7 | [
"BSD-3-Clause"
] | null | null | null | django/contrib/auth/tests/test_management.py | rogerhu/django | 317fd13c7ac25db94d3dabf8ee115acbfbd3e5a7 | [
"BSD-3-Clause"
] | 1 | 2020-12-24T01:28:30.000Z | 2020-12-24T01:28:30.000Z | from __future__ import unicode_literals
from datetime import date
from django.contrib.auth import models, management
from django.contrib.auth.management import create_permissions
from django.contrib.auth.management.commands import changepassword
from django.contrib.auth.models import User
from django.contrib.auth.tests.custom_user import CustomUser
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.contenttypes.models import ContentType
from django.core import exceptions
from django.core.management import call_command
from django.core.management.base import CommandError
from django.core.management.validation import get_validation_errors
from django.db.models.loading import get_app
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import six
from django.utils.six import StringIO
@skipIfCustomUser
class GetDefaultUsernameTestCase(TestCase):
    """Tests for management.get_default_username(), which proposes a default
    superuser name derived from the OS-level username."""

    def setUp(self):
        # Stash the real system-username getter so tests can stub it out.
        self.old_get_system_username = management.get_system_username

    def tearDown(self):
        # Restore the real implementation after each test.
        management.get_system_username = self.old_get_system_username

    def test_actual_implementation(self):
        # The real getter must return text (unicode on Python 2).
        self.assertIsInstance(management.get_system_username(), six.text_type)

    def test_simple(self):
        management.get_system_username = lambda: 'joe'
        self.assertEqual(management.get_default_username(), 'joe')

    def test_existing(self):
        # An existing user with the same name suppresses the default,
        # unless the database check is explicitly skipped.
        models.User.objects.create(username='joe')
        management.get_system_username = lambda: 'joe'
        self.assertEqual(management.get_default_username(), '')
        self.assertEqual(
            management.get_default_username(check_db=False), 'joe')

    def test_i18n(self):
        # Non-ASCII system usernames are normalized to ASCII.
        # 'Julia' with accented 'u':
        management.get_system_username = lambda: 'J\xfalia'
        self.assertEqual(management.get_default_username(), 'julia')
@skipIfCustomUser
class ChangepasswordManagementCommandTestCase(TestCase):
    """Tests for the `changepassword` management command."""

    def setUp(self):
        self.user = models.User.objects.create_user(username='joe', password='qwerty')
        self.stdout = StringIO()
        self.stderr = StringIO()

    def tearDown(self):
        self.stdout.close()
        self.stderr.close()

    def test_that_changepassword_command_changes_joes_password(self):
        "Executing the changepassword management command should change joe's password"
        self.assertTrue(self.user.check_password('qwerty'))
        command = changepassword.Command()
        # Stub the interactive password prompt to always return the same
        # new password for both entries.
        command._get_pass = lambda *args: 'not qwerty'

        command.execute("joe", stdout=self.stdout)
        command_output = self.stdout.getvalue().strip()

        self.assertEqual(command_output, "Changing password for user 'joe'\nPassword changed successfully for user 'joe'")
        self.assertTrue(models.User.objects.get(username="joe").check_password("not qwerty"))

    def test_that_max_tries_exits_1(self):
        """
        A CommandError should be thrown by handle() if the user enters in
        mismatched passwords three times.
        """
        command = changepassword.Command()
        # This stub returns the args tuple for the confirmation prompt and
        # 'foo' for the empty-args prompt, so the two entries never match
        # and the command exhausts its retries.
        command._get_pass = lambda *args: args or 'foo'

        with self.assertRaises(CommandError):
            command.execute("joe", stdout=self.stdout, stderr=self.stderr)
@skipIfCustomUser
class CreatesuperuserManagementCommandTestCase(TestCase):
    """Tests for the `createsuperuser` management command, including its
    behaviour with swappable (custom) user models."""

    def test_createsuperuser(self):
        "Check the operation of the createsuperuser management command"
        # We can use the management command to create a superuser
        new_io = StringIO()
        call_command("createsuperuser",
            interactive=False,
            username="joe",
            email="joe@somewhere.org",
            stdout=new_io
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')
        u = User.objects.get(username="joe")
        self.assertEqual(u.email, 'joe@somewhere.org')

        # created password should be unusable
        self.assertFalse(u.has_usable_password())

    def test_verbosity_zero(self):
        # We can suppress output on the management command
        new_io = StringIO()
        call_command("createsuperuser",
            interactive=False,
            username="joe2",
            email="joe2@somewhere.org",
            verbosity=0,
            stdout=new_io
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, '')
        u = User.objects.get(username="joe2")
        self.assertEqual(u.email, 'joe2@somewhere.org')
        self.assertFalse(u.has_usable_password())

    def test_email_in_username(self):
        # Usernames containing '@' are accepted as-is.
        new_io = StringIO()
        call_command("createsuperuser",
            interactive=False,
            username="joe+admin@somewhere.org",
            email="joe@somewhere.org",
            stdout=new_io
        )
        u = User._default_manager.get(username="joe+admin@somewhere.org")
        self.assertEqual(u.email, 'joe@somewhere.org')
        self.assertFalse(u.has_usable_password())

    @override_settings(AUTH_USER_MODEL='auth.CustomUser')
    def test_swappable_user(self):
        "A superuser can be created when a custom User model is in use"
        # We can use the management command to create a superuser
        # We skip validation because the temporary substitution of the
        # swappable User model messes with validation.
        new_io = StringIO()
        call_command("createsuperuser",
            interactive=False,
            email="joe@somewhere.org",
            date_of_birth="1976-04-01",
            stdout=new_io,
            skip_validation=True
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')
        u = CustomUser._default_manager.get(email="joe@somewhere.org")
        self.assertEqual(u.date_of_birth, date(1976, 4, 1))

        # created password should be unusable
        self.assertFalse(u.has_usable_password())

    @override_settings(AUTH_USER_MODEL='auth.CustomUser')
    def test_swappable_user_missing_required_field(self):
        "A Custom superuser won't be created when a required field isn't provided"
        # We can use the management command to create a superuser
        # We skip validation because the temporary substitution of the
        # swappable User model messes with validation.
        new_io = StringIO()
        with self.assertRaises(CommandError):
            call_command("createsuperuser",
                interactive=False,
                username="joe@somewhere.org",
                stdout=new_io,
                stderr=new_io,
                skip_validation=True
            )
        self.assertEqual(CustomUser._default_manager.count(), 0)
class CustomUserModelValidationTestCase(TestCase):
    """Model-validation checks for misconfigured swappable user models:
    each test swaps in a deliberately broken custom user model and asserts
    the expected validation message appears."""

    @override_settings(AUTH_USER_MODEL='auth.CustomUserNonListRequiredFields')
    def test_required_fields_is_list(self):
        "REQUIRED_FIELDS should be a list."
        new_io = StringIO()
        get_validation_errors(new_io, get_app('auth'))
        self.assertIn("The REQUIRED_FIELDS must be a list or tuple.", new_io.getvalue())

    @override_settings(AUTH_USER_MODEL='auth.CustomUserBadRequiredFields')
    def test_username_not_in_required_fields(self):
        "USERNAME_FIELD should not appear in REQUIRED_FIELDS."
        new_io = StringIO()
        get_validation_errors(new_io, get_app('auth'))
        self.assertIn("The field named as the USERNAME_FIELD should not be included in REQUIRED_FIELDS on a swappable User model.", new_io.getvalue())

    @override_settings(AUTH_USER_MODEL='auth.CustomUserNonUniqueUsername')
    def test_username_non_unique(self):
        "A non-unique USERNAME_FIELD should raise a model validation error."
        new_io = StringIO()
        get_validation_errors(new_io, get_app('auth'))
        self.assertIn("The USERNAME_FIELD must be unique. Add unique=True to the field parameters.", new_io.getvalue())
class PermissionTestCase(TestCase):
    """Tests for permission creation via create_permissions(), exercised by
    mutating Permission's own Meta options and restoring them afterwards."""

    def setUp(self):
        # Snapshot the Meta options this test suite mutates.
        self._original_permissions = models.Permission._meta.permissions[:]
        self._original_default_permissions = models.Permission._meta.default_permissions
        self._original_verbose_name = models.Permission._meta.verbose_name

    def tearDown(self):
        models.Permission._meta.permissions = self._original_permissions
        models.Permission._meta.default_permissions = self._original_default_permissions
        models.Permission._meta.verbose_name = self._original_verbose_name
        # get_by_natural_key results are cached; clear so later tests see
        # fresh content types.
        ContentType.objects.clear_cache()

    def test_duplicated_permissions(self):
        """
        Test that we show proper error message if we are trying to create
        duplicate permissions.
        """
        # check duplicated default permission
        models.Permission._meta.permissions = [
           ('change_permission', 'Can edit permission (duplicate)')]
        six.assertRaisesRegex(self, CommandError,
            "The permission codename 'change_permission' clashes with a "
            "builtin permission for model 'auth.Permission'.",
            create_permissions, models, [], verbosity=0)

        # check duplicated custom permissions
        models.Permission._meta.permissions = [
            ('my_custom_permission', 'Some permission'),
            ('other_one', 'Some other permission'),
            ('my_custom_permission', 'Some permission with duplicate permission code'),
        ]
        six.assertRaisesRegex(self, CommandError,
            "The permission codename 'my_custom_permission' is duplicated for model "
            "'auth.Permission'.",
            create_permissions, models, [], verbosity=0)

        # should not raise anything
        models.Permission._meta.permissions = [
            ('my_custom_permission', 'Some permission'),
            ('other_one', 'Some other permission'),
        ]
        create_permissions(models, [], verbosity=0)

    def test_default_permissions(self):
        permission_content_type = ContentType.objects.get_by_natural_key('auth', 'permission')
        models.Permission._meta.permissions = [
            ('my_custom_permission', 'Some permission'),
        ]
        create_permissions(models, [], verbosity=0)

        # add/change/delete permission by default + custom permission
        self.assertEqual(models.Permission.objects.filter(
            content_type=permission_content_type,
        ).count(), 4)

        models.Permission.objects.filter(content_type=permission_content_type).delete()
        models.Permission._meta.default_permissions = []
        create_permissions(models, [], verbosity=0)

        # custom permission only since default permissions is empty
        self.assertEqual(models.Permission.objects.filter(
            content_type=permission_content_type,
        ).count(), 1)

    def test_verbose_name_length(self):
        permission_content_type = ContentType.objects.get_by_natural_key('auth', 'permission')
        models.Permission.objects.filter(content_type=permission_content_type).delete()
        models.Permission._meta.verbose_name = "some ridiculously long verbose name that is out of control"

        six.assertRaisesRegex(self, exceptions.ValidationError,
            "The verbose_name of permission is longer than 39 characters",
            create_permissions, models, [], verbosity=0)
| 41.736264 | 150 | 0.692119 |
795b731a4534953f505a457f7280ffd094ed3477 | 925 | py | Python | yatube/posts/migrations/0010_follow.py | Andr3w-k/yatube_final | 940266c92eae10edf94692263ab1552afe5f6363 | [
"MIT"
] | null | null | null | yatube/posts/migrations/0010_follow.py | Andr3w-k/yatube_final | 940266c92eae10edf94692263ab1552afe5f6363 | [
"MIT"
] | null | null | null | yatube/posts/migrations/0010_follow.py | Andr3w-k/yatube_final | 940266c92eae10edf94692263ab1552afe5f6363 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.16 on 2022-01-24 20:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: creates the Follow model, a user->author
    # subscription with two FKs into the configured user model.
    # Do not hand-edit generated operations.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('posts', '0009_auto_20220122_1927'),
    ]

    operations = [
        migrations.CreateModel(
            name='Follow',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='following', to=settings.AUTH_USER_MODEL, verbose_name='Автор')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='follower', to=settings.AUTH_USER_MODEL, verbose_name='Подписчик')),
            ],
        ),
    ]
| 37 | 169 | 0.671351 |
795b763fb0304f66a2a13e54a0743e566eab26a9 | 3,190 | py | Python | books/books/settings.py | pjloveshiphop/from-scrapy-to-mongodb | 81f62c4576056fd1bab0d13f9bcd91aee512001b | [
"MIT"
] | null | null | null | books/books/settings.py | pjloveshiphop/from-scrapy-to-mongodb | 81f62c4576056fd1bab0d13f9bcd91aee512001b | [
"MIT"
] | null | null | null | books/books/settings.py | pjloveshiphop/from-scrapy-to-mongodb | 81f62c4576056fd1bab0d13f9bcd91aee512001b | [
"MIT"
] | null | null | null | # Scrapy settings for books project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# Core crawler identity and spider discovery settings.
BOT_NAME = 'books'

SPIDER_MODULES = ['books.spiders']
NEWSPIDER_MODULE = 'books.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'books (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'books.middlewares.BooksSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'books.middlewares.BooksDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'books.pipelines.BooksPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# Route every scraped item through the MongoDB pipeline (priority 300).
ITEM_PIPELINES = {'books.pipelines.mongoDB_pipeline':300}
# Connection settings read by the pipeline's from_crawler hook.
MONGO_URI = 'mongodb://localhost:27017'
MONGO_DB = 'books'
COLLECTION_NAME = 'titles'
795b78746a8bba52316b00c2ebfd2a9bb1543aca | 1,183 | py | Python | vote/app.py | jamesongithub/example-voting-app | 67b6f7b08f890753ede42745090444acedcfa2d2 | [
"Apache-2.0"
] | null | null | null | vote/app.py | jamesongithub/example-voting-app | 67b6f7b08f890753ede42745090444acedcfa2d2 | [
"Apache-2.0"
] | 1 | 2020-09-08T07:01:17.000Z | 2020-09-08T07:01:17.000Z | vote/app.py | jamesongithub/example-voting-app | 67b6f7b08f890753ede42745090444acedcfa2d2 | [
"Apache-2.0"
] | null | null | null | from flask import Flask, render_template, request, make_response, g
from redis import Redis
import os
import socket
import random
import json
# The two vote choices, overridable via environment variables.
option_a = os.getenv('OPTION_A', "Portland")
option_b = os.getenv('OPTION_B', "Kings Island")
# Shown in the page footer so you can see which container served it.
hostname = socket.gethostname()

app = Flask(__name__)
def get_redis():
    """Return the Redis connection for the current app context.

    The connection is created lazily on first use and cached on flask.g,
    so each application context reuses a single client.
    """
    if hasattr(g, 'redis'):
        return g.redis
    g.redis = Redis(host=os.getenv('REDIS_HOST'), db=0, socket_timeout=5)
    return g.redis
@app.route("/", methods=['POST','GET'])
def hello():
    """Serve the voting page and, on POST, push the vote onto a Redis list.

    A random 64-bit voter id is kept in a cookie so repeat votes from the
    same browser replace each other downstream.
    """
    voter_id = request.cookies.get('voter_id')
    if not voter_id:
        # BUG FIX: the original `hex(...)[2:-1]` sliced off Python 2's
        # trailing 'L'; on Python 3 that silently dropped the last hex
        # digit of the id. Formatting the integer directly yields the
        # full 64-bit id on both versions.
        voter_id = '{0:x}'.format(random.getrandbits(64))

    vote = None

    if request.method == 'POST':
        redis = get_redis()
        vote = request.form['vote']
        # Queue the vote for the worker process to persist.
        data = json.dumps({'voter_id': voter_id, 'vote': vote})
        redis.rpush('votes', data)

    resp = make_response(render_template(
        'index.html',
        option_a=option_a,
        option_b=option_b,
        hostname=hostname,
        vote=vote,
    ))
    resp.set_cookie('voter_id', voter_id)
    return resp
if __name__ == "__main__":
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader —
    # fine for this demo, but it should be disabled in production.
    app.run(host='0.0.0.0', port=80, debug=True, threaded=True)
795b78da7eddece26ba5930675629117b737c119 | 40,954 | py | Python | magenta/music/encoder_decoder.py | hologerry/magenta | c08c17a548f97a3f5d294a010c28ea2803718d6f | [
"Apache-2.0"
] | null | null | null | magenta/music/encoder_decoder.py | hologerry/magenta | c08c17a548f97a3f5d294a010c28ea2803718d6f | [
"Apache-2.0"
] | null | null | null | magenta/music/encoder_decoder.py | hologerry/magenta | c08c17a548f97a3f5d294a010c28ea2803718d6f | [
"Apache-2.0"
] | 1 | 2021-09-09T15:30:36.000Z | 2021-09-09T15:30:36.000Z | # Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for converting between event sequences and models inputs/outputs.
OneHotEncoding is an abstract class for specifying a one-hot encoding, i.e.
how to convert back and forth between an arbitrary event space and integer
indices between 0 and the number of classes.
EventSequenceEncoderDecoder is an abstract class for translating event
_sequences_, i.e. how to convert event sequences to input vectors and output
labels to be fed into a model, and how to convert from output labels back to
events.
Use EventSequenceEncoderDecoder.encode to convert an event sequence to a
tf.train.SequenceExample of inputs and labels. These SequenceExamples are fed
into the model during training and evaluation.
During generation, use EventSequenceEncoderDecoder.get_inputs_batch to convert a
list of event sequences into an inputs batch which can be fed into the model to
predict what the next event should be for each sequence. Then use
EventSequenceEncoderDecoder.extend_event_sequences to extend each of those event
sequences with an event sampled from the softmax output by the model.
OneHotEventSequenceEncoderDecoder is an EventSequenceEncoderDecoder that uses a
OneHotEncoding of individual events. The input vectors are one-hot encodings of
the most recent event. The output labels are one-hot encodings of the next
event.
LookbackEventSequenceEncoderDecoder is an EventSequenceEncoderDecoder that also
uses a OneHotEncoding of individual events. However, its input and output
encodings also consider whether the event sequence is repeating, and the input
encoding includes binary counters for timekeeping.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numbers
from magenta.music import constants
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf # noqa
# Re-export for local use; lookback encoders default to looking back one
# and two bars.
DEFAULT_STEPS_PER_BAR = constants.DEFAULT_STEPS_PER_BAR
DEFAULT_LOOKBACK_DISTANCES = [DEFAULT_STEPS_PER_BAR, DEFAULT_STEPS_PER_BAR * 2]
class OneHotEncoding(object):
  """An interface for specifying a one-hot encoding of individual events."""
  # NOTE(review): the Python-2 `__metaclass__` attribute below is ignored on
  # Python 3, so this class is not actually abstract there; left as-is since
  # the file targets both Python 2 and 3 — TODO confirm.
  __metaclass__ = abc.ABCMeta

  @abc.abstractproperty
  def num_classes(self):
    """The number of distinct event encodings.

    Returns:
      An int, the range of ints that can be returned by self.encode_event.
    """
    pass

  @abc.abstractproperty
  def default_event(self):
    """An event value to use as a default.

    Returns:
      The default event value.
    """
    pass

  @abc.abstractmethod
  def encode_event(self, event):
    """Convert from an event value to an encoding integer.

    Args:
      event: An event value to encode.

    Returns:
      An integer representing the encoded event, in range [0, self.num_classes).
    """
    pass

  @abc.abstractmethod
  def decode_event(self, index):
    """Convert from an encoding integer to an event value.

    Args:
      index: The encoding, an integer in the range [0, self.num_classes).

    Returns:
      The decoded event value.
    """
    pass

  def event_to_num_steps(self, unused_event):
    """Returns the number of time steps corresponding to an event value.

    This is used for normalization when computing metrics. Subclasses with
    variable step size should override this method.

    Args:
      unused_event: An event value for which to return the number of steps.

    Returns:
      The number of steps corresponding to the given event value, defaulting to
      one.
    """
    return 1
class EventSequenceEncoderDecoder(object):
  """An abstract class for translating between events and model data.

  When building your dataset, the `encode` method takes in an event sequence
  and returns a SequenceExample of inputs and labels. These SequenceExamples
  are fed into the model during training and evaluation.

  During generation, the `get_inputs_batch` method takes in a list of the
  current event sequences and returns an inputs batch which is fed into the
  model to predict what the next event should be for each sequence. The
  `extend_event_sequences` method takes in the list of event sequences and the
  softmax returned by the model and extends each sequence by one step by
  sampling from the softmax probabilities. This loop (`get_inputs_batch` ->
  inputs batch is fed through the model to get a softmax ->
  `extend_event_sequences`) is repeated until the generated event sequences
  have reached the desired length.

  Properties:
    input_size: The length of the list returned by self.events_to_input.
    num_classes: The range of ints that can be returned by
        self.events_to_label.

  The `input_size`, `num_classes`, `events_to_input`, `events_to_label`, and
  `class_index_to_event` method must be overwritten to be specific to your
  model.
  """
  # NOTE(review): `__metaclass__` is ignored on Python 3 (Python-2 syntax);
  # kept for py2/py3 dual support — TODO confirm.
  __metaclass__ = abc.ABCMeta

  @abc.abstractproperty
  def input_size(self):
    """The size of the input vector used by this model.

    Returns:
      An integer, the length of the list returned by self.events_to_input.
    """
    pass

  @abc.abstractproperty
  def num_classes(self):
    """The range of labels used by this model.

    Returns:
      An integer, the range of integers that can be returned by
          self.events_to_label.
    """
    pass

  @abc.abstractproperty
  def default_event_label(self):
    """The class label that represents a default event.

    Returns:
      An int, the class label that represents a default event.
    """
    pass

  @abc.abstractmethod
  def events_to_input(self, events, position):
    """Returns the input vector for the event at the given position.

    Args:
      events: A list-like sequence of events.
      position: An integer event position in the sequence.

    Returns:
      An input vector, a self.input_size length list of floats.
    """
    pass

  @abc.abstractmethod
  def events_to_label(self, events, position):
    """Returns the label for the event at the given position.

    Args:
      events: A list-like sequence of events.
      position: An integer event position in the sequence.

    Returns:
      A label, an integer in the range [0, self.num_classes).
    """
    pass

  @abc.abstractmethod
  def class_index_to_event(self, class_index, events):
    """Returns the event for the given class index.

    This is the reverse process of the self.events_to_label method.

    Args:
      class_index: An integer in the range [0, self.num_classes).
      events: A list-like sequence of events.

    Returns:
      An event value.
    """
    pass

  def labels_to_num_steps(self, labels):
    """Returns the total number of time steps for a sequence of class labels.

    This is used for normalization when computing metrics. Subclasses with
    variable step size should override this method.

    Args:
      labels: A list-like sequence of integers in the range
          [0, self.num_classes).

    Returns:
      The total number of time steps for the label sequence, defaulting to one
      per event.
    """
    return len(labels)

  def encode(self, events):
    """Returns a SequenceExample for the given event sequence.

    Args:
      events: A list-like sequence of events.

    Returns:
      A tf.train.SequenceExample containing inputs and labels.
    """
    inputs = []
    labels = []
    # Input at step i predicts the label (next event) at step i + 1, so a
    # sequence of N events yields N - 1 (input, label) pairs.
    for i in range(len(events) - 1):
      inputs.append(self.events_to_input(events, i))
      labels.append(self.events_to_label(events, i + 1))
    return make_sequence_example(inputs, labels)

  def get_inputs_batch(self, event_sequences, full_length=False):
    """Returns an inputs batch for the given event sequences.

    Args:
      event_sequences: A list of list-like event sequences.
      full_length: If True, the inputs batch will be for the full length of
          each event sequence. If False, the inputs batch will only be for the
          last event of each event sequence. A full-length inputs batch is used
          for the first step of extending the event sequences, since the RNN
          cell state needs to be initialized with the priming sequence. For
          subsequent generation steps, only a last-event inputs batch is used.

    Returns:
      An inputs batch. If `full_length` is True, the shape will be
      [len(event_sequences), len(event_sequences[0]), INPUT_SIZE]. If
      `full_length` is False, the shape will be
      [len(event_sequences), 1, INPUT_SIZE].
    """
    inputs_batch = []
    for events in event_sequences:
      inputs = []
      if full_length:
        for i in range(len(events)):
          inputs.append(self.events_to_input(events, i))
      else:
        inputs.append(self.events_to_input(events, len(events) - 1))
      inputs_batch.append(inputs)
    return inputs_batch

  def extend_event_sequences(self, event_sequences, softmax):
    """Extends the event sequences by sampling the softmax probabilities.

    Note: mutates `event_sequences` in place (one event appended to each).

    Args:
      event_sequences: A list of EventSequence objects.
      softmax: A list of softmax probability vectors. The list of softmaxes
          should be the same length as the list of event sequences.

    Returns:
      A Python list of chosen class indices, one for each event sequence.
    """
    chosen_classes = []
    for i in range(len(event_sequences)):
      # Probe softmax[0][0][0]: a number means a single softmax per step; a
      # nested sequence means a list of sub-softmaxes (one per output head).
      if not isinstance(softmax[0][0][0], numbers.Number):
        # In this case, softmax is a list of several sub-softmaxes, each
        # potentially with a different size.
        # shape: [[beam_size, event_num, softmax_size]]
        chosen_class = []
        for sub_softmax in softmax:
          num_classes = len(sub_softmax[0][0])
          chosen_class.append(
              np.random.choice(num_classes, p=sub_softmax[i][-1]))
      else:
        # In this case, softmax is just one softmax.
        # shape: [beam_size, event_num, softmax_size]
        num_classes = len(softmax[0][0])
        chosen_class = np.random.choice(num_classes, p=softmax[i][-1])
      event = self.class_index_to_event(chosen_class, event_sequences[i])
      event_sequences[i].append(event)
      chosen_classes.append(chosen_class)
    return chosen_classes

  def evaluate_log_likelihood(self, event_sequences, softmax):
    """Evaluate the log likelihood of multiple event sequences.

    Each event sequence is evaluated from the end. If the size of the
    corresponding softmax vector is 1 less than the number of events, the entire
    event sequence will be evaluated (other than the first event, whose
    distribution is not modeled). If the softmax vector is shorter than this,
    only the events at the end of the sequence will be evaluated.

    Args:
      event_sequences: A list of EventSequence objects.
      softmax: A list of softmax probability vectors. The list of softmaxes
          should be the same length as the list of event sequences.

    Returns:
      A Python list containing the log likelihood of each event sequence.

    Raises:
      ValueError: If one of the event sequences is too long with respect to the
          corresponding softmax vectors.
    """
    all_loglik = []
    for i in range(len(event_sequences)):
      if len(softmax[i]) >= len(event_sequences[i]):
        raise ValueError(
            'event sequence must be longer than softmax vector (%d events but '
            'softmax vector has length %d)' % (len(event_sequences[i]),
                                               len(softmax[i])))
      # Evaluate the last len(softmax[i]) events of the sequence.
      end_pos = len(event_sequences[i])
      start_pos = end_pos - len(softmax[i])

      loglik = 0.0
      for softmax_pos, position in enumerate(range(start_pos, end_pos)):
        index = self.events_to_label(event_sequences[i], position)
        if isinstance(index, numbers.Number):
          loglik += np.log(softmax[i][softmax_pos][index])
        else:
          # Multiple output heads: sum log-probs across sub-softmaxes.
          for sub_softmax_i in range(len(index)):
            loglik += np.log(
                softmax[i][softmax_pos][sub_softmax_i][index[sub_softmax_i]])
      all_loglik.append(loglik)

    return all_loglik
class OneHotEventSequenceEncoderDecoder(EventSequenceEncoderDecoder):
  """An EventSequenceEncoderDecoder that produces a one-hot encoding."""

  def __init__(self, one_hot_encoding):
    """Initialize a OneHotEventSequenceEncoderDecoder object.

    Args:
      one_hot_encoding: A OneHotEncoding object that transforms events to and
          from integer indices.
    """
    self._one_hot_encoding = one_hot_encoding

  @property
  def input_size(self):
    # Inputs are one-hot vectors, so the input width equals the class count.
    return self._one_hot_encoding.num_classes

  @property
  def num_classes(self):
    return self._one_hot_encoding.num_classes

  @property
  def default_event_label(self):
    return self._one_hot_encoding.encode_event(
        self._one_hot_encoding.default_event)

  def events_to_input(self, events, position):
    """Returns the input vector for the given position in the event sequence.

    Returns a one-hot vector for the given position in the event sequence, as
    determined by the one hot encoding.

    Args:
      events: A list-like sequence of events.
      position: An integer event position in the event sequence.

    Returns:
      An input vector, a list of floats.
    """
    vec = [0.0] * self.input_size
    hot_index = self._one_hot_encoding.encode_event(events[position])
    vec[hot_index] = 1.0
    return vec

  def events_to_label(self, events, position):
    """Returns the label for the given position in the event sequence.

    Returns the zero-based index value for the given position in the event
    sequence, as determined by the one hot encoding.

    Args:
      events: A list-like sequence of events.
      position: An integer event position in the event sequence.

    Returns:
      A label, an integer.
    """
    return self._one_hot_encoding.encode_event(events[position])

  def class_index_to_event(self, class_index, events):
    """Returns the event for the given class index.

    This is the reverse process of the self.events_to_label method.

    Args:
      class_index: An integer in the range [0, self.num_classes).
      events: A list-like sequence of events. This object is not used in this
          implementation.

    Returns:
      An event value.
    """
    return self._one_hot_encoding.decode_event(class_index)

  def labels_to_num_steps(self, labels):
    """Returns the total number of time steps for a sequence of class labels.

    Args:
      labels: A list-like sequence of integers in the range
          [0, self.num_classes).

    Returns:
      The total number of time steps for the label sequence, as determined by
      the one-hot encoding.
    """
    # Decode each label into an event (the running event list is required by
    # the decoding interface, though this encoding ignores it) and accumulate
    # the per-event step counts as we go.
    events = []
    total_steps = 0
    for label in labels:
      event = self.class_index_to_event(label, events)
      events.append(event)
      total_steps += self._one_hot_encoding.event_to_num_steps(event)
    return total_steps
class OneHotIndexEventSequenceEncoderDecoder(OneHotEventSequenceEncoderDecoder):
  """An EventSequenceEncoderDecoder that produces one-hot indices."""

  @property
  def input_size(self):
    # A single integer index per step, rather than a full one-hot vector.
    return 1

  @property
  def input_depth(self):
    return self._one_hot_encoding.num_classes

  def events_to_input(self, events, position):
    """Returns the one-hot index for the event at the given position.

    Args:
      events: A list-like sequence of events.
      position: An integer event position in the event sequence.

    Returns:
      An integer input event index.
    """
    encoded = self._one_hot_encoding.encode_event(events[position])
    return [encoded]
class LookbackEventSequenceEncoderDecoder(EventSequenceEncoderDecoder):
  """An EventSequenceEncoderDecoder that encodes repeated events and meter."""
  def __init__(self, one_hot_encoding, lookback_distances=None,
               binary_counter_bits=5):
    """Initializes the LookbackEventSequenceEncoderDecoder.
    Args:
      one_hot_encoding: A OneHotEncoding object that transforms events to and
          from integer indices.
      lookback_distances: A list of step intervals to look back in history to
          encode both the following event and whether the current step is a
          repeat. If None, use default lookback distances.
      binary_counter_bits: The number of input bits to use as a counter for the
          metric position of the next event.
    """
    self._one_hot_encoding = one_hot_encoding
    if lookback_distances is None:
      # DEFAULT_LOOKBACK_DISTANCES is a module-level constant defined
      # elsewhere in this file.
      self._lookback_distances = DEFAULT_LOOKBACK_DISTANCES
    else:
      self._lookback_distances = lookback_distances
    self._binary_counter_bits = binary_counter_bits
  @property
  def input_size(self):
    # Input layout: current event one-hot, one next-event one-hot per
    # lookback, the binary counter bits, and one repeat flag per lookback.
    one_hot_size = self._one_hot_encoding.num_classes
    num_lookbacks = len(self._lookback_distances)
    return (one_hot_size + # current event
            num_lookbacks * one_hot_size + # next event for each lookback
            self._binary_counter_bits + # binary counters
            num_lookbacks) # whether event matches lookbacks
  @property
  def num_classes(self):
    # Standard event classes plus one "repeat" class per lookback distance.
    return self._one_hot_encoding.num_classes + len(self._lookback_distances)
  @property
  def default_event_label(self):
    return self._one_hot_encoding.encode_event(
        self._one_hot_encoding.default_event)
  def events_to_input(self, events, position):
    """Returns the input vector for the given position in the event sequence.
    Returns a self.input_size length list of floats. Assuming a one-hot
    encoding with 38 classes, two lookback distances, and five binary counters,
    self.input_size will = 121. Each index represents a different input signal
    to the model.
    Indices [0, 120]:
    [0, 37]: Event of current step.
    [38, 75]: Event of next step for first lookback.
    [76, 113]: Event of next step for second lookback.
    114: 16th note binary counter.
    115: 8th note binary counter.
    116: 4th note binary counter.
    117: Half note binary counter.
    118: Whole note binary counter.
    119: The current step is repeating (first lookback).
    120: The current step is repeating (second lookback).
    Args:
      events: A list-like sequence of events.
      position: An integer position in the event sequence.
    Returns:
      An input vector, an self.input_size length list of floats.
    """
    # `offset` tracks the start of the current section of the input vector;
    # each section below advances it by the number of slots it fills.
    input_ = [0.0] * self.input_size
    offset = 0
    # Last event.
    index = self._one_hot_encoding.encode_event(events[position])
    input_[index] = 1.0
    offset += self._one_hot_encoding.num_classes
    # Next event if repeating N positions ago.
    for i, lookback_distance in enumerate(self._lookback_distances):
      # +1 because this section encodes the event that would FOLLOW if the
      # sequence were repeating from `lookback_distance` steps back.
      lookback_position = position - lookback_distance + 1
      if lookback_position < 0:
        event = self._one_hot_encoding.default_event
      else:
        event = events[lookback_position]
      index = self._one_hot_encoding.encode_event(event)
      input_[offset + index] = 1.0
      offset += self._one_hot_encoding.num_classes
    # Binary time counter giving the metric location of the *next* event.
    n = position + 1
    for i in range(self._binary_counter_bits):
      # Bit i of n, mapped to +/-1 rather than 1/0.
      input_[offset] = 1.0 if (n // 2 ** i) % 2 else -1.0
      offset += 1
    # Last event is repeating N bars ago.
    for i, lookback_distance in enumerate(self._lookback_distances):
      lookback_position = position - lookback_distance
      if (lookback_position >= 0 and
          events[position] == events[lookback_position]):
        input_[offset] = 1.0
      offset += 1
    # Sanity check: every section advanced `offset` by exactly its width.
    assert offset == self.input_size
    return input_
  def events_to_label(self, events, position):
    """Returns the label for the given position in the event sequence.
    Returns an integer in the range [0, self.num_classes). Indices in the range
    [0, self._one_hot_encoding.num_classes) map to standard events. Indices
    self._one_hot_encoding.num_classes and self._one_hot_encoding.num_classes +
    1 are signals to repeat events from earlier in the sequence. More distant
    repeats are selected first and standard events are selected last.
    Assuming a one-hot encoding with 38 classes and two lookback distances,
    self.num_classes = 40 and the values will be as follows.
    Values [0, 39]:
    [0, 37]: Event of the last step in the event sequence, if not repeating
       any of the lookbacks.
    38: If the last event is repeating the first lookback, if not also
       repeating the second lookback.
    39: If the last event is repeating the second lookback.
    Args:
      events: A list-like sequence of events.
      position: An integer position in the event sequence.
    Returns:
      A label, an integer.
    """
    # Early positions holding the default event are labeled as the most
    # distant repeat class rather than as the default event itself.
    if (self._lookback_distances and
        position < self._lookback_distances[-1] and
        events[position] == self._one_hot_encoding.default_event):
      return (self._one_hot_encoding.num_classes +
              len(self._lookback_distances) - 1)
    # If last step repeated N bars ago. Reversed so the most distant matching
    # lookback wins.
    for i, lookback_distance in reversed(
        list(enumerate(self._lookback_distances))):
      lookback_position = position - lookback_distance
      if (lookback_position >= 0 and
          events[position] == events[lookback_position]):
        return self._one_hot_encoding.num_classes + i
    # If last step didn't repeat at one of the lookback positions, use the
    # specific event.
    return self._one_hot_encoding.encode_event(events[position])
  def class_index_to_event(self, class_index, events):
    """Returns the event for the given class index.
    This is the reverse process of the self.events_to_label method.
    Args:
      class_index: An int in the range [0, self.num_classes).
      events: The current event sequence.
    Returns:
      An event value.
    """
    # Repeat N bar ago. Checked in reverse to mirror events_to_label.
    for i, lookback_distance in reversed(
        list(enumerate(self._lookback_distances))):
      if class_index == self._one_hot_encoding.num_classes + i:
        if len(events) < lookback_distance:
          # Not enough history to repeat from; fall back to the default event.
          return self._one_hot_encoding.default_event
        return events[-lookback_distance]
    # Return the event for that class index.
    return self._one_hot_encoding.decode_event(class_index)
  def labels_to_num_steps(self, labels):
    """Returns the total number of time steps for a sequence of class labels.
    This method assumes the event sequence begins with the event corresponding
    to the first label, which is inconsistent with the `encode` method in
    EventSequenceEncoderDecoder that uses the second event as the first label.
    Therefore, if the label sequence includes a lookback to the very first event
    and that event is a different number of time steps than the default event,
    this method will give an incorrect answer.
    Args:
      labels: A list-like sequence of integers in the range
          [0, self.num_classes).
    Returns:
      The total number of time steps for the label sequence, as determined by
      the one-hot encoding.
    """
    # Decode the labels in order so repeat classes can reference the
    # partially-built event list, then total the per-event step counts.
    events = []
    for label in labels:
      events.append(self.class_index_to_event(label, events))
    return sum(self._one_hot_encoding.event_to_num_steps(event)
               for event in events)
class ConditionalEventSequenceEncoderDecoder(object):
  """An encoder/decoder for conditional event sequences.

  This class is similar to an EventSequenceEncoderDecoder but operates on
  *conditional* event sequences, where there is both a control event sequence
  and a target event sequence. The target sequence consists of events that are
  directly generated by the model, while the control sequence, known in advance,
  affects the inputs provided to the model. The event types of the two sequences
  can be different.

  Model inputs are determined by both control and target sequences, and are
  formed by concatenating the encoded control and target input vectors. Model
  outputs are determined by the target sequence only.

  This implementation assumes that the control event at position `i` is known
  when the target event at position `i` is to be generated.

  Properties:
    input_size: The length of the list returned by self.events_to_input.
    num_classes: The range of ints that can be returned by
        self.events_to_label.
  """

  def __init__(self, control_encoder_decoder, target_encoder_decoder):
    """Initialize a ConditionalEventSequenceEncoderDecoder object.

    Args:
      control_encoder_decoder: The EventSequenceEncoderDecoder to encode/decode
          the control sequence.
      target_encoder_decoder: The EventSequenceEncoderDecoder to encode/decode
          the target sequence.
    """
    self._control_encoder_decoder = control_encoder_decoder
    self._target_encoder_decoder = target_encoder_decoder

  @property
  def input_size(self):
    """The size of the concatenated control and target input vectors.

    Returns:
        An integer, the size of an input vector.
    """
    return (self._control_encoder_decoder.input_size +
            self._target_encoder_decoder.input_size)

  @property
  def num_classes(self):
    """The range of target labels used by this model.

    Returns:
        An integer, the range of integers that can be returned by
            self.events_to_label.
    """
    return self._target_encoder_decoder.num_classes

  @property
  def default_event_label(self):
    """The class label that represents a default target event.

    Returns:
      An integer, the class label that represents a default target event.
    """
    return self._target_encoder_decoder.default_event_label

  def events_to_input(self, control_events, target_events, position):
    """Returns the input vector for the given position in the sequence pair.

    Returns the vector formed by concatenating the input vector for the control
    sequence and the input vector for the target sequence.

    Args:
      control_events: A list-like sequence of control events.
      target_events: A list-like sequence of target events.
      position: An integer event position in the event sequences. When
          predicting the target label at position `i + 1`, the input vector is
          the concatenation of the control input vector at position `i + 1` and
          the target input vector at position `i`.

    Returns:
      An input vector, a list of floats.
    """
    return (
        self._control_encoder_decoder.events_to_input(
            control_events, position + 1) +
        self._target_encoder_decoder.events_to_input(target_events, position))

  def events_to_label(self, target_events, position):
    """Returns the label for the given position in the target event sequence.

    Args:
      target_events: A list-like sequence of target events.
      position: An integer event position in the target event sequence.

    Returns:
      A label, an integer.
    """
    return self._target_encoder_decoder.events_to_label(target_events, position)

  def class_index_to_event(self, class_index, target_events):
    """Returns the event for the given class index.

    This is the reverse process of the self.events_to_label method.

    Args:
      class_index: An integer in the range [0, self.num_classes).
      target_events: A list-like sequence of target events.

    Returns:
      A target event value.
    """
    return self._target_encoder_decoder.class_index_to_event(
        class_index, target_events)

  def labels_to_num_steps(self, labels):
    """Returns the total number of time steps for a sequence of class labels.

    Args:
      labels: A list-like sequence of integers in the range
          [0, self.num_classes).

    Returns:
      The total number of time steps for the label sequence, as determined by
      the target encoder/decoder.
    """
    return self._target_encoder_decoder.labels_to_num_steps(labels)

  def encode(self, control_events, target_events):
    """Returns a SequenceExample for the given event sequence pair.

    Args:
      control_events: A list-like sequence of control events.
      target_events: A list-like sequence of target events, the same length as
          `control_events`.

    Returns:
      A tf.train.SequenceExample containing inputs and labels.

    Raises:
      ValueError: If the control and target event sequences have different
          length.
    """
    if len(control_events) != len(target_events):
      raise ValueError('must have the same number of control and target events '
                       '(%d control events but %d target events)' % (
                           len(control_events), len(target_events)))

    inputs = []
    labels = []
    for i in range(len(target_events) - 1):
      inputs.append(self.events_to_input(control_events, target_events, i))
      labels.append(self.events_to_label(target_events, i + 1))
    return make_sequence_example(inputs, labels)

  def get_inputs_batch(self, control_event_sequences, target_event_sequences,
                       full_length=False):
    """Returns an inputs batch for the given control and target event sequences.

    Args:
      control_event_sequences: A list of list-like control event sequences.
      target_event_sequences: A list of list-like target event sequences, the
          same length as `control_event_sequences`. Each target event sequence
          must be shorter than the corresponding control event sequence.
      full_length: If True, the inputs batch will be for the full length of
          each control/target event sequence pair. If False, the inputs batch
          will only be for the last event of each target event sequence. A full-
          length inputs batch is used for the first step of extending the target
          event sequences, since the RNN cell state needs to be initialized with
          the priming target sequence. For subsequent generation steps, only a
          last-event inputs batch is used.

    Returns:
      An inputs batch. If `full_length` is True, the shape will be
      [len(target_event_sequences), len(target_event_sequences[0]), INPUT_SIZE].
      If `full_length` is False, the shape will be
      [len(target_event_sequences), 1, INPUT_SIZE].

    Raises:
      ValueError: If there are a different number of control and target event
          sequences, or if one of the target event sequences is not shorter
          than the corresponding control event sequence.
    """
    if len(control_event_sequences) != len(target_event_sequences):
      # Bug fix: the original code passed both lengths to a single len() call
      # (len(a, len(b))), raising TypeError instead of this ValueError.
      raise ValueError(
          '%d control event sequences but %d target event sequences' %
          (len(control_event_sequences), len(target_event_sequences)))

    inputs_batch = []
    for control_events, target_events in zip(
        control_event_sequences, target_event_sequences):
      if len(control_events) <= len(target_events):
        raise ValueError('control event sequence must be longer than target '
                         'event sequence (%d control events but %d target '
                         'events)' % (len(control_events), len(target_events)))
      inputs = []
      if full_length:
        for i in range(len(target_events)):
          inputs.append(self.events_to_input(control_events, target_events, i))
      else:
        inputs.append(self.events_to_input(
            control_events, target_events, len(target_events) - 1))
      inputs_batch.append(inputs)
    return inputs_batch

  def extend_event_sequences(self, target_event_sequences, softmax):
    """Extends the event sequences by sampling the softmax probabilities.

    Args:
      target_event_sequences: A list of target EventSequence objects.
      softmax: A list of softmax probability vectors. The list of softmaxes
          should be the same length as the list of event sequences.

    Returns:
      A Python list of chosen class indices, one for each target event sequence.
    """
    return self._target_encoder_decoder.extend_event_sequences(
        target_event_sequences, softmax)

  def evaluate_log_likelihood(self, target_event_sequences, softmax):
    """Evaluate the log likelihood of multiple target event sequences.

    Args:
      target_event_sequences: A list of target EventSequence objects.
      softmax: A list of softmax probability vectors. The list of softmaxes
          should be the same length as the list of target event sequences. The
          softmax vectors are assumed to have been generated by a full-length
          inputs batch.

    Returns:
      A Python list containing the log likelihood of each target event sequence.
    """
    return self._target_encoder_decoder.evaluate_log_likelihood(
        target_event_sequences, softmax)
class OptionalEventSequenceEncoder(EventSequenceEncoderDecoder):
  """An encoder that augments a base encoder with a disable flag.

  This encoder encodes event sequences consisting of tuples where the first
  element is a disable flag. When set, the encoding consists of a 1 followed by
  a zero-encoding the size of the base encoder's input. When unset, the encoding
  consists of a 0 followed by the base encoder's encoding.
  """

  def __init__(self, encoder):
    """Initialize an OptionalEventSequenceEncoder object.

    Args:
      encoder: The base EventSequenceEncoderDecoder to use.
    """
    self._encoder = encoder

  @property
  def input_size(self):
    # One extra slot at the front for the disable flag.
    return self._encoder.input_size + 1

  @property
  def num_classes(self):
    raise NotImplementedError

  @property
  def default_event_label(self):
    raise NotImplementedError

  def events_to_input(self, events, position):
    # Each event is a (disable, sub_event) tuple; the flag determines whether
    # the base encoding is emitted or zeroed out.
    disable, _ = events[position]
    if disable:
      return [1.0] + [0.0] * self._encoder.input_size
    sub_events = [sub_event for _, sub_event in events]
    return [0.0] + self._encoder.events_to_input(sub_events, position)

  def events_to_label(self, events, position):
    raise NotImplementedError

  def class_index_to_event(self, class_index, events):
    raise NotImplementedError
class MultipleEventSequenceEncoder(EventSequenceEncoderDecoder):
  """An encoder that concatenates multiple component encoders.

  This class, largely intended for use with control sequences for conditional
  encoder/decoders, encodes event sequences with multiple encoders and
  concatenates the encodings.

  Despite being an EventSequenceEncoderDecoder this class does not decode.
  """

  def __init__(self, encoders, encode_single_sequence=False):
    """Initialize a MultipleEventSequenceEncoder object.

    Args:
      encoders: A list of component EventSequenceEncoderDecoder objects whose
          output will be concatenated.
      encode_single_sequence: If True, at encoding time all of the encoders will
          be applied to a single event sequence. If False, each event of the
          event sequence should be a tuple with size the same as the number of
          encoders, each of which will be applied to the events in the
          corresponding position in the tuple, i.e. the first encoder will be
          applied to the first element of each event tuple, the second encoder
          will be applied to the second element, etc.
    """
    self._encoders = encoders
    self._encode_single_sequence = encode_single_sequence

  @property
  def input_size(self):
    return sum(encoder.input_size for encoder in self._encoders)

  @property
  def num_classes(self):
    raise NotImplementedError

  @property
  def default_event_label(self):
    raise NotImplementedError

  def events_to_input(self, events, position):
    if self._encode_single_sequence:
      # Every encoder sees the same event sequence.
      pieces = [encoder.events_to_input(events, position)
                for encoder in self._encoders]
    else:
      # Events are tuples; split them into one sub-sequence per tuple slot and
      # pair each sub-sequence with its encoder.
      event_sequences = list(zip(*events))
      if len(event_sequences) != len(self._encoders):
        raise ValueError(
            'Event tuple size must be the same as the number of encoders.')
      pieces = [encoder.events_to_input(event_sequence, position)
                for encoder, event_sequence
                in zip(self._encoders, event_sequences)]
    # Concatenate the per-encoder encodings in order.
    return [value for piece in pieces for value in piece]

  def events_to_label(self, events, position):
    raise NotImplementedError

  def class_index_to_event(self, class_index, events):
    raise NotImplementedError
def make_sequence_example(inputs, labels):
  """Returns a SequenceExample for the given inputs and labels.

  Args:
    inputs: A list of input vectors. Each input vector is a list of floats.
    labels: A list of ints.

  Returns:
    A tf.train.SequenceExample containing inputs and labels.
  """
  input_features = []
  for input_ in inputs:
    input_features.append(
        tf.train.Feature(float_list=tf.train.FloatList(value=input_)))
  label_features = []
  for label in labels:
    # Scalar labels are wrapped in a single-element list; multi-softmax
    # labels are already list-like.
    values = [label] if isinstance(label, numbers.Number) else label
    label_features.append(
        tf.train.Feature(int64_list=tf.train.Int64List(value=values)))
  feature_lists = tf.train.FeatureLists(feature_list={
      'inputs': tf.train.FeatureList(feature=input_features),
      'labels': tf.train.FeatureList(feature=label_features)
  })
  return tf.train.SequenceExample(feature_lists=feature_lists)
| 39.416747 | 89 | 0.658324 |
795b79295ec920b1e525b5e6e9eb0d82d927a6f4 | 2,405 | py | Python | training/src/tests/tests/python/RMSprop.py | steelONIONknight/bolt | 9bd3d08f2abb14435ca3ad0179889e48fa7e9b47 | [
"MIT"
] | null | null | null | training/src/tests/tests/python/RMSprop.py | steelONIONknight/bolt | 9bd3d08f2abb14435ca3ad0179889e48fa7e9b47 | [
"MIT"
] | null | null | null | training/src/tests/tests/python/RMSprop.py | steelONIONknight/bolt | 9bd3d08f2abb14435ca3ad0179889e48fa7e9b47 | [
"MIT"
] | null | null | null | # Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import torch
from torch.optim import rmsprop
# Test 1
torch.manual_seed(0)
torch.set_printoptions(precision=6)
param = torch.rand(1, 2, 3, 4)
param.grad = torch.rand(1, 2, 3, 4)
print("Parameter: ", param)
print("Gradeient: ", param.grad)
# First step
opt = rmsprop.RMSprop(
[param], lr=0.1, alpha=0.9, eps=0.1, weight_decay=0.1, momentum=0.1, centered=True
)
opt.step()
print("Parameter (after first step): ", param)
# Second step
opt.step()
print("Parameter (after second step): ", param)
# Test 2
param = torch.rand(1, 2, 3, 4)
param.grad = torch.rand(1, 2, 3, 4)
print("Parameter: ", param)
print("Gradeient: ", param.grad)
# First step
opt = rmsprop.RMSprop(
[param], lr=0.1, alpha=0.9, eps=0.1, weight_decay=0.1, momentum=0.1, centered=False
)
opt.step()
print("Parameter (after first step): ", param)
# Second step
opt.step()
print("Parameter (after second step): ", param)
# Test 3
param = torch.rand(1, 2, 3, 4)
param.grad = torch.rand(1, 2, 3, 4)
print("Parameter: ", param)
print("Gradeient: ", param.grad)
# First step
opt = rmsprop.RMSprop(
[param], lr=0.1, alpha=0.9, eps=0.1, weight_decay=0.1, momentum=0.0, centered=False
)
opt.step()
print("Parameter (after first step): ", param)
# Second step
opt.step()
print("Parameter (after second step): ", param)
| 37 | 148 | 0.728067 |
795b7943291eb62c6f3b8a12a366541c850ae4fc | 2,281 | py | Python | exasol_integration_test_docker_environment/lib/test_environment/database_setup/populate_data.py | exasol/integration-test-docker-environment | 35850f67cd4cde010f03dd556d1a0f74b3291eb8 | [
"MIT"
] | 4 | 2020-06-25T20:47:31.000Z | 2021-09-10T15:22:51.000Z | exasol_integration_test_docker_environment/lib/test_environment/database_setup/populate_data.py | exasol/integration-test-docker-environment | 35850f67cd4cde010f03dd556d1a0f74b3291eb8 | [
"MIT"
] | 113 | 2020-06-02T08:51:08.000Z | 2022-03-31T08:47:41.000Z | exasol_integration_test_docker_environment/lib/test_environment/database_setup/populate_data.py | exasol/integration-test-docker-environment | 35850f67cd4cde010f03dd556d1a0f74b3291eb8 | [
"MIT"
] | 2 | 2020-05-19T10:57:47.000Z | 2020-06-22T13:32:20.000Z | import logging
from pathlib import Path
import luigi
from exasol_integration_test_docker_environment.lib.base.docker_base_task import DockerBaseTask
from exasol_integration_test_docker_environment.lib.base.json_pickle_parameter import JsonPickleParameter
from exasol_integration_test_docker_environment.lib.data.database_credentials import DatabaseCredentialsParameter
from exasol_integration_test_docker_environment.lib.data.environment_info import EnvironmentInfo
class PopulateEngineSmallTestDataToDatabase(DockerBaseTask, DatabaseCredentialsParameter):
    """Luigi task that loads the 'enginedb_small' test data set into an Exasol
    database by running EXAplus inside the test container."""
    logger = logging.getLogger('luigi-interface')
    # Name of the test environment this task operates on.
    environment_name = luigi.Parameter()
    # When True and the database was reused, skip the (expensive) data upload.
    reuse_data = luigi.BoolParameter(False, significant=False)
    test_environment_info = JsonPickleParameter(
        EnvironmentInfo, significant=False)  # type: EnvironmentInfo
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Cache the container and database info from the environment parameter.
        self._test_container_info = self.test_environment_info.test_container_info
        self._database_info = self.test_environment_info.database_info
    def run_task(self):
        """Entry point: upload the data unless a reused database already has it."""
        if self.reuse_data and self._database_info.reused:
            self.logger.warning("Reusing data")
            self.write_logs("Reused")
        else:
            self.populate_data()
    def populate_data(self):
        """Run the import.sql script via EXAplus inside the test container.

        Raises:
            Exception: if the EXAplus command exits with a non-zero code.
        """
        self.logger.warning("Uploading data")
        username = self.db_user
        password = self.db_password
        with self._get_docker_client() as docker_client:
            test_container = docker_client.containers.get(self._test_container_info.container_name)
            # $EXAPLUS is expected to be set inside the test container;
            # validateservercertificate=0 disables TLS cert checks for the test DB.
            cmd = f"""cd /tests/test/enginedb_small; $EXAPLUS -c '{self._database_info.host}:{self._database_info.db_port}' -u '{username}' -p '{password}' -f import.sql -jdbcparam 'validateservercertificate=0'"""
            bash_cmd = f"""bash -c "{cmd}" """
            exit_code, output = test_container.exec_run(cmd=bash_cmd)
            self.write_logs(output.decode("utf-8"))
            if exit_code != 0:
                raise Exception("Failed to populate the database with data.\nLog: %s" % cmd + "\n" + output.decode("utf-8"))
    def write_logs(self, output: str):
        """Persist the given text to this task's log directory."""
        log_file = Path(self.get_log_path(), "log")
        with log_file.open("w") as file:
            file.write(output)
795b798c24001be92eb5943ca15d79967e02e838 | 4,182 | py | Python | dbbudget/settings.py | rwa-django/dbbudget | 5bcf1ac557513bd233b27e7d993ef1542d704b1e | [
"Apache-2.0"
] | null | null | null | dbbudget/settings.py | rwa-django/dbbudget | 5bcf1ac557513bd233b27e7d993ef1542d704b1e | [
"Apache-2.0"
] | null | null | null | dbbudget/settings.py | rwa-django/dbbudget | 5bcf1ac557513bd233b27e7d993ef1542d704b1e | [
"Apache-2.0"
] | null | null | null | """
Django settings for dbbudget project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '^x8(5ll-#gjb3+8&oe22s4kjyo9u)bp$qipw3by01u7l^!v!p')

# SECURITY WARNING: don't run with debug turned on in production!
# Bug fix: bool() on a non-empty string is always True, so the previous
# `bool(os.environ.get('DJANGO_DEBUG', True))` could never be disabled via the
# environment. Parse the value explicitly instead; unset still defaults to True.
DEBUG = os.environ.get('DJANGO_DEBUG', 'True').lower() not in ('false', '0', 'no')

ALLOWED_HOSTS = ['db-budget.herokuapp.com', '127.0.0.1', 'localhost']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'budget.apps.BudgetConfig',  # project app
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    # WhiteNoise must come right after SecurityMiddleware to serve static files.
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dbbudget.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'dbbudget.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# Local default is SQLite; overridden below when $DATABASE_URL is set (Heroku).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Heroku: Update database configuration from $DATABASE_URL.
# (Mid-file import kept deliberately: it must run after DATABASES is defined.)
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'de-CH'
TIME_ZONE = 'Europe/Zurich'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_REDIRECT_URL = '/budget/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
# The absolute path to the directory where collectstatic will collect static files for deployment.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "__shared__"),
]
# The URL to use when referring to static files (where they will be served from)
STATIC_URL = '/static/'
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' | 29.450704 | 101 | 0.718077 |
795b7990318a1bab50ed45772abf34f17e5ba70a | 114 | py | Python | exercises/while_loops.py | noor19-meet/y2s18-python_review | de4033492983a3c7c9019a710c26953426c1e250 | [
"MIT"
] | null | null | null | exercises/while_loops.py | noor19-meet/y2s18-python_review | de4033492983a3c7c9019a710c26953426c1e250 | [
"MIT"
] | null | null | null | exercises/while_loops.py | noor19-meet/y2s18-python_review | de4033492983a3c7c9019a710c26953426c1e250 | [
"MIT"
] | null | null | null | # Write your solution for 1.3 here!
n = 1
sum = 0
while sum < 10000:
sum+=n
n+=2
print (sum)
print (n) | 14.25 | 35 | 0.570175 |
795b7b747d06c7ab3d6d0382330ec76cd112ba65 | 3,375 | py | Python | ibmsecurity/isam/web/authorization_server/logs.py | zone-zero/ibmsecurity | 7d3e38104b67e1b267e18a44845cb756a5302c3d | [
"Apache-2.0"
] | 46 | 2017-03-21T21:08:59.000Z | 2022-02-20T22:03:46.000Z | ibmsecurity/isam/web/authorization_server/logs.py | zone-zero/ibmsecurity | 7d3e38104b67e1b267e18a44845cb756a5302c3d | [
"Apache-2.0"
] | 201 | 2017-03-21T21:25:52.000Z | 2022-03-30T21:38:20.000Z | ibmsecurity/isam/web/authorization_server/logs.py | zone-zero/ibmsecurity | 7d3e38104b67e1b267e18a44845cb756a5302c3d | [
"Apache-2.0"
] | 91 | 2017-03-22T16:25:36.000Z | 2022-02-04T04:36:29.000Z | import logging
import os.path
from ibmsecurity.utilities import tools
logger = logging.getLogger(__name__)
uri = "/isam/authzserver"
requires_modules = None
requires_version = None
requires_model = "Appliance"
def get_all(isamAppliance, id, check_mode=False, force=False):
    """Retrieve the log file names of an existing authorization server instance."""
    endpoint = "{0}/{1}/logging/v1".format(uri, id)
    return isamAppliance.invoke_get(
        "Retrieve the log file names of an existing instance",
        endpoint,
        requires_modules=requires_modules,
        requires_version=requires_version,
        requires_model=requires_model)
def get(isamAppliance, id, file_id, size=None, start=None, options=None, check_mode=False, force=False):
    """Retrieve a snippet of one log file of an existing instance."""
    # Optional paging/filtering parameters are folded into the query string.
    query = tools.create_query_string(size=size, start=start, options=options)
    endpoint = "{0}/{1}/logging/{2}/v1{3}".format(uri, id, file_id, query)
    return isamAppliance.invoke_get(
        "Retrieve the log file snippet of an existing instance",
        endpoint,
        requires_modules=requires_modules,
        requires_version=requires_version,
        requires_model=requires_model)
def export_file(isamAppliance, id, file_id, filepath, check_mode=False, force=False):
    """Export one instance log file to *filepath*; skip if the file already exists."""
    if os.path.exists(filepath):
        # Never overwrite a previously exported copy; report it as a warning.
        logger.info("File '{0}' already exists. Skipping export.".format(filepath))
        warnings = ["File '{0}' already exists. Skipping export.".format(filepath)]
        return isamAppliance.create_return_object(warnings=warnings)
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    return isamAppliance.invoke_get_file(
        "Export the log file of an existing instance",
        "{0}/{1}/logging/{2}/v1?export".format(uri, id, file_id), filepath,
        requires_model=requires_model)
def delete(isamAppliance, id, file_id, check_mode=False, force=False):
    """Clear (delete the contents of) one instance log file."""
    file_exists, warnings = _check(isamAppliance, id, file_id)
    # Only issue the DELETE when the file is known to exist (or force is set).
    if not (force is True or file_exists is True):
        return isamAppliance.create_return_object(warnings=warnings)
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True, warnings=warnings)
    return isamAppliance.invoke_delete(
        "Clear the log file of an existing instance",
        "{0}/{1}/logging/{2}/v1".format(uri, id, file_id),
        requires_model=requires_model)
def _check(isamAppliance, id, file_id):
    """Return (exists, warnings): whether *file_id* is present on the instance."""
    ret_obj = get_all(isamAppliance, id)
    warnings = ret_obj['warnings']
    found = False
    # Only trust the data listing when the call produced no warnings.
    if not warnings:
        for entry in ret_obj['data']:
            if entry['id'] == file_id:
                logger.info("Found file_id '{0}'".format(file_id))
                found = True
    return found, warnings
795b7caf130aa1c6bc9e37e1f6515000ee1ddd41 | 3,156 | py | Python | testing/test_truncable_steps.py | vincent-antaki/Neuraxle | cef1284a261010c655f8ef02b4fca5b8bb45850c | [
"Apache-2.0"
] | 5 | 2019-07-30T21:35:17.000Z | 2020-02-25T09:08:09.000Z | testing/test_truncable_steps.py | Tubbz-alt/Neuraxle | 308f24248cdb242b7e2f6ec7c51daf2ee3e38834 | [
"Apache-2.0"
] | 1 | 2020-02-07T15:08:42.000Z | 2020-02-07T15:08:42.000Z | testing/test_truncable_steps.py | Tubbz-alt/Neuraxle | 308f24248cdb242b7e2f6ec7c51daf2ee3e38834 | [
"Apache-2.0"
] | null | null | null | from neuraxle.pipeline import Pipeline
from testing.mocks.step_mocks import SomeSplitStep, SomeStep, SomeTruncableStep
EXPECTED_STR_OUTPUT = """SomeTruncableStep
(
SomeTruncableStep(
name=SomeTruncableStep,
hyperparameters=HyperparameterSamples([('learning_rate', 0.1),
('l2_weight_reg', 0.001),
('hidden_size', 32),
('num_layers', 3),
('num_lstm_layers', 1),
('use_xavier_init', True),
('use_max_pool_else_avg_pool', True),
('dropout_drop_proba', 0.5),
('momentum', 0.1)])
)(
[('MockStep',
SomeStepWithHyperparams(
name=MockStep,
hyperparameters=HyperparameterSamples([('learning_rate', 0.1),
('l2_weight_reg', 0.001),
('hidden_size', 32),
('num_layers', 3),
('num_lstm_layers', 1),
('use_xavier_init', True),
('use_max_pool_else_avg_pool', True),
('dropout_drop_proba', 0.5),
('momentum', 0.1)])
)),
('MockStep1',
SomeStepWithHyperparams(
name=MockStep1,
hyperparameters=HyperparameterSamples([('learning_rate', 0.1),
('l2_weight_reg', 0.001),
('hidden_size', 32),
('num_layers', 3),
('num_lstm_layers', 1),
('use_xavier_init', True),
('use_max_pool_else_avg_pool', True),
('dropout_drop_proba', 0.5),
('momentum', 0.1)])
))]
)
)"""
def test_truncable_steps_should_split_by_type():
    """Splitting on SomeSplitStep should yield three sub-pipelines with auto-suffixed names."""
    pipeline = Pipeline([
        SomeStep(),
        SomeStep(),
        SomeSplitStep(),
        SomeStep(),
        SomeStep(),
        SomeSplitStep(),
        SomeStep(),
    ])
    sub_pipelines = pipeline.split(SomeSplitStep)
    expected_names = (
        ('SomeStep', 'SomeStep1', 'SomeSplitStep'),
        ('SomeStep2', 'SomeStep3', 'SomeSplitStep1'),
        ('SomeStep4',),
    )
    for sub_pipeline, names in zip(sub_pipelines, expected_names):
        for name in names:
            assert name in sub_pipeline
def test_set_train_should_set_train_to_false():
    """set_train(False) should propagate recursively into nested pipelines."""
    pipeline = Pipeline([
        SomeStep(),
        SomeStep(),
        Pipeline([
            SomeStep(),
        ])
    ])
    pipeline.set_train(False)
    for step in (pipeline, pipeline[0], pipeline[1], pipeline[2], pipeline[2][0]):
        assert not step.is_train
def test_set_train_should_set_train_to_true():
    """Steps should default to train mode, recursively, without calling set_train."""
    pipeline = Pipeline([
        SomeStep(),
        SomeStep(),
        Pipeline([
            SomeStep(),
        ])
    ])
    for step in (pipeline, pipeline[0], pipeline[1], pipeline[2], pipeline[2][0]):
        assert step.is_train
def test_basestep_representation_works_correctly():
    """str() of a truncable step should match the expected verbose representation."""
    assert str(SomeTruncableStep()) == EXPECTED_STR_OUTPUT
| 29.773585 | 79 | 0.56052 |
795b7cb1e2fd4ffd03d4ef47854d5864a0b956ad | 10,872 | py | Python | methods/GMAN/METR/model.py | kevin-xuan/Traffic-Benchmark | b9f8e40b4df9b58f5ad88432dc070cbbbcdc0228 | [
"MIT"
] | 120 | 2021-05-12T01:55:31.000Z | 2022-03-31T09:26:07.000Z | methods/GMAN/METR/model.py | swjtuer0/Traffic-Benchmark | b9f8e40b4df9b58f5ad88432dc070cbbbcdc0228 | [
"MIT"
] | 11 | 2021-05-14T00:57:58.000Z | 2022-03-08T05:54:45.000Z | methods/GMAN/METR/model.py | swjtuer0/Traffic-Benchmark | b9f8e40b4df9b58f5ad88432dc070cbbbcdc0228 | [
"MIT"
] | 40 | 2021-05-12T02:13:56.000Z | 2022-03-24T14:38:31.000Z | import tf_utils
# import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
def placeholder(P, Q, N):
    '''
    Create the graph input placeholders.
    P: number of history steps, Q: number of prediction steps, N: number of nodes.
    Returns (X, TE, label, is_training):
        X [batch, P, N] float32, TE [batch, P + Q, 2] int32,
        label [batch, Q, N] float32, is_training scalar bool.
    '''
    X = tf.placeholder(
        shape = (None, P, N), dtype = tf.float32, name = 'X')
    TE = tf.placeholder(
        shape = (None, P + Q, 2), dtype = tf.int32, name = 'TE')
    label = tf.placeholder(
        shape = (None, Q, N), dtype = tf.float32, name = 'label')
    is_training = tf.placeholder(
        shape = (), dtype = tf.bool, name = 'is_training')
    return X, TE, label, is_training
def FC(x, units, activations, bn, bn_decay, is_training, use_bias = True, drop = None):
    '''
    Stack of fully-connected layers realized as 1x1 convolutions.
    units/activations: a single value or a list, applied in sequence.
    drop: optional dropout rate (semantics per tf_utils.dropout), applied
          before each conv when given.
    '''
    # Normalize scalar/tuple arguments to parallel lists.
    if isinstance(units, int):
        units = [units]
        activations = [activations]
    elif isinstance(units, tuple):
        units = list(units)
        activations = list(activations)
    assert type(units) == list
    for num_unit, activation in zip(units, activations):
        if drop is not None:
            x = tf_utils.dropout(x, drop = drop, is_training = is_training)
        x = tf_utils.conv2d(
            x, output_dims = num_unit, kernel_size = [1, 1], stride = [1, 1],
            padding = 'VALID', use_bias = use_bias, activation = activation,
            bn = bn, bn_decay = bn_decay, is_training = is_training)
    return x
def STEmbedding(SE, TE, T, D, bn, bn_decay, is_training):
    '''
    spatio-temporal embedding
    SE: [N, D]
    TE: [batch_size, P + Q, 2] (dayofweek, timeofday)
    T: num of time steps in one day
    D: output dims
    return: [batch_size, P + Q, N, D]
    '''
    # spatial embedding
    SE = tf.expand_dims(tf.expand_dims(SE, axis = 0), axis = 0)  # -> [1, 1, N, D]; broadcasting expands over batch and time in the final add
    SE = FC(
        SE, units = [D, D], activations = [tf.nn.relu, None],
        bn = bn, bn_decay = bn_decay, is_training = is_training)
    # temporal embedding: one-hot day-of-week (7) concatenated with one-hot time-of-day (T)
    dayofweek = tf.one_hot(TE[..., 0], depth = 7)
    timeofday = tf.one_hot(TE[..., 1], depth = T)
    TE = tf.concat((dayofweek, timeofday), axis = -1)
    TE = tf.expand_dims(TE, axis = 2)  # -> [batch, P + Q, 1, 7 + T]; broadcasting expands over the N node dim
    TE = FC(
        TE, units = [D, D], activations = [tf.nn.relu, None],
        bn = bn, bn_decay = bn_decay, is_training = is_training)
    return tf.add(SE, TE)
def spatialAttention(X, STE, K, d, bn, bn_decay, is_training):
    '''
    spatial attention mechanism
    X: [batch_size, num_step, N, D]
    STE: [batch_size, num_step, N, D]
    K: number of attention heads
    d: dimension of each attention outputs
    return: [batch_size, num_step, N, D]
    '''
    D = K * d
    # Condition the attention on the spatio-temporal embedding.
    X = tf.concat((X, STE), axis = -1)
    # [batch_size, num_step, N, K * d]
    query = FC(
        X, units = D, activations = tf.nn.relu,
        bn = bn, bn_decay = bn_decay, is_training = is_training)
    key = FC(
        X, units = D, activations = tf.nn.relu,
        bn = bn, bn_decay = bn_decay, is_training = is_training)
    value = FC(
        X, units = D, activations = tf.nn.relu,
        bn = bn, bn_decay = bn_decay, is_training = is_training)
    # Multi-head trick: split the K heads off the channel axis and stack them
    # along the batch axis so all heads attend in one matmul.
    # [K * batch_size, num_step, N, d]
    query = tf.concat(tf.split(query, K, axis = -1), axis = 0)
    key = tf.concat(tf.split(key, K, axis = -1), axis = 0)
    value = tf.concat(tf.split(value, K, axis = -1), axis = 0)
    # [K * batch_size, num_step, N, N]
    attention = tf.matmul(query, key, transpose_b = True)
    attention /= (d ** 0.5)   # scaled dot-product attention
    attention = tf.nn.softmax(attention, -1)
    # [batch_size, num_step, N, D]
    X = tf.matmul(attention, value)
    X = tf.concat(tf.split(X, K, axis = 0), axis = -1)
    X = FC(
        X, units = [D, D], activations = [tf.nn.relu, None],
        bn = bn, bn_decay = bn_decay, is_training = is_training)
    return X
def temporalAttention(X, STE, K, d, bn, bn_decay, is_training, mask = True):
    '''
    temporal attention mechanism
    X: [batch_size, num_step, N, D]
    STE: [batch_size, num_step, N, D]
    K: number of attention heads
    d: dimension of each attention outputs
    mask: when True, apply a lower-triangular (causal) mask so each step only
          attends to itself and earlier steps
    return: [batch_size, num_step, N, D]
    '''
    D = K * d
    X = tf.concat((X, STE), axis = -1)
    # [batch_size, num_step, N, K * d]
    query = FC(
        X, units = D, activations = tf.nn.relu,
        bn = bn, bn_decay = bn_decay, is_training = is_training)
    key = FC(
        X, units = D, activations = tf.nn.relu,
        bn = bn, bn_decay = bn_decay, is_training = is_training)
    value = FC(
        X, units = D, activations = tf.nn.relu,
        bn = bn, bn_decay = bn_decay, is_training = is_training)
    # Heads stacked along the batch axis: [K * batch_size, num_step, N, d]
    query = tf.concat(tf.split(query, K, axis = -1), axis = 0)
    key = tf.concat(tf.split(key, K, axis = -1), axis = 0)
    value = tf.concat(tf.split(value, K, axis = -1), axis = 0)
    # Attention runs over the time axis, so move num_step inward:
    # query: [K * batch_size, N, num_step, d]
    # key: [K * batch_size, N, d, num_step]
    # value: [K * batch_size, N, num_step, d]
    query = tf.transpose(query, perm = (0, 2, 1, 3))
    key = tf.transpose(key, perm = (0, 2, 3, 1))
    value = tf.transpose(value, perm = (0, 2, 1, 3))
    # [K * batch_size, N, num_step, num_step]
    attention = tf.matmul(query, key)
    attention /= (d ** 0.5)
    # mask attention score
    if mask:
        batch_size = tf.shape(X)[0]
        num_step = X.get_shape()[1].value
        N = X.get_shape()[2].value
        mask = tf.ones(shape = (num_step, num_step))
        mask = tf.linalg.LinearOperatorLowerTriangular(mask).to_dense()
        mask = tf.expand_dims(tf.expand_dims(mask, axis = 0), axis = 0)
        mask = tf.tile(mask, multiples = (K * batch_size, N, 1, 1))
        mask = tf.cast(mask, dtype = tf.bool)
        # Fill masked positions with a large negative value so they vanish
        # under the following softmax.
        attention = tf.compat.v2.where(
            condition = mask, x = attention, y = -2 ** 15 + 1)
    # softmax
    attention = tf.nn.softmax(attention, axis = -1)
    # [batch_size, num_step, N, D]
    X = tf.matmul(attention, value)
    X = tf.transpose(X, perm = (0, 2, 1, 3))
    X = tf.concat(tf.split(X, K, axis = 0), axis = -1)
    X = FC(
        X, units = [D, D], activations = [tf.nn.relu, None],
        bn = bn, bn_decay = bn_decay, is_training = is_training)
    return X
def gatedFusion(HS, HT, D, bn, bn_decay, is_training):
    '''
    gated fusion
    HS: [batch_size, num_step, N, D]
    HT: [batch_size, num_step, N, D]
    D: output dims
    return: [batch_size, num_step, N, D]
    '''
    XS = FC(
        HS, units = D, activations = None,
        bn = bn, bn_decay = bn_decay,
        is_training = is_training, use_bias = False)
    XT = FC(
        HT, units = D, activations = None,
        bn = bn, bn_decay = bn_decay,
        is_training = is_training, use_bias = True)
    # z in (0, 1) gates between the spatial (HS) and temporal (HT) branches.
    z = tf.nn.sigmoid(tf.add(XS, XT))
    H = tf.add(tf.multiply(z, HS), tf.multiply(1 - z, HT))
    H = FC(
        H, units = [D, D], activations = [tf.nn.relu, None],
        bn = bn, bn_decay = bn_decay, is_training = is_training)
    return H
def STAttBlock(X, STE, K, d, bn, bn_decay, is_training, mask = True):
    '''
    One spatio-temporal attention block: gated fusion of spatial and temporal
    attention, with a residual connection back to the input X.
    X, STE: [batch_size, num_step, N, K * d]; return: same shape.
    '''
    HS = spatialAttention(X, STE, K, d, bn, bn_decay, is_training)
    HT = temporalAttention(X, STE, K, d, bn, bn_decay, is_training, mask = mask)
    H = gatedFusion(HS, HT, K * d, bn, bn_decay, is_training)
    return tf.add(X, H)
def transformAttention(X, STE_P, STE_Q, K, d, bn, bn_decay, is_training):
    '''
    transform attention mechanism: maps the P encoded history steps onto the
    Q prediction steps by attending queries (future embeddings) over keys
    (history embeddings).
    X: [batch_size, P, N, D]
    STE_P: [batch_size, P, N, D]
    STE_Q: [batch_size, Q, N, D]
    K: number of attention heads
    d: dimension of each attention outputs
    return: [batch_size, Q, N, D]
    '''
    D = K * d
    # query: [batch_size, Q, N, K * d]
    # key: [batch_size, P, N, K * d]
    # value: [batch_size, P, N, K * d]
    query = FC(
        STE_Q, units = D, activations = tf.nn.relu,
        bn = bn, bn_decay = bn_decay, is_training = is_training)
    key = FC(
        STE_P, units = D, activations = tf.nn.relu,
        bn = bn, bn_decay = bn_decay, is_training = is_training)
    value = FC(
        X, units = D, activations = tf.nn.relu,
        bn = bn, bn_decay = bn_decay, is_training = is_training)
    # Heads stacked along the batch axis:
    # query: [K * batch_size, Q, N, d]
    # key: [K * batch_size, P, N, d]
    # value: [K * batch_size, P, N, d]
    query = tf.concat(tf.split(query, K, axis = -1), axis = 0)
    key = tf.concat(tf.split(key, K, axis = -1), axis = 0)
    value = tf.concat(tf.split(value, K, axis = -1), axis = 0)
    # Attention over the time axis (Q queries vs P keys per node):
    # query: [K * batch_size, N, Q, d]
    # key: [K * batch_size, N, d, P]
    # value: [K * batch_size, N, P, d]
    query = tf.transpose(query, perm = (0, 2, 1, 3))
    key = tf.transpose(key, perm = (0, 2, 3, 1))
    value = tf.transpose(value, perm = (0, 2, 1, 3))
    # [K * batch_size, N, Q, P]
    attention = tf.matmul(query, key)
    attention /= (d ** 0.5)
    attention = tf.nn.softmax(attention, axis = -1)
    # [batch_size, Q, N, D]
    X = tf.matmul(attention, value)
    X = tf.transpose(X, perm = (0, 2, 1, 3))
    X = tf.concat(tf.split(X, K, axis = 0), axis = -1)
    X = FC(
        X, units = [D, D], activations = [tf.nn.relu, None],
        bn = bn, bn_decay = bn_decay, is_training = is_training)
    return X
def GMAN(X, TE, SE, P, Q, T, L, K, d, bn, bn_decay, is_training):
    '''
    GMAN: encoder (L ST-attention blocks) -> transform attention -> decoder
    (L ST-attention blocks) -> output projection.
    X: [batch_size, P, N]
    TE: [batch_size, P + Q, 2] (time-of-day, day-of-week)
    SE: [N, K * d]
    P: number of history steps
    Q: number of prediction steps
    T: one day is divided into T steps
    L: number of STAtt blocks in the encoder/decoder
    K: number of attention heads
    d: dimension of each attention head outputs
    return: [batch_size, Q, N]
    '''
    D = K * d
    # input: lift scalar readings to D channels
    X = tf.expand_dims(X, axis = -1)
    X = FC(
        X, units = [D, D], activations = [tf.nn.relu, None],
        bn = bn, bn_decay = bn_decay, is_training = is_training)
    # STE: shared spatio-temporal embedding, split into history/future parts
    STE = STEmbedding(SE, TE, T, D, bn, bn_decay, is_training)
    STE_P = STE[:, : P]
    STE_Q = STE[:, P :]
    # encoder
    for _ in range(L):
        X = STAttBlock(X, STE_P, K, d, bn, bn_decay, is_training)
    # transAtt: bridge from the P history steps to the Q prediction steps
    X = transformAttention(
        X, STE_P, STE_Q, K, d, bn, bn_decay, is_training)
    # decoder
    for _ in range(L):
        X = STAttBlock(X, STE_Q, K, d, bn, bn_decay, is_training)
    # output: project back to one channel per node, then drop that channel dim
    X = FC(
        X, units = [D, 1], activations = [tf.nn.relu, None],
        bn = bn, bn_decay = bn_decay, is_training = is_training,
        use_bias = True, drop = 0.1)
    return tf.squeeze(X, axis = 3)
def mae_loss(pred, label):
    '''
    Mean absolute error over the non-zero entries of label.
    Zero-valued labels are treated as missing data and excluded; the mask is
    renormalized by its mean so the loss scale is comparable across batches.
    '''
    mask = tf.not_equal(label, 0)
    mask = tf.cast(mask, tf.float32)
    mask /= tf.reduce_mean(mask)
    # Guard against 0/0 when a batch contains no valid labels at all.
    mask = tf.compat.v2.where(
        condition = tf.math.is_nan(mask), x = 0., y = mask)
    loss = tf.abs(tf.subtract(pred, label))
    loss *= mask
    loss = tf.compat.v2.where(
        condition = tf.math.is_nan(loss), x = 0., y = loss)
    loss = tf.reduce_mean(loss)
    return loss
| 38.553191 | 87 | 0.57018 |
795b7cd018647055ae4d502676067aded917f140 | 619 | py | Python | podcasts/migrations/0001_initial.py | cyanidesayonara/dop3pod | 987d7d51321a75c1b78d220768ffe3b178d05a6b | [
"MIT"
] | null | null | null | podcasts/migrations/0001_initial.py | cyanidesayonara/dop3pod | 987d7d51321a75c1b78d220768ffe3b178d05a6b | [
"MIT"
] | 7 | 2020-10-10T16:22:50.000Z | 2021-07-14T18:36:47.000Z | podcasts/migrations/0001_initial.py | cyanidesayonara/dop3pod | 987d7d51321a75c1b78d220768ffe3b178d05a6b | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-10-11 00:45
from django.db import migrations, models
class Migration(migrations.Migration):
    # First migration for the podcasts app: creates the Podcast table.
    initial = True
    # No prior migrations are required.
    dependencies = [
    ]
    operations = [
        # Podcast: title plus the two URLs needed to render/refresh a feed.
        migrations.CreateModel(
            name='Podcast',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('feed_url', models.CharField(max_length=200)),
                ('artwork_url', models.CharField(max_length=200)),
            ],
        ),
    ]
| 25.791667 | 114 | 0.575121 |
795b7cd946059676eb469fbf9d5e22a92e1fb64d | 2,620 | py | Python | model/Gradients.py | BhasherBEL/ComposIm | 719424862bb92682b9d5c3ff22aa92f4c0b1f28f | [
"MIT"
] | null | null | null | model/Gradients.py | BhasherBEL/ComposIm | 719424862bb92682b9d5c3ff22aa92f4c0b1f28f | [
"MIT"
] | null | null | null | model/Gradients.py | BhasherBEL/ComposIm | 719424862bb92682b9d5c3ff22aa92f4c0b1f28f | [
"MIT"
] | null | null | null | #!/usr/local/bin/python
# coding: utf-8
import numpy as np
import operator
from PIL import Image
import scipy
import scipy.cluster
import random
# Reference palette used by get_table_gradient: name -> RGB triple (uint8).
colors = {'black': np.array([0, 0, 0]),
          'white': np.array([255, 255, 255]),
          'red': np.array([255, 0, 0]),
          'green': np.array([0, 255, 0]),
          'blue': np.array([0, 0, 255]),
          'yellow': np.array([255, 255, 0]),
          'cyan': np.array([0, 255, 255]),
          'magenta': np.array([255, 0, 255]),
          #'gray': np.array([128, 128, 128]),
          'dark_green': np.array([0, 128, 0]),
          'dark_cyan': np.array([0, 128, 128])}
def get_gradient(image: Image) -> np.array:
    """Default dominant-color extraction: greedy k-clustering with tolerance 20."""
    tolerance = 20
    return get_k_gradient(image, tolerance)
def get_mean_gradient(data: list) -> np.array:
    """Return the per-channel mean of *data* (a list of RGB pixels), truncated to int."""
    pixels = np.array(data)
    return pixels.mean(axis=0).astype(dtype=int)
def get_table_gradient(data: list) -> np.array:
    """Score every palette entry against all pixels; return the best-matching RGB."""
    scores = {name: 0 for name in colors}
    for pixel in data:
        px = np.array(pixel)
        for name, rgb in colors.items():
            # Higher score = closer to the palette color (inverted abs distance).
            scores[name] += sum((255 - abs(px - rgb)) ** 2) / (10 ** 9)
    best_name = max(scores.items(), key=operator.itemgetter(1))[0]
    return colors[best_name]
def get_cluster_gradient(image: "Image") -> np.array:
    """
    Dominant color via k-means clustering of the pixels.

    Accepts anything np.asarray can turn into an (H, W, C) array (e.g. a PIL
    image). Returns the centroid (int RGB array) of the most populated of
    ``num_clusters`` clusters.

    Fix: the original used scipy.product / scipy.histogram / scipy.argmax,
    top-level NumPy aliases that were deprecated and removed from SciPy;
    replaced with the NumPy originals (np.prod / np.histogram / np.argmax).
    """
    num_clusters = 5
    ar = np.asarray(image)
    shape = ar.shape
    # Flatten to (num_pixels, channels) float observations for k-means.
    ar = ar.reshape(np.prod(shape[:2]), shape[2]).astype(float)
    codes, dist = scipy.cluster.vq.kmeans(ar, num_clusters)
    # Assign each pixel to its nearest centroid.
    vecs, dist = scipy.cluster.vq.vq(ar, codes)
    # Count cluster populations and take the centroid of the largest one.
    counts, bins = np.histogram(vecs, len(codes))
    index_max = np.argmax(counts)
    peak = np.array(codes[index_max]).astype(int)
    return peak
def get_present_gradient(image: "Image") -> np.array:
    """
    Return the most frequently occurring exact pixel value in *image*.

    Fix: PIL images expose their dimensions as ``.size`` (width, height),
    not ``.shape``; the original raised AttributeError on every call.
    """
    width, height = image.size
    # maxcolors = total pixel count, so getcolors never returns None here.
    pixels = image.getcolors(width * height)
    sorted_pixels = sorted(pixels, key=lambda t: t[0])
    dominant_color = sorted_pixels[-1][1]
    return np.array(dominant_color)
def get_k_gradient(image: "Image", k) -> np.array:
    """
    Greedy pixel clustering: each pixel joins the first existing cluster whose
    seed color is within Manhattan distance *k*, otherwise it seeds a new
    cluster. Returns one (randomly chosen) pixel of the most populated cluster.

    Fix: the original drew ``np.random.randint(0, len(cluster) - 1)``, which
    both excluded the cluster's last pixel and raised ValueError (low >= high)
    whenever the dominant cluster held a single pixel.
    """
    data = np.array(image.getdata())
    seeds = {}    # cluster id -> seed pixel that started the cluster
    members = {}  # cluster id -> all pixels assigned to the cluster
    for pixel in data:
        placed = False
        for cid, seed in seeds.items():
            if np.abs(seed - pixel).sum() <= k:
                placed = True
                members[cid].append(pixel)
                break
        if not placed:
            cid = len(seeds)
            seeds[cid] = pixel
            members[cid] = [pixel]
    # Cluster ids are consecutive ints in insertion order, so the argmax over
    # member counts is directly a valid key.
    dominant = np.array([len(v) for v in members.values()]).argmax()
    bucket = members[dominant]
    # randint's upper bound is exclusive: every index (incl. the last) is reachable.
    return bucket[np.random.randint(0, len(bucket))]
| 28.478261 | 81 | 0.580153 |
795b7e2765e6e9f32519298f4e8b12a8d16fbda6 | 3,643 | py | Python | build_msvc/msvc-autogen.py | adgeese/gbcr | 51145945d78c588f3ec3dc8595e14b1faea1a4e7 | [
"MIT"
] | null | null | null | build_msvc/msvc-autogen.py | adgeese/gbcr | 51145945d78c588f3ec3dc8595e14b1faea1a4e7 | [
"MIT"
] | null | null | null | build_msvc/msvc-autogen.py | adgeese/gbcr | 51145945d78c588f3ec3dc8595e14b1faea1a4e7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2016-2019 The Gold BCR Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import os
import re
import argparse
from shutil import copyfile
# Root of the C++ sources whose Makefiles are scanned for *_SOURCES lists.
SOURCE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'src'))
# MSBuild platform toolset written into common.init.vcxproj unless overridden.
DEFAULT_PLATFORM_TOOLSET = R'v141'
# Project names whose source lists are extracted from the Makefiles.
libs = [
    'libgoldbcr_cli',
    'libgoldbcr_common',
    'libgoldbcr_crypto',
    'libgoldbcr_server',
    'libgoldbcr_util',
    'libgoldbcr_wallet_tool',
    'libgoldbcr_wallet',
    'libgoldbcr_zmq',
    'bench_goldbcr',
    'libtest_util',
]
# Source files to exclude from the generated project files.
ignore_list = [
]
# Filled by parse_makefile(): lib name -> [(source_filename, object_filename), ...]
lib_sources = {}
def parse_makefile(makefile):
    """Scan one Makefile for `<lib>*_SOURCES = \\` blocks and record, per lib in
    `libs`, each .cpp source as (windows_path, flattened_object_name) in the
    module-level `lib_sources` dict."""
    with open(makefile, 'r', encoding='utf-8') as file:
        current_lib = ''
        for line in file.read().splitlines():
            if current_lib:
                # Inside a continuation block: first token is the source path.
                source = line.split()[0]
                if source.endswith('.cpp') and not source.startswith('$') and source not in ignore_list:
                    source_filename = source.replace('/', '\\')
                    object_filename = source.replace('/', '_')[:-4] + ".obj"
                    lib_sources[current_lib].append((source_filename, object_filename))
                # A line without a trailing backslash ends the block.
                if not line.endswith('\\'):
                    current_lib = ''
                continue
            for lib in libs:
                _lib = lib.replace('-', '_')
                if re.search(_lib + '.*_SOURCES \\= \\\\', line):
                    current_lib = lib
                    lib_sources[current_lib] = []
                    break
def set_common_properties(toolset):
    """Rewrite common.init.vcxproj so its <PlatformToolset> element is *toolset*."""
    props_path = os.path.join(SOURCE_DIR, '../build_msvc/common.init.vcxproj')
    with open(props_path, 'r', encoding='utf-8') as rfile:
        content = rfile.read()
    content = re.sub('<PlatformToolset>.*?</PlatformToolset>',
                     '<PlatformToolset>' + toolset + '</PlatformToolset>',
                     content)
    # Force Unix newlines so the generated file is stable across platforms.
    with open(props_path, 'w', encoding='utf-8', newline='\n') as wfile:
        wfile.write(content)
def main():
    """Parse CLI args, harvest source lists from the Makefiles, fill in the
    .vcxproj.in templates, and copy the MSVC config headers into place."""
    parser = argparse.ArgumentParser(description='Gold BCR-core msbuild configuration initialiser.')
    parser.add_argument('-toolset', nargs='?',help='Optionally sets the msbuild platform toolset, e.g. v142 for Visual Studio 2019.'
                        ' default is %s.'%DEFAULT_PLATFORM_TOOLSET)
    args = parser.parse_args()
    if args.toolset:
        parse_makefile(os.path.join(SOURCE_DIR, makefile_name))
        set_common_properties(args.toolset)
    # Collect the per-library source lists from every Makefile under src/.
    for makefile_name in os.listdir(SOURCE_DIR):
        if 'Makefile' in makefile_name:
            parse_makefile(os.path.join(SOURCE_DIR, makefile_name))
    for key, value in lib_sources.items():
        vcxproj_filename = os.path.abspath(os.path.join(os.path.dirname(__file__), key, key + '.vcxproj'))
        content = ''
        # Build the <ClCompile> items; ObjectFileName is flattened to avoid
        # collisions between same-named sources in different directories.
        for source_filename, object_filename in value:
            content += '    <ClCompile Include="..\\..\\src\\' + source_filename + '">\n'
            content += '      <ObjectFileName>$(IntDir)' + object_filename + '</ObjectFileName>\n'
            content += '    </ClCompile>\n'
        with open(vcxproj_filename + '.in', 'r', encoding='utf-8') as vcxproj_in_file:
            with open(vcxproj_filename, 'w', encoding='utf-8') as vcxproj_file:
                vcxproj_file.write(vcxproj_in_file.read().replace(
                    '@SOURCE_FILES@\n', content))
    # Install the pre-baked MSVC config headers expected by the build.
    copyfile(os.path.join(SOURCE_DIR,'../build_msvc/goldbcr_config.h'), os.path.join(SOURCE_DIR, 'config/goldbcr-config.h'))
    copyfile(os.path.join(SOURCE_DIR,'../build_msvc/libsecp256k1_config.h'), os.path.join(SOURCE_DIR, 'secp256k1/src/libsecp256k1-config.h'))
main()
| 41.873563 | 141 | 0.623113 |
795b7eadc29bbde4f897fbeabaf17d1b8f1fb3e8 | 94,416 | py | Python | sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2020_03_01/aio/operations/_iot_hub_resource_operations.py | JianpingChen/azure-sdk-for-python | 3072fc8c0366287fbaea1b02493a50259c3248a2 | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2020_03_01/aio/operations/_iot_hub_resource_operations.py | JianpingChen/azure-sdk-for-python | 3072fc8c0366287fbaea1b02493a50259c3248a2 | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2020_03_01/aio/operations/_iot_hub_resource_operations.py | JianpingChen/azure-sdk-for-python | 3072fc8c0366287fbaea1b02493a50259c3248a2 | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class IotHubResourceOperations:
"""IotHubResourceOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.iothub.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client used to send HTTP requests.
        self._client = client
        # Object-model serializer/deserializer pair for request and response bodies.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service client configuration (subscription id, polling interval, ...).
        self._config = config
    async def get(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs
    ) -> "_models.IotHubDescription":
        """Get the non-security related metadata of an IoT hub.
        Get the non-security related metadata of an IoT hub.
        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: IotHubDescription, or the result of cls(response)
        :rtype: ~azure.mgmt.iothub.v2020_03_01.models.IotHubDescription
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.IotHubDescription"]
        # Map auth/404/409 status codes onto typed azure-core exceptions;
        # callers may extend or override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('IotHubDescription', pipeline_response)
        # 'cls' lets callers post-process/replace the deserialized result.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        iot_hub_description: "_models.IotHubDescription",
        if_match: Optional[str] = None,
        **kwargs
    ) -> "_models.IotHubDescription":
        # Initial PUT request of the create_or_update long-running operation;
        # begin_create_or_update polls the operation this call starts.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.IotHubDescription"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        # If-Match is only sent when updating an existing hub (optimistic concurrency).
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(iot_hub_description, 'IotHubDescription')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        # 200 = updated existing hub, 201 = created new hub; same body shape either way.
        if response.status_code == 200:
            deserialized = self._deserialize('IotHubDescription', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('IotHubDescription', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        iot_hub_description: "_models.IotHubDescription",
        if_match: Optional[str] = None,
        **kwargs
    ) -> AsyncLROPoller["_models.IotHubDescription"]:
        """Create or update the metadata of an IoT hub.
        Create or update the metadata of an Iot hub. The usual pattern to modify a property is to
        retrieve the IoT hub metadata and security metadata, and then combine them with the modified
        values in a new body to update the IoT hub. If certain properties are missing in the JSON,
        updating IoT Hub may cause these values to fallback to default, which may lead to unexpected
        behavior.
        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :param iot_hub_description: The IoT hub metadata and security metadata.
        :type iot_hub_description: ~azure.mgmt.iothub.v2020_03_01.models.IotHubDescription
        :param if_match: ETag of the IoT Hub. Do not specify for creating a brand new IoT Hub. Required
         to update an existing IoT Hub.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either IotHubDescription or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothub.v2020_03_01.models.IotHubDescription]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.IotHubDescription"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                iot_hub_description=iot_hub_description,
                if_match=if_match,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response once polling completes.
            deserialized = self._deserialize('IotHubDescription', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
        }
        # polling=True -> standard ARM polling; False -> return immediately;
        # otherwise the caller supplied their own polling strategy object.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'}  # type: ignore
async def _update_initial(
    self,
    resource_group_name: str,
    resource_name: str,
    iot_hub_tags: "_models.TagsResource",
    **kwargs
) -> "_models.IotHubDescription":
    """Send the initial PATCH request of the update-tags long-running operation.

    Internal helper for :meth:`begin_update`: it serializes ``iot_hub_tags``,
    issues the PATCH, and returns the deserialized first response, which the
    LRO poller then tracks to completion.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.IotHubDescription"]
    # Map well-known HTTP status codes to azure-core exception types; callers
    # may extend or override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL from the operation's URL template.
    url = self._update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct query parameters.
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers.
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the tags payload and send the PATCH request through the pipeline.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(iot_hub_tags, 'TagsResource')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Only HTTP 200 is an accepted initial status for this operation.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('IotHubDescription', pipeline_response)

    if cls:
        # Hand the raw response and deserialized model to the caller-supplied hook.
        return cls(pipeline_response, deserialized, {})

    return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'}  # type: ignore
async def begin_update(
    self,
    resource_group_name: str,
    resource_name: str,
    iot_hub_tags: "_models.TagsResource",
    **kwargs
) -> AsyncLROPoller["_models.IotHubDescription"]:
    """Update an existing IoT Hubs tags.

    Update an existing IoT Hub tags. to update other fields use the CreateOrUpdate method.

    :param resource_group_name: Resource group identifier.
    :type resource_group_name: str
    :param resource_name: Name of iot hub to update.
    :type resource_name: str
    :param iot_hub_tags: Updated tag information to set into the iot hub instance.
    :type iot_hub_tags: ~azure.mgmt.iothub.v2020_03_01.models.TagsResource
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either IotHubDescription or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothub.v2020_03_01.models.IotHubDescription]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.IotHubDescription"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: start the operation with the initial PATCH request.
        # 'cls' is overridden so the raw pipeline response is preserved for the
        # poller; the user-supplied cls is applied on the final result instead.
        raw_result = await self._update_initial(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            iot_hub_tags=iot_hub_tags,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial call and must not be forwarded
    # to the polling method below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response once the LRO completes.
        deserialized = self._deserialize('IotHubDescription', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
    }

    # polling=True -> ARM polling; polling=False -> no polling; otherwise the
    # caller supplied an already-initialized polling method.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'}  # type: ignore
async def _delete_initial(
    self,
    resource_group_name: str,
    resource_name: str,
    **kwargs
) -> Optional[Union["_models.IotHubDescription", "_models.ErrorDetails"]]:
    """Send the initial DELETE request of the delete long-running operation.

    Internal helper for :meth:`begin_delete`. Note that HTTP 404 is an
    *accepted* status here: it is deserialized into ``ErrorDetails`` and
    returned rather than raised.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional[Union["_models.IotHubDescription", "_models.ErrorDetails"]]]
    # Map well-known HTTP status codes to azure-core exception types; callers
    # may extend or override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    # Construct URL from the operation's URL template.
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct query parameters.
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers.
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200/202/204/404 are all accepted initial statuses for delete.
    if response.status_code not in [200, 202, 204, 404]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('IotHubDescription', pipeline_response)

    if response.status_code == 202:
        deserialized = self._deserialize('IotHubDescription', pipeline_response)

    # A 404 is returned as an ErrorDetails model, not raised (204 yields None).
    if response.status_code == 404:
        deserialized = self._deserialize('ErrorDetails', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'}  # type: ignore
async def begin_delete(
    self,
    resource_group_name: str,
    resource_name: str,
    **kwargs
) -> AsyncLROPoller[Union["_models.IotHubDescription", "_models.ErrorDetails"]]:
    """Delete an IoT hub.

    Delete an IoT hub.

    :param resource_group_name: The name of the resource group that contains the IoT hub.
    :type resource_group_name: str
    :param resource_name: The name of the IoT hub.
    :type resource_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either IotHubDescription or ErrorDetails
     (the initial call surfaces a 404 as an ErrorDetails model) or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothub.v2020_03_01.models.IotHubDescription]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[Union["_models.IotHubDescription", "_models.ErrorDetails"]]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: start the operation with the initial DELETE request.
        # 'cls' is overridden so the raw pipeline response is preserved for the
        # poller; the user-supplied cls is applied on the final result instead.
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            resource_name=resource_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial call and must not be forwarded
    # to the polling method below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response once the LRO completes.
        deserialized = self._deserialize('IotHubDescription', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
    }

    # polling=True -> ARM polling; polling=False -> no polling; otherwise the
    # caller supplied an already-initialized polling method.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'}  # type: ignore
def list_by_subscription(
    self,
    **kwargs
) -> AsyncIterable["_models.IotHubDescriptionListResult"]:
    """Get all the IoT hubs in a subscription.

    Get all the IoT hubs in a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either IotHubDescriptionListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2020_03_01.models.IotHubDescriptionListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.IotHubDescriptionListResult"]
    # Map well-known HTTP status codes to azure-core exception types; callers
    # may extend or override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request for one page: the first page uses the operation's
        # URL template, later pages reuse the service-provided next link verbatim.
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_by_subscription.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link already carries the api-version query string.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page; return (continuation token, items of this page).
        deserialized = self._deserialize('IotHubDescriptionListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; anything other than HTTP 200 is surfaced as an error.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Devices/IotHubs'}  # type: ignore
def list_by_resource_group(
    self,
    resource_group_name: str,
    **kwargs
) -> AsyncIterable["_models.IotHubDescriptionListResult"]:
    """Get all the IoT hubs in a resource group.

    Get all the IoT hubs in a resource group.

    :param resource_group_name: The name of the resource group that contains the IoT hub.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either IotHubDescriptionListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2020_03_01.models.IotHubDescriptionListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.IotHubDescriptionListResult"]
    # Map well-known HTTP status codes to azure-core exception types; callers
    # may extend or override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request for one page: the first page uses the operation's
        # URL template, later pages reuse the service-provided next link verbatim.
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_by_resource_group.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link already carries the api-version query string.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page; return (continuation token, items of this page).
        deserialized = self._deserialize('IotHubDescriptionListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; anything other than HTTP 200 is surfaced as an error.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs'}  # type: ignore
async def get_stats(
    self,
    resource_group_name: str,
    resource_name: str,
    **kwargs
) -> "_models.RegistryStatistics":
    """Get the statistics from an IoT hub.

    Issues a GET against the hub's ``IotHubStats`` endpoint and returns the
    deserialized registry statistics.

    :param resource_group_name: The name of the resource group that contains the IoT hub.
    :type resource_group_name: str
    :param resource_name: The name of the IoT hub.
    :type resource_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: RegistryStatistics, or the result of cls(response)
    :rtype: ~azure.mgmt.iothub.v2020_03_01.models.RegistryStatistics
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RegistryStatistics"]
    # Status-code -> exception mapping; caller-provided entries take precedence.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    # Expand the URL template with the serialized path arguments.
    request_url = self._client.format_url(
        self.get_stats.metadata['url'],  # type: ignore
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        resourceName=self._serialize.url("resource_name", resource_name, 'str'),
    )

    # Query string and headers for the GET request.
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(request_url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code != 200:
        # Raise a mapped exception if one applies, otherwise a generic ARM error.
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, http_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    stats = self._deserialize('RegistryStatistics', pipeline_response)

    if cls:
        return cls(pipeline_response, stats, {})
    return stats
get_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/IotHubStats'}  # type: ignore
def get_valid_skus(
    self,
    resource_group_name: str,
    resource_name: str,
    **kwargs
) -> AsyncIterable["_models.IotHubSkuDescriptionListResult"]:
    """Get the list of valid SKUs for an IoT hub.

    Get the list of valid SKUs for an IoT hub.

    :param resource_group_name: The name of the resource group that contains the IoT hub.
    :type resource_group_name: str
    :param resource_name: The name of the IoT hub.
    :type resource_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either IotHubSkuDescriptionListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2020_03_01.models.IotHubSkuDescriptionListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.IotHubSkuDescriptionListResult"]
    # Map well-known HTTP status codes to azure-core exception types; callers
    # may extend or override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request for one page: the first page uses the operation's
        # URL template, later pages reuse the service-provided next link verbatim.
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.get_valid_skus.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link already carries the api-version query string.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page; return (continuation token, items of this page).
        deserialized = self._deserialize('IotHubSkuDescriptionListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; anything other than HTTP 200 is surfaced as an error.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
get_valid_skus.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/skus'}  # type: ignore
def list_event_hub_consumer_groups(
    self,
    resource_group_name: str,
    resource_name: str,
    event_hub_endpoint_name: str,
    **kwargs
) -> AsyncIterable["_models.EventHubConsumerGroupsListResult"]:
    """Get a list of the consumer groups in the Event Hub-compatible device-to-cloud endpoint in an IoT hub.

    Get a list of the consumer groups in the Event Hub-compatible device-to-cloud endpoint in an
    IoT hub.

    :param resource_group_name: The name of the resource group that contains the IoT hub.
    :type resource_group_name: str
    :param resource_name: The name of the IoT hub.
    :type resource_name: str
    :param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint.
    :type event_hub_endpoint_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either EventHubConsumerGroupsListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2020_03_01.models.EventHubConsumerGroupsListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.EventHubConsumerGroupsListResult"]
    # Map well-known HTTP status codes to azure-core exception types; callers
    # may extend or override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request for one page: the first page uses the operation's
        # URL template, later pages reuse the service-provided next link verbatim.
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_event_hub_consumer_groups.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
                'eventHubEndpointName': self._serialize.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link already carries the api-version query string.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page; return (continuation token, items of this page).
        deserialized = self._deserialize('EventHubConsumerGroupsListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; anything other than HTTP 200 is surfaced as an error.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_event_hub_consumer_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups'}  # type: ignore
async def get_event_hub_consumer_group(
    self,
    resource_group_name: str,
    resource_name: str,
    event_hub_endpoint_name: str,
    name: str,
    **kwargs
) -> "_models.EventHubConsumerGroupInfo":
    """Get a consumer group from the Event Hub-compatible device-to-cloud endpoint for an IoT hub.

    Issues a GET against the endpoint's ``ConsumerGroups/{name}`` resource and
    returns the deserialized consumer-group info.

    :param resource_group_name: The name of the resource group that contains the IoT hub.
    :type resource_group_name: str
    :param resource_name: The name of the IoT hub.
    :type resource_name: str
    :param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint in the IoT hub.
    :type event_hub_endpoint_name: str
    :param name: The name of the consumer group to retrieve.
    :type name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: EventHubConsumerGroupInfo, or the result of cls(response)
    :rtype: ~azure.mgmt.iothub.v2020_03_01.models.EventHubConsumerGroupInfo
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.EventHubConsumerGroupInfo"]
    # Status-code -> exception mapping; caller-provided entries take precedence.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    # Expand the URL template with the serialized path arguments.
    request_url = self._client.format_url(
        self.get_event_hub_consumer_group.metadata['url'],  # type: ignore
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        resourceName=self._serialize.url("resource_name", resource_name, 'str'),
        eventHubEndpointName=self._serialize.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'),
        name=self._serialize.url("name", name, 'str'),
    )

    # Query string and headers for the GET request.
    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(request_url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code != 200:
        # Raise a mapped exception if one applies, otherwise a generic ARM error.
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, http_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    consumer_group = self._deserialize('EventHubConsumerGroupInfo', pipeline_response)

    if cls:
        return cls(pipeline_response, consumer_group, {})
    return consumer_group
get_event_hub_consumer_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}'}  # type: ignore
async def create_event_hub_consumer_group(
    self,
    resource_group_name: str,
    resource_name: str,
    event_hub_endpoint_name: str,
    name: str,
    **kwargs
) -> "_models.EventHubConsumerGroupInfo":
    """Add a consumer group to an Event Hub-compatible endpoint in an IoT hub.

    Add a consumer group to an Event Hub-compatible endpoint in an IoT hub.

    :param resource_group_name: The name of the resource group that contains the IoT hub.
    :type resource_group_name: str
    :param resource_name: The name of the IoT hub.
    :type resource_name: str
    :param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint in the IoT hub.
    :type event_hub_endpoint_name: str
    :param name: The name of the consumer group to add.
    :type name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: EventHubConsumerGroupInfo, or the result of cls(response)
    :rtype: ~azure.mgmt.iothub.v2020_03_01.models.EventHubConsumerGroupInfo
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.EventHubConsumerGroupInfo"]
    # Map well-known HTTP status codes to azure-core exception types; callers
    # may extend or override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    # Construct URL from the operation's URL template.
    url = self.create_event_hub_consumer_group.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
        'eventHubEndpointName': self._serialize.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct query parameters.
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers.
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # NOTE(review): the PUT carries no request body — the new consumer group's
    # name travels in the URL path ('name'); presumably that is all this API
    # version requires. Confirm against the 2020-03-01 REST specification.
    request = self._client.put(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Only HTTP 200 is an accepted status for this operation.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('EventHubConsumerGroupInfo', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
create_event_hub_consumer_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}'}  # type: ignore
async def delete_event_hub_consumer_group(
    self,
    resource_group_name: str,
    resource_name: str,
    event_hub_endpoint_name: str,
    name: str,
    **kwargs
) -> None:
    """Delete a consumer group from an Event Hub-compatible endpoint in an IoT hub.

    Delete a consumer group from an Event Hub-compatible endpoint in an IoT hub.

    :param resource_group_name: The name of the resource group that contains the IoT hub.
    :type resource_group_name: str
    :param resource_name: The name of the IoT hub.
    :type resource_name: str
    :param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint in the IoT hub.
    :type event_hub_endpoint_name: str
    :param name: The name of the consumer group to delete.
    :type name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    errors = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    # Expand the URL template with the serialized path arguments.
    url = self._client.format_url(
        self.delete_event_hub_consumer_group.metadata['url'],  # type: ignore
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        resourceName=self._serialize.url("resource_name", resource_name, 'str'),
        eventHubEndpointName=self._serialize.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'),
        name=self._serialize.url("name", name, 'str'),
    )
    query_params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.delete(url, query_params, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        # Map well-known status codes to typed errors, then raise a generic ARM error.
        map_error(status_code=response.status_code, response=response, error_map=errors)
        error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
delete_event_hub_consumer_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}'}  # type: ignore
def list_jobs(
    self,
    resource_group_name: str,
    resource_name: str,
    **kwargs
) -> AsyncIterable["_models.JobResponseListResult"]:
    """Get a list of all the jobs in an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.

    Get a list of all the jobs in an IoT hub. For more information, see:
    https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.

    :param resource_group_name: The name of the resource group that contains the IoT hub.
    :type resource_group_name: str
    :param resource_name: The name of the IoT hub.
    :type resource_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either JobResponseListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2020_03_01.models.JobResponseListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.JobResponseListResult"]
    errors = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
        if next_link:
            # Continuation pages: the service returns a fully-formed URL.
            return self._client.get(next_link, {}, headers)
        # First page: expand the URL template and attach the api-version.
        url = self._client.format_url(
            self.list_jobs.metadata['url'],  # type: ignore
            subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
            resourceName=self._serialize.url("resource_name", resource_name, 'str'),
        )
        query_params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        return self._client.get(url, query_params, headers)

    async def extract_data(pipeline_response):
        page = self._deserialize('JobResponseListResult', pipeline_response)
        items = page.value
        if cls:
            items = cls(items)
        return page.next_link or None, AsyncList(items)

    async def get_next(next_link=None):
        pipeline_response = await self._client._pipeline.run(
            prepare_request(next_link), stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
            map_error(status_code=response.status_code, response=response, error_map=errors)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)
list_jobs.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/jobs'}  # type: ignore
async def get_job(
    self,
    resource_group_name: str,
    resource_name: str,
    job_id: str,
    **kwargs
) -> "_models.JobResponse":
    """Get the details of a job from an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.

    Get the details of a job from an IoT hub. For more information, see:
    https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.

    :param resource_group_name: The name of the resource group that contains the IoT hub.
    :type resource_group_name: str
    :param resource_name: The name of the IoT hub.
    :type resource_name: str
    :param job_id: The job identifier.
    :type job_id: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: JobResponse, or the result of cls(response)
    :rtype: ~azure.mgmt.iothub.v2020_03_01.models.JobResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.JobResponse"]
    errors = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    # Expand the URL template with the serialized path arguments.
    url = self._client.format_url(
        self.get_job.metadata['url'],  # type: ignore
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        resourceName=self._serialize.url("resource_name", resource_name, 'str'),
        jobId=self._serialize.url("job_id", job_id, 'str'),
    )
    query_params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_params, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=errors)
        error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    result = self._deserialize('JobResponse', pipeline_response)
    if cls:
        return cls(pipeline_response, result, {})
    return result
get_job.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/jobs/{jobId}'}  # type: ignore
def get_quota_metrics(
    self,
    resource_group_name: str,
    resource_name: str,
    **kwargs
) -> AsyncIterable["_models.IotHubQuotaMetricInfoListResult"]:
    """Get the quota metrics for an IoT hub.

    Get the quota metrics for an IoT hub.

    :param resource_group_name: The name of the resource group that contains the IoT hub.
    :type resource_group_name: str
    :param resource_name: The name of the IoT hub.
    :type resource_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either IotHubQuotaMetricInfoListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2020_03_01.models.IotHubQuotaMetricInfoListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.IotHubQuotaMetricInfoListResult"]
    errors = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
        if next_link:
            # Continuation pages: the service returns a fully-formed URL.
            return self._client.get(next_link, {}, headers)
        # First page: expand the URL template and attach the api-version.
        url = self._client.format_url(
            self.get_quota_metrics.metadata['url'],  # type: ignore
            subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
            resourceName=self._serialize.url("resource_name", resource_name, 'str'),
        )
        query_params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        return self._client.get(url, query_params, headers)

    async def extract_data(pipeline_response):
        page = self._deserialize('IotHubQuotaMetricInfoListResult', pipeline_response)
        items = page.value
        if cls:
            items = cls(items)
        return page.next_link or None, AsyncList(items)

    async def get_next(next_link=None):
        pipeline_response = await self._client._pipeline.run(
            prepare_request(next_link), stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
            map_error(status_code=response.status_code, response=response, error_map=errors)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)
get_quota_metrics.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/quotaMetrics'}  # type: ignore
def get_endpoint_health(
    self,
    resource_group_name: str,
    iot_hub_name: str,
    **kwargs
) -> AsyncIterable["_models.EndpointHealthDataListResult"]:
    """Get the health for routing endpoints.

    Get the health for routing endpoints.

    :param resource_group_name:
    :type resource_group_name: str
    :param iot_hub_name:
    :type iot_hub_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either EndpointHealthDataListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2020_03_01.models.EndpointHealthDataListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.EndpointHealthDataListResult"]
    errors = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
        if next_link:
            # Continuation pages: the service returns a fully-formed URL.
            return self._client.get(next_link, {}, headers)
        # First page: expand the URL template and attach the api-version.
        url = self._client.format_url(
            self.get_endpoint_health.metadata['url'],  # type: ignore
            subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
            iotHubName=self._serialize.url("iot_hub_name", iot_hub_name, 'str'),
        )
        query_params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        return self._client.get(url, query_params, headers)

    async def extract_data(pipeline_response):
        page = self._deserialize('EndpointHealthDataListResult', pipeline_response)
        items = page.value
        if cls:
            items = cls(items)
        return page.next_link or None, AsyncList(items)

    async def get_next(next_link=None):
        pipeline_response = await self._client._pipeline.run(
            prepare_request(next_link), stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
            map_error(status_code=response.status_code, response=response, error_map=errors)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)
get_endpoint_health.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{iotHubName}/routingEndpointsHealth'}  # type: ignore
async def check_name_availability(
    self,
    operation_inputs: "_models.OperationInputs",
    **kwargs
) -> "_models.IotHubNameAvailabilityInfo":
    """Check if an IoT hub name is available.

    Check if an IoT hub name is available.

    :param operation_inputs: Set the name parameter in the OperationInputs structure to the name of
     the IoT hub to check.
    :type operation_inputs: ~azure.mgmt.iothub.v2020_03_01.models.OperationInputs
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: IotHubNameAvailabilityInfo, or the result of cls(response)
    :rtype: ~azure.mgmt.iothub.v2020_03_01.models.IotHubNameAvailabilityInfo
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.IotHubNameAvailabilityInfo"]
    errors = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the URL template with the serialized path arguments.
    url = self._client.format_url(
        self.check_name_availability.metadata['url'],  # type: ignore
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )
    query_params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    body = self._serialize.body(operation_inputs, 'OperationInputs')
    request = self._client.post(url, query_params, headers, content=body)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=errors)
        error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    result = self._deserialize('IotHubNameAvailabilityInfo', pipeline_response)
    if cls:
        return cls(pipeline_response, result, {})
    return result
check_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Devices/checkNameAvailability'}  # type: ignore
async def test_all_routes(
    self,
    iot_hub_name: str,
    resource_group_name: str,
    input: "_models.TestAllRoutesInput",
    **kwargs
) -> "_models.TestAllRoutesResult":
    """Test all routes.

    Test all routes configured in this Iot Hub.

    :param iot_hub_name: IotHub to be tested.
    :type iot_hub_name: str
    :param resource_group_name: resource group which Iot Hub belongs to.
    :type resource_group_name: str
    :param input: Input for testing all routes.
    :type input: ~azure.mgmt.iothub.v2020_03_01.models.TestAllRoutesInput
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: TestAllRoutesResult, or the result of cls(response)
    :rtype: ~azure.mgmt.iothub.v2020_03_01.models.TestAllRoutesResult
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.TestAllRoutesResult"]
    errors = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the URL template with the serialized path arguments.
    url = self._client.format_url(
        self.test_all_routes.metadata['url'],  # type: ignore
        iotHubName=self._serialize.url("iot_hub_name", iot_hub_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
    )
    query_params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    body = self._serialize.body(input, 'TestAllRoutesInput')
    request = self._client.post(url, query_params, headers, content=body)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=errors)
        error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    result = self._deserialize('TestAllRoutesResult', pipeline_response)
    if cls:
        return cls(pipeline_response, result, {})
    return result
test_all_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{iotHubName}/routing/routes/$testall'}  # type: ignore
async def test_route(
    self,
    iot_hub_name: str,
    resource_group_name: str,
    input: "_models.TestRouteInput",
    **kwargs
) -> "_models.TestRouteResult":
    """Test the new route.

    Test the new route for this Iot Hub.

    :param iot_hub_name: IotHub to be tested.
    :type iot_hub_name: str
    :param resource_group_name: resource group which Iot Hub belongs to.
    :type resource_group_name: str
    :param input: Route that needs to be tested.
    :type input: ~azure.mgmt.iothub.v2020_03_01.models.TestRouteInput
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: TestRouteResult, or the result of cls(response)
    :rtype: ~azure.mgmt.iothub.v2020_03_01.models.TestRouteResult
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.TestRouteResult"]
    errors = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the URL template with the serialized path arguments.
    url = self._client.format_url(
        self.test_route.metadata['url'],  # type: ignore
        iotHubName=self._serialize.url("iot_hub_name", iot_hub_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
    )
    query_params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    body = self._serialize.body(input, 'TestRouteInput')
    request = self._client.post(url, query_params, headers, content=body)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=errors)
        error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    result = self._deserialize('TestRouteResult', pipeline_response)
    if cls:
        return cls(pipeline_response, result, {})
    return result
test_route.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{iotHubName}/routing/routes/$testnew'}  # type: ignore
def list_keys(
    self,
    resource_group_name: str,
    resource_name: str,
    **kwargs
) -> AsyncIterable["_models.SharedAccessSignatureAuthorizationRuleListResult"]:
    """Get the security metadata for an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.

    Get the security metadata for an IoT hub. For more information, see:
    https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.

    :param resource_group_name: The name of the resource group that contains the IoT hub.
    :type resource_group_name: str
    :param resource_name: The name of the IoT hub.
    :type resource_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either SharedAccessSignatureAuthorizationRuleListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2020_03_01.models.SharedAccessSignatureAuthorizationRuleListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SharedAccessSignatureAuthorizationRuleListResult"]
    errors = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
        if next_link:
            # Continuation pages are plain GETs on the service-provided URL.
            return self._client.get(next_link, {}, headers)
        # First page: the listkeys action is a POST on the templated URL.
        url = self._client.format_url(
            self.list_keys.metadata['url'],  # type: ignore
            subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
            resourceName=self._serialize.url("resource_name", resource_name, 'str'),
        )
        query_params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
        return self._client.post(url, query_params, headers)

    async def extract_data(pipeline_response):
        page = self._deserialize('SharedAccessSignatureAuthorizationRuleListResult', pipeline_response)
        items = page.value
        if cls:
            items = cls(items)
        return page.next_link or None, AsyncList(items)

    async def get_next(next_link=None):
        pipeline_response = await self._client._pipeline.run(
            prepare_request(next_link), stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
            map_error(status_code=response.status_code, response=response, error_map=errors)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)
list_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/listkeys'}  # type: ignore
async def get_keys_for_key_name(
    self,
    resource_group_name: str,
    resource_name: str,
    key_name: str,
    **kwargs
) -> "_models.SharedAccessSignatureAuthorizationRule":
    """Get a shared access policy by name from an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.

    Get a shared access policy by name from an IoT hub. For more information, see:
    https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.

    :param resource_group_name: The name of the resource group that contains the IoT hub.
    :type resource_group_name: str
    :param resource_name: The name of the IoT hub.
    :type resource_name: str
    :param key_name: The name of the shared access policy.
    :type key_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: SharedAccessSignatureAuthorizationRule, or the result of cls(response)
    :rtype: ~azure.mgmt.iothub.v2020_03_01.models.SharedAccessSignatureAuthorizationRule
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SharedAccessSignatureAuthorizationRule"]
    errors = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    accept = "application/json"

    # Expand the URL template with the serialized path arguments.
    url = self._client.format_url(
        self.get_keys_for_key_name.metadata['url'],  # type: ignore
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        resourceName=self._serialize.url("resource_name", resource_name, 'str'),
        keyName=self._serialize.url("key_name", key_name, 'str'),
    )
    query_params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    # The listkeys action is a POST even though it only reads data.
    request = self._client.post(url, query_params, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=errors)
        error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    result = self._deserialize('SharedAccessSignatureAuthorizationRule', pipeline_response)
    if cls:
        return cls(pipeline_response, result, {})
    return result
get_keys_for_key_name.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/IotHubKeys/{keyName}/listkeys'}  # type: ignore
async def export_devices(
    self,
    resource_group_name: str,
    resource_name: str,
    export_devices_parameters: "_models.ExportDevicesRequest",
    **kwargs
) -> "_models.JobResponse":
    """Exports all the device identities in the IoT hub identity registry to an Azure Storage blob container. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.

    Exports all the device identities in the IoT hub identity registry to an Azure Storage blob
    container. For more information, see:
    https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.

    :param resource_group_name: The name of the resource group that contains the IoT hub.
    :type resource_group_name: str
    :param resource_name: The name of the IoT hub.
    :type resource_name: str
    :param export_devices_parameters: The parameters that specify the export devices operation.
    :type export_devices_parameters: ~azure.mgmt.iothub.v2020_03_01.models.ExportDevicesRequest
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: JobResponse, or the result of cls(response)
    :rtype: ~azure.mgmt.iothub.v2020_03_01.models.JobResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.JobResponse"]
    errors = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the URL template with the serialized path arguments.
    url = self._client.format_url(
        self.export_devices.metadata['url'],  # type: ignore
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        resourceName=self._serialize.url("resource_name", resource_name, 'str'),
    )
    query_params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    body = self._serialize.body(export_devices_parameters, 'ExportDevicesRequest')
    request = self._client.post(url, query_params, headers, content=body)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=errors)
        error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    result = self._deserialize('JobResponse', pipeline_response)
    if cls:
        return cls(pipeline_response, result, {})
    return result
export_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/exportDevices'}  # type: ignore
async def import_devices(
    self,
    resource_group_name: str,
    resource_name: str,
    import_devices_parameters: "_models.ImportDevicesRequest",
    **kwargs
) -> "_models.JobResponse":
    """Import, update, or delete device identities in the IoT hub identity registry from a blob. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.
    Import, update, or delete device identities in the IoT hub identity registry from a blob. For
    more information, see:
    https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.
    :param resource_group_name: The name of the resource group that contains the IoT hub.
    :type resource_group_name: str
    :param resource_name: The name of the IoT hub.
    :type resource_name: str
    :param import_devices_parameters: The parameters that specify the import devices operation.
    :type import_devices_parameters: ~azure.mgmt.iothub.v2020_03_01.models.ImportDevicesRequest
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: JobResponse, or the result of cls(response)
    :rtype: ~azure.mgmt.iothub.v2020_03_01.models.JobResponse
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.JobResponse"]
    # Default ARM error mapping; callers may extend/override it via 'error_map'.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-03-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"
    # Construct URL
    url = self.import_devices.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    # Serialize the request body and POST it through the client pipeline.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(import_devices_parameters, 'ImportDevicesRequest')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # 200 is the only success status this operation accepts.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
    deserialized = self._deserialize('JobResponse', pipeline_response)
    if cls:
        # Hand the raw pipeline response and deserialized model to the caller's hook.
        return cls(pipeline_response, deserialized, {})
    return deserialized
import_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/importDevices'}  # type: ignore
| 51.035676 | 254 | 0.671179 |
795b7ec3f68905e94f2adadab671114b364e8197 | 9,910 | py | Python | ihaleGozlemevi/lib/legacy_pdf_parser.py | efebarlas/ihale | 4cd647e1048287e0e106a961f1e360a9c44de0c8 | [
"MIT"
] | null | null | null | ihaleGozlemevi/lib/legacy_pdf_parser.py | efebarlas/ihale | 4cd647e1048287e0e106a961f1e360a9c44de0c8 | [
"MIT"
] | null | null | null | ihaleGozlemevi/lib/legacy_pdf_parser.py | efebarlas/ihale | 4cd647e1048287e0e106a961f1e360a9c44de0c8 | [
"MIT"
] | null | null | null | from pdfminer.high_level import extract_pages as pdf_extract_pages
from pdfminer.layout import LTText, LTTextContainer
# # import-time monkey patching of LTTextContainer
# # so we can easily retrieve the text color
# # there's a chance that the text has characters with
# # different colors. so we just look at three characters and
# # return 'mixed' if they aren't all the same.
# # we don't look at each and every character because
# # i suspect that is expensive to do for all text.
# def textColor(self):
# from random import randrange
# txtObj = self._objs[0]
# charColors = tuple(txtObj._objs[randrange(0, len(txtObj))].ncs.name for _ in range(3))
# c = charColors[0]
# for i in charColors:
# if i != c:
# return "mixed"
# return c
# LTTextContainer.text_color = textColor
# from more_itertools import seekable
from ihaleGozlemevi.lib import utils
from dataclasses import dataclass
import re
from ihaleGozlemevi.lib.faults import *
from datetime import datetime as dt
from pathlib import Path
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
# generic pdf stuff
def getPdfTree(pdfFilePath):
    """Open *pdfFilePath* with pdfminer and return a lazy generator of LTPage objects."""
    pages = pdf_extract_pages(pdfFilePath)
    return pages
# def safeSeek(seekbl, idx):
# seekbl.seek(idx)
# try:
# return next(seekbl)
# except StopIteration:
# return IndexFault(seekbl, idx)
# TODO: consider using a different pdfminer.six api, because random seeking with extract_pages is very slow
def safeSeek(seekbl, idx):
    """Seek *seekbl* to position *idx* and return the next item.

    Returns an IndexFault instead of raising when the seekable is exhausted.
    """
    seekbl.seek(idx)
    sentinel = object()
    item = next(seekbl, sentinel)
    return IndexFault(seekbl, idx) if item is sentinel else item
@dataclass
# TODO: upgrade Python to 3.10 and use slots=True for performance improvements
class BBox:
    """An axis-aligned bounding box in PDF page coordinates.

    (x0, y0) and (x1, y1) are opposite corners, matching pdfminer's
    LTComponent attribute names.
    """
    x0: float
    x1: float
    y0: float
    y1: float
class PDFCursor():
    """A position inside a PDF: a page number plus a bounding box on that page."""

    def __init__(self, LTPage, LTComponent=None):
        self.pageNum = LTPage.pageid
        if LTComponent is None:
            # No component given: collapse the cursor to a zero-area box
            # pinned to the very top of the page.
            self.bbox = BBox(0, 0, LTPage.y1, LTPage.y1)
        else:
            self.bbox = BBox(LTComponent.x0, LTComponent.x1,
                             LTComponent.y0, LTComponent.y1)
        self.pageBBox = BBox(LTPage.x0, LTPage.x1, LTPage.y0, LTPage.y1)

    def getBBox(self):
        """Return this cursor's own bounding box."""
        return self.bbox

    def getPageBelow(self):
        """Return the bbox covering the page region strictly below this cursor."""
        return BBox(self.pageBBox.x0, self.pageBBox.x1,
                    self.pageBBox.y0, self.bbox.y0)

    def isAbove(self, bbox):
        """True when *bbox* lies entirely within the page region below this cursor."""
        return isWithinBBox(self.getPageBelow(), bbox)
# ihale specific stuff
class Bulten():
    """Lazy wrapper around a tender-bulletin ("ihale bulteni") PDF.

    Pages come from a forward-only pdfminer generator; getPage() reopens the
    document whenever an earlier page is requested, so random access works but
    backward seeks are expensive.
    """

    def __init__(self, pdfFilePath):
        self.pageCursor = getPdfTree(pdfFilePath)  # forward-only generator of LTPage objects
        self.pdfFilePath = pdfFilePath
        self.documentName = Path(pdfFilePath).name
        self._date = None  # cache for getDate()
        self._currPageNum = -1  # index of the page most recently yielded by pageCursor

    def getPage(self,pageNum):
        """Return the LTPage with 0-based index *pageNum*, or an IndexFault when out of range."""
        if pageNum < self._currPageNum:
            # pdfminer's generator cannot go backwards: reopen and retry.
            self.pageCursor = getPdfTree(self.pdfFilePath)
            self._currPageNum = -1
            return self.getPage(pageNum)
        try:
            # Skip forward to just before the requested page.
            for _ in range(pageNum - self._currPageNum - 1):
                next(self.pageCursor)
            page = next(self.pageCursor)
        except StopIteration:
            return IndexFault(self.pageCursor, pageNum)
        self._currPageNum = pageNum
        log.debug(f'New page fetched: {self.documentName}:{pageNum}')
        return page

    def getIhaleTipi(self):
        # NOTE(review): debugging helper — prints page-0 text; does not yet
        # return a tender type despite the name.
        for i in self.getPage(0):
            if isinstance(i, LTTextContainer):
                print(i.get_text())

    def printBultenText(self):
        """Dump ASCII-fied text of every remaining page to stdout (consumes pageCursor)."""
        for i in self.pageCursor:
            print(f'\n\n***PAGE {i}***\n\n')
            for j in i:
                if isinstance(j, LTTextContainer):
                    print(utils.asciify(j.get_text()))

    def getIhaleList(self):
        # returns: seekable generator which looks through pdf and parses ihale's
        # TODO: hello
        # NOTE(review): placeholder implementation — the generator yields a dummy value.
        def ihaleGenerator():
            ihale = "lol"
            yield ihale
        return ihaleGenerator

    def getDate(self):
        """Parse and cache the bulletin date from the header area of page index 1."""
        if self._date != None:
            return self._date
        second_page = self.getPage(1)
        # Region near the top of the page where the date line is printed.
        dateBBox = BBox(300, 600, 750, 842.04)
        for i in second_page:
            if isinstance(i, LTTextContainer) and isWithinBBox(dateBBox, i):
                dateStr = utils.asciify(i.get_text().lower())
                break
        # "<day> <Turkish month name> <year>", accepting both accented and ASCII spellings.
        regex = re.compile('(\\d{,2}) (ocak|subat|mart|nisan|mayıs|mayis|haziran|temmuz|agustos|eylul|ekim|kasım|kasim|aralık|aralik) (\\d{4})')
        try:
            day, month, year = regex.search(dateStr).groups()
        except:
            # Bare except also covers dateStr being unbound when nothing matched the bbox.
            return DocumentFault('bulten tarihi', self.documentName)
        monthNum = utils.monthNameToNum(month)
        date = dt.strptime(f'{day}/{monthNum}/{year}', '%d/%m/%Y')
        self._date = date
        return date

    def getYear(self):
        """Return the bulletin year, or propagate the Fault from getDate()."""
        date = self.getDate()
        if isinstance(date, Fault):
            return date
        return date.year

    def isSonuc(self):
        # NOTE(review): unfinished and broken — self.pdf is never assigned
        # anywhere in this class, so this raises AttributeError; looks like a
        # leftover from a different PDF backend.
        pg = self.pdf.pages[1]

    def textSearcher(self, text, cursor=None):
        # returns generator which yields PDFCursors to components with the specified text
        # NOTE: search queries are case-insensitive and asciified!!
        # NOTE(review): despite the comment above, this yields the raw
        # LTTextContainer components, not PDFCursor objects — confirm intent.
        # TODO: we may only care about visible text (i.e., black text on white bg). there should be an arg about this!
        textQuery = utils.asciify(text.lower())
        if textQuery != text:
            log.warning(f'PDF is searched for text {text}, which isn\'t in lower case OR has non-ASCII characters!')
        if cursor is None:
            cursor = PDFCursor(self.getPage(0))
        startPage = self.getPage(cursor.pageNum)
        cpn = cursor.pageNum
        # TODO: could be more functional with maps and all
        # On the cursor's own page, only components below the cursor are considered.
        for component in startPage:
            if isinstance(component, LTTextContainer) and \
                cursor.isAbove(component):
                componentText = utils.asciify(component.get_text().lower())
                if componentText.find(textQuery) != -1:
                    log.debug(f'text found in page num: {cpn}')
                    yield component
        # Subsequent pages are scanned in full until getPage() returns a Fault.
        pageNum = cursor.pageNum + 1
        page = self.getPage(pageNum)
        while not isinstance(page, Fault):
            for component in page:
                if isinstance(component, LTTextContainer):
                    componentText = utils.asciify(component.get_text().lower())
                    if componentText.find(textQuery) != -1:
                        log.debug(f'text found in page num: {pageNum}')
                        yield component
            pageNum += 1
            page = self.getPage(pageNum)
        if not isinstance(page, IndexFault):
            log.warning('Page retrieval failed unexpectedly')
            log.warning(page)
def isWithinBBox(capturingBBox, capturedBBox):
    """Return True when *capturedBBox* lies entirely inside *capturingBBox*.

    Works with both LTComponents and BBox instances, since only the
    x0/x1/y0/y1 attributes are read.
    """
    outer, inner = capturingBBox, capturedBBox
    return (outer.x0 <= inner.x0
            and inner.x1 <= outer.x1
            and outer.y0 <= inner.y0
            and inner.y1 <= outer.y1)
def findKeyBBoxes(cursor: PDFCursor):
    """Partition the document space so value bboxes can be matched to keys.

    NOTE(review): this is an unfinished work-in-progress. The executable body
    below was copy-pasted from Bulten.textSearcher and still references
    ``self`` and ``textQuery``, neither of which exists at module scope, so
    calling this raises NameError. Kept as-is pending a real implementation.
    """
    # returns a partitioning of the document space to capture value bboxes into keys
    # TODO: must be aware of page breaks!!
    # What can go wrong: the key stays on the previous page while its text runs
    # past the page break; the header and footer split the text in the middle.
    # We can tell an element sits above the header by the header's separator line.
    # In fact, the header and footer seem to always sit at the same absolute position.
    # in that case, to merge the value text on the next page with the previous page
    # looks below provided cursor
    # REASONING:
    # The notice metadata in tender bulletins consists of
    # left-aligned keys and right-aligned values.
    #
    # The value of key X does not fall onto the horizontal position of the next key.
    # By taking the horizontal positions of consecutive keys, the value
    # corresponding to a key can be located this way.
    # One exception: center-aligned values can overlap the key's position.
    # data-driven-design: the key with the largest horizontal overlap is matched with that value.
    #textQuery = utils.asciify(text.lower())
    #if textQuery != text:
    #    log.warning(f'PDF is searched for text {text}, which isn\'t in lower case OR has non-ASCII characters!')
    if cursor is None:
        cursor = PDFCursor(self.getPage(0))
    startPage = self.getPage(cursor.pageNum)
    # TODO: could be more functional with maps and all
    for component in startPage:
        if isinstance(component, LTTextContainer) and \
            cursor.isAbove(component):
            componentText = utils.asciify(component.get_text().lower())
            if componentText.find(textQuery) != -1:
                yield component
    pageNum = cursor.pageNum + 1
    page = self.getPage(pageNum)
    while not isinstance(page, Fault):
        for component in page:
            if isinstance(component, LTTextContainer):
                componentText = utils.asciify(component.get_text().lower())
                if componentText.find(textQuery) != -1:
                    yield component
        pageNum += 1
        page = self.getPage(pageNum)
    if not isinstance(page, IndexFault):
        log.warning('Page retrieval failed unexpectedly')
        log.warning(page)
    pass
def findValueBBoxes():
    """Return all bboxes that are likely to be values.

    NOTE(review): unimplemented stub.
    """
    # returns all bboxes that are likely to be values
    pass
def BBoxToDict():
    """Combine the key partitioning and value bboxes into key->value pairs.

    NOTE(review): unimplemented stub.
    """
    # given the key partitioning and value bboxes, will return a dictionary of key-value pairs
    pass
# if __name__ == "__main__":
# #testIndexFault()
# bulten = Bulten("./BULTEN_28032022_MAL_SONUC.pdf")
# #bulten.getIhaleTipi()
# #print('h')
# bulten.printBultenText()
| 36.433824 | 144 | 0.636024 |
795b7f5d25933e9510d742b68271568ffb44414c | 22,147 | py | Python | test/functional/test_framework/util.py | ComputerCraftr/peps-new | e92cc732a96a567b66c0ce665a03496e15b2702d | [
"MIT"
] | 2 | 2021-09-23T17:35:44.000Z | 2021-09-23T17:35:56.000Z | test/functional/test_framework/util.py | theabundancecoin/TACC | fd7d38c6a04dcb2da3b2755879b153b4731cddb2 | [
"MIT"
] | null | null | null | test/functional/test_framework/util.py | theabundancecoin/TACC | fd7d38c6a04dcb2da3b2755879b153b4731cddb2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
    """Check that *fee* is plausible for a tx of *tx_size* bytes at *fee_per_kB*.

    Raises AssertionError when the fee is below the exact target, or above the
    target computed with a 20-byte size allowance (the wallet's size estimate
    may be slightly off).
    """
    target_fee = round(tx_size * fee_per_kB / 1000, 8)
    upper_bound = (tx_size + 20) * fee_per_kB / 1000
    if fee < target_fee:
        raise AssertionError("Fee of %s PIV too low! (Should be %s PIV)" % (str(fee), str(target_fee)))
    if fee > upper_bound:
        raise AssertionError("Fee of %s PIV too high! (Should be %s PIV)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
    """Raise AssertionError unless every argument compares equal to the first."""
    values = (thing1, thing2) + args
    if any(v != thing1 for v in values[1:]):
        raise AssertionError("not(%s)" % " == ".join(str(v) for v in values))
def assert_true(condition, message = ""):
if not condition:
raise AssertionError(message)
def assert_false(condition, message = ""):
    """Raise AssertionError with *message* when *condition* is truthy."""
    if condition:
        raise AssertionError(message)
def assert_greater_than(thing1, thing2):
    """Assert thing1 > thing2, raising AssertionError otherwise."""
    if thing1 <= thing2:
        msg = "%s <= %s" % (str(thing1), str(thing2))
        raise AssertionError(msg)
def assert_greater_than_or_equal(thing1, thing2):
    """Assert thing1 >= thing2, raising AssertionError otherwise."""
    if thing1 < thing2:
        msg = "%s < %s" % (str(thing1), str(thing2))
        raise AssertionError(msg)
def assert_raises(exc, fun, *args, **kwds):
    """Assert fun(*args, **kwds) raises *exc*; delegates to assert_raises_message with no message check."""
    assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
    """Assert fun(*args, **kwds) raises *exc*, optionally checking *message* is in the error.

    NOTE(review): the *exc* branch reads ``e.error['message']``, which only
    exists on JSON-RPC-style exceptions; for ordinary exceptions with a
    non-None *message* this raises AttributeError instead of a clean failure.
    Upstream later switched to ``str(e)`` — confirm intended behavior here.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException:
        # RPC failures have a dedicated helper that also checks the error code.
        raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
    except exc as e:
        if message is not None and message not in e.error['message']:
            raise AssertionError("Expected substring not found:" + e.error['message'])
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
    """Execute a process and assert on its return code and output.

    Runs fun(*args, **kwds), which is expected to raise CalledProcessError.
    Raises AssertionError if no CalledProcessError is raised, if the return
    code differs from *returncode*, or if *output* is not a substring of the
    process output.

    Args:
        returncode (int): the expected process return code.
        output (string): [a substring of] the expected process output.
        fun (function): the function to call. This should execute a process.
        args*: positional arguments for the function.
        kwds**: named arguments for the function.
    """
    try:
        fun(*args, **kwds)
    except CalledProcessError as e:
        if e.returncode != returncode:
            raise AssertionError("Unexpected returncode %i" % e.returncode)
        if output not in e.output:
            raise AssertionError("Expected substring not found:" + e.output)
    else:
        raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
    """Run an RPC and verify that a specific JSONRPC exception code and message is raised.

    Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
    and verifies that the error code and message are as expected. Throws AssertionError if
    no JSONRPCException was raised or if the error code/message are not as expected.

    Args:
        code (int), optional: the error code returned by the RPC call (defined
            in src/rpc/protocol.h). Set to None if checking the error code is not required.
        message (string), optional: [a substring of] the error string returned by the
            RPC call. Set to None if checking the error string is not required.
        fun (function): the function to call. This should be the name of an RPC.
        args*: positional arguments for the function.
        kwds**: named arguments for the function.
    """
    # try_rpc raises AssertionError itself on code/message mismatch; it only
    # returns False when no JSONRPCException was raised at all.
    assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
    """Tries to run an rpc command.

    Test against error code and message if the rpc fails.
    Returns whether a JSONRPCException was raised."""
    try:
        fun(*args, **kwds)
    except JSONRPCException as e:
        # JSONRPCException was thrown as expected. Check the code and message values are correct.
        if (code is not None) and (code != e.error["code"]):
            raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
        if (message is not None) and (message not in e.error['message']):
            raise AssertionError("Expected substring (%s) not found in: %s" % (message, e.error['message']))
        return True
    except Exception as e:
        # Any non-JSONRPC exception is a test failure, not an expected RPC error.
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        return False
def assert_is_hex_string(string):
    """Assert that *string* parses as a hexadecimal number."""
    try:
        int(string, 16)
    except Exception as exc:
        msg = "Couldn't interpret %r as hexadecimal; raised: %s" % (string, exc)
        raise AssertionError(msg)
def assert_is_hash_string(string, length=64):
    """Assert *string* looks like a lowercase hex hash of the given length.

    Pass a falsy *length* (e.g. None) to skip the length check.
    """
    if not isinstance(string, str):
        raise AssertionError("Expected a string, got type %r" % type(string))
    if length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    if not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
    """
    Search *object_array* (a list of dicts) for entries whose key/value pairs
    all match *to_match*, and verify each match also carries the *expected*
    key/value pairs.

    With should_not_find=True, instead assert that no entry matches
    *to_match* (in which case *expected* must be empty).
    """
    if should_not_find:
        assert_equal(expected, {})
    num_matched = 0
    for item in object_array:
        # Skip entries that fail any of the to_match criteria.
        if any(item[key] != value for key, value in to_match.items()):
            continue
        if should_not_find:
            num_matched = num_matched + 1
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
        num_matched = num_matched + 1
    if num_matched == 0 and not should_not_find:
        raise AssertionError("No objects matched %s" % (str(to_match)))
    if num_matched > 0 and should_not_find:
        raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
    """Make sure the json library being used does not lose precision converting BTC values."""
    n = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(n)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    """Return the number of bytes encoded by *hex_string*."""
    raw = bytearray.fromhex(hex_string)
    return len(raw)
def bytes_to_hex_str(byte_str):
    """Encode *byte_str* as a lowercase ASCII hex string."""
    encoded = hexlify(byte_str)
    return encoded.decode('ascii')
def hash256(byte_str):
    """Return SHA256d of *byte_str* with the digest byte order reversed (Bitcoin style)."""
    first = hashlib.sha256(byte_str).digest()
    second = hashlib.sha256(first).digest()
    return second[::-1]
def hex_str_to_bytes(hex_str):
    """Decode an ASCII hex string into bytes."""
    encoded = hex_str.encode('ascii')
    return unhexlify(encoded)
def str_to_b64str(string):
    """UTF-8 encode *string* and return its base64 representation as ASCII text."""
    encoded = b64encode(string.encode('utf-8'))
    return encoded.decode('ascii')
def satoshi_round(amount):
    """Round *amount* down to 8 decimal places, returning a Decimal."""
    SATOSHI = Decimal('0.00000001')
    return Decimal(amount).quantize(SATOSHI, rounding=ROUND_DOWN)
def wait_until(predicate,
               *,
               attempts=float('inf'),
               timeout=float('inf'),
               lock=None,
               sendpings=None,
               mocktime=None):
    """Poll *predicate* every 0.5s (optionally holding *lock*) until it is true.

    Bounded by *attempts* and/or *timeout* seconds; when neither is given, a
    60s timeout is used. After each failed poll the optional *sendpings* and
    *mocktime* callbacks are invoked to nudge the nodes along. On expiry, the
    assert_greater_than calls surface whether attempts or time ran out.
    """
    if attempts == float('inf') and timeout == float('inf'):
        timeout = 60
    attempt = 0
    # Convert the relative timeout into an absolute deadline.
    timeout += time.time()
    while attempt < attempts and time.time() < timeout:
        if lock:
            with lock:
                if predicate():
                    return
        else:
            if predicate():
                return
        attempt += 1
        time.sleep(0.5)
        if sendpings is not None:
            sendpings()
        if mocktime is not None:
            mocktime(1)
    # Print the cause of the timeout
    assert_greater_than(attempts, attempt)
    assert_greater_than(timeout, time.time())
    raise RuntimeError('Unreachable')
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
    """Per-process seed used by p2p_port()/rpc_port() to avoid port collisions."""
    # Must be initialized with a unique integer for each process
    n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
    """
    Args:
        url (str): URL of the RPC server to call
        node_number (int): the node number (or id) that this calls to
    Kwargs:
        timeout (int): HTTP timeout in seconds
        coveragedir (str): directory for RPC-coverage log files, if enabled
    Returns:
        AuthServiceProxy. convenience object for making RPC calls.
    """
    proxy_kwargs = {}
    if timeout is not None:
        proxy_kwargs['timeout'] = timeout
    proxy = AuthServiceProxy(url, **proxy_kwargs)
    proxy.url = url  # store URL on proxy for info
    # The wrapper records every RPC method invoked when coverage tracking is on.
    coverage_logfile = coverage.get_filename(
        coveragedir, node_number) if coveragedir else None
    return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
    """Return the P2P listen port for node *n*, offset by the per-process PortSeed."""
    assert(n <= MAX_NODES)
    return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
    """Return the RPC listen port for node *n* (one PORT_RANGE above the p2p ports)."""
    return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, i, rpchost=None):
    """Build the authenticated RPC URL for node *i*, honoring an optional 'host[:port]' override."""
    rpc_u, rpc_p = get_auth_cookie(datadir)
    host = '127.0.0.1'
    port = rpc_port(i)
    if rpchost:
        # rpchost may be "host" or "host:port"; a port given here wins.
        parts = rpchost.split(':')
        if len(parts) == 2:
            host, port = parts
        else:
            host = rpchost
    return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
    """Create (if needed) the datadir for node *n* and write a regtest pivx.conf; return its path."""
    datadir = get_datadir_path(dirname, n)
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    # Credentials must match what get_auth_cookie() later reads back.
    rpc_u, rpc_p = rpc_auth_pair(n)
    with open(os.path.join(datadir, "pivx.conf"), 'w', encoding='utf8') as f:
        f.write("regtest=1\n")
        f.write("rpcuser=" + rpc_u + "\n")
        f.write("rpcpassword=" + rpc_p + "\n")
        f.write("port=" + str(p2p_port(n)) + "\n")
        f.write("rpcport=" + str(rpc_port(n)) + "\n")
        f.write("server=1\n")
        f.write("keypool=1\n")
        f.write("discover=0\n")
        f.write("listenonion=0\n")
        f.write("spendzeroconfchange=1\n")
        f.write("printtoconsole=0\n")
    return datadir
def rpc_auth_pair(n):
    """Return a deterministic (rpcuser, rpcpassword) pair for node *n*.

    The credentials deliberately contain non-ASCII characters to exercise
    Unicode handling along the config/auth code paths, as in the upstream
    Bitcoin Core test framework. The previous revision contained U+FFFD
    replacement characters, i.e. the intended characters had been destroyed
    by a lossy encoding round-trip; restored here to the upstream values.
    """
    return 'rpcuser💻' + str(n), 'rpcpass🔑' + str(n)
def get_datadir_path(dirname, n):
    """Return the data directory path for node *n* under *dirname*."""
    return os.path.join(dirname, "node{}".format(n))
def get_auth_cookie(datadir):
    """Fetch RPC credentials for the node using *datadir*.

    rpcuser/rpcpassword come from pivx.conf; a regtest .cookie file, when
    present, overrides both. Raises ValueError when no complete
    (user, password) pair can be found.
    """
    user = None
    password = None
    conf_path = os.path.join(datadir, "pivx.conf")
    if os.path.isfile(conf_path):
        with open(conf_path, 'r', encoding='utf8') as f:
            for line in f:
                if line.startswith("rpcuser="):
                    assert user is None  # Ensure that there is only one rpcuser line
                    user = line.split("=")[1].strip("\n")
                if line.startswith("rpcpassword="):
                    assert password is None  # Ensure that there is only one rpcpassword line
                    password = line.split("=")[1].strip("\n")
    cookie_path = os.path.join(datadir, "regtest", ".cookie")
    if os.path.isfile(cookie_path):
        with open(cookie_path, 'r') as f:
            parts = f.read().split(':')
        user = parts[0]
        password = parts[1]
    if user is None or password is None:
        raise ValueError("No RPC credentials")
    return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir):
    """Remove a leftover regtest RPC cookie file from *datadir*, if present."""
    if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
        logger.debug("Deleting leftover cookie file")
        os.remove(os.path.join(datadir, "regtest", ".cookie"))
def get_bip9_status(node, key):
    """Return the bip9_softforks entry for *key* from the node's blockchain info."""
    chain_info = node.getblockchaininfo()
    return chain_info['bip9_softforks'][key]
def set_node_times(nodes, t):
    """Set mocktime *t* on every node in *nodes*."""
    for n in nodes:
        n.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
    """Disconnect all peers of *from_connection* whose subver marks them as test node *node_num*."""
    for addr in [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
        try:
            from_connection.disconnectnode(addr)
        except JSONRPCException as e:
            # If this node is disconnected between calculating the peer id
            # and issuing the disconnect, don't worry about it.
            # This avoids a race condition if we're mass-disconnecting peers.
            if e.error['code'] != -29:  # RPC_CLIENT_NODE_NOT_CONNECTED
                raise
    # wait to disconnect
    wait_until(lambda: [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
    """One-shot connect *from_connection* to node *node_num* and wait for the version handshake."""
    ip_port = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_clique(nodes):
    """Connect every pair of nodes in both directions (full mesh).

    NOTE(review): the inner range starts at ``a``, so when b == a each node
    also issues a one-shot connect to itself — presumably harmless since it is
    "onetry", but worth confirming.
    """
    l = len(nodes)
    for a in range(l):
        for b in range(a, l):
            connect_nodes(nodes[a], b)
            connect_nodes(nodes[b], a)
# Transaction/Block functions
#############################
def find_output(node, txid, amount):
    """
    Return the index of the output of *txid* whose value equals *amount*.
    Raises RuntimeError when no such output exists.
    """
    txdata = node.getrawtransaction(txid, 1)
    for idx, vout in enumerate(txdata["vout"]):
        if vout["value"] == amount:
            return idx
    raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return (total_in, inputs): a random selection of unspent txouts from
    *from_node* whose values sum to at least *amount_needed*.
    Raises RuntimeError when the wallet has insufficient funds.
    """
    assert(confirmations_required >= 0)
    utxo = from_node.listunspent(confirmations_required)
    random.shuffle(utxo)
    inputs = []
    total_in = Decimal("0.00000000")
    while total_in < amount_needed and utxo:
        t = utxo.pop()
        total_in += t["amount"]
        inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Build the change output(s) for a transaction spending *amount_in* to pay
    *amount_out* plus *fee*; large change is split across two outputs.
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent * 2:
        # Create an extra change output to break up big inputs
        change_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - spent - outputs[change_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    # Randomize the fee in fee_increment steps above min_fee.
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    (total_in, inputs) = gather_inputs(from_node, amount + fee)
    outputs = make_change(from_node, total_in, amount, fee)
    outputs[to_node.getnewaddress()] = float(amount)
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
    """Ensure *node* owns at least *count* confirmed UTXOs; returns the final utxo list."""
    # Mine enough blocks so the wallet has mature, spendable coins to split.
    to_generate = int(0.5 * count) + 101
    while to_generate > 0:
        node.generate(min(25, to_generate))
        to_generate -= 25
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    # Split one utxo into two halves per iteration until the target is reached.
    for i in range(iterations):
        t = utxos.pop()
        inputs = []
        inputs.append({"txid": t["txid"], "vout": t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr1] = float(satoshi_round(send_value / 2))
        outputs[addr2] = float(satoshi_round(send_value / 2))
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransaction(raw_tx)["hex"]
        node.sendrawtransaction(signed_tx)
    # Confirm all the splitting transactions.
    while (node.getmempoolinfo()['size'] > 0):
        node.generate(1)
    utxos = node.listunspent()
    assert(len(utxos) >= count)
    return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
    """Build the hex for 128 large OP_RETURN txouts to pad a transaction.

    The returned string is an output-count byte (0x81 = 129: these 128 plus
    the spender's own change output) followed by 128 identical txouts, each a
    zero 8-byte value, a 0xfd0402 compact-size script length (516), and a
    516-byte OP_RETURN script.
    """
    # One script_pubkey: OP_RETURN OP_PUSHDATA2 <512 bytes of 0x01>.
    script_pubkey = "6a4d0200" + "01" * 512
    txout = "0000000000000000" + "fd0402" + script_pubkey
    return "81" + txout * 128
def create_tx(node, coinbase, to_address, amount):
    """Build and sign a 1-in/1-out transaction spending output 0 of *coinbase* to *to_address*."""
    raw = node.createrawtransaction([{"txid": coinbase, "vout": 0}],
                                    {to_address: amount})
    signed = node.signrawtransaction(raw)
    assert_equal(signed["complete"], True)
    return signed["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
    """Send *num* oversized transactions padded with *txouts*; returns their txids."""
    addr = node.getnewaddress()
    txids = []
    for _ in range(num):
        t = utxos.pop()
        inputs = [{"txid": t["txid"], "vout": t["vout"]}]
        outputs = {}
        change = t['amount'] - fee
        outputs[addr] = float(satoshi_round(change))
        rawtx = node.createrawtransaction(inputs, outputs)
        # Splice the pre-built OP_RETURN outputs into the serialized tx at hex
        # offset 92, replacing the original 1-byte output count (txouts begins
        # with the new count, 0x81).
        newtx = rawtx[0:92]
        newtx = newtx + txouts
        newtx = newtx + rawtx[94:]
        # Sign with "NONE" sighash so the spliced-in outputs are not covered by the signature.
        signresult = node.signrawtransaction(newtx, None, None, "NONE")
        txid = node.sendrawtransaction(signresult["hex"], True)
        txids.append(txid)
    return txids
def mine_large_block(node, utxos=None):
    """Fill the mempool with oversized transactions and mine one (near-full) block."""
    # generate a 66k transaction,
    # and 14 of them is close to the 1MB block limit
    num = 14
    txouts = gen_return_txouts()
    utxos = utxos if utxos is not None else []
    if len(utxos) < num:
        # Too few caller-provided utxos: fall back to the node's own utxo set.
        utxos.clear()
        utxos.extend(node.listunspent())
    fee = 100 * node.getnetworkinfo()["relayfee"]
    create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
    node.generate(1)
def find_vout_for_address(node, txid, addr):
    """
    Locate the vout index of *txid* paying to *addr*.
    Raises RuntimeError when no output sends to that address.
    """
    tx = node.getrawtransaction(txid, True)
    for i, vout in enumerate(tx["vout"]):
        if addr in vout["scriptPubKey"]["addresses"]:
            return i
    raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
### PIVX specific utils ###
vZC_DENOMS = [1, 5, 10, 50, 100, 500, 1000, 5000]
DEFAULT_FEE = 0.01
SPORK_ACTIVATION_TIME = 1563253447
SPORK_DEACTIVATION_TIME = 4070908800
def DecimalAmt(x):
"""Return Decimal from float for equality checks against rpc outputs"""
return Decimal("{:0.8f}".format(x))
def get_coinstake_address(node, expected_utxos=None):
    """Find a coinstake/coinbase address on the node, filtering by the number
    of UTXOs it has.

    If no filter is provided, returns the coinstake/coinbase address on the
    node containing the greatest number of spendable UTXOs.
    The default cached chain has one address per coinbase output.
    """
    gen_addrs = [u['address'] for u in node.listunspent() if u['generated']]
    assert len(set(gen_addrs)) > 0
    if expected_utxos is None:
        # Rank unique addresses by occurrence count and take the most frequent.
        ranked = sorted(((gen_addrs.count(a), a) for a in set(gen_addrs)), reverse=True)
        return ranked[0][1]
    matches = [a for a in set(gen_addrs) if gen_addrs.count(a) == expected_utxos]
    assert len(matches) > 0
    return matches[0]
| 37.601019 | 142 | 0.650291 |
795b8085b923811263343e965e46d664e9b5bc4f | 1,460 | py | Python | icubam/messaging/server.py | Inria-Chile/icubam | 96df2c54976d1b7bc25732c8a5f67fafa0adffab | [
"Apache-2.0"
] | null | null | null | icubam/messaging/server.py | Inria-Chile/icubam | 96df2c54976d1b7bc25732c8a5f67fafa0adffab | [
"Apache-2.0"
] | null | null | null | icubam/messaging/server.py | Inria-Chile/icubam | 96df2c54976d1b7bc25732c8a5f67fafa0adffab | [
"Apache-2.0"
] | null | null | null | from absl import logging # noqa: F401
from tornado import queues
import tornado.routing
import tornado.web
from icubam import base_server
from icubam.messaging import sms_sender
from icubam.messaging import scheduler
from icubam.messaging.handlers import onoff, schedule
class MessageServer(base_server.BaseServer):
  """Tornado server that sends and schedules SMS messages.

  Messages are pushed onto an internal queue by the MessageScheduler and
  drained asynchronously by `process`, which hands them to the SMS sender.
  """
  def __init__(self, config, port=8889):
    super().__init__(config, port)
    # Explicit `port` argument wins; otherwise fall back to the configured one.
    self.port = port if port is not None else self.config.messaging.port
    self.sender = sms_sender.get(self.config)
    self.queue = queues.Queue()
    self.scheduler = scheduler.MessageScheduler(
      config=self.config, db=self.db_factory.create(), queue=self.queue)
    # Coroutines started by the base server alongside the IO loop.
    self.callbacks = [self.process]
  def make_app(self):
    kwargs = dict(db_factory=self.db_factory, scheduler=self.scheduler)
    self.add_handler(onoff.OnOffHandler, **kwargs)
    self.add_handler(schedule.ScheduleHandler, **kwargs)
    # Only accepts request from same host
    return tornado.web.Application([
      (tornado.routing.HostMatches(r'(localhost|127\.0\.0\.1)'), self.routes)
    ])
  async def process(self):
    # Drain the queue forever; a failed send is logged but never stops the loop.
    async for msg in self.queue:
      try:
        self.sender.send(msg.phone, msg.text)
      except Exception as e:
        logging.error(f'Could not send message in message loop {e}.')
      finally:
        # Always acknowledge the item so queue.join() cannot dead-lock.
        self.queue.task_done()
  def run(self, delay=None):
    # Schedule all pending messages before entering the serving loop.
    self.scheduler.schedule_all(delay)
    super().run()
| 31.73913 | 79 | 0.713014 |
795b82199727337979ceef30de24f1e28d59c72a | 12,222 | py | Python | dags/ethereumetl_airflow/build_parse_dag.py | Falcon77/ethereum-etl-airflow | 2fe30ceb14e76bfc51b35f25af50027b210db75d | [
"MIT"
] | null | null | null | dags/ethereumetl_airflow/build_parse_dag.py | Falcon77/ethereum-etl-airflow | 2fe30ceb14e76bfc51b35f25af50027b210db75d | [
"MIT"
] | null | null | null | dags/ethereumetl_airflow/build_parse_dag.py | Falcon77/ethereum-etl-airflow | 2fe30ceb14e76bfc51b35f25af50027b210db75d | [
"MIT"
] | null | null | null | from __future__ import print_function
import json
import logging
import os
import time
from datetime import datetime, timedelta
from glob import glob
from airflow import models
from airflow.contrib.operators.bigquery_operator import BigQueryOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.sensors import ExternalTaskSensor
from airflow.operators.email_operator import EmailOperator
from google.api_core.exceptions import Conflict
from google.cloud import bigquery
from eth_utils import event_abi_to_log_topic, function_abi_to_4byte_selector
from google.cloud.bigquery import TimePartitioning
# Configure root logging for the DAG-definition process.
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
# Location of the deployed DAG bundle; defaults to the Cloud Composer path.
dags_folder = os.environ.get('DAGS_FOLDER', '/home/airflow/gcs/dags')
def build_parse_dag(
        dag_id,
        dataset_folder,
        parse_destination_dataset_project_id,
        notification_emails=None,
        parse_start_date=datetime(2018, 7, 1),
        schedule_interval='0 0 * * *',
        enabled=True,
        parse_all_partitions=True
):
    """Build an Airflow DAG that parses Ethereum logs/traces into BigQuery.

    One PythonOperator is created per *.json table-definition file found in
    `dataset_folder`. Each task runs a templated SQL query into a temporary
    table and then either copies it over the destination table
    (parse_all_partitions=True) or merges it into the destination
    (parse_all_partitions=False). Returns the DAG, or None when `enabled`
    is False.
    """
    if not enabled:
        logging.info('enabled is False, the DAG will not be built.')
        return None
    logging.info('parse_all_partitions is {}'.format(parse_all_partitions))
    SOURCE_PROJECT_ID = 'bigquery-public-data'
    SOURCE_DATASET_NAME = 'crypto_ethereum'
    environment = {
        'source_project_id': SOURCE_PROJECT_ID,
        'source_dataset_name': SOURCE_DATASET_NAME
    }
    default_dag_args = {
        'depends_on_past': False,
        'start_date': parse_start_date,
        'email_on_failure': True,
        'email_on_retry': False,
        'retries': 5,
        'retry_delay': timedelta(minutes=5)
    }
    if notification_emails and len(notification_emails) > 0:
        default_dag_args['email'] = [email.strip() for email in notification_emails.split(',')]
    dag = models.DAG(
        dag_id,
        catchup=False,
        schedule_interval=schedule_interval,
        default_args=default_dag_args)
    def create_task_and_add_to_dag(task_config):
        # Build one PythonOperator for a single table definition (task_config).
        dataset_name = 'ethereum_' + task_config['table']['dataset_name']
        table_name = task_config['table']['table_name']
        table_description = task_config['table']['table_description']
        schema = task_config['table']['schema']
        parser = task_config['parser']
        parser_type = parser.get('type', 'log')
        abi = json.dumps(parser['abi'])
        columns = [c.get('name') for c in schema]
        def parse_task(ds, **kwargs):
            # Runs at execution time: query -> temp table -> copy/merge -> cleanup.
            template_context = kwargs.copy()
            template_context['ds'] = ds
            # NOTE(review): 'params' points at the shared `environment` dict, so
            # the assignments below mutate it; presumably harmless because each
            # Airflow task runs in its own process — confirm if tasks ever share
            # an interpreter.
            template_context['params'] = environment
            template_context['params']['table_name'] = table_name
            template_context['params']['columns'] = columns
            template_context['params']['parser'] = parser
            template_context['params']['abi'] = abi
            if parser_type == 'log':
                template_context['params']['event_topic'] = abi_to_event_topic(parser['abi'])
            elif parser_type == 'trace':
                template_context['params']['method_selector'] = abi_to_method_selector(parser['abi'])
            template_context['params']['struct_fields'] = create_struct_string_from_schema(schema)
            template_context['params']['parse_all_partitions'] = parse_all_partitions
            client = bigquery.Client()
            # # # Create a temporary table
            dataset_name_temp = 'parse_temp'
            create_dataset(client, dataset_name_temp)
            # Millisecond suffix keeps concurrent runs from colliding on the name.
            temp_table_name = 'temp_{table_name}_{milliseconds}'\
                .format(table_name=table_name, milliseconds=int(round(time.time() * 1000)))
            temp_table_ref = client.dataset(dataset_name_temp).table(temp_table_name)
            temp_table = bigquery.Table(temp_table_ref, schema=read_bigquery_schema_from_dict(schema, parser_type))
            temp_table.description = table_description
            temp_table.time_partitioning = TimePartitioning(field='block_timestamp')
            logging.info('Creating table: ' + json.dumps(temp_table.to_api_repr()))
            temp_table = client.create_table(temp_table)
            assert temp_table.table_id == temp_table_name
            # # # Query to temporary table
            job_config = bigquery.QueryJobConfig()
            job_config.priority = bigquery.QueryPriority.INTERACTIVE
            job_config.destination = temp_table_ref
            sql_template = get_parse_sql_template(parser_type)
            # Render the Jinja template through the Airflow task's own renderer.
            sql = kwargs['task'].render_template('', sql_template, template_context)
            logging.info(sql)
            query_job = client.query(sql, location='US', job_config=job_config)
            submit_bigquery_job(query_job, job_config)
            assert query_job.state == 'DONE'
            # # # Copy / merge to destination
            if parse_all_partitions:
                # Copy temporary table to destination
                copy_job_config = bigquery.CopyJobConfig()
                copy_job_config.write_disposition = 'WRITE_TRUNCATE'
                dest_table_ref = client.dataset(dataset_name, project=parse_destination_dataset_project_id).table(table_name)
                copy_job = client.copy_table(temp_table_ref, dest_table_ref, location='US', job_config=copy_job_config)
                submit_bigquery_job(copy_job, copy_job_config)
                assert copy_job.state == 'DONE'
                # Need to do update description as copy above won't repect the description in case destination table
                # already exists
                table = client.get_table(dest_table_ref)
                table.description = table_description
                table = client.update_table(table, ["description"])
                assert table.description == table_description
            else:
                # Merge
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/dml-syntax#merge_statement
                merge_job_config = bigquery.QueryJobConfig()
                # Finishes faster, query limit for concurrent interactive queries is 50
                merge_job_config.priority = bigquery.QueryPriority.INTERACTIVE
                merge_sql_template = get_merge_table_sql_template()
                merge_template_context = template_context.copy()
                merge_template_context['params']['source_table'] = temp_table_name
                merge_template_context['params']['destination_dataset_project_id'] = parse_destination_dataset_project_id
                merge_template_context['params']['destination_dataset_name'] = dataset_name
                merge_template_context['params']['dataset_name_temp'] = dataset_name_temp
                merge_template_context['params']['columns'] = columns
                merge_sql = kwargs['task'].render_template('', merge_sql_template, merge_template_context)
                print('Merge sql:')
                print(merge_sql)
                merge_job = client.query(merge_sql, location='US', job_config=merge_job_config)
                submit_bigquery_job(merge_job, merge_job_config)
                assert merge_job.state == 'DONE'
            # Delete temp table
            client.delete_table(temp_table_ref)
        parsing_operator = PythonOperator(
            task_id=table_name,
            python_callable=parse_task,
            provide_context=True,
            execution_timeout=timedelta(minutes=60),
            dag=dag
        )
        return parsing_operator
    # All parse tasks wait until the load DAG has verified the latest logs.
    wait_for_ethereum_load_dag_task = ExternalTaskSensor(
        task_id='wait_for_ethereum_load_dag',
        external_dag_id='ethereum_load_dag',
        external_task_id='verify_logs_have_latest',
        dag=dag)
    files = get_list_of_json_files(dataset_folder)
    logging.info('files')
    logging.info(files)
    all_parse_tasks = []
    for f in files:
        task_config = read_json_file(f)
        task = create_task_and_add_to_dag(task_config)
        wait_for_ethereum_load_dag_task >> task
        all_parse_tasks.append(task)
    # Optionally notify by email after every parse task has succeeded.
    if notification_emails and len(notification_emails) > 0:
        send_email_task = EmailOperator(
            task_id='send_email',
            to=[email.strip() for email in notification_emails.split(',')],
            subject='Ethereum ETL Airflow Parse DAG Succeeded',
            html_content='Ethereum ETL Airflow Parse DAG Succeeded',
            dag=dag
        )
        for task in all_parse_tasks:
            task >> send_email_task
    return dag
def abi_to_event_topic(abi):
    """Return the 0x-prefixed log topic hash for an event ABI entry."""
    return '0x{}'.format(event_abi_to_log_topic(abi).hex())
def abi_to_method_selector(abi):
    """Return the 0x-prefixed 4-byte selector for a function ABI entry."""
    return '0x{}'.format(function_abi_to_4byte_selector(abi).hex())
def get_list_of_json_files(dataset_folder):
    """Return the paths of all *.json table-definition files in `dataset_folder`."""
    logging.info('get_list_of_json_files')
    logging.info(dataset_folder)
    logging.info(os.path.join(dataset_folder, '*.json'))
    # glob already returns a list; the previous wrapping comprehension was redundant.
    return glob(os.path.join(dataset_folder, '*.json'))
def get_parse_sql_template(parser_type):
    """Return the SQL template matching the parser type ('log' or anything else -> traces)."""
    if parser_type == 'log':
        return get_parse_logs_sql_template()
    return get_parse_traces_sql_template()
def get_parse_logs_sql_template():
    """Read and return the parse_logs.sql template from the DAGs folder."""
    template_path = os.path.join(dags_folder, 'resources/stages/parse/sqls/parse_logs.sql')
    with open(template_path) as template_file:
        return template_file.read()
def get_parse_traces_sql_template():
    """Read and return the parse_traces.sql template from the DAGs folder."""
    template_path = os.path.join(dags_folder, 'resources/stages/parse/sqls/parse_traces.sql')
    with open(template_path) as template_file:
        return template_file.read()
def get_merge_table_sql_template():
    """Read and return the merge_table.sql template from the DAGs folder."""
    template_path = os.path.join(dags_folder, 'resources/stages/parse/sqls/merge_table.sql')
    with open(template_path) as template_file:
        return template_file.read()
def read_json_file(filepath):
    """Parse the JSON document at `filepath` and return the resulting object."""
    with open(filepath) as file_handle:
        # json.load streams from the handle; no need to read the whole text first.
        return json.load(file_handle)
def create_struct_string_from_schema(schema):
    """Render schema fields as a BigQuery STRUCT field list: `name` TYPE, ..."""
    rendered = []
    for field in schema:
        rendered.append('`{}` {}'.format(field.get('name'), field.get('type')))
    return ', '.join(rendered)
def read_bigquery_schema_from_dict(schema, parser_type):
    """Convert a table-definition schema (list of dicts) into BigQuery SchemaFields.

    Prepends the fixed columns every parsed table has (block_timestamp,
    block_number, transaction_hash) plus a parser-type specific column
    (log_index for logs, trace_address for traces), then appends the
    user-defined fields from `schema`.
    """
    # Columns shared by every parsed table, regardless of parser type.
    result = [
        bigquery.SchemaField(
            name='block_timestamp',
            field_type='TIMESTAMP',
            mode='REQUIRED',
            description='Timestamp of the block where this event was emitted'),
        bigquery.SchemaField(
            name='block_number',
            field_type='INTEGER',
            mode='REQUIRED',
            description='The block number where this event was emitted'),
        bigquery.SchemaField(
            name='transaction_hash',
            field_type='STRING',
            mode='REQUIRED',
            description='Hash of the transactions in which this event was emitted')
    ]
    if parser_type == 'log':
        result.append(bigquery.SchemaField(
            name='log_index',
            field_type='INTEGER',
            mode='REQUIRED',
            description='Integer of the log index position in the block of this event'))
    elif parser_type == 'trace':
        result.append(bigquery.SchemaField(
            name='trace_address',
            field_type='STRING',
            description='Comma separated list of trace address in call tree'))
    # User-defined fields: type defaults to STRING, mode to NULLABLE.
    for field in schema:
        result.append(bigquery.SchemaField(
            name=field.get('name'),
            field_type=field.get('type', 'STRING'),
            mode=field.get('mode', 'NULLABLE'),
            description=field.get('description')))
    return result
def submit_bigquery_job(job, configuration):
    """Wait for a BigQuery job to finish, asserting it produced no errors.

    Returns the job result; logs and re-raises on any failure.
    """
    try:
        logging.info('Creating a job: ' + json.dumps(configuration.to_api_repr()))
        # job.result() blocks until the job completes (or raises on failure).
        result = job.result()
        logging.info(result)
        assert job.errors is None or len(job.errors) == 0
        return result
    except Exception:
        # Surface the job's own error list before propagating the exception.
        logging.info(job.errors)
        raise
def create_dataset(client, dataset_name):
    """Create the BigQuery dataset `dataset_name` if it does not exist.

    Returns a dataset handle in either case; an existing dataset is not an error.
    """
    dataset = client.dataset(dataset_name)
    try:
        logging.info('Creating new dataset ...')
        dataset = client.create_dataset(dataset)
        logging.info('New dataset created: ' + dataset_name)
    except Conflict:
        # 409 Conflict: the dataset already exists — keep the original reference.
        logging.info('Dataset already exists')
    return dataset
| 39.425806 | 125 | 0.660367 |
795b823f275b320e0d756250769abfd82cc394a7 | 22,449 | py | Python | python/jdspider/JDSpider/spiders/JDSpider.py | mrlittlenew/springboot-webmagic | fd4ef5a7a7dad931a551e57552f6ae2b9dbb5f74 | [
"Apache-2.0"
] | null | null | null | python/jdspider/JDSpider/spiders/JDSpider.py | mrlittlenew/springboot-webmagic | fd4ef5a7a7dad931a551e57552f6ae2b9dbb5f74 | [
"Apache-2.0"
] | null | null | null | python/jdspider/JDSpider/spiders/JDSpider.py | mrlittlenew/springboot-webmagic | fd4ef5a7a7dad931a551e57552f6ae2b9dbb5f74 | [
"Apache-2.0"
] | 1 | 2019-08-24T10:17:53.000Z | 2019-08-24T10:17:53.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__author__ = 'Kandy.Ye'
__mtime__ = '2017/4/12'
"""
import re
import logging
import json
import requests
from scrapy import Spider
from scrapy.selector import Selector
from scrapy.http import Request
from JDSpider.items import *
# Subdomains of category links worth following ('list' pages hold products).
key_word = ['book', 'e', 'channel', 'mvd', 'list']
# Base host for relative "next page" links on list pages.
Base_url = 'https://list.jd.com'
# JSON endpoint returning the price for a given sku id (appended to the URL).
price_url = 'https://p.3.cn/prices/mgets?skuIds=J_'
# Paginated comment endpoint: %s placeholders are (product_id, page).
comment_url = 'https://club.jd.com/comment/productPageComments.action?productId=%s&score=0&sortType=5&page=%s&pageSize=10'
# Promotion endpoint: %s placeholders are (sku_id, shop_id, vender_id, category).
favourable_url = 'https://cd.jd.com/promotion/v2?skuId=%s&area=1_72_2799_0&shopId=%s&venderId=%s&cat=%s'
class JDSpider(Spider):
    """Crawl JD.com: category index -> product lists -> product pages -> comments.

    Yields CategoriesItem, ShopItem, ProductsItem, CommentSummaryItem,
    HotCommentTagItem, CommentItem and CommentImageItem instances.

    The duplicated comment-parsing logic of parse_comments/parse_comments2 has
    been consolidated into the private _comment_items/_*_item helpers; both
    callbacks now share one implementation.
    """
    name = "JDSpider"
    allowed_domains = ["jd.com"]
    start_urls = [
        'https://www.jd.com/allSort.aspx'
    ]
    # Silence the noisy logs produced by the synchronous requests calls below.
    logging.getLogger("requests").setLevel(logging.WARNING)

    def start_requests(self):
        for url in self.start_urls:
            yield Request(url=url, callback=self.parse_category)

    def parse_category(self, response):
        """Parse the all-categories page; recurse into channel pages, emit list pages."""
        selector = Selector(response)
        try:
            texts = selector.xpath('//div[@class="category-item m"]/div[@class="mc"]/div[@class="items"]/dl/dd/a').extract()
            for text in texts:
                for link, label in re.findall(r'<a href="(.*?)" target="_blank">(.*?)</a>', text):
                    # Links are scheme-less ('//list.jd.com/...'); the first
                    # dotted component minus the leading '//' is the subdomain.
                    subdomain = link.split('.')[0][2:]
                    if subdomain not in key_word:
                        continue
                    if subdomain != 'list':
                        # Intermediate channel page: recurse until a list page.
                        yield Request(url='https:' + link, callback=self.parse_category)
                    else:
                        categoriesItem = CategoriesItem()
                        categoriesItem['name'] = label
                        categoriesItem['url'] = 'https:' + link
                        categoriesItem['_id'] = link.split('=')[1].split('&')[0]
                        yield categoriesItem
                        yield Request(url='https:' + link, callback=self.parse_list)
        except Exception as e:
            print('error:', e)

    def parse_list(self, response):
        """Yield one product-page request per item on a list page, then follow pagination."""
        meta = dict()
        meta['category'] = response.url.split('=')[1].split('&')[0]
        selector = Selector(response)
        texts = selector.xpath('//*[@id="plist"]/ul/li/div/div[@class="p-img"]/a').extract()
        for text in texts:
            items = re.findall(r'<a target="_blank" href="(.*?)">', text)
            yield Request(url='https:' + items[0], callback=self.parse_product, meta=meta)
        # Follow the "next page" link, if present.
        next_list = response.xpath('//a[@class="pn-next"]/@href').extract()
        if next_list:
            yield Request(url=Base_url + next_list[0], callback=self.parse_list)

    def parse_product(self, response):
        """Parse a product page: emit ShopItem + ProductsItem, then request comments.

        Price and promotion data come from separate JSON endpoints fetched
        synchronously with `requests`.
        """
        category = response.meta['category']
        ids = re.findall(r"venderId:(.*?),\s.*?shopId:'(.*?)'", response.text)
        if not ids:
            ids = re.findall(r"venderId:(.*?),\s.*?shopId:(.*?),", response.text)
        vender_id = ids[0][0]
        shop_id = ids[0][1]
        # --- shop item ---
        shopItem = ShopItem()
        shopItem['shopId'] = shop_id
        shopItem['venderId'] = vender_id
        shopItem['url1'] = 'http://mall.jd.com/index-%s.html' % (shop_id)
        try:
            shopItem['url2'] = 'https:' + response.xpath('//ul[@class="parameter2 p-parameter-list"]/li/a/@href').extract()[0]
        except Exception:
            shopItem['url2'] = shopItem['url1']
        name = ''
        if shop_id == '0':
            name = '京东自营'
        else:
            # The shop name appears in different DOM locations depending on the
            # page template; try each known location before defaulting to
            # JD self-operated.
            try:
                name = response.xpath('//ul[@class="parameter2 p-parameter-list"]/li/a//text()').extract()[0]
            except Exception:
                try:
                    name = response.xpath('//div[@class="name"]/a//text()').extract()[0].strip()
                except Exception:
                    try:
                        name = response.xpath('//div[@class="shopName"]/strong/span/a//text()').extract()[0].strip()
                    except Exception:
                        try:
                            name = response.xpath('//div[@class="seller-infor"]/a//text()').extract()[0].strip()
                        except Exception:
                            name = u'京东自营'
        shopItem['name'] = name
        shopItem['_id'] = name
        yield shopItem
        # --- product item ---
        productsItem = ProductsItem()
        productsItem['shopId'] = shop_id
        productsItem['category'] = category
        try:
            title = response.xpath('//div[@class="sku-name"]/text()').extract()[0].replace(u"\xa0", "").strip()
        except Exception:
            title = response.xpath('//div[@id="name"]/h1/text()').extract()[0]
        productsItem['name'] = title
        # URL looks like '.../<product_id>.html'; strip the extension.
        product_id = response.url.split('/')[-1][:-5]
        productsItem['_id'] = product_id
        productsItem['url'] = response.url
        # Description: all text under the parameter list.
        desc = response.xpath('//ul[@class="parameter2 p-parameter-list"]//text()').extract()
        productsItem['description'] = ';'.join(i.strip() for i in desc)
        # Price comes from a separate JSON endpoint.
        price_resp = requests.get(url=price_url + product_id)
        price_json = price_resp.json()
        productsItem['reallyPrice'] = price_json[0]['p']
        productsItem['originalPrice'] = price_json[0]['m']
        # Coupons / promotions.
        res_url = favourable_url % (product_id, shop_id, vender_id, category.replace(',', '%2c'))
        fav_resp = requests.get(res_url)
        fav_data = fav_resp.json()
        if fav_data['skuCoupon']:
            desc1 = []
            for coupon in fav_data['skuCoupon']:
                desc1.append(u'有效期%s至%s,满%s减%s' % (
                    coupon['beginTime'], coupon['endTime'], coupon['quota'], coupon['discount']))
            productsItem['favourableDesc1'] = ';'.join(desc1)
        if fav_data['prom'] and fav_data['prom']['pickOneTag']:
            desc2 = []
            for tag in fav_data['prom']['pickOneTag']:
                desc2.append(tag['content'])
            # NOTE(review): this overwrites 'favourableDesc1' set just above; it
            # was probably meant to be 'favourableDesc2'. Kept as-is so the item
            # schema and downstream consumers are unchanged — confirm intent.
            productsItem['favourableDesc1'] = ';'.join(desc2)
        yield productsItem
        meta = dict()
        meta['product_id'] = product_id
        yield Request(url=comment_url % (product_id, '0'), callback=self.parse_comments, meta=meta)

    def parse_comments(self, response):
        """Parse the first comment page, then schedule the remaining pages."""
        try:
            data = json.loads(response.text)
        except Exception as e:
            print('get comment failed:', e)
            return None
        product_id = response.meta['product_id']
        for item in self._comment_items(data, product_id):
            yield item
        # Schedule the remaining comment pages (capped at 60).
        max_page = int(data.get('maxPage', '1'))
        if max_page > 60:
            max_page = 60
        for page in range(1, max_page):
            meta = dict()
            meta['product_id'] = product_id
            yield Request(url=comment_url % (product_id, str(page)),
                          callback=self.parse_comments2, meta=meta)

    def parse_comments2(self, response):
        """Parse a follow-up comment page (no further pagination)."""
        try:
            data = json.loads(response.text)
        except Exception as e:
            print('get comment failed:', e)
            return None
        product_id = response.meta['product_id']
        for item in self._comment_items(data, product_id):
            yield item

    def _comment_items(self, data, product_id):
        """Yield every item contained in one page of the comment API response."""
        yield self._summary_item(data)
        for tag in data['hotCommentTagStatistics']:
            yield self._hot_tag_item(tag)
        for raw in data['comments']:
            yield self._comment_item(raw, product_id)
            if 'images' in raw:
                for image in raw['images']:
                    yield self._image_item(image)

    @staticmethod
    def _summary_item(data):
        """Build a CommentSummaryItem from one comment-API JSON payload."""
        summary = data.get('productCommentSummary')
        item = CommentSummaryItem()
        item['goodRateShow'] = summary.get('goodRateShow')
        item['poorRateShow'] = summary.get('poorRateShow')
        item['poorCountStr'] = summary.get('poorCountStr')
        item['averageScore'] = summary.get('averageScore')
        item['generalCountStr'] = summary.get('generalCountStr')
        item['showCount'] = summary.get('showCount')
        item['showCountStr'] = summary.get('showCountStr')
        item['goodCount'] = summary.get('goodCount')
        item['generalRate'] = summary.get('generalRate')
        item['generalCount'] = summary.get('generalCount')
        item['skuId'] = summary.get('skuId')
        item['goodCountStr'] = summary.get('goodCountStr')
        item['poorRate'] = summary.get('poorRate')
        item['afterCount'] = summary.get('afterCount')
        item['goodRateStyle'] = summary.get('goodRateStyle')
        item['poorCount'] = summary.get('poorCount')
        item['skuIds'] = summary.get('skuIds')
        item['poorRateStyle'] = summary.get('poorRateStyle')
        item['generalRateStyle'] = summary.get('generalRateStyle')
        item['commentCountStr'] = summary.get('commentCountStr')
        item['commentCount'] = summary.get('commentCount')
        item['productId'] = summary.get('productId')  # same id as ProductsItem
        item['_id'] = summary.get('productId')
        item['afterCountStr'] = summary.get('afterCountStr')
        item['goodRate'] = summary.get('goodRate')
        item['generalRateShow'] = summary.get('generalRateShow')
        item['jwotestProduct'] = data.get('jwotestProduct')
        item['maxPage'] = data.get('maxPage')
        item['score'] = data.get('score')
        item['soType'] = data.get('soType')
        item['imageListCount'] = data.get('imageListCount')
        return item

    @staticmethod
    def _hot_tag_item(tag):
        """Build a HotCommentTagItem from one hot-tag dict."""
        item = HotCommentTagItem()
        item['_id'] = tag.get('id')
        item['name'] = tag.get('name')
        item['status'] = tag.get('status')
        item['rid'] = tag.get('rid')
        item['productId'] = tag.get('productId')
        item['count'] = tag.get('count')
        item['created'] = tag.get('created')
        item['modified'] = tag.get('modified')
        item['type'] = tag.get('type')
        item['canBeFiltered'] = tag.get('canBeFiltered')
        return item

    @staticmethod
    def _comment_item(raw, product_id):
        """Build a CommentItem from one raw comment dict."""
        comment = CommentItem()
        comment['_id'] = raw.get('id')
        comment['productId'] = product_id
        comment['guid'] = raw.get('guid')
        comment['content'] = raw.get('content')
        comment['creationTime'] = raw.get('creationTime')
        comment['isTop'] = raw.get('isTop')
        comment['referenceId'] = raw.get('referenceId')
        comment['referenceName'] = raw.get('referenceName')
        comment['referenceType'] = raw.get('referenceType')
        comment['referenceTypeId'] = raw.get('referenceTypeId')
        comment['firstCategory'] = raw.get('firstCategory')
        comment['secondCategory'] = raw.get('secondCategory')
        comment['thirdCategory'] = raw.get('thirdCategory')
        comment['replyCount'] = raw.get('replyCount')
        comment['score'] = raw.get('score')
        comment['status'] = raw.get('status')
        comment['title'] = raw.get('title')
        comment['usefulVoteCount'] = raw.get('usefulVoteCount')
        comment['uselessVoteCount'] = raw.get('uselessVoteCount')
        comment['userImage'] = 'http://' + raw.get('userImage')
        comment['userImageUrl'] = 'http://' + raw.get('userImageUrl')
        comment['userLevelId'] = raw.get('userLevelId')
        comment['userProvince'] = raw.get('userProvince')
        comment['viewCount'] = raw.get('viewCount')
        comment['orderId'] = raw.get('orderId')
        comment['isReplyGrade'] = raw.get('isReplyGrade')
        comment['nickname'] = raw.get('nickname')
        comment['userClient'] = raw.get('userClient')
        comment['mergeOrderStatus'] = raw.get('mergeOrderStatus')
        comment['discussionId'] = raw.get('discussionId')
        comment['productColor'] = raw.get('productColor')
        comment['productSize'] = raw.get('productSize')
        comment['imageCount'] = raw.get('imageCount')
        comment['integral'] = raw.get('integral')
        comment['userImgFlag'] = raw.get('userImgFlag')
        comment['anonymousFlag'] = raw.get('anonymousFlag')
        comment['userLevelName'] = raw.get('userLevelName')
        comment['plusAvailable'] = raw.get('plusAvailable')
        comment['recommend'] = raw.get('recommend')
        comment['userLevelColor'] = raw.get('userLevelColor')
        comment['userClientShow'] = raw.get('userClientShow')
        comment['isMobile'] = raw.get('isMobile')
        comment['days'] = raw.get('days')
        comment['afterDays'] = raw.get('afterDays')
        return comment

    @staticmethod
    def _image_item(image):
        """Build a CommentImageItem from one comment image dict."""
        item = CommentImageItem()
        item['_id'] = image.get('id')
        item['associateId'] = image.get('associateId')  # same as CommentItem.discussionId
        item['productId'] = image.get('productId')  # NOTE: the API returns 0 here, not the real product id
        item['imgUrl'] = 'http:' + image.get('imgUrl')
        item['available'] = image.get('available')
        item['pin'] = image.get('pin')
        item['dealt'] = image.get('dealt')
        item['imgTitle'] = image.get('imgTitle')
        item['isMain'] = image.get('isMain')
        return item
| 52.697183 | 126 | 0.612366 |
795b830c6839a5075003cf65bdd61c8840332721 | 520 | py | Python | Chatbot_Model/Text_Similarity/test.py | guci314/Chatbot_CN | 02044eed4a141aa8c61d6064c166f95dbdae894c | [
"Apache-2.0"
] | 8 | 2019-08-12T12:38:35.000Z | 2022-02-21T03:25:04.000Z | Chatbot_Model/Text_Similarity/test.py | JianboTang/Chatbot_CN | a0f7194252a189f8bc2b62fd16eb2abe432c0bf9 | [
"Apache-2.0"
] | null | null | null | Chatbot_Model/Text_Similarity/test.py | JianboTang/Chatbot_CN | a0f7194252a189f8bc2b62fd16eb2abe432c0bf9 | [
"Apache-2.0"
] | 5 | 2020-01-08T15:30:38.000Z | 2022-02-15T02:39:46.000Z | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: test.py
Description :
Author : charl
date: 2018/11/27
-------------------------------------------------
Change Activity: 2018/11/27:
-------------------------------------------------
"""
import re
# Sample spam text: spacers and punctuation are interleaved to obscure a
# contact number advertised after "我Q:".
line = "想做/ 兼_职/学生_/ 的 、加,我Q: 1 5. 8 0. !!?? 8 6 。0. 2。 3 有,惊,喜,哦"
# line = line.decode("utf8")
# Remove whitespace plus ASCII and full-width punctuation so the hidden
# digits join back together.  NOTE(review): inside [...] most of these
# characters ('+', '*', '(') are literals, not metacharacters.
string = re.sub("[\s+\.\!\/_,$%^*(+\"\']+|[+——!,。:??、~@#¥%……&*()]+", "", line)
print(string)
| 26 | 78 | 0.313462 |
795b834e229f484b2777e3dde64e6efd9b1ae8d7 | 1,166 | py | Python | AlphaDDA1/Othello/ringbuffer.py | KazuhisaFujita/AlphaDDA | 664742567883cf3e08c2c53b3bce3112b8cc0560 | [
"MIT"
] | 11 | 2021-11-13T01:43:28.000Z | 2021-12-19T06:40:34.000Z | AlphaZero/Othello66/ringbuffer.py | KazuhisaFujita/AlphaDDA | 664742567883cf3e08c2c53b3bce3112b8cc0560 | [
"MIT"
] | null | null | null | AlphaZero/Othello66/ringbuffer.py | KazuhisaFujita/AlphaDDA | 664742567883cf3e08c2c53b3bce3112b8cc0560 | [
"MIT"
] | null | null | null | #---------------------------------------
#Since : 2019/04/24
#Update: 2019/07/25
# -*- coding: utf-8 -*-
#---------------------------------------
import numpy as np
class RingBuffer:
    """Fixed-capacity circular buffer over a plain list.

    Empty slots hold [] as a sentinel; `start` is the read index and
    `end` the write index.  When the write index catches up with the
    read index the oldest slot is dropped.
    """
    def __init__(self, buf_size):
        self.size = buf_size
        # Each slot gets its own fresh [] sentinel.
        self.buf = [[] for _ in range(buf_size)]
        self.start = 0
        self.end = 0
    def add(self, el):
        """Store el at the write position, evicting the oldest entry when full."""
        self.buf[self.end] = el
        self.end = (self.end + 1) % self.size
        if self.end == self.start:
            self.start = (self.start + 1) % self.size
    def Get_buffer(self):
        """Return every slot, walking backwards from the write position."""
        return [self.buf[(self.end - offset) % self.size]
                for offset in range(self.size)]
    def Get_buffer_start_end(self):
        """Return slots from `start` until the first empty ([]) sentinel."""
        collected = []
        for offset in range(self.size):
            slot = self.buf[(self.start + offset) % self.size]
            if slot == []:
                break
            collected.append(slot)
        return collected
    def get(self):
        """Pop and return the element at the read position."""
        item = self.buf[self.start]
        self.start = (self.start + 1) % self.size
        return item
| 26.5 | 53 | 0.482847 |
795b83c59c07689c2b839af48e13fdce0b8a212a | 1,514 | py | Python | venv/lib/python3.6/site-packages/click/globals.py | aitoehigie/britecore_flask | eef1873dbe6b2cc21f770bc6dec783007ae4493b | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/click/globals.py | aitoehigie/britecore_flask | eef1873dbe6b2cc21f770bc6dec783007ae4493b | [
"MIT"
] | 1 | 2021-06-01T23:32:38.000Z | 2021-06-01T23:32:38.000Z | venv/lib/python3.6/site-packages/click/globals.py | aitoehigie/britecore_flask | eef1873dbe6b2cc21f770bc6dec783007ae4493b | [
"MIT"
] | null | null | null | from threading import local
_local = local()
def get_current_context(silent=False):
    """Return the innermost active click context.

    This is the implicit counterpart of the :func:`pass_context`
    decorator and is what helpers such as :func:`echo` use to adapt
    their behavior to the context currently in scope (pushed via
    :meth:`Context.scope`).
    .. versionadded:: 5.0
    :param silent: if set to `True` the return value is `None` when no
                   context is available instead of raising a
                   :exc:`RuntimeError`.
    """
    stack = getattr(_local, "stack", None)
    if stack:
        return stack[-1]
    # No stack yet on this thread, or the stack is empty.
    if not silent:
        raise RuntimeError("There is no active click context.")
def push_context(ctx):
    """Pushes a new context to the current stack."""
    # Lazily create the per-thread stack on first use.
    stack = _local.__dict__.setdefault("stack", [])
    stack.append(ctx)
def pop_context():
    """Removes the top level from the stack."""
    # Assumes a prior push_context() on this thread: raises AttributeError
    # if no stack was ever created, IndexError if it is empty.
    _local.stack.pop()
def resolve_color_default(color=None):
    """Internal helper to get the default value of the color flag.  If a
    value is passed it's returned unchanged, otherwise it's looked up from
    the current context.
    """
    # Fix: the original docstring opened with four quotes (""""), leaking a
    # stray quote character into the documented text.
    if color is not None:
        return color
    ctx = get_current_context(silent=True)
    if ctx is not None:
        return ctx.color
| 30.897959 | 77 | 0.669749 |
795b83ef1f6e7c7f8c9def970422a5d9e17362db | 504 | py | Python | nevergrad/instrumentation/__init__.py | vishalshar/nevergrad | 07b5b332786ce5ff831dfabee892bb9397838f70 | [
"MIT"
] | 1 | 2021-07-22T16:18:01.000Z | 2021-07-22T16:18:01.000Z | nevergrad/instrumentation/__init__.py | akhti/nevergrad | 98a4ca92dff704f9df0bc58554bd51e5fa477362 | [
"MIT"
] | null | null | null | nevergrad/instrumentation/__init__.py | akhti/nevergrad | 98a4ca92dff704f9df0bc58554bd51e5fa477362 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .instantiate import register_file_type, FolderFunction
from . import variables
from . import variables as var
from .core import Instrumentation, InstrumentedFunction
from .utils import TemporaryDirectoryCopy, CommandFunction
__all__ = ["Instrumentation", "var", "CommandFunction", "FolderFunction"]
| 38.769231 | 73 | 0.797619 |
795b84e95c3276ba53c01d072c06e293591cc54c | 783 | py | Python | rest_framework/__init__.py | sanoma/django-rest-framework | 45e90c3398e326b10e3c5522a2e9ea765786ca08 | [
"BSD-2-Clause"
] | 1 | 2020-03-18T22:31:29.000Z | 2020-03-18T22:31:29.000Z | rest_framework/__init__.py | sanoma/django-rest-framework | 45e90c3398e326b10e3c5522a2e9ea765786ca08 | [
"BSD-2-Clause"
] | 3 | 2020-03-23T18:01:51.000Z | 2021-03-19T23:15:15.000Z | pyenv/lib/python3.6/site-packages/rest_framework/__init__.py | ronald-rgr/ai-chatbot-smartguide | c9c830feb6b66c2e362f8fb5d147ef0c4f4a08cf | [
"Apache-2.0"
] | 1 | 2019-06-26T15:51:46.000Z | 2019-06-26T15:51:46.000Z | """
______ _____ _____ _____ __
| ___ \ ___/ ___|_ _| / _| | |
| |_/ / |__ \ `--. | | | |_ _ __ __ _ _ __ ___ _____ _____ _ __| |__
| /| __| `--. \ | | | _| '__/ _` | '_ ` _ \ / _ \ \ /\ / / _ \| '__| |/ /
| |\ \| |___/\__/ / | | | | | | | (_| | | | | | | __/\ V V / (_) | | | <
\_| \_\____/\____/ \_/ |_| |_| \__,_|_| |_| |_|\___| \_/\_/ \___/|_| |_|\_|
"""
# Package identity constants exposed at the top level of the package.
__title__ = 'Django REST framework'
__version__ = '3.5.3'
__author__ = 'Tom Christie'
__license__ = 'BSD 2-Clause'
__copyright__ = 'Copyright 2011-2016 Tom Christie'
# Version synonym
VERSION = __version__
# Header encoding (see RFC5987)
HTTP_HEADER_ENCODING = 'iso-8859-1'
# Default datetime input and output formats
ISO_8601 = 'iso-8601'
| 32.625 | 80 | 0.484036 |
795b8558cc34eb007597a86f3c85f23982339fec | 1,736 | py | Python | protocols/telnetAPI.py | Diapolo10/Grail-0.6 | 3e7b42f105546132f7a6bbe7751a82c9bcc76dd5 | [
"CNRI-Jython"
] | 8 | 2015-02-18T18:50:50.000Z | 2022-03-15T22:21:03.000Z | protocols/telnetAPI.py | SimiCode/Grail-Web-Browser | 16b86d3215068d334eacf6153b71a748eed53d3d | [
"CNRI-Jython"
] | null | null | null | protocols/telnetAPI.py | SimiCode/Grail-Web-Browser | 16b86d3215068d334eacf6153b71a748eed53d3d | [
"CNRI-Jython"
] | 3 | 2016-04-04T23:54:07.000Z | 2020-10-29T04:25:42.000Z | """Telnet protocol handler for URLs of the form telnet://host[:port]
For Unix only; requires xterm in your $PATH.
"""
import os, urllib
from nullAPI import null_access
class telnet_access(null_access):
    """Handle a telnet:// URL by launching `xterm -e telnet host [port]`.

    All of the work happens in __init__ (Python 2 code): the process
    forks, the child closes its inherited descriptors and exec's xterm,
    and the parent returns immediately.
    """
    def __init__(self, url, method, params):
        null_access.__init__(self, url, method, params)
        # Split the URL into host and port, discarding any user:password@
        # prefix (userpasswd is intentionally unused).
        host, junk = urllib.splithost(url)
        userpasswd, host = urllib.splituser(host)
        host, port = urllib.splitport(host)
        # XXX I tried doing this using os.system(), but the file
        # descriptors that Grail has open seemed to be confusing
        # telnet or xterm.  So we need to close all file descriptors,
        # and this must be done in the child process, so now that
        # we're forking anyway, we might as well use os.exec*.
        # XXX To do: reap child processes after they've died!
        # Use os.waitpid(-1, os.WNOHANG) to do this.
        # But perhaps we should only wait for pids originating in this
        # module.
        cmd = ["xterm", "-e", "telnet", host]
        if port:
            cmd.append(str(port))
        pid = os.fork()
        if pid:
            # Parent process
            return
        # Child process
        try:
            # Close all file descriptors
            # XXX How to know how many there are?
            for i in range(3, 200):
                try:
                    os.close(i)
                except os.error:
                    pass
            # XXX Assume xterm is on $PATH
            os.execvp(cmd[0], cmd)
            # This doesn't return when successful
        except:
            print "Exception in os.execvp() or os.close()"
            # Don't fall back in the parent's stack!
            os._exit(127)
| 32.754717 | 70 | 0.565092 |
795b8595131718dadc74a32e7ebb9bbebbe1fb69 | 434 | py | Python | lab7/l7z1.py | pawelgalka/AGH_Operational_Research | f1962f1bf93099b4ded734533a297c92e4f28283 | [
"MIT"
] | null | null | null | lab7/l7z1.py | pawelgalka/AGH_Operational_Research | f1962f1bf93099b4ded734533a297c92e4f28283 | [
"MIT"
] | null | null | null | lab7/l7z1.py | pawelgalka/AGH_Operational_Research | f1962f1bf93099b4ded734533a297c92e4f28283 | [
"MIT"
] | null | null | null | #Zadanie 1 Paweł Gałka
from scipy.optimize import linprog
import numpy as np
np.set_printoptions(precision=3)
#f(x) = 2*x_1 + x_2 + 3*x_3 -> max więc -f(x) -> min
# A*x <= b
# linprog minimizes, so the maximization objective 2*x1 + x2 + 3*x3 is
# negated into its minimization form.
objective = [-2, -1, -3]
# Inequality system A*x <= b; the original >= rows were negated into <= form.
inequality_lhs = [[1, 1, 1], [-1, -1, -1], [-1, -2, -1], [0, 2, 1]]
inequality_rhs = [30, -30, -10, 20]
result = linprog(objective, A_ub=inequality_lhs, b_ub=inequality_rhs)
print("Result parameters", result.x) | 28.933333 | 87 | 0.693548 |
795b8676d58c59dcb82a7c7d168c9f1151f49357 | 8,233 | py | Python | test/toolset-mock/src/MockProgram.py | MaxSac/build | 482c25f3a26171073c7e6c59f0427f2259a63fec | [
"BSL-1.0"
] | 1 | 2020-04-28T15:15:28.000Z | 2020-04-28T15:15:28.000Z | test/toolset-mock/src/MockProgram.py | MaxSac/build | 482c25f3a26171073c7e6c59f0427f2259a63fec | [
"BSL-1.0"
] | 2 | 2017-05-23T08:01:11.000Z | 2019-09-06T20:49:05.000Z | test/toolset-mock/src/MockProgram.py | MaxSac/build | 482c25f3a26171073c7e6c59f0427f2259a63fec | [
"BSL-1.0"
] | 8 | 2015-11-03T14:12:19.000Z | 2020-09-22T19:20:54.000Z | # Copyright 2017 Steven Watanabe
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from __future__ import print_function
import sys
import os
import re
import fnmatch
# Represents a sequence of arguments that must appear
# in a fixed order.
class ordered:
    """Pattern whose sub-patterns must match sequentially, in order."""
    def __init__(self, *args):
        self.args = args
    def match(self, command_line, pos, outputs):
        """Return the index after the final sub-pattern, or None on failure."""
        cursor = pos
        for pattern in self.args:
            cursor = try_match(command_line, cursor, pattern, outputs)
            if cursor is None:
                return None
        return cursor
# Represents a sequence of arguments that can appear
# in any order.
class unordered:
    """Pattern whose sub-patterns may appear in any order.

    Repeatedly consumes whichever remaining sub-pattern matches next,
    until all are used or some position fails to match anything.
    """
    def __init__(self, *args):
        self.args = list(args)
    def match(self, command_line, pos, outputs):
        # Work on a copy: try_match_one removes matched patterns in place.
        unmatched = self.args[:]
        while len(unmatched) > 0:
            res = try_match_one(command_line, pos, unmatched, outputs)
            if res is None:
                return
            pos = res
        return pos
# Represents a single input file.
# If id is set, then the file must have been created
# by a prior use of output_file.
# If source is set, then the file must be that source file.
class input_file:
    """Pattern for a single input-file argument.

    With `id` set, the file on disk must have been created by a prior
    output_file (its contents must equal make_file_contents(id)).
    With `source` set, the argument must be exactly that source path.
    """
    def __init__(self, id=None, source=None):
        # Exactly one of id / source must be provided.
        assert((id is None) ^ (source is None))
        self.id = id
        self.source = source
    def check(self, path):
        """Return True if `path` satisfies this pattern, else None."""
        # Flag-style arguments are never file arguments.
        if path.startswith("-"):
            return
        if self.id is not None:
            try:
                with open(path, "r") as f:
                    data = f.read()
                    if data == make_file_contents(self.id):
                        return True
                    else:
                        return
            # Bare except is deliberate here: a missing/unreadable file
            # simply fails to match.
            except:
                return
        elif self.source is not None:
            if self.source == path:
                return True
            else:
                return
        assert(False)
    def match(self, command_line, pos, outputs):
        if self.check(command_line[pos]):
            return pos + 1
# Matches an output file.
# If the full pattern is matched, The
# file will be created.
class output_file:
    """Pattern recording an output path; the file is created only if the
    whole command line eventually matches."""
    def __init__(self, id):
        self.id = id
    def match(self, command_line, pos, outputs):
        """Accept any non-flag argument and remember (path, id) in outputs."""
        candidate = command_line[pos]
        if candidate.startswith("-"):
            return None
        outputs.append((candidate, self.id))
        return pos + 1
class arg_file:
    """Pattern matching any non-flag argument against a glob pattern."""
    def __init__(self, id):
        self.id = id
    def match(self, command_line, pos, outputs):
        candidate = command_line[pos]
        if candidate.startswith("-"):
            return None
        # fnmatch gives shell-style globbing (e.g. "*.cpp").
        if fnmatch.fnmatch(candidate, self.id):
            return pos + 1
        return None
# Matches the directory containing an input_file
class target_path(object):
    """Pattern matching a directory that contains a recognized input file."""
    def __init__(self, id):
        self.tester = input_file(id=id)
    def match(self, command_line, pos, outputs):
        arg = command_line[pos]
        if arg.startswith("-"):
            return
        try:
            # Accept the directory if any entry inside it passes the
            # input_file content check.
            for path in os.listdir(arg):
                if self.tester.check(os.path.join(arg, path)):
                    return pos + 1
        # Bare except is deliberate: a non-directory argument simply
        # fails to match.
        except:
            return
# Matches a single argument, which is composed of a prefix and a path
# for example arguments of the form -ofilename.
class arg(object):
    """Pattern for a fused prefix+path argument, e.g. -ofilename."""
    def __init__(self, prefix, a):
        # The prefix should be a string, a should be target_path or input_file.
        self.prefix = prefix
        self.a = a
    def match(self, command_line, pos, outputs):
        s = command_line[pos]
        # Strip the prefix, then match the remainder as a one-element
        # command line against the inner pattern.
        if s.startswith(self.prefix) and try_match([s[len(self.prefix):]], 0, self.a, outputs) == 1:
            return pos + 1
# Given a file id, returns a string that will be
# written to the file to allow it to be recognized.
def make_file_contents(id):
    """Return the canonical contents written to (and expected back from)
    the file tagged with `id`, so it can be recognized later."""
    return id
# Matches a single pattern from a list.
# If it succeeds, the matching pattern
# is removed from the list.
# Returns the index after the end of the match
def try_match_one(command_line, pos, patterns, outputs):
    """Match any single pattern from `patterns` at `pos`.

    On success the matched pattern is removed from `patterns` in place,
    `outputs` is updated, and the index after the match is returned;
    on failure None is returned and neither list is modified.
    """
    for p in patterns:
        # Match against a scratch copy so a failed attempt cannot leak
        # partially-recorded outputs.
        tmp = outputs[:]
        res = try_match(command_line, pos, p, tmp)
        if res is not None:
            outputs[:] = tmp
            patterns.remove(p)
            return res
# returns the end of the match if any
def try_match(command_line, pos, pattern, outputs):
    """Return the index just past a match of `pattern` at `pos`, else None."""
    if pos == len(command_line):
        return None
    if type(pattern) is str:
        # Plain strings are literal arguments and must compare exactly.
        return pos + 1 if command_line[pos] == pattern else None
    # Pattern objects implement their own match(command_line, pos, outputs).
    return pattern.match(command_line, pos, outputs)
known_patterns = []
program_name = None
# Registers a command
# The arguments should be a sequence of:
# str, ordered, unordered, arg, input_file, output_file, target_path
# kwarg: stdout is text that will be printed on success.
def command(*args, **kwargs):
    """Register one recognized command line.

    `args` is a sequence of str / ordered / unordered / arg / input_file /
    output_file / target_path patterns; the `stdout` kwarg is text printed
    when the command matches.
    """
    global known_patterns
    global program_name
    stdout = kwargs.get("stdout", None)
    pattern = ordered(*args)
    known_patterns += [(pattern, stdout)]
    # Every registered command must share the same program name (args[0]).
    if program_name is None:
        program_name = args[0]
    else:
        assert(program_name == args[0])
# Use this to filter the recognized commands, based on the properties
# passed to b2.
def allow_properties(*args):
    """True when every property in args was passed to b2 (B2_PROPERTIES)."""
    try:
        active = os.environ["B2_PROPERTIES"].split(" ")
    except KeyError:
        # No property filter configured: accept everything.
        return True
    return all(wanted in active for wanted in args)
# Use this in the stdout argument of command to print the command
# for running another script.
def script(name):
    """Return the path of the helper under bin/ for `name`, minus any .py."""
    stem = re.sub(r'\.py$', '', name)
    return os.path.join(os.path.dirname(__file__), "bin", stem)
def match(command_line):
    """Return (stdout, outputs) for the first registered pattern that
    consumes the entire command line; None if nothing matches."""
    for (p, stdout) in known_patterns:
        outputs = []
        if try_match(command_line, 0, p, outputs) == len(command_line):
            return (stdout, outputs)
# Every mock program should call this after setting up all the commands.
def main():
    """Entry point for a mock program: match argv, create outputs, exit."""
    command_line = [program_name] + sys.argv[1:]
    result = match(command_line)
    if result is not None:
        (stdout, outputs) = result
        if stdout is not None:
            print(stdout)
        # Materialize every recorded output file with its recognizable tag.
        for (file,id) in outputs:
            with open(file, "w") as f:
                f.write(make_file_contents(id))
        exit(0)
    else:
        print("ERROR on command: %s"%(" ".join(command_line)))
        exit(1)
# file should be the name of a file in the same directory
# as this. Must be called after verify_setup
def verify_file(filename):
    """Run the named sibling mock script once, in a fresh namespace.

    `filename` must name a file in the same directory as this module.
    Must be called after verify_setup (which creates the known_files
    global); each file is only executed the first time it is seen.
    """
    global known_files
    if filename not in known_files:
        known_files.add(filename)
        srcdir = os.path.dirname(__file__)
        # execfile() only exists on Python 2; read + exec works on both
        # Python 2 and 3 (this file already imports print_function for 2/3
        # compatibility).
        path = os.path.join(srcdir, filename)
        with open(path) as f:
            exec(compile(f.read(), path, "exec"), {})
def verify_setup():
    """Override the behavior of most module components
    in order to detect whether they are being used correctly."""
    # Because of the `global` declarations below, each nested `def`
    # rebinds the module-level name, replacing the real implementation
    # with a book-keeping stub for the duration of verify().
    global main
    global allow_properties
    global output_file
    global input_file
    global target_path
    global script
    global command
    global verify_errors
    global output_ids
    global input_ids
    global known_files
    def allow_properties(*args):
        return True
    def main():
        pass
    def output_file(id):
        global output_ids
        global verify_error
        # Each output id may be declared at most once across all scripts.
        if id in output_ids:
            verify_error("duplicate output_file: %s" % id)
        output_ids.add(id)
    def input_file(id=None, source=None):
        # Only generated files (ids) need cross-checking; sources do not.
        if id is not None:
            input_ids.add(id)
    def target_path(id):
        input_ids.add(id)
    def script(filename):
        verify_file(filename)
    def command(*args, **kwargs):
        pass
    # Fresh book-keeping state for this verification run.
    verify_errors = []
    output_ids = set()
    input_ids = set()
    known_files = set()
def verify_error(message):
    """Record one verification failure for verify_finalize() to report."""
    global verify_errors
    verify_errors += [message]
def verify_finalize():
    """Report accumulated verification errors; return a process exit code.

    Returns 1 if any error was recorded, 0 otherwise.
    """
    # Every id consumed by input_file()/target_path() must have been
    # produced by some output_file() declaration.
    for id in input_ids:
        if id not in output_ids:
            verify_error("Input file does not exist: %s" % id)
    for error in verify_errors:
        print("error: %s" % error)
    # Non-zero exit status when anything was flagged.
    return 1 if verify_errors else 0
def verify():
    """Self-check every toolset mock script in this directory, then exit
    with verify_finalize()'s status."""
    srcdir = os.path.dirname(__file__)
    if srcdir == '':
        srcdir = '.'
    verify_setup()
    # Only files that look like toolset mocks are verified.
    for f in os.listdir(srcdir):
        if re.match(r"(gcc|clang|darwin|intel)-.*\.py", f):
            verify_file(f)
    exit(verify_finalize())
| 29.938182 | 100 | 0.612049 |
795b867aa37bf8efe3a09c8b27f356e7f2f971a3 | 1,276 | py | Python | assertions/dictionary/assert_dictionary_keys.py | snafis/Utils | 37998a3a253b7fdee52f5f79cf9c02ec2a3363d4 | [
"MIT"
] | 3 | 2019-02-14T21:16:15.000Z | 2020-05-05T20:20:42.000Z | assertions/dictionary/assert_dictionary_keys.py | snafis/Utils | 37998a3a253b7fdee52f5f79cf9c02ec2a3363d4 | [
"MIT"
] | null | null | null | assertions/dictionary/assert_dictionary_keys.py | snafis/Utils | 37998a3a253b7fdee52f5f79cf9c02ec2a3363d4 | [
"MIT"
] | 1 | 2019-07-11T02:06:49.000Z | 2019-07-11T02:06:49.000Z | def assert_dictionary_keys(dictionary, required_keys, verbose=0):
missing_keys = []
for key in required_keys:
if key not in dictionary.keys():
missing_keys.append(key)
if missing_keys:
if verbose > 0:
print(
f'Missing keys {missing_keys} not found in dictionary with keys {dictionary.keys()}')
return False
else:
return True
def assert_nested_dictionary_keys(dictionary, nested_keys_dict, verbose=0):
    """Check that dictionary contains every required top-level key and,
    inside each present key, every required nested key.

    Args:
        dictionary: Mapping of mappings to inspect.
        nested_keys_dict: Mapping of required top-level key -> iterable of
            required nested keys under that key.
        verbose: When > 0, print which keys are missing on failure.

    Returns:
        True if nothing is missing, False otherwise.
    """
    missing_keys = []
    missing_nested_keys = []
    for key, nested_keys in nested_keys_dict.items():
        # Check that all top level keys are in the dictionary
        if key not in dictionary:
            missing_keys.append(key)
            # Bug fix: the original still indexed dictionary[key] here and
            # raised KeyError; an absent branch cannot be inspected.
            continue
        for nested_key in nested_keys:
            if nested_key not in dictionary[key]:
                missing_nested_keys.append(nested_key)
    if missing_keys:
        if verbose > 0:
            print(
                f'Missing keys {missing_keys} not found in dictionary with keys {dictionary.keys()}')
        return False
    if missing_nested_keys:
        if verbose > 0:
            print(
                f'Nested keys: {missing_nested_keys} not found in dictionary.')
        return False
    return True
| 34.486486 | 101 | 0.619122 |
795b876c2e612caf05b8e9c3a224bd416b664419 | 121 | py | Python | apps/exploit/admin.py | macdaliot/exist | 65244f79c602c5a00c3ea6a7eef512ce9c21e60a | [
"MIT"
] | 159 | 2019-03-15T10:46:19.000Z | 2022-03-12T09:19:31.000Z | apps/exploit/admin.py | macdaliot/exist | 65244f79c602c5a00c3ea6a7eef512ce9c21e60a | [
"MIT"
] | 6 | 2019-03-16T12:51:24.000Z | 2020-07-09T02:25:42.000Z | apps/exploit/admin.py | macdaliot/exist | 65244f79c602c5a00c3ea6a7eef512ce9c21e60a | [
"MIT"
] | 36 | 2019-03-16T10:37:14.000Z | 2021-11-14T21:04:18.000Z | from django.contrib import admin
# Register your models here.
from .models import Exploit
admin.site.register(Exploit)
| 17.285714 | 32 | 0.801653 |
795b880ae6cbef48fe3affc1f39e448407e13c50 | 766 | py | Python | example/bpapi/app.py | rog-works/lambda-fw | 715b36fc2d8d0ea0388aa4ac1336dc8cd5543778 | [
"CNRI-Python"
] | null | null | null | example/bpapi/app.py | rog-works/lambda-fw | 715b36fc2d8d0ea0388aa4ac1336dc8cd5543778 | [
"CNRI-Python"
] | 15 | 2020-12-05T13:52:13.000Z | 2020-12-19T10:14:40.000Z | example/bpapi/app.py | rog-works/lambda-fw | 715b36fc2d8d0ea0388aa4ac1336dc8cd5543778 | [
"CNRI-Python"
] | null | null | null | from logging import Logger
from typing import Optional
from lf3py.app.webapp import WebApp
from lf3py.i18n.i18n import I18n
from lf3py.lang.cache import Cache
from lf3py.lang.di import DI
class MyApp(WebApp):
    """WebApp singleton exposing DI-resolved services as properties."""
    # Handle to the most recently constructed instance (name-mangled
    # to _MyApp__instance).
    __instance: Optional['MyApp'] = None
    @classmethod
    def get(cls) -> 'MyApp':
        """Return the singleton; raise AssertionError if none exists yet."""
        if cls.__instance is None:
            raise AssertionError()
        return cls.__instance
    def __init__(self, di: DI) -> None:
        """Initialize the base WebApp and register this as the singleton."""
        super(MyApp, self).__init__(di)
        MyApp.__instance = self
    @property
    def i18n(self) -> I18n:
        # _di is presumably set by the WebApp base from `di` — confirm there.
        return self._di.resolve(I18n)
    @property
    def logger(self) -> Logger:
        return self._di.resolve(Logger)
    @property
    def cache(self) -> Cache:
        return self._di.resolve(Cache)
| 21.885714 | 40 | 0.651436 |
795b8810f951610c5666fdfdd13ab3c85f0e5ab2 | 4,037 | py | Python | pcap_bw.py | Cjen1/haoc21-ae | 59f9f790a75048bae86e43ee912844115a226dcb | [
"MIT"
] | null | null | null | pcap_bw.py | Cjen1/haoc21-ae | 59f9f790a75048bae86e43ee912844115a226dcb | [
"MIT"
] | null | null | null | pcap_bw.py | Cjen1/haoc21-ae | 59f9f790a75048bae86e43ee912844115a226dcb | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (C) 2020 Richard Mortier <mort@cantab.net>. All Rights Reserved.
#
# Licensed under the GPL v3; see LICENSE.md in the root of this distribution or
# the full text at https://opensource.org/licenses/GPL-3.0
# Computes total and all (src, dst) pairs bandwidth given a PCAP trace.
# Currently assumes a "cooked Linux" (SLL) format trace captured using `tcpdump
# -i any` from a mininet simulation.
#
# Requires `pip|pip3 install dpkt`.
#
# Useful pre-processing command lines for large PCAP files include:
#
# $ editcap -S0 -d -A"YYYY-MM-DD HH:mm:SS" -B"YYYY-MM-DD HH:mm:SS" in.pcap \
# fragment.pcap
import sys, socket, pprint, json
import dpkt
import argparse
## dpkt.pcap.Reader iterator doesn't provide the PCAP header, only the timestamp
class R(dpkt.pcap.Reader):
    """dpkt.pcap.Reader whose iterator also yields each record header.

    The stock dpkt.pcap.Reader iterator yields only (timestamp, data);
    this subclass re-parses the per-packet headers itself and yields
    (timestamp, header, data).
    """
    def __iter__(self):
        while 1:
            # self._Reader__f / self._Reader__ph reach the base class's
            # name-mangled private file object and record-header parser.
            buf = self._Reader__f.read(dpkt.pcap.PktHdr.__hdr_len__)
            if not buf:
                break
            hdr = self._Reader__ph(buf)
            buf = self._Reader__f.read(hdr.caplen)
            yield (hdr.tv_sec + (hdr.tv_usec / self._divisor), hdr, buf)
## from dpkt print_pcap example
def inet_to_str(inet):
    """Convert packed binary address bytes to a printable IP string."""
    # 4-byte values decode as IPv4; anything inet_ntop rejects for
    # AF_INET is retried as IPv6 (whose ValueError, if any, propagates).
    try:
        return socket.inet_ntop(socket.AF_INET, inet)
    except ValueError:
        pass
    return socket.inet_ntop(socket.AF_INET6, inet)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Get the bandwidth per window from a pcap file.")
    parser.add_argument('INPUT', help="Pcap file to analyse")
    parser.add_argument('-w', '--window', dest="WINDOW", default=1, help="Window size for bandwidth averaging. Measured in seconds", type=int)
    parser.add_argument('HOSTS', default=[], help="Hosts to calculate bandwith usage between", nargs='*')
    args = parser.parse_args()
    INPUT = args.INPUT
    WINDOW = args.WINDOW ## seconds
    HOSTS = args.HOSTS
    with open(INPUT, 'rb') as f:
        pcap = R(f)
        cnt = 0
        prevts = 0
        prevwindow = 0
        totbw = 0
        # Per-(src, dst) byte counters for the current window.
        hostbw = { i: { j: 0 for j in HOSTS } for i in HOSTS }
        for ts, hdr, buf in pcap:
            # Emit the CSV header once, before the first packet is counted.
            if prevts == 0:
                s = ",".join(
                    ",".join([":".join([s,d]) for d in HOSTS])
                    for s in HOSTS
                )
                print("# time,totalbw,%s" % s, sep=",", flush=True)
            cnt += 1
            # Progress tick on stderr every 10k packets.
            if cnt % 10000 == 0:
                print(cnt, "...", end="", sep="", flush=True, file=sys.stderr)
            sll = dpkt.sll.SLL(buf) ## i happen to know the input linktype = SLL
            pkt = None
            if sll.ethtype == 0x0800: ## IPv4
                if sll.type == 3: ## sent to someone else
                    pkt = sll.ip
                elif sll.type == 4: ## sent by us, ie., emitted from switch
                    pass
                else:
                    print("[dropped %04x / %d]..." % (sll.ethtype, sll.type),
                          end="", sep="", file=sys.stderr)
            elif sll.ethtype == 0x0806: ## ARP
                print("[dropped ARP / %d bytes]..." % hdr.len,
                      end="", sep="", file=sys.stderr)
            else:
                print("[dropped %04x / %d]..." % (sll.ethtype, sll.type),
                      end="", sep="", file=sys.stderr)
            if not pkt: continue
            # Quantize the timestamp into WINDOW-second buckets.
            window = int(ts) if WINDOW == 1 else WINDOW * (int(ts) // WINDOW)
            if prevwindow == 0: prevwindow = window
            # On crossing a window boundary, flush the finished window's
            # totals and reset the counters.
            if prevwindow != window:
                hostbws = ", ".join(
                    ",".join([ str(hostbw[s][d]) for d in HOSTS ])
                    for s in HOSTS
                )
                print(prevwindow, totbw, hostbws, sep=", ", flush=True)
                totbw = 0
                hostbw = { i: { j: 0 for j in HOSTS } for i in HOSTS }
                prevwindow = window
            totbw += pkt.len
            src = inet_to_str(pkt.src)
            dst = inet_to_str(pkt.dst)
            # NOTE(review): a KeyError here means a host seen on the wire
            # was not listed in HOSTS.
            hostbw[src][dst] += pkt.len
            prevts = ts
| 37.37963 | 142 | 0.539014 |
795b88a0cacf2fe5986e31ce7fe03c40a635c6c8 | 3,951 | py | Python | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/spanish/timeperiod_parser_config.py | ahmedabuamra/Recognizers-Text | 31193d89d3532839742992a2755c1d8539c68116 | [
"MIT"
] | 10 | 2019-05-11T18:07:14.000Z | 2021-08-20T03:02:47.000Z | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/spanish/timeperiod_parser_config.py | ahmedabuamra/Recognizers-Text | 31193d89d3532839742992a2755c1d8539c68116 | [
"MIT"
] | 1 | 2020-07-10T08:25:36.000Z | 2020-07-10T08:25:36.000Z | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/spanish/timeperiod_parser_config.py | ahmedabuamra/Recognizers-Text | 31193d89d3532839742992a2755c1d8539c68116 | [
"MIT"
] | 18 | 2019-08-19T12:11:00.000Z | 2021-10-12T09:36:27.000Z | from typing import Pattern, Dict
from recognizers_text.utilities import RegExpUtility
from recognizers_text.extractor import Extractor
from recognizers_number.number.spanish.extractors import SpanishIntegerExtractor
from ...resources.spanish_date_time import SpanishDateTime
from ..extractors import DateTimeExtractor
from ..parsers import DateTimeParser
from ..base_configs import BaseDateParserConfiguration, DateTimeUtilityConfiguration
from ..base_timeperiod import TimePeriodParserConfiguration, MatchedTimeRegex
from ..constants import Constants
from ..utilities import TimexUtil
class SpanishTimePeriodParserConfiguration(TimePeriodParserConfiguration):
    """Spanish-specific configuration for the base time-period parser.

    Wraps extractors/parsers from a BaseDateParserConfiguration and
    compiles the Spanish regex resources the parser needs.
    """
    @property
    def time_extractor(self) -> DateTimeExtractor:
        return self._time_extractor
    @property
    def time_parser(self) -> DateTimeParser:
        return self._time_parser
    @property
    def integer_extractor(self) -> Extractor:
        return self._integer_extractor
    @property
    def pure_number_from_to_regex(self) -> Pattern:
        return self._pure_number_from_to_regex
    @property
    def pure_number_between_and_regex(self) -> Pattern:
        return self._pure_number_between_and_regex
    @property
    def time_of_day_regex(self) -> Pattern:
        return self._time_of_day_regex
    @property
    def till_regex(self) -> Pattern:
        return self._till_regex
    @property
    def numbers(self) -> Dict[str, int]:
        return self._numbers
    @property
    def utility_configuration(self) -> DateTimeUtilityConfiguration:
        return self._utility_configuration
    def __init__(self, config: BaseDateParserConfiguration):
        # Components shared with the language-neutral configuration.
        self._time_extractor = config.time_extractor
        self._time_parser = config.time_parser
        self._integer_extractor = config.integer_extractor
        self._numbers = config.numbers
        self._utility_configuration = config.utility_configuration
        # Spanish regex resources, compiled once at construction time.
        self._pure_number_from_to_regex = RegExpUtility.get_safe_reg_exp(
            SpanishDateTime.PureNumFromTo)
        self._pure_number_between_and_regex = RegExpUtility.get_safe_reg_exp(
            SpanishDateTime.PureNumBetweenAnd)
        self._time_of_day_regex = RegExpUtility.get_safe_reg_exp(
            SpanishDateTime.TimeOfDayRegex)
        self._till_regex = RegExpUtility.get_safe_reg_exp(
            SpanishDateTime.TillRegex)
    def get_matched_timex_range(self, source: str) -> MatchedTimeRegex:
        """Map a Spanish time-of-day phrase to its timex/hour range.

        Returns a MatchedTimeRegex with matched=False when the text does
        not end with any known time-of-day term.
        """
        trimmed_text = source.strip().lower()
        # Drop a trailing plural 's' so singular term lists still match.
        if trimmed_text.endswith('s'):
            trimmed_text = trimmed_text[:-1]
        timex = ''
        begin_hour = 0
        end_hour = 0
        end_min = 0
        time_of_day = ""
        if any(trimmed_text.endswith(o) for o in SpanishDateTime.EarlyMorningTermList):
            time_of_day = Constants.EarlyMorning
        elif any(trimmed_text.endswith(o) for o in SpanishDateTime.MorningTermList):
            time_of_day = Constants.Morning
        elif any(trimmed_text.endswith(o) for o in SpanishDateTime.AfternoonTermList):
            time_of_day = Constants.Afternoon
        elif any(trimmed_text.endswith(o) for o in SpanishDateTime.EveningTermList):
            time_of_day = Constants.Evening
        elif any(trimmed_text.endswith(o) for o in SpanishDateTime.NightTermList):
            time_of_day = Constants.Night
        else:
            return MatchedTimeRegex(
                matched=False,
                timex='',
                begin_hour=0,
                end_hour=0,
                end_min=0
            )
        # TimexUtil supplies the canonical timex string and hour bounds
        # for the recognized time-of-day constant.
        parse_result = TimexUtil.parse_time_of_day(time_of_day)
        timex = parse_result.timex
        begin_hour = parse_result.begin_hour
        end_hour = parse_result.end_hour
        end_min = parse_result.end_min
        return MatchedTimeRegex(
            matched=True,
            timex=timex,
            begin_hour=begin_hour,
            end_hour=end_hour,
            end_min=end_min
        )
| 35.918182 | 87 | 0.698304 |
795b8a75dafe25c3b8202c1e8dc16828c8ee1f8f | 13,525 | py | Python | FastText/train_fast.py | ssbuild/Multi-Label-Text-Classification | 65c67e7a3b69cc3f015c7c3822cbb89ae887c6fb | [
"Apache-2.0"
] | 524 | 2017-10-19T14:18:45.000Z | 2022-03-21T18:25:48.000Z | FastText/train_fast.py | ssbuild/Multi-Label-Text-Classification | 65c67e7a3b69cc3f015c7c3822cbb89ae887c6fb | [
"Apache-2.0"
] | 26 | 2018-05-27T01:19:19.000Z | 2022-03-24T10:17:49.000Z | FastText/train_fast.py | ssbuild/Multi-Label-Text-Classification | 65c67e7a3b69cc3f015c7c3822cbb89ae887c6fb | [
"Apache-2.0"
] | 133 | 2017-11-10T10:51:21.000Z | 2022-03-26T11:41:47.000Z | # -*- coding:utf-8 -*-
__author__ = 'Randolph'
import os
import sys
import time
import logging
sys.path.append('../')
logging.getLogger('tensorflow').disabled = True
import numpy as np
import tensorflow as tf
from tensorboard.plugins import projector
from text_fast import TextFAST
from utils import checkmate as cm
from utils import data_helpers as dh
from utils import param_parser as parser
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, average_precision_score
args = parser.parameter_parser()
OPTION = dh._option(pattern=0)
logger = dh.logger_fn("tflog", "logs/{0}-{1}.log".format('Train' if OPTION == 'T' else 'Restore', time.asctime()))
def create_input_data(data: dict):
    """Yield (padded_sequence, onehot_label) pairs from a loaded data dict."""
    sequences = data['pad_seqs']
    labels = data['onehot_labels']
    return zip(sequences, labels)
def train_fasttext():
"""Training FASTTEXT model."""
# Print parameters used for the model
dh.tab_printer(args, logger)
# Load word2vec model
word2idx, embedding_matrix = dh.load_word2vec_matrix(args.word2vec_file)
# Load sentences, labels, and training parameters
logger.info("Loading data...")
logger.info("Data processing...")
train_data = dh.load_data_and_labels(args, args.train_file, word2idx)
val_data = dh.load_data_and_labels(args, args.validation_file, word2idx)
# Build a graph and fasttext object
with tf.Graph().as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=args.allow_soft_placement,
log_device_placement=args.log_device_placement)
session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth
sess = tf.Session(config=session_conf)
with sess.as_default():
fasttext = TextFAST(
sequence_length=args.pad_seq_len,
vocab_size=len(word2idx),
embedding_type=args.embedding_type,
embedding_size=args.embedding_dim,
num_classes=args.num_classes,
l2_reg_lambda=args.l2_lambda,
pretrained_embedding=embedding_matrix)
# Define training procedure
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
learning_rate = tf.train.exponential_decay(learning_rate=args.learning_rate,
global_step=fasttext.global_step,
decay_steps=args.decay_steps,
decay_rate=args.decay_rate,
staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads, vars = zip(*optimizer.compute_gradients(fasttext.loss))
grads, _ = tf.clip_by_global_norm(grads, clip_norm=args.norm_ratio)
train_op = optimizer.apply_gradients(zip(grads, vars), global_step=fasttext.global_step,
name="train_op")
# Keep track of gradient values and sparsity (optional)
grad_summaries = []
for g, v in zip(grads, vars):
if g is not None:
grad_hist_summary = tf.summary.histogram("{0}/grad/hist".format(v.name), g)
sparsity_summary = tf.summary.scalar("{0}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
grad_summaries.append(grad_hist_summary)
grad_summaries.append(sparsity_summary)
grad_summaries_merged = tf.summary.merge(grad_summaries)
# Output directory for models and summaries
out_dir = dh.get_out_dir(OPTION, logger)
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
best_checkpoint_dir = os.path.abspath(os.path.join(out_dir, "bestcheckpoints"))
# Summaries for loss
loss_summary = tf.summary.scalar("loss", fasttext.loss)
# Train summaries
train_summary_op = tf.summary.merge([loss_summary, grad_summaries_merged])
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
# Validation summaries
validation_summary_op = tf.summary.merge([loss_summary])
validation_summary_dir = os.path.join(out_dir, "summaries", "validation")
validation_summary_writer = tf.summary.FileWriter(validation_summary_dir, sess.graph)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=args.num_checkpoints)
best_saver = cm.BestCheckpointSaver(save_dir=best_checkpoint_dir, num_to_keep=3, maximize=True)
if OPTION == 'R':
# Load fasttext model
logger.info("Loading model...")
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
logger.info(checkpoint_file)
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{0}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
if OPTION == 'T':
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
# Embedding visualization config
config = projector.ProjectorConfig()
embedding_conf = config.embeddings.add()
embedding_conf.tensor_name = "embedding"
embedding_conf.metadata_path = args.metadata_file
projector.visualize_embeddings(train_summary_writer, config)
projector.visualize_embeddings(validation_summary_writer, config)
# Save the embedding visualization
saver.save(sess, os.path.join(out_dir, "embedding", "embedding.ckpt"))
current_step = sess.run(fasttext.global_step)
def train_step(batch_data):
    """Run one optimization step on a single mini-batch.

    Unzips the batch into inputs and one-hot labels, feeds them with the
    training dropout rate, executes the train op, logs the loss and writes
    the merged training summaries. Uses `sess`, `fasttext`, `train_op`,
    `args`, `logger`, `train_summary_op` and `train_summary_writer` from
    the enclosing scope.
    """
    batch_x, batch_y = zip(*batch_data)
    fetches = [train_op, fasttext.global_step, train_summary_op, fasttext.loss]
    feed = {
        fasttext.input_x: batch_x,
        fasttext.input_y: batch_y,
        fasttext.dropout_keep_prob: args.dropout_rate,
        fasttext.is_training: True,
    }
    _, step, summaries, loss = sess.run(fetches, feed)
    logger.info("step {0}: loss {1:g}".format(step, loss))
    train_summary_writer.add_summary(summaries, step)
def validation_step(val_loader, writer=None):
    """Evaluates model on a validation set.

    Runs one pass over `val_loader`, accumulating:
      - mean loss over batches,
      - micro-averaged Precision/Recall/F1 for threshold-based predictions,
      - micro-averaged Precision/Recall/F1 for top-1..top-K predictions,
      - micro-averaged ROC-AUC and average-precision over raw scores.
    Relies on `sess`, `fasttext`, `args` and `validation_summary_op` from
    the enclosing scope. Returns a 9-tuple (see final return statement).
    """
    batches_validation = dh.batch_iter(list(create_input_data(val_loader)), args.batch_size, 1)
    # Predict classes by threshold or topk ('ts': threshold; 'tk': topk)
    eval_counter, eval_loss = 0, 0.0
    # Per-k slots for the top-K metrics (index 0 == top-1).
    eval_pre_tk = [0.0] * args.topK
    eval_rec_tk = [0.0] * args.topK
    eval_F1_tk = [0.0] * args.topK
    # Accumulated across all batches so sklearn metrics see the whole set.
    true_onehot_labels = []
    predicted_onehot_scores = []
    predicted_onehot_labels_ts = []
    predicted_onehot_labels_tk = [[] for _ in range(args.topK)]
    for batch_validation in batches_validation:
        x, y_onehot = zip(*batch_validation)
        # Dropout disabled and is_training False for evaluation.
        feed_dict = {
            fasttext.input_x: x,
            fasttext.input_y: y_onehot,
            fasttext.dropout_keep_prob: 1.0,
            fasttext.is_training: False
        }
        step, summaries, scores, cur_loss = sess.run(
            [fasttext.global_step, validation_summary_op, fasttext.scores, fasttext.loss], feed_dict)
        # Prepare for calculating metrics
        for i in y_onehot:
            true_onehot_labels.append(i)
        for j in scores:
            predicted_onehot_scores.append(j)
        # Predict by threshold
        batch_predicted_onehot_labels_ts = \
            dh.get_onehot_label_threshold(scores=scores, threshold=args.threshold)
        for k in batch_predicted_onehot_labels_ts:
            predicted_onehot_labels_ts.append(k)
        # Predict by topK (top_num is 0-based, helper expects 1-based count).
        for top_num in range(args.topK):
            batch_predicted_onehot_labels_tk = dh.get_onehot_label_topk(scores=scores, top_num=top_num+1)
            for i in batch_predicted_onehot_labels_tk:
                predicted_onehot_labels_tk[top_num].append(i)
        eval_loss = eval_loss + cur_loss
        eval_counter = eval_counter + 1
        if writer:
            writer.add_summary(summaries, step)
    # Mean loss over the batches seen.
    eval_loss = float(eval_loss / eval_counter)
    # Calculate Precision & Recall & F1 (micro-averaged, threshold variant).
    eval_pre_ts = precision_score(y_true=np.array(true_onehot_labels),
                                  y_pred=np.array(predicted_onehot_labels_ts), average='micro')
    eval_rec_ts = recall_score(y_true=np.array(true_onehot_labels),
                               y_pred=np.array(predicted_onehot_labels_ts), average='micro')
    eval_F1_ts = f1_score(y_true=np.array(true_onehot_labels),
                          y_pred=np.array(predicted_onehot_labels_ts), average='micro')
    # Same metrics for each top-k prediction set.
    for top_num in range(args.topK):
        eval_pre_tk[top_num] = precision_score(y_true=np.array(true_onehot_labels),
                                               y_pred=np.array(predicted_onehot_labels_tk[top_num]),
                                               average='micro')
        eval_rec_tk[top_num] = recall_score(y_true=np.array(true_onehot_labels),
                                            y_pred=np.array(predicted_onehot_labels_tk[top_num]),
                                            average='micro')
        eval_F1_tk[top_num] = f1_score(y_true=np.array(true_onehot_labels),
                                       y_pred=np.array(predicted_onehot_labels_tk[top_num]),
                                       average='micro')
    # Calculate the average AUC
    eval_auc = roc_auc_score(y_true=np.array(true_onehot_labels),
                             y_score=np.array(predicted_onehot_scores), average='micro')
    # Calculate the average PR (area under the precision-recall curve).
    eval_prc = average_precision_score(y_true=np.array(true_onehot_labels),
                                       y_score=np.array(predicted_onehot_scores), average='micro')
    return eval_loss, eval_auc, eval_prc, eval_pre_ts, eval_rec_ts, eval_F1_ts, \
           eval_pre_tk, eval_rec_tk, eval_F1_tk
# Generate batches
batches_train = dh.batch_iter(list(create_input_data(train_data)), args.batch_size, args.epochs)
# Ceiling division: number of mini-batches that make up one epoch.
num_batches_per_epoch = int((len(train_data['pad_seqs']) - 1) / args.batch_size) + 1
# Training loop. For each batch...
for batch_train in batches_train:
    train_step(batch_train)
    current_step = tf.train.global_step(sess, fasttext.global_step)
    if current_step % args.evaluate_steps == 0:
        # Periodic evaluation on the validation set.
        logger.info("\nEvaluation:")
        eval_loss, eval_auc, eval_prc, \
            eval_pre_ts, eval_rec_ts, eval_F1_ts, eval_pre_tk, eval_rec_tk, eval_F1_tk = \
            validation_step(val_data, writer=validation_summary_writer)
        logger.info("All Validation set: Loss {0:g} | AUC {1:g} | AUPRC {2:g}"
                    .format(eval_loss, eval_auc, eval_prc))
        # Predict by threshold
        logger.info("Predict by threshold: Precision {0:g}, Recall {1:g}, F1 {2:g}"
                    .format(eval_pre_ts, eval_rec_ts, eval_F1_ts))
        # Predict by topK
        logger.info("Predict by topK:")
        for top_num in range(args.topK):
            logger.info("Top{0}: Precision {1:g}, Recall {2:g}, F1 {3:g}"
                        .format(top_num+1, eval_pre_tk[top_num], eval_rec_tk[top_num], eval_F1_tk[top_num]))
        # Track the best checkpoints by validation AUPRC (maximized).
        best_saver.handle(eval_prc, sess, current_step)
    if current_step % args.checkpoint_steps == 0:
        # Periodic rolling checkpoint (independent of the "best" saver).
        checkpoint_prefix = os.path.join(checkpoint_dir, "model")
        path = saver.save(sess, checkpoint_prefix, global_step=current_step)
        logger.info("Saved model checkpoint to {0}\n".format(path))
    if current_step % num_batches_per_epoch == 0:
        # Epoch boundary reached.
        current_epoch = current_step // num_batches_per_epoch
        logger.info("Epoch {0} has finished!".format(current_epoch))
logger.info("All Done.")
# Script entry point: run the full training routine when executed directly.
if __name__ == '__main__':
    train_fasttext()
795b8ab1c03a5b6d6d2448ddb57a60fa6f5bd3de | 3,099 | py | Python | backend/core/services/security/auth.py | devalv/yawm | 9f91b96cf6b9a9a1f2026d514ea24edda117e1ba | [
"MIT"
] | 1 | 2021-12-26T00:10:15.000Z | 2021-12-26T00:10:15.000Z | backend/core/services/security/auth.py | devalv/yawm | 9f91b96cf6b9a9a1f2026d514ea24edda117e1ba | [
"MIT"
] | 7 | 2020-11-07T16:42:47.000Z | 2022-01-21T23:51:38.000Z | backend/core/services/security/auth.py | devalv/yawm | 9f91b96cf6b9a9a1f2026d514ea24edda117e1ba | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Authentication system."""
from fastapi import Depends
from fastapi.security.oauth2 import OAuth2AuthorizationCodeBearer
from jose import JWTError
from pydantic import UUID4
from core.config import LOGIN_ENDPOINT, SWAP_TOKEN_ENDPOINT
from core.database import ProductGinoModel, UserGinoModel, WishlistGinoModel
from core.schemas import AccessToken, GoogleIdInfo, RefreshToken
from core.utils import CREDENTIALS_EX, INACTIVE_EX, NOT_AN_OWNER
# OAuth2 authorization-code flow: tokens are obtained at SWAP_TOKEN_ENDPOINT
# after the user authorizes at LOGIN_ENDPOINT; used as the FastAPI dependency
# that extracts the bearer token from incoming requests.
oauth2_scheme = OAuth2AuthorizationCodeBearer(
    authorizationUrl=LOGIN_ENDPOINT, tokenUrl=SWAP_TOKEN_ENDPOINT
)
async def get_or_create_user(id_info: GoogleIdInfo):
    """Upsert a user record from verified Google id-token claims.

    Looks the user up by the external Google subject id (``sub``); creates
    the row when absent, otherwise refreshes the profile fields.
    """
    profile = dict(
        sub=id_info.sub,
        username=id_info.username,
        family_name=id_info.family_name,
        given_name=id_info.given_name,
        full_name=id_info.name,
    )
    return await UserGinoModel.insert_or_update_by_ext_id(**profile)
async def get_user_for_refresh(token: str):
    """Validate a refresh token and return the active user it belongs to.

    Raises INACTIVE_EX for a missing/disabled user, CREDENTIALS_EX when the
    token is malformed, expired, or no longer registered for the user.
    """
    try:
        claims = RefreshToken.decode_and_create(token=token)
        user = await UserGinoModel.get(claims.id)
        if user is None or user.disabled:
            raise INACTIVE_EX
        if not await user.token_is_valid(token):
            raise CREDENTIALS_EX
    except (JWTError, ValueError):
        # Bad signature, expiry, or malformed payload.
        raise CREDENTIALS_EX
    return user
async def get_current_user(token: str = Depends(oauth2_scheme)):  # noqa: B008
    """Decode the bearer access token and return the matching active user.

    Intended as a FastAPI dependency::

        @auth_router.get(f"{SUCCESS_ROUTE}", response_model=UserDBModel)
        @version(1)
        async def read_users_me(  # noqa: D103
            current_user: UserDBModel = Depends(get_current_user),  # noqa: B008
        ):
            return current_user
    """
    try:
        claims = AccessToken.decode_and_create(token=token)
        user = await UserGinoModel.get(claims.id)
        if user is None or user.disabled:
            raise INACTIVE_EX
    except (JWTError, ValueError):
        # Bad signature, expiry, or malformed payload.
        raise CREDENTIALS_EX
    return user
async def get_wishlist(id: UUID4):  # noqa: A002
    """Fetch a wishlist by primary key, raising a 404 when it is absent."""
    wishlist = await WishlistGinoModel.get_or_404(id)
    return wishlist
async def get_product(id: UUID4):  # noqa: A002
    """Fetch a product by primary key, raising a 404 when it is absent."""
    product = await ProductGinoModel.get_or_404(id)
    return product
async def get_user_wishlist(
    wishlist: WishlistGinoModel = Depends(get_wishlist),  # noqa: B008
    current_user: UserGinoModel = Depends(get_current_user),  # noqa: B008
) -> WishlistGinoModel:
    """Return the wishlist only if the requester owns it or is a superuser."""
    owns_it = wishlist.user_id == current_user.id
    if not (current_user.superuser or owns_it):
        raise NOT_AN_OWNER
    return wishlist
async def get_user_product(
    product: ProductGinoModel = Depends(get_product),  # noqa: B008
    current_user: UserGinoModel = Depends(get_current_user),  # noqa: B008
) -> ProductGinoModel:
    """Return the product only if the requester owns it or is a superuser."""
    owns_it = product.user_id == current_user.id
    if not (current_user.superuser or owns_it):
        raise NOT_AN_OWNER
    return product
| 33.322581 | 80 | 0.716683 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.