max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
docx/oxml/section.py
|
majacQ/python-docx
| 0
|
6627151
|
encoding: utf-8
"""Section-related custom element classes"""
from __future__ import absolute_import, division, print_function, unicode_literals
from copy import deepcopy
<<<<<<< feature/header
from ..enum.header import WD_HEADER_FOOTER
from ..enum.section import WD_ORIENTATION, WD_SECTION_START
from .simpletypes import (
ST_RelationshipId, ST_SignedTwipsMeasure, ST_TwipsMeasure
)
from .xmlchemy import (
BaseOxmlElement, OptionalAttribute, RequiredAttribute, ZeroOrMore,
ZeroOrOne
)
class CT_HdrFtrRef(BaseOxmlElement):
    """
    `w:headerReference` and `w:footerReference` elements, specifying the
    various headers and footers for a section.
    """
    # BUG FIX: the original read `rId == RequiredAttribute(...)`, a no-op
    # comparison expression; `=` is required so the metaclass registers the
    # `r:id` attribute on the element class.
    rId = RequiredAttribute('r:id', ST_RelationshipId)
=======
from docx.enum.section import WD_HEADER_FOOTER, WD_ORIENTATION, WD_SECTION_START
from docx.oxml.simpletypes import ST_SignedTwipsMeasure, ST_TwipsMeasure, XsdString
from docx.oxml.xmlchemy import (
BaseOxmlElement,
OptionalAttribute,
RequiredAttribute,
ZeroOrMore,
ZeroOrOne,
)
class CT_HdrFtr(BaseOxmlElement):
    """`w:hdr` and `w:ftr`, the root element for header and footer part respectively"""

    p = ZeroOrMore("w:p", successors=())
    tbl = ZeroOrMore("w:tbl", successors=())
    bookmarkStart = ZeroOrMore("w:bookmarkStart", successors=())
    bookmarkEnd = ZeroOrMore("w:bookmarkEnd", successors=())

    def add_bookmarkEnd(self, bookmark_id):
        """Return a `w:bookmarkEnd` element appended to this header/footer.

        The element is linked to its `w:bookmarkStart` counterpart by
        *bookmark_id*; the caller is responsible for ensuring the id matches
        the intended `bookmarkStart` element.
        """
        end_elm = self._add_bookmarkEnd()
        end_elm.id = bookmark_id
        return end_elm

    def add_bookmarkStart(self, name, bookmark_id):
        """Return a `w:bookmarkStart` element appended to this header/footer.

        The element is identified by both *name* and *bookmark_id*; the caller
        is responsible for ensuring both are unique document-wide.
        """
        start_elm = self._add_bookmarkStart()
        start_elm.name = name
        start_elm.id = bookmark_id
        return start_elm
class CT_HdrFtrRef(BaseOxmlElement):
    """`w:headerReference` and `w:footerReference` elements"""
    # Resolved merge conflict: the two branches were identical except for
    # string-quote style; keep one copy (double quotes, matching this file's
    # prevailing style).
    type_ = RequiredAttribute("w:type", WD_HEADER_FOOTER)
    rId = RequiredAttribute("r:id", XsdString)
>>>>>>> develop
class CT_PageMar(BaseOxmlElement):
    """
    ``<w:pgMar>`` element, defining page margins.
    """
    # Margin measurements; each is |None| when its attribute is absent.
    # ``w:top`` and ``w:bottom`` are signed (may be negative); the rest are
    # unsigned twips measures.
    top = OptionalAttribute("w:top", ST_SignedTwipsMeasure)
    right = OptionalAttribute("w:right", ST_TwipsMeasure)
    bottom = OptionalAttribute("w:bottom", ST_SignedTwipsMeasure)
    left = OptionalAttribute("w:left", ST_TwipsMeasure)
    # Distance of header/footer from the corresponding page edge.
    header = OptionalAttribute("w:header", ST_TwipsMeasure)
    footer = OptionalAttribute("w:footer", ST_TwipsMeasure)
    gutter = OptionalAttribute("w:gutter", ST_TwipsMeasure)
class CT_PageSz(BaseOxmlElement):
    """
    ``<w:pgSz>`` element, defining page dimensions and orientation.
    """
    # Page width and height; |None| when the attribute is absent.
    w = OptionalAttribute("w:w", ST_TwipsMeasure)
    h = OptionalAttribute("w:h", ST_TwipsMeasure)
    # Orientation defaults to portrait when the attribute is not present.
    orient = OptionalAttribute(
        "w:orient", WD_ORIENTATION, default=WD_ORIENTATION.PORTRAIT
    )
class CT_SectPr(BaseOxmlElement):
<<<<<<< feature/header
"""
``<w:sectPr>`` element, the container element for section properties.
"""
_tag_seq = (
'w:headerReference', 'w:footerReference', 'w:footnotePr',
'w:endnotePr', 'w:type', 'w:pgSz', 'w:pgMar', 'w:paperSrc',
'w:pgBorders', 'w:lnNumType', 'w:pgNumType', 'w:cols', 'w:formProt',
'w:vAlign', 'w:noEndnote', 'w:titlePg', 'w:textDirection', 'w:bidi',
'w:rtlGutter', 'w:docGrid', 'w:printerSettings', 'w:sectPrChange',
)
headerReference = ZeroOrMore('w:headerReference', successors=_tag_seq[1:])
type = ZeroOrOne('w:type', successors=_tag_seq[5:])
pgSz = ZeroOrOne('w:pgSz', successors=_tag_seq[6:])
pgMar = ZeroOrOne('w:pgMar', successors=_tag_seq[7:])
del _tag_seq
=======
"""`w:sectPr` element, the container element for section properties"""
_tag_seq = (
"w:footnotePr",
"w:endnotePr",
"w:type",
"w:pgSz",
"w:pgMar",
"w:paperSrc",
"w:pgBorders",
"w:lnNumType",
"w:pgNumType",
"w:cols",
"w:formProt",
"w:vAlign",
"w:noEndnote",
"w:titlePg",
"w:textDirection",
"w:bidi",
"w:rtlGutter",
"w:docGrid",
"w:printerSettings",
"w:sectPrChange",
)
headerReference = ZeroOrMore("w:headerReference", successors=_tag_seq)
footerReference = ZeroOrMore("w:footerReference", successors=_tag_seq)
type = ZeroOrOne("w:type", successors=_tag_seq[3:])
pgSz = ZeroOrOne("w:pgSz", successors=_tag_seq[4:])
pgMar = ZeroOrOne("w:pgMar", successors=_tag_seq[5:])
titlePg = ZeroOrOne("w:titlePg", successors=_tag_seq[14:])
del _tag_seq
def add_footerReference(self, type_, rId):
"""Return newly added CT_HdrFtrRef element of *type_* with *rId*.
The element tag is `w:footerReference`.
"""
footerReference = self._add_footerReference()
footerReference.type_ = type_
footerReference.rId = rId
return footerReference
def add_headerReference(self, type_, rId):
"""Return newly added CT_HdrFtrRef element of *type_* with *rId*.
The element tag is `w:headerReference`.
"""
headerReference = self._add_headerReference()
headerReference.type_ = type_
headerReference.rId = rId
return headerReference
>>>>>>> master
@property
def bottom_margin(self):
"""
The value of the ``w:bottom`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.bottom
@bottom_margin.setter
def bottom_margin(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.bottom = value
def clone(self):
"""
Return an exact duplicate of this ``<w:sectPr>`` element tree
suitable for use in adding a section break. All rsid* attributes are
removed from the root ``<w:sectPr>`` element.
"""
clone_sectPr = deepcopy(self)
clone_sectPr.attrib.clear()
return clone_sectPr
@property
def footer(self):
"""
The value of the ``w:footer`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.footer
@footer.setter
def footer(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.footer = value
<<<<<<< feature/header
def get_headerReference_of_type(self, type_member):
"""
Return the `w:headerReference` child having type attribute value
associated with *type_member*, or |None| if not present.
"""
type_str = WD_HEADER_FOOTER.to_xml(type_member)
matches = self.xpath('w:headerReference[@w:type="%s"]' % type_str)
if matches:
return matches[0]
return None
=======
def get_footerReference(self, type_):
"""Return footerReference element of *type_* or None if not present."""
path = "./w:footerReference[@w:type='%s']" % WD_HEADER_FOOTER.to_xml(type_)
footerReferences = self.xpath(path)
if not footerReferences:
return None
return footerReferences[0]
def get_headerReference(self, type_):
"""Return headerReference element of *type_* or None if not present."""
matching_headerReferences = self.xpath(
"./w:headerReference[@w:type='%s']" % WD_HEADER_FOOTER.to_xml(type_)
)
if len(matching_headerReferences) == 0:
return None
return matching_headerReferences[0]
>>>>>>> master
@property
def gutter(self):
"""
The value of the ``w:gutter`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.gutter
@gutter.setter
def gutter(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.gutter = value
@property
def header(self):
"""
The value of the ``w:header`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.header
@header.setter
def header(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.header = value
@property
def left_margin(self):
"""
The value of the ``w:left`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.left
@left_margin.setter
def left_margin(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.left = value
@property
def orientation(self):
"""
The member of the ``WD_ORIENTATION`` enumeration corresponding to the
value of the ``orient`` attribute of the ``<w:pgSz>`` child element,
or ``WD_ORIENTATION.PORTRAIT`` if not present.
"""
pgSz = self.pgSz
if pgSz is None:
return WD_ORIENTATION.PORTRAIT
return pgSz.orient
@orientation.setter
def orientation(self, value):
pgSz = self.get_or_add_pgSz()
pgSz.orient = value
@property
def page_height(self):
"""
Value in EMU of the ``h`` attribute of the ``<w:pgSz>`` child
element, or |None| if not present.
"""
pgSz = self.pgSz
if pgSz is None:
return None
return pgSz.h
@page_height.setter
def page_height(self, value):
pgSz = self.get_or_add_pgSz()
pgSz.h = value
@property
def page_width(self):
"""
Value in EMU of the ``w`` attribute of the ``<w:pgSz>`` child
element, or |None| if not present.
"""
pgSz = self.pgSz
if pgSz is None:
return None
return pgSz.w
@page_width.setter
def page_width(self, value):
pgSz = self.get_or_add_pgSz()
pgSz.w = value
@property
def preceding_sectPr(self):
"""sectPr immediately preceding this one or None if this is the first."""
# ---[1] predicate returns list of zero or one value---
preceding_sectPrs = self.xpath("./preceding::w:sectPr[1]")
return preceding_sectPrs[0] if len(preceding_sectPrs) > 0 else None
    def remove_footerReference(self, type_):
        """Return rId of w:footerReference child of *type_* after removing it.

        NOTE(review): assumes a footerReference of *type_* is present; when it
        is not, `get_footerReference` returns None and the `.rId` access
        raises AttributeError — confirm callers guarantee presence.
        """
        footerReference = self.get_footerReference(type_)
        rId = footerReference.rId
        self.remove(footerReference)
        return rId
    def remove_headerReference(self, type_):
        """Return rId of w:headerReference child of *type_* after removing it.

        NOTE(review): assumes a headerReference of *type_* is present; when it
        is not, `get_headerReference` returns None and the `.rId` access
        raises AttributeError — confirm callers guarantee presence.
        """
        headerReference = self.get_headerReference(type_)
        rId = headerReference.rId
        self.remove(headerReference)
        return rId
@property
def right_margin(self):
"""
The value of the ``w:right`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.right
@right_margin.setter
def right_margin(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.right = value
@property
def start_type(self):
"""
The member of the ``WD_SECTION_START`` enumeration corresponding to
the value of the ``val`` attribute of the ``<w:type>`` child element,
or ``WD_SECTION_START.NEW_PAGE`` if not present.
"""
type = self.type
if type is None or type.val is None:
return WD_SECTION_START.NEW_PAGE
return type.val
@start_type.setter
def start_type(self, value):
if value is None or value is WD_SECTION_START.NEW_PAGE:
self._remove_type()
return
type = self.get_or_add_type()
type.val = value
    @property
    def titlePg_val(self):
        """Value of `w:titlePg/@w:val`, or |False| if the element is not present."""
        titlePg = self.titlePg
        if titlePg is None:
            return False
        return titlePg.val

    @titlePg_val.setter
    def titlePg_val(self, value):
        # Absence of w:titlePg is equivalent to False, so remove the element
        # rather than writing an explicit false value.
        if value in [None, False]:
            self._remove_titlePg()
        else:
            self.get_or_add_titlePg().val = value
@property
def top_margin(self):
"""
The value of the ``w:top`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.top
@top_margin.setter
def top_margin(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.top = value
class CT_SectType(BaseOxmlElement):
    """
    ``<w:sectType>`` element, defining the section start type.
    """
    # NOTE(review): the docstring names ``w:sectType`` but the only mapped
    # attribute is ``w:val`` holding a WD_SECTION_START member, which matches
    # the ``w:type`` child used by CT_SectPr above — confirm the element tag
    # this class is registered for.
    val = OptionalAttribute("w:val", WD_SECTION_START)
|
encoding: utf-8
"""Section-related custom element classes"""
from __future__ import absolute_import, division, print_function, unicode_literals
from copy import deepcopy
<<<<<<< feature/header
from ..enum.header import WD_HEADER_FOOTER
from ..enum.section import WD_ORIENTATION, WD_SECTION_START
from .simpletypes import (
ST_RelationshipId, ST_SignedTwipsMeasure, ST_TwipsMeasure
)
from .xmlchemy import (
BaseOxmlElement, OptionalAttribute, RequiredAttribute, ZeroOrMore,
ZeroOrOne
)
class CT_HdrFtrRef(BaseOxmlElement):
"""
`w:headerReference` and `w:footerReference` elements, specifying the
various headers and footers for a section.
"""
rId == RequiredAttribute('r:id', ST_RelationshipId)
=======
from docx.enum.section import WD_HEADER_FOOTER, WD_ORIENTATION, WD_SECTION_START
from docx.oxml.simpletypes import ST_SignedTwipsMeasure, ST_TwipsMeasure, XsdString
from docx.oxml.xmlchemy import (
BaseOxmlElement,
OptionalAttribute,
RequiredAttribute,
ZeroOrMore,
ZeroOrOne,
)
class CT_HdrFtr(BaseOxmlElement):
"""`w:hdr` and `w:ftr`, the root element for header and footer part respectively"""
p = ZeroOrMore("w:p", successors=())
tbl = ZeroOrMore("w:tbl", successors=())
bookmarkStart = ZeroOrMore("w:bookmarkStart", successors=())
bookmarkEnd = ZeroOrMore("w:bookmarkEnd", successors=())
def add_bookmarkEnd(self, bookmark_id):
"""Return `w:bookmarkEnd` element added at end of this header or footer.
The newly added `w:bookmarkEnd` element is linked to it's `w:bookmarkStart`
counterpart by `bookmark_id`. It is the caller's responsibility to determine
`bookmark_id` matches that of the intended `bookmarkStart` element.
"""
bookmarkEnd = self._add_bookmarkEnd()
bookmarkEnd.id = bookmark_id
return bookmarkEnd
def add_bookmarkStart(self, name, bookmark_id):
"""Return `w:bookmarkStart` element added at the end of this header or footer.
The newly added `w:bookmarkStart` element is identified by both `name` and
`bookmark_id`. It is the caller's responsibility to determine that both `name`
and `bookmark_id` are unique, document-wide.
"""
bookmarkStart = self._add_bookmarkStart()
bookmarkStart.name = name
bookmarkStart.id = bookmark_id
return bookmarkStart
class CT_HdrFtrRef(BaseOxmlElement):
"""`w:headerReference` and `w:footerReference` elements"""
<<<<<<< feature/bookmarks
type_ = RequiredAttribute("w:type", WD_HEADER_FOOTER)
rId = RequiredAttribute("r:id", XsdString)
=======
type_ = RequiredAttribute('w:type', WD_HEADER_FOOTER)
rId = RequiredAttribute('r:id', XsdString)
>>>>>>> master
>>>>>>> develop
class CT_PageMar(BaseOxmlElement):
"""
``<w:pgMar>`` element, defining page margins.
"""
top = OptionalAttribute("w:top", ST_SignedTwipsMeasure)
right = OptionalAttribute("w:right", ST_TwipsMeasure)
bottom = OptionalAttribute("w:bottom", ST_SignedTwipsMeasure)
left = OptionalAttribute("w:left", ST_TwipsMeasure)
header = OptionalAttribute("w:header", ST_TwipsMeasure)
footer = OptionalAttribute("w:footer", ST_TwipsMeasure)
gutter = OptionalAttribute("w:gutter", ST_TwipsMeasure)
class CT_PageSz(BaseOxmlElement):
"""
``<w:pgSz>`` element, defining page dimensions and orientation.
"""
w = OptionalAttribute("w:w", ST_TwipsMeasure)
h = OptionalAttribute("w:h", ST_TwipsMeasure)
orient = OptionalAttribute(
"w:orient", WD_ORIENTATION, default=WD_ORIENTATION.PORTRAIT
)
class CT_SectPr(BaseOxmlElement):
<<<<<<< feature/header
"""
``<w:sectPr>`` element, the container element for section properties.
"""
_tag_seq = (
'w:headerReference', 'w:footerReference', 'w:footnotePr',
'w:endnotePr', 'w:type', 'w:pgSz', 'w:pgMar', 'w:paperSrc',
'w:pgBorders', 'w:lnNumType', 'w:pgNumType', 'w:cols', 'w:formProt',
'w:vAlign', 'w:noEndnote', 'w:titlePg', 'w:textDirection', 'w:bidi',
'w:rtlGutter', 'w:docGrid', 'w:printerSettings', 'w:sectPrChange',
)
headerReference = ZeroOrMore('w:headerReference', successors=_tag_seq[1:])
type = ZeroOrOne('w:type', successors=_tag_seq[5:])
pgSz = ZeroOrOne('w:pgSz', successors=_tag_seq[6:])
pgMar = ZeroOrOne('w:pgMar', successors=_tag_seq[7:])
del _tag_seq
=======
"""`w:sectPr` element, the container element for section properties"""
_tag_seq = (
"w:footnotePr",
"w:endnotePr",
"w:type",
"w:pgSz",
"w:pgMar",
"w:paperSrc",
"w:pgBorders",
"w:lnNumType",
"w:pgNumType",
"w:cols",
"w:formProt",
"w:vAlign",
"w:noEndnote",
"w:titlePg",
"w:textDirection",
"w:bidi",
"w:rtlGutter",
"w:docGrid",
"w:printerSettings",
"w:sectPrChange",
)
headerReference = ZeroOrMore("w:headerReference", successors=_tag_seq)
footerReference = ZeroOrMore("w:footerReference", successors=_tag_seq)
type = ZeroOrOne("w:type", successors=_tag_seq[3:])
pgSz = ZeroOrOne("w:pgSz", successors=_tag_seq[4:])
pgMar = ZeroOrOne("w:pgMar", successors=_tag_seq[5:])
titlePg = ZeroOrOne("w:titlePg", successors=_tag_seq[14:])
del _tag_seq
def add_footerReference(self, type_, rId):
"""Return newly added CT_HdrFtrRef element of *type_* with *rId*.
The element tag is `w:footerReference`.
"""
footerReference = self._add_footerReference()
footerReference.type_ = type_
footerReference.rId = rId
return footerReference
def add_headerReference(self, type_, rId):
"""Return newly added CT_HdrFtrRef element of *type_* with *rId*.
The element tag is `w:headerReference`.
"""
headerReference = self._add_headerReference()
headerReference.type_ = type_
headerReference.rId = rId
return headerReference
>>>>>>> master
@property
def bottom_margin(self):
"""
The value of the ``w:bottom`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.bottom
@bottom_margin.setter
def bottom_margin(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.bottom = value
def clone(self):
"""
Return an exact duplicate of this ``<w:sectPr>`` element tree
suitable for use in adding a section break. All rsid* attributes are
removed from the root ``<w:sectPr>`` element.
"""
clone_sectPr = deepcopy(self)
clone_sectPr.attrib.clear()
return clone_sectPr
@property
def footer(self):
"""
The value of the ``w:footer`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.footer
@footer.setter
def footer(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.footer = value
<<<<<<< feature/header
def get_headerReference_of_type(self, type_member):
"""
Return the `w:headerReference` child having type attribute value
associated with *type_member*, or |None| if not present.
"""
type_str = WD_HEADER_FOOTER.to_xml(type_member)
matches = self.xpath('w:headerReference[@w:type="%s"]' % type_str)
if matches:
return matches[0]
return None
=======
def get_footerReference(self, type_):
"""Return footerReference element of *type_* or None if not present."""
path = "./w:footerReference[@w:type='%s']" % WD_HEADER_FOOTER.to_xml(type_)
footerReferences = self.xpath(path)
if not footerReferences:
return None
return footerReferences[0]
def get_headerReference(self, type_):
"""Return headerReference element of *type_* or None if not present."""
matching_headerReferences = self.xpath(
"./w:headerReference[@w:type='%s']" % WD_HEADER_FOOTER.to_xml(type_)
)
if len(matching_headerReferences) == 0:
return None
return matching_headerReferences[0]
>>>>>>> master
@property
def gutter(self):
"""
The value of the ``w:gutter`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.gutter
@gutter.setter
def gutter(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.gutter = value
@property
def header(self):
"""
The value of the ``w:header`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.header
@header.setter
def header(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.header = value
@property
def left_margin(self):
"""
The value of the ``w:left`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.left
@left_margin.setter
def left_margin(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.left = value
@property
def orientation(self):
"""
The member of the ``WD_ORIENTATION`` enumeration corresponding to the
value of the ``orient`` attribute of the ``<w:pgSz>`` child element,
or ``WD_ORIENTATION.PORTRAIT`` if not present.
"""
pgSz = self.pgSz
if pgSz is None:
return WD_ORIENTATION.PORTRAIT
return pgSz.orient
@orientation.setter
def orientation(self, value):
pgSz = self.get_or_add_pgSz()
pgSz.orient = value
@property
def page_height(self):
"""
Value in EMU of the ``h`` attribute of the ``<w:pgSz>`` child
element, or |None| if not present.
"""
pgSz = self.pgSz
if pgSz is None:
return None
return pgSz.h
@page_height.setter
def page_height(self, value):
pgSz = self.get_or_add_pgSz()
pgSz.h = value
@property
def page_width(self):
"""
Value in EMU of the ``w`` attribute of the ``<w:pgSz>`` child
element, or |None| if not present.
"""
pgSz = self.pgSz
if pgSz is None:
return None
return pgSz.w
@page_width.setter
def page_width(self, value):
pgSz = self.get_or_add_pgSz()
pgSz.w = value
@property
def preceding_sectPr(self):
"""sectPr immediately preceding this one or None if this is the first."""
# ---[1] predicate returns list of zero or one value---
preceding_sectPrs = self.xpath("./preceding::w:sectPr[1]")
return preceding_sectPrs[0] if len(preceding_sectPrs) > 0 else None
def remove_footerReference(self, type_):
"""Return rId of w:footerReference child of *type_* after removing it."""
footerReference = self.get_footerReference(type_)
rId = footerReference.rId
self.remove(footerReference)
return rId
def remove_headerReference(self, type_):
"""Return rId of w:headerReference child of *type_* after removing it."""
headerReference = self.get_headerReference(type_)
rId = headerReference.rId
self.remove(headerReference)
return rId
@property
def right_margin(self):
"""
The value of the ``w:right`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.right
@right_margin.setter
def right_margin(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.right = value
@property
def start_type(self):
"""
The member of the ``WD_SECTION_START`` enumeration corresponding to
the value of the ``val`` attribute of the ``<w:type>`` child element,
or ``WD_SECTION_START.NEW_PAGE`` if not present.
"""
type = self.type
if type is None or type.val is None:
return WD_SECTION_START.NEW_PAGE
return type.val
@start_type.setter
def start_type(self, value):
if value is None or value is WD_SECTION_START.NEW_PAGE:
self._remove_type()
return
type = self.get_or_add_type()
type.val = value
@property
def titlePg_val(self):
"""Value of `w:titlePg/@val` or |None| if not present"""
titlePg = self.titlePg
if titlePg is None:
return False
return titlePg.val
@titlePg_val.setter
def titlePg_val(self, value):
if value in [None, False]:
self._remove_titlePg()
else:
self.get_or_add_titlePg().val = value
@property
def top_margin(self):
"""
The value of the ``w:top`` attribute in the ``<w:pgMar>`` child
element, as a |Length| object, or |None| if either the element or the
attribute is not present.
"""
pgMar = self.pgMar
if pgMar is None:
return None
return pgMar.top
@top_margin.setter
def top_margin(self, value):
pgMar = self.get_or_add_pgMar()
pgMar.top = value
class CT_SectType(BaseOxmlElement):
"""
``<w:sectType>`` element, defining the section start type.
"""
val = OptionalAttribute("w:val", WD_SECTION_START)
|
en
| 0.743806
|
Section-related custom element classes `w:headerReference` and `w:footerReference` elements, specifying the various headers and footers for a section. `w:hdr` and `w:ftr`, the root element for header and footer part respectively Return `w:bookmarkEnd` element added at end of this header or footer. The newly added `w:bookmarkEnd` element is linked to it's `w:bookmarkStart` counterpart by `bookmark_id`. It is the caller's responsibility to determine `bookmark_id` matches that of the intended `bookmarkStart` element. Return `w:bookmarkStart` element added at the end of this header or footer. The newly added `w:bookmarkStart` element is identified by both `name` and `bookmark_id`. It is the caller's responsibility to determine that both `name` and `bookmark_id` are unique, document-wide. `w:headerReference` and `w:footerReference` elements ``<w:pgMar>`` element, defining page margins. ``<w:pgSz>`` element, defining page dimensions and orientation. ``<w:sectPr>`` element, the container element for section properties. `w:sectPr` element, the container element for section properties Return newly added CT_HdrFtrRef element of *type_* with *rId*. The element tag is `w:footerReference`. Return newly added CT_HdrFtrRef element of *type_* with *rId*. The element tag is `w:headerReference`. The value of the ``w:bottom`` attribute in the ``<w:pgMar>`` child element, as a |Length| object, or |None| if either the element or the attribute is not present. Return an exact duplicate of this ``<w:sectPr>`` element tree suitable for use in adding a section break. All rsid* attributes are removed from the root ``<w:sectPr>`` element. The value of the ``w:footer`` attribute in the ``<w:pgMar>`` child element, as a |Length| object, or |None| if either the element or the attribute is not present. Return the `w:headerReference` child having type attribute value associated with *type_member*, or |None| if not present. Return footerReference element of *type_* or None if not present. 
Return headerReference element of *type_* or None if not present. The value of the ``w:gutter`` attribute in the ``<w:pgMar>`` child element, as a |Length| object, or |None| if either the element or the attribute is not present. The value of the ``w:header`` attribute in the ``<w:pgMar>`` child element, as a |Length| object, or |None| if either the element or the attribute is not present. The value of the ``w:left`` attribute in the ``<w:pgMar>`` child element, as a |Length| object, or |None| if either the element or the attribute is not present. The member of the ``WD_ORIENTATION`` enumeration corresponding to the value of the ``orient`` attribute of the ``<w:pgSz>`` child element, or ``WD_ORIENTATION.PORTRAIT`` if not present. Value in EMU of the ``h`` attribute of the ``<w:pgSz>`` child element, or |None| if not present. Value in EMU of the ``w`` attribute of the ``<w:pgSz>`` child element, or |None| if not present. sectPr immediately preceding this one or None if this is the first. # ---[1] predicate returns list of zero or one value--- Return rId of w:footerReference child of *type_* after removing it. Return rId of w:headerReference child of *type_* after removing it. The value of the ``w:right`` attribute in the ``<w:pgMar>`` child element, as a |Length| object, or |None| if either the element or the attribute is not present. The member of the ``WD_SECTION_START`` enumeration corresponding to the value of the ``val`` attribute of the ``<w:type>`` child element, or ``WD_SECTION_START.NEW_PAGE`` if not present. Value of `w:titlePg/@val` or |None| if not present The value of the ``w:top`` attribute in the ``<w:pgMar>`` child element, as a |Length| object, or |None| if either the element or the attribute is not present. ``<w:sectType>`` element, defining the section start type.
| 2.155281
| 2
|
ProxyPool/proxypool/importer.py
|
binyoucai/ProxyPool
| 0
|
6627152
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
@content : 手动导入模块
@Time : 2018/7/28 上午10:57
@Author : 北冥神君
@File : importer.py
@Software: PyCharm
"""
from proxypool.db import RedisClient
conn = RedisClient()
def set(proxy):
    """Add *proxy* to the Redis-backed pool and print the outcome.

    NOTE(review): shadows the builtin ``set``; name kept because ``scan``
    below calls it by this name.
    """
    result = conn.add(proxy)
    print(proxy)
    print('录入成功' if result else '录入失败')
def scan():
    """Interactively read proxies from stdin, storing each until 'exit'."""
    print('请输入代理, 输入exit退出读入')
    while True:
        proxy = input()
        if proxy == 'exit':
            break
        # Store via the module-level `set` helper (not the builtin).
        set(proxy)
if __name__ == '__main__':
scan()
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
@content : 手动导入模块
@Time : 2018/7/28 上午10:57
@Author : 北冥神君
@File : importer.py
@Software: PyCharm
"""
from proxypool.db import RedisClient
conn = RedisClient()
def set(proxy):
result = conn.add(proxy)
print(proxy)
print('录入成功' if result else '录入失败')
def scan():
print('请输入代理, 输入exit退出读入')
while True:
proxy = input()
if proxy == 'exit':
break
set(proxy)
if __name__ == '__main__':
scan()
|
zh
| 0.373774
|
#!/usr/bin/python3 # -*- coding: utf-8 -*- @content : 手动导入模块 @Time : 2018/7/28 上午10:57 @Author : 北冥神君 @File : importer.py @Software: PyCharm
| 2.428527
| 2
|
profiles/admin.py
|
xhnilic3/django-profiles
| 0
|
6627153
|
from django.contrib import admin
from profiles.profiles.models import Profile, Address, School, Membership
class ProfileAdmin(admin.ModelAdmin):
    # Default ModelAdmin behavior; customize list_display etc. here if needed.
    pass

class AddressAdmin(admin.ModelAdmin):
    pass

class SchoolAdmin(admin.ModelAdmin):
    pass

class MembershipAdmin(admin.ModelAdmin):
    pass

# Expose each model in the Django admin site with its admin class.
admin.site.register(Profile, ProfileAdmin)
admin.site.register(Address, AddressAdmin)
admin.site.register(School, SchoolAdmin)
admin.site.register(Membership, MembershipAdmin)
|
from django.contrib import admin
from profiles.profiles.models import Profile, Address, School, Membership
class ProfileAdmin(admin.ModelAdmin):
pass
class AddressAdmin(admin.ModelAdmin):
pass
class SchoolAdmin(admin.ModelAdmin):
pass
class MembershipAdmin(admin.ModelAdmin):
pass
admin.site.register(Profile, ProfileAdmin)
admin.site.register(Address, AddressAdmin)
admin.site.register(School, SchoolAdmin)
admin.site.register(Membership, MembershipAdmin)
|
none
| 1
| 1.734532
| 2
|
|
birdseye/results.py
|
IQTLabs/BirdsEye
| 7
|
6627154
|
import ast
import os
import re
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .definitions import RUN_DIR
from .utils import read_header_log
reward_str = {'range_reward': 'State Dependent Reward',
'entropy_collision_reward': 'Belief Dependent Reward'}
sensor_str = {'drone': 'Bearings Sensor',
'signalstrength': 'Signal Strength Sensor'}
metric_str = {'centroid_err': 'Centroid Distance'}
def plotter(plot_func, title=None, target_start=78, sensors=None, rewards=None, **kwargs):
    """Draw a grid of subplots, one per (sensor, reward) combination.

    *plot_func* is called as ``plot_func(ax, config, **kwargs)`` for each
    cell; *config* carries the run-filter settings including *target_start*.

    Fix: the original used mutable list defaults for *sensors*/*rewards*;
    replaced with None sentinels (same effective defaults, backward
    compatible).
    """
    if sensors is None:
        sensors = ['drone', 'signalstrength']
    if rewards is None:
        rewards = ['range_reward', 'entropy_collision_reward']
    fig = plt.figure(figsize=(20, 16), constrained_layout=True)
    fig.suptitle(title)
    subfig_rows = fig.subfigures(nrows=len(sensors), ncols=1)
    axs = [row.subplots(nrows=1, ncols=len(rewards)) for row in subfig_rows]
    for i, s in enumerate(sensors):
        for j, r in enumerate(rewards):
            print('{} & {}'.format(sensor_str[s], reward_str[r]))
            config = {'datetime_start': '2021-06-18T00:00:00', 'reward': r,
                      'sensor': s, 'target_start': target_start, 'target_speed': 1}
            plot_func(axs[i][j], config, **kwargs)
    plt.show()
def separate_plotter(plot_func, title=None, target_start=78, sensors=None, rewards=None, **kwargs):
    """Draw one subfigure row per (sensor, reward) pair, two panels per row.

    The left panel excludes DQN runs (``dqn=False``); the right excludes MCTS
    runs (``mcts=False``).

    Fixes: (1) mutable list defaults for *sensors*/*rewards* replaced with
    None sentinels; (2) the flattened row index was ``i*len(sensors)+j``,
    which is only correct when the two lists have equal length — it must be
    ``i*len(rewards)+j`` (identical result for the default 2x2 case).
    """
    if sensors is None:
        sensors = ['drone', 'signalstrength']
    if rewards is None:
        rewards = ['range_reward', 'entropy_collision_reward']
    fig = plt.figure(figsize=(20, 26), constrained_layout=True)
    fig.suptitle(title)
    subfig_rows = fig.subfigures(nrows=len(sensors) * len(rewards), ncols=1)
    axs = [row.subplots(nrows=1, ncols=2) for row in subfig_rows]
    for i, s in enumerate(sensors):
        for j, r in enumerate(rewards):
            config = {'datetime_start': '2021-06-18T00:00:00', 'reward': r,
                      'sensor': s, 'target_start': target_start, 'target_speed': 1}
            row = i * len(rewards) + j  # one row per (sensor, reward) pair
            plot_func(axs[row][0], config, dqn=False, **kwargs)
            plot_func(axs[row][1], config, mcts=False, **kwargs)
    plt.show()
def two_metric_grid(ax1, config, mcts=True, dqn=True, metric1='r_err', metric2='theta_err', variance_bars=False, verbose=False, timing=True, limit=1, y_lim=125):
    """Dual-axis panel: per-time-step median of `metric1` (left axis) and
    `metric2` (twinned right axis) for the latest `limit` MCTS and/or DQN
    runs matching `config` (see `filter_runs`).

    Solid lines are MCTS runs, dashed lines DQN. With `variance_bars`, the
    16th-84th percentile band is shaded. Inference-time stats are printed
    when `timing` is set. NOTE(review): `y_lim` is accepted but never used
    here — confirm whether it should cap the left axis.
    """
    # was panel_dual_axis
    # set strings
    reward_str = {'range_reward': 'State Dependent Reward',
                  'entropy_collision_reward': 'Belief Dependent Reward'}
    sensor_str = {'drone': 'Bearings Sensor',
                  'signalstrength': 'Signal Strength Sensor'}
    metric_str = {
        'centroid_err': 'Centroid Distance (m)', 'r_err': r'$\delta_{r}$ (m)', 'theta_err': r'$\delta_{\theta}$ (degrees)'}
    # get configs and run data
    mcts_config_filter = {}
    dqn_config_filter = {}
    mcts_config_filter.update(config)
    dqn_config_filter.update(config)
    filtered_dqn_runs = sorted(filter_runs('dqn', dqn_config_filter))
    filtered_mcts_runs = sorted(filter_runs('mcts', mcts_config_filter))
    sensor = config.get('sensor', 'all')
    reward = config.get('reward', 'all')
    # Right-hand axis shares the x axis with ax1.
    ax2 = ax1.twinx()
    # blue green color scheme
    if limit == 2:
        ax1.set_prop_cycle(color=['#3288bd', '#d53e4f'])
        ax2.set_prop_cycle(color=['#66c2a5', '#f46d43'])
    else:
        ax1.set_prop_cycle(color=['#1f78b4', '#a6cee3', '#33a02c', '#b2df8a'])
    # Fallback inference times used when a run logged none
    # (presumably seconds — TODO confirm units).
    mcts_avg_inference_time = 10
    dqn_avg_inference_time = 10
    lns = []  # collected line handles for the combined legend
    if mcts:
        for r in filtered_mcts_runs[-limit:]:
            config = get_config('mcts', r)
            if verbose:
                print(r, '\n', config, '\n', '========================')
            data = get_data('mcts', r)
            if data.get('inference_times', None) is not None:
                mcts_avg_inference_time = np.mean(list(data['inference_times'].apply(
                    lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
            # Metric columns hold stringified per-episode lists; parse back
            # to floats and take absolute error.
            plot_data1 = np.abs(list(data[metric1].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
            med1 = np.percentile(list(plot_data1), 50, axis=0)
            low1 = np.percentile(list(plot_data1), 16, axis=0)
            high1 = np.percentile(list(plot_data1), 84, axis=0)
            l1 = ax1.plot(med1, '-', label='MCTS, '+metric_str[metric1])
            plot_data2 = np.abs(list(data[metric2].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
            med2 = np.percentile(list(plot_data2), 50, axis=0)
            low2 = np.percentile(list(plot_data2), 16, axis=0)
            high2 = np.percentile(list(plot_data2), 84, axis=0)
            l2 = ax2.plot(med2, '-', label='MCTS, '+metric_str[metric2])
            if variance_bars:
                ax1.fill_between(np.arange(len(med1)), low1, high1, alpha=0.2)
                ax2.fill_between(np.arange(len(med2)), low2, high2, alpha=0.2)
            lns += l1+l2
        # 3
    if timing:
        print('MCTS inference time={:.2e}s'.format(
            mcts_avg_inference_time))
    if dqn:
        for r in filtered_dqn_runs[-limit:]:
            config = get_config('dqn', r)
            if verbose:
                print(r, '\n', config, '\n', '========================')
            data = get_data('dqn', r)
            if data.get('inference_times', None) is not None:
                dqn_avg_inference_time = np.mean(list(data['inference_times'].apply(
                    lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
            plot_data1 = np.abs(list(data[metric1].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
            med1 = np.percentile(list(plot_data1), 50, axis=0)
            low1 = np.percentile(list(plot_data1), 16, axis=0)
            high1 = np.percentile(list(plot_data1), 84, axis=0)
            l3 = ax1.plot(med1, '--', label='DQN, '+metric_str[metric1])
            plot_data2 = np.abs(list(data[metric2].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
            med2 = np.percentile(list(plot_data2), 50, axis=0)
            low2 = np.percentile(list(plot_data2), 16, axis=0)
            high2 = np.percentile(list(plot_data2), 84, axis=0)
            l4 = ax2.plot(med2, '--', label='DQN, '+metric_str[metric2])
            if variance_bars:
                ax1.fill_between(np.arange(len(med1)), low1, high1, alpha=0.2)
                ax2.fill_between(np.arange(len(med2)), low2, high2, alpha=0.2)
            lns += l3+l4
    if timing:
        print('DQN inference time={:.2e}s'.format(
            dqn_avg_inference_time))
    if mcts and dqn and timing:
        print(
            'Speedup (MCTS/DQN) = {:.2f}x'.format(mcts_avg_inference_time/dqn_avg_inference_time))
        print('======================================')
    ax1.margins(0)
    ax1.set_xlabel('Time Step', fontsize=16)
    ax1.set_ylabel(metric_str[metric1], fontsize=24)
    ax2.set_ylabel(metric_str[metric2], fontsize=24)
    ax1.tick_params(axis='both', which='both', labelsize=14)
    ax2.tick_params(axis='both', which='both', labelsize=14)
    ax1.set_title('{} & {}'.format(sensor_str[sensor], reward_str[reward]))
    labs = [l.get_label() for l in lns]
    ax1.legend(lns, labs, fontsize=20)
def single_std_dev(ax1, config, metric='r', variance_bars=False, verbose=False, limit=1, y_lim=125):
    """Plot the particle-filter standard deviation of `metric` ('r' or
    'theta') per time step for the latest `limit` MCTS (solid) and DQN
    (dashed) runs matching `config`.

    The std dev is the square root of the matching diagonal entry of the
    logged 4x4 particle-filter covariance; runs without a `pf_cov` column
    terminate that method's loop. NOTE(review): `y_lim` is accepted but
    never used here.
    """
    # was single_plot_var
    # Diagonal index of the 4x4 covariance for each supported metric.
    cov_idx = {'r': 0, 'theta': 1}
    reward_str = {'range_reward': 'State Dependent Reward',
                  'entropy_collision_reward': 'Belief Dependent Reward'}
    reward_labels = {'range_reward': 'state',
                     'entropy_collision_reward': 'belief'}
    sensor_str = {'drone': 'Bearings Sensor',
                  'signalstrength': 'Signal Strength Sensor'}
    metric_str = {'r': 'r', 'theta': '\\theta'}
    metric_s = metric_str.get(metric, metric)
    label_str = ''
    mcts_config_filter = {}
    dqn_config_filter = {}
    mcts_config_filter.update(config)
    dqn_config_filter.update(config)
    filtered_dqn_runs = sorted(filter_runs('dqn', dqn_config_filter))
    filtered_mcts_runs = sorted(filter_runs('mcts', mcts_config_filter))
    # red blue color scheme
    if limit == 1:
        ax1.set_prop_cycle(color=['#ca0020', '#0571b0'])
    else:
        ax1.set_prop_cycle(color=['#ca0020', '#f4a582', '#0571b0', '#92c5de'])
    # blue green color scheme (overrides the red/blue cycle set above)
    if limit == 1:
        ax1.set_prop_cycle(color=['#1f78b4', '#33a02c'])
    elif limit == 2:
        ax1.set_prop_cycle(color=['#3288bd', '#66c2a5', '#d53e4f', '#f46d43'])
    else:
        ax1.set_prop_cycle(color=['#1f78b4', '#a6cee3', '#33a02c', '#b2df8a'])
    # Fallback inference times used when a run logged none
    # (presumably seconds — TODO confirm units).
    mcts_avg_inference_time = 10
    dqn_avg_inference_time = 10
    for r in filtered_mcts_runs[-limit:]:
        config = get_config('mcts', r)
        if verbose:
            print(r, '\n')
            print(config)
            print('=======================')
        data = get_data('mcts', r)
        if data.get('inference_times', None) is not None:
            mcts_avg_inference_time = np.mean(list(data['inference_times'].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
        pf_cov = data.get('pf_cov', None)
        if pf_cov is not None:
            # Each row is a stringified list of flattened 4x4 covariances;
            # parse and reshape to (episodes, steps, 4, 4).
            pf_cov = np.array([ast.literal_eval(cov_str) for cov_str in pf_cov])
            pf_cov = pf_cov.reshape(pf_cov.shape[0], pf_cov.shape[1], 4, 4)
        else:
            break
        variance = np.sqrt(pf_cov[:, :, cov_idx[metric], cov_idx[metric]])
        variance_med = np.percentile(variance, 50, axis=0)
        variance_low = np.percentile(variance, 16, axis=0)
        variance_high = np.percentile(variance, 84, axis=0)
        if limit == 2:
            label_str = r', $R_{{\mathrm{{{}}}}}$'.format(
                reward_labels[config['Methods']['reward']])
        ax1.plot(variance_med, '-', label='MCTS'+'{}'.format(label_str))
        if variance_bars:
            ax1.fill_between(np.arange(len(variance_med)),
                             variance_low, variance_high, alpha=0.2)
        print('MCTS, {}, {}, inference time={:.2e}s'.format(
            sensor_str[config['Methods']['sensor']], reward_str[config['Methods']['reward']], mcts_avg_inference_time))
    for r in filtered_dqn_runs[-limit:]:
        config = get_config('dqn', r)
        if verbose:
            print(r, '\n')
            print(config)
            print('=======================')
        data = get_data('dqn', r)
        if data.get('inference_times', None) is not None:
            dqn_avg_inference_time = np.mean(list(data['inference_times'].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
        pf_cov = data.get('pf_cov', None)
        if pf_cov is not None:
            pf_cov = np.array([ast.literal_eval(cov_str) for cov_str in pf_cov])
            pf_cov = pf_cov.reshape(pf_cov.shape[0], pf_cov.shape[1], 4, 4)
        else:
            break
        variance = np.sqrt(pf_cov[:, :, cov_idx[metric], cov_idx[metric]])
        variance_med = np.percentile(variance, 50, axis=0)
        variance_low = np.percentile(variance, 16, axis=0)
        variance_high = np.percentile(variance, 84, axis=0)
        if limit == 2:
            label_str = r', $R_{{\mathrm{{{}}}}}$'.format(
                reward_labels[config['Methods']['reward']])
        ax1.plot(variance_med, '--', label='DQN'+'{}'.format(label_str))
        if variance_bars:
            ax1.fill_between(np.arange(len(variance_med)),
                             variance_low, variance_high, alpha=0.2)
        # plt caption
        print('DQN, {}, {}, inference time={:.2e}s'.format(
            sensor_str[config['Methods']['sensor']], reward_str[config['Methods']['reward']], dqn_avg_inference_time))
    print(
        'Speedup (MCTS/DQN) = {:.2f}x'.format(mcts_avg_inference_time/dqn_avg_inference_time))
    print('======================================')
    ax1.margins(0)
    ax1.set_xlabel('Time Step', fontsize=16)
    ax1.set_ylabel(r'$\sigma_{{{}}}$'.format(metric_s), fontsize=22)
    ax1.tick_params(axis='both', which='both', labelsize=14)
    ax1.legend(fontsize=20)
def std_dev_grid(ax1, config, mcts=True, dqn=True, variance_bars=False, verbose=False, timing=True, limit=1, y_lim=125):
    """Dual-axis panel of particle-filter std devs: sigma_r on ax1 (left)
    and sigma_theta on a twinned right axis, per time step, for the latest
    `limit` MCTS (solid) and/or DQN (dashed) runs matching `config`.

    Std devs are the square roots of the first two diagonal entries of the
    logged 4x4 covariance; runs without a `pf_cov` column terminate that
    method's loop. NOTE(review): `y_lim` is accepted but never used here.
    """
    # was single_plot_combined_cov
    reward_str = {'range_reward': 'State Dependent Reward',
                  'entropy_collision_reward': 'Belief Dependent Reward'}
    sensor_str = {'drone': 'Bearings Sensor',
                  'signalstrength': 'Signal Strength Sensor'}
    mcts_config_filter = {}
    dqn_config_filter = {}
    mcts_config_filter.update(config)
    dqn_config_filter.update(config)
    filtered_dqn_runs = sorted(filter_runs('dqn', dqn_config_filter))
    filtered_mcts_runs = sorted(filter_runs('mcts', mcts_config_filter))
    sensor = config.get('sensor', 'all')
    reward = config.get('reward', 'all')
    # Right-hand axis shares the x axis with ax1.
    ax2 = ax1.twinx()
    # blue green color scheme
    if limit == 1:
        ax1.set_prop_cycle(color=['#1f78b4', '#33a02c'])
    elif limit == 2:
        ax1.set_prop_cycle(color=['#3288bd', '#d53e4f'])
        ax2.set_prop_cycle(color=['#66c2a5', '#f46d43'])
    else:
        ax1.set_prop_cycle(color=['#1f78b4', '#a6cee3', '#33a02c', '#b2df8a'])
    # Fallback inference times used when a run logged none
    # (presumably seconds — TODO confirm units).
    mcts_avg_inference_time = 10
    dqn_avg_inference_time = 10
    lns = []  # collected line handles for the combined legend
    if mcts:
        for r in filtered_mcts_runs[-limit:]:
            config = get_config('mcts', r)
            if verbose:
                print(r, '\n')
                print(config)
                print('=======================')
            data = get_data('mcts', r)
            if data.get('inference_times', None) is not None:
                mcts_avg_inference_time = np.mean(list(data['inference_times'].apply(
                    lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
            pf_cov = data.get('pf_cov', None)
            if pf_cov is not None:
                # Stringified flattened covariances -> (episodes, steps, 4, 4).
                pf_cov = np.array([ast.literal_eval(cov_str) for cov_str in pf_cov])
                pf_cov = pf_cov.reshape(pf_cov.shape[0], pf_cov.shape[1], 4, 4)
            else:
                break
            r_var = np.sqrt(pf_cov[:, :, 0, 0])
            theta_var = np.sqrt(pf_cov[:, :, 1, 1])
            r_med = np.percentile(r_var, 50, axis=0)
            r_low = np.percentile(r_var, 16, axis=0)
            r_high = np.percentile(r_var, 84, axis=0)
            theta_med = np.percentile(theta_var, 50, axis=0)
            theta_low = np.percentile(theta_var, 16, axis=0)
            theta_high = np.percentile(theta_var, 84, axis=0)
            l1 = ax1.plot(r_med, '-', label=r'MCTS, $\sigma_{r}$')
            l2 = ax2.plot(theta_med, '-', label=r'MCTS, $\sigma_{\theta}$')
            lns += l1+l2
            if variance_bars:
                ax1.fill_between(np.arange(len(r_med)),
                                 r_low, r_high, alpha=0.2)
                ax2.fill_between(np.arange(len(theta_med)),
                                 theta_low, theta_high, alpha=0.2)
        if timing:
            print('MCTS inference time={:.2e}s'.format(
                mcts_avg_inference_time))
    if dqn:
        for r in filtered_dqn_runs[-limit:]:
            config = get_config('dqn', r)
            if verbose:
                print(r, '\n')
                print(config)
                print('=======================')
            data = get_data('dqn', r)
            if data.get('inference_times', None) is not None:
                dqn_avg_inference_time = np.mean(list(data['inference_times'].apply(
                    lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
            pf_cov = data.get('pf_cov', None)
            if pf_cov is not None:
                pf_cov = np.array([ast.literal_eval(cov_str) for cov_str in pf_cov])
                pf_cov = pf_cov.reshape(pf_cov.shape[0], pf_cov.shape[1], 4, 4)
            else:
                break
            r_var = np.sqrt(pf_cov[:, :, 0, 0])
            theta_var = np.sqrt(pf_cov[:, :, 1, 1])
            r_med = np.percentile(r_var, 50, axis=0)
            r_low = np.percentile(r_var, 16, axis=0)
            r_high = np.percentile(r_var, 84, axis=0)
            theta_med = np.percentile(theta_var, 50, axis=0)
            theta_low = np.percentile(theta_var, 16, axis=0)
            theta_high = np.percentile(theta_var, 84, axis=0)
            l3 = ax1.plot(r_med, '--', label=r'DQN, $\sigma_{r}$')
            l4 = ax2.plot(theta_med, '--', label=r'DQN, $\sigma_{\theta}$')
            lns += l3+l4
            if variance_bars:
                ax1.fill_between(np.arange(len(r_med)),
                                 r_low, r_high, alpha=0.2)
                ax2.fill_between(np.arange(len(theta_med)),
                                 theta_low, theta_high, alpha=0.2)
        if timing:
            print('DQN inference time={:.2e}s'.format(
                dqn_avg_inference_time))
    if mcts and dqn and timing:
        print(
            'Speedup (MCTS/DQN) = {:.2f}x'.format(mcts_avg_inference_time/dqn_avg_inference_time))
        print('======================================')
    ax1.margins(0)
    ax1.set_xlabel('Time Step', fontsize=16)
    ax1.set_ylabel(r'$\sigma_{r}$ (m)', fontsize=24)
    ax2.set_ylabel(r'$\sigma_{\theta}$ (degrees)', fontsize=24)
    ax1.tick_params(axis='both', which='both', labelsize=14)
    ax2.tick_params(axis='both', which='both', labelsize=14)
    ax1.set_title('{} & {}'.format(sensor_str[sensor], reward_str[reward]))
    labs = [l.get_label() for l in lns]
    ax1.legend(lns, labs, fontsize=20)
def single_metric_grid(ax1, config, metric='centroid_err', variance_bars=False, verbose=False, limit=1, y_lim=125):
    """Single-axis panel: per-time-step median of `metric` for the latest
    `limit` MCTS (solid) and DQN (dashed) runs matching `config`, with
    optional 16th-84th percentile shading and the y axis capped at `y_lim`.
    """
    # was single_plot_combined
    reward_str = {'range_reward': 'State Dependent Reward',
                  'entropy_collision_reward': 'Belief Dependent Reward'}
    sensor_str = {'drone': 'Bearings Sensor',
                  'signalstrength': 'Signal Strength Sensor'}
    metric_str = {'centroid_err': 'Centroid Distance (m)'}
    metric_s = metric_str.get(metric, metric)
    mcts_config_filter = {}
    dqn_config_filter = {}
    mcts_config_filter.update(config)
    dqn_config_filter.update(config)
    filtered_dqn_runs = sorted(filter_runs('dqn', dqn_config_filter))
    filtered_mcts_runs = sorted(filter_runs('mcts', mcts_config_filter))
    sensor = config.get('sensor', 'all')
    reward = config.get('reward', 'all')
    # red blue color scheme
    if limit == 1:
        ax1.set_prop_cycle(color=['#ca0020', '#0571b0'])
    else:
        ax1.set_prop_cycle(color=['#ca0020', '#f4a582', '#0571b0', '#92c5de'])
    # blue green color scheme (overrides the red/blue cycle set above)
    if limit == 1:
        ax1.set_prop_cycle(color=['#1f78b4', '#33a02c'])
    else:
        ax1.set_prop_cycle(color=['#1f78b4', '#a6cee3', '#33a02c', '#b2df8a'])
    # Fallback inference times used when a run logged none
    # (presumably seconds — TODO confirm units).
    mcts_avg_inference_time = 10
    dqn_avg_inference_time = 10
    for r in filtered_mcts_runs[-limit:]:
        config = get_config('mcts', r)
        data = get_data('mcts', r)
        if verbose:
            print(r, '\n')
            print(config)
            print('=======================')
        if data.get('inference_times', None) is not None:
            mcts_avg_inference_time = np.mean(list(data['inference_times'].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
        # Metric column holds stringified per-episode lists; parse back to
        # floats and take absolute error.
        plot_data = np.abs(list(data[metric].apply(
            lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
        y = np.mean(list(plot_data), axis=0)
        if verbose:
            print(r, '\n')
            print(config)
            print(y)
            print('=======================')
        med = np.percentile(list(plot_data), 50, axis=0)
        low = np.percentile(list(plot_data), 16, axis=0)
        high = np.percentile(list(plot_data), 84, axis=0)
        ax1.plot(med, '-', label='MCTS')
        if variance_bars:
            ax1.fill_between(np.arange(len(med)), low, high, alpha=0.2)
        print('MCTS inference time={:.2e}s'.format(mcts_avg_inference_time))
    for r in filtered_dqn_runs[-limit:]:
        config = get_config('dqn', r)
        if verbose:
            print(r, '\n')
            print(config)
            print('=======================')
        data = get_data('dqn', r)
        if data.get('inference_times', None) is not None:
            dqn_avg_inference_time = np.mean(list(data['inference_times'].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
        plot_data = np.abs(list(data[metric].apply(
            lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
        # median, 16, 84%
        y = np.mean(list(plot_data), axis=0)
        med = np.percentile(list(plot_data), 50, axis=0)
        low = np.percentile(list(plot_data), 16, axis=0)
        high = np.percentile(list(plot_data), 84, axis=0)
        ax1.plot(med, '--', label='DQN')
        if variance_bars:
            ax1.fill_between(np.arange(len(med)), low, high, alpha=0.2)
        # plt caption
        print('DQN inference time={:.2e}s'.format(dqn_avg_inference_time))
    print(
        'Speedup (MCTS/DQN) = {:.2f}x'.format(mcts_avg_inference_time/dqn_avg_inference_time))
    print('======================================')
    ax1.margins(0)
    ax1.set_ylim(0, y_lim)
    ax1.set_xlabel('Time Step', fontsize=16)
    ax1.set_ylabel('{}'.format(metric_s), fontsize=16)
    ax1.tick_params(axis='both', which='both', labelsize=14)
    ax1.legend(fontsize=20)
    ax1.set_title('{} & {}'.format(sensor_str[sensor], reward_str[reward]))
def starting_position_plots(config, limit=1, metric='centroid_err'):
    """Three-panel comparison of mean `metric` vs time step, grouped by
    target starting position: MCTS alone (left), DQN alone (middle), and
    both overlaid (right; MCTS solid, DQN dashed).

    The latest `limit` matching runs per method are re-sorted by their
    logged `target_start` (descending); each curve is labelled with the
    start interval [target_start-25, target_start+25]. Shows the figure
    immediately via plt.show().
    """
    reward_str = {'range_reward': 'State Dependent Reward',
                  'entropy_collision_reward': 'Belief Dependent Reward'}
    sensor_str = {'drone': 'Bearings Sensor',
                  'signalstrength': 'Signal Strength Sensor'}
    metric_str = {'centroid_err': 'Centroid Distance (m)'}
    metric_s = metric_str.get(metric, metric)
    mcts_config_filter = {}
    dqn_config_filter = {}
    mcts_config_filter.update(config)
    dqn_config_filter.update(config)
    sensor = config.get('sensor', 'all')
    reward = config.get('reward', 'all')
    filtered_dqn_runs = sorted(filter_runs('dqn', dqn_config_filter))
    filtered_mcts_runs = sorted(filter_runs('mcts', mcts_config_filter))
    sorted_filtered_mcts_runs = sorted(filtered_mcts_runs[-limit:], key=lambda r: int(
        get_config('mcts', r)['Methods']['target_start']), reverse=True)
    sorted_filtered_dqn_runs = sorted(filtered_dqn_runs[-limit:], key=lambda r: int(
        get_config('dqn', r)['Methods']['target_start']), reverse=True)
    fig = plt.figure(figsize=(20, 6))
    plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
    ax1 = plt.subplot(1, 3, 1)
    ax2 = plt.subplot(1, 3, 2)
    ax3 = plt.subplot(1, 3, 3)
    # Matched color families (blues for MCTS, reds for DQN) when exactly
    # three runs are compared.
    if limit == 3:
        ax1.set_prop_cycle(color=['#3288bd', '#66c2a5', '#abdda4'])
        ax2.set_prop_cycle(color=['#d53e4f', '#f46d43', '#fdae61'])
        ax3.set_prop_cycle(
            color=['#3288bd', '#66c2a5', '#abdda4', '#d53e4f', '#f46d43', '#fdae61'])
    for r in sorted_filtered_mcts_runs:
        config = get_config('mcts', r)
        data = get_data('mcts', r)
        # centroid_err rows are stringified per-episode lists; parse back.
        plot_data = list(data['centroid_err'].apply(
            lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0]))
        target_start = int(config['Methods']['target_start'])
        ax1.plot(np.mean(list(plot_data), axis=0), '-',
                 label=r'$r_0 \in [{},{}]$'.format(target_start-25, target_start+25))
        ax3.plot(np.mean(list(plot_data), axis=0), '-',
                 label=r'MCTS, $r_0 \in [{},{}]$'.format(target_start-25, target_start+25))
    for r in sorted_filtered_dqn_runs:
        config = get_config('dqn', r)
        data = get_data('dqn', r)
        plot_data = list(data['centroid_err'].apply(
            lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0]))
        y = np.mean(list(plot_data), axis=0)
        target_start = int(config['Methods']['target_start'])
        ax2.plot(
            y, '-', label=r'$r_0 \in [{},{}]$'.format(target_start-25, target_start+25))
        ax3.plot(
            y, '--', label=r'DQN, $r_0 \in [{},{}]$'.format(target_start-25, target_start+25))
    ax1.margins(0)
    ax1.set_ylim(0, 125)
    ax1.set_xlabel('Time Step', fontsize=16)
    ax1.set_ylabel('{}'.format(metric_s), fontsize=16)
    ax1.set_title('MCTS', fontsize=20)
    ax1.legend(fontsize=10)
    ax2.margins(0)
    ax2.set_ylim(0, 125)
    ax2.set_xlabel('Time Step', fontsize=16)
    ax2.set_ylabel('{}'.format(metric_s), fontsize=16)
    ax2.set_title('DQN', fontsize=20)
    ax2.legend(fontsize=10)
    ax3.margins(0)
    ax3.set_ylim(0, 125)
    ax3.set_xlabel('Time Step', fontsize=16)
    ax3.set_ylabel('{}'.format(metric_s), fontsize=16)
    ax3.set_title('MCTS vs DQN', fontsize=20)
    ax3.legend(fontsize=10)
    plt.subplots_adjust(top=0.85)
    plt.suptitle('{} & {}'.format(sensor_str[sensor], reward_str[reward]))
    plt.show()
def single_plot(config, metric='centroid_err', variance_bars=False, verbose=False, limit=1):
    """Stand-alone figure: per-time-step median of `metric` for the latest
    `limit` MCTS (solid) and DQN (dashed) runs matching `config`, shown
    immediately via plt.show().

    With `limit` == 2, curves are additionally labelled by reward type;
    `variance_bars` shades the 16th-84th percentile band.
    """
    reward_str = {'range_reward': 'State Dependent Reward',
                  'entropy_collision_reward': 'Belief Dependent Reward'}
    reward_labels = {'range_reward': 'state',
                     'entropy_collision_reward': 'belief'}
    sensor_str = {'drone': 'Bearings Sensor',
                  'signalstrength': 'Signal Strength Sensor'}
    metric_str = {'centroid_err': 'Centroid Distance'}
    metric_s = metric_str.get(metric, metric)
    mcts_config_filter = {}
    dqn_config_filter = {}
    mcts_config_filter.update(config)
    dqn_config_filter.update(config)
    filtered_dqn_runs = sorted(filter_runs('dqn', dqn_config_filter))
    filtered_mcts_runs = sorted(filter_runs('mcts', mcts_config_filter))
    sensor = config.get('sensor', 'all')
    fig = plt.figure(figsize=(12, 8))
    ax1 = plt.subplot(1, 1, 1)
    # red blue color scheme
    if limit == 1:
        ax1.set_prop_cycle(color=['#ca0020', '#0571b0'])
    else:
        ax1.set_prop_cycle(color=['#ca0020', '#f4a582', '#0571b0', '#92c5de'])
    # blue green color scheme (overrides the red/blue cycle set above)
    if limit == 1:
        ax1.set_prop_cycle(color=['#1f78b4', '#33a02c'])
    elif limit == 2:
        ax1.set_prop_cycle(color=['#3288bd', '#66c2a5', '#d53e4f', '#f46d43'])
    else:
        ax1.set_prop_cycle(color=['#1f78b4', '#a6cee3', '#33a02c', '#b2df8a'])
    # Fallback inference times used when a run logged none
    # (presumably seconds — TODO confirm units).
    mcts_avg_inference_time = 10
    dqn_avg_inference_time = 10
    for r in filtered_mcts_runs[-limit:]:
        config = get_config('mcts', r)
        if verbose:
            print(r, '\n')
            print(config)
            print('=======================')
        data = get_data('mcts', r)
        if data.get('inference_times', None) is not None:
            mcts_avg_inference_time = np.mean(list(data['inference_times'].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
        # Metric column holds stringified per-episode lists; parse back.
        plot_data = list(data[metric].apply(lambda x: [float(xx)
                                            for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0]))
        med = np.percentile(list(plot_data), 50, axis=0)
        low = np.percentile(list(plot_data), 16, axis=0)
        high = np.percentile(list(plot_data), 84, axis=0)
        mcts_label_str = 'MCTS'
        if limit == 2:
            mcts_label_str = r'MCTS, $R_{{\mathrm{{{}}}}}$'.format(
                reward_labels[config['Methods']['reward']])
        ax1.plot(med, '-', label=mcts_label_str)
        if variance_bars:
            # NOTE(review): y_std is computed but unused.
            y_std = np.std(list(plot_data), axis=0)
            ax1.fill_between(np.arange(len(med)), low, high, alpha=0.5)
        print('MCTS, {}, {}, inference time={:.2e}s'.format(
            sensor_str[config['Methods']['sensor']], reward_str[config['Methods']['reward']], mcts_avg_inference_time))
    for r in filtered_dqn_runs[-limit:]:
        config = get_config('dqn', r)
        if verbose:
            print(r, '\n')
            print(config)
            print('=======================')
        data = get_data('dqn', r)
        if data.get('inference_times', None) is not None:
            dqn_avg_inference_time = np.mean(list(data['inference_times'].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
        plot_data = list(data[metric].apply(lambda x: [float(xx)
                                            for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0]))
        med = np.percentile(list(plot_data), 50, axis=0)
        low = np.percentile(list(plot_data), 16, axis=0)
        high = np.percentile(list(plot_data), 84, axis=0)
        dqn_label_str = 'DQN'
        if limit == 2:
            dqn_label_str = r'DQN, $R_{{\mathrm{{{}}}}}$'.format(
                reward_labels[config['Methods']['reward']])
        ax1.plot(med, '--', label=dqn_label_str)
        if variance_bars:
            y_std = np.std(list(plot_data), axis=0)
            ax1.fill_between(np.arange(len(med)), low, high, alpha=0.5)
        # plt caption
        print('DQN, {}, {}, inference time={:.2e}s'.format(
            sensor_str[config['Methods']['sensor']], reward_str[config['Methods']['reward']], dqn_avg_inference_time))
    print(
        'Speedup (MCTS/DQN) = {:.2f}x'.format(mcts_avg_inference_time/dqn_avg_inference_time))
    ax1.margins(0)
    ax1.set_ylim(0, 125)
    ax1.set_xlabel('Time Step', fontsize=16)
    ax1.set_ylabel('{}'.format(metric_s), fontsize=16)
    ax1.tick_params(axis='both', which='both', labelsize=14)
    ax1.legend(fontsize=20)
    plt.show()
def get_config(method_name, run_name):
    """Load and return the parsed header/config dict for a logged run.

    Reads `{RUN_DIR}/{method_name}/{run_name}_header.txt` via
    `read_header_log` and backfills the legacy default
    `Methods.reward = 'range_reward'` for runs logged before the reward
    key existed.
    """
    config = read_header_log(
        f'{RUN_DIR}/{method_name}/{run_name}_header.txt')
    config['Methods']['reward'] = config['Methods'].get(
        'reward', 'range_reward')
    return config
def get_data(method_name, run_name):
    """Return the run's logged metrics as a DataFrame.

    Reads `{RUN_DIR}/{method_name}/{run_name}_data.csv`.
    """
    data = pd.read_csv(
        f'{RUN_DIR}/{method_name}/{run_name}_data.csv')
    return data
def append_metric_avgs(df, metrics):
    """Add a per-row mean column ``avg_<metric>`` for each listed metric.

    Each ``df[m]`` cell is expected to hold a sequence of values; the new
    column stores that sequence's mean. Metrics that already have a column
    under either the ``avg_`` or legacy ``average_`` naming scheme are left
    untouched. Mutates ``df`` in place; returns None.
    """
    for metric in metrics:
        avg_col = 'avg_{}'.format(metric)
        legacy_col = 'average_{}'.format(metric)
        if avg_col in df or legacy_col in df:
            continue
        df[avg_col] = np.mean(list(df[metric]), axis=1)
def get_valid_runs(method_name):
    """Return run names under `RUN_DIR/<method_name>/` that are complete.

    A run is identified by the filename prefix before the first
    underscore; it is valid when both `<run>_data.csv` and
    `<run>_header.txt` exist and the header parses via `get_config`.
    """
    files = os.listdir('{}/{}/'.format(RUN_DIR, method_name))
    runs = list(set([f.split('_')[0] for f in files]))
    valid_runs = []
    for r in runs:
        if (r+'_data.csv' in files) and (r+'_header.txt' in files):
            try:
                get_config(method_name, r)
            except Exception:
                # Header exists but is unparseable: skip this run.
                # (Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                continue
            valid_runs.append(r)
    return valid_runs
def filter_runs(method_name, config_filter=None):
    """Return valid run names for `method_name` whose config matches every
    key of `config_filter`.

    Filter semantics per key:
    - target_speed: numeric compare; runs logged without the key count as 1.0.
    - target_start: numeric compare, or membership test when the filter value
      is a list (NOTE(review): the list branch compares the raw config value
      against the list entries — confirm the list holds matching types).
    - datetime_start / datetime_end: keep runs whose name (an ISO timestamp)
      is on/after, resp. on/before, the given bound.
    - fading_sigma (default 0.0) / particle_resample (default 0.005):
      numeric compare against the logged or default value.
    - any other key: exact equality against the logged value.
    """
    # Treat a missing filter as "match everything" (the original crashed
    # on `None.items()`).
    if config_filter is None:
        config_filter = {}
    runs = get_valid_runs(method_name)
    filtered_runs = []
    for r in runs:
        match = True
        if method_name == 'baseline':
            # Baseline runs keep part of their settings under 'Defaults'.
            config = get_config(method_name, r)['Methods']
            config.update(get_config(method_name, r)['Defaults'])
        else:
            config = get_config(method_name, r)['Methods']
        for k, v in config_filter.items():
            if k == 'target_speed':
                v = float(v)
                speed = config.get(k)
                # A missing key means the run predates target_speed and
                # implicitly used 1.0. (The original evaluated
                # `float(None)` when the key was absent and v == 1.0,
                # raising TypeError.)
                if (speed is None and v != 1.) or (
                        speed is not None and float(speed) != v):
                    match = False
                    break
            elif k == 'target_start':
                if isinstance(v, list):
                    if config.get(k, None) not in v:
                        match = False
                        break
                else:
                    if config.get(k) is None or float(config.get(k)) != float(v):
                        match = False
                        break
            elif k == 'datetime_start':
                config_datetime = datetime.strptime(v, '%Y-%m-%dT%H:%M:%S')
                run_datetime = datetime.strptime(r, '%Y-%m-%dT%H:%M:%S')
                if run_datetime < config_datetime:
                    match = False
                    break
            elif k == 'datetime_end':
                config_datetime = datetime.strptime(v, '%Y-%m-%dT%H:%M:%S')
                run_datetime = datetime.strptime(r, '%Y-%m-%dT%H:%M:%S')
                if run_datetime > config_datetime:
                    match = False
                    break
            elif k == 'fading_sigma':
                v = float(v)
                if float(config.get(k, 0.0)) != v:
                    match = False
                    break
            elif k == 'particle_resample':
                v = float(v)
                if float(config.get(k, 0.005)) != v:
                    match = False
                    break
            elif config.get(k) != v:
                match = False
                break
        if match:
            filtered_runs.append(r)
    return filtered_runs
def show_results():
    """Ad-hoc smoke test for the reader helpers.

    Prints valid and filtered DQN run names, then loads the config and
    data of one hard-coded MCTS run; the loaded values are discarded.
    """
    method_name = 'mcts' # should be 'mcts' or 'dqn'
    dqn_runs = get_valid_runs('dqn')
    config_filter = {'method': 'dqn'}
    filtered_dqn_runs = filter_runs('dqn', config_filter)
    print(filtered_dqn_runs)
    print(dqn_runs)
    run_name = '2021-04-21T09:46:52'
    config = get_config(method_name, run_name)
    data = get_data(method_name, run_name)
|
import ast
import os
import re
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .definitions import RUN_DIR
from .utils import read_header_log
# Module-level maps from internal config keys to human-readable names used
# in plot titles and legends throughout this module.
reward_str = {'range_reward': 'State Dependent Reward',
              'entropy_collision_reward': 'Belief Dependent Reward'}
sensor_str = {'drone': 'Bearings Sensor',
              'signalstrength': 'Signal Strength Sensor'}
metric_str = {'centroid_err': 'Centroid Distance'}
def plotter(plot_func, title=None, target_start=78, sensors=('drone', 'signalstrength'), rewards=('range_reward', 'entropy_collision_reward'), **kwargs):
    """Draw a grid of panels, one per (sensor, reward) combination.

    Parameters
    ----------
    plot_func : callable(ax, config, **kwargs)
        Panel renderer, e.g. ``single_metric_grid`` or ``std_dev_grid``.
    title : str, optional
        Figure super-title.
    target_start : int
        Target start value placed into each panel's run filter.
    sensors, rewards : iterable of str
        Keys into the module-level ``sensor_str`` / ``reward_str`` maps;
        one subfigure row per sensor, one column per reward.
        (Defaults are tuples instead of the original mutable-list
        defaults — a shared-state pitfall.)
    **kwargs
        Forwarded to ``plot_func``.
    """
    fig = plt.figure(figsize=(20, 16), constrained_layout=True)
    fig.suptitle(title)
    subfig_rows = fig.subfigures(nrows=len(sensors), ncols=1)
    axs = [row.subplots(nrows=1, ncols=len(rewards)) for row in subfig_rows]
    for i, s in enumerate(sensors):
        for j, r in enumerate(rewards):
            print('{} & {}'.format(sensor_str[s], reward_str[r]))
            # Restrict each panel to runs after this date with a fixed
            # target speed.
            config = {'datetime_start': '2021-06-18T00:00:00', 'reward': r,
                      'sensor': s, 'target_start': target_start, 'target_speed': 1}
            plot_func(axs[i][j], config, **kwargs)
    plt.show()
def separate_plotter(plot_func, title=None, target_start=78, sensors=('drone', 'signalstrength'), rewards=('range_reward', 'entropy_collision_reward'), **kwargs):
    """Draw MCTS-only (left) and DQN-only (right) panels for every
    (sensor, reward) combination, one subfigure row per combination.

    ``plot_func`` must accept ``mcts``/``dqn`` keyword flags (e.g.
    ``std_dev_grid``): the left panel is called with ``dqn=False`` and the
    right panel with ``mcts=False``. Defaults are tuples instead of the
    original mutable-list defaults.
    """
    fig = plt.figure(figsize=(20, 26), constrained_layout=True)
    fig.suptitle(title)
    subfig_rows = fig.subfigures(nrows=len(sensors)*len(rewards), ncols=1)
    axs = [row.subplots(nrows=1, ncols=2) for row in subfig_rows]
    for i, s in enumerate(sensors):
        for j, r in enumerate(rewards):
            config = {'datetime_start': '2021-06-18T00:00:00', 'reward': r,
                      'sensor': s, 'target_start': target_start, 'target_speed': 1}
            # Each sensor contributes len(rewards) rows, one per reward.
            # (The original indexed with i*len(sensors)+j, which is only
            # correct when len(sensors) == len(rewards); otherwise rows
            # collide or the index runs past the subfigure list.)
            row = i*len(rewards) + j
            plot_func(axs[row][0], config, dqn=False, **kwargs)
            plot_func(axs[row][1], config, mcts=False, **kwargs)
    plt.show()
def two_metric_grid(ax1, config, mcts=True, dqn=True, metric1='r_err', metric2='theta_err', variance_bars=False, verbose=False, timing=True, limit=1, y_lim=125):
    """Dual-axis panel: per-time-step median of `metric1` (left axis) and
    `metric2` (twinned right axis) for the latest `limit` MCTS and/or DQN
    runs matching `config` (see `filter_runs`).

    Solid lines are MCTS runs, dashed lines DQN. With `variance_bars`, the
    16th-84th percentile band is shaded. Inference-time stats are printed
    when `timing` is set. NOTE(review): `y_lim` is accepted but never used
    here — confirm whether it should cap the left axis.
    """
    # was panel_dual_axis
    # set strings
    reward_str = {'range_reward': 'State Dependent Reward',
                  'entropy_collision_reward': 'Belief Dependent Reward'}
    sensor_str = {'drone': 'Bearings Sensor',
                  'signalstrength': 'Signal Strength Sensor'}
    metric_str = {
        'centroid_err': 'Centroid Distance (m)', 'r_err': r'$\delta_{r}$ (m)', 'theta_err': r'$\delta_{\theta}$ (degrees)'}
    # get configs and run data
    mcts_config_filter = {}
    dqn_config_filter = {}
    mcts_config_filter.update(config)
    dqn_config_filter.update(config)
    filtered_dqn_runs = sorted(filter_runs('dqn', dqn_config_filter))
    filtered_mcts_runs = sorted(filter_runs('mcts', mcts_config_filter))
    sensor = config.get('sensor', 'all')
    reward = config.get('reward', 'all')
    # Right-hand axis shares the x axis with ax1.
    ax2 = ax1.twinx()
    # blue green color scheme
    if limit == 2:
        ax1.set_prop_cycle(color=['#3288bd', '#d53e4f'])
        ax2.set_prop_cycle(color=['#66c2a5', '#f46d43'])
    else:
        ax1.set_prop_cycle(color=['#1f78b4', '#a6cee3', '#33a02c', '#b2df8a'])
    # Fallback inference times used when a run logged none
    # (presumably seconds — TODO confirm units).
    mcts_avg_inference_time = 10
    dqn_avg_inference_time = 10
    lns = []  # collected line handles for the combined legend
    if mcts:
        for r in filtered_mcts_runs[-limit:]:
            config = get_config('mcts', r)
            if verbose:
                print(r, '\n', config, '\n', '========================')
            data = get_data('mcts', r)
            if data.get('inference_times', None) is not None:
                mcts_avg_inference_time = np.mean(list(data['inference_times'].apply(
                    lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
            # Metric columns hold stringified per-episode lists; parse back
            # to floats and take absolute error.
            plot_data1 = np.abs(list(data[metric1].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
            med1 = np.percentile(list(plot_data1), 50, axis=0)
            low1 = np.percentile(list(plot_data1), 16, axis=0)
            high1 = np.percentile(list(plot_data1), 84, axis=0)
            l1 = ax1.plot(med1, '-', label='MCTS, '+metric_str[metric1])
            plot_data2 = np.abs(list(data[metric2].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
            med2 = np.percentile(list(plot_data2), 50, axis=0)
            low2 = np.percentile(list(plot_data2), 16, axis=0)
            high2 = np.percentile(list(plot_data2), 84, axis=0)
            l2 = ax2.plot(med2, '-', label='MCTS, '+metric_str[metric2])
            if variance_bars:
                ax1.fill_between(np.arange(len(med1)), low1, high1, alpha=0.2)
                ax2.fill_between(np.arange(len(med2)), low2, high2, alpha=0.2)
            lns += l1+l2
        # 3
    if timing:
        print('MCTS inference time={:.2e}s'.format(
            mcts_avg_inference_time))
    if dqn:
        for r in filtered_dqn_runs[-limit:]:
            config = get_config('dqn', r)
            if verbose:
                print(r, '\n', config, '\n', '========================')
            data = get_data('dqn', r)
            if data.get('inference_times', None) is not None:
                dqn_avg_inference_time = np.mean(list(data['inference_times'].apply(
                    lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
            plot_data1 = np.abs(list(data[metric1].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
            med1 = np.percentile(list(plot_data1), 50, axis=0)
            low1 = np.percentile(list(plot_data1), 16, axis=0)
            high1 = np.percentile(list(plot_data1), 84, axis=0)
            l3 = ax1.plot(med1, '--', label='DQN, '+metric_str[metric1])
            plot_data2 = np.abs(list(data[metric2].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
            med2 = np.percentile(list(plot_data2), 50, axis=0)
            low2 = np.percentile(list(plot_data2), 16, axis=0)
            high2 = np.percentile(list(plot_data2), 84, axis=0)
            l4 = ax2.plot(med2, '--', label='DQN, '+metric_str[metric2])
            if variance_bars:
                ax1.fill_between(np.arange(len(med1)), low1, high1, alpha=0.2)
                ax2.fill_between(np.arange(len(med2)), low2, high2, alpha=0.2)
            lns += l3+l4
    if timing:
        print('DQN inference time={:.2e}s'.format(
            dqn_avg_inference_time))
    if mcts and dqn and timing:
        print(
            'Speedup (MCTS/DQN) = {:.2f}x'.format(mcts_avg_inference_time/dqn_avg_inference_time))
        print('======================================')
    ax1.margins(0)
    ax1.set_xlabel('Time Step', fontsize=16)
    ax1.set_ylabel(metric_str[metric1], fontsize=24)
    ax2.set_ylabel(metric_str[metric2], fontsize=24)
    ax1.tick_params(axis='both', which='both', labelsize=14)
    ax2.tick_params(axis='both', which='both', labelsize=14)
    ax1.set_title('{} & {}'.format(sensor_str[sensor], reward_str[reward]))
    labs = [l.get_label() for l in lns]
    ax1.legend(lns, labs, fontsize=20)
def single_std_dev(ax1, config, metric='r', variance_bars=False, verbose=False, limit=1, y_lim=125):
    """Plot the particle-filter standard deviation of one state component over
    time, comparing the most recent matching MCTS runs (solid) vs DQN runs
    (dashed) on the given axes.

    Args:
        ax1: matplotlib Axes to draw on.
        config: dict of config-filter entries forwarded to filter_runs().
        metric: 'r' or 'theta' -- which diagonal entry of the 4x4 PF
            covariance to plot (as a standard deviation).
        variance_bars: shade the 16th-84th percentile band when True.
        verbose: print each run name and its parsed config.
        limit: number of most-recent matching runs to plot per method.
        y_lim: unused in this function -- NOTE(review): kept for signature
            parity with sibling plotters; confirm it can be dropped.
    """
    # was single_plot_var
    cov_idx = {'r': 0, 'theta': 1}  # covariance diagonal index per metric
    reward_str = {'range_reward': 'State Dependent Reward',
                  'entropy_collision_reward': 'Belief Dependent Reward'}
    reward_labels = {'range_reward': 'state',
                     'entropy_collision_reward': 'belief'}
    sensor_str = {'drone': 'Bearings Sensor',
                  'signalstrength': 'Signal Strength Sensor'}
    metric_str = {'r': 'r', 'theta': '\\theta'}
    metric_s = metric_str.get(metric, metric)
    label_str = ''
    mcts_config_filter = {}
    dqn_config_filter = {}
    mcts_config_filter.update(config)
    dqn_config_filter.update(config)
    filtered_dqn_runs = sorted(filter_runs('dqn', dqn_config_filter))
    filtered_mcts_runs = sorted(filter_runs('mcts', mcts_config_filter))
    # red blue color scheme
    if limit == 1:
        ax1.set_prop_cycle(color=['#ca0020', '#0571b0'])
    else:
        ax1.set_prop_cycle(color=['#ca0020', '#f4a582', '#0571b0', '#92c5de'])
    # blue green color scheme -- this second block overrides the red/blue cycle above
    if limit == 1:
        ax1.set_prop_cycle(color=['#1f78b4', '#33a02c'])
    elif limit == 2:
        ax1.set_prop_cycle(color=['#3288bd', '#66c2a5', '#d53e4f', '#f46d43'])
    else:
        ax1.set_prop_cycle(color=['#1f78b4', '#a6cee3', '#33a02c', '#b2df8a'])
    mcts_avg_inference_time = 10  # fallback when a run logged no inference times
    dqn_avg_inference_time = 10
    for r in filtered_mcts_runs[-limit:]:
        config = get_config('mcts', r)
        if verbose:
            print(r, '\n')
            print(config)
            print('=======================')
        data = get_data('mcts', r)
        if data.get('inference_times', None) is not None:
            mcts_avg_inference_time = np.mean(list(data['inference_times'].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
        pf_cov = data.get('pf_cov', None)
        if pf_cov is not None:
            # Each row is a stringified list of covariances; reshape to (episode, step, 4, 4).
            pf_cov = np.array([ast.literal_eval(cov_str) for cov_str in pf_cov])
            pf_cov = pf_cov.reshape(pf_cov.shape[0], pf_cov.shape[1], 4, 4)
        else:
            break
        variance = np.sqrt(pf_cov[:, :, cov_idx[metric], cov_idx[metric]])
        variance_med = np.percentile(variance, 50, axis=0)
        variance_low = np.percentile(variance, 16, axis=0)
        variance_high = np.percentile(variance, 84, axis=0)
        if limit == 2:
            label_str = r', $R_{{\mathrm{{{}}}}}$'.format(
                reward_labels[config['Methods']['reward']])
        ax1.plot(variance_med, '-', label='MCTS'+'{}'.format(label_str))
        if variance_bars:
            ax1.fill_between(np.arange(len(variance_med)),
                             variance_low, variance_high, alpha=0.2)
        print('MCTS, {}, {}, inference time={:.2e}s'.format(
            sensor_str[config['Methods']['sensor']], reward_str[config['Methods']['reward']], mcts_avg_inference_time))
    for r in filtered_dqn_runs[-limit:]:
        config = get_config('dqn', r)
        if verbose:
            print(r, '\n')
            print(config)
            print('=======================')
        data = get_data('dqn', r)
        if data.get('inference_times', None) is not None:
            dqn_avg_inference_time = np.mean(list(data['inference_times'].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
        pf_cov = data.get('pf_cov', None)
        if pf_cov is not None:
            pf_cov = np.array([ast.literal_eval(cov_str) for cov_str in pf_cov])
            pf_cov = pf_cov.reshape(pf_cov.shape[0], pf_cov.shape[1], 4, 4)
        else:
            break
        variance = np.sqrt(pf_cov[:, :, cov_idx[metric], cov_idx[metric]])
        variance_med = np.percentile(variance, 50, axis=0)
        variance_low = np.percentile(variance, 16, axis=0)
        variance_high = np.percentile(variance, 84, axis=0)
        if limit == 2:
            label_str = r', $R_{{\mathrm{{{}}}}}$'.format(
                reward_labels[config['Methods']['reward']])
        ax1.plot(variance_med, '--', label='DQN'+'{}'.format(label_str))
        if variance_bars:
            ax1.fill_between(np.arange(len(variance_med)),
                             variance_low, variance_high, alpha=0.2)
        # plt caption
        print('DQN, {}, {}, inference time={:.2e}s'.format(
            sensor_str[config['Methods']['sensor']], reward_str[config['Methods']['reward']], dqn_avg_inference_time))
    print(
        'Speedup (MCTS/DQN) = {:.2f}x'.format(mcts_avg_inference_time/dqn_avg_inference_time))
    print('======================================')
    ax1.margins(0)
    ax1.set_xlabel('Time Step', fontsize=16)
    ax1.set_ylabel(r'$\sigma_{{{}}}$'.format(metric_s), fontsize=22)
    ax1.tick_params(axis='both', which='both', labelsize=14)
    ax1.legend(fontsize=20)
def std_dev_grid(ax1, config, mcts=True, dqn=True, variance_bars=False, verbose=False, timing=True, limit=1, y_lim=125):
    """Plot PF standard deviations for both r (left axis) and theta (right
    twin axis) over time, MCTS solid vs DQN dashed, on one grid cell.

    Args:
        ax1: matplotlib Axes; a twin y-axis is created for theta.
        config: dict of config-filter entries forwarded to filter_runs().
        mcts, dqn: include that method's runs when True.
        variance_bars: shade the 16th-84th percentile bands when True.
        verbose: print each run name and config.
        timing: print per-method inference times and the speedup ratio.
        limit: number of most-recent matching runs to plot per method.
        y_lim: unused in this function -- NOTE(review): signature parity only.
    """
    # was single_plot_combined_cov
    reward_str = {'range_reward': 'State Dependent Reward',
                  'entropy_collision_reward': 'Belief Dependent Reward'}
    sensor_str = {'drone': 'Bearings Sensor',
                  'signalstrength': 'Signal Strength Sensor'}
    mcts_config_filter = {}
    dqn_config_filter = {}
    mcts_config_filter.update(config)
    dqn_config_filter.update(config)
    filtered_dqn_runs = sorted(filter_runs('dqn', dqn_config_filter))
    filtered_mcts_runs = sorted(filter_runs('mcts', mcts_config_filter))
    sensor = config.get('sensor', 'all')
    reward = config.get('reward', 'all')
    ax2 = ax1.twinx()  # right-hand axis for sigma_theta
    # blue green color scheme
    if limit == 1:
        ax1.set_prop_cycle(color=['#1f78b4', '#33a02c'])
    elif limit == 2:
        ax1.set_prop_cycle(color=['#3288bd', '#d53e4f'])
        ax2.set_prop_cycle(color=['#66c2a5', '#f46d43'])
    else:
        ax1.set_prop_cycle(color=['#1f78b4', '#a6cee3', '#33a02c', '#b2df8a'])
    mcts_avg_inference_time = 10  # fallback when a run logged no inference times
    dqn_avg_inference_time = 10
    lns = []  # collected line handles for the combined legend
    if mcts:
        for r in filtered_mcts_runs[-limit:]:
            config = get_config('mcts', r)
            if verbose:
                print(r, '\n')
                print(config)
                print('=======================')
            data = get_data('mcts', r)
            if data.get('inference_times', None) is not None:
                mcts_avg_inference_time = np.mean(list(data['inference_times'].apply(
                    lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
            pf_cov = data.get('pf_cov', None)
            if pf_cov is not None:
                # Stringified covariances -> (episode, step, 4, 4) array.
                pf_cov = np.array([ast.literal_eval(cov_str) for cov_str in pf_cov])
                pf_cov = pf_cov.reshape(pf_cov.shape[0], pf_cov.shape[1], 4, 4)
            else:
                break
            r_var = np.sqrt(pf_cov[:, :, 0, 0])
            theta_var = np.sqrt(pf_cov[:, :, 1, 1])
            r_med = np.percentile(r_var, 50, axis=0)
            r_low = np.percentile(r_var, 16, axis=0)
            r_high = np.percentile(r_var, 84, axis=0)
            theta_med = np.percentile(theta_var, 50, axis=0)
            theta_low = np.percentile(theta_var, 16, axis=0)
            theta_high = np.percentile(theta_var, 84, axis=0)
            l1 = ax1.plot(r_med, '-', label=r'MCTS, $\sigma_{r}$')
            l2 = ax2.plot(theta_med, '-', label=r'MCTS, $\sigma_{\theta}$')
            lns += l1+l2
            if variance_bars:
                ax1.fill_between(np.arange(len(r_med)),
                                 r_low, r_high, alpha=0.2)
                ax2.fill_between(np.arange(len(theta_med)),
                                 theta_low, theta_high, alpha=0.2)
        if timing:
            print('MCTS inference time={:.2e}s'.format(
                mcts_avg_inference_time))
    if dqn:
        for r in filtered_dqn_runs[-limit:]:
            config = get_config('dqn', r)
            if verbose:
                print(r, '\n')
                print(config)
                print('=======================')
            data = get_data('dqn', r)
            if data.get('inference_times', None) is not None:
                dqn_avg_inference_time = np.mean(list(data['inference_times'].apply(
                    lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
            pf_cov = data.get('pf_cov', None)
            if pf_cov is not None:
                pf_cov = np.array([ast.literal_eval(cov_str) for cov_str in pf_cov])
                pf_cov = pf_cov.reshape(pf_cov.shape[0], pf_cov.shape[1], 4, 4)
            else:
                break
            r_var = np.sqrt(pf_cov[:, :, 0, 0])
            theta_var = np.sqrt(pf_cov[:, :, 1, 1])
            r_med = np.percentile(r_var, 50, axis=0)
            r_low = np.percentile(r_var, 16, axis=0)
            r_high = np.percentile(r_var, 84, axis=0)
            theta_med = np.percentile(theta_var, 50, axis=0)
            theta_low = np.percentile(theta_var, 16, axis=0)
            theta_high = np.percentile(theta_var, 84, axis=0)
            l3 = ax1.plot(r_med, '--', label=r'DQN, $\sigma_{r}$')
            l4 = ax2.plot(theta_med, '--', label=r'DQN, $\sigma_{\theta}$')
            lns += l3+l4
            if variance_bars:
                ax1.fill_between(np.arange(len(r_med)),
                                 r_low, r_high, alpha=0.2)
                ax2.fill_between(np.arange(len(theta_med)),
                                 theta_low, theta_high, alpha=0.2)
        if timing:
            print('DQN inference time={:.2e}s'.format(
                dqn_avg_inference_time))
    if mcts and dqn and timing:
        print(
            'Speedup (MCTS/DQN) = {:.2f}x'.format(mcts_avg_inference_time/dqn_avg_inference_time))
        print('======================================')
    ax1.margins(0)
    ax1.set_xlabel('Time Step', fontsize=16)
    ax1.set_ylabel(r'$\sigma_{r}$ (m)', fontsize=24)
    ax2.set_ylabel(r'$\sigma_{\theta}$ (degrees)', fontsize=24)
    ax1.tick_params(axis='both', which='both', labelsize=14)
    ax2.tick_params(axis='both', which='both', labelsize=14)
    ax1.set_title('{} & {}'.format(sensor_str[sensor], reward_str[reward]))
    labs = [l.get_label() for l in lns]
    ax1.legend(lns, labs, fontsize=20)
def single_metric_grid(ax1, config, metric='centroid_err', variance_bars=False, verbose=False, limit=1, y_lim=125):
    """Plot the median of one scalar metric over time for MCTS (solid) vs
    DQN (dashed) runs on a single grid cell.

    Args:
        ax1: matplotlib Axes to draw on.
        config: dict of config-filter entries forwarded to filter_runs().
        metric: DataFrame column of per-step values (stringified lists).
        variance_bars: shade the 16th-84th percentile band when True.
        verbose: print run names, configs, and per-run means.
        limit: number of most-recent matching runs to plot per method.
        y_lim: upper y-axis limit.
    """
    # was single_plot_combined
    reward_str = {'range_reward': 'State Dependent Reward',
                  'entropy_collision_reward': 'Belief Dependent Reward'}
    sensor_str = {'drone': 'Bearings Sensor',
                  'signalstrength': 'Signal Strength Sensor'}
    metric_str = {'centroid_err': 'Centroid Distance (m)'}
    metric_s = metric_str.get(metric, metric)
    mcts_config_filter = {}
    dqn_config_filter = {}
    mcts_config_filter.update(config)
    dqn_config_filter.update(config)
    filtered_dqn_runs = sorted(filter_runs('dqn', dqn_config_filter))
    filtered_mcts_runs = sorted(filter_runs('mcts', mcts_config_filter))
    sensor = config.get('sensor', 'all')
    reward = config.get('reward', 'all')
    # red blue color scheme
    if limit == 1:
        ax1.set_prop_cycle(color=['#ca0020', '#0571b0'])
    else:
        ax1.set_prop_cycle(color=['#ca0020', '#f4a582', '#0571b0', '#92c5de'])
    # blue green color scheme -- overrides the red/blue cycle above
    if limit == 1:
        ax1.set_prop_cycle(color=['#1f78b4', '#33a02c'])
    else:
        ax1.set_prop_cycle(color=['#1f78b4', '#a6cee3', '#33a02c', '#b2df8a'])
    mcts_avg_inference_time = 10  # fallback when a run logged no inference times
    dqn_avg_inference_time = 10
    for r in filtered_mcts_runs[-limit:]:
        config = get_config('mcts', r)
        data = get_data('mcts', r)
        if verbose:
            print(r, '\n')
            print(config)
            print('=======================')
        if data.get('inference_times', None) is not None:
            mcts_avg_inference_time = np.mean(list(data['inference_times'].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
        plot_data = np.abs(list(data[metric].apply(
            lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
        y = np.mean(list(plot_data), axis=0)
        if verbose:
            print(r, '\n')
            print(config)
            print(y)
            print('=======================')
        med = np.percentile(list(plot_data), 50, axis=0)
        low = np.percentile(list(plot_data), 16, axis=0)
        high = np.percentile(list(plot_data), 84, axis=0)
        ax1.plot(med, '-', label='MCTS')
        if variance_bars:
            ax1.fill_between(np.arange(len(med)), low, high, alpha=0.2)
        print('MCTS inference time={:.2e}s'.format(mcts_avg_inference_time))
    for r in filtered_dqn_runs[-limit:]:
        config = get_config('dqn', r)
        if verbose:
            print(r, '\n')
            print(config)
            print('=======================')
        data = get_data('dqn', r)
        if data.get('inference_times', None) is not None:
            dqn_avg_inference_time = np.mean(list(data['inference_times'].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
        plot_data = np.abs(list(data[metric].apply(
            lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
        # median, 16, 84%
        y = np.mean(list(plot_data), axis=0)
        med = np.percentile(list(plot_data), 50, axis=0)
        low = np.percentile(list(plot_data), 16, axis=0)
        high = np.percentile(list(plot_data), 84, axis=0)
        ax1.plot(med, '--', label='DQN')
        if variance_bars:
            ax1.fill_between(np.arange(len(med)), low, high, alpha=0.2)
        # plt caption
        print('DQN inference time={:.2e}s'.format(dqn_avg_inference_time))
    print(
        'Speedup (MCTS/DQN) = {:.2f}x'.format(mcts_avg_inference_time/dqn_avg_inference_time))
    print('======================================')
    ax1.margins(0)
    ax1.set_ylim(0, y_lim)
    ax1.set_xlabel('Time Step', fontsize=16)
    ax1.set_ylabel('{}'.format(metric_s), fontsize=16)
    ax1.tick_params(axis='both', which='both', labelsize=14)
    ax1.legend(fontsize=20)
    ax1.set_title('{} & {}'.format(sensor_str[sensor], reward_str[reward]))
def starting_position_plots(config, limit=1, metric='centroid_err'):
    """Figure with three panels comparing centroid error by target starting
    position: MCTS alone, DQN alone, and both overlaid.

    Runs are sorted by their configured 'target_start' (descending) so line
    colors match across panels. Shows the figure via plt.show().

    Args:
        config: dict of config-filter entries forwarded to filter_runs().
        limit: number of most-recent matching runs per method.
        metric: used only for the y-axis label; the plotted column is
            hard-coded to 'centroid_err' -- NOTE(review): confirm intent.
    """
    reward_str = {'range_reward': 'State Dependent Reward',
                  'entropy_collision_reward': 'Belief Dependent Reward'}
    sensor_str = {'drone': 'Bearings Sensor',
                  'signalstrength': 'Signal Strength Sensor'}
    metric_str = {'centroid_err': 'Centroid Distance (m)'}
    metric_s = metric_str.get(metric, metric)
    mcts_config_filter = {}
    dqn_config_filter = {}
    mcts_config_filter.update(config)
    dqn_config_filter.update(config)
    sensor = config.get('sensor', 'all')
    reward = config.get('reward', 'all')
    filtered_dqn_runs = sorted(filter_runs('dqn', dqn_config_filter))
    filtered_mcts_runs = sorted(filter_runs('mcts', mcts_config_filter))
    sorted_filtered_mcts_runs = sorted(filtered_mcts_runs[-limit:], key=lambda r: int(
        get_config('mcts', r)['Methods']['target_start']), reverse=True)
    sorted_filtered_dqn_runs = sorted(filtered_dqn_runs[-limit:], key=lambda r: int(
        get_config('dqn', r)['Methods']['target_start']), reverse=True)
    fig = plt.figure(figsize=(20, 6))
    plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
    ax1 = plt.subplot(1, 3, 1)
    ax2 = plt.subplot(1, 3, 2)
    ax3 = plt.subplot(1, 3, 3)
    if limit == 3:
        # Matching warm/cool palettes so the overlay panel pairs up colors.
        ax1.set_prop_cycle(color=['#3288bd', '#66c2a5', '#abdda4'])
        ax2.set_prop_cycle(color=['#d53e4f', '#f46d43', '#fdae61'])
        ax3.set_prop_cycle(
            color=['#3288bd', '#66c2a5', '#abdda4', '#d53e4f', '#f46d43', '#fdae61'])
    for r in sorted_filtered_mcts_runs:
        config = get_config('mcts', r)
        data = get_data('mcts', r)
        plot_data = list(data['centroid_err'].apply(
            lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0]))
        target_start = int(config['Methods']['target_start'])
        ax1.plot(np.mean(list(plot_data), axis=0), '-',
                 label=r'$r_0 \in [{},{}]$'.format(target_start-25, target_start+25))
        ax3.plot(np.mean(list(plot_data), axis=0), '-',
                 label=r'MCTS, $r_0 \in [{},{}]$'.format(target_start-25, target_start+25))
    for r in sorted_filtered_dqn_runs:
        config = get_config('dqn', r)
        data = get_data('dqn', r)
        plot_data = list(data['centroid_err'].apply(
            lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0]))
        y = np.mean(list(plot_data), axis=0)
        target_start = int(config['Methods']['target_start'])
        ax2.plot(
            y, '-', label=r'$r_0 \in [{},{}]$'.format(target_start-25, target_start+25))
        ax3.plot(
            y, '--', label=r'DQN, $r_0 \in [{},{}]$'.format(target_start-25, target_start+25))
    ax1.margins(0)
    ax1.set_ylim(0, 125)
    ax1.set_xlabel('Time Step', fontsize=16)
    ax1.set_ylabel('{}'.format(metric_s), fontsize=16)
    ax1.set_title('MCTS', fontsize=20)
    ax1.legend(fontsize=10)
    ax2.margins(0)
    ax2.set_ylim(0, 125)
    ax2.set_xlabel('Time Step', fontsize=16)
    ax2.set_ylabel('{}'.format(metric_s), fontsize=16)
    ax2.set_title('DQN', fontsize=20)
    ax2.legend(fontsize=10)
    ax3.margins(0)
    ax3.set_ylim(0, 125)
    ax3.set_xlabel('Time Step', fontsize=16)
    ax3.set_ylabel('{}'.format(metric_s), fontsize=16)
    ax3.set_title('MCTS vs DQN', fontsize=20)
    ax3.legend(fontsize=10)
    plt.subplots_adjust(top=0.85)
    plt.suptitle('{} & {}'.format(sensor_str[sensor], reward_str[reward]))
    plt.show()
def single_plot(config, metric='centroid_err', variance_bars=False, verbose=False, limit=1):
    """Standalone figure plotting one metric's median over time for MCTS
    (solid) vs DQN (dashed) runs; shows the figure via plt.show().

    Args:
        config: dict of config-filter entries forwarded to filter_runs().
        metric: DataFrame column of per-step values (stringified lists).
        variance_bars: shade the 16th-84th percentile band when True.
        verbose: print each run name and config.
        limit: number of most-recent matching runs per method.
    """
    reward_str = {'range_reward': 'State Dependent Reward',
                  'entropy_collision_reward': 'Belief Dependent Reward'}
    reward_labels = {'range_reward': 'state',
                     'entropy_collision_reward': 'belief'}
    sensor_str = {'drone': 'Bearings Sensor',
                  'signalstrength': 'Signal Strength Sensor'}
    metric_str = {'centroid_err': 'Centroid Distance'}
    metric_s = metric_str.get(metric, metric)
    mcts_config_filter = {}
    dqn_config_filter = {}
    mcts_config_filter.update(config)
    dqn_config_filter.update(config)
    filtered_dqn_runs = sorted(filter_runs('dqn', dqn_config_filter))
    filtered_mcts_runs = sorted(filter_runs('mcts', mcts_config_filter))
    sensor = config.get('sensor', 'all')
    fig = plt.figure(figsize=(12, 8))
    ax1 = plt.subplot(1, 1, 1)
    # red blue color scheme
    if limit == 1:
        ax1.set_prop_cycle(color=['#ca0020', '#0571b0'])
    else:
        ax1.set_prop_cycle(color=['#ca0020', '#f4a582', '#0571b0', '#92c5de'])
    # blue green color scheme -- overrides the red/blue cycle above
    if limit == 1:
        ax1.set_prop_cycle(color=['#1f78b4', '#33a02c'])
    elif limit == 2:
        ax1.set_prop_cycle(color=['#3288bd', '#66c2a5', '#d53e4f', '#f46d43'])
    else:
        ax1.set_prop_cycle(color=['#1f78b4', '#a6cee3', '#33a02c', '#b2df8a'])
    mcts_avg_inference_time = 10  # fallback when a run logged no inference times
    dqn_avg_inference_time = 10
    for r in filtered_mcts_runs[-limit:]:
        config = get_config('mcts', r)
        if verbose:
            print(r, '\n')
            print(config)
            print('=======================')
        data = get_data('mcts', r)
        if data.get('inference_times', None) is not None:
            mcts_avg_inference_time = np.mean(list(data['inference_times'].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
        plot_data = list(data[metric].apply(lambda x: [float(xx)
                                                       for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0]))
        med = np.percentile(list(plot_data), 50, axis=0)
        low = np.percentile(list(plot_data), 16, axis=0)
        high = np.percentile(list(plot_data), 84, axis=0)
        mcts_label_str = 'MCTS'
        if limit == 2:
            mcts_label_str = r'MCTS, $R_{{\mathrm{{{}}}}}$'.format(
                reward_labels[config['Methods']['reward']])
        ax1.plot(med, '-', label=mcts_label_str)
        if variance_bars:
            y_std = np.std(list(plot_data), axis=0)
            ax1.fill_between(np.arange(len(med)), low, high, alpha=0.5)
        print('MCTS, {}, {}, inference time={:.2e}s'.format(
            sensor_str[config['Methods']['sensor']], reward_str[config['Methods']['reward']], mcts_avg_inference_time))
    for r in filtered_dqn_runs[-limit:]:
        config = get_config('dqn', r)
        if verbose:
            print(r, '\n')
            print(config)
            print('=======================')
        data = get_data('dqn', r)
        if data.get('inference_times', None) is not None:
            dqn_avg_inference_time = np.mean(list(data['inference_times'].apply(
                lambda x: [float(xx) for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0])))
        plot_data = list(data[metric].apply(lambda x: [float(xx)
                                                       for xx in re.split(', |\s+', x[1:-1]) if len(xx) > 0]))
        med = np.percentile(list(plot_data), 50, axis=0)
        low = np.percentile(list(plot_data), 16, axis=0)
        high = np.percentile(list(plot_data), 84, axis=0)
        dqn_label_str = 'DQN'
        if limit == 2:
            dqn_label_str = r'DQN, $R_{{\mathrm{{{}}}}}$'.format(
                reward_labels[config['Methods']['reward']])
        ax1.plot(med, '--', label=dqn_label_str)
        if variance_bars:
            y_std = np.std(list(plot_data), axis=0)
            ax1.fill_between(np.arange(len(med)), low, high, alpha=0.5)
        # plt caption
        print('DQN, {}, {}, inference time={:.2e}s'.format(
            sensor_str[config['Methods']['sensor']], reward_str[config['Methods']['reward']], dqn_avg_inference_time))
    print(
        'Speedup (MCTS/DQN) = {:.2f}x'.format(mcts_avg_inference_time/dqn_avg_inference_time))
    ax1.margins(0)
    ax1.set_ylim(0, 125)
    ax1.set_xlabel('Time Step', fontsize=16)
    ax1.set_ylabel('{}'.format(metric_s), fontsize=16)
    ax1.tick_params(axis='both', which='both', labelsize=14)
    ax1.legend(fontsize=20)
    plt.show()
def get_config(method_name, run_name):
    """Load a run's header log and normalize its reward field.

    Reads ``{RUN_DIR}/{method_name}/{run_name}_header.txt`` via
    read_header_log() and defaults ``config['Methods']['reward']`` to
    'range_reward' when the run predates that setting.
    """
    header_path = f'{RUN_DIR}/{method_name}/{run_name}_header.txt'
    config = read_header_log(header_path)
    methods = config['Methods']
    methods['reward'] = methods.get('reward', 'range_reward')
    return config
def get_data(method_name, run_name):
    """Load a run's metrics CSV into a pandas DataFrame."""
    csv_path = f'{RUN_DIR}/{method_name}/{run_name}_data.csv'
    return pd.read_csv(csv_path)
def append_metric_avgs(df, metrics):
    """Add an 'avg_<metric>' column of per-row means for each metric.

    A metric is skipped when either an 'avg_<metric>' or an
    'average_<metric>' column already exists in ``df``.
    """
    for metric in metrics:
        avg_col = 'avg_{}'.format(metric)
        alt_col = 'average_{}'.format(metric)
        if avg_col in df or alt_col in df:
            continue
        df[avg_col] = np.mean(list(df[metric]), axis=1)
def get_valid_runs(method_name):
    """Return run names under RUN_DIR/<method_name>/ that have both a
    _data.csv and a parseable _header.txt.

    Run names are taken from the portion of each filename before the first
    underscore; order is unspecified (set-based dedup).
    """
    files = os.listdir('{}/{}/'.format(RUN_DIR, method_name))
    runs = list(set(f.split('_')[0] for f in files))
    valid_runs = []
    for r in runs:
        if (r + '_data.csv' in files) and (r + '_header.txt' in files):
            try:
                get_config(method_name, r)
            except Exception:
                # Header exists but fails to parse -- skip this run rather
                # than abort the listing.  (Was a bare `except:`, which also
                # swallowed KeyboardInterrupt/SystemExit.)
                continue
            valid_runs.append(r)
    return valid_runs
def filter_runs(method_name, config_filter=None):
runs = get_valid_runs(method_name)
filtered_runs = []
for r in runs:
match = True
if method_name == 'baseline':
config = get_config(method_name, r)['Methods']
config.update(get_config(method_name, r)['Defaults'])
else:
config = get_config(method_name, r)['Methods']
for k, v in config_filter.items():
if k == 'target_speed':
v = float(v)
if ((config.get(k) is None) and (v != 1.)) or (float(config.get(k)) != v):
match = False
break
elif k == 'target_start':
if isinstance(v, list):
if config.get(k, None) not in v:
match = False
break
else:
if config.get(k) is None or float(config.get(k)) != float(v):
match = False
break
elif k == 'datetime_start':
config_datetime = datetime.strptime(v, '%Y-%m-%dT%H:%M:%S')
run_datetime = datetime.strptime(r, '%Y-%m-%dT%H:%M:%S')
if run_datetime < config_datetime:
match = False
break
elif k == 'datetime_end':
config_datetime = datetime.strptime(v, '%Y-%m-%dT%H:%M:%S')
run_datetime = datetime.strptime(r, '%Y-%m-%dT%H:%M:%S')
if run_datetime > config_datetime:
match = False
break
elif k == 'fading_sigma':
v = float(v)
if float(config.get(k, 0.0)) != v:
match = False
break
elif k == 'particle_resample':
v = float(v)
if float(config.get(k, 0.005)) != v:
match = False
break
elif config.get(k) != v:
match = False
break
if match:
filtered_runs.append(r)
return filtered_runs
def show_results():
    """Ad-hoc smoke check: print valid and filtered DQN runs, then load one
    hard-coded MCTS run's config and data."""
    method_name = 'mcts' # should be 'mcts' or 'dqn'
    dqn_runs = get_valid_runs('dqn')
    config_filter = {'method': 'dqn'}
    filtered_dqn_runs = filter_runs('dqn', config_filter)
    print(filtered_dqn_runs)
    print(dqn_runs)
    run_name = '2021-04-21T09:46:52'  # NOTE(review): hard-coded run id -- must exist on disk
    config = get_config(method_name, run_name)
    data = get_data(method_name, run_name)
|
en
| 0.740521
|
# was panel_dual_axis # set strings # get configs and run data # blue green color scheme # 3 # was single_plot_var # red blue color scheme # blue green color scheme # plt caption # was single_plot_combined_cov # blue green color scheme # was single_plot_combined # red blue color scheme # blue green color scheme # median, 16, 84% # plt caption # red blue color scheme # blue green color scheme # plt caption Results file reader functions # should be 'mcts' or 'dqn'
| 2.461738
| 2
|
tethysext/atcore/controllers/rest/spatial_reference.py
|
Aquaveo/tethysext-atcore
| 3
|
6627155
|
<reponame>Aquaveo/tethysext-atcore
"""
********************************************************************************
* Name: spatial_reference.py
* Author: nswain
* Created On: May 14, 2018
* Copyright: (c) Aquaveo 2018
********************************************************************************
"""
from django.http import JsonResponse
from tethys_apps.base.controller import TethysController
from tethysext.atcore.services.spatial_reference import SpatialReferenceService
class QuerySpatialReference(TethysController):
    """
    Controller for spatial reference queries (used by the modify_resource page).

    GET: Look up spatial reference systems by SRID ("id" param) or by a
    free-text query ("q" param).
    """
    _app = None
    _persistent_store_name = None
    _SpatialReferenceService = SpatialReferenceService

    def get(self, request, *args, **kwargs):
        """
        Route GET requests by query parameter.
        """
        if request.GET.get('id', False):
            return self.query_srid_by_id(request)
        elif request.GET.get('q', False):
            return self.query_srid_by_query(request)

        return JsonResponse({'error': 'BadRequest: must pass either "id" or "q" parameters.'})

    def query_srid_by_id(self, request):
        """
        Look up a single spatial reference system by SRID.

        Normally called by the select2 Ajax for looking up SRIDs from the SQL database.
        """
        srid = request.GET.get('id', '')
        _engine = self.get_engine()
        srs = self._SpatialReferenceService(_engine)
        # Renamed from `dict`, which shadowed the builtin.
        response_data = srs.get_spatial_reference_system_by_srid(srid)
        return JsonResponse(response_data)

    def query_srid_by_query(self, request):
        """
        Look up spatial reference systems matching a free-text query.

        Normally called by the select2 Ajax for looking up SRIDs from the SQL database.
        """
        query_words = request.GET.get('q', '').split()
        _engine = self.get_engine()
        srs = self._SpatialReferenceService(_engine)
        response_data = srs.get_spatial_reference_system_by_query_string(query_words)
        return JsonResponse(response_data)

    def get_engine(self):
        """
        Get connection to database.

        Returns:
            sqlalchemy.engine: connection to database with spatial_ref_sys table.
        """
        if not self._app:
            raise NotImplementedError('_app not implemented for QuerySpatialReference controller.')

        if not self._persistent_store_name:
            raise NotImplementedError('_persistent_store_name not implemented for QuerySpatialReference controller.')

        return self._app.get_persistent_store_database(self._persistent_store_name)
|
"""
********************************************************************************
* Name: spatial_reference.py
* Author: nswain
* Created On: May 14, 2018
* Copyright: (c) Aquaveo 2018
********************************************************************************
"""
from django.http import JsonResponse
from tethys_apps.base.controller import TethysController
from tethysext.atcore.services.spatial_reference import SpatialReferenceService
class QuerySpatialReference(TethysController):
    """
    Controller for spatial reference queries (used by the modify_resource page).

    GET: Look up spatial reference systems by SRID ("id" param) or by a
    free-text query ("q" param).
    """
    _app = None
    _persistent_store_name = None
    _SpatialReferenceService = SpatialReferenceService

    def get(self, request, *args, **kwargs):
        """
        Route GET requests by query parameter.
        """
        if request.GET.get('id', False):
            return self.query_srid_by_id(request)
        elif request.GET.get('q', False):
            return self.query_srid_by_query(request)

        return JsonResponse({'error': 'BadRequest: must pass either "id" or "q" parameters.'})

    def query_srid_by_id(self, request):
        """
        Look up a single spatial reference system by SRID.

        Normally called by the select2 Ajax for looking up SRIDs from the SQL database.
        """
        srid = request.GET.get('id', '')
        _engine = self.get_engine()
        srs = self._SpatialReferenceService(_engine)
        # Renamed from `dict`, which shadowed the builtin.
        response_data = srs.get_spatial_reference_system_by_srid(srid)
        return JsonResponse(response_data)

    def query_srid_by_query(self, request):
        """
        Look up spatial reference systems matching a free-text query.

        Normally called by the select2 Ajax for looking up SRIDs from the SQL database.
        """
        query_words = request.GET.get('q', '').split()
        _engine = self.get_engine()
        srs = self._SpatialReferenceService(_engine)
        response_data = srs.get_spatial_reference_system_by_query_string(query_words)
        return JsonResponse(response_data)

    def get_engine(self):
        """
        Get connection to database.

        Returns:
            sqlalchemy.engine: connection to database with spatial_ref_sys table.
        """
        if not self._app:
            raise NotImplementedError('_app not implemented for QuerySpatialReference controller.')

        if not self._persistent_store_name:
            raise NotImplementedError('_persistent_store_name not implemented for QuerySpatialReference controller.')

        return self._app.get_persistent_store_database(self._persistent_store_name)
|
en
| 0.716072
|
******************************************************************************** * Name: spatial_reference.py * Author: nswain * Created On: May 14, 2018 * Copyright: (c) Aquaveo 2018 ******************************************************************************** Controller for modify_resource page. POST: Handle spatial reference queries. Route post requests. " This controller is normally called by the select2 Ajax for looking up SRIDs from the SQL database " This controller is normally called by the select2 Ajax for looking up SRIDs from the SQL database Get connection to database. Returns: sqlalchemy.engine: connection to database with spatial_ref_sys table.
| 2.108763
| 2
|
novnc/utils/websockify.old/websockify/auth_plugins.py
|
minfucui/html
| 1
|
6627156
|
class BasePlugin(object):
    """Base class for websockify auth plugins; the default authenticate()
    accepts every request (returns None without raising)."""
    def __init__(self, src=None):
        # src is the plugin-specific configuration string.
        self.source = src

    def authenticate(self, headers, target_host, target_port):
        """Subclasses raise AuthenticationError to reject the request."""
        pass
class AuthenticationError(Exception):
    """Raised by auth plugins to reject a request.

    Carries the HTTP response code, extra response headers, and an optional
    response message; the logged message defaults to the response message.
    """
    def __init__(self, log_msg=None, response_code=403, response_headers=None, response_msg=None):
        self.code = response_code
        # Fixed mutable default: a shared `{}` default could be mutated by
        # one call site and leak headers into every later instance.
        self.headers = {} if response_headers is None else response_headers
        self.msg = response_msg

        if log_msg is None:
            log_msg = response_msg

        super(AuthenticationError, self).__init__('%s %s' % (self.code, log_msg))
class InvalidOriginError(AuthenticationError):
    """Rejection raised when the Origin header is not in the allowed set."""
    def __init__(self, expected, actual):
        self.expected_origin = expected
        self.actual_origin = actual
        detail = "Invalid Origin Header: Expected one of %s, got '%s'" % (expected, actual)
        super(InvalidOriginError, self).__init__(response_msg='Invalid Origin',
                                                 log_msg=detail)
class BasicHTTPAuth(object):
    """Verifies Basic Auth headers. Specify src as username:password"""

    def __init__(self, src=None):
        self.src = src

    def authenticate(self, headers, target_host, target_port):
        """Validate the request's Basic Authorization header.

        Raises AuthenticationError with 403 for a malformed header, demands
        authentication (401) when credentials are absent or wrong, and
        returns None on success.
        """
        import base64
        import binascii

        auth_header = headers.get('Authorization')
        if auth_header:
            if not auth_header.startswith('Basic '):
                self.auth_error()

            try:
                user_pass_raw = base64.b64decode(auth_header[6:])
            except (TypeError, binascii.Error):
                # Python 3 raises binascii.Error (not TypeError) for bad
                # base64; catching only TypeError let malformed headers
                # crash the handler instead of returning 403.
                self.auth_error()

            try:
                # http://stackoverflow.com/questions/7242316/what-encoding-should-i-use-for-http-basic-authentication
                # NOTE: ISO-8859-1 decodes every byte value, so this branch
                # is effectively unreachable; kept for safety.
                user_pass_as_text = user_pass_raw.decode('ISO-8859-1')
            except UnicodeDecodeError:
                self.auth_error()

            user_pass = user_pass_as_text.split(':', 1)
            if len(user_pass) != 2:
                self.auth_error()

            if not self.validate_creds(*user_pass):
                self.demand_auth()
        else:
            self.demand_auth()

    def validate_creds(self, username, password):
        # NOTE(review): plain == is not constant-time; consider
        # hmac.compare_digest if timing attacks are a concern here.
        if '%s:%s' % (username, password) == self.src:
            return True
        else:
            return False

    def auth_error(self):
        """Reject outright with 403."""
        raise AuthenticationError(response_code=403)

    def demand_auth(self):
        """Ask the client to (re)authenticate with 401 + WWW-Authenticate."""
        raise AuthenticationError(response_code=401,
                                  response_headers={'WWW-Authenticate': 'Basic realm="Websockify"'})
class ExpectOrigin(object):
    """Accepts only requests whose Origin header appears in a
    whitespace-separated allow list given as ``src``."""
    def __init__(self, src=None):
        self.source = src.split() if src is not None else []

    def authenticate(self, headers, target_host, target_port):
        origin = headers.get('Origin', None)
        allowed = origin is not None and origin in self.source
        if not allowed:
            raise InvalidOriginError(expected=self.source, actual=origin)
class ClientCertCNAuth(object):
    """Verifies client by SSL certificate. Specify src as whitespace separated list of common names."""
    def __init__(self, src=None):
        self.source = [] if src is None else src.split()

    def authenticate(self, headers, target_host, target_port):
        common_name = headers.get('SSL_CLIENT_S_DN_CN', None)
        if common_name not in self.source:
            raise AuthenticationError(response_code=403)
|
class BasePlugin(object):
    """Base class for websockify auth plugins; the default authenticate()
    accepts every request (returns None without raising)."""
    def __init__(self, src=None):
        # src is the plugin-specific configuration string.
        self.source = src

    def authenticate(self, headers, target_host, target_port):
        """Subclasses raise AuthenticationError to reject the request."""
        pass
class AuthenticationError(Exception):
    """Raised by auth plugins to reject a request.

    Carries the HTTP response code, extra response headers, and an optional
    response message; the logged message defaults to the response message.
    """
    def __init__(self, log_msg=None, response_code=403, response_headers=None, response_msg=None):
        self.code = response_code
        # Fixed mutable default: a shared `{}` default could be mutated by
        # one call site and leak headers into every later instance.
        self.headers = {} if response_headers is None else response_headers
        self.msg = response_msg

        if log_msg is None:
            log_msg = response_msg

        super(AuthenticationError, self).__init__('%s %s' % (self.code, log_msg))
class InvalidOriginError(AuthenticationError):
    """Rejection raised when the Origin header is not in the allowed set."""
    def __init__(self, expected, actual):
        self.expected_origin = expected
        self.actual_origin = actual
        detail = "Invalid Origin Header: Expected one of %s, got '%s'" % (expected, actual)
        super(InvalidOriginError, self).__init__(response_msg='Invalid Origin',
                                                 log_msg=detail)
class BasicHTTPAuth(object):
    """Verifies Basic Auth headers. Specify src as username:password"""

    def __init__(self, src=None):
        self.src = src

    def authenticate(self, headers, target_host, target_port):
        """Validate the request's Basic Authorization header.

        Raises AuthenticationError with 403 for a malformed header, demands
        authentication (401) when credentials are absent or wrong, and
        returns None on success.
        """
        import base64
        import binascii

        auth_header = headers.get('Authorization')
        if auth_header:
            if not auth_header.startswith('Basic '):
                self.auth_error()

            try:
                user_pass_raw = base64.b64decode(auth_header[6:])
            except (TypeError, binascii.Error):
                # Python 3 raises binascii.Error (not TypeError) for bad
                # base64; catching only TypeError let malformed headers
                # crash the handler instead of returning 403.
                self.auth_error()

            try:
                # http://stackoverflow.com/questions/7242316/what-encoding-should-i-use-for-http-basic-authentication
                # NOTE: ISO-8859-1 decodes every byte value, so this branch
                # is effectively unreachable; kept for safety.
                user_pass_as_text = user_pass_raw.decode('ISO-8859-1')
            except UnicodeDecodeError:
                self.auth_error()

            user_pass = user_pass_as_text.split(':', 1)
            if len(user_pass) != 2:
                self.auth_error()

            if not self.validate_creds(*user_pass):
                self.demand_auth()
        else:
            self.demand_auth()

    def validate_creds(self, username, password):
        # NOTE(review): plain == is not constant-time; consider
        # hmac.compare_digest if timing attacks are a concern here.
        if '%s:%s' % (username, password) == self.src:
            return True
        else:
            return False

    def auth_error(self):
        """Reject outright with 403."""
        raise AuthenticationError(response_code=403)

    def demand_auth(self):
        """Ask the client to (re)authenticate with 401 + WWW-Authenticate."""
        raise AuthenticationError(response_code=401,
                                  response_headers={'WWW-Authenticate': 'Basic realm="Websockify"'})
class ExpectOrigin(object):
    """Accepts only requests whose Origin header appears in a
    whitespace-separated allow list given as ``src``."""
    def __init__(self, src=None):
        self.source = src.split() if src is not None else []

    def authenticate(self, headers, target_host, target_port):
        origin = headers.get('Origin', None)
        allowed = origin is not None and origin in self.source
        if not allowed:
            raise InvalidOriginError(expected=self.source, actual=origin)
class ClientCertCNAuth(object):
    """Verifies client by SSL certificate. Specify src as whitespace separated list of common names."""
    def __init__(self, src=None):
        self.source = [] if src is None else src.split()

    def authenticate(self, headers, target_host, target_port):
        common_name = headers.get('SSL_CLIENT_S_DN_CN', None)
        if common_name not in self.source:
            raise AuthenticationError(response_code=403)
|
en
| 0.721705
|
Verifies Basic Auth headers. Specify src as username:password # http://stackoverflow.com/questions/7242316/what-encoding-should-i-use-for-http-basic-authentication Verifies client by SSL certificate. Specify src as whitespace separated list of common names.
| 2.771929
| 3
|
functions/degiskenler.py
|
mboyr4z/Sayi_Gorsel_Siralama_Oyunu
| 9
|
6627157
|
class degisken():
    """Shared mutable state for the number/image sorting game."""

    def __init__(self):
        # Timing / progress counters
        self.sure = 0                 # elapsed time
        self.sureKontrol = 1          # timer-enabled flag
        self.hamleSayisi = 0          # number of moves made
        # Board configuration
        self.boyut = 3                # board size (3x3 by default)
        self.mod = ""                 # selected game mode
        self.turnuvaSeviye = 0        # tournament level
        # Assets / UI state
        self.dosyaKonum = ""          # image file location
        self.konum = ""               # current path/location
        self.checkBoxKontrol = False  # checkbox state
        self.listButon = []           # button widgets
        self.gorsel = []              # image tiles
|
class degisken():
    """Shared mutable state for the number/image sorting game."""

    def __init__(self):
        # Timing / progress counters
        self.sure = 0                 # elapsed time
        self.sureKontrol = 1          # timer-enabled flag
        self.hamleSayisi = 0          # number of moves made
        # Board configuration
        self.boyut = 3                # board size (3x3 by default)
        self.mod = ""                 # selected game mode
        self.turnuvaSeviye = 0        # tournament level
        # Assets / UI state
        self.dosyaKonum = ""          # image file location
        self.konum = ""               # current path/location
        self.checkBoxKontrol = False  # checkbox state
        self.listButon = []           # button widgets
        self.gorsel = []              # image tiles
|
none
| 1
| 2.588075
| 3
|
|
Semenenya_Vladislav_dz_8/task_8_4.py
|
neesaj/1824_GB_Python_1
| 0
|
6627158
|
<gh_stars>0
"""
Написать декоратор с аргументом-функцией (callback), позволяющий валидировать входные значения функции и выбрасывать
исключение ValueError, если что-то не так, например:
def val_checker...
...
@val_checker(lambda x: x > 0)
def calc_cube(x):
return x ** 3
# >>> a = calc_cube(5)
125
# >>> a = calc_cube(-5)
Traceback (most recent call last):
...
raise ValueError(msg)
ValueError: wrong val -5
Примечание: сможете ли вы замаскировать работу декоратора?
"""
def val_checker(func):
    """Decorator: prompt the user for an integer, call *func* on it and
    print the result; on error, print the exception instead of propagating.

    NOTE(review): the module docstring asks for a decorator *factory* taking
    a callback and raising ValueError; this implementation only wraps the
    call -- left as-is apart from the bug fixes marked below.
    """
    def wrapper():
        digit = int(input('Введите число: '))
        try:
            print(func(digit))
        except Exception as ex:  # bug fix: was BaseException, which would also swallow KeyboardInterrupt/SystemExit
            print(f'ValueError {ex}')
    # Bug fix: was `return wrapper()`, which ran the prompt at decoration
    # time and bound calc_cube to None instead of to a callable.
    return wrapper


@val_checker
def calc_cube(x):
    """Return x cubed (reached through the val_checker input prompt)."""
    return x ** 3
|
"""
Написать декоратор с аргументом-функцией (callback), позволяющий валидировать входные значения функции и выбрасывать
исключение ValueError, если что-то не так, например:
def val_checker...
...
@val_checker(lambda x: x > 0)
def calc_cube(x):
return x ** 3
# >>> a = calc_cube(5)
125
# >>> a = calc_cube(-5)
Traceback (most recent call last):
...
raise ValueError(msg)
ValueError: wrong val -5
Примечание: сможете ли вы замаскировать работу декоратора?
"""
def val_checker(func):
    """Decorator: prompt the user for an integer, call *func* on it and
    print the result; on error, print the exception instead of propagating.

    NOTE(review): the module docstring asks for a decorator *factory* taking
    a callback and raising ValueError; this implementation only wraps the
    call -- left as-is apart from the bug fixes marked below.
    """
    def wrapper():
        digit = int(input('Введите число: '))
        try:
            print(func(digit))
        except Exception as ex:  # bug fix: was BaseException, which would also swallow KeyboardInterrupt/SystemExit
            print(f'ValueError {ex}')
    # Bug fix: was `return wrapper()`, which ran the prompt at decoration
    # time and bound calc_cube to None instead of to a callable.
    return wrapper


@val_checker
def calc_cube(x):
    """Return x cubed (reached through the val_checker input prompt)."""
    return x ** 3
|
ru
| 0.845347
|
Написать декоратор с аргументом-функцией (callback), позволяющий валидировать входные значения функции и выбрасывать исключение ValueError, если что-то не так, например: def val_checker... ... @val_checker(lambda x: x > 0) def calc_cube(x): return x ** 3 # >>> a = calc_cube(5) 125 # >>> a = calc_cube(-5) Traceback (most recent call last): ... raise ValueError(msg) ValueError: wrong val -5 Примечание: сможете ли вы замаскировать работу декоратора?
| 4.088713
| 4
|
ttbd/ttbl/test_ttb_client.py
|
sriiora/tcf
| 0
|
6627159
|
#! /usr/bin/python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
#
# FIXME: cache the target list's per target broker into a pickled
# ${TCF_CACHE:-~/.tcf/cache}/BROKER.cache; use the cache instead of
# calling target_list(); implement cache-refresh command.
# FIXME: do a python iterator over the targets
"""
Client API for accessing *ttbd*\'s REST API
"""
import sys
import unittest
import testing
import tcfl.ttb_client
class _test_target(unittest.TestCase):
def setUp(self):
pass
@staticmethod
@unittest.expectedFailure
def test_acquire__bad_args():
tcfl.ttb_client.rest_test_target("a")
if __name__ == "__main__":
testing.logging_init(sys.argv)
unittest.main()
|
#! /usr/bin/python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
#
# FIXME: cache the target list's per target broker into a pickled
# ${TCF_CACHE:-~/.tcf/cache}/BROKER.cache; use the cache instead of
# calling target_list(); implement cache-refresh command.
# FIXME: do a python iterator over the targets
"""
Client API for accessing *ttbd*\'s REST API
"""
import sys
import unittest
import testing
import tcfl.ttb_client
class _test_target(unittest.TestCase):
def setUp(self):
pass
@staticmethod
@unittest.expectedFailure
def test_acquire__bad_args():
tcfl.ttb_client.rest_test_target("a")
if __name__ == "__main__":
testing.logging_init(sys.argv)
unittest.main()
|
en
| 0.538657
|
#! /usr/bin/python3 # # Copyright (c) 2017 Intel Corporation # # SPDX-License-Identifier: Apache-2.0 # # # FIXME: cache the target list's per target broker into a pickled # ${TCF_CACHE:-~/.tcf/cache}/BROKER.cache; use the cache instead of # calling target_list(); implement cache-refresh command. # FIXME: do a python iterator over the targets Client API for accessing *ttbd*\'s REST API
| 2.150354
| 2
|
old/openpose_json_old.py
|
bedssys/Bedssys
| 2
|
6627160
|
import argparse
import logging
import sys
import time
from tf_pose import common
import cv2
import numpy as np
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
from tf_pose.eval import write_coco_json
# Module-level logger: DEBUG-level messages go to a stream handler with a
# timestamped "[time] [name] [level] message" format.
logger = logging.getLogger('TfPoseEstimator')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
if __name__ == '__main__':
    import json  # bug fix: json.dump below was called without json ever being imported

    parser = argparse.ArgumentParser(description='tf-pose-estimation run')
    parser.add_argument('--image', type=str, default='./images/p1.jpg')
    parser.add_argument('--model', type=str, default='cmu', help='cmu / mobilenet_thin')
    parser.add_argument('--resize', type=str, default='0x0',
                        help='if provided, resize images before they are processed. default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')
    parser.add_argument('--resize-out-ratio', type=float, default=4.0,
                        help='if provided, resize heatmaps before they are post-processed. default=1.0')
    args = parser.parse_args()

    # A 0x0 resize means "fall back to the default 432x368 input size".
    w, h = model_wh(args.resize)
    if w == 0 or h == 0:
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(432, 368))
    else:
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))

    # estimate human poses from a single image !
    image = common.read_imgfile(args.image, None, None)
    if image is None:
        logger.error('Image can not be read, path=%s' % args.image)
        sys.exit(-1)

    t = time.time()
    humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)
    elapsed = time.time() - t
    logger.info('inference image: %s in %.4f seconds.' % (args.image, elapsed))

    # One {"keypoints": ...} entry per detected human, in COCO json format.
    image_h, image_w = image.shape[:2]
    result = [{'keypoints': write_coco_json(human, image_w, image_h)} for human in humans]

    # Bug fix: use a context manager so the file is closed even if
    # serialization raises (was open/dump/close without try/finally).
    with open("Ta_pose.txt", 'w') as fp:
        json.dump(result, fp)
|
import argparse
import logging
import sys
import time
from tf_pose import common
import cv2
import numpy as np
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
from tf_pose.eval import write_coco_json
# Module-level logger: DEBUG-level messages go to a stream handler with a
# timestamped "[time] [name] [level] message" format.
logger = logging.getLogger('TfPoseEstimator')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
if __name__ == '__main__':
    import json  # bug fix: json.dump below was called without json ever being imported

    parser = argparse.ArgumentParser(description='tf-pose-estimation run')
    parser.add_argument('--image', type=str, default='./images/p1.jpg')
    parser.add_argument('--model', type=str, default='cmu', help='cmu / mobilenet_thin')
    parser.add_argument('--resize', type=str, default='0x0',
                        help='if provided, resize images before they are processed. default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')
    parser.add_argument('--resize-out-ratio', type=float, default=4.0,
                        help='if provided, resize heatmaps before they are post-processed. default=1.0')
    args = parser.parse_args()

    # A 0x0 resize means "fall back to the default 432x368 input size".
    w, h = model_wh(args.resize)
    if w == 0 or h == 0:
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(432, 368))
    else:
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))

    # estimate human poses from a single image !
    image = common.read_imgfile(args.image, None, None)
    if image is None:
        logger.error('Image can not be read, path=%s' % args.image)
        sys.exit(-1)

    t = time.time()
    humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)
    elapsed = time.time() - t
    logger.info('inference image: %s in %.4f seconds.' % (args.image, elapsed))

    # One {"keypoints": ...} entry per detected human, in COCO json format.
    image_h, image_w = image.shape[:2]
    result = [{'keypoints': write_coco_json(human, image_w, image_h)} for human in humans]

    # Bug fix: use a context manager so the file is closed even if
    # serialization raises (was open/dump/close without try/finally).
    with open("Ta_pose.txt", 'w') as fp:
        json.dump(result, fp)
|
en
| 0.827581
|
# estimate human poses from a single image !
| 2.20812
| 2
|
celery/tests/test_task_http.py
|
gthb/celery
| 2
|
6627161
|
# -*- coding: utf-8 -*-
from __future__ import generators
import logging
import unittest2 as unittest
from urllib import addinfourl
try:
from contextlib import contextmanager
except ImportError:
from celery.tests.utils import fallback_contextmanager as contextmanager
from StringIO import StringIO
from anyjson import serialize
from celery.task import http
from celery.utils.functional import wraps
from celery.tests.utils import eager_tasks, execute_context
@contextmanager
def mock_urlopen(response_method):
    """Context manager that temporarily patches ``urllib2.urlopen``.

    While active, ``urlopen(url)`` returns an ``addinfourl`` built from
    ``response_method(url) -> (response_data, headers)`` instead of
    performing network I/O.
    """
    import urllib2
    urlopen = urllib2.urlopen

    @wraps(urlopen)
    def _mocked(url, *args, **kwargs):
        response_data, headers = response_method(url)
        return addinfourl(StringIO(response_data), headers, url)

    urllib2.urlopen = _mocked
    try:
        yield True
    finally:
        # Bug fix: restore the real urlopen even when the with-block raises;
        # previously an exception left urllib2 permanently patched for the
        # rest of the test run.
        urllib2.urlopen = urlopen
def _response(res):
return lambda r: (res, [])
def success_response(value):
    """Serialized body for a task that succeeded with ``value``."""
    return _response(serialize({"status": "success", "retval": value}))
def fail_response(reason):
    """Serialized body for a task that failed for ``reason``."""
    return _response(serialize({"status": "failure", "reason": reason}))
def unknown_response():
    """Serialized body whose status string the dispatcher does not recognize."""
    return _response(serialize({"status": "u.u.u.u", "retval": True}))
class TestEncodings(unittest.TestCase):
    """http.utf8dict must return native-str keys and values."""
    def test_utf8dict(self):
        # Mix unicode text with already-encoded utf-8 bytes in one dict.
        d = {u"følelser ær langé": u"ærbadægzaå寨Å",
             "foobar".encode("utf-8"): "xuzzybaz".encode("utf-8")}
        for key, value in http.utf8dict(d.items()).items():
            self.assertIsInstance(key, str)
            self.assertIsInstance(value, str)
class TestMutableURL(unittest.TestCase):
    """Parsing, mutation and round-tripping of http.MutableURL."""
    def test_url_query(self):
        # Query params survive a parse -> mutate -> str() -> reparse round trip.
        url = http.MutableURL("http://example.com?x=10&y=20&z=Foo")
        self.assertDictContainsSubset({"x": "10",
                                       "y": "20",
                                       "z": "Foo"}, url.query)
        url.query["name"] = "George"
        url = http.MutableURL(str(url))
        self.assertDictContainsSubset({"x": "10",
                                       "y": "20",
                                       "z": "Foo",
                                       "name": "George"}, url.query)
    def test_url_keeps_everything(self):
        # Scheme, port, path and fragment must all be preserved by str().
        url = "https://e.com:808/foo/bar#zeta?x=10&y=20"
        url = http.MutableURL(url)
        self.assertEqual(str(url).split("?")[0],
                         "https://e.com:808/foo/bar#zeta")
    def test___repr__(self):
        # repr() identifies the class and echoes the URL.
        url = http.MutableURL("http://e.com/foo/bar")
        self.assertTrue(repr(url).startswith("<MutableURL: http://e.com"))
    def test_set_query(self):
        # Assigning .query replaces the previous parameters entirely.
        url = http.MutableURL("http://e.com/foo/bar/?x=10")
        url.query = {"zzz": "xxx"}
        url = http.MutableURL(str(url))
        self.assertEqual(url.query, {"zzz": "xxx"})
class TestHttpDispatch(unittest.TestCase):
    """http.HttpDispatch run against a patched urlopen: one test per response shape."""
    def test_dispatch_success(self):
        # A well-formed success body yields the remote return value.
        logger = logging.getLogger("celery.unittest")
        def with_mock_urlopen(_val):
            d = http.HttpDispatch("http://example.com/mul", "GET", {
                "x": 10, "y": 10}, logger)
            self.assertEqual(d.dispatch(), 100)
        context = mock_urlopen(success_response(100))
        execute_context(context, with_mock_urlopen)
    def test_dispatch_failure(self):
        # A failure body raises RemoteExecuteError.
        logger = logging.getLogger("celery.unittest")
        def with_mock_urlopen(_val):
            d = http.HttpDispatch("http://example.com/mul", "GET", {
                "x": 10, "y": 10}, logger)
            self.assertRaises(http.RemoteExecuteError, d.dispatch)
        context = mock_urlopen(fail_response("Invalid moon alignment"))
        execute_context(context, with_mock_urlopen)
    def test_dispatch_empty_response(self):
        # An empty body is not valid JSON -> InvalidResponseError.
        logger = logging.getLogger("celery.unittest")
        def with_mock_urlopen(_val):
            d = http.HttpDispatch("http://example.com/mul", "GET", {
                "x": 10, "y": 10}, logger)
            self.assertRaises(http.InvalidResponseError, d.dispatch)
        context = mock_urlopen(_response(""))
        execute_context(context, with_mock_urlopen)
    def test_dispatch_non_json(self):
        # Garbage bytes -> InvalidResponseError.
        logger = logging.getLogger("celery.unittest")
        def with_mock_urlopen(_val):
            d = http.HttpDispatch("http://example.com/mul", "GET", {
                "x": 10, "y": 10}, logger)
            self.assertRaises(http.InvalidResponseError, d.dispatch)
        context = mock_urlopen(_response("{'#{:'''"))
        execute_context(context, with_mock_urlopen)
    def test_dispatch_unknown_status(self):
        # A status string other than success/failure -> UnknownStatusError.
        logger = logging.getLogger("celery.unittest")
        def with_mock_urlopen(_val):
            d = http.HttpDispatch("http://example.com/mul", "GET", {
                "x": 10, "y": 10}, logger)
            self.assertRaises(http.UnknownStatusError, d.dispatch)
        context = mock_urlopen(unknown_response())
        execute_context(context, with_mock_urlopen)
    def test_dispatch_POST(self):
        # Same success path, but dispatched over POST.
        logger = logging.getLogger("celery.unittest")
        def with_mock_urlopen(_val):
            d = http.HttpDispatch("http://example.com/mul", "POST", {
                "x": 10, "y": 10}, logger)
            self.assertEqual(d.dispatch(), 100)
        context = mock_urlopen(success_response(100))
        execute_context(context, with_mock_urlopen)
class TestURL(unittest.TestCase):
    """http.URL async helpers, run eagerly against a patched urlopen."""
    def test_URL_get_async(self):
        # .get() on the handle returned by get_async yields the remote value.
        def with_eager_tasks(_val):
            def with_mock_urlopen(_val):
                d = http.URL("http://example.com/mul").get_async(x=10, y=10)
                self.assertEqual(d.get(), 100)
            context = mock_urlopen(success_response(100))
            execute_context(context, with_mock_urlopen)
        execute_context(eager_tasks(), with_eager_tasks)
    def test_URL_post_async(self):
        # Same as above, dispatched over POST.
        def with_eager_tasks(_val):
            def with_mock_urlopen(_val):
                d = http.URL("http://example.com/mul").post_async(x=10, y=10)
                self.assertEqual(d.get(), 100)
            context = mock_urlopen(success_response(100))
            execute_context(context, with_mock_urlopen)
        execute_context(eager_tasks(), with_eager_tasks)
|
# -*- coding: utf-8 -*-
from __future__ import generators
import logging
import unittest2 as unittest
from urllib import addinfourl
try:
from contextlib import contextmanager
except ImportError:
from celery.tests.utils import fallback_contextmanager as contextmanager
from StringIO import StringIO
from anyjson import serialize
from celery.task import http
from celery.utils.functional import wraps
from celery.tests.utils import eager_tasks, execute_context
@contextmanager
def mock_urlopen(response_method):
    """Context manager that temporarily patches ``urllib2.urlopen``.

    While active, ``urlopen(url)`` returns an ``addinfourl`` built from
    ``response_method(url) -> (response_data, headers)`` instead of
    performing network I/O.
    """
    import urllib2
    urlopen = urllib2.urlopen

    @wraps(urlopen)
    def _mocked(url, *args, **kwargs):
        response_data, headers = response_method(url)
        return addinfourl(StringIO(response_data), headers, url)

    urllib2.urlopen = _mocked
    try:
        yield True
    finally:
        # Bug fix: restore the real urlopen even when the with-block raises;
        # previously an exception left urllib2 permanently patched for the
        # rest of the test run.
        urllib2.urlopen = urlopen
def _response(res):
return lambda r: (res, [])
def success_response(value):
    """Serialized body for a task that succeeded with ``value``."""
    return _response(serialize({"status": "success", "retval": value}))
def fail_response(reason):
    """Serialized body for a task that failed for ``reason``."""
    return _response(serialize({"status": "failure", "reason": reason}))
def unknown_response():
    """Serialized body whose status string the dispatcher does not recognize."""
    return _response(serialize({"status": "u.u.u.u", "retval": True}))
class TestEncodings(unittest.TestCase):
    """http.utf8dict must return native-str keys and values."""
    def test_utf8dict(self):
        # Mix unicode text with already-encoded utf-8 bytes in one dict.
        d = {u"følelser ær langé": u"ærbadægzaå寨Å",
             "foobar".encode("utf-8"): "xuzzybaz".encode("utf-8")}
        for key, value in http.utf8dict(d.items()).items():
            self.assertIsInstance(key, str)
            self.assertIsInstance(value, str)
class TestMutableURL(unittest.TestCase):
    """Parsing, mutation and round-tripping of http.MutableURL."""
    def test_url_query(self):
        # Query params survive a parse -> mutate -> str() -> reparse round trip.
        url = http.MutableURL("http://example.com?x=10&y=20&z=Foo")
        self.assertDictContainsSubset({"x": "10",
                                       "y": "20",
                                       "z": "Foo"}, url.query)
        url.query["name"] = "George"
        url = http.MutableURL(str(url))
        self.assertDictContainsSubset({"x": "10",
                                       "y": "20",
                                       "z": "Foo",
                                       "name": "George"}, url.query)
    def test_url_keeps_everything(self):
        # Scheme, port, path and fragment must all be preserved by str().
        url = "https://e.com:808/foo/bar#zeta?x=10&y=20"
        url = http.MutableURL(url)
        self.assertEqual(str(url).split("?")[0],
                         "https://e.com:808/foo/bar#zeta")
    def test___repr__(self):
        # repr() identifies the class and echoes the URL.
        url = http.MutableURL("http://e.com/foo/bar")
        self.assertTrue(repr(url).startswith("<MutableURL: http://e.com"))
    def test_set_query(self):
        # Assigning .query replaces the previous parameters entirely.
        url = http.MutableURL("http://e.com/foo/bar/?x=10")
        url.query = {"zzz": "xxx"}
        url = http.MutableURL(str(url))
        self.assertEqual(url.query, {"zzz": "xxx"})
class TestHttpDispatch(unittest.TestCase):
    """http.HttpDispatch run against a patched urlopen: one test per response shape."""
    def test_dispatch_success(self):
        # A well-formed success body yields the remote return value.
        logger = logging.getLogger("celery.unittest")
        def with_mock_urlopen(_val):
            d = http.HttpDispatch("http://example.com/mul", "GET", {
                "x": 10, "y": 10}, logger)
            self.assertEqual(d.dispatch(), 100)
        context = mock_urlopen(success_response(100))
        execute_context(context, with_mock_urlopen)
    def test_dispatch_failure(self):
        # A failure body raises RemoteExecuteError.
        logger = logging.getLogger("celery.unittest")
        def with_mock_urlopen(_val):
            d = http.HttpDispatch("http://example.com/mul", "GET", {
                "x": 10, "y": 10}, logger)
            self.assertRaises(http.RemoteExecuteError, d.dispatch)
        context = mock_urlopen(fail_response("Invalid moon alignment"))
        execute_context(context, with_mock_urlopen)
    def test_dispatch_empty_response(self):
        # An empty body is not valid JSON -> InvalidResponseError.
        logger = logging.getLogger("celery.unittest")
        def with_mock_urlopen(_val):
            d = http.HttpDispatch("http://example.com/mul", "GET", {
                "x": 10, "y": 10}, logger)
            self.assertRaises(http.InvalidResponseError, d.dispatch)
        context = mock_urlopen(_response(""))
        execute_context(context, with_mock_urlopen)
    def test_dispatch_non_json(self):
        # Garbage bytes -> InvalidResponseError.
        logger = logging.getLogger("celery.unittest")
        def with_mock_urlopen(_val):
            d = http.HttpDispatch("http://example.com/mul", "GET", {
                "x": 10, "y": 10}, logger)
            self.assertRaises(http.InvalidResponseError, d.dispatch)
        context = mock_urlopen(_response("{'#{:'''"))
        execute_context(context, with_mock_urlopen)
    def test_dispatch_unknown_status(self):
        # A status string other than success/failure -> UnknownStatusError.
        logger = logging.getLogger("celery.unittest")
        def with_mock_urlopen(_val):
            d = http.HttpDispatch("http://example.com/mul", "GET", {
                "x": 10, "y": 10}, logger)
            self.assertRaises(http.UnknownStatusError, d.dispatch)
        context = mock_urlopen(unknown_response())
        execute_context(context, with_mock_urlopen)
    def test_dispatch_POST(self):
        # Same success path, but dispatched over POST.
        logger = logging.getLogger("celery.unittest")
        def with_mock_urlopen(_val):
            d = http.HttpDispatch("http://example.com/mul", "POST", {
                "x": 10, "y": 10}, logger)
            self.assertEqual(d.dispatch(), 100)
        context = mock_urlopen(success_response(100))
        execute_context(context, with_mock_urlopen)
class TestURL(unittest.TestCase):
    """http.URL async helpers, run eagerly against a patched urlopen."""
    def test_URL_get_async(self):
        # .get() on the handle returned by get_async yields the remote value.
        def with_eager_tasks(_val):
            def with_mock_urlopen(_val):
                d = http.URL("http://example.com/mul").get_async(x=10, y=10)
                self.assertEqual(d.get(), 100)
            context = mock_urlopen(success_response(100))
            execute_context(context, with_mock_urlopen)
        execute_context(eager_tasks(), with_eager_tasks)
    def test_URL_post_async(self):
        # Same as above, dispatched over POST.
        def with_eager_tasks(_val):
            def with_mock_urlopen(_val):
                d = http.URL("http://example.com/mul").post_async(x=10, y=10)
                self.assertEqual(d.get(), 100)
            context = mock_urlopen(success_response(100))
            execute_context(context, with_mock_urlopen)
        execute_context(eager_tasks(), with_eager_tasks)
|
en
| 0.394367
|
# -*- coding: utf-8 -*- #zeta?x=10&y=20" #zeta")
| 2.446764
| 2
|
test-allure/test_add_group.py
|
nataliasviattseva/python_training
| 0
|
6627162
|
<reponame>nataliasviattseva/python_training<filename>test-allure/test_add_group.py
import allure
from model.group import Group
def test_add_group(app, db, json_groups, check_ui):
    """Create a group and verify the DB list equals old list + the new group."""
    new_group = json_groups
    with allure.step("Given a group list"):
        groups_before = db.get_groups_list()
    with allure.step("When I add the group to the list"):
        app.group.create_group(new_group)
    with allure.step("Then the new group list is equal to the old list with the added group"):
        groups_after = db.get_groups_list()
        groups_before.append(new_group)
        assert sorted(groups_before, key=Group.id_or_max) == sorted(groups_after, key=Group.id_or_max)
|
import allure
from model.group import Group
def test_add_group(app, db, json_groups, check_ui):
    """Create a group and verify the DB list equals old list + the new group."""
    new_group = json_groups
    with allure.step("Given a group list"):
        groups_before = db.get_groups_list()
    with allure.step("When I add the group to the list"):
        app.group.create_group(new_group)
    with allure.step("Then the new group list is equal to the old list with the added group"):
        groups_after = db.get_groups_list()
        groups_before.append(new_group)
        assert sorted(groups_before, key=Group.id_or_max) == sorted(groups_after, key=Group.id_or_max)
|
none
| 1
| 2.676637
| 3
|
|
data/scripts/crosswalk.py
|
caiterade/figuringthisout
| 0
|
6627163
|
import pandas as pd
import numpy as np
from Tkinter import *
from ttk import *
import os, sys
# Function to import file whether json or csv. Input: csv/json / output: df.
def import_file(file_name, geo_old):
    """Load a csv or json file into a DataFrame with lower-cased column names.

    For json, the index holds the geography ids; it is copied into a
    ``geo_old`` column and coerced to numeric.
    Raises ValueError for any other file type (previously this fell through
    to an UnboundLocalError on ``data``).
    """
    if "json" in file_name:
        data = pd.read_json(file_name, orient="index", convert_axes=0) # make sure tracts remain tracts, not dates
        data[geo_old] = data.index
        data.reset_index(level=0, inplace=True)
        # pd.to_numeric replaces Series.convert_objects, which was removed
        # from pandas; convert_numeric=True coerced bad values to NaN too.
        data[geo_old] = pd.to_numeric(data[geo_old], errors="coerce")
        data.drop("index", axis=1, inplace=True)
        data.columns = [c.lower() for c in data.columns]
    elif "csv" in file_name:
        data = pd.read_csv(file_name)
        data.columns = [c.lower() for c in data.columns]
    else:
        raise ValueError("Unsupported file type: %s" % file_name)
    return data
# Function to merge crosswalk and data file and multiple data by weights. Input: df / output: df.
def merge_and_weight(cross, data, geo_old, geo_new, weight, weight2):
    """Merge crosswalk and data on ``geo_old`` and weight every data column.

    Each data column is multiplied by weight * weight2; returns only the
    weighted data columns plus ``geo_new``.
    """
    merged = pd.merge(cross, data, on=geo_old)
    columns = list(merged)
    # Get single weight column, then multiply all data columns by weight
    indices = [columns.index(y) for y in [geo_old, geo_new, weight, weight2]]
    data_cols = [i for j, i in enumerate(columns) if j not in indices]
    merged['weight_tot'] = merged[weight].multiply(merged[weight2])
    for col in data_cols:
        # pd.to_numeric replaces Series.convert_objects, which was removed
        # from pandas; convert_numeric=True coerced bad values to NaN too.
        merged[col] = pd.to_numeric(merged[col], errors="coerce")
        merged[col] = merged[col].multiply(merged['weight_tot'])
    data_cols.append(geo_new)
    return merged.loc[:, data_cols]
# Function to calculate square root of sum of squares
def sqrt_sos(data, columns, geo_new):
    """Aggregate margin columns by ``geo_new`` as the sqrt of the sum of squares.

    Note: squares the listed columns of ``data`` in place before grouping.
    """
    value_cols = [c for c in columns if c != geo_new]
    for c in value_cols:
        data[c] = data[c]**2
    data_agg = data.groupby([geo_new]).sum()
    for c in value_cols:
        data_agg[c] = data_agg[c]**.5
    return data_agg
# Main function launched with GUI form. Input: csv/json via user form / output: csv.
def main():
    """Crosswalk the user-selected data file to a new geography and save a CSV.

    Reads the Tk entry fields (e1..e6), merges the data with the crosswalk,
    weights the counts, aggregates margins of error as sqrt-of-sum-of-squares,
    recomputes *_ratio columns, and writes outputs/<name>_<geo_new>.csv.
    NOTE(review): Python 2 print statements throughout; the bare ``except:``
    blocks below hide the real error type.
    """
    # User inputs/get data from files
    data_file = os.path.join("inputs", e1.get())
    cross_file = os.path.join("inputs", e2.get())
    geo_old = e3.get()
    geo_new = e4.get()
    data = import_file(data_file, geo_old)
    cross = import_file(cross_file, geo_old)
    # If there's no weight column or secondary weight column defined for crosswalk, print warning and set equal to 1
    if len(e5.get()) > 0:
        weight = e5.get()
    else:
        weight = "weight"
        cross[weight] = 1
        print "No weight column specified, setting all weights equal to 1."
    if len(e6.get()) > 0:
        weight2 = e6.get()
    else:
        weight2 = "weight2"
        cross[weight2]= 1
        print "No second weight column specified, setting all secondary weights to 1."
    # Merge crosswalk and data, weight data for new geography
    merged = merge_and_weight(cross, data, geo_old, geo_new, weight, weight2)
    # Merge data and calculate ratios for counts and MOEs
    # Split data into count and margin columns, to simplify code for aggregation/calculations
    columns = list(merged)
    counts = [x for x in columns if "_margin" not in x]
    margins = [x for x in columns if "_margin" in x]
    margins.append(geo_new)
    count_data = merged.loc[:, counts]
    margin_data = merged.loc[:, margins]
    # Sum counts by new geography, aggregate MOEs using sum of squares
    count_agg = count_data.groupby([geo_new]).sum()
    margin_agg = sqrt_sos(margin_data, margins, geo_new)
    # Calculate ratios
    bases = [r.replace("_numer","") for r in counts if "_numer" in r]
    for base in bases:
        # Column-name conventions: <base>_numer / _denom / _ratio (+ _margin).
        numer = "{}_numer".format(base)
        denom = "{}_denom".format(base)
        ratio = "{}_ratio".format(base)
        numer_margin = "{}_numer_margin".format(base)
        denom_margin = "{}_denom_margin".format(base)
        ratio_margin = "{}_ratio_margin".format(base)
        try:
            count_agg[ratio] = count_agg[numer] / count_agg[denom]
            count_agg[np.isinf(count_agg)] = 1 # Inf set equal to 1
        except:
            print "{0} or {1} missing for variable {2}.".format(numer, denom, ratio)
        if base in margins:
            try:
                # Margin for ratios uses formula from appendix for derived ratios because it is more conservative (eg yields wider margins) and doesn't yield negatives under root
                # Source: https://www.census.gov/content/dam/Census/library/publications/2009/acs/ACSResearch.pdf
                margin_agg[ratio_margin] = ((margin_agg[numer_margin]**2 + ((count_agg[ratio]**2)*margin_agg[denom_margin]**2))**.5)/count_agg[denom]
            except:
                print "{0}, {1}, {2}, {3} for variable {4}.".format(numer_margin, denom_margin, ratio, denom, ratio_margin)
        count_agg.drop([numer, denom], axis=1, inplace=True) #these need to go at the end because margin calculation needs denom var
        margin_agg.drop([numer_margin, denom_margin], axis=1, inplace=True)
    final = pd.concat([count_agg, margin_agg], axis=1)
    # Output new file to csv
    name = e1.get().split(".")[0]
    finalfile = "{0}_{1}.csv".format(name, geo_new)
    output_file = os.path.join("outputs", finalfile)
    final.to_csv(output_file)
    print "Completed. Converted final data set named {}.".format(finalfile)
# Make form for user inputs
# Cannot press enter until all fields are filled
# Build the Tkinter input form: Quit/Enter buttons plus six labelled fields.
window = Tk()
window.title("Crosswalk Script")
Button(window, text="Quit", command=window.quit).grid(row=7,column=1, pady=4)
enter = Button(window, text="Enter", command=main)
enter.grid(row=7,column=0, pady=4)
enter.config(state="disabled")
# Enable the Enter button only once the four required fields are all non-empty.
def disable_button(*args):
    data = stringvar1.get()
    cross = stringvar2.get()
    geo_old = stringvar3.get()
    geo_new = stringvar4.get()
    if data and cross and geo_old and geo_new:
        enter.config(state="normal")
    else:
        enter.config(state="disabled")
Label(window, text="Path to data file").grid(row=0, column=0, sticky="we")
Label(window, text="Path to crosswalk file").grid(row=1, column=0, sticky="we")
Label(window, text="Old geography\n(geography coverting from)").grid(row=2, column=0, sticky="we")
Label(window, text="New geography\n(geography coverting to)").grid(row=3, column=0, sticky="we")
Label(window, text="Weight column\n(in crosswalk file)").grid(row=4, column=0, sticky="we")
Label(window, text="Optional second weight column\n(in crosswalk file)").grid(row=5, column=0, sticky="we")
# Only the four required fields are traced so disable_button fires on edits.
stringvar1 = StringVar(window)
stringvar2 = StringVar(window)
stringvar3 = StringVar(window)
stringvar4 = StringVar(window)
stringvar5 = StringVar(window)
stringvar6 = StringVar(window)
stringvar1.trace("w", disable_button)
stringvar2.trace("w", disable_button)
stringvar3.trace("w", disable_button)
stringvar4.trace("w", disable_button)
e1 = Entry(window, textvariable=stringvar1)
e2 = Entry(window, textvariable=stringvar2)
e3 = Entry(window, textvariable=stringvar3)
e4 = Entry(window, textvariable=stringvar4)
# NOTE(review): stringvar5/6 are created but never attached to e5/e6.
e5 = Entry(window)
e6 = Entry(window)
e1.grid(row=0, column=1, sticky="we")
e2.grid(row=1, column=1, sticky="we")
e3.grid(row=2, column=1, sticky="we")
e4.grid(row=3, column=1, sticky="we")
e5.grid(row=4, column=1, sticky="we")
e6.grid(row=5, column=1, sticky="we")
window.columnconfigure(1, weight=1)
if __name__ == "__main__":
    window.mainloop()
|
import pandas as pd
import numpy as np
from Tkinter import *
from ttk import *
import os, sys
# Function to import file whether json or csv. Input: csv/json / output: df.
def import_file(file_name, geo_old):
    """Load a csv or json file into a DataFrame with lower-cased column names.

    For json, the index holds the geography ids; it is copied into a
    ``geo_old`` column and coerced to numeric.
    Raises ValueError for any other file type (previously this fell through
    to an UnboundLocalError on ``data``).
    """
    if "json" in file_name:
        data = pd.read_json(file_name, orient="index", convert_axes=0) # make sure tracts remain tracts, not dates
        data[geo_old] = data.index
        data.reset_index(level=0, inplace=True)
        # pd.to_numeric replaces Series.convert_objects, which was removed
        # from pandas; convert_numeric=True coerced bad values to NaN too.
        data[geo_old] = pd.to_numeric(data[geo_old], errors="coerce")
        data.drop("index", axis=1, inplace=True)
        data.columns = [c.lower() for c in data.columns]
    elif "csv" in file_name:
        data = pd.read_csv(file_name)
        data.columns = [c.lower() for c in data.columns]
    else:
        raise ValueError("Unsupported file type: %s" % file_name)
    return data
# Function to merge crosswalk and data file and multiple data by weights. Input: df / output: df.
def merge_and_weight(cross, data, geo_old, geo_new, weight, weight2):
    """Merge crosswalk and data on ``geo_old`` and weight every data column.

    Each data column is multiplied by weight * weight2; returns only the
    weighted data columns plus ``geo_new``.
    """
    merged = pd.merge(cross, data, on=geo_old)
    columns = list(merged)
    # Get single weight column, then multiply all data columns by weight
    indices = [columns.index(y) for y in [geo_old, geo_new, weight, weight2]]
    data_cols = [i for j, i in enumerate(columns) if j not in indices]
    merged['weight_tot'] = merged[weight].multiply(merged[weight2])
    for col in data_cols:
        # pd.to_numeric replaces Series.convert_objects, which was removed
        # from pandas; convert_numeric=True coerced bad values to NaN too.
        merged[col] = pd.to_numeric(merged[col], errors="coerce")
        merged[col] = merged[col].multiply(merged['weight_tot'])
    data_cols.append(geo_new)
    return merged.loc[:, data_cols]
# Function to calculate square root of sum of squares
def sqrt_sos(data, columns, geo_new):
    """Aggregate margin columns by ``geo_new`` as the sqrt of the sum of squares.

    Note: squares the listed columns of ``data`` in place before grouping.
    """
    value_cols = [c for c in columns if c != geo_new]
    for c in value_cols:
        data[c] = data[c]**2
    data_agg = data.groupby([geo_new]).sum()
    for c in value_cols:
        data_agg[c] = data_agg[c]**.5
    return data_agg
# Main function launched with GUI form. Input: csv/json via user form / output: csv.
def main():
    """Crosswalk the user-selected data file to a new geography and save a CSV.

    Reads the Tk entry fields (e1..e6), merges the data with the crosswalk,
    weights the counts, aggregates margins of error as sqrt-of-sum-of-squares,
    recomputes *_ratio columns, and writes outputs/<name>_<geo_new>.csv.
    NOTE(review): Python 2 print statements throughout; the bare ``except:``
    blocks below hide the real error type.
    """
    # User inputs/get data from files
    data_file = os.path.join("inputs", e1.get())
    cross_file = os.path.join("inputs", e2.get())
    geo_old = e3.get()
    geo_new = e4.get()
    data = import_file(data_file, geo_old)
    cross = import_file(cross_file, geo_old)
    # If there's no weight column or secondary weight column defined for crosswalk, print warning and set equal to 1
    if len(e5.get()) > 0:
        weight = e5.get()
    else:
        weight = "weight"
        cross[weight] = 1
        print "No weight column specified, setting all weights equal to 1."
    if len(e6.get()) > 0:
        weight2 = e6.get()
    else:
        weight2 = "weight2"
        cross[weight2]= 1
        print "No second weight column specified, setting all secondary weights to 1."
    # Merge crosswalk and data, weight data for new geography
    merged = merge_and_weight(cross, data, geo_old, geo_new, weight, weight2)
    # Merge data and calculate ratios for counts and MOEs
    # Split data into count and margin columns, to simplify code for aggregation/calculations
    columns = list(merged)
    counts = [x for x in columns if "_margin" not in x]
    margins = [x for x in columns if "_margin" in x]
    margins.append(geo_new)
    count_data = merged.loc[:, counts]
    margin_data = merged.loc[:, margins]
    # Sum counts by new geography, aggregate MOEs using sum of squares
    count_agg = count_data.groupby([geo_new]).sum()
    margin_agg = sqrt_sos(margin_data, margins, geo_new)
    # Calculate ratios
    bases = [r.replace("_numer","") for r in counts if "_numer" in r]
    for base in bases:
        # Column-name conventions: <base>_numer / _denom / _ratio (+ _margin).
        numer = "{}_numer".format(base)
        denom = "{}_denom".format(base)
        ratio = "{}_ratio".format(base)
        numer_margin = "{}_numer_margin".format(base)
        denom_margin = "{}_denom_margin".format(base)
        ratio_margin = "{}_ratio_margin".format(base)
        try:
            count_agg[ratio] = count_agg[numer] / count_agg[denom]
            count_agg[np.isinf(count_agg)] = 1 # Inf set equal to 1
        except:
            print "{0} or {1} missing for variable {2}.".format(numer, denom, ratio)
        if base in margins:
            try:
                # Margin for ratios uses formula from appendix for derived ratios because it is more conservative (eg yields wider margins) and doesn't yield negatives under root
                # Source: https://www.census.gov/content/dam/Census/library/publications/2009/acs/ACSResearch.pdf
                margin_agg[ratio_margin] = ((margin_agg[numer_margin]**2 + ((count_agg[ratio]**2)*margin_agg[denom_margin]**2))**.5)/count_agg[denom]
            except:
                print "{0}, {1}, {2}, {3} for variable {4}.".format(numer_margin, denom_margin, ratio, denom, ratio_margin)
        count_agg.drop([numer, denom], axis=1, inplace=True) #these need to go at the end because margin calculation needs denom var
        margin_agg.drop([numer_margin, denom_margin], axis=1, inplace=True)
    final = pd.concat([count_agg, margin_agg], axis=1)
    # Output new file to csv
    name = e1.get().split(".")[0]
    finalfile = "{0}_{1}.csv".format(name, geo_new)
    output_file = os.path.join("outputs", finalfile)
    final.to_csv(output_file)
    print "Completed. Converted final data set named {}.".format(finalfile)
# Make form for user inputs
# Cannot press enter until all fields are filled
# Build the Tkinter input form: Quit/Enter buttons plus six labelled fields.
window = Tk()
window.title("Crosswalk Script")
Button(window, text="Quit", command=window.quit).grid(row=7,column=1, pady=4)
enter = Button(window, text="Enter", command=main)
enter.grid(row=7,column=0, pady=4)
enter.config(state="disabled")
# Enable the Enter button only once the four required fields are all non-empty.
def disable_button(*args):
    data = stringvar1.get()
    cross = stringvar2.get()
    geo_old = stringvar3.get()
    geo_new = stringvar4.get()
    if data and cross and geo_old and geo_new:
        enter.config(state="normal")
    else:
        enter.config(state="disabled")
Label(window, text="Path to data file").grid(row=0, column=0, sticky="we")
Label(window, text="Path to crosswalk file").grid(row=1, column=0, sticky="we")
Label(window, text="Old geography\n(geography coverting from)").grid(row=2, column=0, sticky="we")
Label(window, text="New geography\n(geography coverting to)").grid(row=3, column=0, sticky="we")
Label(window, text="Weight column\n(in crosswalk file)").grid(row=4, column=0, sticky="we")
Label(window, text="Optional second weight column\n(in crosswalk file)").grid(row=5, column=0, sticky="we")
# Only the four required fields are traced so disable_button fires on edits.
stringvar1 = StringVar(window)
stringvar2 = StringVar(window)
stringvar3 = StringVar(window)
stringvar4 = StringVar(window)
stringvar5 = StringVar(window)
stringvar6 = StringVar(window)
stringvar1.trace("w", disable_button)
stringvar2.trace("w", disable_button)
stringvar3.trace("w", disable_button)
stringvar4.trace("w", disable_button)
e1 = Entry(window, textvariable=stringvar1)
e2 = Entry(window, textvariable=stringvar2)
e3 = Entry(window, textvariable=stringvar3)
e4 = Entry(window, textvariable=stringvar4)
# NOTE(review): stringvar5/6 are created but never attached to e5/e6.
e5 = Entry(window)
e6 = Entry(window)
e1.grid(row=0, column=1, sticky="we")
e2.grid(row=1, column=1, sticky="we")
e3.grid(row=2, column=1, sticky="we")
e4.grid(row=3, column=1, sticky="we")
e5.grid(row=4, column=1, sticky="we")
e6.grid(row=5, column=1, sticky="we")
window.columnconfigure(1, weight=1)
if __name__ == "__main__":
    window.mainloop()
|
en
| 0.852174
|
# Function to import file whether json or csv. Input: csv/json / output: df. # make sure tracts remain tracts, not dates # Function to merge crosswalk and data file and multiple data by weights. Input: df / output: df. # Get single weight column, then multiply all data columns by weight # Function to calculate square root of sum of squares # Main function launched with GUI form. Input: csv/json via user form / output: csv. # User inputs/get data from files # If there's no weight column or secondary weight column defined for crosswalk, print warning and set equal to 1 # Merge crosswalk and data, weight data for new geography # Merge data and calculate ratios for counts and MOEs # Split data into count and margin columns, to simplify code for aggregation/calculations # Sum counts by new geography, aggregate MOEs using sum of squares # Calculate ratios # Inf set equal to 1 # Margin for ratios uses formula from appendix for derived ratios because it is more conservative (eg yields wider margins) and doesn't yield negatives under root # Source: https://www.census.gov/content/dam/Census/library/publications/2009/acs/ACSResearch.pdf #these need to go at the end because margin calculation needs denom var # Output new file to csv # Make form for user inputs # Cannot press enter until all fields are filled
| 3.365435
| 3
|
dialogue-engine/test/programytest/parser/template/node_tests/richmedia_tests/test_reply.py
|
cotobadesign/cotoba-agent-oss
| 104
|
6627164
|
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.richmedia.reply import TemplateReplyNode
from programy.parser.template.nodes.word import TemplateWordNode
from programytest.parser.base import ParserTestsBaseClass
class TemplateReplyNodeTests(ParserTestsBaseClass):
    """Tests that a TemplateReplyNode resolves and serialises to the expected
    <reply> XML, both with text only and with text plus a postback."""

    def test_text_reply_node(self):
        # A reply holding only a text child renders <reply><text>...</text></reply>.
        root = TemplateNode()
        self.assertIsNotNone(root)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 0)
        reply = TemplateReplyNode()
        reply._text = TemplateWordNode("SAY HELLO")
        root.append(reply)
        resolved = root.resolve(self._client_context)
        self.assertIsNotNone(resolved)
        # resolve() and to_xml() should produce the same serialisation here.
        self.assertEqual("<reply><text>SAY HELLO</text></reply>", resolved)
        self.assertEqual("<reply><text>SAY HELLO</text></reply>", root.to_xml(self._client_context))

    def test_text_postback__replynode(self):
        # With a postback set, a <postback> element follows the <text> element.
        root = TemplateNode()
        self.assertIsNotNone(root)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 0)
        reply = TemplateReplyNode()
        reply._text = TemplateWordNode("SAY HELLO")
        reply._postback = TemplateWordNode("HELLO")
        root.append(reply)
        resolved = root.resolve(self._client_context)
        self.assertIsNotNone(resolved)
        self.assertEqual("<reply><text>SAY HELLO</text><postback>HELLO</postback></reply>", resolved)
        self.assertEqual("<reply><text>SAY HELLO</text><postback>HELLO</postback></reply>", root.to_xml(self._client_context))
|
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.richmedia.reply import TemplateReplyNode
from programy.parser.template.nodes.word import TemplateWordNode
from programytest.parser.base import ParserTestsBaseClass
class TemplateReplyNodeTests(ParserTestsBaseClass):
    """Verify TemplateReplyNode serialisation: text-only replies and replies
    that also carry a postback payload."""

    def test_text_reply_node(self):
        # Start from an empty template root.
        template_root = TemplateNode()
        self.assertIsNotNone(template_root)
        self.assertIsNotNone(template_root.children)
        self.assertEqual(0, len(template_root.children))

        # Attach a reply node carrying only text.
        reply_node = TemplateReplyNode()
        reply_node._text = TemplateWordNode("SAY HELLO")
        template_root.append(reply_node)

        rendered = template_root.resolve(self._client_context)
        self.assertIsNotNone(rendered)
        expected = "<reply><text>SAY HELLO</text></reply>"
        self.assertEqual(expected, rendered)
        self.assertEqual(expected, template_root.to_xml(self._client_context))

    def test_text_postback__replynode(self):
        # Start from an empty template root.
        template_root = TemplateNode()
        self.assertIsNotNone(template_root)
        self.assertIsNotNone(template_root.children)
        self.assertEqual(0, len(template_root.children))

        # Attach a reply node carrying both text and a postback.
        reply_node = TemplateReplyNode()
        reply_node._text = TemplateWordNode("SAY HELLO")
        reply_node._postback = TemplateWordNode("HELLO")
        template_root.append(reply_node)

        rendered = template_root.resolve(self._client_context)
        self.assertIsNotNone(rendered)
        expected = "<reply><text>SAY HELLO</text><postback>HELLO</postback></reply>"
        self.assertEqual(expected, rendered)
        self.assertEqual(expected, template_root.to_xml(self._client_context))
|
en
| 0.774213
|
Copyright (c) 2020 COTOBA DESIGN, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| 1.935699
| 2
|
whatsup/image-cnv.py
|
chrislast/whatsup
| 1
|
6627165
|
<reponame>chrislast/whatsup
import argparse
from PIL import Image
def rgb_to_redwhiteblack(img_file):
    """Convert an image to a 3-color palette suitable for InkyWHAT"""
    # Load the source image from disk.
    source = Image.open(img_file)
    # Build a palette image: white, black, red, with the remaining 252
    # palette slots padded with black.
    palette_image = Image.new("P", (1, 1))
    palette_image.putpalette((255, 255, 255, 0, 0, 0, 255, 0, 0) + (0, 0, 0) * 252)
    # Quantize the RGB source down to that 3-color palette and return it.
    return source.convert("RGB").quantize(palette=palette_image)
if __name__ == "__main__":
    # Command line arguments: required input image, optional output path.
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument('--image', '-i', type=str, required=True, help="Input image to be converted/displayed")
    PARSER.add_argument('--out', '-o', type=str, required=False, help="Output file")
    ARGS = PARSER.parse_args()
    IMG = rgb_to_redwhiteblack(ARGS.image)
    # BUG FIX: argparse stores None for an omitted optional argument, so the
    # previous ARGS.__dict__.get("out", "out.png") always found the key and
    # returned None instead of the intended default. Fall back explicitly.
    OUT = ARGS.out if ARGS.out is not None else "out.png"
    IMG.save(OUT)
|
import argparse
from PIL import Image
def rgb_to_redwhiteblack(img_file):
    """Convert an image to a 3-color palette suitable for InkyWHAT"""
    # Open the input image file
    img = Image.open(img_file)
    # Create a 3-color palette: white, black, red; the remaining 252 palette
    # slots are padded with black.
    pal_img = Image.new("P", (1, 1))
    pal_img.putpalette((255, 255, 255, 0, 0, 0, 255, 0, 0) + (0, 0, 0) * 252)
    # Convert the image to the 3 color palette
    img = img.convert("RGB").quantize(palette=pal_img)
    return img
if __name__ == "__main__":
    # Command line arguments: required input image, optional output path.
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument('--image', '-i', type=str, required=True, help="Input image to be converted/displayed")
    PARSER.add_argument('--out', '-o', type=str, required=False, help="Output file")
    ARGS = PARSER.parse_args()
    IMG = rgb_to_redwhiteblack(ARGS.image)
    # BUG FIX: argparse stores None for an omitted optional argument, so the
    # previous ARGS.__dict__.get("out", "out.png") always found the key and
    # returned None instead of the intended default. Fall back explicitly.
    OUT = ARGS.out if ARGS.out is not None else "out.png"
    IMG.save(OUT)
|
en
| 0.626051
|
Convert an image to a 3-color palette suitable for InkyWHAT # Open the input image file # Create a 3-color palette # Convert the image to the 3 color palette # Command line arguments to set display type and colour, and enter your name
| 3.681556
| 4
|
lib/surface/iam/workforce_pools/create_cred_config.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
| 2
|
6627166
|
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to create a configuration file to allow authentication from 3rd party user identities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import textwrap
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.iam.byoid_utilities import cred_config
class CreateCredConfig(base.CreateCommand):
  """Create a configuration file for generated credentials.
  This command creates a configuration file to allow access to authenticated
  Google Cloud actions from a variety of external user accounts.
  """

  detailed_help = {
      'EXAMPLES':
          textwrap.dedent("""\
          To create a file-sourced credential configuration for your project, run:
          $ {command} locations/$REGION/workforcePools/$WORKFORCE_POOL_ID/providers/$PROVIDER_ID --credential-source-file=$PATH_TO_OIDC_ID_TOKEN --workforce-pool-user-project $PROJECT_NUMBER --output-file=credentials.json
          To create a URL-sourced credential configuration for your project, run:
          $ {command} locations/$REGION/workforcePools/$WORKFORCE_POOL_ID/providers/$PROVIDER_ID --credential-source-url=$URL_FOR_OIDC_TOKEN --credential-source-headers=Key=Value --workforce-pool-user-project $PROJECT_NUMBER --output-file=credentials.json
          To use the resulting file for any of these commands, set the GOOGLE_APPLICATION_CREDENTIALS environment variable to point to the generated file.
          """),
  }

  @staticmethod
  def Args(parser):
    # Required args. The audience is a positional arg, meaning it is required.
    parser.add_argument(
        'audience', help='The workforce pool provider resource name.')
    # The credential source must be specified (file-sourced or URL-sourced).
    # The mutex group guarantees exactly one of the two sources is given.
    credential_types = parser.add_group(
        mutex=True, required=True, help='Credential types.')
    credential_types.add_argument(
        '--credential-source-file',
        help='The location of the file which stores the credential.')
    credential_types.add_argument(
        '--credential-source-url',
        help='The URL to obtain the credential from.')
    parser.add_argument(
        '--workforce-pool-user-project',
        help='The client project number used to identify the application ' +
        '(client project) to the server when calling Google APIs. The user ' +
        'principal must have serviceusage.services.use IAM permission to use ' +
        'the specified project.',
        required=True)
    parser.add_argument(
        '--output-file',
        help='Location to store the generated credential configuration file.',
        required=True)
    # Optional args.
    parser.add_argument(
        '--service-account',
        help='The email of the service account to impersonate.')
    parser.add_argument(
        '--subject-token-type',
        help='The type of token being used for authorization. ' +
        'This defaults to urn:ietf:params:oauth:token-type:id_token.')
    parser.add_argument(
        '--credential-source-headers',
        type=arg_parsers.ArgDict(),
        metavar='key=value',
        help='Headers to use when querying the credential-source-url.')
    parser.add_argument(
        '--credential-source-type',
        help='The format of the credential source (JSON or text).')
    parser.add_argument(
        '--credential-source-field-name',
        help='The subject token field name (key) in a JSON credential source.')

  def Run(self, args):
    # Delegate config-file generation to the shared BYOID helper, tagging
    # the output as a workforce-pools configuration.
    cred_config.create_credential_config(args,
                                         cred_config.ConfigType.WORKFORCE_POOLS)
|
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to create a configuration file to allow authentication from 3rd party user identities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import textwrap
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.iam.byoid_utilities import cred_config
class CreateCredConfig(base.CreateCommand):
  """Create a configuration file for generated credentials.
  This command creates a configuration file to allow access to authenticated
  Google Cloud actions from a variety of external user accounts.
  """

  detailed_help = {
      'EXAMPLES':
          textwrap.dedent("""\
          To create a file-sourced credential configuration for your project, run:
          $ {command} locations/$REGION/workforcePools/$WORKFORCE_POOL_ID/providers/$PROVIDER_ID --credential-source-file=$PATH_TO_OIDC_ID_TOKEN --workforce-pool-user-project $PROJECT_NUMBER --output-file=credentials.json
          To create a URL-sourced credential configuration for your project, run:
          $ {command} locations/$REGION/workforcePools/$WORKFORCE_POOL_ID/providers/$PROVIDER_ID --credential-source-url=$URL_FOR_OIDC_TOKEN --credential-source-headers=Key=Value --workforce-pool-user-project $PROJECT_NUMBER --output-file=credentials.json
          To use the resulting file for any of these commands, set the GOOGLE_APPLICATION_CREDENTIALS environment variable to point to the generated file.
          """),
  }

  @staticmethod
  def Args(parser):
    # Required args. The audience is a positional arg, meaning it is required.
    parser.add_argument(
        'audience', help='The workforce pool provider resource name.')
    # The credential source must be specified (file-sourced or URL-sourced).
    # The mutex group guarantees exactly one of the two sources is given.
    credential_types = parser.add_group(
        mutex=True, required=True, help='Credential types.')
    credential_types.add_argument(
        '--credential-source-file',
        help='The location of the file which stores the credential.')
    credential_types.add_argument(
        '--credential-source-url',
        help='The URL to obtain the credential from.')
    parser.add_argument(
        '--workforce-pool-user-project',
        help='The client project number used to identify the application ' +
        '(client project) to the server when calling Google APIs. The user ' +
        'principal must have serviceusage.services.use IAM permission to use ' +
        'the specified project.',
        required=True)
    parser.add_argument(
        '--output-file',
        help='Location to store the generated credential configuration file.',
        required=True)
    # Optional args.
    parser.add_argument(
        '--service-account',
        help='The email of the service account to impersonate.')
    parser.add_argument(
        '--subject-token-type',
        help='The type of token being used for authorization. ' +
        'This defaults to urn:ietf:params:oauth:token-type:id_token.')
    parser.add_argument(
        '--credential-source-headers',
        type=arg_parsers.ArgDict(),
        metavar='key=value',
        help='Headers to use when querying the credential-source-url.')
    parser.add_argument(
        '--credential-source-type',
        help='The format of the credential source (JSON or text).')
    parser.add_argument(
        '--credential-source-field-name',
        help='The subject token field name (key) in a JSON credential source.')

  def Run(self, args):
    # Delegate config-file generation to the shared BYOID helper, tagging
    # the output as a workforce-pools configuration.
    cred_config.create_credential_config(args,
                                         cred_config.ConfigType.WORKFORCE_POOLS)
|
en
| 0.764995
|
# -*- coding: utf-8 -*- # # Copyright 2021 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Command to create a configuration file to allow authentication from 3rd party user identities. Create a configuration file for generated credentials. This command creates a configuration file to allow access to authenticated Google Cloud actions from a variety of external user accounts. \ To create a file-sourced credential configuration for your project, run: $ {command} locations/$REGION/workforcePools/$WORKFORCE_POOL_ID/providers/$PROVIDER_ID --credential-source-file=$PATH_TO_OIDC_ID_TOKEN --workforce-pool-user-project $PROJECT_NUMBER --output-file=credentials.json To create a URL-sourced credential configuration for your project, run: $ {command} locations/$REGION/workforcePools/$WORKFORCE_POOL_ID/providers/$PROVIDER_ID --credential-source-url=$URL_FOR_OIDC_TOKEN --credential-source-headers=Key=Value --workforce-pool-user-project $PROJECT_NUMBER --output-file=credentials.json To use the resulting file for any of these commands, set the GOOGLE_APPLICATION_CREDENTIALS environment variable to point to the generated file. # Required args. The audience is a positional arg, meaning it is required. # The credential source must be specified (file-sourced or URL-sourced). # Optional args.
| 1.794891
| 2
|
ebcli/docker/preconfigured_container.py
|
ianblenke/awsebcli
| 4
|
6627167
|
<reponame>ianblenke/awsebcli<gh_stars>1-10
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from . import commands
from . import containerops
from . import dockerrun
from . import log
from .container import Container
from ..objects.exceptions import ValidationError
class PreconfiguredContainer(Container):
    """
    Immutable class used for running Preconfigured Docker containers.
    """

    def validate(self):
        # A user-supplied Dockerfile must be based on the runtime image that
        # matches this solution stack.
        if self.fs_handler.dockerfile_exists:
            _validate_preconfig_dockerfile(self.soln_stk,
                                           self.container_cfg,
                                           self.fs_handler.dockerfile_path)
        # Validate the Dockerrun file against the v1 schema.
        dockerrun.validate_dockerrun_v1(self.fs_handler.dockerrun, False)

    def _containerize(self):
        # Stage the solution-stack-appropriate Dockerfile into the project.
        self.fs_handler.copy_dockerfile(self.soln_stk, self.container_cfg)

    def _get_log_volume_map(self):
        # Prefer any log mapping the user declared; otherwise map the runtime's
        # default container log path to a host log directory.
        log_volume_map = super(PreconfiguredContainer, self)._get_log_volume_map()
        if log_volume_map:  # User provided Logging in Dockerrun.aws.json
            return log_volume_map
        else:
            host_log = log.get_host_log_path(self.fs_handler.logdir_path)
            cont_log = containerops.get_runtime_default_log_path(self.soln_stk,
                                                                 self.container_cfg)
            return {host_log: cont_log}
def _validate_preconfig_dockerfile(soln_stk, container_config,
                                   full_docker_path):
    """
    Validates that the Dockerfile found at full_docker_path has the correct
    Docker base image that matches runtime Docker image appropriate for this
    solution stack. For example, the given solution stack:
    64bit Debian jessie v1.2.0 running GlassFish 4.1 Java 8 (Preconfigured - Docker)
    must have
    glassfish-runtime-4.1-jdk8 base image in the Dockerfile.
    :param soln_stk: SolutionStack: the solution stack
    :param container_config: dict: container_config.json as dict
    :param full_docker_path: str: path to the Dockerfile
    :return: None
    :raises ValidationError: if the Dockerfile's base image does not match
        the expected runtime image
    """
    container_info = containerops._get_preconfig_info(soln_stk,
                                                      container_config)
    expected_img = container_info[containerops.RUNTIME_IMG_KEY]
    actual_img = commands._get_base_img(full_docker_path)
    if actual_img != expected_img:
        # Build the message only when validation actually fails.
        err_msg = ('Invalid base Docker img in Dockerfile. Expected {} but got {}'
                   .format(expected_img, actual_img))
        raise ValidationError(err_msg)
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from . import commands
from . import containerops
from . import dockerrun
from . import log
from .container import Container
from ..objects.exceptions import ValidationError
class PreconfiguredContainer(Container):
    """
    Immutable class used for running Preconfigured Docker containers.
    """

    def validate(self):
        # A user-supplied Dockerfile must be based on the runtime image that
        # matches this solution stack.
        if self.fs_handler.dockerfile_exists:
            _validate_preconfig_dockerfile(self.soln_stk,
                                           self.container_cfg,
                                           self.fs_handler.dockerfile_path)
        # Validate the Dockerrun file against the v1 schema.
        dockerrun.validate_dockerrun_v1(self.fs_handler.dockerrun, False)

    def _containerize(self):
        # Stage the solution-stack-appropriate Dockerfile into the project.
        self.fs_handler.copy_dockerfile(self.soln_stk, self.container_cfg)

    def _get_log_volume_map(self):
        # Prefer any log mapping the user declared; otherwise map the runtime's
        # default container log path to a host log directory.
        log_volume_map = super(PreconfiguredContainer, self)._get_log_volume_map()
        if log_volume_map:  # User provided Logging in Dockerrun.aws.json
            return log_volume_map
        else:
            host_log = log.get_host_log_path(self.fs_handler.logdir_path)
            cont_log = containerops.get_runtime_default_log_path(self.soln_stk,
                                                                 self.container_cfg)
            return {host_log: cont_log}
def _validate_preconfig_dockerfile(soln_stk, container_config,
                                   full_docker_path):
    """
    Validates that the Dockerfile found at full_docker_path has the correct
    Docker base image that matches runtime Docker image appropriate for this
    solution stack. For example, the given solution stack:
    64bit Debian jessie v1.2.0 running GlassFish 4.1 Java 8 (Preconfigured - Docker)
    must have
    glassfish-runtime-4.1-jdk8 base image in the Dockerfile.
    :param soln_stk: SolutionStack: the solution stack
    :param container_config: dict: container_config.json as dict
    :param full_docker_path: str: path to the Dockerfile
    :return: None
    :raises ValidationError: if the Dockerfile's base image does not match
        the expected runtime image
    """
    container_info = containerops._get_preconfig_info(soln_stk,
                                                      container_config)
    expected_img = container_info[containerops.RUNTIME_IMG_KEY]
    actual_img = commands._get_base_img(full_docker_path)
    if actual_img != expected_img:
        # Build the message only when validation actually fails.
        err_msg = ('Invalid base Docker img in Dockerfile. Expected {} but got {}'
                   .format(expected_img, actual_img))
        raise ValidationError(err_msg)
|
en
| 0.827182
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. Immutable class used for running Preconfigured Docker containers. # User provided Logging in Dockerrun.aws.json Validates that the Dockerfile found at full_docker_path has the correct Docker base image that matches runtime Docker image appropriate for this solution stack. For example, the given solution stack: 64bit Debian jessie v1.2.0 running GlassFish 4.1 Java 8 (Preconfigured - Docker) must have glassfish-runtime-4.1-jdk8 base image in the Dockerfile. :param soln_stk: SolutionStack: the solution stack :param container_config: dict: container_config.json as dict :param full_docker_path: str: path to the Dockerfile :return: bool
| 1.817833
| 2
|
pyannote/audio/labeling/models.py
|
nhamilakis/pyannote-audio
| 0
|
6627168
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2017-2019 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..train.utils import get_info
from torch.nn.utils.rnn import PackedSequence
from . import TASK_MULTI_CLASS_CLASSIFICATION
from . import TASK_MULTI_LABEL_CLASSIFICATION
from . import TASK_REGRESSION
class StackedRNN(nn.Module):
"""Stacked recurrent neural network
Parameters
----------
specifications : `dict`
Provides model IO specifications using the following data structure:
{'X': {'dimension': DIMENSION},
'y': {'classes': CLASSES},
'task': TASK_TYPE}
where
* DIMENSION is the input feature dimension
* CLASSES is the list of (human-readable) output classes
* TASK_TYPE is either TASK_MULTI_CLASS_CLASSIFICATION, TASK_REGRESSION, or
TASK_MULTI_LABEL_CLASSIFICATION. Depending on which task is
adressed, the final activation will vary. Classification relies
on log-softmax, multi-label classificatition and regression use
sigmoid.
instance_normalize : boolean, optional
Apply mean/variance normalization on input sequences.
rnn : {'LSTM', 'GRU'}, optional
Defaults to 'LSTM'.
recurrent : list, optional
List of hidden dimensions of stacked recurrent layers. Defaults to
[16, ], i.e. one recurrent layer with hidden dimension of 16.
bidirectional : bool, optional
Use bidirectional recurrent layers. Defaults to False, i.e. use
mono-directional RNNs.
pooling : {None, 'sum', 'max'}
Apply temporal pooling before linear layers. Defaults to no pooling.
This is useful for tasks expecting just one label per sequence.
linear : list, optional
List of hidden dimensions of linear layers. Defaults to [16, ], i.e.
one linear layer with hidden dimension of 16.
"""
def __init__(self, specifications, instance_normalize=False,
rnn='LSTM', recurrent=[16,], bidirectional=False,
linear=[16, ], pooling=None):
super(StackedRNN, self).__init__()
self.specifications = specifications
n_features = specifications['X']['dimension']
self.n_features_ = n_features
n_classes = len(specifications['y']['classes'])
self.n_classes_ = n_classes
task_type = specifications['task']
if task_type not in {TASK_MULTI_CLASS_CLASSIFICATION,
TASK_MULTI_LABEL_CLASSIFICATION,
TASK_REGRESSION}:
msg = (f"`task_type` must be one of {TASK_MULTI_CLASS_CLASSIFICATION}, "
f"{TASK_MULTI_LABEL_CLASSIFICATION} or {TASK_REGRESSION}.")
raise ValueError(msg)
self.task_type_ = task_type
self.instance_normalize = instance_normalize
self.rnn = rnn
self.recurrent = recurrent
self.bidirectional = bidirectional
self.pooling = pooling
self.linear = linear
self.num_directions_ = 2 if self.bidirectional else 1
# create list of recurrent layers
self.recurrent_layers_ = []
input_dim = self.n_features_
for i, hidden_dim in enumerate(self.recurrent):
if self.rnn == 'LSTM':
recurrent_layer = nn.LSTM(input_dim, hidden_dim,
bidirectional=self.bidirectional,
batch_first=True)
elif self.rnn == 'GRU':
recurrent_layer = nn.GRU(input_dim, hidden_dim,
bidirectional=self.bidirectional,
batch_first=True)
else:
raise ValueError('"rnn" must be one of {"LSTM", "GRU"}.')
self.add_module('recurrent_{0}'.format(i), recurrent_layer)
self.recurrent_layers_.append(recurrent_layer)
input_dim = hidden_dim
# create list of linear layers
self.linear_layers_ = []
for i, hidden_dim in enumerate(self.linear):
linear_layer = nn.Linear(input_dim, hidden_dim, bias=True)
self.add_module('linear_{0}'.format(i), linear_layer)
self.linear_layers_.append(linear_layer)
input_dim = hidden_dim
self.last_hidden_dim_ = input_dim
self.final_layer_ = nn.Linear(self.last_hidden_dim_, self.n_classes_)
@property
def classes(self):
return self.specifications['y']['classes']
@property
def n_classes(self):
return len(self.specifications['y']['classes'])
    def forward(self, sequences):
        """Apply the stacked RNN to a batch of fixed-length sequences.

        Parameters
        ----------
        sequences : (batch_size, n_samples, n_features) `torch.tensor`
            Batch of sequences.

        Returns
        -------
        predictions : `torch.tensor`
            Shape is (batch_size, n_samples, n_classes) without pooling, and
            (batch_size, n_classes) with pooling.

        Raises
        ------
        ValueError
            If `sequences` is a `PackedSequence` (variable-length batches are
            not supported) or if the feature dimension does not match the
            one declared in `specifications`.
        """
        if isinstance(sequences, PackedSequence):
            msg = (f'{self.__class__.__name__} does not support batches '
                   f'containing sequences of variable length.')
            raise ValueError(msg)
        batch_size, n_features, device = get_info(sequences)
        if n_features != self.n_features_:
            msg = 'Wrong feature dimension. Found {0}, should be {1}'
            raise ValueError(msg.format(n_features, self.n_features_))
        output = sequences
        if self.instance_normalize:
            # F.instance_norm expects (batch, features, time): transpose,
            # normalize, transpose back to (batch, time, features).
            output = output.transpose(1, 2)
            output = F.instance_norm(output)
            output = output.transpose(1, 2)
        # stack recurrent layers
        for hidden_dim, layer in zip(self.recurrent, self.recurrent_layers_):
            if self.rnn == 'LSTM':
                # initial hidden and cell states, zeroed every call
                h = torch.zeros(self.num_directions_, batch_size, hidden_dim,
                                device=device, requires_grad=False)
                c = torch.zeros(self.num_directions_, batch_size, hidden_dim,
                                device=device, requires_grad=False)
                hidden = (h, c)
            elif self.rnn == 'GRU':
                # initial hidden state
                hidden = torch.zeros(
                    self.num_directions_, batch_size, hidden_dim,
                    device=device, requires_grad=False)
            # apply current recurrent layer and get output sequence
            output, _ = layer(output, hidden)
            # average both directions in case of bidirectional layers
            if self.bidirectional:
                output = .5 * (output[:, :, :hidden_dim] + \
                               output[:, :, hidden_dim:])
        if self.pooling is not None:
            # NOTE(review): values other than 'sum'/'max' are silently
            # ignored here (no pooling happens) — confirm whether __init__
            # should validate `pooling` up front.
            if self.pooling == 'sum':
                output = output.sum(dim=1)
            elif self.pooling == 'max':
                output, _ = output.max(dim=1)
        # stack linear layers
        for hidden_dim, layer in zip(self.linear, self.linear_layers_):
            # apply current linear layer
            output = layer(output)
            # apply non-linear activation function
            output = torch.tanh(output)
        # apply final classification layer
        output = self.final_layer_(output)
        # task-dependent final activation (see class docstring)
        if self.task_type_ == TASK_MULTI_CLASS_CLASSIFICATION:
            return torch.log_softmax(output, dim=-1)
        elif self.task_type_ == TASK_MULTI_LABEL_CLASSIFICATION:
            return torch.sigmoid(output)
        elif self.task_type_ == TASK_REGRESSION:
            return torch.sigmoid(output)
class ConvRNN(nn.Module):
    """1D convolutional network followed by a stacked recurrent neural network

    Parameters
    ----------
    specifications : `dict`
        Provides model IO specifications using the following data structure:
            {'X': {'dimension': DIMENSION},
             'y': {'classes': CLASSES},
             'task': TASK_TYPE}
        where
            * DIMENSION is the input feature dimension
            * CLASSES is the list of (human-readable) output classes
            * TASK_TYPE is either TASK_MULTI_CLASS_CLASSIFICATION,
              TASK_REGRESSION, or TASK_MULTI_LABEL_CLASSIFICATION. Depending
              on which task is addressed, the final activation will vary.
              Classification relies on log-softmax, multi-label
              classification and regression use sigmoid.
    norm : {'batch', 'instance'}, optional
        Apply instance or batch normalization after each convolutional layer.
    rnn : {'LSTM', 'GRU'}, optional
        Defaults to 'LSTM'.
    recurrent : list, optional
        List of hidden dimensions of stacked recurrent layers. Defaults to
        [16, ], i.e. one recurrent layer with hidden dimension of 16.
    bidirectional : bool, optional
        Use bidirectional recurrent layers. Defaults to False, i.e. use
        mono-directional RNNs.
    linear : list, optional
        List of hidden dimensions of linear layers. Defaults to [16, ], i.e.
        one linear layer with hidden dimension of 16.
    conv_out : list, optional
        Output channels of each convolutional layer. Defaults to [128, ].
    kernel_size : list, optional
        Kernel size of each convolutional layer; must have the same length
        as `conv_out`. Defaults to [32, ].
    dropout : float, optional
        Dropout probability applied after the convolutional stack.
        Defaults to 0 (no dropout).
    """

    def __init__(self, specifications,
                 norm=None, rnn='LSTM', recurrent=None, bidirectional=False,
                 linear=None, conv_out=None, kernel_size=None, dropout=0.):
        super(ConvRNN, self).__init__()
        # Resolve defaults here rather than using mutable default arguments,
        # which would be shared across every instance of the class.
        recurrent = [16] if recurrent is None else recurrent
        linear = [16] if linear is None else linear
        conv_out = [128] if conv_out is None else conv_out
        kernel_size = [32] if kernel_size is None else kernel_size
        self.specifications = specifications
        n_features = specifications['X']['dimension']
        self.n_features_ = n_features
        n_classes = len(specifications['y']['classes'])
        self.n_classes_ = n_classes
        task_type = specifications['task']
        if task_type not in {TASK_MULTI_CLASS_CLASSIFICATION,
                             TASK_MULTI_LABEL_CLASSIFICATION,
                             TASK_REGRESSION}:
            msg = (f"`task_type` must be one of {TASK_MULTI_CLASS_CLASSIFICATION}, "
                   f"{TASK_MULTI_LABEL_CLASSIFICATION} or {TASK_REGRESSION}.")
            raise ValueError(msg)
        self.task_type_ = task_type
        self.norm = norm
        self.conv_out = conv_out
        self.rnn = rnn
        self.recurrent = recurrent
        self.bidirectional = bidirectional
        self.linear = linear
        self.kernel_size = kernel_size
        # number of RNN directions; used to size initial hidden states
        self.num_directions_ = 2 if self.bidirectional else 1
        if len(conv_out) != len(kernel_size):
            raise ValueError("The convolutional output channels list must of same size than the kernel sizes list.")
        if self.norm is not None and self.norm not in ["batch", "instance"]:
            raise ValueError("norm parameter must be in ['batch', 'instance']")
        input_dim = self.n_features_
        self.conv1d_, self.norm_, self.relu_, self.max_pool_ = nn.ModuleList([]), nn.ModuleList([]), nn.ModuleList([]), nn.ModuleList([])
        # `ksize` avoids shadowing the `kernel_size` argument inside the loop
        for i, (out_channel, ksize) in enumerate(zip(self.conv_out, self.kernel_size)):
            # NOTE(review): nn.Conv1d is given a 2-element kernel and, in
            # forward(), a 4D (N, C, M, T) input — it effectively acts as a
            # 2D convolution over the frequency axis. Confirm this is
            # intended (nn.Conv2d would state it explicitly).
            conv_layer = nn.Conv1d(in_channels=1, out_channels=out_channel, kernel_size=[ksize, 1])
            if self.norm == "batch":
                self.norm_.append(nn.BatchNorm2d(out_channel))
            if self.norm == "instance":
                self.norm_.append(nn.InstanceNorm2d(out_channel))
            relu = nn.ReLU(inplace=True)
            # pooling window spans the whole (post-convolution) frequency
            # axis, collapsing it to size 1
            maxpool = nn.MaxPool2d(kernel_size=[input_dim-ksize+1, 1])
            self.conv1d_.append(conv_layer)
            self.relu_.append(relu)
            self.max_pool_.append(maxpool)
            # after pooling + transpose in forward(), the channel count
            # becomes the "frequency" dimension seen by the next layer
            input_dim = out_channel
        # Dropout
        self.dropout = dropout
        if self.dropout:
            self.dropout_ = nn.Dropout(p=self.dropout)
        # create list of recurrent layers
        self.recurrent_layers_ = []
        for i, hidden_dim in enumerate(self.recurrent):
            if self.rnn == 'LSTM':
                recurrent_layer = nn.LSTM(input_dim, hidden_dim,
                                          bidirectional=self.bidirectional,
                                          batch_first=True)
            elif self.rnn == 'GRU':
                recurrent_layer = nn.GRU(input_dim, hidden_dim,
                                         bidirectional=self.bidirectional,
                                         batch_first=True)
            else:
                raise ValueError('"rnn" must be one of {"LSTM", "GRU"}.')
            self.add_module('recurrent_{0}'.format(i), recurrent_layer)
            self.recurrent_layers_.append(recurrent_layer)
            input_dim = hidden_dim
        # create list of linear layers
        self.linear_layers_ = []
        for i, hidden_dim in enumerate(self.linear):
            linear_layer = nn.Linear(input_dim, hidden_dim, bias=True)
            self.add_module('linear_{0}'.format(i), linear_layer)
            self.linear_layers_.append(linear_layer)
            input_dim = hidden_dim
        # create final classification layer
        self.final_layer_ = nn.Linear(input_dim, self.n_classes_)

    @property
    def classes(self):
        """Human-readable output classes, as declared in `specifications`."""
        return self.specifications['y']['classes']

    @property
    def n_classes(self):
        """Number of output classes."""
        return len(self.specifications['y']['classes'])

    def get_loss(self):
        """Return loss function (with support for class weights)

        Returns
        -------
        loss_func : callable
            Function f(input, target, weight=None) -> loss value
        """
        if self.task_type_ == TASK_MULTI_CLASS_CLASSIFICATION:
            return F.nll_loss
        if self.task_type_ == TASK_MULTI_LABEL_CLASSIFICATION:
            return F.binary_cross_entropy
        if self.task_type_ == TASK_REGRESSION:
            # `weight` accepted for interface parity with the other losses
            # but deliberately unused
            def mse_loss(input, target, weight=None):
                return F.mse_loss(input, target)
            return mse_loss

    def forward(self, sequences):
        """Apply the convolutional stack then the stacked RNN.

        Parameters
        ----------
        sequences : (batch_size, n_samples, n_features) `torch.tensor`
            Batch of fixed-length sequences.

        Returns
        -------
        predictions : `torch.tensor`
            Task-dependent activations over the last dimension.

        Raises
        ------
        ValueError
            If `sequences` is a `PackedSequence` or the feature dimension
            does not match the one declared in `specifications`.
        """
        if isinstance(sequences, PackedSequence):
            msg = (f'{self.__class__.__name__} does not support batches '
                   f'containing sequences of variable length.')
            raise ValueError(msg)
        batch_size, n_features, device = get_info(sequences)
        if n_features != self.n_features_:
            msg = 'Wrong feature dimension. Found {0}, should be {1}'
            raise ValueError(msg.format(n_features, self.n_features_))
        output = sequences
        if self.norm == "instance":
            # F.instance_norm expects (batch, features, time)
            output = output.transpose(1, 2)
            output = F.instance_norm(output)
            output = output.transpose(1, 2)
        # Here's N describes the batch size, T describes the time dimension
        # M describes the number of mel filterbanks used for computing the spectrogram (frequency dimension)
        # C describes the number of channels = 1
        # Transform the (N, T, M) to (N, C, M, T)
        output = output.transpose(1, 2).unsqueeze(1).contiguous()
        layers = zip(self.conv1d_, self.relu_, self.max_pool_)
        for i, (conv1d, relu, max_pool) in enumerate(layers):
            output = conv1d(output)
            if self.norm is not None:
                output = self.norm_[i](output)
            output = relu(output)
            # collapse the (post-convolution) frequency axis to size 1
            output = max_pool(output)
            # move channels into the frequency slot so the next conv layer
            # again sees a single input channel
            output = output.transpose(1, 2).contiguous()
        # Returns to (N, T, M)
        output = output.squeeze(dim=1).transpose(1,2).contiguous()
        if self.dropout:
            output = self.dropout_(output)
        # stack recurrent layers
        for hidden_dim, layer in zip(self.recurrent, self.recurrent_layers_):
            if self.rnn == 'LSTM':
                # initial hidden and cell states, zeroed every call
                h = torch.zeros(self.num_directions_, batch_size, hidden_dim,
                                device=device, requires_grad=False)
                c = torch.zeros(self.num_directions_, batch_size, hidden_dim,
                                device=device, requires_grad=False)
                hidden = (h, c)
            elif self.rnn == 'GRU':
                # initial hidden state
                hidden = torch.zeros(
                    self.num_directions_, batch_size, hidden_dim,
                    device=device, requires_grad=False)
            # apply current recurrent layer and get output sequence
            output, _ = layer(output, hidden)
            # average both directions in case of bidirectional layers
            if self.bidirectional:
                output = .5 * (output[:, :, :hidden_dim] + \
                               output[:, :, hidden_dim:])
        # stack linear layers
        for hidden_dim, layer in zip(self.linear, self.linear_layers_):
            # apply current linear layer
            output = layer(output)
            # apply non-linear activation function
            output = torch.tanh(output)
        # apply final classification layer
        output = self.final_layer_(output)
        # task-dependent final activation (see class docstring)
        if self.task_type_ == TASK_MULTI_CLASS_CLASSIFICATION:
            return torch.log_softmax(output, dim=2)
        elif self.task_type_ == TASK_MULTI_LABEL_CLASSIFICATION:
            return torch.sigmoid(output)
        elif self.task_type_ == TASK_REGRESSION:
            return torch.sigmoid(output)
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2017-2019 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..train.utils import get_info
from torch.nn.utils.rnn import PackedSequence
from . import TASK_MULTI_CLASS_CLASSIFICATION
from . import TASK_MULTI_LABEL_CLASSIFICATION
from . import TASK_REGRESSION
class StackedRNN(nn.Module):
    """Stacked recurrent neural network

    Parameters
    ----------
    specifications : `dict`
        Provides model IO specifications using the following data structure:
            {'X': {'dimension': DIMENSION},
             'y': {'classes': CLASSES},
             'task': TASK_TYPE}
        where
            * DIMENSION is the input feature dimension
            * CLASSES is the list of (human-readable) output classes
            * TASK_TYPE is either TASK_MULTI_CLASS_CLASSIFICATION,
              TASK_REGRESSION, or TASK_MULTI_LABEL_CLASSIFICATION. Depending
              on which task is addressed, the final activation will vary.
              Classification relies on log-softmax, multi-label
              classification and regression use sigmoid.
    instance_normalize : boolean, optional
        Apply mean/variance normalization on input sequences.
    rnn : {'LSTM', 'GRU'}, optional
        Defaults to 'LSTM'.
    recurrent : list, optional
        List of hidden dimensions of stacked recurrent layers. Defaults to
        [16, ], i.e. one recurrent layer with hidden dimension of 16.
    bidirectional : bool, optional
        Use bidirectional recurrent layers. Defaults to False, i.e. use
        mono-directional RNNs.
    pooling : {None, 'sum', 'max'}
        Apply temporal pooling before linear layers. Defaults to no pooling.
        This is useful for tasks expecting just one label per sequence.
    linear : list, optional
        List of hidden dimensions of linear layers. Defaults to [16, ], i.e.
        one linear layer with hidden dimension of 16.
    """

    def __init__(self, specifications, instance_normalize=False,
                 rnn='LSTM', recurrent=None, bidirectional=False,
                 linear=None, pooling=None):
        super(StackedRNN, self).__init__()
        # Resolve defaults here rather than using mutable default arguments,
        # which would be shared across every instance of the class.
        recurrent = [16] if recurrent is None else recurrent
        linear = [16] if linear is None else linear
        self.specifications = specifications
        n_features = specifications['X']['dimension']
        self.n_features_ = n_features
        n_classes = len(specifications['y']['classes'])
        self.n_classes_ = n_classes
        task_type = specifications['task']
        if task_type not in {TASK_MULTI_CLASS_CLASSIFICATION,
                             TASK_MULTI_LABEL_CLASSIFICATION,
                             TASK_REGRESSION}:
            msg = (f"`task_type` must be one of {TASK_MULTI_CLASS_CLASSIFICATION}, "
                   f"{TASK_MULTI_LABEL_CLASSIFICATION} or {TASK_REGRESSION}.")
            raise ValueError(msg)
        self.task_type_ = task_type
        self.instance_normalize = instance_normalize
        self.rnn = rnn
        self.recurrent = recurrent
        self.bidirectional = bidirectional
        self.pooling = pooling
        self.linear = linear
        # number of RNN directions; used to size initial hidden states
        self.num_directions_ = 2 if self.bidirectional else 1
        # create list of recurrent layers (each registered as a submodule so
        # its parameters are tracked by nn.Module)
        self.recurrent_layers_ = []
        input_dim = self.n_features_
        for i, hidden_dim in enumerate(self.recurrent):
            if self.rnn == 'LSTM':
                recurrent_layer = nn.LSTM(input_dim, hidden_dim,
                                          bidirectional=self.bidirectional,
                                          batch_first=True)
            elif self.rnn == 'GRU':
                recurrent_layer = nn.GRU(input_dim, hidden_dim,
                                         bidirectional=self.bidirectional,
                                         batch_first=True)
            else:
                raise ValueError('"rnn" must be one of {"LSTM", "GRU"}.')
            self.add_module('recurrent_{0}'.format(i), recurrent_layer)
            self.recurrent_layers_.append(recurrent_layer)
            input_dim = hidden_dim
        # create list of linear layers
        self.linear_layers_ = []
        for i, hidden_dim in enumerate(self.linear):
            linear_layer = nn.Linear(input_dim, hidden_dim, bias=True)
            self.add_module('linear_{0}'.format(i), linear_layer)
            self.linear_layers_.append(linear_layer)
            input_dim = hidden_dim
        self.last_hidden_dim_ = input_dim
        # final task-specific classification/regression layer
        self.final_layer_ = nn.Linear(self.last_hidden_dim_, self.n_classes_)

    @property
    def classes(self):
        """Human-readable output classes, as declared in `specifications`."""
        return self.specifications['y']['classes']

    @property
    def n_classes(self):
        """Number of output classes."""
        return len(self.specifications['y']['classes'])

    def forward(self, sequences):
        """Apply the stacked RNN to a batch of fixed-length sequences.

        Parameters
        ----------
        sequences : (batch_size, n_samples, n_features) `torch.tensor`
            Batch of sequences.

        Returns
        -------
        predictions : `torch.tensor`
            Shape is (batch_size, n_samples, n_classes) without pooling, and
            (batch_size, n_classes) with pooling.

        Raises
        ------
        ValueError
            If `sequences` is a `PackedSequence` (variable-length batches
            are not supported) or if the feature dimension does not match
            the one declared in `specifications`.
        """
        if isinstance(sequences, PackedSequence):
            msg = (f'{self.__class__.__name__} does not support batches '
                   f'containing sequences of variable length.')
            raise ValueError(msg)
        batch_size, n_features, device = get_info(sequences)
        if n_features != self.n_features_:
            msg = 'Wrong feature dimension. Found {0}, should be {1}'
            raise ValueError(msg.format(n_features, self.n_features_))
        output = sequences
        if self.instance_normalize:
            # F.instance_norm expects (batch, features, time): transpose,
            # normalize, transpose back.
            output = output.transpose(1, 2)
            output = F.instance_norm(output)
            output = output.transpose(1, 2)
        # stack recurrent layers
        for hidden_dim, layer in zip(self.recurrent, self.recurrent_layers_):
            if self.rnn == 'LSTM':
                # initial hidden and cell states, zeroed every call
                h = torch.zeros(self.num_directions_, batch_size, hidden_dim,
                                device=device, requires_grad=False)
                c = torch.zeros(self.num_directions_, batch_size, hidden_dim,
                                device=device, requires_grad=False)
                hidden = (h, c)
            elif self.rnn == 'GRU':
                # initial hidden state
                hidden = torch.zeros(
                    self.num_directions_, batch_size, hidden_dim,
                    device=device, requires_grad=False)
            # apply current recurrent layer and get output sequence
            output, _ = layer(output, hidden)
            # average both directions in case of bidirectional layers
            if self.bidirectional:
                output = .5 * (output[:, :, :hidden_dim] + \
                               output[:, :, hidden_dim:])
        if self.pooling is not None:
            # NOTE(review): values other than 'sum'/'max' are silently
            # ignored here — confirm whether __init__ should validate
            # `pooling` up front.
            if self.pooling == 'sum':
                output = output.sum(dim=1)
            elif self.pooling == 'max':
                output, _ = output.max(dim=1)
        # stack linear layers
        for hidden_dim, layer in zip(self.linear, self.linear_layers_):
            # apply current linear layer
            output = layer(output)
            # apply non-linear activation function
            output = torch.tanh(output)
        # apply final classification layer
        output = self.final_layer_(output)
        # task-dependent final activation (see class docstring)
        if self.task_type_ == TASK_MULTI_CLASS_CLASSIFICATION:
            return torch.log_softmax(output, dim=-1)
        elif self.task_type_ == TASK_MULTI_LABEL_CLASSIFICATION:
            return torch.sigmoid(output)
        elif self.task_type_ == TASK_REGRESSION:
            return torch.sigmoid(output)
class ConvRNN(nn.Module):
    """1D convolutional network followed by a stacked recurrent neural network

    Parameters
    ----------
    specifications : `dict`
        Provides model IO specifications using the following data structure:
            {'X': {'dimension': DIMENSION},
             'y': {'classes': CLASSES},
             'task': TASK_TYPE}
        where
            * DIMENSION is the input feature dimension
            * CLASSES is the list of (human-readable) output classes
            * TASK_TYPE is either TASK_MULTI_CLASS_CLASSIFICATION,
              TASK_REGRESSION, or TASK_MULTI_LABEL_CLASSIFICATION. Depending
              on which task is addressed, the final activation will vary.
              Classification relies on log-softmax, multi-label
              classification and regression use sigmoid.
    norm : {'batch', 'instance'}, optional
        Apply instance or batch normalization after each convolutional layer.
    rnn : {'LSTM', 'GRU'}, optional
        Defaults to 'LSTM'.
    recurrent : list, optional
        List of hidden dimensions of stacked recurrent layers. Defaults to
        [16, ], i.e. one recurrent layer with hidden dimension of 16.
    bidirectional : bool, optional
        Use bidirectional recurrent layers. Defaults to False, i.e. use
        mono-directional RNNs.
    linear : list, optional
        List of hidden dimensions of linear layers. Defaults to [16, ], i.e.
        one linear layer with hidden dimension of 16.
    conv_out : list, optional
        Output channels of each convolutional layer. Defaults to [128, ].
    kernel_size : list, optional
        Kernel size of each convolutional layer; must have the same length
        as `conv_out`. Defaults to [32, ].
    dropout : float, optional
        Dropout probability applied after the convolutional stack.
        Defaults to 0 (no dropout).
    """

    def __init__(self, specifications,
                 norm=None, rnn='LSTM', recurrent=None, bidirectional=False,
                 linear=None, conv_out=None, kernel_size=None, dropout=0.):
        super(ConvRNN, self).__init__()
        # Resolve defaults here rather than using mutable default arguments,
        # which would be shared across every instance of the class.
        recurrent = [16] if recurrent is None else recurrent
        linear = [16] if linear is None else linear
        conv_out = [128] if conv_out is None else conv_out
        kernel_size = [32] if kernel_size is None else kernel_size
        self.specifications = specifications
        n_features = specifications['X']['dimension']
        self.n_features_ = n_features
        n_classes = len(specifications['y']['classes'])
        self.n_classes_ = n_classes
        task_type = specifications['task']
        if task_type not in {TASK_MULTI_CLASS_CLASSIFICATION,
                             TASK_MULTI_LABEL_CLASSIFICATION,
                             TASK_REGRESSION}:
            msg = (f"`task_type` must be one of {TASK_MULTI_CLASS_CLASSIFICATION}, "
                   f"{TASK_MULTI_LABEL_CLASSIFICATION} or {TASK_REGRESSION}.")
            raise ValueError(msg)
        self.task_type_ = task_type
        self.norm = norm
        self.conv_out = conv_out
        self.rnn = rnn
        self.recurrent = recurrent
        self.bidirectional = bidirectional
        self.linear = linear
        self.kernel_size = kernel_size
        # number of RNN directions; used to size initial hidden states
        self.num_directions_ = 2 if self.bidirectional else 1
        if len(conv_out) != len(kernel_size):
            raise ValueError("The convolutional output channels list must of same size than the kernel sizes list.")
        if self.norm is not None and self.norm not in ["batch", "instance"]:
            raise ValueError("norm parameter must be in ['batch', 'instance']")
        input_dim = self.n_features_
        self.conv1d_, self.norm_, self.relu_, self.max_pool_ = nn.ModuleList([]), nn.ModuleList([]), nn.ModuleList([]), nn.ModuleList([])
        # `ksize` avoids shadowing the `kernel_size` argument inside the loop
        for i, (out_channel, ksize) in enumerate(zip(self.conv_out, self.kernel_size)):
            # NOTE(review): nn.Conv1d is given a 2-element kernel and, in
            # forward(), a 4D (N, C, M, T) input — it effectively acts as a
            # 2D convolution over the frequency axis. Confirm this is
            # intended (nn.Conv2d would state it explicitly).
            conv_layer = nn.Conv1d(in_channels=1, out_channels=out_channel, kernel_size=[ksize, 1])
            if self.norm == "batch":
                self.norm_.append(nn.BatchNorm2d(out_channel))
            if self.norm == "instance":
                self.norm_.append(nn.InstanceNorm2d(out_channel))
            relu = nn.ReLU(inplace=True)
            # pooling window spans the whole (post-convolution) frequency
            # axis, collapsing it to size 1
            maxpool = nn.MaxPool2d(kernel_size=[input_dim-ksize+1, 1])
            self.conv1d_.append(conv_layer)
            self.relu_.append(relu)
            self.max_pool_.append(maxpool)
            # after pooling + transpose in forward(), the channel count
            # becomes the "frequency" dimension seen by the next layer
            input_dim = out_channel
        # Dropout
        self.dropout = dropout
        if self.dropout:
            self.dropout_ = nn.Dropout(p=self.dropout)
        # create list of recurrent layers
        self.recurrent_layers_ = []
        for i, hidden_dim in enumerate(self.recurrent):
            if self.rnn == 'LSTM':
                recurrent_layer = nn.LSTM(input_dim, hidden_dim,
                                          bidirectional=self.bidirectional,
                                          batch_first=True)
            elif self.rnn == 'GRU':
                recurrent_layer = nn.GRU(input_dim, hidden_dim,
                                         bidirectional=self.bidirectional,
                                         batch_first=True)
            else:
                raise ValueError('"rnn" must be one of {"LSTM", "GRU"}.')
            self.add_module('recurrent_{0}'.format(i), recurrent_layer)
            self.recurrent_layers_.append(recurrent_layer)
            input_dim = hidden_dim
        # create list of linear layers
        self.linear_layers_ = []
        for i, hidden_dim in enumerate(self.linear):
            linear_layer = nn.Linear(input_dim, hidden_dim, bias=True)
            self.add_module('linear_{0}'.format(i), linear_layer)
            self.linear_layers_.append(linear_layer)
            input_dim = hidden_dim
        # create final classification layer
        self.final_layer_ = nn.Linear(input_dim, self.n_classes_)

    @property
    def classes(self):
        """Human-readable output classes, as declared in `specifications`."""
        return self.specifications['y']['classes']

    @property
    def n_classes(self):
        """Number of output classes."""
        return len(self.specifications['y']['classes'])

    def get_loss(self):
        """Return loss function (with support for class weights)

        Returns
        -------
        loss_func : callable
            Function f(input, target, weight=None) -> loss value
        """
        if self.task_type_ == TASK_MULTI_CLASS_CLASSIFICATION:
            return F.nll_loss
        if self.task_type_ == TASK_MULTI_LABEL_CLASSIFICATION:
            return F.binary_cross_entropy
        if self.task_type_ == TASK_REGRESSION:
            # `weight` accepted for interface parity with the other losses
            # but deliberately unused
            def mse_loss(input, target, weight=None):
                return F.mse_loss(input, target)
            return mse_loss

    def forward(self, sequences):
        """Apply the convolutional stack then the stacked RNN.

        Parameters
        ----------
        sequences : (batch_size, n_samples, n_features) `torch.tensor`
            Batch of fixed-length sequences.

        Returns
        -------
        predictions : `torch.tensor`
            Task-dependent activations over the last dimension.

        Raises
        ------
        ValueError
            If `sequences` is a `PackedSequence` or the feature dimension
            does not match the one declared in `specifications`.
        """
        if isinstance(sequences, PackedSequence):
            msg = (f'{self.__class__.__name__} does not support batches '
                   f'containing sequences of variable length.')
            raise ValueError(msg)
        batch_size, n_features, device = get_info(sequences)
        if n_features != self.n_features_:
            msg = 'Wrong feature dimension. Found {0}, should be {1}'
            raise ValueError(msg.format(n_features, self.n_features_))
        output = sequences
        if self.norm == "instance":
            # F.instance_norm expects (batch, features, time)
            output = output.transpose(1, 2)
            output = F.instance_norm(output)
            output = output.transpose(1, 2)
        # Here's N describes the batch size, T describes the time dimension
        # M describes the number of mel filterbanks used for computing the spectrogram (frequency dimension)
        # C describes the number of channels = 1
        # Transform the (N, T, M) to (N, C, M, T)
        output = output.transpose(1, 2).unsqueeze(1).contiguous()
        layers = zip(self.conv1d_, self.relu_, self.max_pool_)
        for i, (conv1d, relu, max_pool) in enumerate(layers):
            output = conv1d(output)
            if self.norm is not None:
                output = self.norm_[i](output)
            output = relu(output)
            # collapse the (post-convolution) frequency axis to size 1
            output = max_pool(output)
            # move channels into the frequency slot so the next conv layer
            # again sees a single input channel
            output = output.transpose(1, 2).contiguous()
        # Returns to (N, T, M)
        output = output.squeeze(dim=1).transpose(1,2).contiguous()
        if self.dropout:
            output = self.dropout_(output)
        # stack recurrent layers
        for hidden_dim, layer in zip(self.recurrent, self.recurrent_layers_):
            if self.rnn == 'LSTM':
                # initial hidden and cell states, zeroed every call
                h = torch.zeros(self.num_directions_, batch_size, hidden_dim,
                                device=device, requires_grad=False)
                c = torch.zeros(self.num_directions_, batch_size, hidden_dim,
                                device=device, requires_grad=False)
                hidden = (h, c)
            elif self.rnn == 'GRU':
                # initial hidden state
                hidden = torch.zeros(
                    self.num_directions_, batch_size, hidden_dim,
                    device=device, requires_grad=False)
            # apply current recurrent layer and get output sequence
            output, _ = layer(output, hidden)
            # average both directions in case of bidirectional layers
            if self.bidirectional:
                output = .5 * (output[:, :, :hidden_dim] + \
                               output[:, :, hidden_dim:])
        # stack linear layers
        for hidden_dim, layer in zip(self.linear, self.linear_layers_):
            # apply current linear layer
            output = layer(output)
            # apply non-linear activation function
            output = torch.tanh(output)
        # apply final classification layer
        output = self.final_layer_(output)
        # task-dependent final activation (see class docstring)
        if self.task_type_ == TASK_MULTI_CLASS_CLASSIFICATION:
            return torch.log_softmax(output, dim=2)
        elif self.task_type_ == TASK_MULTI_LABEL_CLASSIFICATION:
            return torch.sigmoid(output)
        elif self.task_type_ == TASK_REGRESSION:
            return torch.sigmoid(output)
|
en
| 0.682185
|
#!/usr/bin/env python # encoding: utf-8 # The MIT License (MIT) # Copyright (c) 2017-2019 CNRS # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # AUTHORS # <NAME> - http://herve.niderb.fr Stacked recurrent neural network Parameters ---------- specifications : `dict` Provides model IO specifications using the following data structure: {'X': {'dimension': DIMENSION}, 'y': {'classes': CLASSES}, 'task': TASK_TYPE} where * DIMENSION is the input feature dimension * CLASSES is the list of (human-readable) output classes * TASK_TYPE is either TASK_MULTI_CLASS_CLASSIFICATION, TASK_REGRESSION, or TASK_MULTI_LABEL_CLASSIFICATION. Depending on which task is adressed, the final activation will vary. Classification relies on log-softmax, multi-label classificatition and regression use sigmoid. instance_normalize : boolean, optional Apply mean/variance normalization on input sequences. rnn : {'LSTM', 'GRU'}, optional Defaults to 'LSTM'. 
recurrent : list, optional List of hidden dimensions of stacked recurrent layers. Defaults to [16, ], i.e. one recurrent layer with hidden dimension of 16. bidirectional : bool, optional Use bidirectional recurrent layers. Defaults to False, i.e. use mono-directional RNNs. pooling : {None, 'sum', 'max'} Apply temporal pooling before linear layers. Defaults to no pooling. This is useful for tasks expecting just one label per sequence. linear : list, optional List of hidden dimensions of linear layers. Defaults to [16, ], i.e. one linear layer with hidden dimension of 16. # create list of recurrent layers # create list of linear layers Parameters ---------- sequences : (batch_size, n_samples, n_features) `torch.tensor` Batch of sequences. Returns ------- predictions : `torch.tensor` Shape is (batch_size, n_samples, n_classes) without pooling, and (batch_size, n_classes) with pooling. # stack recurrent layers # initial hidden and cell states # initial hidden state # apply current recurrent layer and get output sequence # average both directions in case of bidirectional layers # stack linear layers # apply current linear layer # apply non-linear activation function # apply final classification layer 1D convolutional network followed by a stacked recurrent neural network Parameters ---------- specifications : `dict` Provides model IO specifications using the following data structure: {'X': {'dimension': DIMENSION}, 'y': {'classes': CLASSES}, 'task': TASK_TYPE} where * DIMENSION is the input feature dimension * CLASSES is the list of (human-readable) output classes * TASK_TYPE is either TASK_MULTI_CLASS_CLASSIFICATION, TASK_REGRESSION, or TASK_MULTI_LABEL_CLASSIFICATION. Depending on which task is adressed, the final activation will vary. Classification relies on log-softmax, multi-label classificatition and regression use sigmoid. norm : {'batch', 'instance}, optional Apply instance or batch normalization after each convolutionnal layer. 
rnn : {'LSTM', 'GRU'}, optional Defaults to 'LSTM'. recurrent : list, optional List of hidden dimensions of stacked recurrent layers. Defaults to [16, ], i.e. one recurrent layer with hidden dimension of 16. bidirectional : bool, optional Use bidirectional recurrent layers. Defaults to False, i.e. use mono-directional RNNs. linear : list, optional List of hidden dimensions of linear layers. Defaults to [16, ], i.e. one linear layer with hidden dimension of 16. # Dropout # create list of recurrent layers # create list of linear layers # create final classification layer Return loss function (with support for class weights) Returns ------- loss_func : callable Function f(input, target, weight=None) -> loss value # Here's N describes the batch size, T describes the time dimension # M describes the number of mel filterbanks used for computing the spectrogram (frequency dimension) # C describes the number of channels = 1 # Transform the (N, T, M) to (N, C, M, T) # Returns to (N, T, M) # stack recurrent layers # initial hidden and cell states # initial hidden state # apply current recurrent layer and get output sequence # average both directions in case of bidirectional layers # stack linear layers # apply current linear layer # apply non-linear activation function # apply final classification layer
| 1.533236
| 2
|
skit_pipelines/components/preprocess/create_features/utils.py
|
skit-ai/skit-pipelines
| 0
|
6627169
|
import json
import pydash as py_
import pandas as pd
from skit_pipelines import constants as pipeline_constants
UTTERANCES = pipeline_constants.UTTERANCES
ALTERNATIVES = pipeline_constants.ALTERNATIVES
STATE = pipeline_constants.STATE
TAG = pipeline_constants.TAG
TRAIN = pipeline_constants.TRAIN
START_TOKEN = pipeline_constants.START_TOKEN
END_TOKEN = pipeline_constants.END_TOKEN
TRANSCRIPT = pipeline_constants.TRANSCRIPT
def get_transcript(utterance):
    """Return the transcript field of a single utterance dict (None if absent)."""
    transcript = utterance.get(TRANSCRIPT)
    return transcript
def featurize_utterances(utterances):
    """Join all transcripts into one start/end-token delimited feature string."""
    flat_utterances = py_.flatten(utterances)
    transcripts = [get_transcript(utterance) for utterance in flat_utterances]
    separator = f" {END_TOKEN} {START_TOKEN} "
    feature_as_str = separator.join(transcripts)
    return f"{START_TOKEN} {feature_as_str} {END_TOKEN}"
def featurize_state(state):
    """Wrap the conversation state between the start and end tokens."""
    wrapped = f"{START_TOKEN} {state} {END_TOKEN}"
    return wrapped
def row2features(use_state: bool):
    """Build a featurizer turning a dataframe row into a feature string.

    Parameters
    ----------
    use_state : bool
        When True, append the featurized conversation state to the
        featurized utterances.

    Returns
    -------
    callable
        Function mapping a `pd.Series` row to a feature string. The
        returned closure raises ValueError when no utterances can be
        found in a row.
    """
    def featurize(row: pd.Series):
        # Payload parsed from the "data" column, when present. Initialized
        # empty so the state lookup below is always defined (the original
        # referenced `data` even when it was never assigned, raising
        # NameError for rows carrying utterances/alternatives directly).
        data = {}
        if UTTERANCES in row:
            utterances = json.loads(row[UTTERANCES])
        elif ALTERNATIVES in row:
            utterances = json.loads(row[ALTERNATIVES])
        elif "data" in row:
            data = json.loads(row.data)
            # some sources double-encode the payload as a JSON string
            data = json.loads(data) if isinstance(data, str) else data
            utterances = data.get(UTTERANCES) or data.get(ALTERNATIVES)
            utterances = (
                json.loads(utterances) if isinstance(utterances, str) else utterances
            )
        else:
            raise ValueError(f"No utterances found in row: {row}")
        feat_utterance = featurize_utterances(utterances)
        if not use_state:
            return feat_utterance
        # Only compute the state feature when requested: prefer the parsed
        # payload, fall back to a top-level column of the same name.
        state = data.get(STATE)
        if state is None and STATE in row:
            state = row[STATE]
        feat_state = featurize_state(state)
        return f"{feat_utterance} {feat_state}"

    return featurize
|
import json
import pydash as py_
import pandas as pd
from skit_pipelines import constants as pipeline_constants
UTTERANCES = pipeline_constants.UTTERANCES
ALTERNATIVES = pipeline_constants.ALTERNATIVES
STATE = pipeline_constants.STATE
TAG = pipeline_constants.TAG
TRAIN = pipeline_constants.TRAIN
START_TOKEN = pipeline_constants.START_TOKEN
END_TOKEN = pipeline_constants.END_TOKEN
TRANSCRIPT = pipeline_constants.TRANSCRIPT
def get_transcript(utterance):
    """Return the transcript field of a single utterance dict (None if absent)."""
    transcript = utterance.get(TRANSCRIPT)
    return transcript
def featurize_utterances(utterances):
    """Join all transcripts into one start/end-token delimited feature string."""
    flat_utterances = py_.flatten(utterances)
    transcripts = [get_transcript(utterance) for utterance in flat_utterances]
    separator = f" {END_TOKEN} {START_TOKEN} "
    feature_as_str = separator.join(transcripts)
    return f"{START_TOKEN} {feature_as_str} {END_TOKEN}"
def featurize_state(state):
    """Wrap the conversation state between the start and end tokens."""
    wrapped = f"{START_TOKEN} {state} {END_TOKEN}"
    return wrapped
def row2features(use_state: bool):
    """Build a featurizer turning a dataframe row into a feature string.

    Parameters
    ----------
    use_state : bool
        When True, append the featurized conversation state to the
        featurized utterances.

    Returns
    -------
    callable
        Function mapping a `pd.Series` row to a feature string. The
        returned closure raises ValueError when no utterances can be
        found in a row.
    """
    def featurize(row: pd.Series):
        # Payload parsed from the "data" column, when present. Initialized
        # empty so the state lookup below is always defined (the original
        # referenced `data` even when it was never assigned, raising
        # NameError for rows carrying utterances/alternatives directly).
        data = {}
        if UTTERANCES in row:
            utterances = json.loads(row[UTTERANCES])
        elif ALTERNATIVES in row:
            utterances = json.loads(row[ALTERNATIVES])
        elif "data" in row:
            data = json.loads(row.data)
            # some sources double-encode the payload as a JSON string
            data = json.loads(data) if isinstance(data, str) else data
            utterances = data.get(UTTERANCES) or data.get(ALTERNATIVES)
            utterances = (
                json.loads(utterances) if isinstance(utterances, str) else utterances
            )
        else:
            raise ValueError(f"No utterances found in row: {row}")
        feat_utterance = featurize_utterances(utterances)
        if not use_state:
            return feat_utterance
        # Only compute the state feature when requested: prefer the parsed
        # payload, fall back to a top-level column of the same name.
        state = data.get(STATE)
        if state is None and STATE in row:
            state = row[STATE]
        feat_state = featurize_state(state)
        return f"{feat_utterance} {feat_state}"

    return featurize
|
none
| 1
| 2.624634
| 3
|
|
tests/bugs/issue_73/issue_73_1_fast_mode_2.py
|
jmabry/pyaf
| 377
|
6627170
|
import numpy as np
import pandas as pd
# Taylor half-hourly electricity-demand series, fetched from the PyAF
# companion data repository (network access is required at run time).
df_train = pd.read_csv("https://raw.githubusercontent.com/antoinecarme/TimeSeriesData/master/R_TSData/taylor.csv")
import pyaf.ForecastEngine as autof
lEngine = autof.cForecastEngine()
# NOTE(review): None presumably lets PyAF pick cycle lengths automatically
# (this file is a regression test for issue 73 "fast mode") — confirm in docs.
lEngine.mOptions.mCycleLengths = None
# fit on the 'signal' column indexed by 'time', forecasting 36 steps ahead
lEngine.train(iInputDS = df_train, iTime = 'time', iSignal = 'signal', iHorizon = 36);
lEngine.getModelInfo() # print a description of the selected model
|
import numpy as np
import pandas as pd
# Taylor half-hourly electricity-demand series, fetched from the PyAF
# companion data repository (network access is required at run time).
df_train = pd.read_csv("https://raw.githubusercontent.com/antoinecarme/TimeSeriesData/master/R_TSData/taylor.csv")
import pyaf.ForecastEngine as autof
lEngine = autof.cForecastEngine()
# NOTE(review): None presumably lets PyAF pick cycle lengths automatically
# (this file is a regression test for issue 73 "fast mode") — confirm in docs.
lEngine.mOptions.mCycleLengths = None
# fit on the 'signal' column indexed by 'time', forecasting 36 steps ahead
lEngine.train(iInputDS = df_train, iTime = 'time', iSignal = 'signal', iHorizon = 36);
lEngine.getModelInfo() # print a description of the selected model
|
none
| 1
| 2.210416
| 2
|
|
Ex004.py
|
iRnx/Programas-em-Python-part-1
| 3
|
6627171
|
# Exercise: read something from the keyboard and show its primitive type and
# several string-classification properties on screen.
# (user-facing strings are intentionally kept in Portuguese)
a = input('Digite algo: ')
print(f'O tipo primitivo de \033[33m{a}\033[m é: {type(a)}')  # \033[33m / \033[m = yellow ANSI color on/off
print(f'É numerico?, {a.isnumeric()}')
print(f'É AlfaNumerico?, {a.isalnum()}')
print(f'É Alfa?, {a.isalpha()}')
print(f'É Maiúsculas?, {a.isupper()}')
print(f'É Minúsculas?, {a.islower()}')
print(f'Está Capitalizada?, {a.istitle()}')
print(f'Só tem espaços?, {a.isspace()}')
print(f'É número da tabela ASCII?, {a.isascii()} ')  # str.isascii() needs Python 3.7+
|
# Exercise: read something from the keyboard and show its primitive type and
# several string-classification properties on screen.
# (user-facing strings are intentionally kept in Portuguese)
a = input('Digite algo: ')
print(f'O tipo primitivo de \033[33m{a}\033[m é: {type(a)}')  # \033[33m / \033[m = yellow ANSI color on/off
print(f'É numerico?, {a.isnumeric()}')
print(f'É AlfaNumerico?, {a.isalnum()}')
print(f'É Alfa?, {a.isalpha()}')
print(f'É Maiúsculas?, {a.isupper()}')
print(f'É Minúsculas?, {a.islower()}')
print(f'Está Capitalizada?, {a.istitle()}')
print(f'Só tem espaços?, {a.isspace()}')
print(f'É número da tabela ASCII?, {a.isascii()} ')  # str.isascii() needs Python 3.7+
|
pt
| 0.880103
|
# Faça um programa que leia algo pelo teclado e mostre na tela o seu tipo primitivo#
| 3.954623
| 4
|
MyBike_Day.py
|
yolygithub2020/Bike-rental
| 0
|
6627172
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 19 14:21:35 2021
@author: chrysmok
"""
# import the library
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn as skl
# import sklearn.linear_model as skl_lm
from sklearn import preprocessing
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
import sklearn.linear_model as skl_lm
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import RFE
from sklearn.preprocessing import scale
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.metrics import mean_absolute_error
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.stats.outliers_influence import variance_inflation_factor
import statistics as st
from scipy import linalg
from math import sqrt
# NOTE(review): the second style.use overrides the first; only "seaborn" sticks.
plt.style.use('seaborn-white')
plt.style.use("seaborn")
pd.set_option('max_columns', 1000)# display in spyder console up to 1000 columns
# bikeDay = pd.read_csv('day.csv', usecols=[1,2,3,4])
bikeDay = pd.read_csv('day.csv')
# NOTE(review): `df` (Boston) and `iris` below are loaded but never used again —
# presumably tutorial leftovers; candidates for removal.
df = pd.read_csv('Data/Boston.csv', index_col=0)
bikeDay.head()
iris = sns.load_dataset('iris')
iris.head()
from IPython.core.display import display_html
# display(HTML(\"<style>.container { width:80% !important; }</style>\"))
bikeDay.shape
bike=bikeDay
bike.size
# basic data-quality checks (the isnull() call is duplicated)
bike.isnull().sum()
bike.isnull().sum()
bike.info()
bike.dtypes
bike.describe()
bike.nunique()
# Learning Outcome: except one column
bike_dup = bike.copy()
bike_dup.shape
# Create a copy of the dataframe, without the 'instant' column
bike_dummy=bike.iloc[:,1:16]
for col in bike_dummy:
    print(bike_dummy[col].value_counts(ascending=False), )
bike.columns
# keep modelling columns, then create categorical and dummy variables
bike_new=bike[['season', 'yr', 'mnth', 'holiday', 'weekday', 'workingday', 'weathersit', 'temp', 'atemp', 'hum', 'windspeed', 'cnt']]
bike_new.info()
# cast nominal columns to 'category' so get_dummies one-hot encodes them
bike_new['season']=bike_new['season'].astype('category')
bike_new['weathersit']=bike_new['weathersit'].astype('category')
bike_new['mnth']=bike_new['mnth'].astype('category')
bike_new['weekday']=bike_new['weekday'].astype('category')
bike_new = pd.get_dummies(bike_new, drop_first= True)
bike_new.info()
bike_new.shape
##--------------------- split-holdout----------------
# We should specify 'random_state' so that the train and test data set always have the same rows, respectively
np.random.seed(0)
df_train, df_test = train_test_split(bike_new, train_size = 0.70, test_size = 0.30, random_state = 333)
df_train.columns
# numeric columns only, for the pairwise scatter/density plot
bike_num=df_train[[ 'temp', 'atemp', 'hum', 'windspeed','cnt']]
sns.pairplot(bike_num, diag_kind='kde')
plt.show()
# Box plots of the target by category
#%% display ('ciao')
plt.figure(figsize=(25, 10))
plt.subplot(2,3,1)
sns.boxplot(x = 'season', y = 'cnt', data = bike)
plt.subplot(2,3,2)
sns.boxplot(x = 'mnth', y = 'cnt', data = bike)
plt.subplot(2,3,3)
sns.boxplot(x = 'weathersit', y = 'cnt', data = bike)
plt.subplot(2,3,4)
sns.boxplot(x = 'holiday', y = 'cnt', data = bike)
plt.subplot(2,3,5)
sns.boxplot(x = 'weekday', y = 'cnt', data = bike)
plt.subplot(2,3,6)
sns.boxplot(x = 'workingday', y = 'cnt', data = bike)
plt.show()
####
for col in bike_dummy:
    print(bike_dummy[col].value_counts(ascending=False))
#bonjour
df_train.columns
#%% sns.pairplot(bike_num, diag_kind='kde')
#%% plt.show()
sns.pairplot(bike_num, diag_kind='auto')
plt.show()
# Correlation matrix (the first figure is immediately replaced by the second)
plt.figure(figsize = (25,20))
plt.figure(figsize = (35,30))
sns.heatmap(bike_new.corr(), annot = False, cmap="RdBu")
plt.show()
a=bike_new.corr();
sns.heatmap(a)
sns.heatmap(a,cmap="RdBu")
# =============================================================================
# scale
# =============================================================================
# Min-max scale the numeric columns: fit on train, transform-only on test.
scaler = MinMaxScaler()
num_vars = ['temp', 'atemp', 'hum', 'windspeed','cnt']
df_train[num_vars]= scaler.fit_transform(df_train[num_vars])
df_test[num_vars] = scaler.transform(df_test[num_vars])
# b.head()  # not done for matrices
# NOTE(review): `.loc[num_vars]` selects ROWS labelled 'temp', ... — not
# columns — so the three lines below would raise KeyError / corrupt data; the
# bracket-indexing versions above are the correct ones. Also, fit_transform on
# df_test leaks test statistics into the scaler — use transform only on test.
df_train.loc[num_vars]= scaler.fit_transform(df_train[num_vars])
df_test.loc[num_vars] = scaler.transform(df_test[num_vars])
df_test.loc[num_vars] = scaler.fit_transform(df_test[num_vars])
df_train.head()
bike_dummy.info()
df_train.describe()
#%% Regression
# pop() removes the target column in place, leaving features in the frame
y_train = df_train.pop('cnt')
X_train = df_train
df_test2=df_test.copy()
y_test=df_test.pop('cnt')
X_test=df_test
lm = LinearRegression()
lm.fit(X_train, y_train)
# fit() returns the estimator itself, so `model` is the same object as `lm`
model=lm.fit(X_train, y_train)
predictions = lm.predict(X_test)
plt.scatter(y_test, predictions)
plt.xlabel('TrueValues')
plt.ylabel('Predictions')
print ('Score:', model.score(X_test, y_test))
print ('Score:', model.score(X_train, y_train))
accuracy = metrics.r2_score(y_test, predictions)
#%% feature selection VIF ranking
# RFE(lm, 15): positional n_features_to_select — old scikit-learn API
rfe = RFE(lm, 15)
rfe = rfe.fit(X_train, y_train)
list(zip(X_train.columns,rfe.support_,rfe.ranking_))
col = X_train.columns[rfe.support_]#list of selected features
col
X_train.columns[~rfe.support_]
# check VIF
# NOTE(review): X_train_rfe is used in the next four lines but is only defined
# below ("#apply rfe") — a cell-ordering bug; as a plain script this raises
# NameError. The \"VIF\" escaped quotes are a dump artifact and are not valid
# Python source as-is.
vif = pd.DataFrame()
vif['Features'] = X_train_rfe.columns
vif['VIF'] = [variance_inflation_factor(X_train_rfe.values, i) for i in range(X_train_rfe.shape[1])]
vif['VIF'] = round(vif['VIF'], 2)
vif = vif.sort_values(by = \"VIF\", ascending = False)
#apply rfe
X_train_rfe = X_train[col] #Creating X_test dataframe with RFE selected variables
print('vv')
X_test_rfe = X_test[col]
lm.fit(X_train_rfe, y_train)
model_rfe=lm.fit(X_train_rfe, y_train)
predictions_rfe = lm.predict(X_test_rfe)
plt.scatter(y_test, predictions_rfe)
plt.xlabel('TrueValues')
plt.ylabel('Predictions')
print ('Score:', model.score(X_test_rfe, y_test))
print ('Score:', model.score(X_train_rfe, y_train))
accuracy_rfe = metrics.r2_score(y_test, predictions_rfe)
# add the intercept column statsmodels requires
X_train_lm1 = sm.add_constant(X_train_rfe)
X_test_lm1 = sm.add_constant(X_test_rfe)
#%% OLS regression
lr1 = sm.OLS(y_train, X_train_lm1).fit()
predictions_OLS= lr1.predict(X_test_lm1)
plt.scatter(y_test, predictions_OLS)
plt.xlabel('TrueValues')
plt.ylabel('PredictionsOLS')
accuracy_lr1 = metrics.r2_score(y_test, predictions_OLS)
metrics.r2_score(y_train, lr1.predict(X_train_lm1))
lr1.rsquared
lr1.rsquared_adj
r2=metrics.r2_score(y_test, predictions_OLS)
# adjusted R^2 computed on the TEST split from the out-of-sample r2 above
adjusted_r_squared = 1 - (1-r2)*(len(y_test)-1)/(len(y_test)-X_test_lm1.shape[1]-1)
# Residuals
res=y_train-lr1.predict(X_train_lm1)
sns.distplot((res))
# regression performance: check the p-value of each variable and the global F-stat
lr1.params
print(lr1.summary())
# remove a variable ('atemp' is highly collinear with 'temp') and refit
X_train_new = X_train_rfe.drop(['atemp'], axis = 1)
X_train_lm2 = sm.add_constant(X_train_new)
X_test_new = X_test_rfe.drop(['atemp'], axis = 1)
X_test_lm2 = sm.add_constant(X_test_new)
lr2 = sm.OLS(y_train, X_train_lm2).fit()
predictions_OLS2= lr2.predict(X_test_lm2)
plt.scatter(y_test, predictions_OLS2)
plt.xlabel('TrueValues')
plt.ylabel('PredictionsOLS2')
lr2.params
print(lr2.summary())
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 19 14:21:35 2021
@author: chrysmok
"""
# import the library
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn as skl
# import sklearn.linear_model as skl_lm
from sklearn import preprocessing
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
import sklearn.linear_model as skl_lm
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import RFE
from sklearn.preprocessing import scale
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.metrics import mean_absolute_error
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.stats.outliers_influence import variance_inflation_factor
import statistics as st
from scipy import linalg
from math import sqrt
plt.style.use('seaborn-white')
plt.style.use("seaborn")
pd.set_option('max_columns', 1000)# display in spyder console up to 1000 columns
# bikeDay = pd.read_csv('day.csv', usecols=[1,2,3,4])
bikeDay = pd.read_csv('day.csv')
df = pd.read_csv('Data/Boston.csv', index_col=0)
bikeDay.head()
iris = sns.load_dataset('iris')
iris.head()
from IPython.core.display import display_html
# display(HTML(\"<style>.container { width:80% !important; }</style>\"))
bikeDay.shape
bike=bikeDay
bike.size
bike.isnull().sum()
bike.isnull().sum()
bike.info()
bike.dtypes
bike.describe()
bike.nunique()
# Learning Outcome:Except one column
bike_dup = bike.copy()
bike_dup.shape
#Create a copy of the dataframe, without the 'instant' column
bike_dummy=bike.iloc[:,1:16]
for col in bike_dummy:
print(bike_dummy[col].value_counts(ascending=False), )
bike.columns
# create categorical and then dummy variables
bike_new=bike[['season', 'yr', 'mnth', 'holiday', 'weekday', 'workingday', 'weathersit', 'temp', 'atemp', 'hum', 'windspeed', 'cnt']]
bike_new.info()
bike_new['season']=bike_new['season'].astype('category')
bike_new['weathersit']=bike_new['weathersit'].astype('category')
bike_new['mnth']=bike_new['mnth'].astype('category')
bike_new['weekday']=bike_new['weekday'].astype('category')
bike_new = pd.get_dummies(bike_new, drop_first= True)
bike_new.info()
bike_new.shape
##--------------------- split-holdout----------------
# We should specify 'random_state' so that the train and test data set always have the same rows, respectively
np.random.seed(0)
df_train, df_test = train_test_split(bike_new, train_size = 0.70, test_size = 0.30, random_state = 333)
df_train.columns
bike_num=df_train[[ 'temp', 'atemp', 'hum', 'windspeed','cnt']]
sns.pairplot(bike_num, diag_kind='kde')
plt.show()
# Box plot by catgories
#%% display ('ciao')
plt.figure(figsize=(25, 10))
plt.subplot(2,3,1)
sns.boxplot(x = 'season', y = 'cnt', data = bike)
plt.subplot(2,3,2)
sns.boxplot(x = 'mnth', y = 'cnt', data = bike)
plt.subplot(2,3,3)
sns.boxplot(x = 'weathersit', y = 'cnt', data = bike)
plt.subplot(2,3,4)
sns.boxplot(x = 'holiday', y = 'cnt', data = bike)
plt.subplot(2,3,5)
sns.boxplot(x = 'weekday', y = 'cnt', data = bike)
plt.subplot(2,3,6)
sns.boxplot(x = 'workingday', y = 'cnt', data = bike)
plt.show()
####
for col in bike_dummy:
print(bike_dummy[col].value_counts(ascending=False))
#bonjour
df_train.columns
#%% sns.pairplot(bike_num, diag_kind='kde')
#%% plt.show()
sns.pairplot(bike_num, diag_kind='auto')
plt.show()
#Correlation matrix
plt.figure(figsize = (25,20))
plt.figure(figsize = (35,30))
sns.heatmap(bike_new.corr(), annot = False, cmap="RdBu")
plt.show()
a=bike_new.corr();
sns.heatmap(a)
sns.heatmap(a,cmap="RdBu")
# =============================================================================
# scale
# =============================================================================
scaler = MinMaxScaler()
num_vars = ['temp', 'atemp', 'hum', 'windspeed','cnt']
df_train[num_vars]= scaler.fit_transform(df_train[num_vars])
df_test[num_vars] = scaler.transform(df_test[num_vars])
# b.head()# ne se fait pas pour les matrices
df_train.loc[num_vars]= scaler.fit_transform(df_train[num_vars])
df_test.loc[num_vars] = scaler.transform(df_test[num_vars])
df_test.loc[num_vars] = scaler.fit_transform(df_test[num_vars])
df_train.head()
bike_dummy.info()
df_train.describe()
#%% Regression
y_train = df_train.pop('cnt')
X_train = df_train
df_test2=df_test.copy()
y_test=df_test.pop('cnt')
X_test=df_test
lm = LinearRegression()
lm.fit(X_train, y_train)
model=lm.fit(X_train, y_train)
predictions = lm.predict(X_test)
plt.scatter(y_test, predictions)
plt.xlabel('TrueValues')
plt.ylabel('Predictions')
print ('Score:', model.score(X_test, y_test))
print ('Score:', model.score(X_train, y_train))
accuracy = metrics.r2_score(y_test, predictions)
#%% feature selection VIF ranking
rfe = RFE(lm, 15)
rfe = rfe.fit(X_train, y_train)
list(zip(X_train.columns,rfe.support_,rfe.ranking_))
col = X_train.columns[rfe.support_]#list of selected features
col
X_train.columns[~rfe.support_]
#check VIF
vif = pd.DataFrame()
vif['Features'] = X_train_rfe.columns
vif['VIF'] = [variance_inflation_factor(X_train_rfe.values, i) for i in range(X_train_rfe.shape[1])]
vif['VIF'] = round(vif['VIF'], 2)
vif = vif.sort_values(by = \"VIF\", ascending = False)
#apply rfe
X_train_rfe = X_train[col] #Creating X_test dataframe with RFE selected variables
print('vv')
X_test_rfe = X_test[col]
lm.fit(X_train_rfe, y_train)
model_rfe=lm.fit(X_train_rfe, y_train)
predictions_rfe = lm.predict(X_test_rfe)
plt.scatter(y_test, predictions_rfe)
plt.xlabel('TrueValues')
plt.ylabel('Predictions')
print ('Score:', model.score(X_test_rfe, y_test))
print ('Score:', model.score(X_train_rfe, y_train))
accuracy_rfe = metrics.r2_score(y_test, predictions_rfe)
#add a constant
X_train_lm1 = sm.add_constant(X_train_rfe)
X_test_lm1 = sm.add_constant(X_test_rfe)
#%% OLS regression
lr1 = sm.OLS(y_train, X_train_lm1).fit()
predictions_OLS= lr1.predict(X_test_lm1)
plt.scatter(y_test, predictions_OLS)
plt.xlabel('TrueValues')
plt.ylabel('PredictionsOLS')
accuracy_lr1 = metrics.r2_score(y_test, predictions_OLS)
metrics.r2_score(y_train, lr1.predict(X_train_lm1))
lr1.rsquared
lr1.rsquared_adj
r2=metrics.r2_score(y_test, predictions_OLS)
adjusted_r_squared = 1 - (1-r2)*(len(y_test)-1)/(len(y_test)-X_test_lm1.shape[1]-1)
# Residuals
res=y_train-lr1.predict(X_train_lm1)
sns.distplot((res))
# regression performance check the p-value of each variable and the global F-stat
lr1.params
print(lr1.summary())
#remvove a variable
X_train_new = X_train_rfe.drop(['atemp'], axis = 1)
X_train_lm2 = sm.add_constant(X_train_new)
X_test_new = X_test_rfe.drop(['atemp'], axis = 1)
X_test_lm2 = sm.add_constant(X_test_new)
lr2 = sm.OLS(y_train, X_train_lm2).fit()
predictions_OLS2= lr2.predict(X_test_lm2)
plt.scatter(y_test, predictions_OLS2)
plt.xlabel('TrueValues')
plt.ylabel('PredictionsOLS2')
lr2.params
print(lr2.summary())
|
en
| 0.439388
|
# -*- coding: utf-8 -*- Created on Fri Nov 19 14:21:35 2021
@author: chrysmok # import the library # import sklearn.linear_model as skl_lm # display in spyder console up to 1000 columns # bikeDay = pd.read_csv('day.csv', usecols=[1,2,3,4]) # display(HTML(\"<style>.container { width:80% !important; }</style>\")) # Learning Outcome:Except one column #Create a copy of the dataframe, without the 'instant' column # create categorical and then dummy variables ##--------------------- split-holdout---------------- # We should specify 'random_state' so that the train and test data set always have the same rows, respectively # Box plot by catgories #%% display ('ciao') #### #bonjour #%% sns.pairplot(bike_num, diag_kind='kde') #%% plt.show() #Correlation matrix # ============================================================================= # scale # ============================================================================= # b.head()# ne se fait pas pour les matrices #%% Regression #%% feature selection VIF ranking #list of selected features #check VIF #apply rfe #Creating X_test dataframe with RFE selected variables #add a constant #%% OLS regression # Residuals # regression performance check the p-value of each variable and the global F-stat #remvove a variable
| 2.83935
| 3
|
build.py
|
bugy/rebuilder
| 6
|
6627173
|
#!/usr/bin/env python
from __future__ import print_function
import os.path
import common
import utils.file_utils as file_utils
import utils.mvn_utils as mvn_utils
# CLI options parsed once at import time; `vcs_gateway` presumably abstracts
# access to the underlying VCS (used below to list locally changed files).
(ROOT_PROJECT_PATH, MAVEN_REPO_PATH, MVN_OPTS, ROOT_ONLY, TRACK_UNVERSIONED, vcs_gateway) = common.parse_options()
def is_important(file_path):
    """Ignore IntelliJ *.iml module files when collecting changed files."""
    is_iml_module_file = file_path.endswith(".iml")
    return not is_iml_module_file
def get_unique_name(root_project_path):
    """Derive a filesystem-safe cache-file name from a project path.

    Both path separators and the drive colon are flattened to underscores so
    the result is a valid single file name on every platform. The original
    replaced only the native separator, which left '/' intact on Windows
    (where it is also a legal separator) and produced an invalid name.
    """
    result = root_project_path.replace('\\', "_").replace('/', "_")
    return result.replace(":", "_")
# Collect every locally changed file (optionally including unversioned ones)
# and map each to its nearest enclosing pom.xml.
changed_files = vcs_gateway.get_local_changed_files(ROOT_PROJECT_PATH, not TRACK_UNVERSIONED)
important_files = filter(is_important, changed_files)
pom_paths = set([])
for file_path in important_files:
    file_path = file_utils.normalize_path(file_path)
    if os.path.isdir(file_path):
        parent_path = file_path
    else:
        parent_path = os.path.dirname(file_path)
    # walk up the directory tree until a pom.xml, the project root, or fs root
    while parent_path and not (file_utils.is_root(parent_path)):
        pom_path = os.path.join(parent_path, "pom.xml")
        if os.path.exists(pom_path):
            pom_paths.add(pom_path)
            break
        if parent_path == ROOT_PROJECT_PATH:
            break
        parent_path = os.path.dirname(parent_path)
# remember this run's modules before merging in the previous run's journal
new_in_progress = set(pom_paths)
home_folder = os.path.expanduser('~')
unique_name = get_unique_name(ROOT_PROJECT_PATH)
in_progress_file = os.path.join(home_folder, '.incremaven', unique_name)
# merge poms recorded by a previous (possibly interrupted) run that still exist
prev_in_progress = []
if os.path.exists(in_progress_file):
    prev_in_progress = file_utils.read_file(in_progress_file).split("\n")
    prev_in_progress = filter(lambda line: line != "", prev_in_progress)
for pom_path in prev_in_progress:
    if os.path.exists(pom_path):
        pom_paths.add(pom_path)
# journal the pending set FIRST so a crash mid-build is retried next time
file_utils.write_file(in_progress_file, "\n".join(pom_paths))
projects = common.to_mvn_projects(pom_paths, ROOT_PROJECT_PATH, ROOT_ONLY)
# split projects into stale (full rebuild) vs up-to-date (fast local install)
to_rebuild = []
to_install = []
for project in projects:
    build_date = mvn_utils.target_build_date(project)
    if build_date is None:
        print(str(project) + ' needs rebuild. Artifact is missing in target')
        to_rebuild.append(project)
        continue
    project_src_paths = mvn_utils.get_buildable_paths(project)
    src_modification = file_utils.last_modification(project_src_paths)
    if build_date < src_modification:
        print(str(project) + ' needs rebuild. Last build update: ' + str(build_date))
        to_rebuild.append(project)
    else:
        to_install.append(project)
print('Installing non-changed artifacts to local repository...')
for project in to_install:
    mvn_utils.fast_install(project, MAVEN_REPO_PATH)
print('Rebuilding projects...')
mvn_utils.rebuild(ROOT_PROJECT_PATH, to_rebuild, MVN_OPTS)
# shrink the journal back to only this run's modules after a successful build
file_utils.write_file(in_progress_file, '\n'.join(new_in_progress))
|
#!/usr/bin/env python
from __future__ import print_function
import os.path
import common
import utils.file_utils as file_utils
import utils.mvn_utils as mvn_utils
(ROOT_PROJECT_PATH, MAVEN_REPO_PATH, MVN_OPTS, ROOT_ONLY, TRACK_UNVERSIONED, vcs_gateway) = common.parse_options()
def is_important(file_path):
    """Ignore IntelliJ *.iml module files when collecting changed files."""
    is_iml_module_file = file_path.endswith(".iml")
    return not is_iml_module_file
def get_unique_name(root_project_path):
    """Derive a filesystem-safe cache-file name from a project path.

    Both path separators and the drive colon are flattened to underscores so
    the result is a valid single file name on every platform. The original
    replaced only the native separator, which left '/' intact on Windows
    (where it is also a legal separator) and produced an invalid name.
    """
    result = root_project_path.replace('\\', "_").replace('/', "_")
    return result.replace(":", "_")
changed_files = vcs_gateway.get_local_changed_files(ROOT_PROJECT_PATH, not TRACK_UNVERSIONED)
important_files = filter(is_important, changed_files)
pom_paths = set([])
for file_path in important_files:
file_path = file_utils.normalize_path(file_path)
if os.path.isdir(file_path):
parent_path = file_path
else:
parent_path = os.path.dirname(file_path)
while parent_path and not (file_utils.is_root(parent_path)):
pom_path = os.path.join(parent_path, "pom.xml")
if os.path.exists(pom_path):
pom_paths.add(pom_path)
break
if parent_path == ROOT_PROJECT_PATH:
break
parent_path = os.path.dirname(parent_path)
new_in_progress = set(pom_paths)
home_folder = os.path.expanduser('~')
unique_name = get_unique_name(ROOT_PROJECT_PATH)
in_progress_file = os.path.join(home_folder, '.incremaven', unique_name)
prev_in_progress = []
if os.path.exists(in_progress_file):
prev_in_progress = file_utils.read_file(in_progress_file).split("\n")
prev_in_progress = filter(lambda line: line != "", prev_in_progress)
for pom_path in prev_in_progress:
if os.path.exists(pom_path):
pom_paths.add(pom_path)
file_utils.write_file(in_progress_file, "\n".join(pom_paths))
projects = common.to_mvn_projects(pom_paths, ROOT_PROJECT_PATH, ROOT_ONLY)
to_rebuild = []
to_install = []
for project in projects:
build_date = mvn_utils.target_build_date(project)
if build_date is None:
print(str(project) + ' needs rebuild. Artifact is missing in target')
to_rebuild.append(project)
continue
project_src_paths = mvn_utils.get_buildable_paths(project)
src_modification = file_utils.last_modification(project_src_paths)
if build_date < src_modification:
print(str(project) + ' needs rebuild. Last build update: ' + str(build_date))
to_rebuild.append(project)
else:
to_install.append(project)
print('Installing non-changed artifacts to local repository...')
for project in to_install:
mvn_utils.fast_install(project, MAVEN_REPO_PATH)
print('Rebuilding projects...')
mvn_utils.rebuild(ROOT_PROJECT_PATH, to_rebuild, MVN_OPTS)
file_utils.write_file(in_progress_file, '\n'.join(new_in_progress))
|
ru
| 0.26433
|
#!/usr/bin/env python
| 2.080215
| 2
|
vk_wall_saver/settings.py
|
Nelapa/vk_wall_saver
| 0
|
6627174
|
<gh_stars>0
"""
Django settings for vk_wall_saver project.
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = <KEY>
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_core',
'social_django',
'posts_saver',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'vk_wall_saver.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'vk_wall_saver.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'vk_wall_saver',
'USER': 'vk_wall_saver',
'PASSWORD': 'password',
'PORT': 3306
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# VK social auth settings
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'social_core.backends.vk.VKOAuth2',
)
SOCIAL_AUTH_VK_OAUTH2_KEY = '6368970'
SOCIAL_AUTH_VK_OAUTH2_SECRET = 'zngVBUx18nSetUEULYAR'
SOCIAL_AUTH_VK_OAUTH2_SCOPE = ['wall', 'offline']
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_LOGIN_ERROR_URL = '/login'
LOGIN_REDIRECT_URL = '/login'
LOGOUT_REDIRECT_URL='/login'
|
"""
Django settings for vk_wall_saver project.
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = <KEY>
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_core',
'social_django',
'posts_saver',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'vk_wall_saver.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'vk_wall_saver.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'vk_wall_saver',
'USER': 'vk_wall_saver',
'PASSWORD': 'password',
'PORT': 3306
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# VK social auth settings
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'social_core.backends.vk.VKOAuth2',
)
SOCIAL_AUTH_VK_OAUTH2_KEY = '6368970'
SOCIAL_AUTH_VK_OAUTH2_SECRET = 'zngVBUx18nSetUEULYAR'
SOCIAL_AUTH_VK_OAUTH2_SCOPE = ['wall', 'offline']
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_LOGIN_ERROR_URL = '/login'
LOGIN_REDIRECT_URL = '/login'
LOGOUT_REDIRECT_URL='/login'
|
en
| 0.598257
|
Django settings for vk_wall_saver project. # Build paths inside the project like this: os.path.join(BASE_DIR, ...) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! # SECURITY WARNING: don't run with debug turned on in production! # Application definition # Database # https://docs.djangoproject.com/en/1.11/ref/settings/#databases # Password validation # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators # Internationalization # https://docs.djangoproject.com/en/1.11/topics/i18n/ # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.11/howto/static-files/ # VK social auth settings
| 1.933911
| 2
|
data/test/python/ed4d6d2bd911aca4717c239cd04bbe341b9f3c76Alarm_checkPromiseTemplateTool.py
|
harshp8l/deep-learning-lang-detection
| 84
|
6627175
|
<filename>data/test/python/ed4d6d2bd911aca4717c239cd04bbe341b9f3c76Alarm_checkPromiseTemplateTool.py
from Products.CMFActivity.ActiveResult import ActiveResult
# Zope/ERP5 "Script (Python)": `context` is injected by the framework and a
# module-level `return` is legal in this restricted-Python environment.
portal = context.getPortalObject()
# Expected repository list, configured as a promise parameter; None means the
# promise is not configured at all, so there is nothing to check.
promise_repository = portal.getPromiseParameter('portal_templates', 'repository')
if promise_repository is None:
  return
if promise_repository:
  promise_repository_list = promise_repository.split()
  promise_repository_list.sort()
else:
  promise_repository_list = []
# Repositories actually registered on the template tool; both lists are sorted
# so the comparison below is order-insensitive.
repository_list = portal.portal_templates.getRepositoryList()
repository_list.sort()
active_result = ActiveResult()
if repository_list != promise_repository_list:
  severity = 1
  summary = "Template tool not configured as expected"
  detail = '\n'.join(promise_repository_list)
else:
  severity = 0
  summary = "Nothing to do."
  detail = ""
# Report the check outcome through the alarm's active process.
active_result.edit(
  summary=summary,
  severity=severity,
  detail=detail)
context.newActiveProcess().postResult(active_result)
|
<filename>data/test/python/ed4d6d2bd911aca4717c239cd04bbe341b9f3c76Alarm_checkPromiseTemplateTool.py
# NOTE(review): this reads like a Zope/ERP5 "Script (Python)" body — `context` is
# injected by the framework and a top-level `return` is legal there; it is NOT a
# standalone Python module. TODO confirm before importing or moving this code.
from Products.CMFActivity.ActiveResult import ActiveResult
portal = context.getPortalObject()
# Expected repository configuration, stored as a promise parameter
# (presumably a whitespace-separated list of repository ids — verify format).
promise_repository = portal.getPromiseParameter('portal_templates', 'repository')
# No promise configured at all: nothing to check, exit without posting a result.
if promise_repository is None:
  return
# Normalize the expected list: split on whitespace and sort so it can be
# compared order-insensitively against the actual configuration below.
if promise_repository:
  promise_repository_list = promise_repository.split()
  promise_repository_list.sort()
else:
  promise_repository_list = []
# Actual repositories currently configured on the template tool, sorted the
# same way for a stable comparison.
repository_list = portal.portal_templates.getRepositoryList()
repository_list.sort()
active_result = ActiveResult()
if repository_list != promise_repository_list:
  # Mismatch: report severity 1. Note the detail only shows the *expected*
  # list, not the actual one — the reader must look up the tool themselves.
  severity = 1
  summary = "Template tool not configured as expected"
  detail = '\n'.join(promise_repository_list)
else:
  severity = 0
  summary = "Nothing to do."
  detail = ""
# Record the outcome on a fresh active process so the alarm UI can display it.
active_result.edit(
  summary=summary,
  severity=severity,
  detail=detail)
context.newActiveProcess().postResult(active_result)
|
none
| 1
| 1.740353
| 2
|
|
tests/view_tests/views.py
|
ericholscher/django
| 1
|
6627176
|
from __future__ import unicode_literals
import os
import sys
from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.core.urlresolvers import get_resolver
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, render
from django.template import Context, RequestContext, TemplateDoesNotExist
from django.views.debug import technical_500_response, SafeExceptionReporterFilter
from django.views.decorators.debug import (sensitive_post_parameters,
sensitive_variables)
from django.utils._os import upath
from django.utils.log import getLogger
from . import BrokenException, except_args
dirs = (os.path.join(os.path.dirname(upath(__file__)), 'other_templates'),)
def index_page(request):
    """Return a minimal static HTML page used as a dummy index in tests."""
    return HttpResponse('<html><body>Dummy page</body></html>')
def raises(request):
    """Render the technical 500 page while a raising callable sits in the
    frame's locals, to prove the debug view is not hijacked by it."""
    # Make sure that a callable that raises an exception in the stack frame's
    # local vars won't hijack the technical 500 response. See:
    # http://code.djangoproject.com/ticket/15025
    def callable():
        raise Exception
    try:
        raise Exception
    except Exception:
        # Render the fancy debug 500 page directly with the live exc_info.
        return technical_500_response(request, *sys.exc_info())
def raises500(request):
    """Return the technical 500 debug page for a freshly raised exception."""
    # We need to inspect the HTML generated by the fancy 500 debug view but
    # the test client ignores it, so we send it explicitly.
    try:
        raise Exception
    except Exception:
        return technical_500_response(request, *sys.exc_info())
def raises400(request):
    """Trigger a 400 Bad Request via SuspiciousOperation."""
    raise SuspiciousOperation
def raises403(request):
    """Trigger a 403 Forbidden via PermissionDenied."""
    raise PermissionDenied
def raises404(request):
    """Trigger a 404 by resolving an unroutable empty path."""
    resolver = get_resolver(None)
    resolver.resolve('')
def redirect(request):
    """
    Forces an HTTP redirect.
    """
    return HttpResponseRedirect("target/")
def view_exception(request, n):
    """Raise BrokenException with the n-th canned argument (n is a string)."""
    raise BrokenException(except_args[int(n)])
def template_exception(request, n):
    """Render a template that itself raises, using the n-th canned argument."""
    return render_to_response('debug/template_exception.html',
                              {'arg': except_args[int(n)]})
def jsi18n(request):
    """Render the JavaScript i18n test template."""
    return render_to_response('jsi18n.html')
# Some views to exercise the shortcuts
def render_to_response_view(request):
return render_to_response('debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
})
def render_to_response_view_with_request_context(request):
return render_to_response('debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, context_instance=RequestContext(request))
def render_to_response_view_with_content_type(request):
return render_to_response('debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, content_type='application/x-rendertest')
def render_to_response_view_with_dirs(request):
return render_to_response('render_dirs_test.html', dirs=dirs)
def render_view(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
})
def render_view_with_base_context(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, context_instance=Context())
def render_view_with_content_type(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, content_type='application/x-rendertest')
def render_view_with_status(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, status=403)
def render_view_with_current_app(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, current_app="foobar_app")
def render_view_with_current_app_conflict(request):
# This should fail because we don't passing both a current_app and
# context_instance:
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, current_app="foobar_app", context_instance=RequestContext(request))
def render_with_dirs(request):
return render(request, 'render_dirs_test.html', dirs=dirs)
def raises_template_does_not_exist(request, path='i_dont_exist.html'):
# We need to inspect the HTML generated by the fancy 500 debug view but
# the test client ignores it, so we send it explicitly.
try:
return render_to_response(path)
except TemplateDoesNotExist:
return technical_500_response(request, *sys.exc_info())
def render_no_template(request):
# If we do not specify a template, we need to make sure the debug
# view doesn't blow up.
return render(request, [], {})
def send_log(request, exc_info):
    """Log an Internal Server Error for *request* through 'django.request',
    with the AdminEmailHandler's filters temporarily disabled.

    ``exc_info`` is a ``sys.exc_info()`` triple attached to the log record so
    the admin email includes the traceback.
    """
    logger = getLogger('django.request')
    # The default logging config has a logging filter to ensure admin emails are
    # only sent with DEBUG=False, but since someone might choose to remove that
    # filter, we still want to be able to test the behavior of error emails
    # with DEBUG=True. So we need to remove the filter temporarily.
    admin_email_handler = [
        h for h in logger.handlers
        if h.__class__.__name__ == "AdminEmailHandler"
    ][0]
    orig_filters = admin_email_handler.filters
    admin_email_handler.filters = []
    # NOTE(review): include_html is deliberately left True afterwards to match
    # the original behavior — confirm no other test depends on it being False.
    admin_email_handler.include_html = True
    try:
        logger.error('Internal Server Error: %s', request.path,
            exc_info=exc_info,
            extra={
                'status_code': 500,
                'request': request
            }
        )
    finally:
        # Restore even if a handler raised while emitting; the original code
        # leaked the emptied filter list in that case.
        admin_email_handler.filters = orig_filters
def non_sensitive_view(request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA
sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']) # NOQA
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
@sensitive_variables('sauce')
@sensitive_post_parameters('bacon-key', 'sausage-key')
def sensitive_view(request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA
sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']) # NOQA
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
@sensitive_variables()
@sensitive_post_parameters()
def paranoid_view(request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA
sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']) # NOQA
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
def sensitive_args_function_caller(request):
try:
sensitive_args_function(''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']))
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
@sensitive_variables('sauce')
def sensitive_args_function(sauce):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA
raise Exception
def sensitive_kwargs_function_caller(request):
try:
sensitive_kwargs_function(''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']))
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
@sensitive_variables('sauce')
def sensitive_kwargs_function(sauce=None):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA
raise Exception
class UnsafeExceptionReporterFilter(SafeExceptionReporterFilter):
    """
    Ignores all the filtering done by its parent class.
    """
    def get_post_parameters(self, request):
        # Return POST data unredacted (parent would mask sensitive params).
        return request.POST
    def get_traceback_frame_variables(self, request, tb_frame):
        # Expose every local variable of the frame, unfiltered.
        return tb_frame.f_locals.items()
@sensitive_variables()
@sensitive_post_parameters()
def custom_exception_reporter_filter_view(request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA
sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']) # NOQA
request.exception_reporter_filter = UnsafeExceptionReporterFilter()
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
class Klass(object):
    """Holds a method decorated with @sensitive_variables, to test that the
    decorator works on bound methods as well as plain functions."""
    @sensitive_variables('sauce')
    def method(self, request):
        # Do not just use plain strings for the variables' values in the code
        # so that the tests don't return false positives when the function's
        # source is displayed in the exception report.
        cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])  # NOQA
        sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])  # NOQA
        try:
            raise Exception
        except Exception:
            exc_info = sys.exc_info()
            # Exercise the admin-email logging path, then render the debug 500.
            send_log(request, exc_info)
            return technical_500_response(request, *exc_info)
def sensitive_method_view(request):
return Klass().method(request)
@sensitive_variables('sauce')
@sensitive_post_parameters('bacon-key', 'sausage-key')
def multivalue_dict_key_error(request):
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA
sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']) # NOQA
try:
request.POST['bar']
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
|
from __future__ import unicode_literals
import os
import sys
from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.core.urlresolvers import get_resolver
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, render
from django.template import Context, RequestContext, TemplateDoesNotExist
from django.views.debug import technical_500_response, SafeExceptionReporterFilter
from django.views.decorators.debug import (sensitive_post_parameters,
sensitive_variables)
from django.utils._os import upath
from django.utils.log import getLogger
from . import BrokenException, except_args
dirs = (os.path.join(os.path.dirname(upath(__file__)), 'other_templates'),)
def index_page(request):
"""Dummy index page"""
return HttpResponse('<html><body>Dummy page</body></html>')
def raises(request):
# Make sure that a callable that raises an exception in the stack frame's
# local vars won't hijack the technical 500 response. See:
# http://code.djangoproject.com/ticket/15025
def callable():
raise Exception
try:
raise Exception
except Exception:
return technical_500_response(request, *sys.exc_info())
def raises500(request):
# We need to inspect the HTML generated by the fancy 500 debug view but
# the test client ignores it, so we send it explicitly.
try:
raise Exception
except Exception:
return technical_500_response(request, *sys.exc_info())
def raises400(request):
raise SuspiciousOperation
def raises403(request):
raise PermissionDenied
def raises404(request):
resolver = get_resolver(None)
resolver.resolve('')
def redirect(request):
"""
Forces an HTTP redirect.
"""
return HttpResponseRedirect("target/")
def view_exception(request, n):
raise BrokenException(except_args[int(n)])
def template_exception(request, n):
return render_to_response('debug/template_exception.html',
{'arg': except_args[int(n)]})
def jsi18n(request):
return render_to_response('jsi18n.html')
# Some views to exercise the shortcuts
def render_to_response_view(request):
return render_to_response('debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
})
def render_to_response_view_with_request_context(request):
return render_to_response('debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, context_instance=RequestContext(request))
def render_to_response_view_with_content_type(request):
return render_to_response('debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, content_type='application/x-rendertest')
def render_to_response_view_with_dirs(request):
return render_to_response('render_dirs_test.html', dirs=dirs)
def render_view(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
})
def render_view_with_base_context(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, context_instance=Context())
def render_view_with_content_type(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, content_type='application/x-rendertest')
def render_view_with_status(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, status=403)
def render_view_with_current_app(request):
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, current_app="foobar_app")
def render_view_with_current_app_conflict(request):
# This should fail because we don't passing both a current_app and
# context_instance:
return render(request, 'debug/render_test.html', {
'foo': 'FOO',
'bar': 'BAR',
}, current_app="foobar_app", context_instance=RequestContext(request))
def render_with_dirs(request):
return render(request, 'render_dirs_test.html', dirs=dirs)
def raises_template_does_not_exist(request, path='i_dont_exist.html'):
# We need to inspect the HTML generated by the fancy 500 debug view but
# the test client ignores it, so we send it explicitly.
try:
return render_to_response(path)
except TemplateDoesNotExist:
return technical_500_response(request, *sys.exc_info())
def render_no_template(request):
# If we do not specify a template, we need to make sure the debug
# view doesn't blow up.
return render(request, [], {})
def send_log(request, exc_info):
logger = getLogger('django.request')
# The default logging config has a logging filter to ensure admin emails are
# only sent with DEBUG=False, but since someone might choose to remove that
# filter, we still want to be able to test the behavior of error emails
# with DEBUG=True. So we need to remove the filter temporarily.
admin_email_handler = [
h for h in logger.handlers
if h.__class__.__name__ == "AdminEmailHandler"
][0]
orig_filters = admin_email_handler.filters
admin_email_handler.filters = []
admin_email_handler.include_html = True
logger.error('Internal Server Error: %s', request.path,
exc_info=exc_info,
extra={
'status_code': 500,
'request': request
}
)
admin_email_handler.filters = orig_filters
def non_sensitive_view(request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA
sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']) # NOQA
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
@sensitive_variables('sauce')
@sensitive_post_parameters('bacon-key', 'sausage-key')
def sensitive_view(request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA
sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']) # NOQA
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
@sensitive_variables()
@sensitive_post_parameters()
def paranoid_view(request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA
sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']) # NOQA
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
def sensitive_args_function_caller(request):
try:
sensitive_args_function(''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']))
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
@sensitive_variables('sauce')
def sensitive_args_function(sauce):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA
raise Exception
def sensitive_kwargs_function_caller(request):
try:
sensitive_kwargs_function(''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']))
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
@sensitive_variables('sauce')
def sensitive_kwargs_function(sauce=None):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA
raise Exception
class UnsafeExceptionReporterFilter(SafeExceptionReporterFilter):
"""
Ignores all the filtering done by its parent class.
"""
def get_post_parameters(self, request):
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
return tb_frame.f_locals.items()
@sensitive_variables()
@sensitive_post_parameters()
def custom_exception_reporter_filter_view(request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's source
# is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA
sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']) # NOQA
request.exception_reporter_filter = UnsafeExceptionReporterFilter()
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
class Klass(object):
@sensitive_variables('sauce')
def method(self, request):
# Do not just use plain strings for the variables' values in the code
# so that the tests don't return false positives when the function's
# source is displayed in the exception report.
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA
sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']) # NOQA
try:
raise Exception
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
def sensitive_method_view(request):
return Klass().method(request)
@sensitive_variables('sauce')
@sensitive_post_parameters('bacon-key', 'sausage-key')
def multivalue_dict_key_error(request):
cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd']) # NOQA
sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e']) # NOQA
try:
request.POST['bar']
except Exception:
exc_info = sys.exc_info()
send_log(request, exc_info)
return technical_500_response(request, *exc_info)
|
en
| 0.799564
|
Dummy index page # Make sure that a callable that raises an exception in the stack frame's # local vars won't hijack the technical 500 response. See: # http://code.djangoproject.com/ticket/15025 # We need to inspect the HTML generated by the fancy 500 debug view but # the test client ignores it, so we send it explicitly. Forces an HTTP redirect. # Some views to exercise the shortcuts # This should fail because we don't passing both a current_app and # context_instance: # We need to inspect the HTML generated by the fancy 500 debug view but # the test client ignores it, so we send it explicitly. # If we do not specify a template, we need to make sure the debug # view doesn't blow up. # The default logging config has a logging filter to ensure admin emails are # only sent with DEBUG=False, but since someone might choose to remove that # filter, we still want to be able to test the behavior of error emails # with DEBUG=True. So we need to remove the filter temporarily. # Do not just use plain strings for the variables' values in the code # so that the tests don't return false positives when the function's source # is displayed in the exception report. # NOQA # NOQA # Do not just use plain strings for the variables' values in the code # so that the tests don't return false positives when the function's source # is displayed in the exception report. # NOQA # NOQA # Do not just use plain strings for the variables' values in the code # so that the tests don't return false positives when the function's source # is displayed in the exception report. # NOQA # NOQA # Do not just use plain strings for the variables' values in the code # so that the tests don't return false positives when the function's source # is displayed in the exception report. # NOQA # Do not just use plain strings for the variables' values in the code # so that the tests don't return false positives when the function's source # is displayed in the exception report. 
# NOQA Ignores all the filtering done by its parent class. # Do not just use plain strings for the variables' values in the code # so that the tests don't return false positives when the function's source # is displayed in the exception report. # NOQA # NOQA # Do not just use plain strings for the variables' values in the code # so that the tests don't return false positives when the function's # source is displayed in the exception report. # NOQA # NOQA # NOQA # NOQA
| 2.012591
| 2
|
dto.py
|
ArchOJ/archoj-judged
| 0
|
6627177
|
from datetime import datetime, timedelta
from enum import Enum
from typing import List, Optional
from pydantic import BaseModel, ByteSize, Extra, Field
class DTO(BaseModel):
    """Immutable base class for all judge message/data-transfer objects."""
    class Config:
        # Instances are read-only after construction (pydantic v1 option).
        allow_mutation = False
        # FIXME:
        # During development, forbidding extra fields makes it easier to discover programming errors.
        # However, for forward compatibility, extra fields should be allowed in production so that we can upgrade
        # message producers, which may introduce new fields, without shutting down running judge daemons.
        extra = Extra.forbid
        # Serialize datetimes as POSIX timestamps in JSON output.
        json_encoders = {datetime: lambda v: v.timestamp()}
        # NOTE(review): `json_decoders` is not a documented pydantic v1 Config
        # key — verify it is actually honored; datetime parsing may silently
        # fall back to pydantic's default behavior.
        json_decoders = {datetime: lambda v: datetime.fromtimestamp(v)}
class FileEntry(DTO):
    """One submitted source file: a path plus its full text content."""
    path: str
    content: str
class JudgeRequest(DTO):
    """Request to judge a submission against a problem."""
    problemId: int
    submissionId: str
    files: List[FileEntry]
    # TODO: extra data, e.g. self-test input, self-test JUnit
class Verdict(Enum):
    """Per-step outcome codes reported during judging."""
    ACCEPTED = 'AC'
    ERROR = 'ERR'
    TIME_LIMIT_EXCEEDED = 'TLE'
    MEMORY_LIMIT_EXCEEDED = 'MLE'
    INTERNAL_ERROR = 'IE'
class JudgeStatus(Enum):
    """Overall status of a completed judge run."""
    OK = 'OK'
    BAD_SUBMISSION = 'BAD'
    INTERNAL_ERROR = 'IE'
class JudgeStepDetails(DTO):
    """Resource usage and exit information for a single judge step."""
    wallTime: timedelta
    cpuTime: timedelta
    memory: ByteSize
    exitCode: int
    exitSignal: int
class JudgeProgress(DTO):
    """Incremental progress report for one named step of a submission."""
    timestamp: datetime = Field(default_factory=datetime.now)
    submissionId: str
    step: str
    verdict: Verdict
    message: Optional[str]  # shown to students
    log: Optional[str]  # not shown to students
    details: Optional[JudgeStepDetails]  # if None, sth goes wrong in this step
class JudgeResult(DTO):  # Final result
    """Final outcome for a submission, with score and overall status."""
    timestamp: datetime = Field(default_factory=datetime.now)
    submissionId: str
    score: float
    ignored: bool
    status: JudgeStatus
    message: str
|
from datetime import datetime, timedelta
from enum import Enum
from typing import List, Optional
from pydantic import BaseModel, ByteSize, Extra, Field
class DTO(BaseModel):
    """Immutable base class for all judge message/data-transfer objects."""
    class Config:
        # Instances are read-only after construction (pydantic v1 option).
        allow_mutation = False
        # FIXME:
        # During development, forbidding extra fields makes it easier to discover programming errors.
        # However, for forward compatibility, extra fields should be allowed in production so that we can upgrade
        # message producers, which may introduce new fields, without shutting down running judge daemons.
        extra = Extra.forbid
        # Serialize datetimes as POSIX timestamps in JSON output.
        json_encoders = {datetime: lambda v: v.timestamp()}
        # NOTE(review): `json_decoders` is not a documented pydantic v1 Config
        # key — verify it is actually honored; datetime parsing may silently
        # fall back to pydantic's default behavior.
        json_decoders = {datetime: lambda v: datetime.fromtimestamp(v)}
class FileEntry(DTO):
    """One submitted source file: a path plus its full text content."""
    path: str
    content: str
class JudgeRequest(DTO):
    """Request to judge a submission against a problem."""
    problemId: int
    submissionId: str
    files: List[FileEntry]
    # TODO: extra data, e.g. self-test input, self-test JUnit
class Verdict(Enum):
    """Per-step outcome codes reported during judging."""
    ACCEPTED = 'AC'
    ERROR = 'ERR'
    TIME_LIMIT_EXCEEDED = 'TLE'
    MEMORY_LIMIT_EXCEEDED = 'MLE'
    INTERNAL_ERROR = 'IE'
class JudgeStatus(Enum):
    """Overall status of a completed judge run."""
    OK = 'OK'
    BAD_SUBMISSION = 'BAD'
    INTERNAL_ERROR = 'IE'
class JudgeStepDetails(DTO):
    """Resource usage and exit information for a single judge step."""
    wallTime: timedelta
    cpuTime: timedelta
    memory: ByteSize
    exitCode: int
    exitSignal: int
class JudgeProgress(DTO):
    """Incremental progress report for one named step of a submission."""
    timestamp: datetime = Field(default_factory=datetime.now)
    submissionId: str
    step: str
    verdict: Verdict
    message: Optional[str]  # shown to students
    log: Optional[str]  # not shown to students
    details: Optional[JudgeStepDetails]  # if None, sth goes wrong in this step
class JudgeResult(DTO):  # Final result
    """Final outcome for a submission, with score and overall status."""
    timestamp: datetime = Field(default_factory=datetime.now)
    submissionId: str
    score: float
    ignored: bool
    status: JudgeStatus
    message: str
|
en
| 0.874774
|
# FIXME: # During development, forbidding extra fields makes it easier to discover programming errors. # However, for forward compatibility, extra fields should be allowed in production so that we can upgrade # message producers, which may introduce new fields, without shutting down running judge daemons. # TODO: extra data, e.g. self-test input, self-test JUnit # shown to students # not shown to students # if None, sth goes wrong in this step # Final result
| 2.54496
| 3
|
video_downloader/main.py
|
GussSoares/video-downloader
| 0
|
6627178
|
<reponame>GussSoares/video-downloader
"""entry point main"""
|
"""entry point main"""
|
en
| 0.704381
|
entry point main
| 0.89617
| 1
|
app/wqFull/CQtype/clickMap.py
|
fkwai/geolearn
| 0
|
6627179
|
from mpl_toolkits import basemap
import pandas as pd
from hydroDL.data import dbBasin, gageII, usgs
import numpy as np
import matplotlib.pyplot as plt
from hydroDL.post import axplot, figplot
from hydroDL import kPath, utils
import json
import os
from hydroDL.app.waterQuality import WRTDS
import statsmodels.api as sm
import scipy
from hydroDL.app.waterQuality import cqType
import importlib
import time
# load data
# Analysis script: classify concentration-discharge (C-Q) relationships per
# site/code and plot them on clickable maps. Runs top-to-bottom as a script.
dataName = 'G200'
DF = dbBasin.DataFrameBasin(dataName)
siteNoLst = DF.siteNoLst
codeLst = DF.varC  # water-quality variable codes — presumably USGS parameter codes, verify
ns = len(siteNoLst)
nc = len(codeLst)
# load pars
# Fitted C-Q curve parameters previously saved as a .npz archive.
filePar = os.path.join(kPath.dirWQ, 'modelStat', 'typeCQ', dataName+'.npz')
npz = np.load(filePar)
matA = npz['matA']
matB = npz['matB']
matP = npz['matP']
# get types
# reload() so edits to the plotting/classification modules take effect in an
# interactive session without restarting the interpreter.
importlib.reload(axplot)
importlib.reload(cqType)
tp = cqType.par2type(matB, matP)
# plot map
code = '00955'  # NOTE(review): presumably the USGS code for silica — confirm
indC = codeLst.index(code)
tpC = tp[:, indC]
dfCrd = gageII.readData(
    varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)
lat = dfCrd['LAT_GAGE'].values
lon = dfCrd['LNG_GAGE'].values
fig, ax = plt.subplots(1, 1)
vLst, cLst, mLst, labLst = cqType.getPlotArg()
axplot.mapPointClass(ax, lat, lon, tp[:, indC], vLst=vLst, mLst=mLst,
                     cLst=cLst, labLst=labLst)
fig.show()
# CQ plot
# Pick the 11th site classified as type 4 and plot its C-Q curve.
indS = np.where(tpC == 4)[0][10]
fig, ax = plt.subplots(1, 1)
Q = DF.q[:, indS, 1]
C = DF.c[:, indS, indC]
a = matA[indS, indC, :]
b = matB[indS, indC, :]
p = matP[indS, indC, :]
cqType.plotCQ(ax, Q, C, a, b, p)
fig.show()
# tsmap
# Globals below are read by funcM/funcP for the interactive clickable map.
codePlot = ['00915', '00955']
dfCrd = gageII.readData(
    varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)
lat = dfCrd['LAT_GAGE'].values
lon = dfCrd['LNG_GAGE'].values
vLst, cLst, mLst, labLst = cqType.getPlotArg()
def funcM():
    """Build the map figure (one panel per code in `codePlot`) and an empty
    per-site figure, for figplot.clickMap. Reads script globals: codePlot,
    codeLst, lat, lon, tp, vLst/mLst/cLst/labLst."""
    nM = len(codePlot)
    figM, axM = plt.subplots(nM, 1, figsize=(8, 6))
    for k, code in enumerate(codePlot):
        indC = codeLst.index(code)
        axplot.mapPointClass(axM[k], lat, lon, tp[:, indC],
                             vLst=vLst, mLst=mLst, cLst=cLst, labLst=labLst)
        title = '{} {}'.format(usgs.codePdf.loc[code]['shortName'], code)
        axM[k].set_title(title)
    figP, axP = plt.subplots(nM, 1, figsize=(8, 6))
    # Normalize axP to an array so funcP can always index it, even with one panel.
    axP = np.array([axP]) if nM == 1 else axP
    return figM, axM, figP, axP, lon, lat
def funcP(iP, axP):
    """Plot the C-Q relationship of site index `iP` for each code in
    `codePlot`, one panel per code. Called by figplot.clickMap when a map
    point is selected. Reads script globals: siteNoLst, codePlot, codeLst,
    DF, matA, matB, matP."""
    siteNo = siteNoLst[iP]
    for k, code in enumerate(codePlot):
        indC = codeLst.index(code)
        Q = DF.q[:, iP, 1]
        C = DF.c[:, iP, indC]
        # Fitted curve parameters for this site/code.
        a = matA[iP, indC, :]
        b = matB[iP, indC, :]
        p = matP[iP, indC, :]
        cqType.plotCQ(axP[k], Q, C, a, b, p)
        title = '{} {} {}'.format(
            siteNo, usgs.codePdf.loc[code]['shortName'], code)
        axP[k].set_title(title)
importlib.reload(figplot)
figM, figP = figplot.clickMap(funcM, funcP)
|
from mpl_toolkits import basemap
import pandas as pd
from hydroDL.data import dbBasin, gageII, usgs
import numpy as np
import matplotlib.pyplot as plt
from hydroDL.post import axplot, figplot
from hydroDL import kPath, utils
import json
import os
from hydroDL.app.waterQuality import WRTDS
import statsmodels.api as sm
import scipy
from hydroDL.app.waterQuality import cqType
import importlib
import time
# load data
dataName = 'G200'
DF = dbBasin.DataFrameBasin(dataName)
siteNoLst = DF.siteNoLst
codeLst = DF.varC
ns = len(siteNoLst)
nc = len(codeLst)
# load pars
filePar = os.path.join(kPath.dirWQ, 'modelStat', 'typeCQ', dataName+'.npz')
npz = np.load(filePar)
matA = npz['matA']
matB = npz['matB']
matP = npz['matP']
# get types
importlib.reload(axplot)
importlib.reload(cqType)
tp = cqType.par2type(matB, matP)
# plot map
code = '00955'
indC = codeLst.index(code)
tpC = tp[:, indC]
dfCrd = gageII.readData(
varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)
lat = dfCrd['LAT_GAGE'].values
lon = dfCrd['LNG_GAGE'].values
fig, ax = plt.subplots(1, 1)
vLst, cLst, mLst, labLst = cqType.getPlotArg()
axplot.mapPointClass(ax, lat, lon, tp[:, indC], vLst=vLst, mLst=mLst,
cLst=cLst, labLst=labLst)
fig.show()
# CQ plot
indS = np.where(tpC == 4)[0][10]
fig, ax = plt.subplots(1, 1)
Q = DF.q[:, indS, 1]
C = DF.c[:, indS, indC]
a = matA[indS, indC, :]
b = matB[indS, indC, :]
p = matP[indS, indC, :]
cqType.plotCQ(ax, Q, C, a, b, p)
fig.show()
# tsmap
codePlot = ['00915', '00955']
dfCrd = gageII.readData(
varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)
lat = dfCrd['LAT_GAGE'].values
lon = dfCrd['LNG_GAGE'].values
vLst, cLst, mLst, labLst = cqType.getPlotArg()
def funcM():
nM = len(codePlot)
figM, axM = plt.subplots(nM, 1, figsize=(8, 6))
for k, code in enumerate(codePlot):
indC = codeLst.index(code)
axplot.mapPointClass(axM[k], lat, lon, tp[:, indC],
vLst=vLst, mLst=mLst, cLst=cLst, labLst=labLst)
title = '{} {}'.format(usgs.codePdf.loc[code]['shortName'], code)
axM[k].set_title(title)
figP, axP = plt.subplots(nM, 1, figsize=(8, 6))
axP = np.array([axP]) if nM == 1 else axP
return figM, axM, figP, axP, lon, lat
def funcP(iP, axP):
    """Draw the C-Q relation of site ``iP`` for each code in ``codePlot``,
    one axis per code, using the fitted parameters in matA/matB/matP."""
    siteNo = siteNoLst[iP]
    for k, cd in enumerate(codePlot):
        ic = codeLst.index(cd)
        q = DF.q[:, iP, 1]
        c = DF.c[:, iP, ic]
        parA, parB, parP = (m[iP, ic, :] for m in (matA, matB, matP))
        cqType.plotCQ(axP[k], q, c, parA, parB, parP)
        label = '{} {} {}'.format(
            siteNo, usgs.codePdf.loc[cd]['shortName'], cd)
        axP[k].set_title(label)


# Launch the interactive view: clicking a site on the map figure redraws
# the per-site CQ plots via funcP.
importlib.reload(figplot)
figM, figP = figplot.clickMap(funcM, funcP)
|
en
| 0.500535
|
# load data # load pars # get types # plot map # CQ plot # tsmap
| 2.135469
| 2
|
memeStream/serializers.py
|
Pratyush1606/xmeme
| 3
|
6627180
|
from rest_framework import serializers
from memeStream.models import Meme
class MemeSerializer(serializers.ModelSerializer):
    """Serializer for the Meme model.

    The explicit field declarations below override the model-derived fields
    only to attach ``style`` dicts, which DRF's browsable-API form renderer
    uses as input placeholders.
    """
    # Placeholder text shown in the browsable-API form widgets.
    url = serializers.URLField(style={
        "placeholder": "Meme Url"
    })
    name = serializers.CharField(style={
        "placeholder": "Owner name"
    })
    caption = serializers.CharField(style={
        "placeholder": "Caption"
    })

    class Meta:
        # Serialize these Meme fields; "id" is the auto primary key.
        model = Meme
        fields = ["id", "name", "url", "caption"]
|
from rest_framework import serializers
from memeStream.models import Meme
class MemeSerializer(serializers.ModelSerializer):
# overriding the model fields with new having some styling parameters like placeholder
url = serializers.URLField(style={
"placeholder": "Meme Url"
})
name = serializers.CharField(style={
"placeholder": "Owner name"
})
caption = serializers.CharField(style={
"placeholder": "Caption"
})
# defining the meta class for setting the model and fields for the serialiser
class Meta:
model = Meme
fields = ["id", "name", "url", "caption"]
|
en
| 0.761006
|
# overriding the model fields with new having some styling parameters like placeholder # defining the meta class for setting the model and fields for the serialiser
| 2.577643
| 3
|
ibkr_app.py
|
farukuslu/Connecting-Dash-to-an-Execution-System-on-Interactive-Brokers-App
| 1
|
6627181
|
from ib_insync import *
from os import listdir, remove
from time import sleep
import pickle
import pandas as pd
from helper_functions import *
# Define your variables here ###########################################################################################
sampling_rate = 5  # How often, in seconds, to check for inputs from Dash?
# For TWS Paper account, default port is 7497
# For IBG Paper account, default port is 4002
port = 7497
# Master client id -- set it in API Settings within TWS or IBG.
master_client_id = 10645
# Dedicated client id used ONLY for submitting orders (see note below).
orders_client_id = 1111
client_id = 3
# Paper-trading account number -- fill in your own.
acc_number = 'DU3527359'
########################################################################################################################

# Clear out any io files left over from old runs.
check_for_and_del_io_files('currency_pair_history.csv')

# Create an IB app (an instance of ib_insync's IB class) and connect it to a
# running instance of IBG or TWS.
ib = IB()
ib.connect(host='127.0.0.1', port=port, clientId=client_id)

# Block until the connection is actually established.
while not ib.isConnected():
    sleep(.01)
print('Connection Successful!')

# Main polling loop: watch the working directory for files dropped by Dash.
while True:
    # Dash wrote a currency pair request (e.g. "AUDCAD").
    if 'currency_pair.txt' in listdir():
        # BUGFIX: use a context manager so the file handle is closed even if
        # reading raises (the original open/read/close leaked on error).
        with open('currency_pair.txt', 'r') as currency_pair_file:
            info_from_currency_pair_txt = currency_pair_file.read()
        check_for_and_del_io_files('currency_pair.txt')

        # Build a Forex contract from the pair string and fetch its history.
        # If endDateTime/durationStr/barSizeSetting etc. should be driven from
        # Dash, pickle a dict there and load it here like the order below.
        contract = Forex(info_from_currency_pair_txt)
        bars = ib.reqHistoricalData(
            contract,
            endDateTime='', durationStr='30 D', barSizeSetting='1 hour', whatToShow='MIDPOINT', useRTH=True
        )
        # Convert the BarDataList to a DataFrame and hand it back to Dash as CSV.
        currency_pair_data = util.df(bars)
        currency_pair_data.to_csv('currency_pair_history.csv')

    # Dash wrote a pickled order dict: {"action", "trade_currency", "trade_amt"}.
    if 'trade_order.p' in listdir():
        with open("trade_order.p", "rb") as order_file:
            trd_order = pickle.load(order_file)
        print(trd_order)

        # Use a SEPARATE IB() connection with a dedicated client id just for
        # order entry: IB's automatic order-id assignment is only trustworthy
        # on a client id reserved exclusively for orders. Disconnect as soon
        # as the order is confirmed filled.
        ib_orders = IB()
        ib_orders.connect(host='127.0.0.1', port=port, clientId=orders_client_id, account=acc_number)

        contract = Forex(trd_order['trade_currency'])
        order = MarketOrder(trd_order['action'], trd_order['trade_amt'])
        # BUGFIX: place the order on ib_orders (the dedicated connection),
        # not on ib -- the original defeated the whole point of the second
        # connection by calling ib.placeOrder/ib.sleep.
        new_order = ib_orders.placeOrder(contract, order)

        # new_order is a Trade object kept up to date by ib_insync; wait here
        # until the market order reports Filled. ib_orders.sleep(0) services
        # the async socket (plain time.sleep would starve it).
        while not new_order.orderStatus.status == 'Filled':
            ib_orders.sleep(0)
        ib_orders.disconnect()

        check_for_and_del_io_files('trade_order.p')

    # Throttle the polling loop.
    ib.sleep(sampling_rate)
|
from ib_insync import *
from os import listdir, remove
from time import sleep
import pickle
import pandas as pd
from helper_functions import *
# Define your variables here ###########################################################################################
sampling_rate = 5 # How often, in seconds, to check for inputs from Dash?
# For TWS Paper account, default port is 7497
# For IBG Paper account, default port is 4002
port = 7497
# choose your master id. Mine is 10645. You can use whatever you want, just set it in API Settings within TWS or IBG.
master_client_id = 10645
# choose your dedicated id just for orders. I picked 1111.
orders_client_id = 1111
client_id = 3
# account number: you'll need to fill in yourself. The below is one of my paper trader account numbers.
acc_number = 'DU3527359'
########################################################################################################################
# Run your helper function to clear out any io files left over from old runs
check_for_and_del_io_files('currency_pair_history.csv')
# Create an IB app; i.e., an instance of the IB() class from the ib_insync package
ib = IB()
# Connect your app to a running instance of IBG or TWS
ib.connect(host='127.0.0.1', port=port, clientId=client_id)
# Make sure you're connected -- stay in this while loop until ib.isConnected() is True.
while not ib.isConnected():
sleep(.01)
# If connected, script proceeds and prints a success message.
print('Connection Successful!')
# Main while loop of the app. Stay in this loop until the app is stopped by the user.
while True:
# If the app finds a file named 'currency_pair.txt' in the current directory, enter this code block.
if 'currency_pair.txt' in listdir():
# Code goes here...
currency_pair_connection = open('currency_pair.txt', 'r')
info_from_currency_pair_txt = currency_pair_connection.read()
currency_pair_connection.close()
check_for_and_del_io_files('currency_pair.txt')
#Use the value (which is a string defining a currency pair, e.g., "AUDCAD")
#to create a contract object of type Forex (from the ib_insync module)
contract = Forex(info_from_currency_pair_txt)
# Note that here, if you wanted to make inputs for endDateTime, durationStr, barSizeSetting, etc within the Dash
# app, then you could save a dictionary as a pickle and import it here like we do below for the order.
bars = ib.reqHistoricalData(
contract, # <<- pass in your contract object here
endDateTime='', durationStr='30 D', barSizeSetting='1 hour', whatToShow='MIDPOINT', useRTH=True
)
#convert bars to data frame
#print(bars)
# currency_pair_data = pd.DataFrame(bars)
currency_pair_data = util.df(bars)
# Save a CSV file
currency_pair_data.to_csv('currency_pair_history.csv')
# Code goes here...
# pass -- not return -- because this function doesn't return a value. It's called for what it does. In computer
# science, we say that it's called for its 'side effects'.
pass
# If there's a file named trade_order.p in listdir(), then enter the loop below.
if 'trade_order.p' in listdir():
trd_order = pickle.load(open("trade_order.p", "rb"))
print(trd_order)
# Create a special instance of IB() JUST for entering orders.
# The reason for this is because the way that Interactive Brokers automatically provides valid order IDs to
# ib_insync is not trustworthy enough to really rely on. It's best practice to set aside a dedicated client ID
# to ONLY be used for submitting orders, and close the connection when the order is successfully submitted.
# your code goes here
ib_orders = IB()
ib_orders.connect(host='127.0.0.1', port=port, clientId=orders_client_id, account=acc_number)
# "action": action,
# "trade_currency": trade_currency,
# "trade_amt": trade_amt
contract = Forex(trd_order['trade_currency'])
order = MarketOrder(trd_order['action'], trd_order['trade_amt'],)
new_order = ib.placeOrder(contract, order)
ib.sleep(1)
new_order.log
# The new_order object returned by the call to ib_orders.placeOrder() that you've written is an object of class
# `trade` that is kept continually updated by the `ib_insync` machinery. It's a market order; as such, it will
# be filled immediately.
# In this while loop, we wait for confirmation that new_order filled.
while not new_order.orderStatus.status == 'Filled':
ib_orders.sleep(0) # we use ib_orders.sleep(0) from the ib_insync module because the async socket connection
# is not built for the normal time.sleep() function.
ib_orders.disconnect()
# your code goes here
check_for_and_del_io_files('trade_order.p')
# pass: same reason as above.
pass
# sleep, for the while loop.
ib.sleep(sampling_rate)
|
en
| 0.819538
|
# Define your variables here ########################################################################################### # How often, in seconds, to check for inputs from Dash? # For TWS Paper account, default port is 7497 # For IBG Paper account, default port is 4002 # choose your master id. Mine is 10645. You can use whatever you want, just set it in API Settings within TWS or IBG. # choose your dedicated id just for orders. I picked 1111. # account number: you'll need to fill in yourself. The below is one of my paper trader account numbers. ######################################################################################################################## # Run your helper function to clear out any io files left over from old runs # Create an IB app; i.e., an instance of the IB() class from the ib_insync package # Connect your app to a running instance of IBG or TWS # Make sure you're connected -- stay in this while loop until ib.isConnected() is True. # If connected, script proceeds and prints a success message. # Main while loop of the app. Stay in this loop until the app is stopped by the user. # If the app finds a file named 'currency_pair.txt' in the current directory, enter this code block. # Code goes here... #Use the value (which is a string defining a currency pair, e.g., "AUDCAD") #to create a contract object of type Forex (from the ib_insync module) # Note that here, if you wanted to make inputs for endDateTime, durationStr, barSizeSetting, etc within the Dash # app, then you could save a dictionary as a pickle and import it here like we do below for the order. # <<- pass in your contract object here #convert bars to data frame #print(bars) # currency_pair_data = pd.DataFrame(bars) # Save a CSV file # Code goes here... # pass -- not return -- because this function doesn't return a value. It's called for what it does. In computer # science, we say that it's called for its 'side effects'. 
# If there's a file named trade_order.p in listdir(), then enter the loop below. # Create a special instance of IB() JUST for entering orders. # The reason for this is because the way that Interactive Brokers automatically provides valid order IDs to # ib_insync is not trustworthy enough to really rely on. It's best practice to set aside a dedicated client ID # to ONLY be used for submitting orders, and close the connection when the order is successfully submitted. # your code goes here # "action": action, # "trade_currency": trade_currency, # "trade_amt": trade_amt # The new_order object returned by the call to ib_orders.placeOrder() that you've written is an object of class # `trade` that is kept continually updated by the `ib_insync` machinery. It's a market order; as such, it will # be filled immediately. # In this while loop, we wait for confirmation that new_order filled. # we use ib_orders.sleep(0) from the ib_insync module because the async socket connection # is not built for the normal time.sleep() function. # your code goes here # pass: same reason as above. # sleep, for the while loop.
| 2.415274
| 2
|
function_and_module_funcs/get_modules_complete_list.py
|
SecTraversl/Toolbox_Python_3.8
| 0
|
6627182
|
# %%
#######################################
def get_modules_complete_list():
    """Return a sorted list of all importable top-level module names.

    References:
        https://stackoverflow.com/questions/37752054/how-can-i-list-all-packages-modules-available-to-python-from-within-a-python-scr

    Returns:
        list: Sorted list of available module names; packages are reported
        as "name (package)".
    """
    import sys
    from pydoc import ModuleScanner
    import warnings

    # Snapshot sys.path so it can be restored afterwards: scanning imports
    # modules, and an imported module may mutate sys.path as a side effect.
    original_sys_path_tuple = tuple(sys.path)

    def scan_modules():
        """Scans for available modules using pydoc.ModuleScanner, taken from help('modules')"""
        modules = {}

        def callback(path, modname, desc, modules=modules):
            # Collapse "pkg.__init__" to "pkg (package)" and keep only
            # top-level names (anything containing a dot is a submodule).
            if modname and modname[-9:] == ".__init__":
                modname = modname[:-9] + " (package)"
            if modname.find(".") < 0:
                modules[modname] = 1

        def onerror(modname):
            # Still record modules whose import failed instead of aborting.
            callback(None, modname, None)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")  # ignore warnings from importing deprecated modules
            ModuleScanner().run(callback, onerror=onerror)
        return modules

    # BUGFIX: the original did `del sys.path` before reassigning (leaving
    # sys.path briefly unset) and skipped restoration entirely if the scan
    # raised; restore unconditionally in a finally block instead.
    try:
        return sorted(scan_modules().keys())
    finally:
        sys.path = list(original_sys_path_tuple)
|
# %%
#######################################
def get_modules_complete_list():
"""Returns a complete list of available modules.
References:
https://stackoverflow.com/questions/37752054/how-can-i-list-all-packages-modules-available-to-python-from-within-a-python-scr
Returns:
list: Returns a list of available module names
"""
import sys
from pydoc import ModuleScanner
import warnings
original_sys_path_tuple = tuple(sys.path)
def scan_modules():
"""Scans for available modules using pydoc.ModuleScanner, taken from help('modules')"""
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == ".__init__":
modname = modname[:-9] + " (package)"
if modname.find(".") < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
with warnings.catch_warnings():
warnings.simplefilter("ignore") # ignore warnings from importing deprecated modules
ModuleScanner().run(callback, onerror=onerror)
return modules
all_modules = sorted(scan_modules().keys())
del sys.path
sys.path = list(original_sys_path_tuple)
return all_modules
|
en
| 0.354585
|
# %% ####################################### Returns a complete list of available modules. References: https://stackoverflow.com/questions/37752054/how-can-i-list-all-packages-modules-available-to-python-from-within-a-python-scr Returns: list: Returns a list of available module names Scans for available modules using pydoc.ModuleScanner, taken from help('modules') # ignore warnings from importing deprecated modules
| 3.029232
| 3
|
src/hybrid.py
|
nassim-yagoub/fquad-exploration
| 0
|
6627183
|
import argparse
from tqdm import tqdm
import camembert
import okapi_bm25
class HybridSelector:
    """Two-stage context selector: Okapi BM25 pre-filters candidate
    contexts, then CamemBERT re-ranks the shortlist."""

    def __init__(self, cuda=False, model_size='base'):
        self.okapi = okapi_bm25.OkapiBM25()
        self.camembert = camembert.CamembertSelector(cuda, model_size)

    def topk_predictions(self, dataset_type, prediction_nb, okapi_number=20):
        """Measure how often the correct context appears in the top
        ``prediction_nb`` CamemBERT picks out of ``okapi_number`` BM25
        candidates, over the train or validation split."""
        if dataset_type == "train":
            questions_o = self.okapi.train_q
            questions_c = self.camembert.train_q
            answers = self.okapi.train_a
            id_to_ans = self.okapi.train_id_to_ans
        else:
            questions_o = self.okapi.valid_q
            questions_c = self.camembert.valid_q
            answers = self.okapi.valid_a
            id_to_ans = self.okapi.valid_id_to_ans

        correct = 0
        progress = tqdm(range(len(questions_o)))
        for i in progress:
            # Stage 1: BM25 shortlist of candidate context ids.
            shortlist = self.okapi.get_best_fits(questions_o[i], okapi_number, dataset_type)
            shortlist_ids = [shortlist[j][0] for j in range(okapi_number)]
            # Stage 2: CamemBERT re-ranks within the shortlist.
            best_ids = self.camembert.choose_best_contexts(
                questions_c[i],
                dataset_type,
                prediction_nb,
                limited_context=shortlist_ids
            )
            if id_to_ans[tuple(answers[i])] in best_ids:
                correct += 1
            progress.set_description("The right context is found {: .3f}% of the time".format(100*correct/(i+1)))
if __name__ == '__main__':
    # CLI entry point: evaluate top-k retrieval precision on a split.
    cli = argparse.ArgumentParser()
    cli.add_argument("--task", type=str, default='precision')
    cli.add_argument("--dataset", type=str, default='train')
    cli.add_argument("--number", type=int, default=5)
    cli.add_argument("--question", type=str)
    opts = cli.parse_args()
    HybridSelector().topk_predictions(opts.dataset, opts.number)
|
import argparse
from tqdm import tqdm
import camembert
import okapi_bm25
class HybridSelector:
def __init__(self, cuda = False, model_size = 'base'):
self.okapi = okapi_bm25.OkapiBM25()
self.camembert = camembert.CamembertSelector(cuda, model_size)
def topk_predictions(self, dataset_type, prediction_nb, okapi_number = 20):
if dataset_type == "train":
questions_o = self.okapi.train_q
questions_c = self.camembert.train_q
answers = self.okapi.train_a
id_to_ans = self.okapi.train_id_to_ans
else:
questions_o = self.okapi.valid_q
questions_c = self.camembert.valid_q
answers = self.okapi.valid_a
id_to_ans = self.okapi.valid_id_to_ans
iterator = tqdm(range(len(questions_o)))
correct = 0
for i in iterator:
question_o = questions_o[i]
question_c = questions_c[i]
best_fits = self.okapi.get_best_fits(question_o, okapi_number, dataset_type)
good_ids = [best_fits[i][0] for i in range(okapi_number)]
best_ids = self.camembert.choose_best_contexts(
question_c,
dataset_type,
prediction_nb,
limited_context = good_ids
)
if id_to_ans[tuple(answers[i])] in best_ids:
correct += 1
iterator.set_description("The right context is found {: .3f}% of the time".format(100*correct/(i+1)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--task", type=str, default='precision')
parser.add_argument("--dataset", type=str, default='train')
parser.add_argument("--number", type=int, default=5)
parser.add_argument("--question", type=str)
args = parser.parse_args()
selector = HybridSelector()
selector.topk_predictions(args.dataset, args.number)
|
none
| 1
| 2.344308
| 2
|
|
lisa/analysis/cpus.py
|
msrasmussen/lisa
| 159
|
6627184
|
<gh_stars>100-1000
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" CPUs Analysis Module """
import pandas as pd
import holoviews as hv
from lisa.analysis.base import TraceAnalysisBase
from lisa.trace import requires_events, CPU
from lisa.datautils import df_window
class CpusAnalysis(TraceAnalysisBase):
    """
    Support for CPUs signals analysis
    """

    # Namespace under which this analysis is registered on the Trace object.
    name = 'cpus'

###############################################################################
# DataFrame Getter Methods
###############################################################################

    @TraceAnalysisBase.cache
    @requires_events('sched_switch')
    def df_context_switches(self):
        """
        Compute number of context switches on each CPU.

        :returns: A :class:`pandas.DataFrame` with:

          * A ``context_switch_cnt`` column (the number of context switch per CPU)
        """
        # Since we want to count the number of context switches, we don't want
        # all tasks to appear
        sched_df = self.trace.df_event('sched_switch', signals_init=False)

        # Make sure to only get the switches inside the window
        sched_df = df_window(
            sched_df,
            method='exclusive',
            window=self.trace.window,
            clip_window=False,
        )
        cpus = list(range(self.trace.cpus_count))
        # Count per CPU by masking on the "__cpu" column; the resulting frame
        # is indexed by CPU id.
        ctx_sw_df = pd.DataFrame(
            [len(sched_df[sched_df['__cpu'] == cpu]) for cpu in cpus],
            index=cpus,
            columns=['context_switch_cnt']
        )
        ctx_sw_df.index.name = 'cpu'
        return ctx_sw_df

###############################################################################
# Plotting Methods
###############################################################################

    @TraceAnalysisBase.plot_method
    @df_context_switches.used_events
    def plot_context_switches(self):
        """
        Plot histogram of context switches on each CPU.
        """
        ctx_sw_df = self.df_context_switches()
        # Horizontal bar chart (invert_axes) of switch counts per CPU.
        return hv.Bars(
            ctx_sw_df["context_switch_cnt"]
        ).options(
            title='Per-CPU Task Context Switches',
            xlabel='CPU',
            ylabel='Number of context switches',
            invert_axes=True,
        )

    @TraceAnalysisBase.plot_method
    def plot_orig_capacity(self, cpu: CPU):
        """
        Plot the orig capacity of a CPU onto a given axis

        :param cpu: The CPU
        :type cpu: int
        """
        # Per-CPU original capacities come from the platform description;
        # presumably keyed by CPU id — confirm against plat_info schema.
        orig_capacities = self.trace.plat_info['cpu-capacities']['orig']
        # Style the horizontal line as dashed on both rendering backends.
        return hv.HLine(
            orig_capacities[cpu],
            label='orig capacity'
        ).options(
            backend='matplotlib',
            linestyle='--',
        ).options(
            backend='bokeh',
            line_dash='dashed',
        )
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" CPUs Analysis Module """
import pandas as pd
import holoviews as hv
from lisa.analysis.base import TraceAnalysisBase
from lisa.trace import requires_events, CPU
from lisa.datautils import df_window
class CpusAnalysis(TraceAnalysisBase):
"""
Support for CPUs signals analysis
"""
name = 'cpus'
###############################################################################
# DataFrame Getter Methods
###############################################################################
@TraceAnalysisBase.cache
@requires_events('sched_switch')
def df_context_switches(self):
"""
Compute number of context switches on each CPU.
:returns: A :class:`pandas.DataFrame` with:
* A ``context_switch_cnt`` column (the number of context switch per CPU)
"""
# Since we want to count the number of context switches, we don't want
# all tasks to appear
sched_df = self.trace.df_event('sched_switch', signals_init=False)
# Make sure to only get the switches inside the window
sched_df = df_window(
sched_df,
method='exclusive',
window=self.trace.window,
clip_window=False,
)
cpus = list(range(self.trace.cpus_count))
ctx_sw_df = pd.DataFrame(
[len(sched_df[sched_df['__cpu'] == cpu]) for cpu in cpus],
index=cpus,
columns=['context_switch_cnt']
)
ctx_sw_df.index.name = 'cpu'
return ctx_sw_df
###############################################################################
# Plotting Methods
###############################################################################
@TraceAnalysisBase.plot_method
@df_context_switches.used_events
def plot_context_switches(self):
"""
Plot histogram of context switches on each CPU.
"""
ctx_sw_df = self.df_context_switches()
return hv.Bars(
ctx_sw_df["context_switch_cnt"]
).options(
title='Per-CPU Task Context Switches',
xlabel='CPU',
ylabel='Number of context switches',
invert_axes=True,
)
@TraceAnalysisBase.plot_method
def plot_orig_capacity(self, cpu: CPU):
"""
Plot the orig capacity of a CPU onto a given axis
:param cpu: The CPU
:type cpu: int
"""
orig_capacities = self.trace.plat_info['cpu-capacities']['orig']
return hv.HLine(
orig_capacities[cpu],
label='orig capacity'
).options(
backend='matplotlib',
linestyle='--',
).options(
backend='bokeh',
line_dash='dashed',
)
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
|
en
| 0.516775
|
# SPDX-License-Identifier: Apache-2.0 # # Copyright (C) 2015, ARM Limited and contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # CPUs Analysis Module Support for CPUs signals analysis ############################################################################### # DataFrame Getter Methods ############################################################################### Compute number of context switches on each CPU. :returns: A :class:`pandas.DataFrame` with: * A ``context_switch_cnt`` column (the number of context switch per CPU) # Since we want to count the number of context switches, we don't want # all tasks to appear # Make sure to only get the switches inside the window ############################################################################### # Plotting Methods ############################################################################### Plot histogram of context switches on each CPU. Plot the orig capacity of a CPU onto a given axis :param cpu: The CPU :type cpu: int # vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
| 1.956374
| 2
|
PythonVirtEnv/Lib/site-packages/prompt_toolkit/key_binding/bindings/mouse.py
|
zuhorski/EPL_Project
| 5
|
6627185
|
from prompt_toolkit.data_structures import Point
from prompt_toolkit.key_binding.key_processor import KeyPress, KeyPressEvent
from prompt_toolkit.keys import Keys
from prompt_toolkit.mouse_events import MouseEvent, MouseEventType
from prompt_toolkit.utils import is_windows
from ..key_bindings import KeyBindings
__all__ = [
"load_mouse_bindings",
]
E = KeyPressEvent
def load_mouse_bindings() -> KeyBindings:
    """
    Key bindings, required for mouse support.
    (Mouse events enter through the key binding system.)
    """
    key_bindings = KeyBindings()

    @key_bindings.add(Keys.Vt100MouseEvent)
    def _(event: E) -> None:
        """
        Handling of incoming mouse event.
        """
        # Three wire formats can arrive here:
        #   Typical:   "Esc[M<button><x><y>"  (coordinates as single chars)
        #   Urxvt:     "Esc[96;14;13M"        (decimal, no '<')
        #   Xterm SGR: "Esc[<64;85;12M"       (decimal, leading '<')

        # Parse incoming packet.
        if event.data[2] == "M":
            # Typical: button/x/y are encoded as character ordinals.
            mouse_event, x, y = map(ord, event.data[3:])
            mouse_event_type = {
                32: MouseEventType.MOUSE_DOWN,
                35: MouseEventType.MOUSE_UP,
                96: MouseEventType.SCROLL_UP,
                97: MouseEventType.SCROLL_DOWN,
            }.get(mouse_event)

            # Handle situations where `PosixStdinReader` used surrogateescapes.
            if x >= 0xDC00:
                x -= 0xDC00
            if y >= 0xDC00:
                y -= 0xDC00

            # Coordinates are offset by 32 to keep them printable.
            x -= 32
            y -= 32
        else:
            # Urxvt and Xterm SGR.
            # When the '<' is not present, we are not using the Xterm SGR mode,
            # but Urxvt instead.
            data = event.data[2:]
            if data[:1] == "<":
                sgr = True
                data = data[1:]
            else:
                sgr = False

            # Extract coordinates (";"-separated decimals; last char is the
            # event terminator, 'M' or 'm').
            mouse_event, x, y = map(int, data[:-1].split(";"))
            m = data[-1]

            # Parse event type.
            if sgr:
                # SGR distinguishes press/release by terminator case.
                mouse_event_type = {
                    (0, "M"): MouseEventType.MOUSE_DOWN,
                    (0, "m"): MouseEventType.MOUSE_UP,
                    (64, "M"): MouseEventType.SCROLL_UP,
                    (65, "M"): MouseEventType.SCROLL_DOWN,
                }.get((mouse_event, m))
            else:
                mouse_event_type = {
                    32: MouseEventType.MOUSE_DOWN,
                    35: MouseEventType.MOUSE_UP,
                    96: MouseEventType.SCROLL_UP,
                    97: MouseEventType.SCROLL_DOWN,
                }.get(mouse_event)

            # Decimal protocols are 1-based; convert to 0-based.
            x -= 1
            y -= 1

        # Only handle mouse events when we know the window height.
        if event.app.renderer.height_is_known and mouse_event_type is not None:
            # Take region above the layout into account. The reported
            # coordinates are absolute to the visible part of the terminal.
            from prompt_toolkit.renderer import HeightIsUnknownError
            try:
                y -= event.app.renderer.rows_above_layout
            except HeightIsUnknownError:
                return

            # Call the mouse handler from the renderer.
            handler = event.app.renderer.mouse_handlers.mouse_handlers[y][x]
            handler(MouseEvent(position=Point(x=x, y=y), event_type=mouse_event_type))

    @key_bindings.add(Keys.ScrollUp)
    def _scroll_up(event: E) -> None:
        """
        Scroll up event without cursor position.
        """
        # We don't receive a cursor position, so we don't know which window to
        # scroll. Just send an 'up' key press instead.
        event.key_processor.feed(KeyPress(Keys.Up), first=True)

    @key_bindings.add(Keys.ScrollDown)
    def _scroll_down(event: E) -> None:
        """
        Scroll down event without cursor position.
        """
        # Same rationale as _scroll_up: translate to a 'down' key press.
        event.key_processor.feed(KeyPress(Keys.Down), first=True)

    @key_bindings.add(Keys.WindowsMouseEvent)
    def _mouse(event: E) -> None:
        """
        Handling of mouse events for Windows.
        """
        assert is_windows()  # This key binding should only exist for Windows.

        # Parse data: ";"-separated "event_type;x;y" produced by the Windows
        # console input reader.
        pieces = event.data.split(";")
        event_type = MouseEventType(pieces[0])
        x = int(pieces[1])
        y = int(pieces[2])

        # Make coordinates absolute to the visible part of the terminal.
        output = event.app.renderer.output

        from prompt_toolkit.output.win32 import Win32Output
        from prompt_toolkit.output.windows10 import Windows10_Output

        if isinstance(output, (Win32Output, Windows10_Output)):
            screen_buffer_info = output.get_win32_screen_buffer_info()
            rows_above_cursor = (
                screen_buffer_info.dwCursorPosition.Y - event.app.renderer._cursor_pos.y
            )
            y -= rows_above_cursor

        # Call the mouse event handler.
        handler = event.app.renderer.mouse_handlers.mouse_handlers[y][x]
        handler(MouseEvent(position=Point(x=x, y=y), event_type=event_type))

    return key_bindings
|
from prompt_toolkit.data_structures import Point
from prompt_toolkit.key_binding.key_processor import KeyPress, KeyPressEvent
from prompt_toolkit.keys import Keys
from prompt_toolkit.mouse_events import MouseEvent, MouseEventType
from prompt_toolkit.utils import is_windows
from ..key_bindings import KeyBindings
__all__ = [
"load_mouse_bindings",
]
E = KeyPressEvent
def load_mouse_bindings() -> KeyBindings:
    """
    Key bindings, required for mouse support.
    (Mouse events enter through the key binding system.)
    """
    key_bindings = KeyBindings()
    @key_bindings.add(Keys.Vt100MouseEvent)
    def _(event: E) -> None:
        """
        Handling of incoming mouse event.
        """
        # Three ANSI mouse encodings are handled; examples of the raw data:
        # Typical:   "Esc[MaB*"
        # Urxvt:     "Esc[96;14;13M"
        # Xterm SGR: "Esc[<64;85;12M"
        # Parse incoming packet.
        if event.data[2] == "M":
            # Typical: the three payload bytes encode (button, x, y),
            # each offset by 32 so they are printable characters.
            mouse_event, x, y = map(ord, event.data[3:])
            mouse_event_type = {
                32: MouseEventType.MOUSE_DOWN,
                35: MouseEventType.MOUSE_UP,
                96: MouseEventType.SCROLL_UP,
                97: MouseEventType.SCROLL_DOWN,
            }.get(mouse_event)
            # Handle situations where `PosixStdinReader` used surrogateescapes.
            # (Undecodable bytes arrive shifted into the 0xDC00-0xDCFF range.)
            if x >= 0xDC00:
                x -= 0xDC00
            if y >= 0xDC00:
                y -= 0xDC00
            # Undo the +32 printable-character offset.
            x -= 32
            y -= 32
        else:
            # Urxvt and Xterm SGR.
            # When the '<' is not present, we are not using the Xterm SGR mode,
            # but Urxvt instead.
            data = event.data[2:]
            if data[:1] == "<":
                sgr = True
                data = data[1:]
            else:
                sgr = False
            # Extract coordinates.
            mouse_event, x, y = map(int, data[:-1].split(";"))
            m = data[-1]
            # Parse event type.
            if sgr:
                # SGR distinguishes press/release by the trailing 'M'/'m'.
                mouse_event_type = {
                    (0, "M"): MouseEventType.MOUSE_DOWN,
                    (0, "m"): MouseEventType.MOUSE_UP,
                    (64, "M"): MouseEventType.SCROLL_UP,
                    (65, "M"): MouseEventType.SCROLL_DOWN,
                }.get((mouse_event, m))
            else:
                mouse_event_type = {
                    32: MouseEventType.MOUSE_DOWN,
                    35: MouseEventType.MOUSE_UP,
                    96: MouseEventType.SCROLL_UP,
                    97: MouseEventType.SCROLL_DOWN,
                }.get(mouse_event)
            # Urxvt/SGR report 1-based coordinates; convert to 0-based.
            x -= 1
            y -= 1
        # Only handle mouse events when we know the window height.
        if event.app.renderer.height_is_known and mouse_event_type is not None:
            # Take region above the layout into account. The reported
            # coordinates are absolute to the visible part of the terminal.
            from prompt_toolkit.renderer import HeightIsUnknownError
            try:
                y -= event.app.renderer.rows_above_layout
            except HeightIsUnknownError:
                return
            # Call the mouse handler from the renderer.
            handler = event.app.renderer.mouse_handlers.mouse_handlers[y][x]
            handler(MouseEvent(position=Point(x=x, y=y), event_type=mouse_event_type))
    @key_bindings.add(Keys.ScrollUp)
    def _scroll_up(event: E) -> None:
        """
        Scroll up event without cursor position.
        """
        # We don't receive a cursor position, so we don't know which window to
        # scroll. Just send an 'up' key press instead.
        event.key_processor.feed(KeyPress(Keys.Up), first=True)
    @key_bindings.add(Keys.ScrollDown)
    def _scroll_down(event: E) -> None:
        """
        Scroll down event without cursor position.
        """
        event.key_processor.feed(KeyPress(Keys.Down), first=True)
    @key_bindings.add(Keys.WindowsMouseEvent)
    def _mouse(event: E) -> None:
        """
        Handling of mouse events for Windows.
        """
        assert is_windows()  # This key binding should only exist for Windows.
        # Parse data: "event_type;x;y".
        pieces = event.data.split(";")
        event_type = MouseEventType(pieces[0])
        x = int(pieces[1])
        y = int(pieces[2])
        # Make coordinates absolute to the visible part of the terminal.
        output = event.app.renderer.output
        from prompt_toolkit.output.win32 import Win32Output
        from prompt_toolkit.output.windows10 import Windows10_Output
        if isinstance(output, (Win32Output, Windows10_Output)):
            screen_buffer_info = output.get_win32_screen_buffer_info()
            rows_above_cursor = (
                screen_buffer_info.dwCursorPosition.Y - event.app.renderer._cursor_pos.y
            )
            y -= rows_above_cursor
        # Call the mouse event handler.
        handler = event.app.renderer.mouse_handlers.mouse_handlers[y][x]
        handler(MouseEvent(position=Point(x=x, y=y), event_type=event_type))
    return key_bindings
|
en
| 0.794869
|
Key bindings, required for mouse support. (Mouse events enter through the key binding system.) Handling of incoming mouse event. # TypicaL: "eSC[MaB*" # Urxvt: "Esc[96;14;13M" # Xterm SGR: "Esc[<64;85;12M" # Parse incoming packet. # Typical. # Handle situations where `PosixStdinReader` used surrogateescapes. # Urxvt and Xterm SGR. # When the '<' is not present, we are not using the Xterm SGR mode, # but Urxvt instead. # Extract coordinates. # Parse event type. # Only handle mouse events when we know the window height. # Take region above the layout into account. The reported # coordinates are absolute to the visible part of the terminal. # Call the mouse handler from the renderer. Scroll up event without cursor position. # We don't receive a cursor position, so we don't know which window to # scroll. Just send an 'up' key press instead. Scroll down event without cursor position. Handling of mouse events for Windows. # This key binding should only exist for Windows. # Parse data. # Make coordinates absolute to the visible part of the terminal. # Call the mouse event handler.
| 2.466291
| 2
|
CellProfiler/cellprofiler/gui/module_view/_file_collection_display_controller.py
|
aidotse/Team-rahma.ai
| 0
|
6627186
|
<filename>CellProfiler/cellprofiler/gui/module_view/_file_collection_display_controller.py
import logging
import os
import sys
import uuid
import wx
from cellprofiler_core.preferences import report_progress
from cellprofiler_core.setting import FileCollectionDisplay
from ..pipeline import Pipeline
from ..utilities.module_view import edit_control_name
from ...icons import get_builtin_image
class FileCollectionDisplayController:
    """This class provides the UI for the file collection display

    The UI has a browse button, a hide checkbox and a tree control.

    Critical attributes:

    self.walks_in_progress - this is a dictionary of keys to directory walks
                             and metadata fetches that are happening in the
                             background. The value of the dictionary entry
                             is the function to call to stop the search.
                             There's a completion callback that's called to
                             remove an entry from the dictionary. When the
                             dictionary size reaches zero, the stop and pause
                             buttons are disabled.

    self.modpath_to_item - a modpath is a collection of path parts to some file
                           handled by the controller. There's a tree item
                           for every modpath in this dictionary and the
                           dictionary can be used for fast lookup of the
                           item without traversing the entire tree.
    """

    # Shared image list for the tree control, plus indices of each icon.
    IMAGE_LIST = wx.ImageList(16, 16, 3)
    FOLDER_IMAGE_INDEX = IMAGE_LIST.Add(
        wx.ArtProvider.GetBitmap(wx.ART_FOLDER, wx.ART_OTHER, size=(16, 16))
    )
    FOLDER_OPEN_IMAGE_INDEX = IMAGE_LIST.Add(
        wx.ArtProvider.GetBitmap(wx.ART_FOLDER_OPEN, wx.ART_OTHER, size=(16, 16))
    )
    FILE_IMAGE_INDEX = IMAGE_LIST.Add(
        wx.ArtProvider.GetBitmap(wx.ART_NORMAL_FILE, wx.ART_OTHER, size=(16, 16))
    )
    IMAGE_PLANE_IMAGE_INDEX = IMAGE_LIST.Add(
        get_builtin_image("microscope-icon_16").ConvertToBitmap()
    )
    IMAGE_PLANES_IMAGE_INDEX = IMAGE_LIST.Add(
        get_builtin_image("microscopes_16").ConvertToBitmap()
    )
    COLOR_IMAGE_INDEX = IMAGE_LIST.Add(
        get_builtin_image("microscope-color_16").ConvertToBitmap()
    )
    MOVIE_IMAGE_INDEX = IMAGE_LIST.Add(get_builtin_image("movie_16").ConvertToBitmap())
    # Text colors for unfiltered (active) vs. filtered-out tree items.
    ACTIVE_COLOR = wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT)
    FILTERED_COLOR = wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT)

    class FCDCDropTarget(wx.PyDropTarget):
        """Drop target accepting both file lists and plain text (one path per line)"""

        def __init__(self, file_callback_fn, text_callback_fn):
            # NOTE: super(self.__class__, ...) would recurse infinitely if this
            # class were ever subclassed; use the zero-argument form instead.
            super().__init__()
            self.file_callback_fn = file_callback_fn
            self.text_callback_fn = text_callback_fn
            self.file_data_object = wx.FileDataObject()
            self.text_data_object = wx.TextDataObject()
            self.composite_data_object = wx.DataObjectComposite()
            self.composite_data_object.Add(self.file_data_object, True)
            self.composite_data_object.Add(self.text_data_object)
            self.SetDataObject(self.composite_data_object)

        def OnDropFiles(self, x, y, filenames):
            self.file_callback_fn(x, y, filenames)

        def OnDropText(self, x, y, text):
            self.text_callback_fn(x, y, text)

        @staticmethod
        def OnEnter(x, y, d):
            return wx.DragCopy

        @staticmethod
        def OnDragOver(x, y, d):
            return wx.DragCopy

        def OnData(self, x, y, d):
            # Dispatch to the file or text handler depending on which data
            # format the drop actually delivered.
            if self.GetData():
                df = self.composite_data_object.GetReceivedFormat().GetType()
                if df in (wx.DF_TEXT, wx.DF_UNICODETEXT):
                    self.OnDropText(x, y, self.text_data_object.GetText())
                elif df == wx.DF_FILENAME:
                    self.OnDropFiles(x, y, self.file_data_object.GetFilenames())
            return wx.DragCopy

        @staticmethod
        def OnDrop(x, y):
            return True

    def __init__(self, module_view, v, pipeline):
        """Build the browse/tree/hide UI and hook it to the setting

        module_view - the ModuleView that hosts this control
        v - the FileCollectionDisplay setting being displayed
        pipeline - the pipeline, used for undoable actions
        """
        assert isinstance(v, FileCollectionDisplay)
        self.module_view = module_view
        self.v = v
        assert isinstance(pipeline, Pipeline)
        self.pipeline = pipeline
        self.panel = wx.Panel(
            self.module_view.module_panel, -1, name=edit_control_name(v)
        )
        self.panel.controller = self
        self.panel.Sizer = wx.BoxSizer(wx.VERTICAL)
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.panel.Sizer.Add(sizer, 0, wx.EXPAND)
        self.status_text = wx.StaticText(self.panel, -1)
        sizer.Add(self.status_text, 0, wx.ALIGN_LEFT | wx.ALIGN_CENTER)
        sizer.AddStretchSpacer()
        sizer.Add(
            wx.StaticText(self.panel, -1, "Drag folders and/or files here or"),
            0,
            wx.ALIGN_LEFT | wx.ALIGN_CENTER,
        )
        sizer.AddSpacer((3, 0))
        browse_button = wx.Button(self.panel, -1, "Browse...")
        sizer.Add(browse_button, 0, wx.ALIGN_LEFT | wx.ALIGN_CENTER)
        browse_button.Bind(wx.EVT_BUTTON, self.on_browse)
        tree_style = wx.TR_HIDE_ROOT | wx.TR_HAS_BUTTONS | wx.TR_MULTIPLE
        self.tree_ctrl = wx.TreeCtrl(self.panel, -1, style=tree_style)
        self.panel.Sizer.Add(self.tree_ctrl, 1, wx.EXPAND)
        self.tree_ctrl.SetImageList(self.IMAGE_LIST)
        self.tree_ctrl.Bind(wx.EVT_TREE_ITEM_MENU, self.on_tree_item_menu)
        self.tree_ctrl.Bind(wx.EVT_TREE_KEY_DOWN, self.on_tree_key_down)
        #
        # Don't auto-expand after the user collapses a node.
        #
        self.user_collapsed_a_node = False

        def on_item_collapsed(event):
            logging.debug("On item collapsed")
            self.user_collapsed_a_node = True

        self.tree_ctrl.Bind(wx.EVT_TREE_ITEM_COLLAPSED, on_item_collapsed)
        self.tree_ctrl.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.on_tree_doubleclick)
        self.tree_ctrl.Bind(wx.EVT_ERASE_BACKGROUND, self.on_erase_background)
        self.panel.Bind(wx.EVT_WINDOW_DESTROY, self.on_destroy)
        self.root_item = self.tree_ctrl.AddRoot("I am the invisible root")
        self.tree_ctrl.SetPyData(self.root_item, None)
        self.tree_ctrl.SetItemImage(self.root_item, self.FOLDER_IMAGE_INDEX)
        self.tree_ctrl.SetItemImage(
            self.root_item, self.FOLDER_OPEN_IMAGE_INDEX, wx.TreeItemIcon_Expanded
        )
        self.tree_ctrl.SetMinSize((100, 300))
        self.tree_ctrl.SetMaxSize((sys.maxsize, 300))
        self.file_drop_target = self.FCDCDropTarget(
            self.on_drop_files, self.on_drop_text
        )
        self.tree_ctrl.SetDropTarget(self.file_drop_target)
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.panel.Sizer.Add(sizer, 0, wx.EXPAND)
        self.hide_show_ctrl = wx.CheckBox(self.panel, -1, self.v.hide_text)
        sizer.Add(self.hide_show_ctrl, 0, wx.ALIGN_LEFT | wx.ALIGN_BOTTOM)
        self.hide_show_ctrl.Bind(wx.EVT_CHECKBOX, self.on_hide_show_checked)
        self.hide_show_ctrl.SetValue(not self.v.show_filtered)
        sizer.AddStretchSpacer()
        self.stop_button = wx.Button(self.panel, -1, "Stop")
        self.stop_button.Enable(False)
        self.stop_button.Bind(wx.EVT_BUTTON, self.on_stop)
        sizer.Add(self.stop_button, 0, wx.ALIGN_LEFT | wx.ALIGN_CENTER_VERTICAL)
        self.pause_button = wx.Button(self.panel, -1, "Pause")
        self.pause_button.Enable(False)
        self.pause_button.Bind(wx.EVT_BUTTON, self.on_pause_resume)
        sizer.Add(self.pause_button, 0, wx.ALIGN_LEFT | wx.ALIGN_CENTER_VERTICAL)
        v.set_update_function(self.request_update)
        self.needs_update = False
        self.modpath_to_item = {}
        self.request_update()

    def __del__(self):
        self.on_destroy(None)

    def on_destroy(self, event):
        # Unhook the update callback so the setting no longer references us.
        self.v.set_update_function()

    def on_erase_background(self, event):
        """Paint the tree background, with a hint message when the tree is empty"""
        assert isinstance(event, wx.EraseEvent)
        dc = event.GetDC()
        assert isinstance(dc, wx.DC)
        brush = wx.Brush(self.tree_ctrl.GetBackgroundColour())
        dc.SetBrush(brush)
        dc.SetPen(wx.TRANSPARENT_PEN)
        width, height = self.tree_ctrl.GetSize()
        dc.DrawRectangle(0, 0, width, height)
        if len(self.modpath_to_item) == 0:
            text = "Drop files and folders here"
            font = wx.Font(
                36, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD
            )
            dc.SetTextForeground(wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT))
            dc.SetFont(font)
            text_width, text_height = dc.GetTextExtent(text)
            # Center the hint; DrawText wants integer coordinates.
            dc.DrawText(text, (width - text_width) // 2, (height - text_height) // 2)

    def on_browse(self, event):
        """Browse button pressed: let the user pick a directory to add"""
        logging.debug("Browsing for file collection directory")
        dlg = wx.DirDialog(self.panel, "Select a directory to add")
        try:
            if dlg.ShowModal() == wx.ID_OK:
                self.v.fn_on_drop([dlg.GetPath()], True)
        finally:
            dlg.Destroy()

    def on_start_received(self):
        """A background walk started: enable the stop/pause buttons"""
        self.pause_button.Label = "Pause"
        self.pause_button.Enable(True)
        self.stop_button.Enable(True)

    def on_stop_received(self):
        """All background walks finished: disable the stop/pause buttons"""
        self.pause_button.Enable(False)
        self.stop_button.Enable(False)

    def on_stop(self, event):
        """Stop button pressed"""
        self.v.fn_on_bkgnd_control(self.v.BKGND_STOP)
        self.pause_button.Label = "Pause"
        self.pause_button.Enable(False)
        self.stop_button.Enable(False)

    def on_pause_resume(self, event):
        """Pause / resume pressed"""
        if self.pause_button.Label == "Pause":
            action = self.v.BKGND_PAUSE
            self.pause_button.Label = "Resume"
        else:
            action = self.v.BKGND_RESUME
            self.pause_button.Label = "Pause"
        self.v.fn_on_bkgnd_control(action)

    def add_item(self, modpath, text=None, sort=True):
        """Add an item to the tree

        modpath - a collection of path parts to the item in the tree
        text - the text to appear in the item
        sort - True to insert in alphabetical order, False to append
        returns the wx tree item id of the (possibly pre-existing) item
        """
        parent_key = tuple(modpath[:-1])
        modpath = tuple(modpath)
        if modpath in self.modpath_to_item:
            item = self.modpath_to_item[modpath]
            if text is not None:
                self.tree_ctrl.SetItemText(item, text)
            return item
        if text is None:
            text = modpath[-1]
        if len(modpath) == 1:
            parent_item = self.root_item
        elif parent_key in self.modpath_to_item:
            parent_item = self.modpath_to_item[parent_key]
        else:
            # Recursively create missing ancestors as folders.
            parent_item = self.add_item(parent_key, sort=sort)
            self.tree_ctrl.SetItemImage(parent_item, self.FOLDER_IMAGE_INDEX)
            self.tree_ctrl.SetItemImage(
                parent_item, self.FOLDER_OPEN_IMAGE_INDEX, wx.TreeItemIcon_Expanded
            )
        # If this is the first item, the "drop files here" hint must be erased.
        want_erase = len(self.modpath_to_item) == 0
        #
        # Put in alpha order
        #
        n_children = self.tree_ctrl.GetChildrenCount(parent_item)
        if n_children == 0 or not sort:
            item = self.tree_ctrl.AppendItem(parent_item, text)
        else:
            child, cookie = self.tree_ctrl.GetFirstChild(parent_item)
            for i in range(n_children):
                ctext = self.tree_ctrl.GetItemText(child)
                if ctext > text:
                    item = self.tree_ctrl.InsertItemBefore(parent_item, i, text)
                    break
                child = self.tree_ctrl.GetNextSibling(child)
            else:
                item = self.tree_ctrl.AppendItem(parent_item, text)
        self.tree_ctrl.SetPyData(item, modpath[-1])
        self.modpath_to_item[modpath] = item
        if want_erase:
            self.tree_ctrl.Refresh(True)
        return item

    def remove_item(self, modpath):
        """Remove the item at modpath and all of its descendants from the tree"""
        modpath = tuple(modpath)
        if modpath in self.modpath_to_item:
            item = self.modpath_to_item[modpath]
            n_children = self.tree_ctrl.GetChildrenCount(item, False)
            if n_children > 0:
                # Collect child tokens first: deleting while walking the
                # sibling chain would invalidate the iteration.
                child, cookie = self.tree_ctrl.GetFirstChild(item)
                child_tokens = []
                for i in range(n_children):
                    child_tokens.append(self.tree_ctrl.GetItemPyData(child))
                    child = self.tree_ctrl.GetNextSibling(child)
                for child_token in child_tokens:
                    sub_modpath = list(modpath) + [child_token]
                    self.remove_item(sub_modpath)
            self.tree_ctrl.Delete(self.modpath_to_item[modpath])
            del self.modpath_to_item[modpath]

    @classmethod
    def get_modpath(cls, path):
        """Break a path into its components"""
        result = []
        while True:
            new_path, part = os.path.split(path)
            if len(new_path) == 0 or len(part) == 0:
                result.insert(0, path)
                return result
            result.insert(0, part)
            path = new_path

    def on_drop_files(self, x, y, filenames):
        self.v.fn_on_drop(filenames, True)

    def on_drop_text(self, x, y, text):
        """Text is assumed to be one file name per line"""
        filenames = [line.strip() for line in text.split("\n") if len(line.strip()) > 0]
        self.v.fn_on_drop(filenames, False)

    def get_path_from_event(self, event):
        """Given a tree control event, find the path from the root

        event - event from tree control (e.g., EVT_TREE_ITEM_ACTIVATED)

        returns a sequence of path items from the root
        """
        item = event.GetItem()
        path = []
        while True:
            item_data = self.tree_ctrl.GetItemPyData(item)
            if item_data is None:
                # None marks the invisible root.
                break
            path.insert(0, item_data)
            item = self.tree_ctrl.GetItemParent(item)
        return path

    def on_tree_item_menu(self, event):
        """Show the context menu for a tree item and dispatch its commands"""
        logging.debug("On tree item menu")
        path = self.get_path_from_event(event)
        if len(path) == 0:
            # logging.warn is a deprecated alias of logging.warning.
            logging.warning("Could not find item associated with tree event")
            return
        context_menu = self.v.get_context_menu(path)
        if len(context_menu) > 0:
            menu = wx.Menu()
            try:
                delete_menu_items = []
                for context_item in context_menu:
                    if isinstance(context_item, FileCollectionDisplay.DeleteMenuItem):
                        delete_menu_items.append(menu.Append(-1, context_item.text).Id)
                    else:
                        menu.Append(-1, context_item)

                def on_menu(event):
                    logging.debug("On menu")
                    # Bracket the command with an undoable action so the
                    # whole menu command can be undone as one step.
                    self.pipeline.start_undoable_action()
                    try:
                        for menu_item in menu.GetMenuItems():
                            if menu_item.Id == event.Id:
                                logging.debug(" Command = %s" % menu_item.Text)
                                if menu_item.Id in delete_menu_items:
                                    self.on_delete_selected(event)
                                else:
                                    self.v.fn_on_menu_command(path, menu_item.Text)
                                break
                    finally:
                        self.pipeline.stop_undoable_action()

                self.tree_ctrl.Bind(wx.EVT_MENU, on_menu)
                self.tree_ctrl.PopupMenu(menu, event.GetPoint())
                self.tree_ctrl.Unbind(wx.EVT_MENU, handler=on_menu)
            finally:
                menu.Destroy()

    def on_tree_doubleclick(self, event):
        path = self.get_path_from_event(event)
        if self.v.fn_on_menu_command(path, None):
            return True

    def on_tree_key_down(self, event):
        logging.debug("On tree key down")
        key = event.GetKeyCode()
        if key == wx.WXK_DELETE:
            self.on_delete_selected(event)

    def on_delete_selected(self, event):
        """Remove every currently-selected tree item from the setting"""
        mods = [self.get_item_address(item) for item in self.tree_ctrl.GetSelections()]
        mods = [x for x in mods if x is not None]
        self.v.on_remove([self.v.get_tree_modpaths(mod) for mod in mods])

    def get_item_address(self, item):
        """Get an item's address as a collection of names"""
        result = []
        while True:
            name = self.tree_ctrl.GetItemPyData(item)
            if name is None:
                break
            else:
                result.insert(0, name)
                item = self.tree_ctrl.GetItemParent(item)
        return result

    def get_item_from_modpath(self, modpath):
        """Get an item from its modpath

        returns the tree item id or None if not found.
        """
        return self.modpath_to_item.get(tuple(modpath))

    def request_update(self, hint=None, modpath=None):
        """Handle an update request from the FileCollectionDisplay setting

        hint - one of the FileCollectionDisplay hint constants (ADD, REMOVE,
               METADATA, BKGND_*), or None for a full refresh.
        modpath - nested modpath tuple(s) describing the affected node, if any.
        """
        if hint == FileCollectionDisplay.BKGND_RESUME:
            self.on_start_received()
            return
        if hint == FileCollectionDisplay.BKGND_STOP:
            self.on_stop_received()
            self.status_text.Label = "Idle..."
            return
        if modpath is not None and len(modpath) > 0:
            #
            # Descend down the leftmost side of all of the tuples
            # to get something we can display
            #
            path = []
            mp = modpath[0]
            any_others = len(modpath) > 1
            if hint != FileCollectionDisplay.REMOVE:
                # It's likely that the leaf was removed and it doesn't
                # make sense to descend
                file_tree = self.v.file_tree
            is_filtered = False
            while True:
                if isinstance(mp, str) or isinstance(mp, tuple) and len(mp) == 3:
                    path.append(mp)
                    if hint != FileCollectionDisplay.REMOVE:
                        is_filtered = not file_tree[mp]
                    break
                part, mp_list = mp
                path.append(part)
                if hint != FileCollectionDisplay.REMOVE:
                    file_tree = file_tree[part]
                if len(mp_list) == 0:
                    is_filtered = not file_tree[None]
                    break
                any_others = any_others or len(mp_list) > 1
                mp = mp_list[0]
            if hint != FileCollectionDisplay.REMOVE:
                # Parenthesize the conditional: previously the expression
                # parsed as ("Processing " + path[-1]) if ... else path[-2],
                # dropping the "Processing " prefix on the else branch.
                self.status_text.Label = "Processing " + (
                    path[-1] if isinstance(path[-1], str) else path[-2]
                )
                self.status_text.Update()
            if not any_others:
                #
                # It's just a modification to a single node. Try and handle
                # here.
                #
                if hint == FileCollectionDisplay.METADATA:
                    if (not self.v.show_filtered) and is_filtered:
                        return
                    item_id = self.get_item_from_modpath(path)
                    if item_id is not None:
                        text, node_type, tooltip = self.v.get_node_info(path)
                        image_id = self.get_image_id_from_nodetype(node_type)
                        self.tree_ctrl.SetItemText(item_id, text)
                        self.tree_ctrl.SetItemImage(item_id, image_id)
                        return
                elif hint == FileCollectionDisplay.ADD:
                    if self.get_item_from_modpath(path) is None:
                        text, node_type, tooltip = self.v.get_node_info(path)
                        item_id = self.add_item(path, text)
                        image_id = self.get_image_id_from_nodetype(node_type)
                        self.tree_ctrl.SetItemImage(item_id, image_id)
                        self.manage_expansion()
                        return
                elif hint == FileCollectionDisplay.REMOVE:
                    if is_filtered:
                        return
                    self.remove_item(path)
                    if len(path) > 1:
                        # Prune the parent folder if it just became empty.
                        super_modpath = tuple(path[:-1])
                        if super_modpath in self.modpath_to_item:
                            item = self.modpath_to_item[super_modpath]
                            n_children = self.tree_ctrl.GetChildrenCount(item, False)
                            if n_children == 0:
                                self.remove_item(super_modpath)
                    return
        # Fall through to a full refresh of the tree.
        self.update()

    def update(self):
        """Rebuild the whole tree from the setting's file tree"""
        operation_id = uuid.uuid4()
        total = self.v.node_count()
        if total == 0:
            return
        self.update_subtree(
            self.v.file_tree, self.root_item, False, [], operation_id, 0, total
        )
        self.manage_expansion()
        report_progress(operation_id, 1, None)

    def manage_expansion(self):
        """Handle UI expansion issues

        Make sure that the tree is auto-expanded if appropriate and that
        the root nodes are expanded.
        """
        if not self.user_collapsed_a_node:
            #
            # Expand all until we reach a node that has more than
            # one child = ambiguous choice of which to expand
            #
            item = self.root_item
            while self.tree_ctrl.GetChildrenCount(item, False) == 1:
                # Can't expand the invisible root for Mac
                if sys.platform != "darwin" or item != self.root_item:
                    self.tree_ctrl.Expand(item)
                item, cookie = self.tree_ctrl.GetFirstChild(item)
            if self.tree_ctrl.GetChildrenCount(item, False) > 0:
                self.tree_ctrl.Expand(item)
        #
        # The bottom-most nodes don't have expand buttons (why?). If you
        # have two bottom-most nodes, neither will be expanded and there
        # is no way to expand them using the UI. So, we need to make sure
        # all bottom-most nodes are expanded, no matter what.
        #
        for i in range(self.tree_ctrl.GetChildrenCount(self.root_item, False)):
            if i == 0:
                bottom_item, thing = self.tree_ctrl.GetFirstChild(self.root_item)
            else:
                bottom_item, thing = self.tree_ctrl.GetNextChild(self.root_item, thing)
            if not self.tree_ctrl.IsExpanded(bottom_item):
                self.tree_ctrl.Expand(bottom_item)

    def update_subtree(
        self, file_tree, parent_item, is_filtered, modpath, operation_id, count, total
    ):
        """Synchronize one level of the tree control with the file tree

        file_tree - dict of name -> bool (file) or dict (folder); the None key
                    holds the folder's own filter state
        parent_item - tree item whose children are being synchronized
        is_filtered - True if an ancestor is filtered out
        modpath - path parts leading to this level
        operation_id, count, total - progress-reporting state
        returns the updated count of processed nodes
        """
        # Map each existing child's token to [item_id, keep-flag]; children
        # whose flag is still False at the end are removed.
        existing_items = {}
        show_filtered = self.v.show_filtered
        needs_sort = False
        child_count = self.tree_ctrl.GetChildrenCount(parent_item, False)
        if child_count > 0:
            child_item_id, cookie = self.tree_ctrl.GetFirstChild(parent_item)
            for i in range(child_count):
                existing_items[self.tree_ctrl.GetItemPyData(child_item_id)] = [
                    child_item_id,
                    False,
                ]
                if i < child_count - 1:
                    child_item_id = self.tree_ctrl.GetNextSibling(child_item_id)
        for x in sorted(file_tree.keys()):
            sub_modpath = modpath + [x]
            if x is None:
                # The None key is the folder's filter flag, not a child.
                continue
            text, node_type, tooltip = self.v.get_node_info(sub_modpath)
            report_progress(
                operation_id, float(count) / float(total), "Processing %s" % text
            )
            count += 1
            image_id = self.get_image_id_from_nodetype(node_type)
            if isinstance(file_tree[x], bool) or isinstance(x, tuple):
                # Leaf node (file or image plane).
                node_is_filtered = (not file_tree[x]) or is_filtered
                if node_is_filtered and not show_filtered:
                    continue
                if x in existing_items:
                    existing_items[x][1] = True
                    item_id = existing_items[x][0]
                    self.tree_ctrl.SetItemText(item_id, text)
                else:
                    item_id = self.add_item(sub_modpath, text, sort=False)
                    existing_items[x] = [item_id, True]
                    needs_sort = True
                self.tree_ctrl.SetItemImage(item_id, image_id)
            elif isinstance(file_tree[x], dict):
                # Folder (or composite/movie) node: recurse.
                subtree = file_tree[x]
                node_is_filtered = (not subtree[None]) or is_filtered
                (
                    unfiltered_subfolders,
                    filtered_subfolders,
                    unfiltered_files,
                    filtered_files,
                ) = self.get_file_and_folder_counts(subtree)
                n_subfolders = unfiltered_subfolders + filtered_subfolders
                n_files = unfiltered_files + filtered_files
                if node_is_filtered and not show_filtered:
                    continue
                if node_type in (
                    FileCollectionDisplay.NODE_COMPOSITE_IMAGE,
                    FileCollectionDisplay.NODE_MOVIE,
                ):
                    expanded_image_id = image_id
                else:
                    image_id = self.FOLDER_IMAGE_INDEX
                    expanded_image_id = self.FOLDER_OPEN_IMAGE_INDEX
                text = "" + x
                if n_subfolders > 0 or n_files > 0:
                    text += " ("
                    if n_subfolders > 0:
                        if node_is_filtered:
                            text += "\t%d folders" % n_subfolders
                        else:
                            text += "\t%d of %d folders" % (
                                unfiltered_subfolders,
                                n_subfolders,
                            )
                        if n_files > 0:
                            text += ", "
                    if n_files > 0:
                        if node_is_filtered:
                            text += "\t%d files" % n_files
                        else:
                            text += "\t%d of %d files" % (unfiltered_files, n_files)
                    text += ")"
                if x in existing_items:
                    existing_items[x][1] = True
                    item_id = existing_items[x][0]
                    self.tree_ctrl.SetItemText(item_id, text)
                else:
                    item_id = self.add_item(sub_modpath, text, sort=False)
                    existing_items[x] = [item_id, True]
                    needs_sort = True
                self.tree_ctrl.SetItemImage(item_id, image_id)
                self.tree_ctrl.SetItemImage(
                    item_id, expanded_image_id, wx.TreeItemIcon_Expanded
                )
                has_children = n_subfolders + n_files > 0
                self.tree_ctrl.SetItemHasChildren(item_id, has_children)
                count = self.update_subtree(
                    subtree,
                    item_id,
                    node_is_filtered,
                    sub_modpath,
                    operation_id,
                    count,
                    total,
                )
            color = self.FILTERED_COLOR if node_is_filtered else self.ACTIVE_COLOR
            self.tree_ctrl.SetItemTextColour(item_id, color)
        # Remove tree items that are no longer in the file tree.
        for last_part, (item_id, keep) in list(existing_items.items()):
            if not keep:
                self.remove_item(modpath + [last_part])
        if needs_sort:
            self.tree_ctrl.SortChildren(parent_item)
        return count

    def get_image_id_from_nodetype(self, node_type):
        """Map a FileCollectionDisplay node type to an IMAGE_LIST icon index"""
        if node_type == FileCollectionDisplay.NODE_COLOR_IMAGE:
            image_id = self.COLOR_IMAGE_INDEX
        elif node_type == FileCollectionDisplay.NODE_COMPOSITE_IMAGE:
            image_id = self.IMAGE_PLANES_IMAGE_INDEX
        elif node_type in (
            FileCollectionDisplay.NODE_MONOCHROME_IMAGE,
            FileCollectionDisplay.NODE_IMAGE_PLANE,
        ):
            image_id = self.IMAGE_PLANE_IMAGE_INDEX
        elif node_type == FileCollectionDisplay.NODE_MOVIE:
            image_id = self.MOVIE_IMAGE_INDEX
        else:
            image_id = self.FILE_IMAGE_INDEX
        return image_id

    @classmethod
    def get_file_and_folder_counts(cls, tree):
        """Count the number of files and folders in the tree

        returns the number of immediate unfiltered and filtered subfolders
        and number of unfiltered and filtered files in the hierarchy
        """
        unfiltered_subfolders = filtered_subfolders = 0
        unfiltered_files = filtered_files = 0
        for key in tree:
            if key is None:
                continue
            if isinstance(tree[key], bool):
                if tree[key]:
                    unfiltered_files += 1
                else:
                    filtered_files += 1
            else:
                is_filtered = not tree[key][None]
                # Bug fix: the filtered/unfiltered counters were swapped -
                # a filtered subfolder was counted as unfiltered and
                # vice versa.
                if is_filtered:
                    filtered_subfolders += 1
                else:
                    unfiltered_subfolders += 1
                ufolders, ffolders, ufiles, ffiles = cls.get_file_and_folder_counts(
                    tree[key]
                )
                filtered_files += ffiles
                if is_filtered:
                    # Everything under a filtered folder counts as filtered.
                    filtered_files += ufiles
                else:
                    unfiltered_files += ufiles
        return (
            unfiltered_subfolders,
            filtered_subfolders,
            unfiltered_files,
            filtered_files,
        )

    def on_hide_show_checked(self, event):
        """Hide/show checkbox toggled: refresh the tree with the new filter"""
        self.v.show_filtered = not self.hide_show_ctrl.GetValue()
        self.request_update()
|
<filename>CellProfiler/cellprofiler/gui/module_view/_file_collection_display_controller.py
import logging
import os
import sys
import uuid
import wx
from cellprofiler_core.preferences import report_progress
from cellprofiler_core.setting import FileCollectionDisplay
from ..pipeline import Pipeline
from ..utilities.module_view import edit_control_name
from ...icons import get_builtin_image
class FileCollectionDisplayController:
"""This class provides the UI for the file collection display
The UI has a browse button, a hide checkbox and a tree control.
Critical attributes:
self.walks_in_progress - this is a dictionary of keys to directory walks
and metadata fetches that are happening in the
background. The value of the dictionary entry
is the function to call to stop the search.
There's a completion callback that's called to
remove an entry from the dictionary. When the
dictionary size reaches zero, the stop and pause
buttons are disabled.
self.modpath_to_item - a modpath is a collection of path parts to some file
handled by the controller. There's a tree item
for every modpath in this dictionary and the
dictionary can be used for fast lookup of the
item without traversing the entire tree.
"""
IMAGE_LIST = wx.ImageList(16, 16, 3)
FOLDER_IMAGE_INDEX = IMAGE_LIST.Add(
wx.ArtProvider.GetBitmap(wx.ART_FOLDER, wx.ART_OTHER, size=(16, 16))
)
FOLDER_OPEN_IMAGE_INDEX = IMAGE_LIST.Add(
wx.ArtProvider.GetBitmap(wx.ART_FOLDER_OPEN, wx.ART_OTHER, size=(16, 16))
)
FILE_IMAGE_INDEX = IMAGE_LIST.Add(
wx.ArtProvider.GetBitmap(wx.ART_NORMAL_FILE, wx.ART_OTHER, size=(16, 16))
)
IMAGE_PLANE_IMAGE_INDEX = IMAGE_LIST.Add(
get_builtin_image("microscope-icon_16").ConvertToBitmap()
)
IMAGE_PLANES_IMAGE_INDEX = IMAGE_LIST.Add(
get_builtin_image("microscopes_16").ConvertToBitmap()
)
COLOR_IMAGE_INDEX = IMAGE_LIST.Add(
get_builtin_image("microscope-color_16").ConvertToBitmap()
)
MOVIE_IMAGE_INDEX = IMAGE_LIST.Add(get_builtin_image("movie_16").ConvertToBitmap())
ACTIVE_COLOR = wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOWTEXT)
FILTERED_COLOR = wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT)
class FCDCDropTarget(wx.PyDropTarget):
def __init__(self, file_callback_fn, text_callback_fn):
super(self.__class__, self).__init__()
self.file_callback_fn = file_callback_fn
self.text_callback_fn = text_callback_fn
self.file_data_object = wx.FileDataObject()
self.text_data_object = wx.TextDataObject()
self.composite_data_object = wx.DataObjectComposite()
self.composite_data_object.Add(self.file_data_object, True)
self.composite_data_object.Add(self.text_data_object)
self.SetDataObject(self.composite_data_object)
def OnDropFiles(self, x, y, filenames):
self.file_callback_fn(x, y, filenames)
def OnDropText(self, x, y, text):
self.text_callback_fn(x, y, text)
@staticmethod
def OnEnter(x, y, d):
return wx.DragCopy
@staticmethod
def OnDragOver(x, y, d):
return wx.DragCopy
def OnData(self, x, y, d):
if self.GetData():
df = self.composite_data_object.GetReceivedFormat().GetType()
if df in (wx.DF_TEXT, wx.DF_UNICODETEXT):
self.OnDropText(x, y, self.text_data_object.GetText())
elif df == wx.DF_FILENAME:
self.OnDropFiles(x, y, self.file_data_object.GetFilenames())
return wx.DragCopy
@staticmethod
def OnDrop(x, y):
return True
def __init__(self, module_view, v, pipeline):
assert isinstance(v, FileCollectionDisplay)
self.module_view = module_view
self.v = v
assert isinstance(pipeline, Pipeline)
self.pipeline = pipeline
self.panel = wx.Panel(
self.module_view.module_panel, -1, name=edit_control_name(v)
)
self.panel.controller = self
self.panel.Sizer = wx.BoxSizer(wx.VERTICAL)
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.panel.Sizer.Add(sizer, 0, wx.EXPAND)
self.status_text = wx.StaticText(self.panel, -1)
sizer.Add(self.status_text, 0, wx.ALIGN_LEFT | wx.ALIGN_CENTER)
sizer.AddStretchSpacer()
sizer.Add(
wx.StaticText(self.panel, -1, "Drag folders and/or files here or"),
0,
wx.ALIGN_LEFT | wx.ALIGN_CENTER,
)
sizer.AddSpacer((3, 0))
browse_button = wx.Button(self.panel, -1, "Browse...")
sizer.Add(browse_button, 0, wx.ALIGN_LEFT | wx.ALIGN_CENTER)
browse_button.Bind(wx.EVT_BUTTON, self.on_browse)
tree_style = wx.TR_HIDE_ROOT | wx.TR_HAS_BUTTONS | wx.TR_MULTIPLE
self.tree_ctrl = wx.TreeCtrl(self.panel, -1, style=tree_style)
self.panel.Sizer.Add(self.tree_ctrl, 1, wx.EXPAND)
self.tree_ctrl.SetImageList(self.IMAGE_LIST)
self.tree_ctrl.Bind(wx.EVT_TREE_ITEM_MENU, self.on_tree_item_menu)
self.tree_ctrl.Bind(wx.EVT_TREE_KEY_DOWN, self.on_tree_key_down)
#
# Don't auto-expand after the user collapses a node.
#
self.user_collapsed_a_node = False
def on_item_collapsed(event):
logging.debug("On item collapsed")
self.user_collapsed_a_node = True
self.tree_ctrl.Bind(wx.EVT_TREE_ITEM_COLLAPSED, on_item_collapsed)
self.tree_ctrl.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.on_tree_doubleclick)
self.tree_ctrl.Bind(wx.EVT_ERASE_BACKGROUND, self.on_erase_background)
self.panel.Bind(wx.EVT_WINDOW_DESTROY, self.on_destroy)
self.root_item = self.tree_ctrl.AddRoot("I am the invisible root")
self.tree_ctrl.SetPyData(self.root_item, None)
self.tree_ctrl.SetItemImage(self.root_item, self.FOLDER_IMAGE_INDEX)
self.tree_ctrl.SetItemImage(
self.root_item, self.FOLDER_OPEN_IMAGE_INDEX, wx.TreeItemIcon_Expanded
)
self.tree_ctrl.SetMinSize((100, 300))
self.tree_ctrl.SetMaxSize((sys.maxsize, 300))
self.file_drop_target = self.FCDCDropTarget(
self.on_drop_files, self.on_drop_text
)
self.tree_ctrl.SetDropTarget(self.file_drop_target)
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.panel.Sizer.Add(sizer, 0, wx.EXPAND)
self.hide_show_ctrl = wx.CheckBox(self.panel, -1, self.v.hide_text)
sizer.Add(self.hide_show_ctrl, 0, wx.ALIGN_LEFT | wx.ALIGN_BOTTOM)
self.hide_show_ctrl.Bind(wx.EVT_CHECKBOX, self.on_hide_show_checked)
self.hide_show_ctrl.SetValue(not self.v.show_filtered)
sizer.AddStretchSpacer()
self.stop_button = wx.Button(self.panel, -1, "Stop")
self.stop_button.Enable(False)
self.stop_button.Bind(wx.EVT_BUTTON, self.on_stop)
sizer.Add(self.stop_button, 0, wx.ALIGN_LEFT | wx.ALIGN_CENTER_VERTICAL)
self.pause_button = wx.Button(self.panel, -1, "Pause")
self.pause_button.Enable(False)
self.pause_button.Bind(wx.EVT_BUTTON, self.on_pause_resume)
sizer.Add(self.pause_button, 0, wx.ALIGN_LEFT | wx.ALIGN_CENTER_VERTICAL)
v.set_update_function(self.request_update)
self.needs_update = False
self.modpath_to_item = {}
self.request_update()
    def __del__(self):
        # Defensive cleanup: mirror on_destroy in case the wx destroy
        # event never fired (e.g. interpreter shutdown order).
        self.on_destroy(None)
    def on_destroy(self, event):
        # Detach this panel's update callback from the controller so the
        # background machinery stops calling into a destroyed window.
        self.v.set_update_function()
def on_erase_background(self, event):
assert isinstance(event, wx.EraseEvent)
dc = event.GetDC()
assert isinstance(dc, wx.DC)
brush = wx.Brush(self.tree_ctrl.GetBackgroundColour())
dc.SetBrush(brush)
dc.SetPen(wx.TRANSPARENT_PEN)
width, height = self.tree_ctrl.GetSize()
dc.DrawRectangle(0, 0, width, height)
if len(self.modpath_to_item) == 0:
text = "Drop files and folders here"
font = wx.Font(
36, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD
)
dc.SetTextForeground(wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT))
dc.SetFont(font)
text_width, text_height = dc.GetTextExtent(text)
dc.DrawText(text, (width - text_width) / 2, (height - text_height) / 2)
def on_browse(self, event):
logging.debug("Browsing for file collection directory")
dlg = wx.DirDialog(self.panel, "Select a directory to add")
try:
if dlg.ShowModal() == wx.ID_OK:
self.v.fn_on_drop([dlg.GetPath()], True)
finally:
dlg.Destroy()
def on_start_received(self):
self.pause_button.Label = "Pause"
self.pause_button.Enable(True)
self.stop_button.Enable(True)
def on_stop_received(self):
self.pause_button.Enable(False)
self.stop_button.Enable(False)
def on_stop(self, event):
"""Stop button pressed"""
self.v.fn_on_bkgnd_control(self.v.BKGND_STOP)
self.pause_button.Label = "Pause"
self.pause_button.Enable(False)
self.stop_button.Enable(False)
def on_pause_resume(self, event):
"""Pause / resume pressed"""
if self.pause_button.Label == "Pause":
action = self.v.BKGND_PAUSE
self.pause_button.Label = "Resume"
else:
action = self.v.BKGND_RESUME
self.pause_button.Label = "Pause"
self.v.fn_on_bkgnd_control(action)
    def add_item(self, modpath, text=None, sort=True):
        """Add an item to the tree

        modpath - a collection of path parts to the item in the tree
        text - the text to appear in the item (defaults to the last part)
        sort - insert in alphabetical order among siblings (default True)

        Returns the tree item id. Missing ancestors are created recursively
        and given folder icons; an existing item just has its label updated.
        """
        parent_key = tuple(modpath[:-1])
        modpath = tuple(modpath)
        if modpath in self.modpath_to_item:
            # Already displayed: refresh the label if one was supplied.
            item = self.modpath_to_item[modpath]
            if text is not None:
                self.tree_ctrl.SetItemText(item, text)
            return item
        if text is None:
            text = modpath[-1]
        if len(modpath) == 1:
            parent_item = self.root_item
        elif parent_key in self.modpath_to_item:
            parent_item = self.modpath_to_item[parent_key]
        else:
            # Ancestor is missing: create it recursively, as a folder.
            parent_item = self.add_item(parent_key, sort=sort)
            self.tree_ctrl.SetItemImage(parent_item, self.FOLDER_IMAGE_INDEX)
            self.tree_ctrl.SetItemImage(
                parent_item, self.FOLDER_OPEN_IMAGE_INDEX, wx.TreeItemIcon_Expanded
            )
        # First item added: the "drop files here" hint must be repainted away.
        want_erase = len(self.modpath_to_item) == 0
        #
        # Put in alpha order
        #
        n_children = self.tree_ctrl.GetChildrenCount(parent_item)
        if n_children == 0 or not sort:
            item = self.tree_ctrl.AppendItem(parent_item, text)
        else:
            child, cookie = self.tree_ctrl.GetFirstChild(parent_item)
            for i in range(n_children):
                ctext = self.tree_ctrl.GetItemText(child)
                if ctext > text:
                    item = self.tree_ctrl.InsertItemBefore(parent_item, i, text)
                    break
                child = self.tree_ctrl.GetNextSibling(child)
            else:
                # No larger sibling found: append at the end.
                item = self.tree_ctrl.AppendItem(parent_item, text)
        self.tree_ctrl.SetPyData(item, modpath[-1])
        self.modpath_to_item[modpath] = item
        if want_erase:
            self.tree_ctrl.Refresh(True)
        return item
    def remove_item(self, modpath):
        """Remove the item at `modpath` and, recursively, all its children.

        No-op if the modpath has no corresponding displayed item.
        """
        modpath = tuple(modpath)
        if modpath in self.modpath_to_item:
            item = self.modpath_to_item[modpath]
            n_children = self.tree_ctrl.GetChildrenCount(item, False)
            if n_children > 0:
                # Collect the child tokens first - deleting while walking
                # the sibling chain would invalidate the traversal.
                child, cookie = self.tree_ctrl.GetFirstChild(item)
                child_tokens = []
                for i in range(n_children):
                    child_tokens.append(self.tree_ctrl.GetItemPyData(child))
                    child = self.tree_ctrl.GetNextSibling(child)
                for child_token in child_tokens:
                    sub_modpath = list(modpath) + [child_token]
                    self.remove_item(sub_modpath)
            self.tree_ctrl.Delete(self.modpath_to_item[modpath])
            del self.modpath_to_item[modpath]
@classmethod
def get_modpath(cls, path):
"""Break a path into its components"""
result = []
while True:
new_path, part = os.path.split(path)
if len(new_path) == 0 or len(part) == 0:
result.insert(0, path)
return result
result.insert(0, part)
path = new_path
    def on_drop_files(self, x, y, filenames):
        # Files dropped from the OS: forward to the controller.
        # NOTE(review): second arg mirrors on_browse (True) vs. on_drop_text
        # (False); its exact meaning lives in the controller - verify there.
        self.v.fn_on_drop(filenames, True)
def on_drop_text(self, x, y, text):
"""Text is assumed to be one file name per line"""
filenames = [line.strip() for line in text.split("\n") if len(line.strip()) > 0]
self.v.fn_on_drop(filenames, False)
def get_path_from_event(self, event):
"""Given a tree control event, find the path from the root
event - event from tree control (e.g., EVT_TREE_ITEM_ACTIVATED)
returns a sequence of path items from the root
"""
item = event.GetItem()
path = []
while True:
item_data = self.tree_ctrl.GetItemPyData(item)
if item_data is None:
break
path.insert(0, item_data)
item = self.tree_ctrl.GetItemParent(item)
return path
    def on_tree_item_menu(self, event):
        """Pop up the per-item context menu supplied by the controller.

        Menu entries flagged as DeleteMenuItem route to on_delete_selected;
        all others are forwarded to the controller's menu-command handler.
        """
        logging.debug("On tree item menu")
        path = self.get_path_from_event(event)
        if len(path) == 0:
            logging.warn("Could not find item associated with tree event")
            return
        context_menu = self.v.get_context_menu(path)
        if len(context_menu) > 0:
            menu = wx.Menu()
            try:
                # Ids of entries that mean "delete" - they get special
                # handling in the click handler below.
                delete_menu_items = []
                for context_item in context_menu:
                    if isinstance(context_item, FileCollectionDisplay.DeleteMenuItem,):
                        delete_menu_items.append(menu.Append(-1, context_item.text).Id)
                    else:
                        menu.Append(-1, context_item)
                def on_menu(event):
                    logging.debug("On menu")
                    # Bracket the command in an undoable action so the whole
                    # menu operation undoes as one step.
                    self.pipeline.start_undoable_action()
                    try:
                        for menu_item in menu.GetMenuItems():
                            if menu_item.Id == event.Id:
                                logging.debug(" Command = %s" % menu_item.Text)
                                if menu_item.Id in delete_menu_items:
                                    self.on_delete_selected(event)
                                else:
                                    self.v.fn_on_menu_command(path, menu_item.Text)
                                break
                    finally:
                        self.pipeline.stop_undoable_action()
                # Bind only for the lifetime of the popup, then unbind.
                self.tree_ctrl.Bind(wx.EVT_MENU, on_menu)
                self.tree_ctrl.PopupMenu(menu, event.GetPoint())
                self.tree_ctrl.Unbind(wx.EVT_MENU, handler=on_menu)
            finally:
                menu.Destroy()
    def on_tree_doubleclick(self, event):
        # Double-click = run the default command (menu command of None).
        path = self.get_path_from_event(event)
        if self.v.fn_on_menu_command(path, None):
            return True
def on_tree_key_down(self, event):
logging.debug("On tree key down")
key = event.GetKeyCode()
if key == wx.WXK_DELETE:
self.on_delete_selected(event)
    def on_delete_selected(self, event):
        # Resolve every selected item to its modpath address, then ask the
        # controller to remove the corresponding subtrees in one call.
        mods = [self.get_item_address(item) for item in self.tree_ctrl.GetSelections()]
        mods = [x for x in mods if x is not None]
        self.v.on_remove([self.v.get_tree_modpaths(mod) for mod in mods])
def get_item_address(self, item):
"""Get an item's address as a collection of names"""
result = []
while True:
name = self.tree_ctrl.GetItemPyData(item)
if name is None:
break
else:
result.insert(0, name)
item = self.tree_ctrl.GetItemParent(item)
return result
def get_item_from_modpath(self, modpath):
"""Get an item from its modpath
returns the tree item id or None if not found.
"""
return self.modpath_to_item.get(tuple(modpath))
def request_update(self, hint=None, modpath=None):
if hint == FileCollectionDisplay.BKGND_RESUME:
self.on_start_received()
return
if hint == FileCollectionDisplay.BKGND_STOP:
self.on_stop_received()
self.status_text.Label = "Idle..."
return
if modpath is not None and len(modpath) > 0:
#
# Descend down the leftmost side of all of the tuples
# to get something we can display
#
path = []
mp = modpath[0]
any_others = len(modpath) > 1
if hint != FileCollectionDisplay.REMOVE:
# It's likely that the leaf was removed and it doesn't
# make sense to descend
file_tree = self.v.file_tree
is_filtered = False
while True:
if isinstance(mp, str) or isinstance(mp, tuple) and len(mp) == 3:
path.append(mp)
if hint != FileCollectionDisplay.REMOVE:
is_filtered = not file_tree[mp]
break
part, mp_list = mp
path.append(part)
if hint != FileCollectionDisplay.REMOVE:
file_tree = file_tree[part]
if len(mp_list) == 0:
is_filtered = not file_tree[None]
break
any_others = any_others or len(mp_list) > 1
mp = mp_list[0]
if hint != FileCollectionDisplay.REMOVE:
self.status_text.Label = (
"Processing " + path[-1] if isinstance(path[-1], str) else path[-2]
)
self.status_text.Update()
if not any_others:
#
# It's just a modification to a single node. Try and handle
# here.
#
if hint == FileCollectionDisplay.METADATA:
if (not self.v.show_filtered) and is_filtered:
return
item_id = self.get_item_from_modpath(path)
if item_id is not None:
text, node_type, tooltip = self.v.get_node_info(path)
image_id = self.get_image_id_from_nodetype(node_type)
self.tree_ctrl.SetItemText(item_id, text)
self.tree_ctrl.SetItemImage(item_id, image_id)
return
elif hint == FileCollectionDisplay.ADD:
if self.get_item_from_modpath(path) is None:
text, node_type, tooltip = self.v.get_node_info(path)
item_id = self.add_item(path, text)
image_id = self.get_image_id_from_nodetype(node_type)
self.tree_ctrl.SetItemImage(item_id, image_id)
self.manage_expansion()
return
elif hint == FileCollectionDisplay.REMOVE:
if is_filtered:
return
self.remove_item(path)
if len(path) > 1:
super_modpath = tuple(path[:-1])
if super_modpath in self.modpath_to_item:
item = self.modpath_to_item[super_modpath]
n_children = self.tree_ctrl.GetChildrenCount(item, False)
if n_children == 0:
self.remove_item(super_modpath)
return
self.update()
    def update(self):
        """Rebuild the whole tree from the controller's file tree."""
        # Progress reports are keyed by a fresh operation id so interleaved
        # operations don't clobber each other's progress.
        operation_id = uuid.uuid4()
        total = self.v.node_count()
        if total == 0:
            return
        self.update_subtree(
            self.v.file_tree, self.root_item, False, [], operation_id, 0, total
        )
        self.manage_expansion()
        # Fraction 1 = done.
        report_progress(operation_id, 1, None)
    def manage_expansion(self):
        """Handle UI expansion issues

        Make sure that the tree is auto-expanded if appropriate and that
        the root nodes are expanded. Auto-expansion is suppressed once the
        user has manually collapsed a node (self.user_collapsed_a_node).
        """
        if not self.user_collapsed_a_node:
            #
            # Expand all until we reach a node that has more than
            # one child = ambiguous choice of which to expand
            #
            item = self.root_item
            while self.tree_ctrl.GetChildrenCount(item, False) == 1:
                # Can't expand the invisible root for Mac
                if sys.platform != "darwin" or item != self.root_item:
                    self.tree_ctrl.Expand(item)
                item, cookie = self.tree_ctrl.GetFirstChild(item)
            if self.tree_ctrl.GetChildrenCount(item, False) > 0:
                self.tree_ctrl.Expand(item)
        #
        # The bottom-most nodes don't have expand buttons (why?). If you
        # have two bottom-most nodes, neither will be expanded and there
        # is no way to expand them using the UI. So, we need to make sure
        # all bottom-most nodes are expanded, no matter what.
        #
        for i in range(self.tree_ctrl.GetChildrenCount(self.root_item, False)):
            if i == 0:
                bottom_item, thing = self.tree_ctrl.GetFirstChild(self.root_item)
            else:
                bottom_item, thing = self.tree_ctrl.GetNextChild(self.root_item, thing)
            if not self.tree_ctrl.IsExpanded(bottom_item):
                self.tree_ctrl.Expand(bottom_item)
    def update_subtree(
        self, file_tree, parent_item, is_filtered, modpath, operation_id, count, total
    ):
        """Reconcile the children of `parent_item` against `file_tree`.

        file_tree - dict of name -> bool (leaf; True = unfiltered) or
                    name -> dict (folder; its None key is the folder's own
                    unfiltered flag)
        is_filtered - True if an ancestor is filtered (inherited downward)
        count/total - running progress numerator / fixed denominator

        Returns the updated progress `count`. Items no longer in file_tree
        are removed; new ones are added and siblings re-sorted at the end.
        """
        # Map of child token -> [item_id, keep?]; entries never re-seen in
        # file_tree get removed at the bottom.
        existing_items = {}
        show_filtered = self.v.show_filtered
        needs_sort = False
        child_count = self.tree_ctrl.GetChildrenCount(parent_item, False)
        if child_count > 0:
            child_item_id, cookie = self.tree_ctrl.GetFirstChild(parent_item)
            for i in range(child_count):
                existing_items[self.tree_ctrl.GetItemPyData(child_item_id)] = [
                    child_item_id,
                    False,
                ]
                if i < child_count - 1:
                    child_item_id = self.tree_ctrl.GetNextSibling(child_item_id)
        # NOTE(review): sorted() over keys that may include the None filter
        # key relies on Python 2 mixed-type ordering; under Python 3 this
        # raises TypeError - verify the target runtime.
        for x in sorted(file_tree.keys()):
            sub_modpath = modpath + [x]
            if x is None:
                continue
            text, node_type, tooltip = self.v.get_node_info(sub_modpath)
            report_progress(
                operation_id, float(count) / float(total), "Processing %s" % text
            )
            count += 1
            image_id = self.get_image_id_from_nodetype(node_type)
            if isinstance(file_tree[x], bool) or isinstance(x, tuple):
                # Leaf node (file or image-plane tuple).
                node_is_filtered = (not file_tree[x]) or is_filtered
                if node_is_filtered and not show_filtered:
                    continue
                if x in existing_items:
                    existing_items[x][1] = True
                    item_id = existing_items[x][0]
                    self.tree_ctrl.SetItemText(item_id, text)
                else:
                    item_id = self.add_item(sub_modpath, text, sort=False)
                    existing_items[x] = (item_id, True)
                    needs_sort = True
                self.tree_ctrl.SetItemImage(item_id, image_id)
            elif isinstance(file_tree[x], dict):
                # Folder node: recurse after refreshing the label/icons.
                subtree = file_tree[x]
                node_is_filtered = (not subtree[None]) or is_filtered
                (
                    unfiltered_subfolders,
                    filtered_subfolders,
                    unfiltered_files,
                    filtered_files,
                ) = self.get_file_and_folder_counts(subtree)
                n_subfolders = unfiltered_subfolders + filtered_subfolders
                n_files = unfiltered_files + filtered_files
                if node_is_filtered and not show_filtered:
                    continue
                if node_type in (
                    FileCollectionDisplay.NODE_COMPOSITE_IMAGE,
                    FileCollectionDisplay.NODE_MOVIE,
                ):
                    expanded_image_id = image_id
                else:
                    image_id = self.FOLDER_IMAGE_INDEX
                    expanded_image_id = self.FOLDER_OPEN_IMAGE_INDEX
                # Build a "name (N of M folders, N of M files)" label.
                text = "" + x
                if n_subfolders > 0 or n_files > 0:
                    text += " ("
                    if n_subfolders > 0:
                        if node_is_filtered:
                            text += "\t%d folders" % n_subfolders
                        else:
                            text += "\t%d of %d folders" % (
                                unfiltered_subfolders,
                                n_subfolders,
                            )
                        if n_files > 0:
                            text += ", "
                    if n_files > 0:
                        if node_is_filtered:
                            text += "\t%d files" % n_files
                        else:
                            text += "\t%d of %d files" % (unfiltered_files, n_files)
                    text += ")"
                if x in existing_items:
                    existing_items[x][1] = True
                    item_id = existing_items[x][0]
                    self.tree_ctrl.SetItemText(item_id, text)
                else:
                    item_id = self.add_item(sub_modpath, text, sort=False)
                    existing_items[x] = (item_id, True)
                    needs_sort = True
                self.tree_ctrl.SetItemImage(item_id, image_id)
                self.tree_ctrl.SetItemImage(
                    item_id, expanded_image_id, wx.TreeItemIcon_Expanded
                )
                has_children = n_subfolders + n_files > 0
                self.tree_ctrl.SetItemHasChildren(item_id, has_children)
                count = self.update_subtree(
                    subtree,
                    item_id,
                    node_is_filtered,
                    sub_modpath,
                    operation_id,
                    count,
                    total,
                )
                color = self.FILTERED_COLOR if node_is_filtered else self.ACTIVE_COLOR
                self.tree_ctrl.SetItemTextColour(item_id, color)
        # Remove children that are no longer present in file_tree.
        for last_part, (item_id, keep) in list(existing_items.items()):
            if not keep:
                self.remove_item(modpath + [last_part])
        if needs_sort:
            self.tree_ctrl.SortChildren(parent_item)
        return count
def get_image_id_from_nodetype(self, node_type):
if node_type == FileCollectionDisplay.NODE_COLOR_IMAGE:
image_id = self.COLOR_IMAGE_INDEX
elif node_type == FileCollectionDisplay.NODE_COMPOSITE_IMAGE:
image_id = self.IMAGE_PLANES_IMAGE_INDEX
elif node_type in (
FileCollectionDisplay.NODE_MONOCHROME_IMAGE,
FileCollectionDisplay.NODE_IMAGE_PLANE,
):
image_id = self.IMAGE_PLANE_IMAGE_INDEX
elif node_type == FileCollectionDisplay.NODE_MOVIE:
image_id = self.MOVIE_IMAGE_INDEX
else:
image_id = self.FILE_IMAGE_INDEX
return image_id
@classmethod
def get_file_and_folder_counts(cls, tree):
"""Count the number of files and folders in the tree
returns the number of immediate unfiltered and filtered subfolders
and number of unfiltered and filtered files in the hierarchy
"""
unfiltered_subfolders = filtered_subfolders = 0
unfiltered_files = filtered_files = 0
for key in tree:
if key is None:
continue
if isinstance(tree[key], bool):
if tree[key]:
unfiltered_files += 1
else:
filtered_files += 1
else:
is_filtered = not tree[key][None]
if is_filtered:
unfiltered_subfolders += 1
else:
filtered_subfolders += 1
ufolders, ffolders, ufiles, ffiles = cls.get_file_and_folder_counts(
tree[key]
)
filtered_files += ffiles
if is_filtered:
filtered_files += ufiles
else:
unfiltered_files += ufiles
return (
unfiltered_subfolders,
filtered_subfolders,
unfiltered_files,
filtered_files,
)
    def on_hide_show_checked(self, event):
        # The checkbox is "hide filtered"; the view stores the inverse
        # ("show filtered") and a full refresh applies it.
        self.v.show_filtered = not self.hide_show_ctrl.GetValue()
        self.request_update()
|
en
| 0.894447
|
This class provides the UI for the file collection display The UI has a browse button, a hide checkbox and a tree control. Critical attributes: self.walks_in_progress - this is a dictionary of keys to directory walks and metadata fetches that are happening in the background. The value of the dictionary entry is the function to call to stop the search. There's a completion callback that's called to remove an entry from the dictionary. When the dictionary size reaches zero, the stop and pause buttons are disabled. self.modpath_to_item - a modpath is a collection of path parts to some file handled by the controller. There's a tree item for every modpath in this dictionary and the dictionary can be used for fast lookup of the item without traversing the entire tree. # # Don't auto-expand after the user collapses a node. # Stop button pressed Pause / resume pressed Add an item to the tree modpath - a collection of path parts to the item in the tree text - the text to appear in the item # # Put in alpha order # Break a path into its components Text is assumed to be one file name per line Given a tree control event, find the path from the root event - event from tree control (e.g., EVT_TREE_ITEM_ACTIVATED) returns a sequence of path items from the root Get an item's address as a collection of names Get an item from its modpath returns the tree item id or None if not found. # # Descend down the leftmost side of all of the tuples # to get something we can display # # It's likely that the leaf was removed and it doesn't # make sense to descend # # It's just a modification to a single node. Try and handle # here. # Handle UI expansion issues Make sure that the tree is auto-expanded if appropriate and that the root nodes are expanded. # # Expand all until we reach a node that has more than # one child = ambiguous choice of which to expand # # Can't expand the invisible root for Mac # # The bottom-most nodes don't have expand buttons (why?). 
If you # have two bottom-most nodes, neither will be expanded and there # is no way to expand them using the UI. So, we need to make sure # all bottom-most nodes are expanded, no matter what. # Count the number of files and folders in the tree returns the number of immediate unfiltered and filtered subfolders and number of unfiltered and filtered files in the hierarchy
| 2.21531
| 2
|
discord_bot/discord_bot.py
|
SpiderDAO/spiderdao_testnet
| 0
|
6627187
|
<filename>discord_bot/discord_bot.py
import discord
from discord.ext import commands
from substrateinterface import SubstrateInterface, Keypair
import asyncio
import os
import sys
import logging
import sys
sys.path.insert(0, '../src')
from spiderdao import *
# Log to both stdout and bot_logs.log with "<timestamp> | <message>" lines.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s | %(message)s')
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.DEBUG)
stdout_handler.setFormatter(formatter)
file_handler = logging.FileHandler('bot_logs.log')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stdout_handler)
def print_logs(*argv):
    """Log all arguments at INFO level, each prefixed with a space."""
    message = "".join(" " + str(piece) for piece in argv)
    logger.info(message)
# NOTE(review): `botcalls` aliases `botcalls_mods` (star-imported from
# spiderdao), so the in-place sort below also mutates the imported mapping.
botcalls = botcalls_mods
for bc in botcalls:
    botcalls[bc] = sorted(botcalls[bc])
# Alphabetical module names for the !helpall listing.
botmods_list = sorted(botcalls.keys())
# GLOBALS
TOKEN = os.environ.get('BOT_TOKEN')
pub_channel_id = int(os.environ.get('pub_channel_id'))
#private key of sudo key, used to set testnet tokens for bot users
SUDO_KEY = os.environ.get('SUDO_KEY')
launch_period = os.environ.get('LAUNCH_PERIOD')
# discord author -> {"keypair": Keypair}; populated by !wallet create/import.
bot_users = {}
bot = commands.Bot(command_prefix='!')
# NOTE(review): get_channel() before bot.run() likely returns None (channel
# cache is empty until connected); the event senders below re-fetch the
# channel themselves, so this value appears unused - verify and remove.
pub_channel = bot.get_channel(pub_channel_id)
async def cb_ch(event_index, lev, ebh):
    """Chain-event callback: route democracy events to the Discord channel."""
    if event_index == "1e01":  # 'event_id': 'Tabled'
        print_logs("CALLED send_referendum_started")
        await send_referendum_started(lev, ebh)
    elif event_index in ("1e04", "1e05"):  # 'Passed' or 'NotPassed'
        print_logs("CALLED send_referendum_results")
        await send_referendum_results(lev, ebh)
# Chain events listener: connects to the chain and forwards events to cb_ch.
chain_evs = None
try:
    chain_evs = SpiderDaoChain(chain_events_cb=cb_ch)
except Exception:
    # BUGFIX: catch Exception instead of a bare except (so Ctrl-C still
    # works) and exit non-zero so supervisors see that startup failed.
    print_logs("Couldn't start SpiderDAOChain, check that the chain is running")
    sys.exit(1)
print_logs("SpiderDAO BOT START")
# Human-readable usage lines shown by !hi and echoed on command errors.
democracy_commands = [
    "!helpall modules",
    "!helpmods [module name]",
    "!wallet [create|import [seed phrase]]",
    "!propose [module name] [module call] [call parameters]",
    "!second [proposal index]",
    "!vote [referendum index] [yes|no]",
    "!balance [address]",
    "!ref [referendum index]",
    "!proposals [proposal index]",
    "!send [address] [value]"
]
@bot.event
async def on_command_error(ctx, error):
    """Report any command error to the user along with the command list."""
    usage = " 👈\n".join(democracy_commands)
    rep_cmd_err = f"{str(error)}, Available Commands:\n" + usage
    print_logs("BOT ERROR", str(error))
    await ctx.send(rep_cmd_err)
@bot.command(name='hi', help='Shows bot commands')
async def bot_greet(ctx, *arg):
    """Reply to !hi with the list of available commands."""
    print_logs(*arg)
    command_list = " 👈\n".join(democracy_commands)
    await ctx.send("Available Commands:\n" + command_list)
@bot.command(name='wallet', help='Create or Import wallet')
async def bot_wallet(ctx, *arg):
    """Create a new wallet or import one from a seed phrase (DM only).

    Usage: `!wallet create` or `!wallet import [seed phrase]`.
    Wallets with no balance are topped up with testnet SPDR via the sudo key.
    """
    if str(ctx.channel.type) != "private":
        await ctx.send("Use !wallet commands in Private channel only")
        return

    def _wallet_info(spdr, n_keypair):
        # Format the wallet-details message (balance in whole SPDR).
        # This was duplicated verbatim in the create and import branches;
        # also fixes a stray trailing backtick after "SPDR".
        balance = spdr.get_balance(n_keypair.ss58_address)
        balance = str(round(float(balance / spdr.CHAIN_DEC), 4))
        return f" \
        Seed phrase: `{n_keypair.mnemonic}`\n \
        Address: `{n_keypair.ss58_address}`\n \
        Public Key: `{n_keypair.public_key}`\n \
        Private key: `{n_keypair.private_key}`\n \
        Current Balance: `{balance}` SPDR\n"

    async with ctx.typing():
        cmd = arg[0].strip()
        if cmd == "create":
            spdr = SpiderDaoInterface()
            n_keypair = spdr.create_wallet()
            # Set initial balance for Discord bot testnet users.
            current_balance = spdr.get_balance(n_keypair.ss58_address)
            if current_balance == 0 or current_balance is None:
                if not spdr.set_balance(n_keypair.ss58_address):
                    await ctx.send(f"{ctx.author}: Error Transferring initial balance")
                    return
            bot_users[ctx.author] = {
                "keypair": n_keypair
            }
            await ctx.send(f"Wallet Created:\n{_wallet_info(spdr, n_keypair)}")
        elif cmd == "import":
            print_logs("AUTHOR", ctx.author)
            mnemonic = arg[1].strip()
            spdr = None
            try:
                spdr = SpiderDaoInterface(mnemonic=mnemonic)
            except Exception:
                # Narrowed from a bare except so Ctrl-C etc. still propagate.
                await ctx.send(f"Error Importing Wallet")
                return
            if spdr is None:
                await ctx.send(f"Error Importing Wallet")
                return
            n_keypair = spdr.keypair
            try:
                current_balance = spdr.get_balance(n_keypair.ss58_address)
                if current_balance == 0 or current_balance is None:
                    if not spdr.set_balance(n_keypair.ss58_address):
                        await ctx.send(f"{ctx.author}: Error Transferring initial balance")
                        return
            except Exception:
                await ctx.send(f"Wallet not found")
                return
            bot_users[ctx.author] = {
                "keypair": n_keypair
            }
            await ctx.send(f"Wallet Imported:\n{_wallet_info(spdr, n_keypair)}")
        else:
            await ctx.send(f"{cmd}: Wrong wallet command, usage !wallet create or !wallet import [seed_phrase]")
    return
@bot.command(name='helpall', help='Bot Help')
async def bot_help(ctx, *arg):
    """List chain modules: !helpall [modules|calls|call <mod> <call>].

    NOTE(review): the "calls"/"call" branches reference `chn`, which is not
    defined in this module - presumably exported by spiderdao's star import;
    verify, otherwise those branches raise NameError. Also, `str(*arg)` only
    works for zero or one argument - multiple args raise TypeError.
    """
    print_logs(*arg)
    h_m = "modules"
    if "" == str(*arg).strip():
        h_m = "modules"
    else:
        h_m = arg[0].strip()
    if h_m == "modules":
        res = ""
        smo = botmods_list
        for s in smo:
            res = res + "🧩 " + s + "\n"
        res = str(res)
        # Truncate to stay well inside Discord's 2000-character message cap.
        res = res[:min(len(res), 300)]
        msg_rep = "Available Modules: " + str(len(botcalls.keys())) + " | 👉 !helpmods [Module name] for module calls" + "\n"+"\n"+res
        await ctx.send(msg_rep)
    if h_m == "calls":
        res = chn.get_modules_calls()
        res = str(res)
        res = res[:min(len(res), 100)]
        await ctx.send("Available Commands:\n"+"\n"+res)
    if h_m == "call":
        mod_id = arg[1]
        call_id = arg[2]
        res = chn.get_module_call(mod_id, call_id)
        res = str(res)
        res = res[:min(len(res), 100)]
        await ctx.send("Available Commands:\n"+res)
    return
@bot.command(name='helpmods', help='Modules usage')
async def bot_helpmods(ctx, *arg):
    """Show the calls available in one chain module: !helpmods [module_name].

    Renamed from `bot_help`: the original silently shadowed the `!helpall`
    handler of the same name at module level (both commands still worked,
    but the collision made the second definition clobber the first).
    """
    print_logs(*arg)
    if len(arg) != 1:
        await ctx.send(f"!helpmods [module_name]")
        return
    h_m = arg[0].strip()
    if h_m not in botcalls:
        await ctx.send(f"{h_m} not found 🔧")
        return
    res = "\n".join(botcalls[h_m])
    # Truncate to keep headroom under Discord's 2000-character message cap.
    res = res[:min(len(res), 1900)]
    msg_rep = f"Module `🧩{h_m}` Calls:\n\n"+res
    await ctx.send(msg_rep)
    return
@bot.command(name='send', help='Send balance to other SpiderDAO testnet users')
async def send_balance(ctx, *arg):
    """Transfer testnet SPDR from the caller's wallet to another address."""
    if len(arg) != 2:
        await ctx.send(f"!send [address] [value], e.g !send 5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL 10")
        return
    # BUGFIX: every other wallet-backed command checks for a registered
    # wallet first; without this guard an unknown author raised KeyError.
    if ctx.author not in bot_users:
        await ctx.send("No Wallet found, Create/Import Wallet `!wallet create|import`")
        return
    spdr = SpiderDaoInterface(keypair=bot_users[ctx.author]["keypair"])
    address = str(arg[0])
    value = str(arg[1])
    balance = {}
    bot_msg = ""
    try:
        balance = spdr.send_balance(address, value)
    except Exception as e:
        print_logs("Balance sending error", e)
        await ctx.send(f"!send [address] [value], e.g !send 5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL 10")
        return
    if "error" in balance:
        bot_msg = "Balance sending error: " + str(balance["error"])
        await ctx.send(bot_msg)
    else:
        bot_msg = "Balance Sent Successfully: " + str(balance["success"]) + " Block Hash: " + str(balance["block_hash"])
        await ctx.send(bot_msg)
    return
@bot.command(name='propose', help='Start a proposal')
async def bot_propose(ctx, *arg):
    """Submit a democracy proposal with a y/n confirmation step.

    Usage: !propose [module] [call_id] [args]
           e.g. !propose balance transfer [dest_user, value]
    Flow: pre_propose() stores the preimage and quotes the storage fee;
    after the user confirms, propose() submits it on-chain.
    """
    # !propose [module] [call_id] [args]
    # !propose balance transfer [dest_user, value]
    if ctx.author not in bot_users:
        await ctx.send(f"{ctx.author}: No Wallet found, Create/Import Wallet `!wallet create|import`")
        return
    spdr = SpiderDaoInterface(keypair=bot_users[ctx.author]["keypair"])
    proposal = {}
    preimage_hash = ""
    storageFee = ""
    async with ctx.typing():
        proposal = spdr.pre_propose(arg)
        if "error" in proposal:
            await ctx.send("1: propose: Something wrong happend!" + str(proposal))
            return
        preimage_hash = proposal["preimage_hash"]
        storageFee = str(proposal["storageFee"])
        bot_msg = f"Preimage Hash: `{preimage_hash}`, Proposal Storage Fee: `{storageFee}`"
    await ctx.send(bot_msg)
    await ctx.send(f"Confirm Submitting Proposal? (y/n)")
    # Wait for the next message from this same author in this same channel.
    msg = await bot.wait_for("message", check=lambda m:m.author == ctx.author and m.channel.id == ctx.channel.id)
    rep = str(msg.content).strip().lower()
    if rep not in ("y", "yes"):
        await ctx.send("🔴 Proposal Cancelled, Bye!")
        return
    else:
        await ctx.send("🟢 Submitting Proposal")
    async with ctx.typing():
        try:
            proposal = spdr.propose(proposal["preimage_hash"])
            # NOTE(review): `ev` is fetched but never used - verify whether
            # get_events() is needed here at all.
            ev = spdr.substrate.get_events(block_hash=proposal["block_hash"])
            block_hash = proposal["block_hash"]
            PropIndex = proposal["PropIndex"]
            launch_period = proposal["launch_period"]
            msg_rep = f"Proposal Submitted:\n \
            ✍️ Preimage Hash `{preimage_hash}` \n \
            🛡️ Block Hash `{block_hash}` \n \
            👉 Proposal Index: `{PropIndex}` \n \
            🕑 `{launch_period}` "
            #Proposals off-chain storage
            #spdr.db_set_user(PropIndex, str(ctx.author))
            print_logs(f"{ctx.author}: MSG REP PROP", msg_rep)
            await ctx.send(msg_rep)
        except Exception as e:
            # Log file/line of the failure, then tell the user it failed.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print_logs(exc_type, fname, exc_tb.tb_lineno)
            await ctx.send("2: propose: Something wrong happend!" + str(proposal))
    return
@bot.command(name='second', help='Second a proposal')
async def bot_second(ctx, *arg):
    """Second (endorse) an existing proposal: !second [proposal index]."""
    if ctx.author not in bot_users:
        await ctx.send("No Wallet found, Create/Import Wallet `!wallet create|import`")
        return
    async with ctx.typing():
        try:
            prop_index = str(arg[0]) ##RECHECK
            spdr = SpiderDaoInterface(keypair=bot_users[ctx.author]["keypair"])
            outcome = spdr.second(prop_index)
            if "error" in outcome:
                await ctx.send("second: error!" + str(outcome["error"]))
                return
            await ctx.send(f"✅ You `Seconded` Proposal `{prop_index}`")
        except Exception as exc:
            print_logs(f"{ctx.author}: Error", str(exc))
            await ctx.send("Something wrong happend!, `!second`")
    return
@bot.command(name='vote', help='Vote on a Referendum')
async def bot_vote(ctx, *arg):
    """Cast a vote on a referendum: !vote [referendum index] [yes|no]."""
    if ctx.author not in bot_users:
        await ctx.send("No Wallet found, Create/Import Wallet `!wallet create|import`")
        return
    ref_index = -1
    vote = ""
    try:
        ref_index = int(arg[0])
        vote = arg[1]
    except:
        await ctx.send(f"Wrong Input: !vote ref_index [yes|no], i.e !vote 1 yes")
        return
    async with ctx.typing():
        spdr = SpiderDaoInterface(keypair=bot_users[ctx.author]["keypair"])
        result = spdr.vote(ref_index, vote)
        if "error" in result:
            await ctx.send("vote error! " + result["error"])
            return
        marker = "✅ " if result["vote"].lower() == "yes" else "❌ "
        res_vote = marker + result["vote"].capitalize()
        bot_msg = f"You Voted `{res_vote}` on Referendum `{ref_index}`"
        await ctx.send(bot_msg)
    return
@bot.command(name='balance', help='Get balance of current user address or any other chain address')
async def bot_getbalance(ctx, *arg):
    """Report the SPDR balance of the caller or of a given address."""
    if ctx.author not in bot_users:
        await ctx.send("No Wallet found, Create/Import Wallet `!wallet create|import`")
        return
    # An optional single argument overrides the caller's own address.
    if len(arg) == 1:
        addr = arg[0]
    else:
        addr = bot_users[ctx.author]["keypair"].ss58_address
    async with ctx.typing():
        spdr = SpiderDaoInterface(keypair=bot_users[ctx.author]["keypair"])  # RECHECK
        raw_balance = spdr.get_balance(addr)
        if raw_balance is None:
            await ctx.send("balance: Something wrong happend!")
            return
        balance = str(round(float(raw_balance / spdr.CHAIN_DEC), 4))
        msg_rep = f"💰 Balance `{balance}` SPDR"
        print_logs(f"{ctx.author} {msg_rep}")
        await ctx.send(msg_rep)
@bot.command(name='ref', help='Get Referendum info `!ref [ref_index]`')
async def bot_getrefinfo(ctx, *arg):
    """Show one referendum's status, or all referenda when no index given."""
    if ctx.author not in bot_users:
        await ctx.send("No Wallet found, Create/Import Wallet `!wallet create|import`")
        return
    _all = True
    ref_idx = ""
    ref_msg = "Referendum Not Found"
    if len(arg) == 1:
        _all = False
        ref_idx = str(arg[0])
    spdr = SpiderDaoInterface(keypair=bot_users[ctx.author]["keypair"]) #RECHECK
    print_logs(f"{ctx.author}: REFINFO", ref_idx, _all)
    ret_ref_list = []
    async with ctx.typing():
        if _all:
            ref_list = spdr.get_all_refs()
            if len(ref_list) == 0:
                await ctx.send(ref_msg)
                return
            for r in ref_list:
                ret_ref_list.append(r["ref_msg"])
            # One message, referenda separated by blank lines.
            ref_msg = "\n\n".join(ret_ref_list)
        else:
            ref_msg = spdr.get_ref_status(ref_idx)
            if ref_msg is None:
                ref_msg = f"🎗 Referendum `{ref_idx}` is Not Found"
            else:
                ref_msg = ref_msg["ref_msg"]
    print_logs("REFMSG", ref_msg)
    await ctx.send(ref_msg)
@bot.command(name='proposals', help='Get Proposals')
async def bot_getprops(ctx, *arg):
    """List all proposals, or show one: !proposals [proposal index]."""
    if ctx.author not in bot_users:
        await ctx.send("No Wallet found, Create/Import Wallet `!wallet create|import`")
        return
    props_msg = "Proposal Not Found"
    async with ctx.typing():
        _all = True
        prop_idx = ""
        if len(arg) == 1:
            _all = False
            prop_idx = str(arg[0])
        spdr = SpiderDaoInterface(keypair=bot_users[ctx.author]["keypair"])
        print_logs(f"{ctx.author}: PROPINFO", prop_idx, _all)
        ret_prop_list = []
        if _all:
            props_list = spdr.get_all_proposals()
            if len(props_list) == 0:
                await ctx.send(props_msg)
                return
            for p in props_list:
                ret_prop_list.append(p["prop_msg"])
            props_msg = "\n\n".join(ret_prop_list)
        else:
            # BUGFIX: get_proposal() may return None; previously props_msg
            # was clobbered with None and ctx.send(None) raised. Keep the
            # "Proposal Not Found" default when the lookup fails.
            prop = spdr.get_proposal(prop_idx)
            if prop is not None:
                props_msg = prop["prop_msg"]
        print_logs("Sending prop_msg", ctx.author, props_msg)
        await ctx.send(props_msg)
def parse_refstarted(ledx):
    """Extract referendum-start info from a list of decoded chain events.

    ledx - events exposing `event_index` and `params` attributes.
    Returns a dict with PropIndex, pub_key, ReferendumIndex, VoteThreshold
    and `username` (a known bot user's address, else derived from pub_key).

    NOTE(review): assumes a "1e01" (Tabled) event accompanies the "1e03"
    (Started) event so `pub_key` is present before the username lookup -
    a lone Started event would raise KeyError. Verify chain event ordering.
    """
    ref_dic = {}
    for e in ledx:
        if e.event_index == "1e01": #Tabled
            for ep in list(e.params):
                if ep["type"] == "PropIndex":
                    ref_dic["PropIndex"] = str(ep["value"])
                if ep["type"] == "Vec<AccountId>":
                    ref_dic["pub_key"] = str(ep["value"][0])
        if e.event_index == "1e03": #Started
            for ep in list(e.params):
                if ep["type"] == "ReferendumIndex":
                    ref_dic["ReferendumIndex"] = str(ep["value"])
                if ep["type"] == "VoteThreshold":
                    ref_dic["VoteThreshold"] = str(ep["value"])
    ref_dic["username"] = None
    # Prefer the address of a wallet registered with this bot...
    for u in bot_users:
        if bot_users[u]["keypair"].public_key == ref_dic["pub_key"]:
            ref_dic["username"] = bot_users[u]["keypair"].ss58_address
    # ...falling back to deriving the ss58 address from the public key.
    if ref_dic["username"] is None:
        ref_dic["username"] = Keypair(public_key=str(ref_dic["pub_key"]), ss58_format=42).ss58_address
    return ref_dic
# Referendum indexes already announced - the chain delivers each event
# twice, so senders use this list to suppress duplicates.
dup_ref_started = []
loop = asyncio.get_event_loop() #recheck
async def send_referendum_started(ledx, ebh):
    """Announce a newly-started referendum in the public channel.

    ledx - decoded chain events; ebh - event block hash (unused here).
    Duplicate chain events are suppressed via dup_ref_started.
    """
    global dup_ref_started
    dex = parse_refstarted(ledx)
    pub_channel = bot.get_channel(pub_channel_id)
    ReferendumIndex = str(dex["ReferendumIndex"])
    PropIndex = dex["PropIndex"]
    username = dex["username"]
    msg_rep = f"Referendum `{ReferendumIndex}` Started from Proposal `{PropIndex}` by `{str(username)}` \n \
    To Vote call: !vote `{ReferendumIndex}` [yes|no]"
    if ReferendumIndex in dup_ref_started:
        print_logs("send_referendum_started DUPLICATE", msg_rep)
        # Cap the dedup list so it doesn't grow without bound.
        if len(dup_ref_started) > 10:
            dup_ref_started = []
        return
    dup_ref_started.append(ReferendumIndex) #Event is sent two times for some reason, this removes duplicate event
    print_logs("*** send_referendum_started MSG_REP", msg_rep)
    # Schedule the send on the loop; this callback isn't awaited by discord.
    loop.create_task(pub_channel.send(msg_rep))
    print_logs("REF DEX", dex)
    return
def parse_refresult(ledx):
    """Extract the referendum index and pass/fail outcome from chain events.

    ``1e04`` events mark an approved referendum, ``1e05`` a rejected one.
    Returns a dict with "ReferendumIndex" and "result" keys (empty when no
    matching event is present).
    """
    outcome_by_event = {"1e04": "Approved", "1e05": "Not Approved"}
    parsed = {}
    for event in ledx:
        result = outcome_by_event.get(event.event_index)
        if result is None:
            continue
        for param in event.params:
            if param["type"] == "ReferendumIndex":
                parsed["ReferendumIndex"] = str(param["value"])
                parsed["result"] = result
    return parsed
dup_ref_results = []
async def send_referendum_results(ledx, ebh):
    """Announce a referendum's pass/fail result in the public channel.

    ledx: list of chain events for the block; ebh is unused here.
    """
    global dup_ref_results
    dex = parse_refresult(ledx)
    pub_channel = bot.get_channel(pub_channel_id)
    ReferendumIndex = str(dex["ReferendumIndex"])
    result = dex["result"]
    msg_rep = f"Referendum `{ReferendumIndex}` Result `{result}`"
    # De-duplicate the doubled chain event; reset the list periodically.
    if ReferendumIndex in dup_ref_results:
        print_logs("send_referendum_results DUPLICATE", msg_rep)
        if len(dup_ref_results) > 10:
            dup_ref_results = []
        return
    dup_ref_results.append(ReferendumIndex) #Event is sent two times for some reason, this removes duplicate event
    loop.create_task(pub_channel.send(msg_rep))
    return
bot.run(TOKEN)
|
<filename>discord_bot/discord_bot.py
import discord
from discord.ext import commands
from substrateinterface import SubstrateInterface, Keypair
import asyncio
import os
import sys
import logging
import sys
sys.path.insert(0, '../src')
from spiderdao import *
# Root logger: INFO-and-above messages go to both stdout and bot_logs.log,
# formatted as "timestamp | message".
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s | %(message)s')
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.DEBUG)
stdout_handler.setFormatter(formatter)
file_handler = logging.FileHandler('bot_logs.log')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stdout_handler)
def print_logs(*argv):
    """Log all positional arguments as one space-joined INFO message."""
    # Each argument keeps a leading separator so the output matches the
    # historical " a b c" format of this helper.
    message = "".join(" " + str(part) for part in argv)
    logger.info(message)
# Sort each module's call list and build the sorted module index used by help.
botcalls = botcalls_mods
for bc in botcalls:
    botcalls[bc] = sorted(botcalls[bc])
botmods_list = sorted(botcalls.keys())
# GLOBALS
TOKEN = os.environ.get('BOT_TOKEN')
pub_channel_id = int(os.environ.get('pub_channel_id'))
#private key of sudo key, used to set testnet tokens for bot users
SUDO_KEY = os.environ.get('SUDO_KEY')
launch_period = os.environ.get('LAUNCH_PERIOD')
# In-memory map: discord author -> {"keypair": Keypair}; lost on restart.
bot_users = {}
bot = commands.Bot(command_prefix='!')
# NOTE(review): get_channel before bot.run() likely returns None (cache not
# populated yet); the event handlers below re-fetch the channel themselves.
pub_channel = bot.get_channel(pub_channel_id)
async def cb_ch(event_index, lev, ebh):
    """Chain-event callback: route democracy events to announcement handlers.

    lev: list of events in the block; ebh (presumably the block hash —
    TODO confirm) is forwarded as-is.
    """
    if event_index == "1e01": #'event_id': 'Tabled'
        print_logs("CALLED send_referendum_started")
        await send_referendum_started(lev, ebh)
    if event_index == "1e04" or event_index == "1e05": #'event_id': 'Passed' or 'NotPassed'
        print_logs("CALLED send_referendum_results")
        await send_referendum_results(lev, ebh)
    return
# Chain events listener
# Subscribes cb_ch to chain events; the bot cannot run without the chain.
chain_evs = None
try:
    chain_evs = SpiderDaoChain(chain_events_cb=cb_ch)
except:
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
    print_logs("Couldn't start SpiderDAOChain, check that the chain is running")
    sys.exit(0)
print_logs("SpiderDAO BOT START")
# Help text shown by !hi and on command errors.
democracy_commands = [
    "!helpall modules",
    "!helpmods [module name]",
    "!wallet [create|import [seed phrase]]",
    "!propose [module name] [module call] [call parameters]",
    "!second [proposal index]",
    "!vote [referendum index] [yes|no]",
    "!balance [address]",
    "!ref [referendum index]",
    "!proposals [proposal index]",
    "!send [address] [value]"
]
@bot.event
async def on_command_error(ctx, error):
    """Reply to any failed command with the error plus the full command list."""
    rep_cmd_err = f"{str(error)}, Available Commands:\n"+" 👈\n".join(democracy_commands)
    print_logs("BOT ERROR", str(error))
    await ctx.send(rep_cmd_err)
    return
@bot.command(name='hi', help='Shows bot commands')
async def bot_greet(ctx, *arg):
    """Reply with the list of available bot commands."""
    print_logs(*arg)
    await ctx.send("Available Commands:\n"+" 👈\n".join(democracy_commands))
def _format_wallet_info(spdr, keypair):
    """Return a Discord-markdown summary of *keypair* and its on-chain balance."""
    balance = spdr.get_balance(keypair.ss58_address)
    balance = str(round(float(balance / spdr.CHAIN_DEC), 4))
    return (
        f"Seed phrase: `{keypair.mnemonic}`\n"
        f"Address: `{keypair.ss58_address}`\n"
        f"Public Key: `{keypair.public_key}`\n"
        f"Private key: `{keypair.private_key}`\n"
        # Bug fix: the original had an unbalanced trailing backtick after SPDR.
        f"Current Balance: `{balance}` SPDR\n"
    )
@bot.command(name='wallet', help='Create or Import wallet')
async def bot_wallet(ctx, *arg):
    """Create or import a wallet for the invoking user (DM-only command).

    Usage: ``!wallet create`` or ``!wallet import <seed phrase>``.
    New or empty wallets are funded with an initial testnet balance.
    """
    if str(ctx.channel.type) != "private":
        await ctx.send("Use !wallet commands in Private channel only")
        return
    # Guard against IndexError when no sub-command was given.
    if not arg:
        await ctx.send("Wrong wallet command, usage !wallet create or !wallet import [seed_phrase]")
        return
    async with ctx.typing():
        cmd = arg[0].strip()
        if cmd == "create":
            spdr = SpiderDaoInterface()
            n_keypair = spdr.create_wallet()
            # Set initial balance for Discord bot Testnet users.
            current_balance = spdr.get_balance(n_keypair.ss58_address)
            if current_balance == 0 or current_balance is None:
                if not spdr.set_balance(n_keypair.ss58_address):
                    await ctx.send(f"{ctx.author}: Error Transferring initial balance")
                    return
            wallet_info = _format_wallet_info(spdr, n_keypair)
            bot_users[ctx.author] = {"keypair": n_keypair}
            await ctx.send(f"Wallet Created:\n{wallet_info}")
        elif cmd == "import":
            print_logs("AUTHOR", ctx.author)
            if len(arg) < 2:
                await ctx.send("Wrong wallet command, usage !wallet create or !wallet import [seed_phrase]")
                return
            mnemonic = arg[1].strip()
            try:
                spdr = SpiderDaoInterface(mnemonic=mnemonic)
            except Exception:
                await ctx.send("Error Importing Wallet")
                return
            n_keypair = spdr.keypair
            try:
                current_balance = spdr.get_balance(n_keypair.ss58_address)
                if current_balance == 0 or current_balance is None:
                    if not spdr.set_balance(n_keypair.ss58_address):
                        await ctx.send(f"{ctx.author}: Error Transferring initial balance")
                        return
            except Exception:
                await ctx.send("Wallet not found")
                return
            wallet_info = _format_wallet_info(spdr, n_keypair)
            bot_users[ctx.author] = {"keypair": n_keypair}
            await ctx.send(f"Wallet Imported:\n{wallet_info}")
        else:
            await ctx.send(f"{cmd}: Wrong wallet command, usage !wallet create or !wallet import [seed_phrase]")
    return
@bot.command(name='helpall', help='Bot Help')
async def bot_help(ctx, *arg):
    """List available chain modules (default) or module calls.

    Usage: ``!helpall [modules|calls|call <module> <call>]``.
    """
    print_logs(*arg)
    # Default to the module listing when no sub-command is given.
    # (The original's str(*arg) raised TypeError for more than one argument.)
    h_m = arg[0].strip() if arg and arg[0].strip() else "modules"
    if h_m == "modules":
        res = "".join("🧩 " + s + "\n" for s in botmods_list)
        res = res[:min(len(res), 300)]  # keep well under Discord's message limit
        msg_rep = "Available Modules: " + str(len(botcalls.keys())) + " | 👉 !helpmods [Module name] for module calls" + "\n"+"\n"+res
        await ctx.send(msg_rep)
    if h_m == "calls":
        # Bug fix: the original referenced an undefined name `chn` (NameError).
        # chain_evs is the only chain handle in this module — confirm its API
        # exposes get_modules_calls/get_module_call.
        res = chain_evs.get_modules_calls()
        res = str(res)
        res = res[:min(len(res), 100)]
        await ctx.send("Available Commands:\n"+"\n"+res)
    if h_m == "call":
        mod_id = arg[1]
        call_id = arg[2]
        res = chain_evs.get_module_call(mod_id, call_id)
        res = str(res)
        res = res[:min(len(res), 100)]
        await ctx.send("Available Commands:\n"+res)
    return
@bot.command(name='helpmods', help='Modules usage')
async def bot_helpmods(ctx, *arg):
    """Show the list of calls for one chain module.

    Renamed from ``bot_help``: the original redefined (shadowed) the
    ``!helpall`` handler's function name at module level. The registered
    command name ('helpmods') is unchanged.
    """
    print_logs(*arg)
    if len(arg) != 1:
        await ctx.send("!helpmods [module_name]")
        return
    h_m = arg[0].strip()
    if h_m not in botcalls:
        await ctx.send(f"{h_m} not found 🔧")
        return
    res = "\n".join(botcalls[h_m])
    res = res[:min(len(res), 1900)]  # stay under Discord's 2000-char limit
    msg_rep = f"Module `🧩{h_m}` Calls:\n\n"+res
    await ctx.send(msg_rep)
    return
@bot.command(name='send', help='Send balance to other SpiderDAO testnet users')
async def send_balance(ctx, *arg):
    """Transfer tokens from the caller's wallet to another address.

    Usage: ``!send <address> <value>``.
    NOTE(review): raises KeyError when the caller has no wallet in bot_users —
    the other commands guard for that first; confirm whether a guard is intended.
    """
    if len(arg) != 2:
        await ctx.send(f"!send [address] [value], e.g !send 5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL 10")
        return
    spdr = SpiderDaoInterface(keypair=bot_users[ctx.author]["keypair"])
    address = str(arg[0])
    value = str(arg[1])
    balance = {}
    bot_msg = ""
    try:
        balance = spdr.send_balance(address, value)
    except Exception as e:
        print_logs("Balance sending error", e)
        await ctx.send(f"!send [address] [value], e.g !send 5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL 10")
        return
    if "error" in balance:
        bot_msg = "Balance sending error: " + str(balance["error"])
        await ctx.send(bot_msg)
    else:
        bot_msg = "Balance Sent Successfully: " + str(balance["success"]) + " Block Hash: " + str(balance["block_hash"])
        await ctx.send(bot_msg)
    return
@bot.command(name='propose', help='Start a proposal')
async def bot_propose(ctx, *arg):
    """Submit a democracy proposal: preimage first, then confirm, then propose.

    The user is shown the preimage hash and storage fee and must reply y/yes
    to actually submit.
    """
    # !propose [module] [call_id] [args]
    # !propose balance transfer [dest_user, value]
    if ctx.author not in bot_users:
        await ctx.send(f"{ctx.author}: No Wallet found, Create/Import Wallet `!wallet create|import`")
        return
    spdr = SpiderDaoInterface(keypair=bot_users[ctx.author]["keypair"])
    proposal = {}
    preimage_hash = ""
    storageFee = ""
    async with ctx.typing():
        proposal = spdr.pre_propose(arg)
        if "error" in proposal:
            await ctx.send("1: propose: Something wrong happend!" + str(proposal))
            return
        preimage_hash = proposal["preimage_hash"]
        storageFee = str(proposal["storageFee"])
    bot_msg = f"Preimage Hash: `{preimage_hash}`, Proposal Storage Fee: `{storageFee}`"
    await ctx.send(bot_msg)
    await ctx.send(f"Confirm Submitting Proposal? (y/n)")
    # Wait for the next message from this author in this channel as the answer.
    msg = await bot.wait_for("message", check=lambda m:m.author == ctx.author and m.channel.id == ctx.channel.id)
    rep = str(msg.content).strip().lower()
    if rep not in ("y", "yes"):
        await ctx.send("🔴 Proposal Cancelled, Bye!")
        return
    else:
        await ctx.send("🟢 Submitting Proposal")
    async with ctx.typing():
        try:
            proposal = spdr.propose(proposal["preimage_hash"])
            ev = spdr.substrate.get_events(block_hash=proposal["block_hash"])
            block_hash = proposal["block_hash"]
            PropIndex = proposal["PropIndex"]
            launch_period = proposal["launch_period"]
            msg_rep = f"Proposal Submitted:\n \
            ✍️ Preimage Hash `{preimage_hash}` \n \
            🛡️ Block Hash `{block_hash}` \n \
            👉 Proposal Index: `{PropIndex}` \n \
            🕑 `{launch_period}` "
            #Proposals off-chain storage
            #spdr.db_set_user(PropIndex, str(ctx.author))
            print_logs(f"{ctx.author}: MSG REP PROP", msg_rep)
            await ctx.send(msg_rep)
        except Exception as e:
            # Log the failing file/line for debugging, then report to the user.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print_logs(exc_type, fname, exc_tb.tb_lineno)
            await ctx.send("2: propose: Something wrong happend!" + str(proposal))
    return
@bot.command(name='second', help='Second a proposal')
async def bot_second(ctx, *arg):
    """Second (endorse) an existing proposal by index."""
    if ctx.author not in bot_users:
        await ctx.send("No Wallet found, Create/Import Wallet `!wallet create|import`")
        return
    async with ctx.typing():
        try:
            prop_index = str(arg[0]) ##RECHECK
            spdr = SpiderDaoInterface(keypair=bot_users[ctx.author]["keypair"])
            proposal = spdr.second(prop_index)
            if "error" in proposal:
                await ctx.send("second: error!" + str(proposal["error"]))
                return
            await ctx.send(f"✅ You `Seconded` Proposal `{prop_index}`")
        except Exception as e:
            print_logs(f"{ctx.author}: Error", str(e))
            await ctx.send("Something wrong happend!, `!second`")
    return
@bot.command(name='vote', help='Vote on a Referendum')
async def bot_vote(ctx, *arg):
    """Cast a yes/no vote on a referendum: ``!vote <ref_index> [yes|no]``."""
    if ctx.author not in bot_users:
        await ctx.send("No Wallet found, Create/Import Wallet `!wallet create|import`")
        return
    ref_index = -1
    vote = ""
    try:
        ref_index = int(arg[0])
        vote = arg[1]
    except:
        await ctx.send(f"Wrong Input: !vote ref_index [yes|no], i.e !vote 1 yes")
        return
    async with ctx.typing():
        spdr = SpiderDaoInterface(keypair=bot_users[ctx.author]["keypair"])
        # `vote` is rebound from the input string to the result dict here.
        vote = spdr.vote(ref_index, vote)
        if "error" in vote:
            await ctx.send("vote error! " + vote["error"])
            return
        res_vote = ""
        if vote["vote"].lower() == "yes":
            res_vote = "✅ " + vote["vote"].capitalize()
        else:
            res_vote = "❌ " + vote["vote"].capitalize()
        bot_msg = f"You Voted `{res_vote}` on Referendum `{ref_index}`"
        await ctx.send(bot_msg)
    return
@bot.command(name='balance', help='Get balance of current user address or any other chain address')
async def bot_getbalance(ctx, *arg):
    """Show the balance of the given address, or the caller's own address."""
    if ctx.author not in bot_users:
        await ctx.send("No Wallet found, Create/Import Wallet `!wallet create|import`")
        return
    addr = ""
    if len(arg) == 1:
        addr = arg[0]
    else:
        addr = bot_users[ctx.author]["keypair"].ss58_address
    async with ctx.typing():
        spdr = SpiderDaoInterface(keypair=bot_users[ctx.author]["keypair"]) #RECHECK
        balance = spdr.get_balance(addr)
        if balance is None:
            await ctx.send("balance: Something wrong happend!")
            return
        # Convert raw chain units to SPDR with 4 decimal places.
        balance = str(round(float(balance / spdr.CHAIN_DEC), 4))
        msg_rep = f"💰 Balance `{balance}` SPDR"
        print_logs(f"{ctx.author} {msg_rep}")
        await ctx.send(msg_rep)
@bot.command(name='ref', help='Get Referendum info `!ref [ref_index]`')
async def bot_getrefinfo(ctx, *arg):
    """Show all referendums, or one referendum when an index is given."""
    if ctx.author not in bot_users:
        await ctx.send("No Wallet found, Create/Import Wallet `!wallet create|import`")
        return
    _all = True
    ref_idx = ""
    ref_msg = "Referendum Not Found"
    if len(arg) == 1:
        _all = False
        ref_idx = str(arg[0])
    spdr = SpiderDaoInterface(keypair=bot_users[ctx.author]["keypair"]) #RECHECK
    print_logs(f"{ctx.author}: REFINFO", ref_idx, _all)
    ret_ref_list = []
    async with ctx.typing():
        if _all:
            ref_list = spdr.get_all_refs()
            if len(ref_list) == 0:
                await ctx.send(ref_msg)
                return
            for r in ref_list:
                ret_ref_list.append(r["ref_msg"])
            ref_msg = "\n\n".join(ret_ref_list)
        else:
            ref_msg = spdr.get_ref_status(ref_idx)
            if ref_msg is None:
                ref_msg = f"🎗 Referendum `{ref_idx}` is Not Found"
            else:
                ref_msg = ref_msg["ref_msg"]
    print_logs("REFMSG", ref_msg)
    await ctx.send(ref_msg)
@bot.command(name='proposals', help='Get Proposals')
async def bot_getprops(ctx, *arg):
    """Show all open proposals, or one proposal when an index is given.

    Usage: ``!proposals [proposal index]``.
    """
    if ctx.author not in bot_users:
        await ctx.send("No Wallet found, Create/Import Wallet `!wallet create|import`")
        return
    props_msg = "Proposal Not Found"  # fallback reply
    async with ctx.typing():
        _all = True
        prop_idx = ""
        if len(arg) == 1:
            _all = False
            prop_idx = str(arg[0])
        spdr = SpiderDaoInterface(keypair=bot_users[ctx.author]["keypair"])
        print_logs(f"{ctx.author}: PROPINFO", prop_idx, _all)
        ret_prop_list = []
        if _all:
            props_list = spdr.get_all_proposals()
            if len(props_list) == 0:
                await ctx.send(props_msg)
                return
            for p in props_list:
                ret_prop_list.append(p["prop_msg"])
            props_msg = "\n\n".join(ret_prop_list)
        else:
            prop = spdr.get_proposal(prop_idx)
            # Bug fix: the original overwrote props_msg with None when the
            # proposal was missing and then sent None to the channel; keep
            # the "Proposal Not Found" fallback instead.
            if prop is not None:
                props_msg = prop["prop_msg"]
    print_logs("Sending prop_msg", ctx.author, props_msg)
    await ctx.send(props_msg)
def parse_refstarted(ledx):
    """Extract proposal/referendum details from a list of chain events.

    ``1e01`` (Tabled) yields the proposal index and proposer public key;
    ``1e03`` (Started) yields the referendum index and vote threshold.
    NOTE(review): assumes a ``1e01`` event is always present — accessing
    ``ref_dic["pub_key"]`` below would raise KeyError otherwise; confirm
    against the chain callback's event batching.
    """
    ref_dic = {}
    for e in ledx:
        if e.event_index == "1e01": #Tabled
            for ep in list(e.params):
                if ep["type"] == "PropIndex":
                    ref_dic["PropIndex"] = str(ep["value"])
                if ep["type"] == "Vec<AccountId>":
                    ref_dic["pub_key"] = str(ep["value"][0])
        if e.event_index == "1e03": #Started
            for ep in list(e.params):
                if ep["type"] == "ReferendumIndex":
                    ref_dic["ReferendumIndex"] = str(ep["value"])
                if ep["type"] == "VoteThreshold":
                    ref_dic["VoteThreshold"] = str(ep["value"])
    # Map the proposer's public key back to a known bot user; fall back to
    # deriving the ss58 address directly from the public key.
    ref_dic["username"] = None
    for u in bot_users:
        if bot_users[u]["keypair"].public_key == ref_dic["pub_key"]:
            ref_dic["username"] = bot_users[u]["keypair"].ss58_address
    if ref_dic["username"] is None:
        ref_dic["username"] = Keypair(public_key=str(ref_dic["pub_key"]), ss58_format=42).ss58_address
    return ref_dic
dup_ref_started = []
loop = asyncio.get_event_loop() #recheck
async def send_referendum_started(ledx, ebh):
    """Announce a newly started referendum in the public channel.

    ledx: list of chain events for the block; ebh is unused here
    (presumably the block hash — TODO confirm with the callback caller).
    """
    global dup_ref_started
    dex = parse_refstarted(ledx)
    pub_channel = bot.get_channel(pub_channel_id)
    ReferendumIndex = str(dex["ReferendumIndex"])
    PropIndex = dex["PropIndex"]
    username = dex["username"]
    msg_rep = f"Referendum `{ReferendumIndex}` Started from Proposal `{PropIndex}` by `{str(username)}` \n \
To Vote call: !vote `{ReferendumIndex}` [yes|no]"
    # The chain emits this event twice; drop repeats, and occasionally reset
    # the de-duplication list so it does not grow without bound.
    if ReferendumIndex in dup_ref_started:
        print_logs("send_referendum_started DUPLICATE", msg_rep)
        if len(dup_ref_started) > 10:
            dup_ref_started = []
        return
    dup_ref_started.append(ReferendumIndex) #Event is sent two times for some reason, this removes duplicate event
    print_logs("*** send_referendum_started MSG_REP", msg_rep)
    loop.create_task(pub_channel.send(msg_rep))
    print_logs("REF DEX", dex)
    return
def parse_refresult(ledx):
    """Extract the referendum index and pass/fail outcome from chain events.

    ``1e04`` events mark an approved referendum, ``1e05`` a rejected one.
    Returns a dict with "ReferendumIndex" and "result" keys (empty when no
    matching event is present).
    """
    outcome_by_event = {"1e04": "Approved", "1e05": "Not Approved"}
    parsed = {}
    for event in ledx:
        result = outcome_by_event.get(event.event_index)
        if result is None:
            continue
        for param in event.params:
            if param["type"] == "ReferendumIndex":
                parsed["ReferendumIndex"] = str(param["value"])
                parsed["result"] = result
    return parsed
dup_ref_results = []
async def send_referendum_results(ledx, ebh):
    """Announce a referendum's pass/fail result in the public channel.

    ledx: list of chain events for the block; ebh is unused here.
    """
    global dup_ref_results
    dex = parse_refresult(ledx)
    pub_channel = bot.get_channel(pub_channel_id)
    ReferendumIndex = str(dex["ReferendumIndex"])
    result = dex["result"]
    msg_rep = f"Referendum `{ReferendumIndex}` Result `{result}`"
    # De-duplicate the doubled chain event; reset the list periodically.
    if ReferendumIndex in dup_ref_results:
        print_logs("send_referendum_results DUPLICATE", msg_rep)
        if len(dup_ref_results) > 10:
            dup_ref_results = []
        return
    dup_ref_results.append(ReferendumIndex) #Event is sent two times for some reason, this removes duplicate event
    loop.create_task(pub_channel.send(msg_rep))
    return
bot.run(TOKEN)
|
en
| 0.630622
|
# GLOBALS #private key of sudo key, used to set testnet tokens for bot users #'event_id': 'Tabled' #'event_id': 'Passed' or 'NotPassed' # Chain events listener #Set initial balance for Discord bot Testnet users # !propose [module] [call_id] [args] # !propose balance transfer [dest_user, value] #Proposals off-chain storage #spdr.db_set_user(PropIndex, str(ctx.author)) ##RECHECK #RECHECK #RECHECK #Tabled #Started #recheck #Event is sent two times for some reason, this removes duplicate event #Tabled #Started #Event is sent two times for some reason, this removes duplicate event
| 2.359585
| 2
|
2018/05/test.py
|
lfrommelt/monty
| 0
|
6627188
|
import re
# Nested capture groups: group 1 is the prefix, group 2 the whole a+b+ run,
# groups 3-4 the individual a/b runs -> prints ('1234', 'aaabbb', 'aaa', 'bbb').
match = re.match(r"(.*) ((a+)(b+))", "1234 aaabbb")
print(match.groups())
# import binary # file from last slide
# for b in ["0b01010", "01b110", "0b110", "0b", "0b010b", "0bb100", "0b1", "1b001"]:
# print('{} : {}'.format(b, binary.fsa(b)))
|
import re
# Nested capture groups: group 1 is the prefix, group 2 the whole a+b+ run,
# groups 3-4 the individual a/b runs -> prints ('1234', 'aaabbb', 'aaa', 'bbb').
match = re.match(r"(.*) ((a+)(b+))", "1234 aaabbb")
print(match.groups())
# import binary # file from last slide
# for b in ["0b01010", "01b110", "0b110", "0b", "0b010b", "0bb100", "0b1", "1b001"]:
# print('{} : {}'.format(b, binary.fsa(b)))
|
en
| 0.560899
|
# import binary # file from last slide # for b in ["0b01010", "01b110", "0b110", "0b", "0b010b", "0bb100", "0b1", "1b001"]: # print('{} : {}'.format(b, binary.fsa(b)))
| 3.1525
| 3
|
covid19/stats/wrappers.py
|
FlyingBird95/covid-19
| 0
|
6627189
|
from functools import wraps
from flask import flash, redirect, url_for
from service.data.models import Location
def with_location(func):
    """Decorator resolving a ``location_id`` kwarg into a ``location`` object.

    Redirects to the stats overview with a flash message when the id does
    not exist; raises ValueError when the kwarg is missing entirely.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        if 'location_id' not in kwargs:
            raise ValueError("Could not find mandatory keyword argument 'location_id'.")
        location = Location.query.get(kwargs.pop('location_id'))
        if location is None:
            flash('Location cannot be found', "info")
            return redirect(url_for('stats.overview'))
        return func(*args, location=location, **kwargs)
    return wrapper
def with_china(func):
    """Decorator that supplies the China Location via the ``china`` kwarg."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, china=Location.get_china(), **kwargs)
    return wrapper
|
from functools import wraps
from flask import flash, redirect, url_for
from service.data.models import Location
def with_location(func):
    """Resolve the ``location_id`` kwarg into a ``location`` object before calling *func*."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        if 'location_id' in kwargs:
            location = Location.query.get(kwargs.pop('location_id'))
            if location is not None:
                return func(*args, location=location, **kwargs)
            # Unknown id: inform the user and fall back to the overview page.
            flash('Location cannot be found', "info")
            return redirect(url_for('stats.overview'))
        raise ValueError("Could not find mandatory keyword argument 'location_id'.")
    return wrapper
def with_china(func):
    """Inject the China Location as the ``china`` keyword argument."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        china = Location.get_china()
        return func(*args, china=china, **kwargs)
    return wrapper
|
none
| 1
| 2.660509
| 3
|
|
train.py
|
Cousin-Zan/Semantic-Segmentation-for-Steel-Strip-Surface-Defect-Detection
| 0
|
6627190
|
"""
The file defines the training process.
@Author: <NAME>
@Github: https://github.com/luyanger1799
@Project: https://github.com/luyanger1799/amazing-semantic-segmentation
"""
from utils.data_generator import ImageDataGenerator
from utils.helpers import get_dataset_info, check_related_path
from utils.callbacks import LearningRateScheduler
from utils.optimizers import *
from utils.losses import *
from utils.learning_rate import *
from utils.metrics import MeanIoU
from utils import utils
from builders import builder
import tensorflow as tf
import argparse
import os
def str2bool(v):
    """argparse ``type=`` converter: parse a yes/no-style string into a bool.

    Raises argparse.ArgumentTypeError for unrecognized values.
    """
    truthy = ('yes', 'true', 't', 'y', '1')
    falsy = ('no', 'false', 'f', 'n', '0')
    lowered = v.lower()
    if lowered in truthy:
        return True
    if lowered in falsy:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
# Command-line interface for the training run: model/backbone selection,
# dataset path, loss/optimizer/LR-schedule choices, and augmentation knobs.
parser = argparse.ArgumentParser()
parser.add_argument('--model', help='Choose the semantic segmentation methods.', type=str, required=True)
parser.add_argument('--base_model', help='Choose the backbone model.', type=str, default=None)
parser.add_argument('--dataset', help='The path of the dataset.', type=str, default='SD')
parser.add_argument('--loss', help='The loss function for traing.', type=str, default=None,
                    choices=['ce', 'dice_ce', 'focal_loss', 'miou_loss', 'self_balanced_focal_loss', 'ssim_loss','mix_loss'])
parser.add_argument('--num_classes', help='The number of classes to be segmented.', type=int, default=32)
parser.add_argument('--random_crop', help='Whether to randomly crop the image.', type=str2bool, default=False)
parser.add_argument('--crop_height', help='The height to crop the image.', type=int, default=256)
parser.add_argument('--crop_width', help='The width to crop the image.', type=int, default=256)
parser.add_argument('--batch_size', help='The training batch size.', type=int, default=5)
parser.add_argument('--valid_batch_size', help='The validation batch size.', type=int, default=1)
parser.add_argument('--num_epochs', help='The number of epochs to train for.', type=int, default=100)
parser.add_argument('--initial_epoch', help='The initial epoch of training.', type=int, default=0)
parser.add_argument('--h_flip', help='Whether to randomly flip the image horizontally.', type=str2bool, default=False)
parser.add_argument('--v_flip', help='Whether to randomly flip the image vertically.', type=str2bool, default=False)
parser.add_argument('--brightness', help='Randomly change the brightness (list).', type=float, default=None, nargs='+')
parser.add_argument('--rotation', help='The angle to randomly rotate the image.', type=float, default=0.)
parser.add_argument('--zoom_range', help='The times for zooming the image.', type=float, default=0., nargs='+')
parser.add_argument('--channel_shift', help='The channel shift range.', type=float, default=0.)
parser.add_argument('--data_aug_rate', help='The rate of data augmentation.', type=float, default=0.)
parser.add_argument('--checkpoint_freq', help='How often to save a checkpoint.', type=int, default=1)
parser.add_argument('--validation_freq', help='How often to perform validation.', type=int, default=1)
parser.add_argument('--num_valid_images', help='The number of images used for validation.', type=int, default=20)
parser.add_argument('--data_shuffle', help='Whether to shuffle the data.', type=str2bool, default=True)
parser.add_argument('--random_seed', help='The random shuffle seed.', type=int, default=None)
parser.add_argument('--weights', help='The path of weights to be loaded.', type=str, default=None)
parser.add_argument('--steps_per_epoch', help='The training steps of each epoch', type=int, default=None)
parser.add_argument('--lr_scheduler', help='The strategy to schedule learning rate.', type=str, default='cosine_decay',
                    choices=['step_decay', 'poly_decay', 'cosine_decay'])
parser.add_argument('--lr_warmup', help='Whether to use lr warm up.', type=bool, default=False)
parser.add_argument('--learning_rate', help='The initial learning rate.', type=float, default=3e-4)
parser.add_argument('--optimizer', help='The optimizer for training.', type=str, default='adam',
                    choices=['sgd', 'adam', 'nadam', 'adamw', 'nadamw', 'sgdw'])
args = parser.parse_args()
# check related paths
paths = check_related_path(os.getcwd())
# get image and label file names for training and validation
train_image_names, train_label_names, valid_image_names, valid_label_names, _, _ = get_dataset_info(args.dataset)
# build the model
net, base_model = builder(args.num_classes, (args.crop_height, args.crop_width), args.model, args.base_model)
# summary
net.summary()
# load weights
if args.weights is not None:
    print('Loading the weights...')
    net.load_weights(args.weights)
# chose loss
losses = {'ce': categorical_crossentropy_with_logits,
          'dice_ce': dice_and_categorical_crossentropy_with_logits,
          'focal_loss': focal_loss(),
          'miou_loss': miou_loss,
          'self_balanced_focal_loss': self_balanced_focal_loss(),
          'ssim_loss': ssim_loss,
          'mix_loss': mix_loss}
loss = losses[args.loss] if args.loss is not None else categorical_crossentropy_with_logits
# chose optimizer
total_iterations = len(train_image_names) * args.num_epochs // args.batch_size
wd_dict = utils.get_weight_decays(net)
ordered_values = []
weight_decays = utils.fill_dict_in_order(wd_dict, ordered_values)
# NOTE(review): this optimizers dict (and --optimizer) is never used — the
# compile call below always creates a fresh Adam; confirm whether intended.
optimizers = {'adam': tf.keras.optimizers.Adam(learning_rate=args.learning_rate),
              'nadam': tf.keras.optimizers.Nadam(learning_rate=args.learning_rate),
              'sgd': tf.keras.optimizers.SGD(learning_rate=args.learning_rate, momentum=0.99),
              'adamw': AdamW(learning_rate=args.learning_rate, batch_size=args.batch_size,
                             total_iterations=total_iterations),
              'nadamw': NadamW(learning_rate=args.learning_rate, batch_size=args.batch_size,
                               total_iterations=total_iterations),
              'sgdw': SGDW(learning_rate=args.learning_rate, momentum=0.99, batch_size=args.batch_size,
                           total_iterations=total_iterations)}
# lr schedule strategy
# Warm-up reserves the first 5 epochs, so the decay span must stay positive.
if args.lr_warmup and args.num_epochs - 5 <= 0:
    raise ValueError('num_epochs must be larger than 5 if lr warm up is used.')
lr_decays = {'step_decay': step_decay(args.learning_rate, args.num_epochs - 5 if args.lr_warmup else args.num_epochs,
                                      warmup=args.lr_warmup),
             'poly_decay': poly_decay(args.learning_rate, args.num_epochs - 5 if args.lr_warmup else args.num_epochs,
                                      warmup=args.lr_warmup),
             'cosine_decay': cosine_decay(args.num_epochs - 5 if args.lr_warmup else args.num_epochs,
                                          args.learning_rate, warmup=args.lr_warmup)}
lr_decay = lr_decays[args.lr_scheduler]
# training and validation steps
steps_per_epoch = len(train_image_names) // args.batch_size if not args.steps_per_epoch else args.steps_per_epoch
validation_steps = args.num_valid_images // args.valid_batch_size
# compile the model
# CFNET has two supervised outputs, so it gets a per-output loss mapping.
if args.model == 'CFNET':
    loss = {'re_lu_16':mix_loss,'re_lu_27':mix_loss}
net.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=args.learning_rate),
            loss=loss,
            metrics=[MeanIoU(args.num_classes)])
# data generator
# data augmentation setting
train_gen = ImageDataGenerator(random_crop=args.random_crop,
                               rotation_range=args.rotation,
                               brightness_range=args.brightness,
                               zoom_range=args.zoom_range,
                               channel_shift_range=args.channel_shift,
                               # Bug fix: horizontal_flip was wired to --v_flip,
                               # leaving --h_flip unused (it only appeared in the
                               # startup printout below).
                               horizontal_flip=args.h_flip,
                               vertical_flip=args.v_flip)
# Validation uses no augmentation.
valid_gen = ImageDataGenerator()
train_generator = train_gen.flow(images_list=train_image_names,
                                 labels_list=train_label_names,
                                 num_classes=args.num_classes,
                                 batch_size=args.batch_size,
                                 target_size=(args.crop_height, args.crop_width),
                                 shuffle=args.data_shuffle,
                                 seed=args.random_seed,
                                 data_aug_rate=args.data_aug_rate)
valid_generator = valid_gen.flow(images_list=valid_image_names,
                                 labels_list=valid_label_names,
                                 num_classes=args.num_classes,
                                 batch_size=args.valid_batch_size,
                                 target_size=(args.crop_height, args.crop_width))
# callbacks setting
# checkpoint setting
# NOTE(review): the monitored metric name 'val_re_lu_27_mean_io_u' assumes the
# CFNET output-layer naming — confirm it exists for other --model choices.
model_checkpoint = tf.keras.callbacks.ModelCheckpoint(
    filepath=os.path.join(paths['checkpoints_path'],
                          '{model}_based_on_{base}_'.format(model=args.model, base=base_model) +
                          'miou_{val_re_lu_27_mean_io_u:04f}_' + 'ep_{epoch:02d}.h5'),
    save_best_only=True, period=args.checkpoint_freq, monitor='val_re_lu_27_mean_io_u', mode='max')
# tensorboard setting
tensorboard = tf.keras.callbacks.TensorBoard(log_dir=paths['logs_path'])
# learning rate scheduler setting
learning_rate_scheduler = LearningRateScheduler(lr_decay, args.learning_rate, args.lr_warmup, steps_per_epoch,
                                                verbose=1)
callbacks = [model_checkpoint, tensorboard, learning_rate_scheduler]
# begin training
print("\n***** Begin training *****")
print("Dataset -->", args.dataset)
print("Num Images -->", len(train_image_names))
print("Model -->", args.model)
print("Base Model -->", base_model)
print("Crop Height -->", args.crop_height)
print("Crop Width -->", args.crop_width)
print("Num Epochs -->", args.num_epochs)
print("Initial Epoch -->", args.initial_epoch)
print("Batch Size -->", args.batch_size)
print("Num Classes -->", args.num_classes)
print("Data Augmentation:")
print("\tData Augmentation Rate -->", args.data_aug_rate)
print("\tVertical Flip -->", args.v_flip)
print("\tHorizontal Flip -->", args.h_flip)
print("\tBrightness Alteration -->", args.brightness)
print("\tRotation -->", args.rotation)
print("\tZoom -->", args.zoom_range)
print("\tChannel Shift -->", args.channel_shift)
print("")
# training...
net.fit_generator(train_generator,
                  steps_per_epoch=steps_per_epoch,
                  epochs=args.num_epochs,
                  callbacks=callbacks,
                  validation_data=valid_generator,
                  validation_steps=validation_steps,
                  validation_freq=args.validation_freq,
                  max_queue_size=10,
                  workers=os.cpu_count(),
                  use_multiprocessing=False,
                  initial_epoch=args.initial_epoch)
# save weights
net.save(filepath=os.path.join(
    paths['weights_path'], '{model}_based_on_{base_model}.h5'.format(model=args.model, base_model=base_model)))
|
"""
The file defines the training process.
@Author: <NAME>
@Github: https://github.com/luyanger1799
@Project: https://github.com/luyanger1799/amazing-semantic-segmentation
"""
from utils.data_generator import ImageDataGenerator
from utils.helpers import get_dataset_info, check_related_path
from utils.callbacks import LearningRateScheduler
from utils.optimizers import *
from utils.losses import *
from utils.learning_rate import *
from utils.metrics import MeanIoU
from utils import utils
from builders import builder
import tensorflow as tf
import argparse
import os
def str2bool(v):
    """Parse a command-line flag value into a bool.

    Accepts the common truthy/falsy spellings, case-insensitively.  A genuine
    bool is passed through unchanged so programmatic callers (e.g. defaults)
    also work.

    Raises:
        argparse.ArgumentTypeError: if the value is not a recognized spelling.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')
# ---------------------------------------------------------------------------
# Command-line interface for the training script.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--model', help='Choose the semantic segmentation methods.', type=str, required=True)
parser.add_argument('--base_model', help='Choose the backbone model.', type=str, default=None)
parser.add_argument('--dataset', help='The path of the dataset.', type=str, default='SD')
parser.add_argument('--loss', help='The loss function for training.', type=str, default=None,
                    choices=['ce', 'dice_ce', 'focal_loss', 'miou_loss', 'self_balanced_focal_loss', 'ssim_loss', 'mix_loss'])
parser.add_argument('--num_classes', help='The number of classes to be segmented.', type=int, default=32)
parser.add_argument('--random_crop', help='Whether to randomly crop the image.', type=str2bool, default=False)
parser.add_argument('--crop_height', help='The height to crop the image.', type=int, default=256)
parser.add_argument('--crop_width', help='The width to crop the image.', type=int, default=256)
parser.add_argument('--batch_size', help='The training batch size.', type=int, default=5)
parser.add_argument('--valid_batch_size', help='The validation batch size.', type=int, default=1)
parser.add_argument('--num_epochs', help='The number of epochs to train for.', type=int, default=100)
parser.add_argument('--initial_epoch', help='The initial epoch of training.', type=int, default=0)
parser.add_argument('--h_flip', help='Whether to randomly flip the image horizontally.', type=str2bool, default=False)
parser.add_argument('--v_flip', help='Whether to randomly flip the image vertically.', type=str2bool, default=False)
parser.add_argument('--brightness', help='Randomly change the brightness (list).', type=float, default=None, nargs='+')
parser.add_argument('--rotation', help='The angle to randomly rotate the image.', type=float, default=0.)
parser.add_argument('--zoom_range', help='The times for zooming the image.', type=float, default=0., nargs='+')
parser.add_argument('--channel_shift', help='The channel shift range.', type=float, default=0.)
parser.add_argument('--data_aug_rate', help='The rate of data augmentation.', type=float, default=0.)
parser.add_argument('--checkpoint_freq', help='How often to save a checkpoint.', type=int, default=1)
parser.add_argument('--validation_freq', help='How often to perform validation.', type=int, default=1)
parser.add_argument('--num_valid_images', help='The number of images used for validation.', type=int, default=20)
parser.add_argument('--data_shuffle', help='Whether to shuffle the data.', type=str2bool, default=True)
parser.add_argument('--random_seed', help='The random shuffle seed.', type=int, default=None)
parser.add_argument('--weights', help='The path of weights to be loaded.', type=str, default=None)
parser.add_argument('--steps_per_epoch', help='The training steps of each epoch', type=int, default=None)
parser.add_argument('--lr_scheduler', help='The strategy to schedule learning rate.', type=str, default='cosine_decay',
                    choices=['step_decay', 'poly_decay', 'cosine_decay'])
# BUG FIX: this flag previously used `type=bool`, which parses ANY non-empty
# string (including "False"/"no") as True; use the str2bool converter like
# every other boolean flag in this parser.
parser.add_argument('--lr_warmup', help='Whether to use lr warm up.', type=str2bool, default=False)
parser.add_argument('--learning_rate', help='The initial learning rate.', type=float, default=3e-4)
parser.add_argument('--optimizer', help='The optimizer for training.', type=str, default='adam',
                    choices=['sgd', 'adam', 'nadam', 'adamw', 'nadamw', 'sgdw'])
args = parser.parse_args()
# check related paths (checkpoint / log / weight directories under the cwd)
paths = check_related_path(os.getcwd())
# get image and label file names for training and validation
train_image_names, train_label_names, valid_image_names, valid_label_names, _, _ = get_dataset_info(args.dataset)
# build the model; `base_model` is the resolved backbone name
net, base_model = builder(args.num_classes, (args.crop_height, args.crop_width), args.model, args.base_model)
# print the model summary
net.summary()
# optionally resume from pre-trained weights
if args.weights is not None:
    print('Loading the weights...')
    net.load_weights(args.weights)
# choose the loss function (falls back to categorical cross-entropy)
losses = {'ce': categorical_crossentropy_with_logits,
          'dice_ce': dice_and_categorical_crossentropy_with_logits,
          'focal_loss': focal_loss(),
          'miou_loss': miou_loss,
          'self_balanced_focal_loss': self_balanced_focal_loss(),
          'ssim_loss': ssim_loss,
          'mix_loss': mix_loss}
loss = losses[args.loss] if args.loss is not None else categorical_crossentropy_with_logits
# choose the optimizer
total_iterations = len(train_image_names) * args.num_epochs // args.batch_size
wd_dict = utils.get_weight_decays(net)
ordered_values = []
# NOTE(review): `weight_decays` is computed but never referenced below --
# confirm whether it should be passed to the *W (decoupled weight decay)
# optimizers.
weight_decays = utils.fill_dict_in_order(wd_dict, ordered_values)
optimizers = {'adam': tf.keras.optimizers.Adam(learning_rate=args.learning_rate),
              'nadam': tf.keras.optimizers.Nadam(learning_rate=args.learning_rate),
              'sgd': tf.keras.optimizers.SGD(learning_rate=args.learning_rate, momentum=0.99),
              'adamw': AdamW(learning_rate=args.learning_rate, batch_size=args.batch_size,
                             total_iterations=total_iterations),
              'nadamw': NadamW(learning_rate=args.learning_rate, batch_size=args.batch_size,
                               total_iterations=total_iterations),
              'sgdw': SGDW(learning_rate=args.learning_rate, momentum=0.99, batch_size=args.batch_size,
                           total_iterations=total_iterations)}
# lr schedule strategy; when warm-up is enabled the first 5 epochs are
# reserved for it, so the decay schedule covers num_epochs - 5
if args.lr_warmup and args.num_epochs - 5 <= 0:
    raise ValueError('num_epochs must be larger than 5 if lr warm up is used.')
lr_decays = {'step_decay': step_decay(args.learning_rate, args.num_epochs - 5 if args.lr_warmup else args.num_epochs,
                                      warmup=args.lr_warmup),
             'poly_decay': poly_decay(args.learning_rate, args.num_epochs - 5 if args.lr_warmup else args.num_epochs,
                                      warmup=args.lr_warmup),
             'cosine_decay': cosine_decay(args.num_epochs - 5 if args.lr_warmup else args.num_epochs,
                                          args.learning_rate, warmup=args.lr_warmup)}
lr_decay = lr_decays[args.lr_scheduler]
# training and validation steps per epoch
steps_per_epoch = len(train_image_names) // args.batch_size if not args.steps_per_epoch else args.steps_per_epoch
validation_steps = args.num_valid_images // args.valid_batch_size
# compile the model
# CFNET is trained with two supervised output heads ('re_lu_16'/'re_lu_27'),
# each using the mixed loss; other models use the single loss chosen above.
if args.model == 'CFNET':
    loss = {'re_lu_16': mix_loss, 're_lu_27': mix_loss}
# BUG FIX: a hard-coded Adam was passed here, which silently ignored the
# --optimizer flag and left the `optimizers` dict above entirely unused.
# The default ('adam') still yields Adam(learning_rate=args.learning_rate),
# so default behavior is unchanged.
net.compile(optimizer=optimizers[args.optimizer],
            loss=loss,
            metrics=[MeanIoU(args.num_classes)])
# data generator
# data augmentation setting
# BUG FIX: horizontal_flip previously received args.v_flip (copy-paste bug),
# so the --h_flip flag had no effect; it now drives horizontal flipping.
train_gen = ImageDataGenerator(random_crop=args.random_crop,
                               rotation_range=args.rotation,
                               brightness_range=args.brightness,
                               zoom_range=args.zoom_range,
                               channel_shift_range=args.channel_shift,
                               horizontal_flip=args.h_flip,
                               vertical_flip=args.v_flip)
# validation data is never augmented
valid_gen = ImageDataGenerator()
train_generator = train_gen.flow(images_list=train_image_names,
                                 labels_list=train_label_names,
                                 num_classes=args.num_classes,
                                 batch_size=args.batch_size,
                                 target_size=(args.crop_height, args.crop_width),
                                 shuffle=args.data_shuffle,
                                 seed=args.random_seed,
                                 data_aug_rate=args.data_aug_rate)
valid_generator = valid_gen.flow(images_list=valid_image_names,
                                 labels_list=valid_label_names,
                                 num_classes=args.num_classes,
                                 batch_size=args.valid_batch_size,
                                 target_size=(args.crop_height, args.crop_width))
# callbacks setting
# checkpoint setting
# NOTE(review): the monitored metric 'val_re_lu_27_mean_io_u' only exists when
# the model has a 're_lu_27' output head (the CFNET case above) -- confirm the
# monitor name for other models.  `period=` is the legacy Keras spelling of
# the checkpoint frequency.
model_checkpoint = tf.keras.callbacks.ModelCheckpoint(
    filepath=os.path.join(paths['checkpoints_path'],
                          '{model}_based_on_{base}_'.format(model=args.model, base=base_model) +
                          'miou_{val_re_lu_27_mean_io_u:04f}_' + 'ep_{epoch:02d}.h5'),
    save_best_only=True, period=args.checkpoint_freq, monitor='val_re_lu_27_mean_io_u', mode='max')
# tensorboard setting
tensorboard = tf.keras.callbacks.TensorBoard(log_dir=paths['logs_path'])
# learning rate scheduler setting (custom callback from utils.callbacks)
learning_rate_scheduler = LearningRateScheduler(lr_decay, args.learning_rate, args.lr_warmup, steps_per_epoch,
                                                verbose=1)
callbacks = [model_checkpoint, tensorboard, learning_rate_scheduler]
# begin training: echo the effective configuration first
print("\n***** Begin training *****")
print("Dataset -->", args.dataset)
print("Num Images -->", len(train_image_names))
print("Model -->", args.model)
print("Base Model -->", base_model)
print("Crop Height -->", args.crop_height)
print("Crop Width -->", args.crop_width)
print("Num Epochs -->", args.num_epochs)
print("Initial Epoch -->", args.initial_epoch)
print("Batch Size -->", args.batch_size)
print("Num Classes -->", args.num_classes)
print("Data Augmentation:")
print("\tData Augmentation Rate -->", args.data_aug_rate)
print("\tVertical Flip -->", args.v_flip)
print("\tHorizontal Flip -->", args.h_flip)
print("\tBrightness Alteration -->", args.brightness)
print("\tRotation -->", args.rotation)
print("\tZoom -->", args.zoom_range)
print("\tChannel Shift -->", args.channel_shift)
print("")
# training...
# NOTE(review): fit_generator is deprecated in TF2 (Model.fit accepts
# generators directly); kept as-is here.
net.fit_generator(train_generator,
                  steps_per_epoch=steps_per_epoch,
                  epochs=args.num_epochs,
                  callbacks=callbacks,
                  validation_data=valid_generator,
                  validation_steps=validation_steps,
                  validation_freq=args.validation_freq,
                  max_queue_size=10,
                  workers=os.cpu_count(),
                  use_multiprocessing=False,
                  initial_epoch=args.initial_epoch)
# save the final weights alongside the per-epoch checkpoints
net.save(filepath=os.path.join(
    paths['weights_path'], '{model}_based_on_{base_model}.h5'.format(model=args.model, base_model=base_model)))
|
en
| 0.785919
|
The file defines the training process. @Author: <NAME> @Github: https://github.com/luyanger1799 @Project: https://github.com/luyanger1799/amazing-semantic-segmentation # check related paths # get image and label file names for training and validation # build the model # summary # load weights # chose loss # chose optimizer # lr schedule strategy # training and validation steps # compile the model # data generator # data augmentation setting # callbacks setting # checkpoint setting # tensorboard setting # learning rate scheduler setting # begin training # training... # save weights
| 2.361405
| 2
|
contrib/gp_replica_check/gp_replica_check.py
|
rodel-talampas/gpdb
| 9
|
6627191
|
<gh_stars>1-10
#! /usr/bin/env python
'''
Tool to validate replication consistency between primary and mirror.
============================= DISCLAIMER =============================
This gp_replica_check tool is for 6.0+ development testing replication
consistency between pairs of primary and mirror segments. Currently
the tool is only supported on a single node cluster. The tool creates
a new extension called gp_replica_check and then invokes the extension
on all the primary segments in Utility mode. The extension will do md5
checksum validation on each block of the relation and report a
mismatch for the first inconsistent block. Each block read from disk
will utilize the internal masking functions to ensure that false
mismatches are not reported such as header or hint-bit mismatches. The
user is able to specify what relation types and databases they would
like to validate or it defaults to all.
======================================================================
Note:
For global directory checking, -d flag must be all or template1 at the
moment due to the filespace mapping query we execute.
Usage:
gp_replica_check.py
gp_replica_check.py -d "mydb1,mydb2,..."
gp_replica_check.py -r "heap,ao,btree,..."
gp_replica_check.py -d "mydb1,mydb2,..." -r "hash,bitmap,gist,..."
'''
import argparse
import sys
import subprocess
import threading
import Queue
class ReplicaCheck(threading.Thread):
    """Worker thread that runs gp_replica_check() for one primary/mirror
    segment pair against one database, storing pass/fail in `self.result`.

    NOTE: Python 2 source (print statements, old except syntax).
    """
    def __init__(self, segrow, datname, relation_types):
        # segrow is one joined gp_segment_configuration row:
        # (address, port, content, status, primary datadir, mirror datadir)
        super(ReplicaCheck, self).__init__()
        self.host = segrow[0]
        self.port = segrow[1]
        self.content = segrow[2]
        self.primary_status = segrow[3]
        self.ploc = segrow[4]
        self.mloc = segrow[5]
        self.datname = datname
        self.relation_types = relation_types;
        self.result = False
    def __str__(self):
        return 'Host: %s, Port: %s, Database: %s\n\
        Primary Data Directory Location: %s\n\
        Mirror Data Directory Location: %s' % (self.host, self.port, self.datname,
                                               self.ploc, self.mloc)
    def run(self):
        """Run the check via utility-mode psql; skip if the primary is down."""
        print(self)
        # NOTE(review): SQL is interpolated into a shell string (shell=True);
        # assumes trusted, cluster-derived inputs -- not hardened against
        # injection.
        cmd = '''PGOPTIONS='-c gp_session_role=utility' psql -h %s -p %s -c "select * from gp_replica_check('%s', '%s', '%s')" %s''' % (self.host, self.port,
                                                                                                                                        self.ploc, self.mloc,
                                                                                                                                        self.relation_types,
                                                                                                                                        self.datname)
        if self.primary_status.strip() == 'd':
            print "Primary segment for content %d is down" % self.content
        else:
            try:
                res = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
                print res
                # second-to-last line of psql output carries the boolean column
                self.result = True if res.strip().split('\n')[-2].strip() == 't' else False
            except subprocess.CalledProcessError, e:
                print 'returncode: (%s), cmd: (%s), output: (%s)' % (e.returncode, e.cmd, e.output)
def install_extension(databases):
    """Create the gp_replica_check extension in each requested database.

    `databases` is a comma-separated list of names, or 'all' for every
    database except template0.
    """
    get_datname_sql = ''' SELECT datname FROM pg_database WHERE datname != 'template0' '''
    create_ext_sql = ''' CREATE EXTENSION IF NOT EXISTS gp_replica_check '''
    database_list = map(str.strip, databases.split(','))
    print "Creating gp_replica_check extension on databases if needed:"
    datnames = subprocess.check_output('psql postgres -t -c "%s"' % get_datname_sql, stderr=subprocess.STDOUT, shell=True).split('\n')
    for datname in datnames:
        # skip blank rows from the psql output; honor the allow-list
        if len(datname) > 1 and (datname.strip() in database_list or 'all' in database_list):
            print subprocess.check_output('psql %s -t -c "%s"' % (datname.strip(), create_ext_sql), stderr=subprocess.STDOUT, shell=True)
# Get the primary and mirror servers, for each content ID.
def get_segments():
    """Return {content_id: [segment rows]} for all primary/mirror pairs.

    Each row is (address, port, content, status, primary datadir,
    mirror datadir), as selected by the self-join below.
    """
    seglist_sql = '''
    SELECT gscp.address, gscp.port, gscp.content, gscp.status, gscp.datadir as p_datadir, gscm.datadir as m_datadir
    FROM gp_segment_configuration gscp,
         gp_segment_configuration gscm
    WHERE gscp.content = gscm.content
          AND gscp.role = 'p'
          AND gscm.role = 'm'
    '''
    seglist = subprocess.check_output('psql postgres -t -c "%s"' % seglist_sql, stderr=subprocess.STDOUT, shell=True).split('\n')
    segmap = {}
    for segrow in seglist:
        # psql -t emits pipe-separated columns; blank lines split to length 1
        segelements = map(str.strip, segrow.split('|'))
        if len(segelements) > 1:
            segmap.setdefault(segelements[2], []).append(segelements)
    return segmap
# Get list of database from pg_database.
#
# The argument is a list of "allowed" database. The returned list will be
# filtered to contain only database from that list. If it includes 'all',
# then all databases are returned. (template0 is left out in any case)
def get_databases(databases):
    """Return the database names to check, filtered by the allow-list.

    `databases` is comma-separated; 'all' selects every database except
    template0 (which is always excluded).
    """
    dblist_sql = '''
    SELECT datname FROM pg_catalog.pg_database WHERE datname != 'template0'
    '''
    database_list = map(str.strip, databases.split(','))
    dbrawlist = subprocess.check_output('psql postgres -t -c "%s"' % dblist_sql, stderr=subprocess.STDOUT, shell=True).split('\n')
    dblist = []
    for dbrow in dbrawlist:
        dbname = dbrow.strip()
        if dbname != '':
            if dbname in database_list or 'all' in database_list:
                dblist.append(dbname)
    return dblist
def start_verification(segmap, dblist, relation_types):
    """Run the replica check for every (segment pair, database) combination.

    Exits the process with status 1 if any check fails.
    """
    replica_check_list = []
    for content, seglist in segmap.items():
        for segrow in seglist:
            for dbname in dblist:
                replica_check = ReplicaCheck(segrow, dbname, relation_types)
                replica_check_list.append(replica_check)
                replica_check.start()
                # NOTE(review): join() immediately after start() makes the
                # checks run one at a time; possibly intentional (readable
                # output) -- confirm before parallelising.
                replica_check.join()
    for replica_check in replica_check_list:
        if not replica_check.result:
            print "replica check failed"
            sys.exit(1)
    print "replica check succeeded"
def defargs():
    """Build the CLI parser and return the parsed arguments.

    Both options default to 'all' (check everything).
    """
    parser = argparse.ArgumentParser(
        description='Run replication check on all segments')
    for long_opt, short_opt, what in (
            ('--databases', '-d', 'Database names'),
            ('--relation-types', '-r', 'Relation types')):
        parser.add_argument(long_opt, short_opt, type=str, required=False,
                            default='all',
                            help='%s to run replication check on' % what)
    return parser.parse_args()
if __name__ == '__main__':
    # Parse options, ensure the extension exists everywhere, then verify
    # every primary/mirror pair against every selected database.
    args = defargs()
    install_extension(args.databases)
    start_verification(get_segments(), get_databases(args.databases), args.relation_types)
|
#! /usr/bin/env python
'''
Tool to validate replication consistency between primary and mirror.
============================= DISCLAIMER =============================
This gp_replica_check tool is for 6.0+ development testing replication
consistency between pairs of primary and mirror segments. Currently
the tool is only supported on a single node cluster. The tool creates
a new extension called gp_replica_check and then invokes the extension
on all the primary segments in Utility mode. The extension will do md5
checksum validation on each block of the relation and report a
mismatch for the first inconsistent block. Each block read from disk
will utilize the internal masking functions to ensure that false
mismatches are not reported such as header or hint-bit mismatches. The
user is able to specify what relation types and databases they would
like to validate or it defaults to all.
======================================================================
Note:
For global directory checking, -d flag must be all or template1 at the
moment due to the filespace mapping query we execute.
Usage:
gp_replica_check.py
gp_replica_check.py -d "mydb1,mydb2,..."
gp_replica_check.py -r "heap,ao,btree,..."
gp_replica_check.py -d "mydb1,mydb2,..." -r "hash,bitmap,gist,..."
'''
import argparse
import sys
import subprocess
import threading
import Queue
# NOTE: duplicate copy of the gp_replica_check module body (Python 2 source:
# print statements, old except syntax, Queue import).
class ReplicaCheck(threading.Thread):
    """Worker thread: run gp_replica_check() for one segment pair/database."""
    def __init__(self, segrow, datname, relation_types):
        # segrow: (address, port, content, status, primary datadir, mirror datadir)
        super(ReplicaCheck, self).__init__()
        self.host = segrow[0]
        self.port = segrow[1]
        self.content = segrow[2]
        self.primary_status = segrow[3]
        self.ploc = segrow[4]
        self.mloc = segrow[5]
        self.datname = datname
        self.relation_types = relation_types;
        self.result = False
    def __str__(self):
        return 'Host: %s, Port: %s, Database: %s\n\
        Primary Data Directory Location: %s\n\
        Mirror Data Directory Location: %s' % (self.host, self.port, self.datname,
                                               self.ploc, self.mloc)
    def run(self):
        """Run the check via utility-mode psql; skip if the primary is down."""
        print(self)
        # NOTE(review): SQL interpolated into a shell string (shell=True);
        # assumes trusted, cluster-derived inputs.
        cmd = '''PGOPTIONS='-c gp_session_role=utility' psql -h %s -p %s -c "select * from gp_replica_check('%s', '%s', '%s')" %s''' % (self.host, self.port,
                                                                                                                                        self.ploc, self.mloc,
                                                                                                                                        self.relation_types,
                                                                                                                                        self.datname)
        if self.primary_status.strip() == 'd':
            print "Primary segment for content %d is down" % self.content
        else:
            try:
                res = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
                print res
                # second-to-last output line carries the boolean result column
                self.result = True if res.strip().split('\n')[-2].strip() == 't' else False
            except subprocess.CalledProcessError, e:
                print 'returncode: (%s), cmd: (%s), output: (%s)' % (e.returncode, e.cmd, e.output)
def install_extension(databases):
    """Create the gp_replica_check extension in each requested database."""
    get_datname_sql = ''' SELECT datname FROM pg_database WHERE datname != 'template0' '''
    create_ext_sql = ''' CREATE EXTENSION IF NOT EXISTS gp_replica_check '''
    database_list = map(str.strip, databases.split(','))
    print "Creating gp_replica_check extension on databases if needed:"
    datnames = subprocess.check_output('psql postgres -t -c "%s"' % get_datname_sql, stderr=subprocess.STDOUT, shell=True).split('\n')
    for datname in datnames:
        if len(datname) > 1 and (datname.strip() in database_list or 'all' in database_list):
            print subprocess.check_output('psql %s -t -c "%s"' % (datname.strip(), create_ext_sql), stderr=subprocess.STDOUT, shell=True)
# Get the primary and mirror servers, for each content ID.
def get_segments():
    """Return {content_id: [segment rows]} for all primary/mirror pairs."""
    seglist_sql = '''
    SELECT gscp.address, gscp.port, gscp.content, gscp.status, gscp.datadir as p_datadir, gscm.datadir as m_datadir
    FROM gp_segment_configuration gscp,
         gp_segment_configuration gscm
    WHERE gscp.content = gscm.content
          AND gscp.role = 'p'
          AND gscm.role = 'm'
    '''
    seglist = subprocess.check_output('psql postgres -t -c "%s"' % seglist_sql, stderr=subprocess.STDOUT, shell=True).split('\n')
    segmap = {}
    for segrow in seglist:
        # psql -t emits pipe-separated columns; blank lines split to length 1
        segelements = map(str.strip, segrow.split('|'))
        if len(segelements) > 1:
            segmap.setdefault(segelements[2], []).append(segelements)
    return segmap
# Get list of database from pg_database.
#
# The argument is a list of "allowed" database. The returned list will be
# filtered to contain only database from that list. If it includes 'all',
# then all databases are returned. (template0 is left out in any case)
def get_databases(databases):
    """Return database names to check, filtered by the allow-list."""
    dblist_sql = '''
    SELECT datname FROM pg_catalog.pg_database WHERE datname != 'template0'
    '''
    database_list = map(str.strip, databases.split(','))
    dbrawlist = subprocess.check_output('psql postgres -t -c "%s"' % dblist_sql, stderr=subprocess.STDOUT, shell=True).split('\n')
    dblist = []
    for dbrow in dbrawlist:
        dbname = dbrow.strip()
        if dbname != '':
            if dbname in database_list or 'all' in database_list:
                dblist.append(dbname)
    return dblist
def start_verification(segmap, dblist, relation_types):
    """Run every check; exit with status 1 if any fails."""
    replica_check_list = []
    for content, seglist in segmap.items():
        for segrow in seglist:
            for dbname in dblist:
                replica_check = ReplicaCheck(segrow, dbname, relation_types)
                replica_check_list.append(replica_check)
                replica_check.start()
                # NOTE(review): start()+join() per iteration runs the checks
                # serially -- confirm whether that is intentional.
                replica_check.join()
    for replica_check in replica_check_list:
        if not replica_check.result:
            print "replica check failed"
            sys.exit(1)
    print "replica check succeeded"
def defargs():
    """Build the CLI parser and return the parsed arguments."""
    parser = argparse.ArgumentParser(description='Run replication check on all segments')
    parser.add_argument('--databases', '-d', type=str, required=False, default='all',
                        help='Database names to run replication check on')
    parser.add_argument('--relation-types', '-r', type=str, required=False, default='all',
                        help='Relation types to run replication check on')
    return parser.parse_args()
if __name__ == '__main__':
    args = defargs()
    install_extension(args.databases)
    start_verification(get_segments(), get_databases(args.databases), args.relation_types)
|
en
| 0.672858
|
#! /usr/bin/env python Tool to validate replication consistency between primary and mirror. ============================= DISCLAIMER ============================= This gp_replica_check tool is for 6.0+ development testing replication consistency between pairs of primary and mirror segments. Currently the tool is only supported on a single node cluster. The tool creates a new extension called gp_replica_check and then invokes the extension on all the primary segments in Utility mode. The extension will do md5 checksum validation on each block of the relation and report a mismatch for the first inconsistent block. Each block read from disk will utilize the internal masking functions to ensure that false mismatches are not reported such as header or hint-bit mismatches. The user is able to specify what relation types and databases they would like to validate or it defaults to all. ====================================================================== Note: For global directory checking, -d flag must be all or template1 at the moment due to the filespace mapping query we execute. Usage: gp_replica_check.py gp_replica_check.py -d "mydb1,mydb2,..." gp_replica_check.py -r "heap,ao,btree,..." gp_replica_check.py -d "mydb1,mydb2,..." -r "hash,bitmap,gist,..." PGOPTIONS='-c gp_session_role=utility' psql -h %s -p %s -c "select * from gp_replica_check('%s', '%s', '%s')" %s SELECT datname FROM pg_database WHERE datname != 'template0' CREATE EXTENSION IF NOT EXISTS gp_replica_check # Get the primary and mirror servers, for each content ID. SELECT gscp.address, gscp.port, gscp.content, gscp.status, gscp.datadir as p_datadir, gscm.datadir as m_datadir FROM gp_segment_configuration gscp, gp_segment_configuration gscm WHERE gscp.content = gscm.content AND gscp.role = 'p' AND gscm.role = 'm' # Get list of database from pg_database. # # The argument is a list of "allowed" database. The returned list will be # filtered to contain only database from that list. 
If it includes 'all', # then all databases are returned. (template0 is left out in any case) SELECT datname FROM pg_catalog.pg_database WHERE datname != 'template0'
| 2.242115
| 2
|
amazon-alexa-reviews-nlp/code.py
|
I-Tingya/ga-learner-dsmp-repo
| 0
|
6627192
|
# --------------
# import packages
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import datetime
import warnings
warnings.filterwarnings("ignore")
# Load the dataset (tab-separated).
# NOTE(review): `path` is not defined in this file -- it is expected to be
# injected by the hosting environment; confirm.
df = pd.read_csv(path,sep='\t')
print(df.shape)
# Converting date attribute from string to datetime.date datatype
# NOTE(review): astype(datetime.date) leaves the column as object dtype;
# pd.to_datetime may have been intended -- confirm.
print(df['date'].dtype)
df['date'] = df['date'].astype(datetime.date)
# calculate the total length (characters) of each review
df['length'] = df['verified_reviews'].str.len()
# --------------
## Rating vs feedback
# set figure size
plt.figure(figsize=(10,6))
# count of reviews per rating, split by the feedback label
sns.countplot(x='rating',hue='feedback',data=df)
# display plot
plt.show()
## Product rating vs feedback
# set figure size
plt.figure(figsize=(10,6))
# mean rating per product variation, split by the feedback label
sns.barplot(x='rating',hue='feedback',y='variation',data=df)
# display plot
plt.show()
# --------------
# import packages
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
# declare empty list 'corpus'
corpus = []
# Hoist the loop invariants: one stemmer instance and one stop-word set are
# enough for the whole corpus (previously both were rebuilt on every
# iteration).  Also generalize the hard-coded range(3150) to the actual
# number of rows in the dataframe.
ps = PorterStemmer()
stop_words = set(stopwords.words('english'))
# build the cleaned corpus, one entry per review
for i in range(len(df)):
    # retain word characters only (tokenizes at the same time)
    review = re.findall(r'\w+', df.loc[i, 'verified_reviews'])
    # convert to lower case
    review = [txt.lower() for txt in review]
    # drop stop words and stem the remaining tokens
    review = [ps.stem(token) for token in review if token not in stop_words]
    # re-join tokens into a single cleaned string
    review = ' '.join(review)
    # add to 'corpus'
    corpus.append(review)
# display 'corpus'
corpus
# --------------
# import libraries
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
# Instantiate count vectorizer (bag-of-words over the 1500 most frequent tokens)
cv = CountVectorizer(max_features=1500)
# Independent variable
X = cv.fit_transform(corpus)
# dependent variable
y = df.feedback
# class counts (the feedback label is imbalanced; see the SMOTE step below)
count = y.value_counts()
# Split the dataset
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=0)
# --------------
# import packages
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, f1_score
# Instantiate classifier
rf = RandomForestClassifier(random_state=2)
# fit model on training data
rf.fit(X_train,y_train)
# predict on test data
y_pred = rf.predict(X_test)
# calculate the accuracy score
score = accuracy_score(y_test,y_pred)
# calculate the precision
precision = precision_score(y_test,y_pred)
# NOTE(review): 'score' and 'precision' are computed here but never printed;
# only the post-SMOTE metrics below are displayed.
# --------------
# import packages
from imblearn.over_sampling import SMOTE
# Instantiate smote (oversample the minority feedback class)
smote = SMOTE()
# fit_sample on training data
# NOTE(review): fit_sample is the legacy imblearn spelling of fit_resample.
X_train,y_train = smote.fit_sample(X_train,y_train)
# refit the model on the balanced training data
rf.fit(X_train,y_train)
# predict on test data
y_pred = rf.predict(X_test)
# calculate the accuracy score
score = accuracy_score(y_test,y_pred)
# calculate the precision
precision = precision_score(y_test,y_pred)
# display precision and score
print(score,precision)
|
# --------------
# NOTE: duplicate copy of the Alexa-reviews NLP script above.
# import packages
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import datetime
import warnings
warnings.filterwarnings("ignore")
# Load the dataset (tab-separated; `path` supplied by the environment)
df = pd.read_csv(path,sep='\t')
print(df.shape)
# Converting date attribute from string to datetime.date datatype
print(df['date'].dtype)
df['date'] = df['date'].astype(datetime.date)
# calculate the total length (characters) of each review
df['length'] = df['verified_reviews'].str.len()
# --------------
## Rating vs feedback
# set figure size
plt.figure(figsize=(10,6))
# generate countplot
sns.countplot(x='rating',hue='feedback',data=df)
# display plot
plt.show()
## Product rating vs feedback
# set figure size
plt.figure(figsize=(10,6))
# generate barplot
sns.barplot(x='rating',hue='feedback',y='variation',data=df)
# display plot
plt.show()
# --------------
# import packages
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
# declare empty list 'corpus'
corpus=[]
# for loop to fill in corpus
for i in range(3150):
    # retain alphabets
    review = re.findall(r'\w+',df.loc[i,'verified_reviews'])
    # convert to lower case
    review = [txt.lower() for txt in review]
    # tokenize
    #review = review.split()
    # initialize stemmer object (re-created each iteration; could be hoisted)
    ps = PorterStemmer()
    # perform stemming and stop-word removal
    stop_words=set(stopwords.words('english'))
    review = [ps.stem(i) for i in review if i not in stop_words]
    # join elements of list
    review = ' '.join(review)
    # add to 'corpus'
    corpus.append(review)
# display 'corpus'
corpus
# --------------
# import libraries
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
# Instantiate count vectorizer
cv = CountVectorizer(max_features=1500)
# Independent variable
X = cv.fit_transform(corpus)
# dependent variable
y = df.feedback
# Counts
count = y.value_counts()
# Split the dataset
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=0)
# --------------
# import packages
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, f1_score
# Instantiate classifier
rf = RandomForestClassifier(random_state=2)
# fit model on training data
rf.fit(X_train,y_train)
# predict on test data
y_pred = rf.predict(X_test)
# calculate the accuracy score
score = accuracy_score(y_test,y_pred)
# calculate the precision
precision = precision_score(y_test,y_pred)
# 'score' and 'precision' are computed but not printed at this point
# --------------
# import packages
from imblearn.over_sampling import SMOTE
# Instantiate smote
smote = SMOTE()
# fit_sample on training data (legacy spelling of fit_resample)
X_train,y_train = smote.fit_sample(X_train,y_train)
# fit model on training data
rf.fit(X_train,y_train)
# predict on test data
y_pred = rf.predict(X_test)
# calculate the accuracy score
score = accuracy_score(y_test,y_pred)
# calculate the precision
precision = precision_score(y_test,y_pred)
# display precision and score
print(score,precision)
|
en
| 0.484015
|
# -------------- # import packages # Load the dataset # Converting date attribute from string to datetime.date datatype # calculate the total length of word # -------------- ## Rating vs feedback # set figure size # generate countplot # display plot ## Product rating vs feedback # set figure size # generate barplot # display plot # -------------- # import packages # declare empty list 'corpus' # for loop to fill in corpus # retain alphabets # convert to lower case # tokenize #review = review.split() # initialize stemmer object # perform stemming # join elements of list # add to 'corpus' # display 'corpus' # -------------- # import libraries # Instantiate count vectorizer # Independent variable # dependent variable # Counts # Split the dataset # -------------- # import packages # Instantiate calssifier # fit model on training data # predict on test data # calculate the accuracy score # calculate the precision # display 'score' and 'precision' # -------------- # import packages # Instantiate smote # fit_sample onm training data # fit modelk on training data # predict on test data # calculate the accuracy score # calculate the precision # display precision and score
| 3.300413
| 3
|
oil_division/action.py
|
ikngtty/oil-division
| 0
|
6627193
|
<filename>oil_division/action.py
from dataclasses import dataclass
from typing import Callable, List
from .state import Rule, State
Action = Callable[[State], State]
@dataclass(frozen=True)
class OilMoveAction:
    """Pour as much oil as possible from one pot into another.

    Calling an instance with a ``State`` returns the resulting ``State``;
    the input state is never mutated.
    """

    source_pot_index: int
    dest_pot_index: int

    def __post_init__(self) -> None:
        # Both indices must address an existing pot.
        assert 0 <= self.source_pot_index < Rule.POT_COUNT
        assert 0 <= self.dest_pot_index < Rule.POT_COUNT

    def __call__(self, state: State) -> State:
        src = state.pots[self.source_pot_index]
        dst = state.pots[self.dest_pot_index]
        # We can pour no more than the source holds, and no more than the
        # destination can still take.
        amount = min(src.oil_volume, dst.space)
        pots = list(state.pots)
        pots[self.source_pot_index] = src.add_oil(-amount)
        pots[self.dest_pot_index] = dst.add_oil(amount)
        return State(tuple(pots))

    def __str__(self) -> str:
        return f"{self.source_pot_index}->{self.dest_pot_index}"
def available_actions() -> List[Action]:
    """Enumerate every pour between two distinct pots.

    Ordering matches the nested source-major iteration: for each source pot,
    every destination pot except itself.
    """
    return [
        OilMoveAction(src, dst)
        for src in range(Rule.POT_COUNT)
        for dst in range(Rule.POT_COUNT)
        if dst != src
    ]
|
<filename>oil_division/action.py
from dataclasses import dataclass
from typing import Callable, List
from .state import Rule, State
Action = Callable[[State], State]
@dataclass(frozen=True)
class OilMoveAction:
source_pot_index: int
dest_pot_index: int
def __post_init__(self) -> None:
assert self.source_pot_index >= 0
assert self.source_pot_index < Rule.POT_COUNT
assert self.dest_pot_index >= 0
assert self.dest_pot_index < Rule.POT_COUNT
def __call__(self, state: State) -> State:
source_pot = state.pots[self.source_pot_index]
dest_pot = state.pots[self.dest_pot_index]
move_oil_volume = min(source_pot.oil_volume, dest_pot.space)
oil_moved_pots = list(state.pots)
oil_moved_pots[self.source_pot_index] = source_pot.add_oil(-1 * move_oil_volume)
oil_moved_pots[self.dest_pot_index] = dest_pot.add_oil(move_oil_volume)
return State(tuple(oil_moved_pots))
def __str__(self) -> str:
return f"{self.source_pot_index}->{self.dest_pot_index}"
def available_actions() -> List[Action]:
actions: List[Action] = []
for source_pot_index in range(Rule.POT_COUNT):
for dest_pot_index in range(Rule.POT_COUNT):
if dest_pot_index == source_pot_index:
continue
actions.append(OilMoveAction(source_pot_index, dest_pot_index))
return actions
|
none
| 1
| 2.806766
| 3
|
|
utils/knn_partition.py
|
AntonioCCosta/predictive_ensemble_dynamics
| 6
|
6627194
|
<reponame>AntonioCCosta/predictive_ensemble_dynamics
import numpy as np
import numpy.ma as ma
from sklearn.cluster import MiniBatchKMeans
def kmeans_knn_partition(tseries, n_seeds, batchsize=None, return_centers=False):
    """Partition the rows of `tseries` into `n_seeds` clusters with MiniBatchKMeans.

    Masked rows (any masked component) are excluded from the fit and receive a
    masked label, so the output aligns row-for-row with the input.

    Parameters
    ----------
    tseries : (n_samples, n_features) array or masked array
    n_seeds : int
        Number of clusters.
    batchsize : int, optional
        MiniBatchKMeans batch size; defaults to ``5 * n_seeds``.
    return_centers : bool, optional
        If True also return the fitted cluster centers.

    Returns
    -------
    labels : (n_samples,) int array (masked array when input rows are masked)
    centers : (n_seeds, n_features) array, only when ``return_centers`` is True
    """
    # Fix: identity/None comparison must use `is`, not `==` (PEP 8; `==` can
    # misbehave for array-like arguments that overload equality).
    if batchsize is None:
        batchsize = n_seeds * 5
    if ma.count_masked(tseries) > 0:
        # Fit on fully observed rows only; masked rows keep a masked label.
        labels = ma.zeros(tseries.shape[0], dtype=int)
        labels.mask = np.any(tseries.mask, axis=1)
        kmeans = MiniBatchKMeans(batch_size=batchsize,
                                 n_clusters=n_seeds).fit(ma.compress_rows(tseries))
        labels[~np.any(tseries.mask, axis=1)] = kmeans.labels_
    else:
        kmeans = MiniBatchKMeans(batch_size=batchsize, n_clusters=n_seeds).fit(tseries)
        labels = kmeans.labels_
    if return_centers:
        return labels, kmeans.cluster_centers_
    return labels
#trying using single set of centroid seeds acros worms
#how can I fix the boundaries?
from sklearn.utils import check_random_state,check_array
from sklearn.utils.validation import _num_samples
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.utils.extmath import row_norms, squared_norm, stable_cumsum
import scipy.sparse as sp
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def _validate_center_shape(X, n_centers, centers):
"""Check if centers is compatible with X and n_centers"""
if len(centers) != n_centers:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, n_centers))
if centers.shape[1] != X.shape[1]:
raise ValueError(
"The number of features of the initial centers %s "
"does not match the number of features of the data %s."
% (centers.shape[1], X.shape[1]))
from sklearn.cluster.k_means_ import _k_means,_k_init,_labels_inertia,_check_sample_weight
def _init_centroids(X, k, init = 'k-means++', random_state=None, x_squared_norms=None,
                    init_size=None):
    """Compute the initial centroids
    Parameters
    ----------
    X : array, shape (n_samples, n_features)
    k : int
        number of centroids
    init : {'k-means++', 'random' or ndarray or callable} optional
        Method for initialization.
        NOTE(review): currently ignored -- k-means++ seeding is always used.
    random_state : int, RandomState instance or None (default)
        Determines random number generation for centroid initialization. Use
        an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`.
    x_squared_norms : array, shape (n_samples,), optional
        Squared euclidean norm of each data point. Pass it if you have it at
        hands already to avoid it being recomputed here. Default: None
    init_size : int, optional
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        only algorithm is initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than k.
    Returns
    -------
    centers : array, shape(k, n_features)
    """
    # Fix: `warnings` was never imported anywhere in this module, so the
    # init_size < k branch raised NameError instead of warning.
    import warnings

    random_state = check_random_state(random_state)
    n_samples = X.shape[0]
    if x_squared_norms is None:
        x_squared_norms = row_norms(X, squared=True)
    if init_size is not None and init_size < n_samples:
        if init_size < k:
            warnings.warn(
                "init_size=%d should be larger than k=%d. "
                "Setting it to 3*k" % (init_size, k),
                RuntimeWarning, stacklevel=2)
            init_size = 3 * k
        # Subsample the data (and its cached norms) for a cheaper seeding pass.
        init_indices = random_state.randint(0, n_samples, init_size)
        X = X[init_indices]
        x_squared_norms = x_squared_norms[init_indices]
        n_samples = X.shape[0]
    elif n_samples < k:
        raise ValueError(
            "n_samples=%d should be larger than k=%d" % (n_samples, k))
    centers = _k_init(X, k, random_state=random_state, x_squared_norms=x_squared_norms)
    if sp.issparse(centers):
        centers = centers.toarray()
    _validate_center_shape(X, k, centers)
    return centers
from sklearn.utils._joblib import Parallel
from sklearn.utils._joblib import delayed
def get_centroid_parallel(X, k, init = 'k-means++', random_state=None, x_squared_norms=None,
                          init_size=None, n_init = 50, n_jobs=-1):
    """Draw `n_init` candidate centroid sets for k-means, in parallel.

    Returns a list of `n_init` center arrays produced by `_init_centroids`.
    NOTE(review): the per-run `seed` values are drawn but never forwarded to
    `_init_centroids`; every worker receives the shared `random_state` object,
    so `seeds` effectively only controls how many runs happen -- confirm this
    is intended.
    """
    random_state = check_random_state(random_state)
    seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
    jobs = (delayed(_init_centroids)(X, k, init, random_state,
                                     x_squared_norms, init_size)
            for seed in seeds)
    return Parallel(n_jobs=n_jobs, verbose=0)(jobs)
def kmeans_single(X, n_clusters, centers, sample_weight=None, max_iter=300,
                  init='k-means++', verbose=False, x_squared_norms=None,
                  random_state=None, tol=1e-4,
                  precompute_distances=True):
    """A single run of k-means Lloyd iteration, starting from given centers.
    Parameters
    ----------
    X : array-like of floats, shape (n_samples, n_features)
        The observations to cluster.
    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.
    centers : ndarray, shape (n_clusters, n_features)
        Initial cluster centers to iterate from (required; unlike sklearn's
        own single-run routine no initialization is performed here, so the
        `init` argument is accepted but unused).
    max_iter : int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.
    init : {'k-means++', 'random', or ndarray, or a callable}, optional
        Unused -- kept for signature compatibility; see `centers`.
    tol : float, optional
        Convergence threshold on the squared shift of the centers between
        two iterations.
        NOTE(review): used as an absolute threshold here -- `_tolerance` is
        never applied to make it data-relative; confirm that is intended.
    verbose : boolean, optional
        Verbosity mode
    x_squared_norms : array
        Precomputed x_squared_norms.
    precompute_distances : boolean, default: True
        Precompute distances (faster but takes more memory).
    random_state : int, RandomState instance or None (default)
        Determines random number generation. Use an int to make the
        randomness deterministic.
        See :term:`Glossary <random_state>`.
    Returns
    -------
    label : integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to, for the best (lowest-inertia)
        iteration seen.
        NOTE(review): only the labels are returned -- the best centers,
        inertia and iteration count documented by the upstream sklearn
        routine are computed but discarded here.
    """
    random_state = check_random_state(random_state)
    sample_weight = _check_sample_weight(X, sample_weight)
    best_labels, best_inertia, best_centers = None, None, None
    if verbose:
        print("Initialization complete")

    # Allocate memory to store the distances for each sample to its
    # closer center for reallocation in case of ties
    distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)

    # iterations
    for i in range(max_iter):
        centers_old = centers.copy()
        # labels assignment is also called the E-step of EM
        labels, inertia = \
            _labels_inertia(X, sample_weight, x_squared_norms, centers,
                            precompute_distances=precompute_distances,
                            distances=distances)

        # computation of the means is also called the M-step of EM
        if sp.issparse(X):
            centers = _k_means._centers_sparse(X, sample_weight, labels,
                                               n_clusters, distances)
        else:
            centers = _k_means._centers_dense(X, sample_weight, labels,
                                              n_clusters, distances)

        if verbose:
            print("Iteration %2d, inertia %.3f" % (i, inertia))

        # Track the best (lowest-inertia) assignment seen so far.
        if best_inertia is None or inertia < best_inertia:
            best_labels = labels.copy()
            best_centers = centers.copy()
            best_inertia = inertia

        center_shift_total = squared_norm(centers_old - centers)
        if center_shift_total <= tol:
            if verbose:
                print("Converged at iteration %d: "
                      "center shift %e within tolerance %e"
                      % (i, center_shift_total, tol))
            break

    if center_shift_total > 0:
        # rerun E-step in case of non-convergence so that predicted labels
        # match cluster centers
        best_labels, best_inertia = \
            _labels_inertia(X, sample_weight, x_squared_norms, best_centers,
                            precompute_distances=precompute_distances,
                            distances=distances)

    return best_labels
def obtain_labels(X_train, X, n_clusters, n_init):
    """Run `n_init` independent k-means clusterings of `X` and return the
    label array from each run.

    Parameters
    ----------
    X_train : array-like
        NOTE(review): currently unused -- kept for interface compatibility.
    X : array-like, shape (n_samples, n_features)
        Data to cluster.
    n_clusters : int
        Number of clusters per run.
    n_init : int
        Number of independent centroid seedings / runs.

    Returns
    -------
    list of (n_samples,) int arrays, one label assignment per seeding.
    """
    # Fix: `n_init` was passed positionally and therefore landed in the
    # `init` parameter of get_centroid_parallel, leaving n_init at its
    # default of 50 regardless of the requested value.
    centers_seeds = get_centroid_parallel(X, n_clusters, n_init=n_init)
    labels_seeds = Parallel(n_jobs=-1, verbose=0)(
        delayed(kmeans_single)(X, n_clusters, centers)
        for centers in centers_seeds)
    # Fix: the computed labels were silently discarded (no return statement).
    return labels_seeds
|
import numpy as np
import numpy.ma as ma
from sklearn.cluster import MiniBatchKMeans
def kmeans_knn_partition(tseries,n_seeds,batchsize=None,return_centers=False):
if batchsize==None:
batchsize = n_seeds*5
if ma.count_masked(tseries)>0:
labels = ma.zeros(tseries.shape[0],dtype=int)
labels.mask = np.any(tseries.mask,axis=1)
kmeans = MiniBatchKMeans(batch_size=batchsize,n_clusters=n_seeds).fit(ma.compress_rows(tseries))
labels[~np.any(tseries.mask,axis=1)] = kmeans.labels_
else:
kmeans = MiniBatchKMeans(batch_size=batchsize,n_clusters=n_seeds).fit(tseries)
labels=kmeans.labels_
if return_centers:
return labels,kmeans.cluster_centers_
return labels
#trying using single set of centroid seeds acros worms
#how can I fix the boundaries?
from sklearn.utils import check_random_state,check_array
from sklearn.utils.validation import _num_samples
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.utils.extmath import row_norms, squared_norm, stable_cumsum
import scipy.sparse as sp
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def _validate_center_shape(X, n_centers, centers):
"""Check if centers is compatible with X and n_centers"""
if len(centers) != n_centers:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, n_centers))
if centers.shape[1] != X.shape[1]:
raise ValueError(
"The number of features of the initial centers %s "
"does not match the number of features of the data %s."
% (centers.shape[1], X.shape[1]))
from sklearn.cluster.k_means_ import _k_means,_k_init,_labels_inertia,_check_sample_weight
def _init_centroids(X, k, init = 'k-means++', random_state=None, x_squared_norms=None,
init_size=None):
"""Compute the initial centroids
Parameters
----------
X : array, shape (n_samples, n_features)
k : int
number of centroids
init : {'k-means++', 'random' or ndarray or callable} optional
Method for initialization
random_state : int, RandomState instance or None (default)
Determines random number generation for centroid initialization. Use
an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
x_squared_norms : array, shape (n_samples,), optional
Squared euclidean norm of each data point. Pass it if you have it at
hands already to avoid it being recomputed here. Default: None
init_size : int, optional
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
only algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than k.
Returns
-------
centers : array, shape(k, n_features)
"""
random_state = check_random_state(random_state)
n_samples = X.shape[0]
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
if init_size is not None and init_size < n_samples:
if init_size < k:
warnings.warn(
"init_size=%d should be larger than k=%d. "
"Setting it to 3*k" % (init_size, k),
RuntimeWarning, stacklevel=2)
init_size = 3 * k
init_indices = random_state.randint(0, n_samples, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
elif n_samples < k:
raise ValueError(
"n_samples=%d should be larger than k=%d" % (n_samples, k))
centers = _k_init(X, k, random_state=random_state,x_squared_norms=x_squared_norms)
if sp.issparse(centers):
centers = centers.toarray()
_validate_center_shape(X, k, centers)
return centers
from sklearn.utils._joblib import Parallel
from sklearn.utils._joblib import delayed
def get_centroid_parallel(X, k, init = 'k-means++', random_state=None, x_squared_norms=None,
init_size=None, n_init = 50, n_jobs=-1):
random_state = check_random_state(random_state)
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(_init_centroids)(X, k, init, random_state,
x_squared_norms,init_size)
for seed in seeds)
return results
def kmeans_single(X, n_clusters, centers, sample_weight=None, max_iter=300,
init='k-means++', verbose=False, x_squared_norms=None,
random_state=None, tol=1e-4,
precompute_distances=True):
"""A single run of k-means
Parameters
----------
X : array-like of floats, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (k, p) and gives
the initial centers.
If a callable is passed, it should take arguments X, k and
and a random state and return an initialization.
tol : float, optional
The relative increment in the results before declaring convergence.
verbose : boolean, optional
Verbosity mode
x_squared_norms : array
Precomputed x_squared_norms.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
random_state : int, RandomState instance or None (default)
Determines random number generation for centroid initialization. Use
an int to make the randomness deterministic.
See :term:`Glossary <random_state>`.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
random_state = check_random_state(random_state)
sample_weight = _check_sample_weight(X, sample_weight)
best_labels, best_inertia, best_centers = None, None, None
if verbose:
print("Initialization complete")
# Allocate memory to store the distances for each sample to its
# closer center for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
# labels assignment is also called the E-step of EM
labels, inertia = \
_labels_inertia(X, sample_weight, x_squared_norms, centers,
precompute_distances=precompute_distances,
distances=distances)
# computation of the means is also called the M-step of EM
if sp.issparse(X):
centers = _k_means._centers_sparse(X, sample_weight, labels,
n_clusters, distances)
else:
centers = _k_means._centers_dense(X, sample_weight, labels,
n_clusters, distances)
if verbose:
print("Iteration %2d, inertia %.3f" % (i, inertia))
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
center_shift_total = squared_norm(centers_old - centers)
if center_shift_total <= tol:
if verbose:
print("Converged at iteration %d: "
"center shift %e within tolerance %e"
% (i, center_shift_total, tol))
break
if center_shift_total > 0:
# rerun E-step in case of non-convergence so that predicted labels
# match cluster centers
best_labels, best_inertia = \
_labels_inertia(X, sample_weight, x_squared_norms, best_centers,
precompute_distances=precompute_distances,
distances=distances)
return best_labels
def obtain_labels(X_train,X,n_clusters,n_init):
centers_seeds = get_centroid_parallel(X,n_clusters,n_init)
labels_seeds = Parallel(n_jobs=-1, verbose=0)(delayed(kmeans_single)(X, n_clusters, centers)
for centers in centers_seeds)
|
en
| 0.764332
|
#trying using single set of centroid seeds acros worms #how can I fix the boundaries? Return a tolerance which is independent of the dataset Check if centers is compatible with X and n_centers Compute the initial centroids Parameters ---------- X : array, shape (n_samples, n_features) k : int number of centroids init : {'k-means++', 'random' or ndarray or callable} optional Method for initialization random_state : int, RandomState instance or None (default) Determines random number generation for centroid initialization. Use an int to make the randomness deterministic. See :term:`Glossary <random_state>`. x_squared_norms : array, shape (n_samples,), optional Squared euclidean norm of each data point. Pass it if you have it at hands already to avoid it being recomputed here. Default: None init_size : int, optional Number of samples to randomly sample for speeding up the initialization (sometimes at the expense of accuracy): the only algorithm is initialized by running a batch KMeans on a random subset of the data. This needs to be larger than k. Returns ------- centers : array, shape(k, n_features) A single run of k-means Parameters ---------- X : array-like of floats, shape (n_samples, n_features) The observations to cluster. n_clusters : int The number of clusters to form as well as the number of centroids to generate. max_iter : int, optional, default 300 Maximum number of iterations of the k-means algorithm to run. init : {'k-means++', 'random', or ndarray, or a callable}, optional Method for initialization, default to 'k-means++': 'k-means++' : selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. See section Notes in k_init for more details. 'random': choose k observations (rows) at random from data for the initial centroids. If an ndarray is passed, it should be of shape (k, p) and gives the initial centers. If a callable is passed, it should take arguments X, k and and a random state and return an initialization. 
tol : float, optional The relative increment in the results before declaring convergence. verbose : boolean, optional Verbosity mode x_squared_norms : array Precomputed x_squared_norms. precompute_distances : boolean, default: True Precompute distances (faster but takes more memory). random_state : int, RandomState instance or None (default) Determines random number generation for centroid initialization. Use an int to make the randomness deterministic. See :term:`Glossary <random_state>`. Returns ------- centroid : float ndarray with shape (k, n_features) Centroids found at the last iteration of k-means. label : integer ndarray with shape (n_samples,) label[i] is the code or index of the centroid the i'th observation is closest to. inertia : float The final value of the inertia criterion (sum of squared distances to the closest centroid for all observations in the training set). n_iter : int Number of iterations run. # Allocate memory to store the distances for each sample to its # closer center for reallocation in case of ties # iterations # labels assignment is also called the E-step of EM # computation of the means is also called the M-step of EM # rerun E-step in case of non-convergence so that predicted labels # match cluster centers
| 2.466139
| 2
|
qdef2d/slabutils.py
|
aztan2/charged-defects-framework
| 4
|
6627195
|
import os
import numpy as np
from pymatgen.io.vasp.inputs import Poscar
from pymatgen import Structure
from pymatgen.core.operations import SymmOp
## A lot of the functions in here have been copied and only
## slightly modified from functions in the MPInterfaces package.
## It should be possible to merge and replace those functions
## and import from MPInterfaces instead of keeping separate versions.
## I'll leave that as a task for someone else though...
def get_rotation_matrix(axis, theta):
    """
    Copied from MPInterfaces with some slight modification.
    Build the matrix for a counterclockwise rotation about ``axis`` by
    ``theta`` radians, via the Euler-Rodrigues formula.
    Credit: http://stackoverflow.com/users/190597/unutbu

    Parameters
    ----------
    axis (list): rotation axis of the form [x, y, z]
    theta (float): rotational angle in radians

    Returns
    -------
    (array) Rotation matrix.
    """
    unit = np.asarray(list(axis), dtype=float)
    unit = unit / np.linalg.norm(unit)
    # Euler-Rodrigues parameters: scalar part a, vector part (b, c, d).
    a = np.cos(theta / 2.0)
    b, c, d = (-np.sin(theta / 2.0) * unit).tolist()
    aa, bb, cc, dd = a * a, b * b, c * c, d * d
    bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
    return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
                     [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
                     [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
def align_axis(structure, axis='c', direction=(0, 0, 1)):
    """
    Copied from MPInterfaces with some modification.
    Rotates a structure so that the specified axis is along
    the [001] direction. This is useful for adding vacuum, and
    in general for using vasp compiled with no z-axis relaxation.

    Note: the rotations are applied in place via
    ``structure.apply_operation``, so the caller's object is modified as
    well as returned.

    Parameters
    ----------
    structure (Structure): Pymatgen Structure object to rotate.
    axis: Axis to be rotated. Can be 'a', 'b', 'c', or a 1x3 vector.
    direction (vector): Final axis to be rotated to.

    Returns
    -------
    (Structure) Structure object rotated to align axis along direction.
    """
    ## rotate the specified axis to be along the 001 direction
    # Resolve a lattice-vector name into the actual cartesian vector.
    if axis == 'a':
        axis = structure.lattice._matrix[0]
    elif axis == 'b':
        axis = structure.lattice._matrix[1]
    elif axis == 'c':
        axis = structure.lattice._matrix[2]
    rot_axis = np.cross(axis, direction)
    # If axis is already (anti)parallel to `direction` the cross product has
    # no in-plane component and no rotation is needed.
    if not(rot_axis[0] == 0 and rot_axis[1] == 0):
        theta = (np.arccos(np.dot(axis, direction) /
                 (np.linalg.norm(axis) * np.linalg.norm(direction))))
        R = get_rotation_matrix(rot_axis, theta)
        rotation = SymmOp.from_rotation_and_translation(rotation_matrix=R)
        structure.apply_operation(rotation,fractional=False)
    ## rotate such that the 001 direction lies along the 'c' axis
    # NOTE(review): second pass swaps the cross-product order (opposite
    # rotation sense) and applies in fractional coordinates -- presumably to
    # bring the lattice's own c vector onto [001]; confirm against callers.
    axis = structure.lattice._matrix[2]
    rot_axis = np.cross(direction, axis)
    if not(rot_axis[0] == 0 and rot_axis[1] == 0):
        theta = (np.arccos(np.dot(axis, direction) /
                 (np.linalg.norm(axis) * np.linalg.norm(direction))))
        R = get_rotation_matrix(rot_axis, theta)
        rotation = SymmOp.from_rotation_and_translation(rotation_matrix=R)
        structure.apply_operation(rotation,fractional=True)
    # structure.lattice._matrix[2][2] = abs(structure.lattice._matrix[2][2])
    return structure
def center_slab(structure):
    """
    Copied from MPInterfaces with some modification.
    Centers the atoms in a slab structure around 0.5 fractional height.

    The translation is applied in place via ``structure.translate_sites``,
    so the caller's object is modified as well as returned.

    Parameters
    ----------
    structure (Structure): Structure to center

    Returns
    -------
    (Structure) Centered Structure object.
    """
    ## attempt to catch literal edge cases
    ## in which the layers are centered around 0.0, e.g. at ~0.1 and ~0.9
    slab_center = np.average([s.frac_coords[2] for s in structure.sites])
    # If any site sits more than a quarter cell from the apparent center, the
    # slab likely straddles the periodic boundary: shift everything by half a
    # cell first so it becomes contiguous.
    if any([abs(s.frac_coords[2]-slab_center) > 0.25 for s in structure.sites]):
        structure.translate_sites(range(structure.num_sites), (0, 0, 0.5))
    ## repeat this process to make sure it is properly centered
    ## sometimes the slab center is wrongly identified the first time because of the PBC
    ## after shifting it once, it *should* be away from such edge cases
    slab_center = np.average([s. frac_coords[2] for s in structure.sites])
    structure.translate_sites(range(structure.num_sites), (0, 0, 0.5 - slab_center))
    return structure
def get_slab_thickness(structure):
    """
    Returns the thickness (max minus min atomic z coordinate, in Angstroms)
    of a 2D material or slab, after aligning its c axis with [001] and
    centering it.

    Parameters
    ----------
    structure (Structure): Structure to measure.

    Returns
    -------
    (float) Slab thickness in Angstroms.
    """
    # NOTE(review): align_axis and center_slab both operate in place as well
    # as returning the structure, so the caller's object is modified too.
    structure = align_axis(structure)
    structure = center_slab(structure)
    max_height = max([s.coords[2] for s in structure.sites])
    min_height = min([s.coords[2] for s in structure.sites])
    return (max_height - min_height)
def add_vacuum(structure, vacuum):
    """
    Copied from MPInterfaces with some slight modification.
    Adds padding to a slab or 2D material.

    Parameters
    ----------
    structure (Structure): Structure to add vacuum to
    vacuum (float): Vacuum thickness to add in Angstroms

    Returns
    -------
    (Structure) Structure object with vacuum added (a new Structure; the
    input is also mutated by the internal align_axis call).
    """
    structure = align_axis(structure)
    coords = [s.coords for s in structure.sites]
    species = [s.specie for s in structure.sites]
    lattice = structure.lattice.matrix.copy()
    # Stretch only the z component of the c lattice vector; atoms are rebuilt
    # from their cartesian coordinates, so the slab itself is unchanged.
    lattice[2][2] += vacuum
    structure = Structure(lattice, species, coords, coords_are_cartesian=True)
    return center_slab(structure)
def layer_from_bulk(struct_bulk, slabmin, slabmax):
    """
    Extracts a layer from a layered bulk material.

    Parameters
    ----------
    struct_bulk (Structure): Pymatgen Structure object of the layered bulk
    slabmin (float): fractional coord of the bottom of the layer to isolate
    slabmax (float): fractional coord of the top of the layer to isolate

    Returns
    -------
    (Structure) Structure object of the single layer.
    """
    layer = struct_bulk.copy()
    # Drop every site whose fractional c coordinate falls outside the
    # [slabmin, slabmax] window.
    outside = [idx for idx, site in enumerate(layer.sites)
               if not (slabmin <= site.c <= slabmax)]
    layer.remove_sites(outside)
    return layer
def gen_unitcell_2d(path_poscar, vacuum, zaxis='c', from_bulk=False, slabmin=None, slabmax=None):
    """
    Generate a 2D unitcell POSCAR with the requested vacuum spacing, written
    to a ``vac_<vacuum>`` subdirectory of the current working directory.

    Parameters
    ----------
    path_poscar (str): path to unitcell POSCAR
    vacuum (int): vacuum spacing in Angstroms
    [optional] zaxis (str): axis perpendicular to layer: a/b/c(default)
    [optional] from_bulk (bool): extract layer from bulk? Default=False.
    [optional] slabmin (float): fractional coord of the bottom of the layer to isolate
    [optional] slabmax (float): fractional coord of the top of the layer to isolate

    Raises
    ------
    ValueError: when from_bulk is True and slabmin/slabmax are missing or
    inconsistent (slabmin > slabmax).
    """
    ## get current working directory
    ## subdirectories for different vacuum spacings will be created here
    dir_main = os.getcwd()
    poscar = Poscar.from_file(path_poscar, check_for_POTCAR=False)
    struct = align_axis(poscar.structure, axis=zaxis)
    if from_bulk:
        # Fix: None comparisons use `is`, and the guard pyramid is flattened
        # into early raises (same messages, same behavior).
        if slabmin is None or slabmax is None:
            raise ValueError('missing slabmin and/or slabmax argument')
        if slabmin > slabmax:
            raise ValueError('incorrect slabmin and/or slabmax argument')
        struct = layer_from_bulk(struct, slabmin, slabmax)
    # Add just enough vacuum on top of the existing spacing to reach the
    # requested total: vacuum - (current cell height - slab thickness).
    slab_d = get_slab_thickness(struct)
    struct = add_vacuum(struct, vacuum - (struct.lattice.c - slab_d))
    struct = center_slab(struct)
    dir_sub = os.path.join(dir_main, "vac_%d" % vacuum)
    if not os.path.exists(dir_sub):
        os.makedirs(dir_sub)
    Poscar.write_file(Poscar(struct), os.path.join(dir_sub, "POSCAR"))
|
import os
import numpy as np
from pymatgen.io.vasp.inputs import Poscar
from pymatgen import Structure
from pymatgen.core.operations import SymmOp
## A lot of the functions in here have been copied and only
## slightly modified from functions in the MPInterfaces package.
## It should be possible to merge and replace those functions
## and import from MPInterfaces instead of keeping separate versions.
## I'll leave that as a task for someone else though...
def get_rotation_matrix(axis, theta):
"""
Copied from MPInterfaces with some slight modification.
Find the rotation matrix associated with counterclockwise rotation
about the given axis by theta radians, using Euler–Rodrigues formula.
Credit: http://stackoverflow.com/users/190597/unutbu
Parameters
----------
axis (list): rotation axis of the form [x, y, z]
theta (float): rotational angle in radians
Returns
-------
(array) Rotation matrix.
"""
axis = np.array(list(axis))
axis = axis / np.linalg.norm(axis)
axis *= -np.sin(theta/2.0)
a = np.cos(theta/2.0)
b, c, d = tuple(axis.tolist())
aa, bb, cc, dd = a*a, b*b, c*c, d*d
bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d
return np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],
[2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],
[2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])
def align_axis(structure, axis='c', direction=(0, 0, 1)):
"""
Copied from MPInterfaces with some modification.
Rotates a structure so that the specified axis is along
the [001] direction. This is useful for adding vacuum, and
in general for using vasp compiled with no z-axis relaxation.
Parameters
----------
structure (Structure): Pymatgen Structure object to rotate.
axis: Axis to be rotated. Can be 'a', 'b', 'c', or a 1x3 vector.
direction (vector): Final axis to be rotated to.
Returns
-------
(Structure) Structure object rotated to align axis along direction.
"""
## rotate the specified axis to be along the 001 direction
if axis == 'a':
axis = structure.lattice._matrix[0]
elif axis == 'b':
axis = structure.lattice._matrix[1]
elif axis == 'c':
axis = structure.lattice._matrix[2]
rot_axis = np.cross(axis, direction)
if not(rot_axis[0] == 0 and rot_axis[1] == 0):
theta = (np.arccos(np.dot(axis, direction) /
(np.linalg.norm(axis) * np.linalg.norm(direction))))
R = get_rotation_matrix(rot_axis, theta)
rotation = SymmOp.from_rotation_and_translation(rotation_matrix=R)
structure.apply_operation(rotation,fractional=False)
## rotate such that the 001 direction lies along the 'c' axis
axis = structure.lattice._matrix[2]
rot_axis = np.cross(direction, axis)
if not(rot_axis[0] == 0 and rot_axis[1] == 0):
theta = (np.arccos(np.dot(axis, direction) /
(np.linalg.norm(axis) * np.linalg.norm(direction))))
R = get_rotation_matrix(rot_axis, theta)
rotation = SymmOp.from_rotation_and_translation(rotation_matrix=R)
structure.apply_operation(rotation,fractional=True)
# structure.lattice._matrix[2][2] = abs(structure.lattice._matrix[2][2])
return structure
def center_slab(structure):
"""
Copied from MPInterfaces with some modification.
Centers the atoms in a slab structure around 0.5 fractional height.
Parameters
----------
structure (Structure): Structure to center
Returns
-------
(Structure) Centered Structure object.
"""
## attempt to catch literal edge cases
## in which the layers are centered around 0.0, e.g. at ~0.1 and ~0.9
slab_center = np.average([s.frac_coords[2] for s in structure.sites])
if any([abs(s.frac_coords[2]-slab_center) > 0.25 for s in structure.sites]):
structure.translate_sites(range(structure.num_sites), (0, 0, 0.5))
## repeat this process to make sure it is properly centered
## sometimes the slab center is wrongly identified the first time because of the PBC
## after shifting it once, it *should* be away from such edge cases
slab_center = np.average([s. frac_coords[2] for s in structure.sites])
structure.translate_sites(range(structure.num_sites), (0, 0, 0.5 - slab_center))
return structure
def get_slab_thickness(structure):
"""
Returns the interlayer spacing for a 2D material or slab.
Parameters
----------
structure (Structure): Structure to check spacing for.
cut (float): a fractional z-coordinate that must be within the vacuum region.
Returns
-------
(float) Spacing in Angstroms.
"""
structure = align_axis(structure)
structure = center_slab(structure)
max_height = max([s.coords[2] for s in structure.sites])
min_height = min([s.coords[2] for s in structure.sites])
return (max_height - min_height)
def add_vacuum(structure, vacuum):
"""
Copied from MPInterfaces with some slight modification.
Adds padding to a slab or 2D material.
Parameters
----------
structure (Structure): Structure to add vacuum to
vacuum (float): Vacuum thickness to add in Angstroms
Returns
-------
(Structure) Structure object with vacuum added.
"""
structure = align_axis(structure)
coords = [s.coords for s in structure.sites]
species = [s.specie for s in structure.sites]
lattice = structure.lattice.matrix.copy()
lattice[2][2] += vacuum
structure = Structure(lattice, species, coords, coords_are_cartesian=True)
return center_slab(structure)
def layer_from_bulk(struct_bulk,slabmin,slabmax):
"""
Extracts a layer from a layered bulk material.
Parameters
----------
struct_bulk (Structure): Pymatgen Structure object of the layered bulk
slabmin (float): fractional coord of the bottom of the layer to isolate
slabmax (float): fractional coord of the top of the layer to isolate
Returns
-------
(Structure) Structure object of the single layer.
"""
struct_layer = struct_bulk.copy()
not_in_layer = [i for i,site in enumerate(struct_layer.sites) \
if site.c < slabmin or site.c > slabmax]
struct_layer.remove_sites(not_in_layer)
return struct_layer
def gen_unitcell_2d(path_poscar,vacuum,zaxis='c',from_bulk=False,slabmin=None,slabmax=None):
"""
Generate 2D unitcell.
Parameters
----------
path_poscar (str): path to unitcell POSCAR
vacuum (int): vacuum spacing in Angstroms
[optional] zaxis (str): axis perpendicular to layer: a/b/c(default)
[optional] from_bulk (bool): extract layer from bulk? Default=False.
[optional] slabmin (float): fractional coord of the bottom of the layer to isolate
[optional] slabmax (float): fractional coord of the top of the layer to isolate
"""
## get current working directory
## subdirectories for different vacuum spacings will be created here
dir_main = os.getcwd()
poscar = Poscar.from_file(path_poscar, check_for_POTCAR=False)
struct = align_axis(poscar.structure,axis=zaxis)
if from_bulk:
if slabmin == None or slabmax == None:
raise ValueError('missing slabmin and/or slabmax argument')
else:
if slabmin > slabmax:
raise ValueError('incorrect slabmin and/or slabmax argument')
else:
struct = layer_from_bulk(struct,slabmin,slabmax)
slab_d = get_slab_thickness(struct)
struct = add_vacuum(struct, vacuum - (struct.lattice.c - slab_d))
struct = center_slab(struct)
dir_sub = os.path.join(dir_main,"vac_%d"%vacuum)
if not os.path.exists(dir_sub):
os.makedirs(dir_sub)
Poscar.write_file(Poscar(struct),os.path.join(dir_sub,"POSCAR"))
|
en
| 0.7242
|
## A lot of the functions in here have been copied and only ## slightly modified from functions in the MPInterfaces package. ## It should be possible to merge and replace those functions ## and import from MPInterfaces instead of keeping separate versions. ## I'll leave that as a task for someone else though... Copied from MPInterfaces with some slight modification. Find the rotation matrix associated with counterclockwise rotation about the given axis by theta radians, using Euler–Rodrigues formula. Credit: http://stackoverflow.com/users/190597/unutbu Parameters ---------- axis (list): rotation axis of the form [x, y, z] theta (float): rotational angle in radians Returns ------- (array) Rotation matrix. Copied from MPInterfaces with some modification. Rotates a structure so that the specified axis is along the [001] direction. This is useful for adding vacuum, and in general for using vasp compiled with no z-axis relaxation. Parameters ---------- structure (Structure): Pymatgen Structure object to rotate. axis: Axis to be rotated. Can be 'a', 'b', 'c', or a 1x3 vector. direction (vector): Final axis to be rotated to. Returns ------- (Structure) Structure object rotated to align axis along direction. ## rotate the specified axis to be along the 001 direction ## rotate such that the 001 direction lies along the 'c' axis # structure.lattice._matrix[2][2] = abs(structure.lattice._matrix[2][2]) Copied from MPInterfaces with some modification. Centers the atoms in a slab structure around 0.5 fractional height. Parameters ---------- structure (Structure): Structure to center Returns ------- (Structure) Centered Structure object. ## attempt to catch literal edge cases ## in which the layers are centered around 0.0, e.g. 
at ~0.1 and ~0.9 ## repeat this process to make sure it is properly centered ## sometimes the slab center is wrongly identified the first time because of the PBC ## after shifting it once, it *should* be away from such edge cases Returns the interlayer spacing for a 2D material or slab. Parameters ---------- structure (Structure): Structure to check spacing for. cut (float): a fractional z-coordinate that must be within the vacuum region. Returns ------- (float) Spacing in Angstroms. Copied from MPInterfaces with some slight modification. Adds padding to a slab or 2D material. Parameters ---------- structure (Structure): Structure to add vacuum to vacuum (float): Vacuum thickness to add in Angstroms Returns ------- (Structure) Structure object with vacuum added. Extracts a layer from a layered bulk material. Parameters ---------- struct_bulk (Structure): Pymatgen Structure object of the layered bulk slabmin (float): fractional coord of the bottom of the layer to isolate slabmax (float): fractional coord of the top of the layer to isolate Returns ------- (Structure) Structure object of the single layer. Generate 2D unitcell. Parameters ---------- path_poscar (str): path to unitcell POSCAR vacuum (int): vacuum spacing in Angstroms [optional] zaxis (str): axis perpendicular to layer: a/b/c(default) [optional] from_bulk (bool): extract layer from bulk? Default=False. [optional] slabmin (float): fractional coord of the bottom of the layer to isolate [optional] slabmax (float): fractional coord of the top of the layer to isolate ## get current working directory ## subdirectories for different vacuum spacings will be created here
| 2.783407
| 3
|
Tests/Plot/LamHole/test_Hole_51_plot.py
|
helene-t/pyleecan
| 2
|
6627196
|
# -*- coding: utf-8 -*-
from os.path import join
import pytest
import matplotlib.pyplot as plt
from numpy import pi
from pyleecan.Classes.Frame import Frame
from pyleecan.Classes.LamHole import LamHole
from pyleecan.Classes.LamSlotWind import LamSlotWind
from pyleecan.Classes.MachineIPMSM import MachineIPMSM
from pyleecan.Classes.Magnet import Magnet
from pyleecan.Classes.Shaft import Shaft
from pyleecan.Classes.HoleM51 import HoleM51
from Tests import save_plot_path as save_path
"""unittest for Lamination with Hole 51 plot"""
@pytest.fixture
def machine():
"""Run at the begining of every test to setup the machine"""
plt.close("all")
test_obj = MachineIPMSM()
test_obj.rotor = LamHole(
Rint=45e-3 / 2, Rext=81.5e-3, is_stator=False, is_internal=True, L1=0.9
)
test_obj.rotor.hole = list()
test_obj.rotor.hole.append(
HoleM51(
Zh=8,
W0=0.016,
W1=pi / 6,
W2=0.004,
W3=0.01,
W4=0.002,
W5=0.01,
W6=0.002,
W7=0.01,
H0=0.01096,
H1=0.0015,
H2=0.0055,
)
)
test_obj.shaft = Shaft(Drsh=test_obj.rotor.Rint * 2, Lshaft=1.2)
test_obj.stator = LamSlotWind(
Rint=0.09, Rext=0.12, is_internal=False, is_stator=True, L1=0.9, slot=None
)
test_obj.frame = Frame(Rint=0.12, Rext=0.12, Lfra=0.7)
return test_obj
def test_Lam_Hole_51_012(machine):
"""Test machine plot hole 51 with all magnets
"""
machine.rotor.hole[0].magnet_0 = Magnet()
machine.rotor.hole[0].magnet_1 = Magnet()
machine.rotor.hole[0].magnet_2 = Magnet()
machine.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s51_1-Machine_012.png"))
# Rotor + 2 for stator + 0 for frame + 1 for shaft
assert len(fig.axes[0].patches) == 61
machine.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s51_1-Rotor_012.png"))
# 2 for lam + 7*8 for holes
assert len(fig.axes[0].patches) == 58
def test_Lam_Hole_51_N12(machine):
"""Test machine plot hole 51 with no magnet_0
"""
machine.rotor.hole[0].magnet_0 = None
machine.rotor.hole[0].magnet_1 = Magnet()
machine.rotor.hole[0].magnet_2 = Magnet()
machine.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s51_2-Rotor_N12.png"))
# 2 for lam + 5*8 for holes
assert len(fig.axes[0].patches) == 42
def test_Lam_Hole_51_0N2(machine):
"""Test machine plot hole 51 with no magnet_1
"""
machine.rotor.hole[0].magnet_0 = Magnet()
machine.rotor.hole[0].magnet_1 = None
machine.rotor.hole[0].magnet_2 = Magnet()
machine.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s51_3-Rotor_0N2.png"))
# 2 for lam + 5*8 for holes
assert len(fig.axes[0].patches) == 42
def test_Lam_Hole_51_NN2(machine):
"""Test machine plot hole 51 with no magnet_0 and no magnet_1
"""
machine.rotor.hole[0].magnet_0 = None
machine.rotor.hole[0].magnet_1 = None
machine.rotor.hole[0].magnet_2 = Magnet()
machine.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s51_4-Rotor_NN2.png"))
# 2 for lam + 3*8 for holes
assert len(fig.axes[0].patches) == 26
def test_Lam_Hole_51_01N(machine):
"""Test machine plot hole 51 with no magnet_2
"""
machine.rotor.hole[0].magnet_0 = Magnet()
machine.rotor.hole[0].magnet_1 = Magnet()
machine.rotor.hole[0].magnet_2 = None
machine.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s51_5-Rotor_01N.png"))
# 2 for lam + 5*8 for holes
assert len(fig.axes[0].patches) == 42
def test_Lam_Hole_51_N1N(machine):
"""Test machine plot hole 51 with no magnet_0 and no magnet_2
"""
machine.rotor.hole[0].magnet_0 = None
machine.rotor.hole[0].magnet_1 = Magnet()
machine.rotor.hole[0].magnet_2 = None
machine.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s51_6-Rotor_N1N.png"))
# 2 for lam + 3*8 for holes
assert len(fig.axes[0].patches) == 26
def test_Lam_Hole_51_0NN(machine):
"""Test machine plot hole 51 with no magnet_1 and no magnet_2
"""
machine.rotor.hole[0].magnet_0 = Magnet()
machine.rotor.hole[0].magnet_1 = None
machine.rotor.hole[0].magnet_2 = None
machine.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s51_7-Rotor_0NN.png"))
# 2 for lam + 3*8 for holes
assert len(fig.axes[0].patches) == 26
def test_Lam_Hole_51_NNN(machine):
"""Test machine plot hole 51 with no magnet
"""
machine.rotor.hole[0].magnet_0 = None
machine.rotor.hole[0].magnet_1 = None
machine.rotor.hole[0].magnet_2 = None
machine.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s51_8-Rotor_NNN.png"))
# 2 for lam + 1*8 for holes
assert len(fig.axes[0].patches) == 10
|
# -*- coding: utf-8 -*-
from os.path import join
import pytest
import matplotlib.pyplot as plt
from numpy import pi
from pyleecan.Classes.Frame import Frame
from pyleecan.Classes.LamHole import LamHole
from pyleecan.Classes.LamSlotWind import LamSlotWind
from pyleecan.Classes.MachineIPMSM import MachineIPMSM
from pyleecan.Classes.Magnet import Magnet
from pyleecan.Classes.Shaft import Shaft
from pyleecan.Classes.HoleM51 import HoleM51
from Tests import save_plot_path as save_path
"""unittest for Lamination with Hole 51 plot"""
@pytest.fixture
def machine():
"""Run at the begining of every test to setup the machine"""
plt.close("all")
test_obj = MachineIPMSM()
test_obj.rotor = LamHole(
Rint=45e-3 / 2, Rext=81.5e-3, is_stator=False, is_internal=True, L1=0.9
)
test_obj.rotor.hole = list()
test_obj.rotor.hole.append(
HoleM51(
Zh=8,
W0=0.016,
W1=pi / 6,
W2=0.004,
W3=0.01,
W4=0.002,
W5=0.01,
W6=0.002,
W7=0.01,
H0=0.01096,
H1=0.0015,
H2=0.0055,
)
)
test_obj.shaft = Shaft(Drsh=test_obj.rotor.Rint * 2, Lshaft=1.2)
test_obj.stator = LamSlotWind(
Rint=0.09, Rext=0.12, is_internal=False, is_stator=True, L1=0.9, slot=None
)
test_obj.frame = Frame(Rint=0.12, Rext=0.12, Lfra=0.7)
return test_obj
def test_Lam_Hole_51_012(machine):
"""Test machine plot hole 51 with all magnets
"""
machine.rotor.hole[0].magnet_0 = Magnet()
machine.rotor.hole[0].magnet_1 = Magnet()
machine.rotor.hole[0].magnet_2 = Magnet()
machine.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s51_1-Machine_012.png"))
# Rotor + 2 for stator + 0 for frame + 1 for shaft
assert len(fig.axes[0].patches) == 61
machine.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s51_1-Rotor_012.png"))
# 2 for lam + 7*8 for holes
assert len(fig.axes[0].patches) == 58
def test_Lam_Hole_51_N12(machine):
"""Test machine plot hole 51 with no magnet_0
"""
machine.rotor.hole[0].magnet_0 = None
machine.rotor.hole[0].magnet_1 = Magnet()
machine.rotor.hole[0].magnet_2 = Magnet()
machine.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s51_2-Rotor_N12.png"))
# 2 for lam + 5*8 for holes
assert len(fig.axes[0].patches) == 42
def test_Lam_Hole_51_0N2(machine):
"""Test machine plot hole 51 with no magnet_1
"""
machine.rotor.hole[0].magnet_0 = Magnet()
machine.rotor.hole[0].magnet_1 = None
machine.rotor.hole[0].magnet_2 = Magnet()
machine.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s51_3-Rotor_0N2.png"))
# 2 for lam + 5*8 for holes
assert len(fig.axes[0].patches) == 42
def test_Lam_Hole_51_NN2(machine):
"""Test machine plot hole 51 with no magnet_0 and no magnet_1
"""
machine.rotor.hole[0].magnet_0 = None
machine.rotor.hole[0].magnet_1 = None
machine.rotor.hole[0].magnet_2 = Magnet()
machine.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s51_4-Rotor_NN2.png"))
# 2 for lam + 3*8 for holes
assert len(fig.axes[0].patches) == 26
def test_Lam_Hole_51_01N(machine):
"""Test machine plot hole 51 with no magnet_2
"""
machine.rotor.hole[0].magnet_0 = Magnet()
machine.rotor.hole[0].magnet_1 = Magnet()
machine.rotor.hole[0].magnet_2 = None
machine.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s51_5-Rotor_01N.png"))
# 2 for lam + 5*8 for holes
assert len(fig.axes[0].patches) == 42
def test_Lam_Hole_51_N1N(machine):
"""Test machine plot hole 51 with no magnet_0 and no magnet_2
"""
machine.rotor.hole[0].magnet_0 = None
machine.rotor.hole[0].magnet_1 = Magnet()
machine.rotor.hole[0].magnet_2 = None
machine.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s51_6-Rotor_N1N.png"))
# 2 for lam + 3*8 for holes
assert len(fig.axes[0].patches) == 26
def test_Lam_Hole_51_0NN(machine):
"""Test machine plot hole 51 with no magnet_1 and no magnet_2
"""
machine.rotor.hole[0].magnet_0 = Magnet()
machine.rotor.hole[0].magnet_1 = None
machine.rotor.hole[0].magnet_2 = None
machine.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s51_7-Rotor_0NN.png"))
# 2 for lam + 3*8 for holes
assert len(fig.axes[0].patches) == 26
def test_Lam_Hole_51_NNN(machine):
"""Test machine plot hole 51 with no magnet
"""
machine.rotor.hole[0].magnet_0 = None
machine.rotor.hole[0].magnet_1 = None
machine.rotor.hole[0].magnet_2 = None
machine.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Hole_s51_8-Rotor_NNN.png"))
# 2 for lam + 1*8 for holes
assert len(fig.axes[0].patches) == 10
|
en
| 0.774431
|
# -*- coding: utf-8 -*- unittest for Lamination with Hole 51 plot Run at the begining of every test to setup the machine Test machine plot hole 51 with all magnets # Rotor + 2 for stator + 0 for frame + 1 for shaft # 2 for lam + 7*8 for holes Test machine plot hole 51 with no magnet_0 # 2 for lam + 5*8 for holes Test machine plot hole 51 with no magnet_1 # 2 for lam + 5*8 for holes Test machine plot hole 51 with no magnet_0 and no magnet_1 # 2 for lam + 3*8 for holes Test machine plot hole 51 with no magnet_2 # 2 for lam + 5*8 for holes Test machine plot hole 51 with no magnet_0 and no magnet_2 # 2 for lam + 3*8 for holes Test machine plot hole 51 with no magnet_1 and no magnet_2 # 2 for lam + 3*8 for holes Test machine plot hole 51 with no magnet # 2 for lam + 1*8 for holes
| 2.14671
| 2
|
bindings/python/test.py
|
arizvisa/capstone
| 0
|
6627197
|
<filename>bindings/python/test.py
#!/usr/bin/env python
# Capstone Python bindings, by <NAME> <<EMAIL>>
from __future__ import print_function
from capstone import *
import binascii
import sys
from xprint import to_hex, to_x, to_x_32
_python3 = sys.version_info.major == 3
X86_CODE16 = b"\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00"
X86_CODE32 = b"\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00"
X86_CODE64 = b"\x55\x48\x8b\x05\xb8\x13\x00\x00"
ARM_CODE = b"\xED\xFF\xFF\xEB\x04\xe0\x2d\xe5\x00\x00\x00\x00\xe0\x83\x22\xe5\xf1\x02\x03\x0e\x00\x00\xa0\xe3\x02\x30\xc1\xe7\x00\x00\x53\xe3"
ARM_CODE2 = b"\x10\xf1\x10\xe7\x11\xf2\x31\xe7\xdc\xa1\x2e\xf3\xe8\x4e\x62\xf3"
THUMB_CODE = b"\x70\x47\xeb\x46\x83\xb0\xc9\x68"
THUMB_CODE2 = b"\x4f\xf0\x00\x01\xbd\xe8\x00\x88\xd1\xe8\x00\xf0"
MIPS_CODE = b"\x0C\x10\x00\x97\x00\x00\x00\x00\x24\x02\x00\x0c\x8f\xa2\x00\x00\x34\x21\x34\x56"
MIPS_CODE2 = b"\x56\x34\x21\x34\xc2\x17\x01\x00"
ARM64_CODE = b"\x21\x7c\x02\x9b\x21\x7c\x00\x53\x00\x40\x21\x4b\xe1\x0b\x40\xb9"
PPC_CODE = b"\x80\x20\x00\x00\x80\x3f\x00\x00\x10\x43\x23\x0e\xd0\x44\x00\x80\x4c\x43\x22\x02\x2d\x03\x00\x80\x7c\x43\x20\x14\x7c\x43\x20\x93\x4f\x20\x00\x21\x4c\xc8\x00\x21"
SPARC_CODE = b"\x80\xa0\x40\x02\x85\xc2\x60\x08\x85\xe8\x20\x01\x81\xe8\x00\x00\x90\x10\x20\x01\xd5\xf6\x10\x16\x21\x00\x00\x0a\x86\x00\x40\x02\x01\x00\x00\x00\x12\xbf\xff\xff\x10\xbf\xff\xff\xa0\x02\x00\x09\x0d\xbf\xff\xff\xd4\x20\x60\x00\xd4\x4e\x00\x16\x2a\xc2\x80\x03"
SPARCV9_CODE = b"\x81\xa8\x0a\x24\x89\xa0\x10\x20\x89\xa0\x1a\x60\x89\xa0\x00\xe0"
SYSZ_CODE = b"\xed\x00\x00\x00\x00\x1a\x5a\x0f\x1f\xff\xc2\x09\x80\x00\x00\x00\x07\xf7\xeb\x2a\xff\xff\x7f\x57\xe3\x01\xff\xff\x7f\x57\xeb\x00\xf0\x00\x00\x24\xb2\x4f\x00\x78"
XCORE_CODE = b"\xfe\x0f\xfe\x17\x13\x17\xc6\xfe\xec\x17\x97\xf8\xec\x4f\x1f\xfd\xec\x37\x07\xf2\x45\x5b\xf9\xfa\x02\x06\x1b\x10"
all_tests = (
(CS_ARCH_X86, CS_MODE_16, X86_CODE16, "X86 16bit (Intel syntax)", 0),
(CS_ARCH_X86, CS_MODE_32, X86_CODE32, "X86 32bit (ATT syntax)", CS_OPT_SYNTAX_ATT),
(CS_ARCH_X86, CS_MODE_32, X86_CODE32, "X86 32 (Intel syntax)", 0),
(CS_ARCH_X86, CS_MODE_64, X86_CODE64, "X86 64 (Intel syntax)", 0),
(CS_ARCH_ARM, CS_MODE_ARM, ARM_CODE, "ARM", 0),
(CS_ARCH_ARM, CS_MODE_THUMB, THUMB_CODE2, "THUMB-2", 0),
(CS_ARCH_ARM, CS_MODE_ARM, ARM_CODE2, "ARM: Cortex-A15 + NEON", 0),
(CS_ARCH_ARM, CS_MODE_THUMB, THUMB_CODE, "THUMB", 0),
(CS_ARCH_MIPS, CS_MODE_32 + CS_MODE_BIG_ENDIAN, MIPS_CODE, "MIPS-32 (Big-endian)", 0),
(CS_ARCH_MIPS, CS_MODE_64 + CS_MODE_LITTLE_ENDIAN, MIPS_CODE2, "MIPS-64-EL (Little-endian)", 0),
(CS_ARCH_ARM64, CS_MODE_ARM, ARM64_CODE, "ARM-64", 0),
(CS_ARCH_PPC, CS_MODE_BIG_ENDIAN, PPC_CODE, "PPC-64", 0),
(CS_ARCH_PPC, CS_MODE_BIG_ENDIAN, PPC_CODE, "PPC-64, print register with number only", CS_OPT_SYNTAX_NOREGNAME),
(CS_ARCH_SPARC, CS_MODE_BIG_ENDIAN, SPARC_CODE, "Sparc", 0),
(CS_ARCH_SPARC, CS_MODE_BIG_ENDIAN + CS_MODE_V9, SPARCV9_CODE, "SparcV9", 0),
(CS_ARCH_SYSZ, 0, SYSZ_CODE, "SystemZ", 0),
(CS_ARCH_XCORE, 0, XCORE_CODE, "XCore", 0),
)
# ## Test cs_disasm_quick()
def test_cs_disasm_quick():
for arch, mode, code, comment, syntax in all_tests:
print('*' * 40)
print("Platform: %s" % comment)
print("Disasm:"),
print(to_hex(code))
for insn in cs_disasm_quick(arch, mode, code, 0x1000):
print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
print()
# ## Test class Cs
def test_class():
for arch, mode, code, comment, syntax in all_tests:
print('*' * 16)
print("Platform: %s" % comment)
print("Code: %s" % to_hex(code))
print("Disasm:")
try:
md = Cs(arch, mode)
if syntax != 0:
md.syntax = syntax
for insn in md.disasm(code, 0x1000):
# bytes = binascii.hexlify(insn.bytes)
# print("0x%x:\t%s\t%s\t// hex-code: %s" %(insn.address, insn.mnemonic, insn.op_str, bytes))
print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
print("0x%x:" % (insn.address + insn.size))
print()
except CsError as e:
print("ERROR: %s" % e)
# test_cs_disasm_quick()
# print ("*" * 40)
if __name__ == '__main__':
test_class()
|
<filename>bindings/python/test.py
#!/usr/bin/env python
# Capstone Python bindings, by <NAME> <<EMAIL>>
from __future__ import print_function
from capstone import *
import binascii
import sys
from xprint import to_hex, to_x, to_x_32
_python3 = sys.version_info.major == 3
X86_CODE16 = b"\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00"
X86_CODE32 = b"\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00"
X86_CODE64 = b"\x55\x48\x8b\x05\xb8\x13\x00\x00"
ARM_CODE = b"\xED\xFF\xFF\xEB\x04\xe0\x2d\xe5\x00\x00\x00\x00\xe0\x83\x22\xe5\xf1\x02\x03\x0e\x00\x00\xa0\xe3\x02\x30\xc1\xe7\x00\x00\x53\xe3"
ARM_CODE2 = b"\x10\xf1\x10\xe7\x11\xf2\x31\xe7\xdc\xa1\x2e\xf3\xe8\x4e\x62\xf3"
THUMB_CODE = b"\x70\x47\xeb\x46\x83\xb0\xc9\x68"
THUMB_CODE2 = b"\x4f\xf0\x00\x01\xbd\xe8\x00\x88\xd1\xe8\x00\xf0"
MIPS_CODE = b"\x0C\x10\x00\x97\x00\x00\x00\x00\x24\x02\x00\x0c\x8f\xa2\x00\x00\x34\x21\x34\x56"
MIPS_CODE2 = b"\x56\x34\x21\x34\xc2\x17\x01\x00"
ARM64_CODE = b"\x21\x7c\x02\x9b\x21\x7c\x00\x53\x00\x40\x21\x4b\xe1\x0b\x40\xb9"
PPC_CODE = b"\x80\x20\x00\x00\x80\x3f\x00\x00\x10\x43\x23\x0e\xd0\x44\x00\x80\x4c\x43\x22\x02\x2d\x03\x00\x80\x7c\x43\x20\x14\x7c\x43\x20\x93\x4f\x20\x00\x21\x4c\xc8\x00\x21"
SPARC_CODE = b"\x80\xa0\x40\x02\x85\xc2\x60\x08\x85\xe8\x20\x01\x81\xe8\x00\x00\x90\x10\x20\x01\xd5\xf6\x10\x16\x21\x00\x00\x0a\x86\x00\x40\x02\x01\x00\x00\x00\x12\xbf\xff\xff\x10\xbf\xff\xff\xa0\x02\x00\x09\x0d\xbf\xff\xff\xd4\x20\x60\x00\xd4\x4e\x00\x16\x2a\xc2\x80\x03"
SPARCV9_CODE = b"\x81\xa8\x0a\x24\x89\xa0\x10\x20\x89\xa0\x1a\x60\x89\xa0\x00\xe0"
SYSZ_CODE = b"\xed\x00\x00\x00\x00\x1a\x5a\x0f\x1f\xff\xc2\x09\x80\x00\x00\x00\x07\xf7\xeb\x2a\xff\xff\x7f\x57\xe3\x01\xff\xff\x7f\x57\xeb\x00\xf0\x00\x00\x24\xb2\x4f\x00\x78"
XCORE_CODE = b"\xfe\x0f\xfe\x17\x13\x17\xc6\xfe\xec\x17\x97\xf8\xec\x4f\x1f\xfd\xec\x37\x07\xf2\x45\x5b\xf9\xfa\x02\x06\x1b\x10"
all_tests = (
(CS_ARCH_X86, CS_MODE_16, X86_CODE16, "X86 16bit (Intel syntax)", 0),
(CS_ARCH_X86, CS_MODE_32, X86_CODE32, "X86 32bit (ATT syntax)", CS_OPT_SYNTAX_ATT),
(CS_ARCH_X86, CS_MODE_32, X86_CODE32, "X86 32 (Intel syntax)", 0),
(CS_ARCH_X86, CS_MODE_64, X86_CODE64, "X86 64 (Intel syntax)", 0),
(CS_ARCH_ARM, CS_MODE_ARM, ARM_CODE, "ARM", 0),
(CS_ARCH_ARM, CS_MODE_THUMB, THUMB_CODE2, "THUMB-2", 0),
(CS_ARCH_ARM, CS_MODE_ARM, ARM_CODE2, "ARM: Cortex-A15 + NEON", 0),
(CS_ARCH_ARM, CS_MODE_THUMB, THUMB_CODE, "THUMB", 0),
(CS_ARCH_MIPS, CS_MODE_32 + CS_MODE_BIG_ENDIAN, MIPS_CODE, "MIPS-32 (Big-endian)", 0),
(CS_ARCH_MIPS, CS_MODE_64 + CS_MODE_LITTLE_ENDIAN, MIPS_CODE2, "MIPS-64-EL (Little-endian)", 0),
(CS_ARCH_ARM64, CS_MODE_ARM, ARM64_CODE, "ARM-64", 0),
(CS_ARCH_PPC, CS_MODE_BIG_ENDIAN, PPC_CODE, "PPC-64", 0),
(CS_ARCH_PPC, CS_MODE_BIG_ENDIAN, PPC_CODE, "PPC-64, print register with number only", CS_OPT_SYNTAX_NOREGNAME),
(CS_ARCH_SPARC, CS_MODE_BIG_ENDIAN, SPARC_CODE, "Sparc", 0),
(CS_ARCH_SPARC, CS_MODE_BIG_ENDIAN + CS_MODE_V9, SPARCV9_CODE, "SparcV9", 0),
(CS_ARCH_SYSZ, 0, SYSZ_CODE, "SystemZ", 0),
(CS_ARCH_XCORE, 0, XCORE_CODE, "XCore", 0),
)
# ## Test cs_disasm_quick()
def test_cs_disasm_quick():
for arch, mode, code, comment, syntax in all_tests:
print('*' * 40)
print("Platform: %s" % comment)
print("Disasm:"),
print(to_hex(code))
for insn in cs_disasm_quick(arch, mode, code, 0x1000):
print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
print()
# ## Test class Cs
def test_class():
for arch, mode, code, comment, syntax in all_tests:
print('*' * 16)
print("Platform: %s" % comment)
print("Code: %s" % to_hex(code))
print("Disasm:")
try:
md = Cs(arch, mode)
if syntax != 0:
md.syntax = syntax
for insn in md.disasm(code, 0x1000):
# bytes = binascii.hexlify(insn.bytes)
# print("0x%x:\t%s\t%s\t// hex-code: %s" %(insn.address, insn.mnemonic, insn.op_str, bytes))
print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
print("0x%x:" % (insn.address + insn.size))
print()
except CsError as e:
print("ERROR: %s" % e)
# test_cs_disasm_quick()
# print ("*" * 40)
if __name__ == '__main__':
test_class()
|
en
| 0.348707
|
#!/usr/bin/env python # Capstone Python bindings, by <NAME> <<EMAIL>> # ## Test cs_disasm_quick() # ## Test class Cs # bytes = binascii.hexlify(insn.bytes) # print("0x%x:\t%s\t%s\t// hex-code: %s" %(insn.address, insn.mnemonic, insn.op_str, bytes)) # test_cs_disasm_quick() # print ("*" * 40)
| 1.951454
| 2
|
image_vision/plugins/image_viewer/tools/image_viewer_tool.py
|
IvanKosik/ImageVision
| 0
|
6627198
|
<reponame>IvanKosik/ImageVision
from core import settings
from core.image import FlatImage
from PyQt5.QtCore import QObject, pyqtSignal
import numpy as np
class ImageViewerTool(QObject):
before_activation = pyqtSignal(object)
activated = pyqtSignal(object)
deactivated = pyqtSignal(object)
def __init__(self, viewer, parent=None):
super().__init__(parent)
# self.type = tool_type
self.viewer = viewer
self.tool_mask = None
self.tool_mask_layer = None
self.mask_class = None
@classmethod
def name(cls):
return cls.__name__
def activate(self):
self.before_activation.emit(self)
print('activate tool', self.name())
self._activation()
self.activated.emit(self)
def _activation(self):
self.tool_mask_layer = self.viewer.add_layer('Tool Mask')
self.recreate_tool_mask()
self.viewer.update_scaled_combined_image()
self.viewer.viewport().installEventFilter(self)
self.viewer.before_image_changed.connect(self.on_before_viewer_image_changed)
self.viewer.image_changed.connect(self.on_viewer_image_changed)
self.viewer.colormap_active_class_changed.connect(self.on_viewer_colormap_active_class_changed)
def deactivate(self):
print('deactivate tool', self.name())
self._deactivation()
self.deactivated.emit(self)
def _deactivation(self):
self.viewer.colormap_active_class_changed.disconnect(self.on_viewer_colormap_active_class_changed)
self.viewer.image_changed.disconnect(self.on_viewer_image_changed)
self.viewer.before_image_changed.disconnect(self.on_before_viewer_image_changed)
self.viewer.viewport().removeEventFilter(self)
self.tool_mask = None
self.viewer.remove_layer(self.tool_mask_layer)
self.tool_mask_layer = None
def on_before_viewer_image_changed(self):
pass
def on_viewer_colormap_active_class_changed(self, color_class: int):
self.mask_class = color_class
def on_viewer_image_changed(self):
self.recreate_tool_mask()
def recreate_tool_mask(self):
if not self.viewer.has_image():
return
tool_mask_data = np.full((self.viewer.image().data.shape[0], self.viewer.image().data.shape[1]),
settings.TOOL_NO_COLOR_CLASS, np.uint8)
if self.tool_mask is None:
self.tool_mask = FlatImage(tool_mask_data)
self.tool_mask_layer.image = self.tool_mask
else:
self.tool_mask.data = tool_mask_data
|
from core import settings
from core.image import FlatImage
from PyQt5.QtCore import QObject, pyqtSignal
import numpy as np
class ImageViewerTool(QObject):
before_activation = pyqtSignal(object)
activated = pyqtSignal(object)
deactivated = pyqtSignal(object)
def __init__(self, viewer, parent=None):
super().__init__(parent)
# self.type = tool_type
self.viewer = viewer
self.tool_mask = None
self.tool_mask_layer = None
self.mask_class = None
@classmethod
def name(cls):
return cls.__name__
def activate(self):
self.before_activation.emit(self)
print('activate tool', self.name())
self._activation()
self.activated.emit(self)
def _activation(self):
self.tool_mask_layer = self.viewer.add_layer('Tool Mask')
self.recreate_tool_mask()
self.viewer.update_scaled_combined_image()
self.viewer.viewport().installEventFilter(self)
self.viewer.before_image_changed.connect(self.on_before_viewer_image_changed)
self.viewer.image_changed.connect(self.on_viewer_image_changed)
self.viewer.colormap_active_class_changed.connect(self.on_viewer_colormap_active_class_changed)
def deactivate(self):
print('deactivate tool', self.name())
self._deactivation()
self.deactivated.emit(self)
def _deactivation(self):
self.viewer.colormap_active_class_changed.disconnect(self.on_viewer_colormap_active_class_changed)
self.viewer.image_changed.disconnect(self.on_viewer_image_changed)
self.viewer.before_image_changed.disconnect(self.on_before_viewer_image_changed)
self.viewer.viewport().removeEventFilter(self)
self.tool_mask = None
self.viewer.remove_layer(self.tool_mask_layer)
self.tool_mask_layer = None
def on_before_viewer_image_changed(self):
pass
def on_viewer_colormap_active_class_changed(self, color_class: int):
self.mask_class = color_class
def on_viewer_image_changed(self):
self.recreate_tool_mask()
def recreate_tool_mask(self):
if not self.viewer.has_image():
return
tool_mask_data = np.full((self.viewer.image().data.shape[0], self.viewer.image().data.shape[1]),
settings.TOOL_NO_COLOR_CLASS, np.uint8)
if self.tool_mask is None:
self.tool_mask = FlatImage(tool_mask_data)
self.tool_mask_layer.image = self.tool_mask
else:
self.tool_mask.data = tool_mask_data
|
or
| 0.106624
|
# self.type = tool_type
| 2.215554
| 2
|
scripts/lexer/asn1lexer/__init__.py
|
arturasl/latex-template
| 0
|
6627199
|
import asn1lexer
Asn1Lexer = asn1lexer.Asn1Lexer
|
import asn1lexer
Asn1Lexer = asn1lexer.Asn1Lexer
|
none
| 1
| 1.112101
| 1
|
|
tamper/commalessmid.py
|
danielvvDev/Sqlmap-Reforced2
| 3
|
6627200
|
<filename>tamper/commalessmid.py
#!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import os
import re
from lib.core.common import singleTimeWarnMessage
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.HIGH
def dependencies():
pass
def tamper(payload, **kwargs):
"""
Replaces instances like 'MID(A, B, C)' with 'MID(A FROM B FOR C)'
Requirement:
* MySQL
Tested against:
* MySQL 5.0 and 5.5
>>> tamper('MID(VERSION(), 1, 1)')
'MID(VERSION() FROM 1 FOR 1)'
"""
retVal = payload
warnMsg = "you should consider usage of switch '--no-cast' along with "
warnMsg += "tamper script '%s'" % os.path.basename(__file__).split(".")[0]
singleTimeWarnMessage(warnMsg)
match = re.search(r"(?i)MID\((.+?)\s*,\s*(\d+)\s*\,\s*(\d+)\s*\)", payload or "")
if match:
retVal = retVal.replace(match.group(0), "MID(%s FROM %s FOR %s)" % (match.group(1), match.group(2), match.group(3)))
return retVal
|
<filename>tamper/commalessmid.py
#!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import os
import re
from lib.core.common import singleTimeWarnMessage
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.HIGH
def dependencies():
pass
def tamper(payload, **kwargs):
"""
Replaces instances like 'MID(A, B, C)' with 'MID(A FROM B FOR C)'
Requirement:
* MySQL
Tested against:
* MySQL 5.0 and 5.5
>>> tamper('MID(VERSION(), 1, 1)')
'MID(VERSION() FROM 1 FOR 1)'
"""
retVal = payload
warnMsg = "you should consider usage of switch '--no-cast' along with "
warnMsg += "tamper script '%s'" % os.path.basename(__file__).split(".")[0]
singleTimeWarnMessage(warnMsg)
match = re.search(r"(?i)MID\((.+?)\s*,\s*(\d+)\s*\,\s*(\d+)\s*\)", payload or "")
if match:
retVal = retVal.replace(match.group(0), "MID(%s FROM %s FOR %s)" % (match.group(1), match.group(2), match.group(3)))
return retVal
|
en
| 0.526295
|
#!/usr/bin/env python Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/) See the file 'LICENSE' for copying permission Replaces instances like 'MID(A, B, C)' with 'MID(A FROM B FOR C)' Requirement: * MySQL Tested against: * MySQL 5.0 and 5.5 >>> tamper('MID(VERSION(), 1, 1)') 'MID(VERSION() FROM 1 FOR 1)'
| 2.335177
| 2
|
stats.py
|
QuIIL/Scale-Embedding-Shared-Neural-Network
| 0
|
6627201
|
<filename>stats.py
import csv
import json
import operator
import numpy as np
import pandas
# extract valid_accuracy during training across cross-validation fold
nr_fold = 5
log_path = '/media/vqdang/Data_2/dang/output/NUCLEI-ATTENTION/miccai2019/colon/v1.0.0.0_base1_aug1_xy_only/'
fold_stat = []
for fold_idx in range(0, nr_fold):
stat_file = '%s/%02d/stats.json' % (log_path, fold_idx)
with open(stat_file) as f:
info = json.load(f)
best_value = 0
for epoch in info:
epoch_stats = info[epoch]
epoch_value = epoch_stats['valid-acc']
if epoch_value > best_value:
best_value = epoch_value
fold_stat.append(best_value)
print(fold_stat)
|
<filename>stats.py
import csv
import json
import operator
import numpy as np
import pandas
# extract valid_accuracy during training across cross-validation fold
nr_fold = 5
log_path = '/media/vqdang/Data_2/dang/output/NUCLEI-ATTENTION/miccai2019/colon/v1.0.0.0_base1_aug1_xy_only/'
fold_stat = []
for fold_idx in range(0, nr_fold):
stat_file = '%s/%02d/stats.json' % (log_path, fold_idx)
with open(stat_file) as f:
info = json.load(f)
best_value = 0
for epoch in info:
epoch_stats = info[epoch]
epoch_value = epoch_stats['valid-acc']
if epoch_value > best_value:
best_value = epoch_value
fold_stat.append(best_value)
print(fold_stat)
|
en
| 0.970817
|
# extract valid_accuracy during training across cross-validation fold
| 2.601917
| 3
|
Snapchat_Filters/Goku_Filter/Goku.py
|
swapnilgarg7/Face-X
| 175
|
6627202
|
import dlib
import cv2
def _load_goku_assets():
    """Load and split the Goku overlay PNG once, memoized on the function.

    Returns (bgr_image, alpha_mask, inverted_alpha_mask). The original code
    called cv2.imread and recomputed the masks on every frame, which is
    wasteful disk I/O and decoding in the video loop.
    """
    if not hasattr(_load_goku_assets, "_cache"):
        imgGoku = cv2.imread("Goku.png", -1)  # -1: keep the alpha channel
        orig_mask = imgGoku[:, :, 3]
        orig_mask_inv = cv2.bitwise_not(orig_mask)
        _load_goku_assets._cache = (imgGoku[:, :, 0:3], orig_mask, orig_mask_inv)
    return _load_goku_assets._cache


def mask(frame, landmarks):
    """Alpha-blend the Goku overlay onto `frame` above the detected face.

    Args:
        frame: BGR image (numpy array) from the webcam.
        landmarks: dlib 68-point landmark result for one face.

    Returns:
        The same frame with the overlay written into the ROI in place.
    """
    imgGoku, orig_mask, orig_mask_inv = _load_goku_assets()
    origGokuHeight, origGokuWidth = imgGoku.shape[:2]

    # Scale the overlay to 3x the outer-eye-corner distance (landmarks 36/45).
    GokuWidth = abs(3 * (landmarks.part(36).x - landmarks.part(45).x))
    GokuHeight = int(GokuWidth * origGokuHeight / origGokuWidth)

    Goku = cv2.resize(imgGoku, (GokuWidth, GokuHeight), interpolation=cv2.INTER_AREA)
    alpha = cv2.resize(orig_mask, (GokuWidth, GokuHeight), interpolation=cv2.INTER_AREA)
    alpha_inv = cv2.resize(orig_mask_inv, (GokuWidth, GokuHeight), interpolation=cv2.INTER_AREA)

    # Anchor above the brow (landmark 21), centred on the nose bridge (27).
    # NOTE(review): y1 can go negative near the top of the frame, which makes
    # the ROI shape mismatch the overlay and raises in cv2.add -- consider
    # clamping to the frame bounds; TODO confirm desired behavior.
    y1 = int(landmarks.part(21).y - (GokuHeight / 2)) - 100
    y2 = int(y1 + GokuHeight)
    x1 = int(landmarks.part(27).x - (GokuWidth / 2))
    x2 = int(x1 + GokuWidth)

    roi = frame[y1:y2, x1:x2]
    # Background keeps pixels where the overlay is transparent; foreground
    # keeps overlay pixels where it is opaque; the sum composites them.
    roi_bg = cv2.bitwise_and(roi, roi, mask=alpha_inv)
    roi_fg = cv2.bitwise_and(Goku, Goku, mask=alpha)
    frame[y1:y2, x1:x2] = cv2.add(roi_bg, roi_fg)
    return frame
def filter():
    """Run the webcam loop: detect faces, locate landmarks, apply the overlay.

    Steps per frame: 1) detect faces, 2) predict the 68 landmark points,
    3) composite the filter via mask(). Press ESC to quit.

    :return: None
    """
    # Frontal-face detector plus the pretrained 68-point landmark predictor.
    face_detector = dlib.get_frontal_face_detector()
    landmark_predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

    capture = cv2.VideoCapture(0)
    while True:
        grabbed, frame = capture.read()
        if not grabbed:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Locate landmarks for each detected face and composite the filter.
        for face in face_detector(gray):
            points = landmark_predictor(gray, face)
            frame = mask(frame, points)
        cv2.imshow('Detector', frame)
        # ESC (27) quits the loop.
        if cv2.waitKey(1) & 0xFF == 27:
            break

    # Release the capture device and close all windows.
    capture.release()
    cv2.destroyAllWindows()
# Entry point: start the webcam filter loop when run as a script.
if __name__ == '__main__':
    filter()
|
import dlib
import cv2
def mask(frame, landmarks):
imgGoku = cv2.imread("Goku.png", -1)
orig_mask = imgGoku[:, :, 3]
orig_mask_inv = cv2.bitwise_not(orig_mask)
imgGoku = imgGoku[:, :, 0:3]
origGokuHeight, origGokuWidth = imgGoku.shape[:2]
GokuWidth = abs(3 * (landmarks.part(36).x - landmarks.part(45).x))
GokuHeight = int(GokuWidth * origGokuHeight / origGokuWidth)
Goku = cv2.resize(imgGoku, (GokuWidth, GokuHeight), interpolation=cv2.INTER_AREA)
mask = cv2.resize(orig_mask, (GokuWidth, GokuHeight), interpolation=cv2.INTER_AREA)
mask_inv = cv2.resize(orig_mask_inv, (GokuWidth, GokuHeight), interpolation=cv2.INTER_AREA)
y1 = int(landmarks.part(21).y - (GokuHeight / 2)) - 100
y2 = int(y1 + GokuHeight)
x1 = int(landmarks.part(27).x - (GokuWidth / 2))
x2 = int(x1 + GokuWidth)
roi = frame[y1:y2, x1:x2]
roi_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
roi_fg = cv2.bitwise_and(Goku, Goku, mask=mask)
frame[y1:y2, x1:x2] = cv2.add(roi_bg, roi_fg)
return frame
def filter():
"""
This function consists main logic of the program in which
1. detect faces
2. from 68 landmark points we detect eyes
3. Applies Filter
:return: None
"""
# detector for detecting the face in the image
detector = dlib.get_frontal_face_detector()
# predictor of locating 68 landmark points from the face by using a pretrained model
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
if ret:
frameGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# detecting faces in the frame
faces = detector(frameGray)
# if faces are present then locating the landmark points
for face in faces:
landmarks = predictor(frameGray, face)
frame = mask(frame, landmarks)
# for showing frames on the window named Detector
cv2.imshow('Detector', frame)
# for quiting the program press 'ESC'
if cv2.waitKey(1) & 0xFF == 27:
break
else:
break
# releasing all the frames we captured and destroying the windows
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
filter()
|
en
| 0.83415
|
This function consists main logic of the program in which
1. detect faces
2. from 68 landmark points we detect eyes
3. Applies Filter
:return: None # detector for detecting the face in the image # predictor of locating 68 landmark points from the face by using a pretrained model # detecting faces in the frame # if faces are present then locating the landmark points # for showing frames on the window named Detector # for quiting the program press 'ESC' # releasing all the frames we captured and destroying the windows
| 3.039505
| 3
|
setup.py
|
ziplokk1/botopipeline
| 4
|
6627203
|
from setuptools import setup, find_packages

# Distribution version; bump per release.
version = '0.1.1'

# Runtime dependencies installed alongside the package.
REQUIREMENTS = [
    'boto',
    'scrapy'
]

# Packaging metadata for the scrapy-sqs-pipeline distribution
# (a Scrapy item pipeline that writes scraped items to Amazon SQS).
setup(
    name='scrapy-sqs-pipeline',
    version=version,
    packages=find_packages(),
    url='https://github.com/ziplokk1/botopipeline',
    license='LICENSE.txt',
    author='<NAME>',
    author_email='<EMAIL>',
    install_requires=REQUIREMENTS,
    description='Write scraped items to Amazon SQS.'
)
|
from setuptools import setup, find_packages
version = '0.1.1'
REQUIREMENTS = [
'boto',
'scrapy'
]
setup(
name='scrapy-sqs-pipeline',
version=version,
packages=find_packages(),
url='https://github.com/ziplokk1/botopipeline',
license='LICENSE.txt',
author='<NAME>',
author_email='<EMAIL>',
install_requires=REQUIREMENTS,
description='Write scraped items to Amazon SQS.'
)
|
none
| 1
| 1.36672
| 1
|
|
lexibank_sidwellvietic.py
|
lexibank/sidwellvietic
| 1
|
6627204
|
<reponame>lexibank/sidwellvietic
import pathlib
import attr
from clldutils.misc import slug
from pylexibank import Dataset as BaseDataset
from pylexibank import progressbar as pb
from pylexibank import Language
from pylexibank import FormSpec
@attr.s
class CustomLanguage(Language):
    # Extra per-language metadata column: free-text bibliographic source
    # reference(s) for the language's wordlist (read in cmd_makecldf).
    Sources = attr.ib(default=None)
class Dataset(BaseDataset):
    """Lexibank dataset for Sidwell's Vietic comparative wordlists."""

    dir = pathlib.Path(__file__).parent
    id = "sidwellvietic"
    language_class = CustomLanguage
    # Orthography clean-up: tone digits to superscripts, plus ad-hoc fixes
    # for glosses/loan markers embedded in the source forms.
    form_spec = FormSpec(
        separators="~;,/", missing_data=["∅", "#", "NA", 'XX', '*#'], first_form_only=True,
        replacements=[
            (x, y) for x, y in zip(
                '1234567890',
                '¹²³⁴⁵⁶⁷⁸⁹⁰',
            )
        ]+[
            ('-', ''),
            ("(diː | tiː)", "diː"),
            ("(guːs | kuːs)", "guːs"),
            ("(ɟiːŋ | ciɲ)", "ɟiːŋ"),
            ("(k-riɛs / k-rɛs | res)", "k-riɛs"),
            #("'", 'ʰ'),
            (' "mountain"', ''),
            (' "hill"', ''),
            (' [<Lao]', ''),
            ('[', ''),
            (']', ''),
            (' < Lao', ''),
            (' ', '_'),
            ("ʔək__̄", "ʔək"),
            ("anaŋ__᷅ ", "anaŋ"),
            ("_'abdomen'", ""),
            ("dŋ.³³", "dəŋ³³"),
            ("_᷄ "[:-2], ""),
            ("m̀", "m"),
            ("ŋ᷄ "[:-1], "ŋ"),
            ("\u1dc4", ""),
            ("\u1dc5", ""),
        ])

    def cmd_makecldf(self, args):
        """Convert the raw TSV wordlist into CLDF.

        Word rows and cognacy rows alternate in ``data.tsv``; each
        (concept, cognate-label) pair is assigned a numeric cognate-set ID.
        """
        # add bibliography
        args.writer.add_sources()
        args.log.info("added sources")

        # add concepts
        concepts = {}
        for concept in self.concepts:
            idx = concept["NUMBER"]+"_"+slug(concept["ENGLISH"])
            concepts[concept["ENGLISH"]] = idx
            args.writer.add_concept(
                ID=idx,
                Name=concept["ENGLISH"],
                Concepticon_ID=concept["CONCEPTICON_ID"],
                Concepticon_Gloss=concept["CONCEPTICON_GLOSS"],
            )
        args.log.info("added concepts")

        # add languages and their source references
        languages = args.writer.add_languages()
        sources = {
            language["ID"]: language["Sources"].strip().replace(" ", "")
            for language in self.languages}
        args.log.info("added languages")

        # read in data: row i holds forms, row i+1 holds cognacy labels
        data = self.raw_dir.read_csv(
            "data.tsv", delimiter="\t",
        )
        header = data[0]
        header[0] = "Gloss"
        # BUGFIX: the original reused the single name `cognates` both for this
        # persistent cognate-ID registry and for each row's cognacy dict,
        # clobbering the registry on every row; distinct names restore the
        # intended "one ID per (concept, cognate-label)" bookkeeping.
        cognate_ids = {}
        cogidx = 1
        for i in range(2, len(data), 2):
            words = dict(zip(header, data[i]))
            row_cognacy = dict(zip(header, data[i+1]))
            concept = data[i][0]
            for language in languages:
                entry = words.get(language).strip()
                cog = row_cognacy.get(language).strip()
                if entry.replace('#', '').strip():
                    key = concept+'-'+cog
                    if key not in cognate_ids:
                        cognate_ids[key] = cogidx
                        cogidx += 1
                    cogid = cognate_ids[key]
                    for lex in args.writer.add_forms_from_value(
                            Language_ID=language,
                            Parameter_ID=concepts[concept],
                            Value=entry,
                            Source=sources[language],
                            Cognacy=cogid
                    ):
                        args.writer.add_cognate(
                            lexeme=lex,
                            Cognateset_ID=cogid,
                            Source="Sidwell2021"
                        )
|
import pathlib
import attr
from clldutils.misc import slug
from pylexibank import Dataset as BaseDataset
from pylexibank import progressbar as pb
from pylexibank import Language
from pylexibank import FormSpec
@attr.s
class CustomLanguage(Language):
Sources = attr.ib(default=None)
class Dataset(BaseDataset):
dir = pathlib.Path(__file__).parent
id = "sidwellvietic"
language_class = CustomLanguage
form_spec = FormSpec(
separators="~;,/", missing_data=["∅", "#", "NA", 'XX', '*#'], first_form_only=True,
replacements=[
(x, y) for x, y in zip(
'1234567890',
'¹²³⁴⁵⁶⁷⁸⁹⁰',
)
]+[
('-', ''),
("(diː | tiː)", "diː"),
("(guːs | kuːs)", "guːs"),
("(ɟiːŋ | ciɲ)", "ɟiːŋ"),
("(k-riɛs / k-rɛs | res)", "k-riɛs"),
#("'", 'ʰ'),
(' "mountain"', ''),
(' "hill"', ''),
(' [<Lao]', ''),
('[', ''),
(']', ''),
(' < Lao', ''),
(' ', '_'),
("ʔək__̄", "ʔək"),
("anaŋ__᷅ ", "anaŋ"),
("_'abdomen'", ""),
("dŋ.³³", "dəŋ³³"),
("_᷄ "[:-2], ""),
("m̀", "m"),
("ŋ᷄ "[:-1], "ŋ"),
("\u1dc4", ""),
("\u1dc5", ""),
])
def cmd_makecldf(self, args):
# add bib
args.writer.add_sources()
args.log.info("added sources")
# add concept
concepts = {}
for concept in self.concepts:
idx = concept["NUMBER"]+"_"+slug(concept["ENGLISH"])
concepts[concept["ENGLISH"]] = idx
args.writer.add_concept(
ID=idx,
Name=concept["ENGLISH"],
Concepticon_ID=concept["CONCEPTICON_ID"],
Concepticon_Gloss=concept["CONCEPTICON_GLOSS"],
)
args.log.info("added concepts")
# add language
languages = args.writer.add_languages()
sources = {
language["ID"]: language["Sources"].strip().replace(" ", "")
for language in self.languages}
args.log.info("added languages")
# read in data
data = self.raw_dir.read_csv(
"data.tsv", delimiter="\t",
)
header = data[0]
header[0] = "Gloss"
cognates = {}
cogidx = 1
for i in range(2, len(data), 2):
words = dict(zip(header, data[i]))
cognates = dict(zip(header, data[i+1]))
concept = data[i][0]
for language in languages:
entry = words.get(language).strip()
cog = cognates.get(language).strip()
if entry.replace('#', '').strip():
if concept+'-'+cog not in cognates:
cognates[concept+'-'+cog] = cogidx
cogidx += 1
cogid = cognates[concept+'-'+cog]
for lex in args.writer.add_forms_from_value(
Language_ID=language,
Parameter_ID=concepts[concept],
Value=entry,
Source=sources[language],
Cognacy=cogid
):
args.writer.add_cognate(
lexeme=lex,
Cognateset_ID=cogid,
Source="Sidwell2021"
)
|
en
| 0.336063
|
#'], first_form_only=True, #("'", 'ʰ'), # add bib # add concept # add language # read in data
| 2.074481
| 2
|
Project-Euler/python/10.py
|
marshallhumble/Project_Euler
| 3
|
6627205
|
#!/usr/bin/env python
import time
start = time.time()
"""
Summation of primes
Problem 10
The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
Find the sum of all the primes below two million.
"""
def primes_sieve(limit):
    """Return all primes up to and including `limit`, in ascending order.

    A boolean Sieve of Eratosthenes: O(n log log n) time, O(n) space.
    The original dict-based version iterated composites as well as primes
    when crossing off multiples (re-marking already-eliminated numbers)
    and compared with `== True`; this version marks multiples only from
    primes, starting at p*p.

    >>> primes_sieve(10)
    [2, 3, 5, 7]
    """
    if limit < 2:
        return []
    is_prime = [True] * (limit + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(limit ** 0.5) + 1):
        if is_prime[p]:
            # Smaller multiples were already crossed off by smaller primes.
            for multiple in range(p * p, limit + 1, p):
                is_prime[multiple] = False
    return [n for n in range(2, limit + 1) if is_prime[n]]
# Problem answer: sum of all primes below two million (computed at import time).
print(sum(primes_sieve(2000000)))
def sum_primes(x):
    """Return the sum of all primes up to `x` (delegates to primes_sieve)."""
    primes = primes_sieve(x)
    return sum(primes)
def test_function():
    # Known result for Project Euler problem 10 (sum of primes below 2e6).
    assert sum_primes(2000000) == 142913828922
|
#!/usr/bin/env python
import time
start = time.time()
"""
Summation of primes
Problem 10
The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
Find the sum of all the primes below two million.
"""
def primes_sieve(limit):
limitn = limit + 1
primes = dict()
for i in range(2, limitn):
primes[i] = True
for i in primes:
factors = range(i, limitn, i)
for f in factors[1:]:
primes[f] = False
return [i for i in primes if primes[i] == True]
print(sum(primes_sieve(2000000)))
def sum_primes(x):
return sum(primes_sieve(x))
def test_function():
assert sum_primes(2000000) == 142913828922
|
en
| 0.669047
|
#!/usr/bin/env python Summation of primes Problem 10 The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17. Find the sum of all the primes below two million.
| 3.809303
| 4
|
lib/galaxy/model/migrate/versions/0043_visualization_sharing_tagging_annotating.py
|
mysticmirages/GalaxyProject
| 0
|
6627206
|
<filename>lib/galaxy/model/migrate/versions/0043_visualization_sharing_tagging_annotating.py
"""
Migration script to create tables and columns for sharing visualizations.
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
import logging
# Module-level logger and sqlalchemy-migrate metadata for this migration.
log = logging.getLogger( __name__ )
metadata = MetaData()
# Sharing visualizations: which users a visualization is shared with.
VisualizationUserShareAssociation_table = Table( "visualization_user_share_association", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "visualization_id", Integer, ForeignKey( "visualization.id" ), index=True ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True )
    )
# Tagging visualizations: user-applied tags stored as name/value pairs
# (both raw and user-normalized forms are indexed for lookup).
VisualizationTagAssociation_table = Table( "visualization_tag_association", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "visualization_id", Integer, ForeignKey( "visualization.id" ), index=True ),
    Column( "tag_id", Integer, ForeignKey( "tag.id" ), index=True ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
    Column( "user_tname", Unicode(255), index=True),
    Column( "value", Unicode(255), index=True),
    Column( "user_value", Unicode(255), index=True) )
# Annotating visualizations: free-text annotation per (visualization, user);
# the TEXT column index is created manually in upgrade() for MySQL.
VisualizationAnnotationAssociation_table = Table( "visualization_annotation_association", metadata,
    Column( "id", Integer, primary_key=True ),
    Column( "visualization_id", Integer, ForeignKey( "visualization.id" ), index=True ),
    Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
    Column( "annotation", TEXT, index=False ) )
def upgrade(migrate_engine):
    """Apply the migration: create the share/tag/annotation association
    tables and add `deleted`, `importable`, `slug` and `published` columns
    (with indexes and default values) to the `visualization` table.

    Python 2 / sqlalchemy-migrate code. Each DDL step is deliberately
    wrapped in its own try/except so a partial failure is logged (and
    printed) without aborting the remaining steps.
    """
    metadata.bind = migrate_engine
    print __doc__
    db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) )
    metadata.reflect()
    # (sic: "Visualiation" typo is preserved from the original code)
    Visualiation_table = Table( "visualization", metadata, autoload=True )
    # Create visualization_user_share_association table.
    try:
        VisualizationUserShareAssociation_table.create()
    except Exception, e:
        print "Creating visualization_user_share_association table failed: %s" % str( e )
        log.debug( "Creating visualization_user_share_association table failed: %s" % str( e ) )
    # Get default boolean value 'false' so that columns can be initialized.
    # NOTE(review): no branch covers other engines, which would leave
    # default_false unbound below -- presumably only mysql/sqlite/postgresql
    # are supported; confirm against the migration framework.
    if migrate_engine.name == 'mysql' or migrate_engine.name == 'sqlite':
        default_false = "0"
    elif migrate_engine.name == 'postgresql':
        default_false = "false"
    # Add columns & create indices for supporting sharing to visualization table.
    deleted_column = Column( "deleted", Boolean, default=False, index=True )
    importable_column = Column( "importable", Boolean, default=False, index=True )
    slug_column = Column( "slug", TEXT )
    published_column = Column( "published", Boolean, index=True )
    try:
        # Add column.
        deleted_column.create( Visualiation_table, index_name = "ix_visualization_deleted")
        assert deleted_column is Visualiation_table.c.deleted
        # Fill column with default value.
        cmd = "UPDATE visualization SET deleted = %s" % default_false
        db_session.execute( cmd )
    except Exception, e:
        print "Adding deleted column to visualization table failed: %s" % str( e )
        log.debug( "Adding deleted column to visualization table failed: %s" % str( e ) )
    try:
        # Add column.
        importable_column.create( Visualiation_table, index_name='ix_visualization_importable')
        assert importable_column is Visualiation_table.c.importable
        # Fill column with default value.
        cmd = "UPDATE visualization SET importable = %s" % default_false
        db_session.execute( cmd )
    except Exception, e:
        print "Adding importable column to visualization table failed: %s" % str( e )
        log.debug( "Adding importable column to visualization table failed: %s" % str( e ) )
    try:
        slug_column.create( Visualiation_table )
        assert slug_column is Visualiation_table.c.slug
    except Exception, e:
        print "Adding slug column to visualization table failed: %s" % str( e )
        log.debug( "Adding slug column to visualization table failed: %s" % str( e ) )
    try:
        if migrate_engine.name == 'mysql':
            # Have to create index manually: MySQL cannot index an unbounded
            # TEXT column, so a 100-character prefix index is used.
            cmd = "CREATE INDEX ix_visualization_slug ON visualization ( slug ( 100 ) )"
            db_session.execute( cmd )
        else:
            i = Index( "ix_visualization_slug", Visualiation_table.c.slug )
            i.create()
    except Exception, e:
        print "Adding index 'ix_visualization_slug' failed: %s" % str( e )
        log.debug( "Adding index 'ix_visualization_slug' failed: %s" % str( e ) )
    try:
        # Add column.
        published_column.create( Visualiation_table, index_name='ix_visualization_published')
        assert published_column is Visualiation_table.c.published
        # Fill column with default value.
        cmd = "UPDATE visualization SET published = %s" % default_false
        db_session.execute( cmd )
    except Exception, e:
        print "Adding published column to visualization table failed: %s" % str( e )
        log.debug( "Adding published column to visualization table failed: %s" % str( e ) )
    # Create visualization_tag_association table.
    try:
        VisualizationTagAssociation_table.create()
    except Exception, e:
        print str(e)
        log.debug( "Creating visualization_tag_association table failed: %s" % str( e ) )
    # Create visualization_annotation_association table.
    try:
        VisualizationAnnotationAssociation_table.create()
    except Exception, e:
        print str(e)
        log.debug( "Creating visualization_annotation_association table failed: %s" % str( e ) )
    # Need to create index for visualization annotation manually to deal with errors.
    try:
        if migrate_engine.name == 'mysql':
            # Have to create index manually (TEXT prefix index, as above).
            cmd = "CREATE INDEX ix_visualization_annotation_association_annotation ON visualization_annotation_association ( annotation ( 100 ) )"
            db_session.execute( cmd )
        else:
            i = Index( "ix_visualization_annotation_association_annotation", VisualizationAnnotationAssociation_table.c.annotation )
            i.create()
    except Exception, e:
        print "Adding index 'ix_visualization_annotation_association_annotation' failed: %s" % str( e )
        log.debug( "Adding index 'ix_visualization_annotation_association_annotation' failed: %s" % str( e ) )
def downgrade(migrate_engine):
    """Revert the migration: drop the three association tables and the
    sharing columns added to `visualization` by upgrade().

    Mirrors upgrade(): each drop is wrapped in its own try/except so a
    partial failure is logged without aborting the remaining steps.
    """
    metadata.bind = migrate_engine
    metadata.reflect()
    # (sic: "Visualiation" typo is preserved from the original code)
    Visualiation_table = Table( "visualization", metadata, autoload=True )
    # Drop visualization_user_share_association table.
    try:
        VisualizationUserShareAssociation_table.drop()
    except Exception, e:
        print str(e)
        log.debug( "Dropping visualization_user_share_association table failed: %s" % str( e ) )
    # Drop columns for supporting sharing from visualization table.
    try:
        Visualiation_table.c.deleted.drop()
    except Exception, e:
        print "Dropping deleted column from visualization table failed: %s" % str( e )
        log.debug( "Dropping deleted column from visualization table failed: %s" % str( e ) )
    try:
        Visualiation_table.c.importable.drop()
    except Exception, e:
        print "Dropping importable column from visualization table failed: %s" % str( e )
        log.debug( "Dropping importable column from visualization table failed: %s" % str( e ) )
    try:
        Visualiation_table.c.slug.drop()
    except Exception, e:
        print "Dropping slug column from visualization table failed: %s" % str( e )
        log.debug( "Dropping slug column from visualization table failed: %s" % str( e ) )
    try:
        Visualiation_table.c.published.drop()
    except Exception, e:
        print "Dropping published column from visualization table failed: %s" % str( e )
        log.debug( "Dropping published column from visualization table failed: %s" % str( e ) )
    # Drop visualization_tag_association table.
    try:
        VisualizationTagAssociation_table.drop()
    except Exception, e:
        print str(e)
        log.debug( "Dropping visualization_tag_association table failed: %s" % str( e ) )
    # Drop visualization_annotation_association table.
    try:
        VisualizationAnnotationAssociation_table.drop()
    except Exception, e:
        print str(e)
        log.debug( "Dropping visualization_annotation_association table failed: %s" % str( e ) )
|
<filename>lib/galaxy/model/migrate/versions/0043_visualization_sharing_tagging_annotating.py
"""
Migration script to create tables and columns for sharing visualizations.
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
import logging
log = logging.getLogger( __name__ )
metadata = MetaData()
# Sharing visualizations.
VisualizationUserShareAssociation_table = Table( "visualization_user_share_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "visualization_id", Integer, ForeignKey( "visualization.id" ), index=True ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True )
)
# Tagging visualizations.
VisualizationTagAssociation_table = Table( "visualization_tag_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "visualization_id", Integer, ForeignKey( "visualization.id" ), index=True ),
Column( "tag_id", Integer, ForeignKey( "tag.id" ), index=True ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
Column( "user_tname", Unicode(255), index=True),
Column( "value", Unicode(255), index=True),
Column( "user_value", Unicode(255), index=True) )
# Annotating visualizations.
VisualizationAnnotationAssociation_table = Table( "visualization_annotation_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "visualization_id", Integer, ForeignKey( "visualization.id" ), index=True ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ),
Column( "annotation", TEXT, index=False ) )
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print __doc__
db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) )
metadata.reflect()
Visualiation_table = Table( "visualization", metadata, autoload=True )
# Create visualization_user_share_association table.
try:
VisualizationUserShareAssociation_table.create()
except Exception, e:
print "Creating visualization_user_share_association table failed: %s" % str( e )
log.debug( "Creating visualization_user_share_association table failed: %s" % str( e ) )
# Get default boolean value 'false' so that columns can be initialized.
if migrate_engine.name == 'mysql' or migrate_engine.name == 'sqlite':
default_false = "0"
elif migrate_engine.name == 'postgresql':
default_false = "false"
# Add columns & create indices for supporting sharing to visualization table.
deleted_column = Column( "deleted", Boolean, default=False, index=True )
importable_column = Column( "importable", Boolean, default=False, index=True )
slug_column = Column( "slug", TEXT )
published_column = Column( "published", Boolean, index=True )
try:
# Add column.
deleted_column.create( Visualiation_table, index_name = "ix_visualization_deleted")
assert deleted_column is Visualiation_table.c.deleted
# Fill column with default value.
cmd = "UPDATE visualization SET deleted = %s" % default_false
db_session.execute( cmd )
except Exception, e:
print "Adding deleted column to visualization table failed: %s" % str( e )
log.debug( "Adding deleted column to visualization table failed: %s" % str( e ) )
try:
# Add column.
importable_column.create( Visualiation_table, index_name='ix_visualization_importable')
assert importable_column is Visualiation_table.c.importable
# Fill column with default value.
cmd = "UPDATE visualization SET importable = %s" % default_false
db_session.execute( cmd )
except Exception, e:
print "Adding importable column to visualization table failed: %s" % str( e )
log.debug( "Adding importable column to visualization table failed: %s" % str( e ) )
try:
slug_column.create( Visualiation_table )
assert slug_column is Visualiation_table.c.slug
except Exception, e:
print "Adding slug column to visualization table failed: %s" % str( e )
log.debug( "Adding slug column to visualization table failed: %s" % str( e ) )
try:
if migrate_engine.name == 'mysql':
# Have to create index manually.
cmd = "CREATE INDEX ix_visualization_slug ON visualization ( slug ( 100 ) )"
db_session.execute( cmd )
else:
i = Index( "ix_visualization_slug", Visualiation_table.c.slug )
i.create()
except Exception, e:
print "Adding index 'ix_visualization_slug' failed: %s" % str( e )
log.debug( "Adding index 'ix_visualization_slug' failed: %s" % str( e ) )
try:
# Add column.
published_column.create( Visualiation_table, index_name='ix_visualization_published')
assert published_column is Visualiation_table.c.published
# Fill column with default value.
cmd = "UPDATE visualization SET published = %s" % default_false
db_session.execute( cmd )
except Exception, e:
print "Adding published column to visualization table failed: %s" % str( e )
log.debug( "Adding published column to visualization table failed: %s" % str( e ) )
# Create visualization_tag_association table.
try:
VisualizationTagAssociation_table.create()
except Exception, e:
print str(e)
log.debug( "Creating visualization_tag_association table failed: %s" % str( e ) )
# Create visualization_annotation_association table.
try:
VisualizationAnnotationAssociation_table.create()
except Exception, e:
print str(e)
log.debug( "Creating visualization_annotation_association table failed: %s" % str( e ) )
# Need to create index for visualization annotation manually to deal with errors.
try:
if migrate_engine.name == 'mysql':
# Have to create index manually.
cmd = "CREATE INDEX ix_visualization_annotation_association_annotation ON visualization_annotation_association ( annotation ( 100 ) )"
db_session.execute( cmd )
else:
i = Index( "ix_visualization_annotation_association_annotation", VisualizationAnnotationAssociation_table.c.annotation )
i.create()
except Exception, e:
print "Adding index 'ix_visualization_annotation_association_annotation' failed: %s" % str( e )
log.debug( "Adding index 'ix_visualization_annotation_association_annotation' failed: %s" % str( e ) )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
Visualiation_table = Table( "visualization", metadata, autoload=True )
# Drop visualization_user_share_association table.
try:
VisualizationUserShareAssociation_table.drop()
except Exception, e:
print str(e)
log.debug( "Dropping visualization_user_share_association table failed: %s" % str( e ) )
# Drop columns for supporting sharing from visualization table.
try:
Visualiation_table.c.deleted.drop()
except Exception, e:
print "Dropping deleted column from visualization table failed: %s" % str( e )
log.debug( "Dropping deleted column from visualization table failed: %s" % str( e ) )
try:
Visualiation_table.c.importable.drop()
except Exception, e:
print "Dropping importable column from visualization table failed: %s" % str( e )
log.debug( "Dropping importable column from visualization table failed: %s" % str( e ) )
try:
Visualiation_table.c.slug.drop()
except Exception, e:
print "Dropping slug column from visualization table failed: %s" % str( e )
log.debug( "Dropping slug column from visualization table failed: %s" % str( e ) )
try:
Visualiation_table.c.published.drop()
except Exception, e:
print "Dropping published column from visualization table failed: %s" % str( e )
log.debug( "Dropping published column from visualization table failed: %s" % str( e ) )
# Drop visualization_tag_association table.
try:
VisualizationTagAssociation_table.drop()
except Exception, e:
print str(e)
log.debug( "Dropping visualization_tag_association table failed: %s" % str( e ) )
# Drop visualization_annotation_association table.
try:
VisualizationAnnotationAssociation_table.drop()
except Exception, e:
print str(e)
log.debug( "Dropping visualization_annotation_association table failed: %s" % str( e ) )
|
en
| 0.556159
|
Migration script to create tables and columns for sharing visualizations. # Sharing visualizations. # Tagging visualizations. # Annotating visualizations. # Create visualization_user_share_association table. # Get default boolean value 'false' so that columns can be initialized. # Add columns & create indices for supporting sharing to visualization table. # Add column. # Fill column with default value. # Add column. # Fill column with default value. # Have to create index manually. # Add column. # Fill column with default value. # Create visualization_tag_association table. # Create visualization_annotation_association table. # Need to create index for visualization annotation manually to deal with errors. # Have to create index manually. # Drop visualization_user_share_association table. # Drop columns for supporting sharing from visualization table. # Drop visualization_tag_association table. # Drop visualization_annotation_association table.
| 2.371375
| 2
|
scotch/doc_handler_base.py
|
ng3rdstmadgke/scotch
| 0
|
6627207
|
from typing import List, Tuple, Dict, Any
from abc import ABCMeta, abstractmethod
from bs4 import BeautifulSoup
class DocHandlerBase(metaclass=ABCMeta):
    """Base class for crawler document handlers.

    Subclasses supply the seed URLs, a URL filter, and the per-document
    handler. BUGFIX: the original declared @abstractmethod without setting
    the ABCMeta metaclass (ABCMeta was imported but unused), which makes
    the abstract markers inert; with the metaclass set, instantiating an
    incomplete subclass now raises TypeError as intended.
    """

    def __init__(self, arguments: List[str], options: Dict[str, Any], config: Dict[str, Any]):
        """Constructor. Emit headers etc. from this method if needed.

        Args:
            arguments (List[str]): positional arguments, e.g. ["project name"]
            options (Dict[str, Any]): parsed options, e.g. {"continue": True, ...}
            config (Dict[str, Any]): parsed config.yml, e.g. {"logfile": "path/to/log", ...}
        """
        self.arguments = arguments
        self.options = options
        self.config = config

    @abstractmethod
    def seeds(self) -> List[Tuple[str, int]]:
        """Return the seed URLs to crawl together with their hop counts.

        Returns:
            List[Tuple[str, int]]: list of (seed URL, hop count) tuples
        """
        pass

    @abstractmethod
    def filter(self, curr_url: str, next_url: str) -> bool:
        """Decide whether the next URL should be crawled.

        Args:
            curr_url (str): the URL currently being visited
            next_url (str): the candidate next URL to judge

        Returns:
            bool: True to crawl next_url, False to skip it
        """
        pass

    @abstractmethod
    def handle(self, url: str, depth: int, doc: BeautifulSoup):
        """Process a fetched document.

        Args:
            url (str): the fetched URL
            depth (int): depth (hops) from the seed URL
            doc (BeautifulSoup): the parsed document

        Returns:
            void:
        """
        pass
|
from typing import List, Tuple, Dict, Any
from abc import ABCMeta, abstractmethod
from bs4 import BeautifulSoup
class DocHandlerBase:
def __init__(self, arguments: List[str], options: Dict[str, Any], config: Dict[str, Any]):
"""コンストラクタ。ヘッダなどを出力する場合はこのメソッドで。
Args:
arguments (List[str]): 引数の配列。["プロジェクト名"]
options (Dict[str, Any]): 辞書形式のオプション。{"continue": True, ...}
config (Dict[str, Any]): 辞書形式のconfig.yml。{"logfile": "path/to/log", ...}
"""
self.arguments = arguments
self.options = options
self.config = config
@abstractmethod
def seeds(self) -> List[Tuple[str, int]]:
"""クロール対象のシードURLとホップ数を返す
Returns:
List[Tuple[str, int]]: (シードURL, ホップ数)のリスト
"""
pass
@abstractmethod
def filter(self, curr_url: str, next_url: str) -> bool:
"""ネクストURLをクロール対象とするかを判定する。
Args:
curr_url (str): 現在アクセスしているURL
next_url (str): ネクストURL。このURLをクロールするかを判定する。
Returns:
bool: True: next_urlをクロールする, False: next_urlをクロールしない
"""
pass
@abstractmethod
def handle(self, url: str, depth: int, doc: BeautifulSoup):
"""取得したdocumentを処理する
Args:
url (str): 取得したURL
depth (int): シードURLからの階層
doc (BeautifulSoup): ドキュメント
Returns:
void:
"""
pass
|
ja
| 0.991817
|
コンストラクタ。ヘッダなどを出力する場合はこのメソッドで。 Args: arguments (List[str]): 引数の配列。["プロジェクト名"] options (Dict[str, Any]): 辞書形式のオプション。{"continue": True, ...} config (Dict[str, Any]): 辞書形式のconfig.yml。{"logfile": "path/to/log", ...} クロール対象のシードURLとホップ数を返す Returns: List[Tuple[str, int]]: (シードURL, ホップ数)のリスト ネクストURLをクロール対象とするかを判定する。 Args: curr_url (str): 現在アクセスしているURL next_url (str): ネクストURL。このURLをクロールするかを判定する。 Returns: bool: True: next_urlをクロールする, False: next_urlをクロールしない 取得したdocumentを処理する Args: url (str): 取得したURL depth (int): シードURLからの階層 doc (BeautifulSoup): ドキュメント Returns: void:
| 2.618881
| 3
|
apps/log_trace/handlers/trace_config_handlers.py
|
qqqqqie/bk-log
| 75
|
6627208
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from apps.log_search.models import LogIndexSet
from apps.log_search.exceptions import IndexTraceProjectIDException
from apps.utils.function import ignored
from bk_dataview.grafana import client
from bk_dataview.grafana.settings import grafana_settings
class TraceConfigHandlers(object):
    """Helpers for fetching trace-capable log index sets and keeping the
    matching Grafana organization and datasources provisioned."""

    def __init__(self):
        pass

    @classmethod
    def get_user_trace_index_set(cls, project_id, bk_biz_id, request, scenarios=None):
        """Return the trace-log index sets that belong to *project_id*.

        Raises:
            IndexTraceProjectIDException: when *project_id* is falsy.
        """
        if not project_id:
            raise IndexTraceProjectIDException()
        id_qs = LogIndexSet.objects.filter(project_id=project_id).values_list("index_set_id", flat=True)
        trace_sets = LogIndexSet.get_index_set(id_qs, scenarios, is_trace_log=True)
        # Grafana refresh is best-effort: failures are deliberately swallowed
        # so index-set retrieval still succeeds.
        with ignored(Exception):
            cls.refresh_grafana(bk_biz_id, request)
        return trace_sets

    @classmethod
    def refresh_grafana(cls, bk_biz_id, request):
        """Provision the trace datasources for the business's Grafana org."""
        backend = grafana_settings.BACKEND_CLASS()
        # NOTE(review): local import — presumably avoids a circular import
        # with apps.grafana; confirm before hoisting to module level.
        from apps.grafana.provisioning import TraceProvisioning

        provisioning = TraceProvisioning()
        org_id = cls.get_grafana_org_id(bk_biz_id)
        datasources = provisioning.datasources(request, bk_biz_id, org_id)
        backend.handle_datasources(request, bk_biz_id, org_id, datasources, provisioning)

    @staticmethod
    def get_grafana_org_id(org_name):
        """Return the Grafana org id for *org_name*, creating the org on a 404.

        NOTE(review): any status other than 200/404 falls through and returns
        None implicitly — callers should be prepared for that.
        """
        lookup = client.get_organization_by_name(org_name)
        if lookup.status_code == 200:
            return lookup.json()["id"]
        if lookup.status_code == 404:
            created = client.create_organization(org_name)
            return created.json()["orgId"]
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from apps.log_search.models import LogIndexSet
from apps.log_search.exceptions import IndexTraceProjectIDException
from apps.utils.function import ignored
from bk_dataview.grafana import client
from bk_dataview.grafana.settings import grafana_settings
class TraceConfigHandlers(object):
    """Helpers for fetching trace-capable log index sets and keeping the
    matching Grafana organization and datasources provisioned."""

    def __init__(self):
        pass

    @classmethod
    def get_user_trace_index_set(cls, project_id, bk_biz_id, request, scenarios=None):
        """Return the trace-log index sets that belong to *project_id*.

        Raises:
            IndexTraceProjectIDException: when *project_id* is falsy.
        """
        if not project_id:
            raise IndexTraceProjectIDException()
        id_qs = LogIndexSet.objects.filter(project_id=project_id).values_list("index_set_id", flat=True)
        trace_sets = LogIndexSet.get_index_set(id_qs, scenarios, is_trace_log=True)
        # Grafana refresh is best-effort: failures are deliberately swallowed
        # so index-set retrieval still succeeds.
        with ignored(Exception):
            cls.refresh_grafana(bk_biz_id, request)
        return trace_sets

    @classmethod
    def refresh_grafana(cls, bk_biz_id, request):
        """Provision the trace datasources for the business's Grafana org."""
        backend = grafana_settings.BACKEND_CLASS()
        # NOTE(review): local import — presumably avoids a circular import
        # with apps.grafana; confirm before hoisting to module level.
        from apps.grafana.provisioning import TraceProvisioning

        provisioning = TraceProvisioning()
        org_id = cls.get_grafana_org_id(bk_biz_id)
        datasources = provisioning.datasources(request, bk_biz_id, org_id)
        backend.handle_datasources(request, bk_biz_id, org_id, datasources, provisioning)

    @staticmethod
    def get_grafana_org_id(org_name):
        """Return the Grafana org id for *org_name*, creating the org on a 404.

        NOTE(review): any status other than 200/404 falls through and returns
        None implicitly — callers should be prepared for that.
        """
        lookup = client.get_organization_by_name(org_name)
        if lookup.status_code == 200:
            return lookup.json()["id"]
        if lookup.status_code == 404:
            created = client.create_organization(org_name)
            return created.json()["orgId"]
|
en
| 0.766649
|
# -*- coding: utf-8 -*- Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available. Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. BK-LOG 蓝鲸日志平台 is licensed under the MIT License. License for BK-LOG 蓝鲸日志平台: -------------------------------------------------------------------- Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| 1.274874
| 1
|
ten-apps/03-birthday-countdown/program.py
|
ryentzer/talkpython-courses
| 0
|
6627209
|
<gh_stars>0
# App 3 birthday countdown
|
# App 3 birthday countdown
|
en
| 0.663781
|
# App 3 birthday countdown
| 1.023485
| 1
|
dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Core/test_attribute.py
|
aleasims/Peach
| 0
|
6627210
|
import re
from Ft.Xml.Xslt import XsltException, Error
from Xml.Xslt import test_harness
from Ft.Xml.Xslt.XmlWriter import DEFAULT_GENERATED_PREFIX
# Unsophisticated comparer of XML strings: two strings are treated as
# equivalent when they contain the same multiset of attribute-looking
# substrings (name="value"), regardless of order.
_attrPattern = re.compile(r'[\w:]+="[^"]+"')


def _cmp_rawattrs(a, b):
    """cmp-style check: return True when *a* and *b* DIFFER in their
    attribute-like substrings, False when they match."""
    return sorted(_attrPattern.findall(a)) != sorted(_attrPattern.findall(b))
# Transform input shared by every test case below: a single empty element.
source_1 = """<dummy/>"""
# Case 1: xsl:attribute as a direct child of a literal result element.
sheet_1 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<xsl:attribute name="foo">bar</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_1 = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="bar"/>"""
# Case 2: xsl:attribute nested inside xsl:if inside a literal result element.
sheet_2 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<xsl:if test="true()">
<xsl:attribute name="foo">bar</xsl:attribute>
</xsl:if>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_2 = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="bar"/>"""
# "xsl:attribute with namespace"
sheet_3 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<xsl:attribute name="foo" namespace="http://some-ns/">bar</xsl:attribute>
<xsl:attribute name="y:foo" namespace="http://some-other-ns/">bar</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_3 = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:%(gp)s0="http://some-ns/" xmlns:y="http://some-other-ns/" %(gp)s0:foo="bar" y:foo="bar"/>"""%{'gp': DEFAULT_GENERATED_PREFIX}
# Cases 4-6: later xsl:attribute instantiations with the same expanded-name
# must override earlier ones (and any literal attribute on the element).
sheet_4 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<!-- duplicate attrs override previous -->
<xsl:attribute name="foo">bar</xsl:attribute>
<xsl:attribute name="foo">baz</xsl:attribute>
<xsl:attribute name="foo">maz</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_4 = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="maz"/>"""
sheet_5 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result foo="bar">
<!-- duplicate attrs override previous -->
<xsl:attribute name="foo">baz</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_5 = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="baz"/>"""
sheet_6 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<xsl:attribute name="foo">bar</xsl:attribute>
<!-- duplicate attrs override previous -->
<!-- we use xsl:if to obscure it a bit -->
<xsl:if test="true()">
<xsl:attribute name="foo">baz</xsl:attribute>
</xsl:if>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_6 = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="baz"/>"""
#"adding attributes with the same expanded-name 4"
sheet_7 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<!-- duplicate attrs override previous -->
<xsl:attribute name="foo" namespace="http://some-ns/">bar</xsl:attribute>
<xsl:attribute name="x:foo" xmlns:x="http://some-ns/">baz</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_7 = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:org.4suite.4xslt.ns0="http://some-ns/" org.4suite.4xslt.ns0:foo="baz"/>"""
# "adding attributes with the same expanded-name 5"
sheet_8 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result x:foo="bar" xmlns:x="http://some-ns/">
<!-- duplicate attrs override previous -->
<xsl:attribute name="foo" namespace="http://some-ns/">baz</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_8 = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:x="http://some-ns/" x:foo="baz"/>"""
# Case 9: serialization of a linefeed inside an attribute value.
# NOTE(review): the comment and expected value appear to have lost a
# character-entity reference (&#10;) in this copy of the file — verify
# against the upstream 4Suite source before relying on them.
sheet_9 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<!-- linefeed must be serialized as -->
<xsl:attribute name="a">x
y</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_9 = """<?xml version="1.0" encoding="UTF-8"?>
<result a="x y"/>"""
# Case 10: an attribute may never carry the reserved "xmlns" prefix.
sheet_10 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<!-- if an attribute prefix would be xmlns, it must be changed to something else -->
<xsl:attribute name="xmlns:foo" namespace="http://some-ns/">bar</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_10 = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:%(gp)s0="http://some-ns/" %(gp)s0:foo="bar"/>"""%{'gp': DEFAULT_GENERATED_PREFIX}
# "attributes in various namespaces"
sheet_11 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<!-- correct results are indicated in the attribute values -->
<xsl:attribute name="foo">local-name foo, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-empty-ns" namespace="">local-name in-empty-ns, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, generated prefix</xsl:attribute>
<xsl:attribute name="pre:foo" xmlns:pre="http://ns-for-pre/">local-name foo, namespace http://ns-for-pre/, preferred prefix pre</xsl:attribute>
<xsl:attribute name="pre:bar" xmlns:pre="http://ns-for-pre/" namespace="http://explicit-ns/">local-name bar, namespace http://explicit-ns/, generated prefix</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_11 = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:pre="http://ns-for-pre/" xmlns:%(gp)s0="http://foo-ns/" xmlns:%(gp)s1="http://explicit-ns/" %(gp)s1:bar="local-name bar, namespace http://explicit-ns/, generated prefix" foo="local-name foo, no namespace, no prefix" in-empty-ns="local-name in-empty-ns, no namespace, no prefix" pre:foo="local-name foo, namespace http://ns-for-pre/, preferred prefix pre" %(gp)s0:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, generated prefix"/>"""%{'gp': DEFAULT_GENERATED_PREFIX}
sheet_12 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<!-- the element should be in the http://foo-ns/ namespace. -->
<!-- the element *may*, but most likely won't, bear the same generated prefix as the in-foo-ns attribute. -->
<result xmlns="http://foo-ns/">
<!-- A default namespace is in scope, but this does not affect the value of 'name' in xsl:attribute. -->
<!-- in-foo-ns attribute does not inherit the default namespace. It *must* have a prefix, bound to http://foo-ns/ -->
<xsl:attribute name="foo">local-name foo, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-empty-ns" namespace="">local-name in-empty-ns, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, generated prefix</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_12 = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns="http://foo-ns/" xmlns:%(gp)s0="http://foo-ns/" foo="local-name foo, no namespace, no prefix" in-empty-ns="local-name in-empty-ns, no namespace, no prefix" %(gp)s0:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, generated prefix"/>"""%{'gp': DEFAULT_GENERATED_PREFIX}
# "attributes in empty and in-scope default namespaces"
sheet_13 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<!-- element should be in http://foo-ns/ namespace, retaining prefix foo -->
<foo:result xmlns:foo="http://foo-ns/">
<xsl:attribute name="foo">local-name foo, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-empty-ns" namespace="">local-name in-empty-ns, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, prefix foo</xsl:attribute>
</foo:result>
</xsl:template>
</xsl:stylesheet>"""
# it's technically OK for the in-foo-ns attr to have a
# generated prefix, but it really should re-use the foo.
#
expected_13 = """<?xml version="1.0" encoding="UTF-8"?>
<foo:result xmlns:foo="http://foo-ns/" foo="local-name foo, no namespace, no prefix" in-empty-ns="local-name in-empty-ns, no namespace, no prefix" foo:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, prefix foo"/>"""
sheet_14 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<!-- element should be in http://foo-ns/ namespace, retaining prefix foo -->
<pre:result xmlns:pre="http://foo-ns/">
<xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, prefix pre</xsl:attribute>
<xsl:attribute name="pre:bar" xmlns:pre="http://ns-for-pre/" namespace="http://explicit-ns/">local-name bar, namespace http://explicit-ns/, generated prefix</xsl:attribute>
</pre:result>
</xsl:template>
</xsl:stylesheet>"""
# the bar attribute must have a generated prefix.
# it's technically OK for the in-foo-ns attr to have a
# generated prefix, but it really should re-use the pre.
#
expected_14 = """<?xml version="1.0" encoding="UTF-8"?>
<pre:result xmlns:pre="http://foo-ns/" xmlns:%(gp)s0="http://explicit-ns/" pre:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, prefix pre" %(gp)s0:bar="local-name bar, namespace http://explicit-ns/, generated prefix"/>"""%{'gp': DEFAULT_GENERATED_PREFIX}
# Stylesheets e1-e5 trigger RECOVERABLE errors (processor may recover by
# ignoring the offending instruction); see the Test() error groups.
sheet_e1 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<xsl:text>hello world</xsl:text>
<!-- error: children added to element before attribute (recovery optional) -->
<xsl:attribute name="att">foo</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
sheet_e2 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<!-- error: adding attribute to non-element (recovery optional) -->
<xsl:attribute name="att">foo</xsl:attribute>
</xsl:template>
</xsl:stylesheet>"""
sheet_e3 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<xsl:attribute name="foo">
<!-- error: creating non-text in attribute (recovery optional) -->
<xsl:comment>uh-oh</xsl:comment>
</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
sheet_e4 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<!-- error: creating attribute with illegal name 'xmlns' (recovery optional) -->
<xsl:attribute name="{concat('xml','ns')}">http://some-ns/</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
sheet_e5 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<!-- error: creating attribute with illegal name (recovery optional) -->
<xsl:attribute name="99foo">bar</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
# sheet_nre1 triggers a NON-RECOVERABLE error (illegal namespace URI).
sheet_nre1 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<!-- error: illegal namespace name (Namespaces in XML restriction) -->
<!-- XPath relies on Namespaces in XML, XSLT relies on XPath -->
<xsl:attribute name="foo" namespace="http://www.w3.org/XML/1998/namespace">bar</xsl:attribute>
<xsl:attribute name="baz" namespace="http://www.w3.org/2000/xmlns/">maz</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
def Test(tester):
    """Run the xsl:attribute test suite against *tester*.

    Exercises normal attribute creation, duplicate/namespace handling, and
    both recoverable and non-recoverable error conditions.  The call pattern
    and ordering are identical to the original flat sequence of twenty
    near-identical stanzas; the cases are just table-driven to remove the
    copy/paste boilerplate.
    """
    def _run(sheet_src, expected, title, exception_code=None):
        # Every case transforms the same one-element source document.
        source = test_harness.FileInfo(string=source_1)
        sheet = test_harness.FileInfo(string=sheet_src)
        if exception_code is None:
            test_harness.XsltTest(tester, source, [sheet], expected, title=title)
        else:
            test_harness.XsltTest(tester, source, [sheet], expected,
                                  exceptionCode=exception_code, title=title)

    tester.startGroup("xsl:attribute")
    for sheet_src, expected, title in (
        (sheet_1, expected_1, "xsl:attribute as child of literal result element"),
        (sheet_2, expected_2, "xsl:attribute as child of xsl:if child of l.r.e."),
        (sheet_3, expected_3, "xsl:attribute with namespace"),
        (sheet_4, expected_4, "adding attributes with the same expanded-name 1"),
        (sheet_5, expected_5, "adding attributes with the same expanded-name 2"),
        (sheet_6, expected_6, "adding attributes with the same expanded-name 3"),
        (sheet_7, expected_7, "adding attributes with the same expanded-name 4"),
        (sheet_8, expected_8, "adding attributes with the same expanded-name 5"),
        (sheet_9, expected_9, "serialization of linefeed in attribute value"),
        (sheet_10, expected_10, "substitution of xmlns prefix in attribute name"),
        (sheet_11, expected_11, "attributes in various namespaces"),
        (sheet_12, expected_12, "attributes in empty and in-scope default namespaces"),
        (sheet_13, expected_13, "attributes in empty and in-scope non-default namespaces"),
        (sheet_14, expected_14, "attributes in in-scope namespaces and with dup prefixes"),
    ):
        _run(sheet_src, expected, title)
    tester.groupDone()

    tester.startGroup("recoverable xsl:attribute errors")
    for sheet_src, code, title in (
        (sheet_e1, Error.ATTRIBUTE_ADDED_TOO_LATE, "adding attribute after non-attributes"),
        (sheet_e2, Error.ATTRIBUTE_ADDED_TO_NON_ELEMENT, "adding attribute to non-element"),
        (sheet_e3, Error.NONTEXT_IN_ATTRIBUTE, "creating non-text during xsl:attribute instantiation"),
        (sheet_e4, Error.BAD_ATTRIBUTE_NAME, "illegal attribute name ('xmlns')"),
        (sheet_e5, Error.INVALID_QNAME_ATTR, "illegal attribute name (non-QName)"),
    ):
        # On a recoverable error the expected output is the unchanged source.
        _run(sheet_src, source_1, title, exception_code=code)
    tester.groupDone()

    tester.startGroup("non-recoverable xsl:attribute errors")
    _run(sheet_nre1, source_1, "illegal namespace URI",
         exception_code=Error.INVALID_NS_URIREF_ATTR)
    tester.groupDone()
    return
|
import re
from Ft.Xml.Xslt import XsltException, Error
from Xml.Xslt import test_harness
from Ft.Xml.Xslt.XmlWriter import DEFAULT_GENERATED_PREFIX
# Unsophisticated comparer of XML strings: two strings are treated as
# equivalent when they contain the same multiset of attribute-looking
# substrings (name="value"), regardless of order.
_attrPattern = re.compile(r'[\w:]+="[^"]+"')


def _cmp_rawattrs(a, b):
    """cmp-style check: return True when *a* and *b* DIFFER in their
    attribute-like substrings, False when they match."""
    return sorted(_attrPattern.findall(a)) != sorted(_attrPattern.findall(b))
# Duplicate ("cleaned") copy of the xsl:attribute test fixtures.
# source_1 is the transform input shared by every test case below.
source_1 = """<dummy/>"""
sheet_1 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<xsl:attribute name="foo">bar</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_1 = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="bar"/>"""
sheet_2 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<xsl:if test="true()">
<xsl:attribute name="foo">bar</xsl:attribute>
</xsl:if>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_2 = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="bar"/>"""
# "xsl:attribute with namespace"
sheet_3 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<xsl:attribute name="foo" namespace="http://some-ns/">bar</xsl:attribute>
<xsl:attribute name="y:foo" namespace="http://some-other-ns/">bar</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_3 = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:%(gp)s0="http://some-ns/" xmlns:y="http://some-other-ns/" %(gp)s0:foo="bar" y:foo="bar"/>"""%{'gp': DEFAULT_GENERATED_PREFIX}
sheet_4 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<!-- duplicate attrs override previous -->
<xsl:attribute name="foo">bar</xsl:attribute>
<xsl:attribute name="foo">baz</xsl:attribute>
<xsl:attribute name="foo">maz</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_4 = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="maz"/>"""
sheet_5 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result foo="bar">
<!-- duplicate attrs override previous -->
<xsl:attribute name="foo">baz</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_5 = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="baz"/>"""
sheet_6 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<xsl:attribute name="foo">bar</xsl:attribute>
<!-- duplicate attrs override previous -->
<!-- we use xsl:if to obscure it a bit -->
<xsl:if test="true()">
<xsl:attribute name="foo">baz</xsl:attribute>
</xsl:if>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_6 = """<?xml version="1.0" encoding="UTF-8"?>
<result foo="baz"/>"""
#"adding attributes with the same expanded-name 4"
sheet_7 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<!-- duplicate attrs override previous -->
<xsl:attribute name="foo" namespace="http://some-ns/">bar</xsl:attribute>
<xsl:attribute name="x:foo" xmlns:x="http://some-ns/">baz</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_7 = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:org.4suite.4xslt.ns0="http://some-ns/" org.4suite.4xslt.ns0:foo="baz"/>"""
# "adding attributes with the same expanded-name 5"
sheet_8 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result x:foo="bar" xmlns:x="http://some-ns/">
<!-- duplicate attrs override previous -->
<xsl:attribute name="foo" namespace="http://some-ns/">baz</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_8 = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:x="http://some-ns/" x:foo="baz"/>"""
sheet_9 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<!-- linefeed must be serialized as -->
<xsl:attribute name="a">x
y</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_9 = """<?xml version="1.0" encoding="UTF-8"?>
<result a="x y"/>"""
sheet_10 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<!-- if an attribute prefix would be xmlns, it must be changed to something else -->
<xsl:attribute name="xmlns:foo" namespace="http://some-ns/">bar</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_10 = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:%(gp)s0="http://some-ns/" %(gp)s0:foo="bar"/>"""%{'gp': DEFAULT_GENERATED_PREFIX}
# "attributes in various namespaces"
sheet_11 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<!-- correct results are indicated in the attribute values -->
<xsl:attribute name="foo">local-name foo, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-empty-ns" namespace="">local-name in-empty-ns, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, generated prefix</xsl:attribute>
<xsl:attribute name="pre:foo" xmlns:pre="http://ns-for-pre/">local-name foo, namespace http://ns-for-pre/, preferred prefix pre</xsl:attribute>
<xsl:attribute name="pre:bar" xmlns:pre="http://ns-for-pre/" namespace="http://explicit-ns/">local-name bar, namespace http://explicit-ns/, generated prefix</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_11 = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns:pre="http://ns-for-pre/" xmlns:%(gp)s0="http://foo-ns/" xmlns:%(gp)s1="http://explicit-ns/" %(gp)s1:bar="local-name bar, namespace http://explicit-ns/, generated prefix" foo="local-name foo, no namespace, no prefix" in-empty-ns="local-name in-empty-ns, no namespace, no prefix" pre:foo="local-name foo, namespace http://ns-for-pre/, preferred prefix pre" %(gp)s0:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, generated prefix"/>"""%{'gp': DEFAULT_GENERATED_PREFIX}
sheet_12 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<!-- the element should be in the http://foo-ns/ namespace. -->
<!-- the element *may*, but most likely won't, bear the same generated prefix as the in-foo-ns attribute. -->
<result xmlns="http://foo-ns/">
<!-- A default namespace is in scope, but this does not affect the value of 'name' in xsl:attribute. -->
<!-- in-foo-ns attribute does not inherit the default namespace. It *must* have a prefix, bound to http://foo-ns/ -->
<xsl:attribute name="foo">local-name foo, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-empty-ns" namespace="">local-name in-empty-ns, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, generated prefix</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
expected_12 = """<?xml version="1.0" encoding="UTF-8"?>
<result xmlns="http://foo-ns/" xmlns:%(gp)s0="http://foo-ns/" foo="local-name foo, no namespace, no prefix" in-empty-ns="local-name in-empty-ns, no namespace, no prefix" %(gp)s0:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, generated prefix"/>"""%{'gp': DEFAULT_GENERATED_PREFIX}
# "attributes in empty and in-scope default namespaces"
sheet_13 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<!-- element should be in http://foo-ns/ namespace, retaining prefix foo -->
<foo:result xmlns:foo="http://foo-ns/">
<xsl:attribute name="foo">local-name foo, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-empty-ns" namespace="">local-name in-empty-ns, no namespace, no prefix</xsl:attribute>
<xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, prefix foo</xsl:attribute>
</foo:result>
</xsl:template>
</xsl:stylesheet>"""
# it's technically OK for the in-foo-ns attr to have a
# generated prefix, but it really should re-use the foo.
#
expected_13 = """<?xml version="1.0" encoding="UTF-8"?>
<foo:result xmlns:foo="http://foo-ns/" foo="local-name foo, no namespace, no prefix" in-empty-ns="local-name in-empty-ns, no namespace, no prefix" foo:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, prefix foo"/>"""
sheet_14 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<!-- element should be in http://foo-ns/ namespace, retaining prefix foo -->
<pre:result xmlns:pre="http://foo-ns/">
<xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, prefix pre</xsl:attribute>
<xsl:attribute name="pre:bar" xmlns:pre="http://ns-for-pre/" namespace="http://explicit-ns/">local-name bar, namespace http://explicit-ns/, generated prefix</xsl:attribute>
</pre:result>
</xsl:template>
</xsl:stylesheet>"""
# the bar attribute must have a generated prefix.
# it's technically OK for the in-foo-ns attr to have a
# generated prefix, but it really should re-use the pre.
#
expected_14 = """<?xml version="1.0" encoding="UTF-8"?>
<pre:result xmlns:pre="http://foo-ns/" xmlns:%(gp)s0="http://explicit-ns/" pre:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, prefix pre" %(gp)s0:bar="local-name bar, namespace http://explicit-ns/, generated prefix"/>"""%{'gp': DEFAULT_GENERATED_PREFIX}
sheet_e1 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<xsl:text>hello world</xsl:text>
<!-- error: children added to element before attribute (recovery optional) -->
<xsl:attribute name="att">foo</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
sheet_e2 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<!-- error: adding attribute to non-element (recovery optional) -->
<xsl:attribute name="att">foo</xsl:attribute>
</xsl:template>
</xsl:stylesheet>"""
sheet_e3 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<xsl:attribute name="foo">
<!-- error: creating non-text in attribute (recovery optional) -->
<xsl:comment>uh-oh</xsl:comment>
</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
sheet_e4 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<!-- error: creating attribute with illegal name 'xmlns' (recovery optional) -->
<xsl:attribute name="{concat('xml','ns')}">http://some-ns/</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
sheet_e5 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<!-- error: creating attribute with illegal name (recovery optional) -->
<xsl:attribute name="99foo">bar</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
sheet_nre1 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/">
<result>
<!-- error: illegal namespace name (Namespaces in XML restriction) -->
<!-- XPath relies on Namespaces in XML, XSLT relies on XPath -->
<xsl:attribute name="foo" namespace="http://www.w3.org/XML/1998/namespace">bar</xsl:attribute>
<xsl:attribute name="baz" namespace="http://www.w3.org/2000/xmlns/">maz</xsl:attribute>
</result>
</xsl:template>
</xsl:stylesheet>"""
def Test(tester):
    """Run the xsl:attribute suite: normal behavior, then recoverable and
    non-recoverable error cases, in the same order as before."""
    # Normal-behavior cases: (stylesheet source, expected output, title).
    good_cases = [
        (sheet_1, expected_1, "xsl:attribute as child of literal result element"),
        (sheet_2, expected_2, "xsl:attribute as child of xsl:if child of l.r.e."),
        (sheet_3, expected_3, "xsl:attribute with namespace"),
        (sheet_4, expected_4, "adding attributes with the same expanded-name 1"),
        (sheet_5, expected_5, "adding attributes with the same expanded-name 2"),
        (sheet_6, expected_6, "adding attributes with the same expanded-name 3"),
        (sheet_7, expected_7, "adding attributes with the same expanded-name 4"),
        (sheet_8, expected_8, "adding attributes with the same expanded-name 5"),
        (sheet_9, expected_9, "serialization of linefeed in attribute value"),
        (sheet_10, expected_10, "substitution of xmlns prefix in attribute name"),
        (sheet_11, expected_11, "attributes in various namespaces"),
        (sheet_12, expected_12, "attributes in empty and in-scope default namespaces"),
        (sheet_13, expected_13, "attributes in empty and in-scope non-default namespaces"),
        (sheet_14, expected_14, "attributes in in-scope namespaces and with dup prefixes"),
    ]
    tester.startGroup("xsl:attribute")
    for sheet_src, expected, title in good_cases:
        source = test_harness.FileInfo(string=source_1)
        sheet = test_harness.FileInfo(string=sheet_src)
        test_harness.XsltTest(tester, source, [sheet], expected, title=title)
    tester.groupDone()

    # Recoverable errors: output is unchanged source, plus an exception code.
    recoverable_cases = [
        (sheet_e1, Error.ATTRIBUTE_ADDED_TOO_LATE,
         "adding attribute after non-attributes"),
        (sheet_e2, Error.ATTRIBUTE_ADDED_TO_NON_ELEMENT,
         "adding attribute to non-element"),
        (sheet_e3, Error.NONTEXT_IN_ATTRIBUTE,
         "creating non-text during xsl:attribute instantiation"),
        (sheet_e4, Error.BAD_ATTRIBUTE_NAME,
         "illegal attribute name ('xmlns')"),
        (sheet_e5, Error.INVALID_QNAME_ATTR,
         "illegal attribute name (non-QName)"),
    ]
    tester.startGroup("recoverable xsl:attribute errors")
    for sheet_src, code, title in recoverable_cases:
        source = test_harness.FileInfo(string=source_1)
        sheet = test_harness.FileInfo(string=sheet_src)
        test_harness.XsltTest(tester, source, [sheet], source_1,
                              exceptionCode=code, title=title)
    tester.groupDone()

    # Non-recoverable error: reserved namespace URIs are rejected outright.
    tester.startGroup("non-recoverable xsl:attribute errors")
    source = test_harness.FileInfo(string=source_1)
    sheet = test_harness.FileInfo(string=sheet_nre1)
    test_harness.XsltTest(tester, source, [sheet], source_1,
                          exceptionCode=Error.INVALID_NS_URIREF_ATTR,
                          title="illegal namespace URI")
    tester.groupDone()
    return
|
en
| 0.182512
|
# an unsophisticated comparer of XML strings that just checks to see if # both strings have the same set of substrings that look like attributes. <dummy/> <?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <result> <xsl:attribute name="foo">bar</xsl:attribute> </result> </xsl:template> </xsl:stylesheet> <?xml version="1.0" encoding="UTF-8"?> <result foo="bar"/> <?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <result> <xsl:if test="true()"> <xsl:attribute name="foo">bar</xsl:attribute> </xsl:if> </result> </xsl:template> </xsl:stylesheet> <?xml version="1.0" encoding="UTF-8"?> <result foo="bar"/> # "xsl:attribute with namespace" <?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <result> <xsl:attribute name="foo" namespace="http://some-ns/">bar</xsl:attribute> <xsl:attribute name="y:foo" namespace="http://some-other-ns/">bar</xsl:attribute> </result> </xsl:template> </xsl:stylesheet> <?xml version="1.0" encoding="UTF-8"?> <result xmlns:%(gp)s0="http://some-ns/" xmlns:y="http://some-other-ns/" %(gp)s0:foo="bar" y:foo="bar"/> <?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <result> <!-- duplicate attrs override previous --> <xsl:attribute name="foo">bar</xsl:attribute> <xsl:attribute name="foo">baz</xsl:attribute> <xsl:attribute name="foo">maz</xsl:attribute> </result> </xsl:template> </xsl:stylesheet> <?xml version="1.0" encoding="UTF-8"?> <result foo="maz"/> <?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" 
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <result foo="bar"> <!-- duplicate attrs override previous --> <xsl:attribute name="foo">baz</xsl:attribute> </result> </xsl:template> </xsl:stylesheet> <?xml version="1.0" encoding="UTF-8"?> <result foo="baz"/> <?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <result> <xsl:attribute name="foo">bar</xsl:attribute> <!-- duplicate attrs override previous --> <!-- we use xsl:if to obscure it a bit --> <xsl:if test="true()"> <xsl:attribute name="foo">baz</xsl:attribute> </xsl:if> </result> </xsl:template> </xsl:stylesheet> <?xml version="1.0" encoding="UTF-8"?> <result foo="baz"/> #"adding attributes with the same expanded-name 4" <?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <result> <!-- duplicate attrs override previous --> <xsl:attribute name="foo" namespace="http://some-ns/">bar</xsl:attribute> <xsl:attribute name="x:foo" xmlns:x="http://some-ns/">baz</xsl:attribute> </result> </xsl:template> </xsl:stylesheet> <?xml version="1.0" encoding="UTF-8"?> <result xmlns:org.4suite.4xslt.ns0="http://some-ns/" org.4suite.4xslt.ns0:foo="baz"/> # "adding attributes with the same expanded-name 5" <?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <result x:foo="bar" xmlns:x="http://some-ns/"> <!-- duplicate attrs override previous --> <xsl:attribute name="foo" namespace="http://some-ns/">baz</xsl:attribute> </result> </xsl:template> </xsl:stylesheet> <?xml version="1.0" encoding="UTF-8"?> <result xmlns:x="http://some-ns/" x:foo="baz"/> <?xml version="1.0" 
encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <result> <!-- linefeed must be serialized as --> <xsl:attribute name="a">x y</xsl:attribute> </result> </xsl:template> </xsl:stylesheet> <?xml version="1.0" encoding="UTF-8"?> <result a="x y"/> <?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <result> <!-- if an attribute prefix would be xmlns, it must be changed to something else --> <xsl:attribute name="xmlns:foo" namespace="http://some-ns/">bar</xsl:attribute> </result> </xsl:template> </xsl:stylesheet> <?xml version="1.0" encoding="UTF-8"?> <result xmlns:%(gp)s0="http://some-ns/" %(gp)s0:foo="bar"/> # "attributes in various namespaces" <?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <result> <!-- correct results are indicated in the attribute values --> <xsl:attribute name="foo">local-name foo, no namespace, no prefix</xsl:attribute> <xsl:attribute name="in-empty-ns" namespace="">local-name in-empty-ns, no namespace, no prefix</xsl:attribute> <xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, generated prefix</xsl:attribute> <xsl:attribute name="pre:foo" xmlns:pre="http://ns-for-pre/">local-name foo, namespace http://ns-for-pre/, preferred prefix pre</xsl:attribute> <xsl:attribute name="pre:bar" xmlns:pre="http://ns-for-pre/" namespace="http://explicit-ns/">local-name bar, namespace http://explicit-ns/, generated prefix</xsl:attribute> </result> </xsl:template> </xsl:stylesheet> <?xml version="1.0" encoding="UTF-8"?> <result xmlns:pre="http://ns-for-pre/" xmlns:%(gp)s0="http://foo-ns/" xmlns:%(gp)s1="http://explicit-ns/" 
%(gp)s1:bar="local-name bar, namespace http://explicit-ns/, generated prefix" foo="local-name foo, no namespace, no prefix" in-empty-ns="local-name in-empty-ns, no namespace, no prefix" pre:foo="local-name foo, namespace http://ns-for-pre/, preferred prefix pre" %(gp)s0:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, generated prefix"/> <?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <!-- the element should be in the http://foo-ns/ namespace. --> <!-- the element *may*, but most likely won't, bear the same generated prefix as the in-foo-ns attribute. --> <result xmlns="http://foo-ns/"> <!-- A default namespace is in scope, but this does not affect the value of 'name' in xsl:attribute. --> <!-- in-foo-ns attribute does not inherit the default namespace. It *must* have a prefix, bound to http://foo-ns/ --> <xsl:attribute name="foo">local-name foo, no namespace, no prefix</xsl:attribute> <xsl:attribute name="in-empty-ns" namespace="">local-name in-empty-ns, no namespace, no prefix</xsl:attribute> <xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, generated prefix</xsl:attribute> </result> </xsl:template> </xsl:stylesheet> <?xml version="1.0" encoding="UTF-8"?> <result xmlns="http://foo-ns/" xmlns:%(gp)s0="http://foo-ns/" foo="local-name foo, no namespace, no prefix" in-empty-ns="local-name in-empty-ns, no namespace, no prefix" %(gp)s0:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, generated prefix"/> # "attributes in empty and in-scope default namespaces" <?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <!-- element should be in http://foo-ns/ namespace, retaining prefix foo --> <foo:result xmlns:foo="http://foo-ns/"> 
<xsl:attribute name="foo">local-name foo, no namespace, no prefix</xsl:attribute> <xsl:attribute name="in-empty-ns" namespace="">local-name in-empty-ns, no namespace, no prefix</xsl:attribute> <xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, prefix foo</xsl:attribute> </foo:result> </xsl:template> </xsl:stylesheet> # it's technically OK for the in-foo-ns attr to have a # generated prefix, but it really should re-use the foo. # <?xml version="1.0" encoding="UTF-8"?> <foo:result xmlns:foo="http://foo-ns/" foo="local-name foo, no namespace, no prefix" in-empty-ns="local-name in-empty-ns, no namespace, no prefix" foo:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, prefix foo"/> <?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <!-- element should be in http://foo-ns/ namespace, retaining prefix foo --> <pre:result xmlns:pre="http://foo-ns/"> <xsl:attribute name="in-foo-ns" namespace="http://foo-ns/">local-name in-foo-ns, namespace http://foo-ns/, prefix pre</xsl:attribute> <xsl:attribute name="pre:bar" xmlns:pre="http://ns-for-pre/" namespace="http://explicit-ns/">local-name bar, namespace http://explicit-ns/, generated prefix</xsl:attribute> </pre:result> </xsl:template> </xsl:stylesheet> # the bar attribute must have a generated prefix. # it's technically OK for the in-foo-ns attr to have a # generated prefix, but it really should re-use the pre. 
# <?xml version="1.0" encoding="UTF-8"?> <pre:result xmlns:pre="http://foo-ns/" xmlns:%(gp)s0="http://explicit-ns/" pre:in-foo-ns="local-name in-foo-ns, namespace http://foo-ns/, prefix pre" %(gp)s0:bar="local-name bar, namespace http://explicit-ns/, generated prefix"/> <?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <result> <xsl:text>hello world</xsl:text> <!-- error: children added to element before attribute (recovery optional) --> <xsl:attribute name="att">foo</xsl:attribute> </result> </xsl:template> </xsl:stylesheet> <?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <!-- error: adding attribute to non-element (recovery optional) --> <xsl:attribute name="att">foo</xsl:attribute> </xsl:template> </xsl:stylesheet> <?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <result> <xsl:attribute name="foo"> <!-- error: creating non-text in attribute (recovery optional) --> <xsl:comment>uh-oh</xsl:comment> </xsl:attribute> </result> </xsl:template> </xsl:stylesheet> <?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <result> <!-- error: creating attribute with illegal name 'xmlns' (recovery optional) --> <xsl:attribute name="{concat('xml','ns')}">http://some-ns/</xsl:attribute> </result> </xsl:template> </xsl:stylesheet> <?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <result> <!-- error: creating attribute with 
illegal name (recovery optional) --> <xsl:attribute name="99foo">bar</xsl:attribute> </result> </xsl:template> </xsl:stylesheet> <?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:output method="xml" indent="no"/> <xsl:template match="/"> <result> <!-- error: illegal namespace name (Namespaces in XML restriction) --> <!-- XPath relies on Namespaces in XML, XSLT relies on XPath --> <xsl:attribute name="foo" namespace="http://www.w3.org/XML/1998/namespace">bar</xsl:attribute> <xsl:attribute name="baz" namespace="http://www.w3.org/2000/xmlns/">maz</xsl:attribute> </result> </xsl:template> </xsl:stylesheet>
| 2.433593
| 2
|
ctutlz/scripts/ctloglist.py
|
josephnoir/ctutlz
| 33
|
6627211
|
<reponame>josephnoir/ctutlz
'''Download, merge and summarize known logs for Certificate Transparency (CT).
Print output to stdout, warnings and errors to stderr.
The source of information is:
https://www.gstatic.com/ct/log_list/v2/all_logs_list.json
from page https://www.certificate-transparency.org/known-logs
'''
import argparse
import datetime
import json
import logging
from utlz import first_paragraph, red
from ctutlz.ctlog import download_log_list
from ctutlz.ctlog import set_operator_names, print_schema
from ctutlz.ctlog import URL_ALL_LOGS, Logs
from ctutlz.utils.logger import VERBOSE, init_logger, setup_logging, logger
from ctutlz._version import __version__
def create_parser():
    """Build the argparse parser for the ctloglist command-line tool."""
    parser = argparse.ArgumentParser(description=first_paragraph(__doc__))
    # Everything after the docstring's first paragraph becomes the --help epilog.
    parser.epilog = __doc__.split('\n', 1)[-1]
    parser.add_argument('-v', '--version',
                        action='version',
                        default=False,
                        version=__version__,
                        help='print version number')
    # Verbosity flags are mutually exclusive; VERBOSE is the default level.
    verbosity = parser.add_mutually_exclusive_group()
    verbosity.add_argument('--short',
                           dest='loglevel',
                           action='store_const',
                           const=logging.INFO,
                           default=VERBOSE,  # default loglevel if nothing set
                           help='show short results')
    verbosity.add_argument('--debug',
                           dest='loglevel',
                           action='store_const',
                           const=logging.DEBUG,
                           help='show more for diagnostic purposes')
    # Output-mode flags are mutually exclusive as well.
    output_mode = parser.add_mutually_exclusive_group()
    output_mode.add_argument('--json',
                             action='store_true',
                             dest='print_json',
                             help='print merged log lists as json')
    output_mode.add_argument('--schema',
                             action='store_true',
                             dest='print_schema',
                             help='print json schema')
    return parser
def warn_inconsistency(url, val_a, val_b):
    """Warn (once) about contradictory data for the log at `url`.

    The dedup key is order-insensitive in (val_a, val_b): the concatenated
    values are character-sorted, so (a, b) and (b, a) count as the same
    inconsistency. Repeated calls with an already-seen key are silent.
    """
    # Function attribute keeps state across calls without a module global.
    # Using a set instead of a dict-of-True simplifies the membership test.
    if not hasattr(warn_inconsistency, 'seen'):
        warn_inconsistency.seen = set()
    key = url + ''.join(sorted('%s%s' % (val_a, val_b)))
    if key in warn_inconsistency.seen:
        return
    warn_inconsistency.seen.add(key)
    logger.warning(red('inconsistent data for log %s: %s != %s'
                       % (url, val_a, val_b)))
def data_structure_from_log(log):
    """Convert a log record into a plain dict, adding its derived fields."""
    data = dict(log._asdict())
    # Derived properties are not included by _asdict(); copy them explicitly.
    data['id_b64'] = log.id_b64
    data['pubkey'] = log.pubkey
    data['scts_accepted_by_chrome'] = log.scts_accepted_by_chrome
    return data
def list_from_lists(log_lists):
    """Flatten several {'logs': [...]} dicts into one list of log dicts."""
    return [data_structure_from_log(log)
            for item_dict in log_lists
            for log in item_dict['logs']]
def show_log(log, order=3):
    """Print one log as a markdown section at heading depth `order`."""
    logger.verbose('#' * order + ' %s\n' % log.url)
    for key, value in log._asdict().items():
        if key == 'id_b64_non_calculated' and value == log.id_b64:
            value = None  # redundant with the calculated id; don't log it
        if key == 'operated_by':
            value = ', '.join(value)
        # avoid markdown syntax interpretation and improve readablity
        key = key.replace('_', ' ')
        if value is not None:
            logger.verbose('* __%s__: `%s`' % (key, value))
    logger.verbose('* __scts accepted by chrome__: '
                   '%s' % log.scts_accepted_by_chrome)
    if log.key is not None:
        logger.verbose('* __id b64__: `%s`' % log.log_id)
        logger.verbose('* __pubkey__:\n```\n%s\n```' % log.pubkey)
    logger.verbose('')
def show_logs(logs, heading, order=2):
    """Print a markdown section listing all given logs.

    Args:
        logs: sequence of log objects (must support len()).
        heading: section title; an empty string suppresses the heading line.
        order: markdown heading depth for the section title.
    """
    if len(logs) <= 0:
        return
    # Bugfix: the original expression `'#' * order + '%s\n' % ' ' + heading`
    # emitted the newline *before* the heading, because `%` binds tighter
    # than `+`. Intended output is e.g. '## Heading\n'.
    logger.info('%s %s\n' % ('#' * order, heading) if heading else '')
    s_or_not = 's' if len(logs) != 1 else ''
    # show log size
    logger.info('%i log%s\n' % (len(logs), s_or_not))
    # list log urls; at verbose level, link each url to its detail section
    for log in logs:
        if logger.level < logging.INFO:
            anchor = log.url.replace('/', '')
            logger.verbose('* [%s](#%s)' % (log.url, anchor))
        else:
            logger.info('* %s' % log.url)
    logger.info('')
    for log in logs:
        show_log(log)
    logger.info('End of list')
def ctloglist(print_json=None):
    '''Gather ct-log lists and print the merged log list.

    Args:
        print_json(boolean): If True, print merged log list as json data.
                             Else print as markdown.
    '''
    if not print_json:
        # Markdown header with provenance and timestamps.
        today = datetime.date.today()
        now = datetime.datetime.now()
        logger.info('# Known Certificate Transparency (CT) Logs\n')
        logger.verbose('Created with [ctloglist]'
                       '(https://github.com/theno/ctutlz#ctloglist)\n')
        logger.verbose('* [all_logs_list.json]('
                       'https://www.gstatic.com/ct/log_list/v2/all_logs_list.json)'
                       '\n')
        logger.info('Version (Date): %s\n' % today)
        logger.verbose('Datetime: %s\n' % now)
        logger.info('')  # formatting: insert empty line
    # all_logs_list.json
    all_dict = download_log_list(URL_ALL_LOGS)
    # Keep a pristine copy for json output before operator names are rewritten.
    orig_all_dict = dict(all_dict)
    set_operator_names(all_dict)
    all_logs = Logs([all_dict])
    if print_json:
        print(json.dumps(orig_all_dict, indent=4, sort_keys=True))
    else:
        show_logs(all_logs, '')
def main():
    """Entry point: parse arguments, configure logging, dispatch."""
    init_logger()
    args = create_parser().parse_args()
    setup_logging(args.loglevel)
    logger.debug(args)
    if args.print_schema:
        print_schema()
    else:
        ctloglist(args.print_json)


if __name__ == '__main__':
    main()
|
'''Download, merge and summarize known logs for Certificate Transparency (CT).
Print output to stdout, warnings and errors to stderr.
The source of information is:
https://www.gstatic.com/ct/log_list/v2/all_logs_list.json
from page https://www.certificate-transparency.org/known-logs
'''
import argparse
import datetime
import json
import logging
from utlz import first_paragraph, red
from ctutlz.ctlog import download_log_list
from ctutlz.ctlog import set_operator_names, print_schema
from ctutlz.ctlog import URL_ALL_LOGS, Logs
from ctutlz.utils.logger import VERBOSE, init_logger, setup_logging, logger
from ctutlz._version import __version__
def create_parser():
    """Build the argparse parser for the ctloglist command-line tool."""
    parser = argparse.ArgumentParser(description=first_paragraph(__doc__))
    # Everything after the docstring's first paragraph becomes the --help epilog.
    parser.epilog = __doc__.split('\n', 1)[-1]
    parser.add_argument('-v', '--version',
                        action='version',
                        default=False,
                        version=__version__,
                        help='print version number')
    # Verbosity flags are mutually exclusive; VERBOSE is the default level.
    verbosity = parser.add_mutually_exclusive_group()
    verbosity.add_argument('--short',
                           dest='loglevel',
                           action='store_const',
                           const=logging.INFO,
                           default=VERBOSE,  # default loglevel if nothing set
                           help='show short results')
    verbosity.add_argument('--debug',
                           dest='loglevel',
                           action='store_const',
                           const=logging.DEBUG,
                           help='show more for diagnostic purposes')
    # Output-mode flags are mutually exclusive as well.
    output_mode = parser.add_mutually_exclusive_group()
    output_mode.add_argument('--json',
                             action='store_true',
                             dest='print_json',
                             help='print merged log lists as json')
    output_mode.add_argument('--schema',
                             action='store_true',
                             dest='print_schema',
                             help='print json schema')
    return parser
def warn_inconsistency(url, val_a, val_b):
    """Warn (once) about contradictory data for the log at `url`.

    The dedup key is order-insensitive in (val_a, val_b): the concatenated
    values are character-sorted, so (a, b) and (b, a) count as the same
    inconsistency. Repeated calls with an already-seen key are silent.
    """
    # Function attribute keeps state across calls without a module global.
    # Using a set instead of a dict-of-True simplifies the membership test.
    if not hasattr(warn_inconsistency, 'seen'):
        warn_inconsistency.seen = set()
    key = url + ''.join(sorted('%s%s' % (val_a, val_b)))
    if key in warn_inconsistency.seen:
        return
    warn_inconsistency.seen.add(key)
    logger.warning(red('inconsistent data for log %s: %s != %s'
                       % (url, val_a, val_b)))
def data_structure_from_log(log):
    """Convert a log record into a plain dict, adding its derived fields."""
    data = dict(log._asdict())
    # Derived properties are not included by _asdict(); copy them explicitly.
    data['id_b64'] = log.id_b64
    data['pubkey'] = log.pubkey
    data['scts_accepted_by_chrome'] = log.scts_accepted_by_chrome
    return data
def list_from_lists(log_lists):
    """Flatten several {'logs': [...]} dicts into one list of log dicts."""
    return [data_structure_from_log(log)
            for item_dict in log_lists
            for log in item_dict['logs']]
def show_log(log, order=3):
    """Print one log as a markdown section at heading depth `order`."""
    logger.verbose('#' * order + ' %s\n' % log.url)
    for key, value in log._asdict().items():
        if key == 'id_b64_non_calculated' and value == log.id_b64:
            value = None  # redundant with the calculated id; don't log it
        if key == 'operated_by':
            value = ', '.join(value)
        # avoid markdown syntax interpretation and improve readablity
        key = key.replace('_', ' ')
        if value is not None:
            logger.verbose('* __%s__: `%s`' % (key, value))
    logger.verbose('* __scts accepted by chrome__: '
                   '%s' % log.scts_accepted_by_chrome)
    if log.key is not None:
        logger.verbose('* __id b64__: `%s`' % log.log_id)
        logger.verbose('* __pubkey__:\n```\n%s\n```' % log.pubkey)
    logger.verbose('')
def show_logs(logs, heading, order=2):
    """Print a markdown section listing all given logs.

    Args:
        logs: sequence of log objects (must support len()).
        heading: section title; an empty string suppresses the heading line.
        order: markdown heading depth for the section title.
    """
    if len(logs) <= 0:
        return
    # Bugfix: the original expression `'#' * order + '%s\n' % ' ' + heading`
    # emitted the newline *before* the heading, because `%` binds tighter
    # than `+`. Intended output is e.g. '## Heading\n'.
    logger.info('%s %s\n' % ('#' * order, heading) if heading else '')
    s_or_not = 's' if len(logs) != 1 else ''
    # show log size
    logger.info('%i log%s\n' % (len(logs), s_or_not))
    # list log urls; at verbose level, link each url to its detail section
    for log in logs:
        if logger.level < logging.INFO:
            anchor = log.url.replace('/', '')
            logger.verbose('* [%s](#%s)' % (log.url, anchor))
        else:
            logger.info('* %s' % log.url)
    logger.info('')
    for log in logs:
        show_log(log)
    logger.info('End of list')
def ctloglist(print_json=None):
    '''Gather ct-log lists and print the merged log list.

    Args:
        print_json(boolean): If True, print merged log list as json data.
                             Else print as markdown.
    '''
    if not print_json:
        # Markdown header with provenance and timestamps.
        today = datetime.date.today()
        now = datetime.datetime.now()
        logger.info('# Known Certificate Transparency (CT) Logs\n')
        logger.verbose('Created with [ctloglist]'
                       '(https://github.com/theno/ctutlz#ctloglist)\n')
        logger.verbose('* [all_logs_list.json]('
                       'https://www.gstatic.com/ct/log_list/v2/all_logs_list.json)'
                       '\n')
        logger.info('Version (Date): %s\n' % today)
        logger.verbose('Datetime: %s\n' % now)
        logger.info('')  # formatting: insert empty line
    # all_logs_list.json
    all_dict = download_log_list(URL_ALL_LOGS)
    # Keep a pristine copy for json output before operator names are rewritten.
    orig_all_dict = dict(all_dict)
    set_operator_names(all_dict)
    all_logs = Logs([all_dict])
    if print_json:
        print(json.dumps(orig_all_dict, indent=4, sort_keys=True))
    else:
        show_logs(all_logs, '')
def main():
    """Entry point: parse arguments, configure logging, dispatch."""
    init_logger()
    args = create_parser().parse_args()
    setup_logging(args.loglevel)
    logger.debug(args)
    if args.print_schema:
        print_schema()
    else:
        ctloglist(args.print_json)


if __name__ == '__main__':
    main()
|
en
| 0.685046
|
Download, merge and summarize known logs for Certificate Transparency (CT). Print output to stdout, warnings and errors to stderr. The source of information is: https://www.gstatic.com/ct/log_list/v2/all_logs_list.json from page https://www.certificate-transparency.org/known-logs # default loglevel if nothing set # suppress warning doubles (i know it's hacky) # don't log this value # avoid markdown syntax interpretation and improve readablity # show log size # list log urls #%s)' % (log.url, anchor)) Gather ct-log lists and print the merged log list. Args: print_json(boolean): If True, print merged log list as json data. Else print as markdown. #ctloglist)\n') # formatting: insert empty line # all_logs_list.json
| 2.549903
| 3
|
copy.py
|
yijiull/loopix-1
| 0
|
6627212
|
<filename>copy.py
import sys
import os

# Create 100 per-port copies of the client runner script.  Client ports start
# at 3100 and are spaced 3 apart; existing copies are left untouched.
NUM_CLIENTS = 100
BASE_PORT = 3100
for idx in range(NUM_CLIENTS):
    client_port = BASE_PORT + idx * 3
    target = "./loopix/run_client_%d.py" % client_port
    if not os.path.exists(target):
        os.system('cp ./loopix/run_client.py ./loopix/run_client_%d.py' % client_port)
|
<filename>copy.py
import sys
import os

# Create m per-port copies of the client runner script; ports start at `port`
# and are spaced 3 apart.  Existing copies are left untouched.
m = 100
port = 3100
for i in range(m):
    cur = port + i * 3
    # Only copy if the per-port script does not exist yet.
    if not os.path.exists("./loopix/run_client_%d.py" % cur):
        os.system('cp ./loopix/run_client.py ./loopix/run_client_%d.py' % cur)
|
none
| 1
| 2.301317
| 2
|
|
djangopypi2/apps/pypi_packages/tests.py
|
dgabrielson/djangopypi2
| 0
|
6627213
|
#######################
from __future__ import unicode_literals, print_function
#######################
from django.contrib.auth import login
from django.contrib.auth.models import User
from django.core.files import File
from django.core.urlresolvers import resolve, reverse
from django.test import Client, TestCase
from mock import *
from . import package_views, release_views
from .models import *
class ListViewTest(TestCase):
    """Tests for the package index (list) view, including simple search."""

    fixtures = ["test_pypi_package"]
    list_package_url = reverse("djangopypi2-packages-index")

    def test_handler(self):
        # The URL must resolve to the Index class-based view.
        view_func = resolve(self.list_package_url).func
        self.assertEqual(view_func.func_name, package_views.Index.as_view().func_name)

    def test_list(self):
        response = Client().get(self.list_package_url)
        self.assertEqual(response.status_code, 200)
        for name in ("testPackage1", "testPackage2"):
            self.assertContains(response, name)

    def test_search(self):
        # A matching query narrows the listing to matching packages.
        response = Client().get(self.list_package_url, {"query": "package1"})
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "testPackage1")
        self.assertNotContains(response, "testPackage2")

    def test_search_not_exist(self):
        # A query with no matches still renders (HTTP 200) with no packages.
        response = Client().get(self.list_package_url, {"query": "packageNotExist"})
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, "testPackage1")
        self.assertNotContains(response, "testPackage2")
class AdvancedSearchTest(TestCase):
    """Tests for the advanced package search view."""

    fixtures = ["test_pypi_package"]
    search_package_url = reverse("djangopypi2-packages-search")

    def test_handler(self):
        handler = resolve(self.search_package_url).func
        self.assertEqual(handler.func_name, package_views.advanced_search.func_name)

    def test_get(self):
        # The search form itself must be reachable.
        self.assertEqual(Client().get(self.search_package_url).status_code, 200)

    def test_advanced_search_name(self):
        response = Client().post(self.search_package_url, {"name": "testPackage1"})
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "testPackage1")
        self.assertNotContains(response, "testPackage2")

    def test_advanced_search_name_not_exist(self):
        response = Client().post(self.search_package_url, {"name": "packageNotExist"})
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, "testPackage1")
        self.assertNotContains(response, "testPackage2")
class PackageDetailTest(TestCase):
    """Tests for the single-package detail view."""

    fixtures = ["test_pypi_package"]

    def _url(self, name):
        # Build the detail URL for the given package name.
        return reverse("djangopypi2-package", kwargs = {"package_name": name})

    def test_handler(self):
        view_func = resolve(self._url("anyPackage")).func
        self.assertEqual(view_func.func_name, package_views.PackageDetails.as_view().func_name)

    def test_get(self):
        self.assertEqual(Client().get(self._url("testPackage1")).status_code, 200)

    def test_package_not_exist(self):
        self.assertEqual(Client().get(self._url("packageNotExist")).status_code, 404)
class PackageDeleteTest(TestCase):
    """Tests for deleting a whole package via the DeletePackage view."""

    fixtures = ["test_pypi_package"]

    def _delete_url(self, name):
        # Build the delete URL for the given package name.
        return reverse("djangopypi2-package-delete", kwargs = {"package_name": name})

    def test_handler(self):
        view_func = resolve(self._delete_url("anyPackage")).func
        self.assertEqual(view_func.func_name, package_views.DeletePackage.as_view().func_name)

    def test_not_logged_in(self):
        # Anonymous users may not delete packages.
        response = Client().get(self._delete_url("anyPackage"))
        self.assertEqual(response.status_code, 403)

    def test_not_owners(self):
        # user2 does not own testPackage1, so deletion is forbidden.
        client = Client()
        client.login(username="user2", password="password")
        self.assertEqual(client.get(self._delete_url("testPackage1")).status_code, 403)

    def test_package_not_exist(self):
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(self._delete_url("packageNotExist"))
        # 403 because of user_owns_package implementation
        self.assertEqual(response.status_code, 403)

    def test_delete_success(self):
        # Mock os.remove because the package file doesn't actually exist.
        patcher = patch("os.remove")
        self.addCleanup(patcher.stop)
        self.mock_remove = patcher.start()
        self.assertTrue(Package.objects.filter(name__exact = "testPackage1").exists())
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(self._delete_url("testPackage1"))
        self.assertEqual(response.status_code, 302)
        self.assertFalse(Package.objects.filter(name__exact = "testPackage1").exists())
        self.assertTrue(self.mock_remove.called)
class PackagePermissionTest(TestCase):
    """Tests for adding/removing owners and maintainers of a package."""

    fixtures = ["test_pypi_package"]
    package1_permission_url = reverse("djangopypi2-package-permission", kwargs = {"package_name": "testPackage1"})

    def _post_permission(self, data, login_as=None):
        # POST a permission change, optionally authenticated as `login_as`.
        client = Client()
        if login_as is not None:
            client.login(username=login_as, password="password")
        return client.post(self.package1_permission_url, data)

    def test_handler(self):
        url = reverse("djangopypi2-package-permission", kwargs = {"package_name": "anyPackage"})
        self.assertEqual(resolve(url).func.func_name, package_views.PackagePermission.as_view().func_name)

    # --- adding owners ------------------------------------------------------

    def test_add_owner_not_logged_in(self):
        response = self._post_permission({"username": "user2", "action": "add", "relation": "owner"})
        self.assertEqual(response.status_code, 403)

    def test_add_owner_not_in_owners(self):
        response = self._post_permission({"username": "user2", "action": "add", "relation": "owner"}, login_as="user2")
        self.assertEqual(response.status_code, 403)

    def test_add_owner_user_not_exist(self):
        response = self._post_permission({"username": "userNotExist", "action": "add", "relation": "owner"}, login_as="user1")
        self.assertEqual(response.status_code, 404)

    def test_add_owner_success(self):
        self.assertTrue(User.objects.get(username__exact="user2") not in Package.objects.get(name="testPackage1").owners.distinct())
        response = self._post_permission({"username": "user2", "action": "add", "relation": "owner"}, login_as="user1")
        self.assertEqual(response.status_code, 200)
        self.assertTrue(User.objects.get(username__exact="user2") in Package.objects.get(name="testPackage1").owners.distinct())

    # --- deleting owners (the last owner can never be removed) --------------

    def test_delete_owner_last_owner(self):
        response = self._post_permission({"username": "user1", "action": "delete", "relation": "owner"}, login_as="user1")
        self.assertEqual(response.status_code, 403)

    def _add_user2_to_owners(self):
        # At least two owners are required before one can be deleted.
        user2 = User.objects.get(username__exact="user2")
        p1 = Package.objects.get(name="testPackage1")
        p1.owners.add(user2)
        self.assertTrue(user2 in p1.owners.distinct())

    def test_delete_owner_not_logged_in(self):
        self._add_user2_to_owners()
        response = self._post_permission({"username": "user1", "action": "delete", "relation": "owner"})
        self.assertEqual(response.status_code, 403)

    def test_delete_owner_not_in_owners(self):
        self._add_user2_to_owners()
        response = self._post_permission({"username": "user1", "action": "delete", "relation": "owner"}, login_as="user3")
        self.assertEqual(response.status_code, 403)

    def test_delete_owner_user_not_exist(self):
        self._add_user2_to_owners()
        response = self._post_permission({"username": "userNotExist", "action": "delete", "relation": "owner"}, login_as="user1")
        self.assertEqual(response.status_code, 404)

    def test_delete_owner_success(self):
        self._add_user2_to_owners()
        self.assertTrue(User.objects.get(username__exact="user2") in Package.objects.get(name="testPackage1").owners.distinct())
        response = self._post_permission({"username": "user2", "action": "delete", "relation": "owner"}, login_as="user1")
        self.assertEqual(response.status_code, 200)
        self.assertTrue(User.objects.get(username__exact="user2") not in Package.objects.get(name="testPackage1").owners.distinct())

    # --- adding maintainers -------------------------------------------------

    def test_add_maintainer_not_logged_in(self):
        response = self._post_permission({"username": "user3", "action": "add", "relation": "maintainer"})
        self.assertEqual(response.status_code, 403)

    def test_add_maintainer_not_in_owners(self):
        response = self._post_permission({"username": "user3", "action": "add", "relation": "maintainer"}, login_as="user2")
        self.assertEqual(response.status_code, 403)

    def test_add_maintainer_user_not_exist(self):
        response = self._post_permission({"username": "userNotExist", "action": "add", "relation": "maintainer"}, login_as="user1")
        self.assertEqual(response.status_code, 404)

    def test_add_maintainer_success(self):
        self.assertTrue(User.objects.get(username__exact="user3") not in Package.objects.get(name="testPackage1").maintainers.distinct())
        response = self._post_permission({"username": "user3", "action": "add", "relation": "maintainer"}, login_as="user1")
        self.assertEqual(response.status_code, 200)
        self.assertTrue(User.objects.get(username__exact="user3") in Package.objects.get(name="testPackage1").maintainers.distinct())

    # --- deleting maintainers -----------------------------------------------

    def test_delete_maintainer_not_logged_in(self):
        self._add_user2_to_owners()
        response = self._post_permission({"username": "user2", "action": "delete", "relation": "maintainer"})
        self.assertEqual(response.status_code, 403)

    def test_delete_maintainer_not_in_owners(self):
        response = self._post_permission({"username": "user2", "action": "delete", "relation": "maintainer"}, login_as="user3")
        self.assertEqual(response.status_code, 403)

    def test_delete_maintainer_user_notExist(self):
        response = self._post_permission({"username": "userNotExist", "action": "delete", "relation": "maintainer"}, login_as="user1")
        self.assertEqual(response.status_code, 404)

    def test_delete_maintainer_success(self):
        self.assertTrue(User.objects.get(username__exact="user2") in Package.objects.get(name="testPackage1").maintainers.distinct())
        response = self._post_permission({"username": "user2", "action": "delete", "relation": "maintainer"}, login_as="user1")
        self.assertEqual(response.status_code, 200)
        self.assertTrue(User.objects.get(username__exact="user2") not in Package.objects.get(name="testPackage1").maintainers.distinct())
class ReleaseDetailTest(TestCase):
    """Tests for the release detail view."""

    fixtures = ["test_pypi_package"]
    release_url = reverse("djangopypi2-release", kwargs = {"package_name": "testPackage1", "version": "1.0.0"})

    def test_handler(self):
        # The URL must resolve to the ReleaseDetails class-based view.
        self.assertEqual(resolve(self.release_url).func.func_name, release_views.ReleaseDetails.as_view().func_name)

    def test_get(self):
        # Needed because the actual file is not there for unit tests.
        patcher = patch("os.path.getsize")
        self.addCleanup(patcher.stop)
        self.mock_getsize = patcher.start()
        self.mock_getsize.return_value = "mockSize"
        client = Client()
        response = client.get(self.release_url)
        self.assertEqual(response.status_code, 200)

    def test_package_not_exist(self):
        release_url = reverse("djangopypi2-release", kwargs = {"package_name": "packageNotExist", "version": "1.0.0"})
        client = Client()
        response = client.get(release_url)
        self.assertEqual(response.status_code, 404)

    def test_release_not_exist(self):
        # Bug fix: use an existing package with a nonexistent version so this
        # actually exercises the missing-release path (previously the package
        # name was also nonexistent, duplicating test_package_not_exist).
        release_url = reverse("djangopypi2-release", kwargs = {"package_name": "testPackage1", "version": "99.99.99"})
        client = Client()
        response = client.get(release_url)
        self.assertEqual(response.status_code, 404)
class ReleaseDeleteTest(TestCase):
    """Tests for deleting a single release of a package."""

    fixtures = ["test_pypi_package"]
    delete_release_url = reverse("djangopypi2-release-delete", kwargs = {"package_name": "testPackage1", "version": "1.0.0"})

    def _logged_in_client(self, username):
        # Return a test client authenticated as `username`.
        client = Client()
        client.login(username=username, password="password")
        return client

    def test_handler(self):
        view_func = resolve(self.delete_release_url).func
        self.assertEqual(view_func.func_name, release_views.DeleteRelease.as_view().func_name)

    def test_not_logged_in(self):
        self.assertEqual(Client().get(self.delete_release_url).status_code, 403)

    def test_not_owners(self):
        client = self._logged_in_client("user2")
        self.assertEqual(client.get(self.delete_release_url).status_code, 403)

    def test_package_not_exist(self):
        url = reverse("djangopypi2-release-delete", kwargs = {"package_name": "packageNotExist", "version": "1.0.0"})
        client = self._logged_in_client("user1")
        self.assertEqual(client.post(url).status_code, 403)

    def test_release_not_exist(self):
        url = reverse("djangopypi2-release-delete", kwargs = {"package_name": "testPackage1", "version": "99.99.99"})
        client = self._logged_in_client("user1")
        self.assertEqual(client.post(url).status_code, 404)

    def test_delete_success(self):
        # Mock os.remove because the distribution file doesn't actually exist.
        patcher = patch("os.remove")
        self.addCleanup(patcher.stop)
        self.mock_remove = patcher.start()
        self.assertTrue(Release.objects.filter(package__name__exact="testPackage1", version__exact="1.0.0").exists())
        client = self._logged_in_client("user1")
        response = client.post(self.delete_release_url)
        self.assertEqual(response.status_code, 302)
        self.assertFalse(Release.objects.filter(package__name__exact="testPackage1", version__exact="1.0.0").exists())
        self.assertTrue(self.mock_remove.called)
class ReleaseManageTest(TestCase):
    """Tests for editing a release's details (ManageRelease view).

    Uses assertEqual throughout; the assertEquals alias is deprecated.
    """

    fixtures = ["test_pypi_package"]
    edit_details_url = reverse("djangopypi2-release-edit-details", kwargs = {"package_name": "testPackage1", "version": "1.0.0"})

    def test_handler(self):
        self.assertEqual(resolve(self.edit_details_url).func.func_name, release_views.ManageRelease.as_view().func_name)

    def test_get_not_logged_in(self):
        client = Client()
        response = client.get(self.edit_details_url)
        self.assertEqual(response.status_code, 403)

    def test_get_not_maintainers(self):
        # user3 is not a maintainer of testPackage1.
        client = Client()
        client.login(username="user3", password="password")
        response = client.get(self.edit_details_url)
        self.assertEqual(response.status_code, 403)

    def test_get_package_not_exist(self):
        edit_details_url = reverse("djangopypi2-release-edit-details", kwargs = {"package_name": "packageNotExist", "version": "1.0.0"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(edit_details_url)
        self.assertEqual(response.status_code, 403)

    def test_get_release_not_exist(self):
        edit_details_url = reverse("djangopypi2-release-edit-details", kwargs = {"package_name": "testPackage1", "version": "99.99.99"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(edit_details_url)
        self.assertEqual(response.status_code, 404)

    def test_get_success(self):
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(self.edit_details_url)
        self.assertEqual(response.status_code, 200)

    def test_post_not_logged_in(self):
        client = Client()
        response = client.post(self.edit_details_url)
        self.assertEqual(response.status_code, 403)

    def test_post_not_maintainers(self):
        client = Client()
        client.login(username="user3", password="password")
        response = client.post(self.edit_details_url, {"hidden": "1"})
        self.assertEqual(response.status_code, 403)

    def test_post_package_not_exist(self):
        edit_details_url = reverse("djangopypi2-release-edit-details", kwargs = {"package_name": "packageNotExist", "version": "1.0.0"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(edit_details_url, {"hidden": "1"})
        self.assertEqual(response.status_code, 403)

    def test_post_release_not_exist(self):
        edit_details_url = reverse("djangopypi2-release-edit-details", kwargs = {"package_name": "testPackage1", "version": "99.99.99"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(edit_details_url, {"hidden": "1"})
        self.assertEqual(response.status_code, 404)

    def test_post_success(self):
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(self.edit_details_url, {"metadata_version": "1.2"})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Release.objects.filter(package__name__exact="testPackage1", version__exact="1.0.0")[0].metadata_version, "1.2")
class ReleaseManageMetadata(TestCase):
    """Tests for editing a release's metadata (manage_metadata view).

    Bug fix: the *_not_exist tests previously reversed the
    "djangopypi2-release-edit-details" URL (a copy-paste from
    ReleaseManageTest) and so exercised the wrong view; they now reverse
    "djangopypi2-release-edit-metadata".  Also uses assertEqual instead of
    the deprecated assertEquals alias.
    """

    fixtures = ["test_pypi_package"]
    edit_metadata_url = reverse("djangopypi2-release-edit-metadata", kwargs = {"package_name": "testPackage1", "version": "1.0.0"})

    def test_handler(self):
        self.assertEqual(resolve(self.edit_metadata_url).func.func_name, release_views.manage_metadata.func_name)

    def test_get_not_logged_in(self):
        client = Client()
        response = client.get(self.edit_metadata_url)
        self.assertEqual(response.status_code, 403)

    def test_get_not_maintainers(self):
        client = Client()
        client.login(username="user3", password="password")
        response = client.get(self.edit_metadata_url)
        self.assertEqual(response.status_code, 403)

    def test_get_package_not_exist(self):
        edit_metadata_url = reverse("djangopypi2-release-edit-metadata", kwargs = {"package_name": "packageNotExist", "version": "1.0.0"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(edit_metadata_url)
        self.assertEqual(response.status_code, 403)

    def test_get_release_not_exist(self):
        edit_metadata_url = reverse("djangopypi2-release-edit-metadata", kwargs = {"package_name": "testPackage1", "version": "99.99.99"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(edit_metadata_url)
        self.assertEqual(response.status_code, 404)

    def test_get_success(self):
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(self.edit_metadata_url)
        self.assertEqual(response.status_code, 200)

    def test_post_not_logged_in(self):
        client = Client()
        response = client.post(self.edit_metadata_url)
        self.assertEqual(response.status_code, 403)

    def test_post_not_maintainers(self):
        client = Client()
        client.login(username="user3", password="password")
        response = client.post(self.edit_metadata_url)
        self.assertEqual(response.status_code, 403)

    def test_post_package_not_exist(self):
        edit_metadata_url = reverse("djangopypi2-release-edit-metadata", kwargs = {"package_name": "packageNotExist", "version": "1.0.0"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(edit_metadata_url)
        self.assertEqual(response.status_code, 403)

    def test_post_release_not_exist(self):
        edit_metadata_url = reverse("djangopypi2-release-edit-metadata", kwargs = {"package_name": "testPackage1", "version": "99.99.99"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(edit_metadata_url)
        self.assertEqual(response.status_code, 404)

    def test_post_success(self):
        client = Client()
        client.login(username="user1", password="password")
        data = {
            "keywords": "test",
            "author_email": "<EMAIL>",
            "license": "BSD",
            "summary": "test summary"
        }
        response = client.post(self.edit_metadata_url, data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Release.objects.filter(package__name__exact="testPackage1", version__exact="1.0.0")[0].summary, "test summary")
class ReleaseManageFile(TestCase):
    """Tests for managing a release's distribution files (manage_files view).

    Bug fix: the *_not_exist tests previously reversed the
    "djangopypi2-release-edit-details" URL (a copy-paste from
    ReleaseManageTest) and so exercised the wrong view; they now reverse
    "djangopypi2-release-manage-files".  Also uses assertEqual instead of
    the deprecated assertEquals alias.
    """

    fixtures = ["test_pypi_package"]
    manage_file_url = reverse("djangopypi2-release-manage-files", kwargs = {"package_name": "testPackage1", "version": "1.0.0"})

    def test_handler(self):
        self.assertEqual(resolve(self.manage_file_url).func.func_name, release_views.manage_files.func_name)

    def test_get_not_logged_in(self):
        client = Client()
        response = client.get(self.manage_file_url)
        self.assertEqual(response.status_code, 403)

    def test_get_not_maintainers(self):
        client = Client()
        client.login(username="user3", password="password")
        response = client.get(self.manage_file_url)
        self.assertEqual(response.status_code, 403)

    def test_get_package_not_exist(self):
        manage_file_url = reverse("djangopypi2-release-manage-files", kwargs = {"package_name": "packageNotExist", "version": "1.0.0"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(manage_file_url)
        self.assertEqual(response.status_code, 403)

    def test_get_release_not_exist(self):
        manage_file_url = reverse("djangopypi2-release-manage-files", kwargs = {"package_name": "testPackage1", "version": "99.99.99"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(manage_file_url)
        self.assertEqual(response.status_code, 404)

    def test_get_success(self):
        # Needed because the actual file is not there for unit tests.
        patcher = patch("os.path.getsize")
        self.addCleanup(patcher.stop)
        self.mock_getsize = patcher.start()
        self.mock_getsize.return_value = "mockSize"
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(self.manage_file_url)
        self.assertEqual(response.status_code, 200)

    def test_post_not_logged_in(self):
        client = Client()
        response = client.post(self.manage_file_url)
        self.assertEqual(response.status_code, 403)

    def test_post_not_maintainers(self):
        client = Client()
        client.login(username="user3", password="password")
        response = client.post(self.manage_file_url)
        self.assertEqual(response.status_code, 403)

    def test_post_package_not_exist(self):
        manage_file_url = reverse("djangopypi2-release-manage-files", kwargs = {"package_name": "packageNotExist", "version": "1.0.0"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(manage_file_url)
        self.assertEqual(response.status_code, 403)

    def test_post_release_not_exist(self):
        manage_file_url = reverse("djangopypi2-release-manage-files", kwargs = {"package_name": "testPackage1", "version": "99.99.99"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(manage_file_url)
        self.assertEqual(response.status_code, 404)

    def test_post_success(self):
        # Needed because the actual file is not there for unit tests.
        patcher = patch("os.path.getsize")
        self.addCleanup(patcher.stop)
        self.mock_getsize = patcher.start()
        self.mock_getsize.return_value = "mockSize"
        client = Client()
        client.login(username="user1", password="password")
        data = {
            "distributions-TOTAL_FORMS": 1,
            "distributions-INITIAL_FORMS": 1,
            "distributions-MAX_NUM_FORMS": 1000,
            "distributions-0-id": 1,
            "distributions-0-release": 1,
            "distributions-0-comment": "test comment"
        }
        response = client.post(self.manage_file_url, data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Distribution.objects.get(id=1).comment, "test comment")
class ReleaseUploadFile(TestCase):
    """Tests for uploading a distribution file to a release (upload_file view).

    Uses assertEqual throughout; the assertEquals alias is deprecated.
    """

    fixtures = ["test_pypi_package"]
    upload_file_url = reverse("djangopypi2-release-upload-file", kwargs = {"package_name": "testPackage1", "version": "1.0.0"})

    def test_handler(self):
        self.assertEqual(resolve(self.upload_file_url).func.func_name, release_views.upload_file.func_name)

    def test_get_not_logged_in(self):
        client = Client()
        response = client.get(self.upload_file_url)
        self.assertEqual(response.status_code, 403)

    def test_get_not_maintainers(self):
        client = Client()
        client.login(username="user3", password="password")
        response = client.get(self.upload_file_url)
        self.assertEqual(response.status_code, 403)

    def test_get_package_not_exist(self):
        upload_file_url = reverse("djangopypi2-release-upload-file", kwargs = {"package_name": "packageNotExist", "version": "1.0.0"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(upload_file_url)
        self.assertEqual(response.status_code, 403)

    def test_get_release_not_exist(self):
        upload_file_url = reverse("djangopypi2-release-upload-file", kwargs = {"package_name": "testPackage1", "version": "99.99.99"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(upload_file_url)
        self.assertEqual(response.status_code, 404)

    def test_get_success(self):
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(self.upload_file_url)
        self.assertEqual(response.status_code, 200)

    def test_post_not_logged_in(self):
        client = Client()
        response = client.post(self.upload_file_url)
        self.assertEqual(response.status_code, 403)

    def test_post_not_maintainers(self):
        client = Client()
        client.login(username="user3", password="password")
        response = client.post(self.upload_file_url)
        self.assertEqual(response.status_code, 403)

    def test_post_package_not_exist(self):
        upload_file_url = reverse("djangopypi2-release-upload-file", kwargs = {"package_name": "packageNotExist", "version": "1.0.0"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(upload_file_url)
        self.assertEqual(response.status_code, 403)

    def test_post_release_not_exist(self):
        upload_file_url = reverse("djangopypi2-release-upload-file", kwargs = {"package_name": "testPackage1", "version": "99.99.99"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(upload_file_url)
        self.assertEqual(response.status_code, 404)

    # Need to mock save() so no file will actually get saved to disk.
    @patch.object(Distribution, "save")
    def test_post_success(self, mock_save):
        client = Client()
        client.login(username="user1", password="password")
        # Hack to make the setting available on unit tests
        from django.conf import settings
        settings.DJANGOPYPI_ALLOW_VERSION_OVERWRITE = False
        # Create a mock uploaded File object.
        mock_file = Mock(spec=File)
        mock_file.name = "mock object"
        mock_file.read.return_value = "fake file contents"
        data = {
            "content": mock_file,
            "comment": "test comment",
            "filetype": "sdist",
            "pyversion": 1
        }
        response = client.post(self.upload_file_url, data)
        self.assertEqual(response.status_code, 302)
|
#######################
from __future__ import unicode_literals, print_function
#######################
from django.contrib.auth import login
from django.contrib.auth.models import User
from django.core.files import File
from django.core.urlresolvers import resolve, reverse
from django.test import Client, TestCase
from mock import *
from . import package_views, release_views
from .models import *
class ListViewTest(TestCase):
    """Tests for the package index view: listing and simple search."""
    fixtures = ["test_pypi_package"]
    list_package_url = reverse("djangopypi2-packages-index")

    def test_handler(self):
        # The URL must resolve to the Index class-based view.
        self.assertEqual(resolve(self.list_package_url).func.func_name, package_views.Index.as_view().func_name)

    def test_list(self):
        client = Client()
        response = client.get(self.list_package_url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "testPackage1")
        self.assertContains(response, "testPackage2")

    def test_search(self):
        # A matching query narrows the listing to matching packages.
        client = Client()
        response = client.get(self.list_package_url, {"query": "package1"})
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "testPackage1")
        self.assertNotContains(response, "testPackage2")

    def test_search_not_exist(self):
        # A query with no matches still renders (HTTP 200) with no packages.
        client = Client()
        response = client.get(self.list_package_url, {"query": "packageNotExist"})
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, "testPackage1")
        self.assertNotContains(response, "testPackage2")
class AdvancedSearchTest(TestCase):
    """Tests for the advanced package search view."""
    fixtures = ["test_pypi_package"]
    search_package_url = reverse("djangopypi2-packages-search")

    def test_handler(self):
        self.assertEqual(resolve(self.search_package_url).func.func_name, package_views.advanced_search.func_name)

    def test_get(self):
        # The search form itself must be reachable.
        client = Client()
        response = client.get(self.search_package_url)
        self.assertEqual(response.status_code, 200)

    def test_advanced_search_name(self):
        client = Client()
        response = client.post(self.search_package_url, {"name": "testPackage1"})
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "testPackage1")
        self.assertNotContains(response, "testPackage2")

    def test_advanced_search_name_not_exist(self):
        # Searching for an unknown name renders an empty result set.
        client = Client()
        response = client.post(self.search_package_url, {"name": "packageNotExist"})
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, "testPackage1")
        self.assertNotContains(response, "testPackage2")
class PackageDetailTest(TestCase):
    """Tests for the single-package detail view."""
    fixtures = ["test_pypi_package"]

    def test_handler(self):
        # The URL must resolve to the PackageDetails class-based view.
        package_url = reverse("djangopypi2-package", kwargs = {"package_name": "anyPackage"})
        self.assertEqual(resolve(package_url).func.func_name, package_views.PackageDetails.as_view().func_name)

    def test_get(self):
        package_url = reverse("djangopypi2-package", kwargs = {"package_name": "testPackage1"})
        client = Client()
        response = client.get(package_url)
        self.assertEqual(response.status_code, 200)

    def test_package_not_exist(self):
        # Unknown packages yield a 404.
        package_url = reverse("djangopypi2-package", kwargs = {"package_name": "packageNotExist"})
        client = Client()
        response = client.get(package_url)
        self.assertEqual(response.status_code, 404)
class PackageDeleteTest(TestCase):
    """Tests for deleting a whole package via the DeletePackage view."""
    fixtures = ["test_pypi_package"]

    def test_handler(self):
        package_url = reverse("djangopypi2-package-delete", kwargs = {"package_name": "anyPackage"})
        self.assertEqual(resolve(package_url).func.func_name, package_views.DeletePackage.as_view().func_name)

    def test_not_logged_in(self):
        # Anonymous users may not delete packages.
        delete_package_url = reverse("djangopypi2-package-delete", kwargs = {"package_name": "anyPackage"})
        client = Client()
        response = client.get(delete_package_url)
        self.assertEqual(response.status_code, 403)

    def test_not_owners(self):
        # user2 does not own testPackage1, so deletion is forbidden.
        delete_package_url = reverse("djangopypi2-package-delete", kwargs = {"package_name": "testPackage1"})
        client = Client()
        client.login(username="user2", password="password")
        response = client.get(delete_package_url)
        self.assertEqual(response.status_code, 403)

    def test_package_not_exist(self):
        delete_package_url = reverse("djangopypi2-package-delete", kwargs = {"package_name": "packageNotExist"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(delete_package_url)
        # 403 because of user_owns_package implementation
        self.assertEqual(response.status_code, 403)

    def test_delete_success(self):
        # Need to mock os.remove because the package file doesn't actually exist
        patcher = patch("os.remove")
        self.addCleanup(patcher.stop)
        self.mock_remove = patcher.start()
        self.assertTrue(Package.objects.filter(name__exact = "testPackage1").exists())
        delete_package_url = reverse("djangopypi2-package-delete", kwargs = {"package_name": "testPackage1"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(delete_package_url)
        self.assertEqual(response.status_code, 302)
        self.assertFalse(Package.objects.filter(name__exact = "testPackage1").exists())
        self.assertTrue(self.mock_remove.called)
class PackagePermissionTest(TestCase):
    """Tests for adding/removing package owners and maintainers.

    Fixture roles (inferred from the assertions below -- confirm against
    test_pypi_package): user1 owns testPackage1, user2 is a maintainer of
    it, user3 has no relation to it.
    """
    fixtures = ["test_pypi_package"]
    package1_permission_url = reverse("djangopypi2-package-permission", kwargs = {"package_name": "testPackage1"})
    def test_handler(self):
        """The permission URL resolves to the PackagePermission view."""
        package_permission_url = reverse("djangopypi2-package-permission", kwargs = {"package_name": "anyPackage"})
        self.assertEqual(resolve(package_permission_url).func.func_name, package_views.PackagePermission.as_view().func_name)
    # Test add owner
    def test_add_owner_not_logged_in(self):
        """Anonymous requests may not change permissions."""
        client = Client()
        response = client.post(self.package1_permission_url, {"username": "user2", "action": "add", "relation": "owner"})
        self.assertEqual(response.status_code, 403)
    def test_add_owner_not_in_owners(self):
        """Only owners may add owners."""
        client = Client()
        client.login(username="user2", password="password")
        response = client.post(self.package1_permission_url, {"username": "user2", "action": "add", "relation": "owner"})
        self.assertEqual(response.status_code, 403)
    def test_add_owner_user_not_exist(self):
        """Adding an unknown user yields 404."""
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(self.package1_permission_url, {"username": "userNotExist", "action": "add", "relation": "owner"})
        self.assertEqual(response.status_code, 404)
    def test_add_owner_success(self):
        """An owner can add another user as owner."""
        self.assertTrue(User.objects.get(username__exact="user2") not in Package.objects.get(name="testPackage1").owners.distinct())
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(self.package1_permission_url, {"username": "user2", "action": "add", "relation": "owner"})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(User.objects.get(username__exact="user2") in Package.objects.get(name="testPackage1").owners.distinct())
    # Test delete owner, cannot delete last owner
    def test_delete_owner_last_owner(self):
        """The last remaining owner cannot be removed."""
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(self.package1_permission_url, {"username": "user1", "action": "delete", "relation": "owner"})
        self.assertEqual(response.status_code, 403)
    # Need at least 2 owners to be able to delete 1
    def _add_user2_to_owners(self):
        """Test helper: make user2 a second owner of testPackage1."""
        user2 = User.objects.get(username__exact="user2")
        p1 = Package.objects.get(name="testPackage1")
        p1.owners.add(user2)
        self.assertTrue(user2 in p1.owners.distinct())
    def test_delete_owner_not_logged_in(self):
        self._add_user2_to_owners()
        client = Client()
        response = client.post(self.package1_permission_url, {"username": "user1", "action": "delete", "relation": "owner"})
        self.assertEqual(response.status_code, 403)
    def test_delete_owner_not_in_owners(self):
        self._add_user2_to_owners()
        client = Client()
        client.login(username="user3", password="password")
        response = client.post(self.package1_permission_url, {"username": "user1", "action": "delete", "relation": "owner"})
        self.assertEqual(response.status_code, 403)
    def test_delete_owner_user_not_exist(self):
        self._add_user2_to_owners()
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(self.package1_permission_url, {"username": "userNotExist", "action": "delete", "relation": "owner"})
        self.assertEqual(response.status_code, 404)
    def test_delete_owner_success(self):
        """An owner can remove a co-owner."""
        self._add_user2_to_owners()
        self.assertTrue(User.objects.get(username__exact="user2") in Package.objects.get(name="testPackage1").owners.distinct())
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(self.package1_permission_url, {"username": "user2", "action": "delete", "relation": "owner"})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(User.objects.get(username__exact="user2") not in Package.objects.get(name="testPackage1").owners.distinct())
    # Test add maintainer
    def test_add_maintainer_not_logged_in(self):
        client = Client()
        response = client.post(self.package1_permission_url, {"username": "user3", "action": "add", "relation": "maintainer"})
        self.assertEqual(response.status_code, 403)
    def test_add_maintainer_not_in_owners(self):
        client = Client()
        client.login(username="user2", password="password")
        response = client.post(self.package1_permission_url, {"username": "user3", "action": "add", "relation": "maintainer"})
        self.assertEqual(response.status_code, 403)
    def test_add_maintainer_user_not_exist(self):
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(self.package1_permission_url, {"username": "userNotExist", "action": "add", "relation": "maintainer"})
        self.assertEqual(response.status_code, 404)
    def test_add_maintainer_success(self):
        """An owner can add a maintainer."""
        self.assertTrue(User.objects.get(username__exact="user3") not in Package.objects.get(name="testPackage1").maintainers.distinct())
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(self.package1_permission_url, {"username": "user3", "action": "add", "relation": "maintainer"})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(User.objects.get(username__exact="user3") in Package.objects.get(name="testPackage1").maintainers.distinct())
    # Test delete maintainer
    def test_delete_maintainer_not_logged_in(self):
        # Fixed: the original called self._add_user2_to_owners() here, an
        # apparent copy-paste from the delete-owner tests.  The mutation is
        # irrelevant to this test: the request is unauthenticated, so the
        # permission check rejects it with 403 regardless of ownership.
        client = Client()
        response = client.post(self.package1_permission_url, {"username": "user2", "action": "delete", "relation": "maintainer"})
        self.assertEqual(response.status_code, 403)
    def test_delete_maintainer_not_in_owners(self):
        client = Client()
        client.login(username="user3", password="password")
        response = client.post(self.package1_permission_url, {"username": "user2", "action": "delete", "relation": "maintainer"})
        self.assertEqual(response.status_code, 403)
    def test_delete_maintainer_user_notExist(self):
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(self.package1_permission_url, {"username": "userNotExist", "action": "delete", "relation": "maintainer"})
        self.assertEqual(response.status_code, 404)
    def test_delete_maintainer_success(self):
        """An owner can remove a maintainer (user2 comes from the fixture)."""
        self.assertTrue(User.objects.get(username__exact="user2") in Package.objects.get(name="testPackage1").maintainers.distinct())
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(self.package1_permission_url, {"username": "user2", "action": "delete", "relation": "maintainer"})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(User.objects.get(username__exact="user2") not in Package.objects.get(name="testPackage1").maintainers.distinct())
class ReleaseDetailTest(TestCase):
    """Tests for the release detail page."""
    fixtures = ["test_pypi_package"]
    release_url = reverse("djangopypi2-release", kwargs = {"package_name": "testPackage1", "version": "1.0.0"})
    def test_handler(self):
        """The release URL resolves to the ReleaseDetails view."""
        self.assertEqual(resolve(self.release_url).func.func_name, release_views.ReleaseDetails.as_view().func_name)
    def test_get(self):
        """An existing release renders with status 200."""
        # Needed because the actual file is not there for unit tests.
        patcher = patch("os.path.getsize")
        self.addCleanup(patcher.stop)
        self.mock_getsize = patcher.start()
        self.mock_getsize.return_value = "mockSize"
        client = Client()
        response = client.get(self.release_url)
        self.assertEqual(response.status_code, 200)
    def test_package_not_exist(self):
        """An unknown package yields 404."""
        release_url = reverse("djangopypi2-release", kwargs = {"package_name": "packageNotExist", "version": "1.0.0"})
        client = Client()
        response = client.get(release_url)
        self.assertEqual(response.status_code, 404)
    def test_release_not_exist(self):
        """An unknown version of an *existing* package yields 404.

        Fixed: the original used package_name="packageNotExist", which made
        this test a duplicate of test_package_not_exist instead of
        exercising the missing-release branch.
        """
        release_url = reverse("djangopypi2-release", kwargs = {"package_name": "testPackage1", "version": "99.99.99"})
        client = Client()
        response = client.get(release_url)
        self.assertEqual(response.status_code, 404)
class ReleaseDeleteTest(TestCase):
    """Exercise the release-deletion view."""

    fixtures = ["test_pypi_package"]
    delete_release_url = reverse("djangopypi2-release-delete", kwargs={"package_name": "testPackage1", "version": "1.0.0"})

    def _logged_in_client(self, username):
        # Helper: a test client already authenticated as *username*.
        c = Client()
        c.login(username=username, password="password")
        return c

    def test_handler(self):
        """The delete URL resolves to the DeleteRelease view."""
        self.assertEqual(resolve(self.delete_release_url).func.func_name, release_views.DeleteRelease.as_view().func_name)

    def test_not_logged_in(self):
        """Anonymous users are rejected with 403."""
        self.assertEqual(Client().get(self.delete_release_url).status_code, 403)

    def test_not_owners(self):
        """A non-owner is rejected with 403."""
        self.assertEqual(self._logged_in_client("user2").get(self.delete_release_url).status_code, 403)

    def test_package_not_exist(self):
        """Deleting a release of an unknown package is rejected with 403."""
        url = reverse("djangopypi2-release-delete", kwargs={"package_name": "packageNotExist", "version": "1.0.0"})
        self.assertEqual(self._logged_in_client("user1").post(url).status_code, 403)

    def test_release_not_exist(self):
        """Deleting an unknown version yields 404."""
        url = reverse("djangopypi2-release-delete", kwargs={"package_name": "testPackage1", "version": "99.99.99"})
        self.assertEqual(self._logged_in_client("user1").post(url).status_code, 404)

    def test_delete_success(self):
        """An owner can delete; the row disappears and the file is removed."""
        # Need to mock os.remove because the package file doesn't actually exist
        patcher = patch("os.remove")
        self.addCleanup(patcher.stop)
        self.mock_remove = patcher.start()
        release_qs = Release.objects.filter(package__name__exact="testPackage1", version__exact="1.0.0")
        self.assertTrue(release_qs.exists())
        response = self._logged_in_client("user1").post(self.delete_release_url)
        self.assertEqual(response.status_code, 302)
        self.assertFalse(release_qs.exists())
        self.assertTrue(self.mock_remove.called)
class ReleaseManageTest(TestCase):
    """Tests for the release-details editing view (ManageRelease).

    Fix: the original used self.assertEquals, a deprecated alias of
    assertEqual; replaced throughout.
    """
    fixtures = ["test_pypi_package"]
    edit_details_url = reverse("djangopypi2-release-edit-details", kwargs = {"package_name": "testPackage1", "version": "1.0.0"})
    def test_handler(self):
        """The URL resolves to the ManageRelease view."""
        self.assertEqual(resolve(self.edit_details_url).func.func_name, release_views.ManageRelease.as_view().func_name)
    def test_get_not_logged_in(self):
        client = Client()
        response = client.get(self.edit_details_url)
        self.assertEqual(response.status_code, 403)
    def test_get_not_maintainers(self):
        client = Client()
        client.login(username="user3", password="password")
        response = client.get(self.edit_details_url)
        self.assertEqual(response.status_code, 403)
    def test_get_package_not_exist(self):
        """Unknown package: 403 (the permission check fires first)."""
        edit_details_url = reverse("djangopypi2-release-edit-details", kwargs = {"package_name": "packageNotExist", "version": "1.0.0"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(edit_details_url)
        self.assertEqual(response.status_code, 403)
    def test_get_release_not_exist(self):
        """Unknown version of a maintained package: 404."""
        edit_details_url = reverse("djangopypi2-release-edit-details", kwargs = {"package_name": "testPackage1", "version": "99.99.99"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(edit_details_url)
        self.assertEqual(response.status_code, 404)
    def test_get_success(self):
        """A maintainer can open the edit form."""
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(self.edit_details_url)
        self.assertEqual(response.status_code, 200)
    def test_post_not_logged_in(self):
        client = Client()
        response = client.post(self.edit_details_url)
        self.assertEqual(response.status_code, 403)
    def test_post_not_maintainers(self):
        client = Client()
        client.login(username="user3", password="password")
        response = client.post(self.edit_details_url, {"hidden": "1"})
        self.assertEqual(response.status_code, 403)
    def test_post_package_not_exist(self):
        edit_details_url = reverse("djangopypi2-release-edit-details", kwargs = {"package_name": "packageNotExist", "version": "1.0.0"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(edit_details_url, {"hidden": "1"})
        self.assertEqual(response.status_code, 403)
    def test_post_release_not_exist(self):
        edit_details_url = reverse("djangopypi2-release-edit-details", kwargs = {"package_name": "testPackage1", "version": "99.99.99"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(edit_details_url, {"hidden": "1"})
        self.assertEqual(response.status_code, 404)
    def test_post_success(self):
        """A valid POST redirects and persists the edited field."""
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(self.edit_details_url, {"metadata_version": "1.2"})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Release.objects.filter(package__name__exact="testPackage1", version__exact="1.0.0")[0].metadata_version, "1.2")
class ReleaseManageMetadata(TestCase):
    """Tests for the release-metadata editing view (manage_metadata).

    Fix: deprecated self.assertEquals replaced with assertEqual.
    NOTE(review): the *_not_exist tests below reverse
    "djangopypi2-release-edit-details" (the details URL) rather than
    "...-edit-metadata" -- this looks like a copy-paste from
    ReleaseManageTest.  Left unchanged here because the expected status
    codes were written against that URL; confirm and retarget separately.
    """
    fixtures = ["test_pypi_package"]
    edit_metadata_url = reverse("djangopypi2-release-edit-metadata", kwargs = {"package_name": "testPackage1", "version": "1.0.0"})
    def test_handler(self):
        """The URL resolves to the manage_metadata view function."""
        self.assertEqual(resolve(self.edit_metadata_url).func.func_name, release_views.manage_metadata.func_name)
    def test_get_not_logged_in(self):
        client = Client()
        response = client.get(self.edit_metadata_url)
        self.assertEqual(response.status_code, 403)
    def test_get_not_maintainers(self):
        client = Client()
        client.login(username="user3", password="password")
        response = client.get(self.edit_metadata_url)
        self.assertEqual(response.status_code, 403)
    def test_get_package_not_exist(self):
        edit_metadata_url = reverse("djangopypi2-release-edit-details", kwargs = {"package_name": "packageNotExist", "version": "1.0.0"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(edit_metadata_url)
        self.assertEqual(response.status_code, 403)
    def test_get_release_not_exist(self):
        edit_metadata_url = reverse("djangopypi2-release-edit-details", kwargs = {"package_name": "testPackage1", "version": "99.99.99"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(edit_metadata_url)
        self.assertEqual(response.status_code, 404)
    def test_get_success(self):
        """A maintainer can open the metadata form."""
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(self.edit_metadata_url)
        self.assertEqual(response.status_code, 200)
    def test_post_not_logged_in(self):
        client = Client()
        response = client.post(self.edit_metadata_url)
        self.assertEqual(response.status_code, 403)
    def test_post_not_maintainers(self):
        client = Client()
        client.login(username="user3", password="password")
        response = client.post(self.edit_metadata_url)
        self.assertEqual(response.status_code, 403)
    def test_post_package_not_exist(self):
        edit_metadata_url = reverse("djangopypi2-release-edit-details", kwargs = {"package_name": "packageNotExist", "version": "1.0.0"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(edit_metadata_url)
        self.assertEqual(response.status_code, 403)
    def test_post_release_not_exist(self):
        edit_metadata_url = reverse("djangopypi2-release-edit-details", kwargs = {"package_name": "testPackage1", "version": "99.99.99"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(edit_metadata_url)
        self.assertEqual(response.status_code, 404)
    def test_post_success(self):
        """A valid POST redirects and persists the metadata fields."""
        client = Client()
        client.login(username="user1", password="password")
        data = {
            "keywords": "test",
            "author_email": "<EMAIL>",
            "license": "BSD",
            "summary": "test summary"
        }
        response = client.post(self.edit_metadata_url, data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Release.objects.filter(package__name__exact="testPackage1", version__exact="1.0.0")[0].summary, "test summary")
class ReleaseManageFile(TestCase):
    """Tests for the release file-management view (manage_files).

    Fix: deprecated self.assertEquals replaced with assertEqual.
    NOTE(review): the *_not_exist tests below reverse
    "djangopypi2-release-edit-details" rather than
    "...-release-manage-files" -- apparent copy-paste from
    ReleaseManageTest; left unchanged (expected codes were written
    against that URL), confirm and retarget separately.
    """
    fixtures = ["test_pypi_package"]
    manage_file_url = reverse("djangopypi2-release-manage-files", kwargs = {"package_name": "testPackage1", "version": "1.0.0"})
    def test_handler(self):
        """The URL resolves to the manage_files view function."""
        self.assertEqual(resolve(self.manage_file_url).func.func_name, release_views.manage_files.func_name)
    def test_get_not_logged_in(self):
        client = Client()
        response = client.get(self.manage_file_url)
        self.assertEqual(response.status_code, 403)
    def test_get_not_maintainers(self):
        client = Client()
        client.login(username="user3", password="password")
        response = client.get(self.manage_file_url)
        self.assertEqual(response.status_code, 403)
    def test_get_package_not_exist(self):
        manage_file_url = reverse("djangopypi2-release-edit-details", kwargs = {"package_name": "packageNotExist", "version": "1.0.0"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(manage_file_url)
        self.assertEqual(response.status_code, 403)
    def test_get_release_not_exist(self):
        manage_file_url = reverse("djangopypi2-release-edit-details", kwargs = {"package_name": "testPackage1", "version": "99.99.99"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(manage_file_url)
        self.assertEqual(response.status_code, 404)
    def test_get_success(self):
        """A maintainer can open the file-management form."""
        # Needed because the actual file is not there for unit tests.
        patcher = patch("os.path.getsize")
        self.addCleanup(patcher.stop)
        self.mock_getsize = patcher.start()
        self.mock_getsize.return_value = "mockSize"
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(self.manage_file_url)
        self.assertEqual(response.status_code, 200)
    def test_post_not_logged_in(self):
        client = Client()
        response = client.post(self.manage_file_url)
        self.assertEqual(response.status_code, 403)
    def test_post_not_maintainers(self):
        client = Client()
        client.login(username="user3", password="password")
        response = client.post(self.manage_file_url)
        self.assertEqual(response.status_code, 403)
    def test_post_package_not_exist(self):
        manage_file_url = reverse("djangopypi2-release-edit-details", kwargs = {"package_name": "packageNotExist", "version": "1.0.0"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(manage_file_url)
        self.assertEqual(response.status_code, 403)
    def test_post_release_not_exist(self):
        manage_file_url = reverse("djangopypi2-release-edit-details", kwargs = {"package_name": "testPackage1", "version": "99.99.99"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(manage_file_url)
        self.assertEqual(response.status_code, 404)
    def test_post_success(self):
        """A valid formset POST persists the distribution comment."""
        # Needed because the actual file is not there for unit tests.
        patcher = patch("os.path.getsize")
        self.addCleanup(patcher.stop)
        self.mock_getsize = patcher.start()
        self.mock_getsize.return_value = "mockSize"
        client = Client()
        client.login(username="user1", password="password")
        data = {
            "distributions-TOTAL_FORMS": 1,
            "distributions-INITIAL_FORMS": 1,
            "distributions-MAX_NUM_FORMS": 1000,
            "distributions-0-id": 1,
            "distributions-0-release": 1,
            "distributions-0-comment": "test comment"
        }
        response = client.post(self.manage_file_url, data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Distribution.objects.get(id=1).comment, "test comment")
class ReleaseUploadFile(TestCase):
    """Tests for the release file-upload view (upload_file).

    Fix: deprecated self.assertEquals replaced with assertEqual.
    """
    fixtures = ["test_pypi_package"]
    upload_file_url = reverse("djangopypi2-release-upload-file", kwargs = {"package_name": "testPackage1", "version": "1.0.0"})
    def test_handler(self):
        """The URL resolves to the upload_file view function."""
        self.assertEqual(resolve(self.upload_file_url).func.func_name, release_views.upload_file.func_name)
    def test_get_not_logged_in(self):
        client = Client()
        response = client.get(self.upload_file_url)
        self.assertEqual(response.status_code, 403)
    def test_get_not_maintainers(self):
        client = Client()
        client.login(username="user3", password="password")
        response = client.get(self.upload_file_url)
        self.assertEqual(response.status_code, 403)
    def test_get_package_not_exist(self):
        upload_file_url = reverse("djangopypi2-release-upload-file", kwargs = {"package_name": "packageNotExist", "version": "1.0.0"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(upload_file_url)
        self.assertEqual(response.status_code, 403)
    def test_get_release_not_exist(self):
        upload_file_url = reverse("djangopypi2-release-upload-file", kwargs = {"package_name": "testPackage1", "version": "99.99.99"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(upload_file_url)
        self.assertEqual(response.status_code, 404)
    def test_get_success(self):
        """A maintainer can open the upload form."""
        client = Client()
        client.login(username="user1", password="password")
        response = client.get(self.upload_file_url)
        self.assertEqual(response.status_code, 200)
    def test_post_not_logged_in(self):
        client = Client()
        response = client.post(self.upload_file_url)
        self.assertEqual(response.status_code, 403)
    def test_post_not_maintainers(self):
        client = Client()
        client.login(username="user3", password="password")
        response = client.post(self.upload_file_url)
        self.assertEqual(response.status_code, 403)
    def test_post_package_not_exist(self):
        upload_file_url = reverse("djangopypi2-release-upload-file", kwargs = {"package_name": "packageNotExist", "version": "1.0.0"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(upload_file_url)
        self.assertEqual(response.status_code, 403)
    def test_post_release_not_exist(self):
        upload_file_url = reverse("djangopypi2-release-upload-file", kwargs = {"package_name": "testPackage1", "version": "99.99.99"})
        client = Client()
        client.login(username="user1", password="password")
        response = client.post(upload_file_url)
        self.assertEqual(response.status_code, 404)
    # Need to mock save() function so no file will actually get saved to disk
    @patch.object(Distribution, "save")
    def test_post_success(self, mock_save):
        """A valid upload POST redirects (save() is mocked, no disk I/O)."""
        client = Client()
        client.login(username="user1", password="password")
        # Hack to make the setting available on unit tests
        from django.conf import settings
        settings.DJANGOPYPI_ALLOW_VERSION_OVERWRITE = False
        # Create mock File
        mock_file = Mock(spec=File)
        mock_file.name = "mock object"
        mock_file.read.return_value = "fake file contents"
        data = {
            "content": mock_file,
            "comment": "test comment",
            "filetype": "sdist",
            "pyversion": 1
        }
        response = client.post(self.upload_file_url, data)
        self.assertEqual(response.status_code, 302)
|
en
| 0.866842
|
####################### ####################### # 403 because of user_owns_package implementation # Need to mock os.remove because the package file doesn't actually exist # Test add owner # Test delete owner, cannot delete last owner # Need at least 2 owners to be able to delete 1 # Test add maintainer # Test delete maintainer # Needed because the actual file is not there for unit tests. # Need to mock os.remove because the package file doesn't actually exist # Needed because the actual file is not there for unit tests. # Needed because the actual file is not there for unit tests. # Need to mock save() function so no file will actually get saved to disk # Hack to make the setting available on unit tests # Create mock File
| 2.275617
| 2
|
tests/config.py
|
bayartsogt-ya/mn-polarity
| 4
|
6627214
|
<filename>tests/config.py
from mnpolarity.config import config
def test_config():
    """The project config exposes the expected top-level keys."""
    for key in ("package_dir", "data_dir", "twint"):
        assert key in config
|
<filename>tests/config.py
from mnpolarity.config import config
def test_config():
    """Smoke-test: the loaded config contains the expected top-level keys."""
    assert "package_dir" in config
    assert "data_dir" in config
    assert "twint" in config
|
none
| 1
| 1.888416
| 2
|
|
pred.py
|
HelloWorld8080/ocr
| 0
|
6627215
|
<gh_stars>0
import numpy as np
import tensorflow as tf
import cv2 as cv
from matplotlib import pyplot as plt
import tkinter as tk
def resizeandgray(img, show=False):
    """Convert each image in *img* to grayscale and resize it to 30x46.

    Parameters:
        img: iterable of BGR images (cv.imread convention, per
             COLOR_BGR2GRAY -- confirm with callers).
        show: when True, display each processed image in an 'xx' window.
              The original did this unconditionally; it looked like
              leftover debugging (cv.waitKey is never called, so the
              windows may not even render), so it is now opt-in.

    Returns:
        list of 30x46 single-channel images.
    """
    processed = [cv.resize(cv.cvtColor(i, cv.COLOR_BGR2GRAY), (30, 46)) for i in img]
    if show:
        for xx in processed:
            cv.imshow('xx', xx)
    return processed
def pred(img):
|
import numpy as np
import tensorflow as tf
import cv2 as cv
from matplotlib import pyplot as plt
import tkinter as tk
def resizeandgray(img):
    """Convert each image in *img* to grayscale and resize it to 30x46.

    Assumes BGR input (cv.imread convention, per COLOR_BGR2GRAY).
    Returns the list of processed single-channel images.
    """
    x=[]
    for i in img:
        gray=cv.cvtColor(i,cv.COLOR_BGR2GRAY)
        x.append(cv.resize(gray,(30,46)))
    # NOTE(review): debug display loop -- cv.waitKey is never called, so
    # these windows may not actually render; looks like leftover debugging.
    for xx in x:
        cv.imshow('xx',xx)
    return x
def pred(img):
|
none
| 1
| 3.143806
| 3
|
|
molly/maps/management/commands/generate_markers.py
|
mollyproject/mollyproject
| 7
|
6627216
|
import itertools
import subprocess
import os.path
import tempfile
import os
from optparse import make_option
from django.core.management.base import NoArgsCommand
from django.conf import settings
from molly.maps.osm import MARKER_COLORS, MARKER_RANGE
from molly.maps.osm.models import get_marker_dir
class Command(NoArgsCommand):
    """Render the numbered and starred map-marker PNGs from SVG templates."""
    option_list = NoArgsCommand.option_list + (
        make_option('--lazy',
            action='store_true',
            dest='lazy',
            default=False,
            help="Only generate makers if they don't already exist"),
    )

    def _read_template(self, basename):
        # Read an SVG template shipped next to this command.  The context
        # manager closes the handle (the original leaked open file objects).
        path = os.path.join(os.path.dirname(__file__), 'markers', basename)
        with open(path) as template_file:
            return template_file.read()

    def _render(self, svg_source, png_path):
        # Write the filled-in SVG to a temp file and rasterise it with
        # ImageMagick.  Argument-list form (shell=False) avoids the quoting
        # problems of the original shell=True string; the temp file is now
        # removed even when convert fails.
        fd, svg_path = tempfile.mkstemp()
        try:
            os.write(fd, svg_source)
            os.close(fd)
            subprocess.check_call(
                ['convert', '-background', 'none', svg_path, png_path])
        finally:
            os.unlink(svg_path)

    def handle_noargs(self, **options):
        """Generate every marker image that is not already on disk.

        NOTE(review): files that already exist are always skipped, so the
        --lazy option has no effect ('options' is never consulted) --
        confirm whether --lazy was meant to gate the skip.
        """
        marker_dir = get_marker_dir()
        if not os.path.exists(marker_dir):
            os.makedirs(marker_dir)
        # Numbered markers: one PNG per (color, index) pair.
        template = self._read_template('base.svg')
        for color, index in itertools.product(MARKER_COLORS, MARKER_RANGE):
            filename = os.path.join(marker_dir, '%s_%d.png' % (color[0], index))
            if os.path.exists(filename):
                continue
            out = template % {
                'label': str(index),
                'fill': color[1],
                'stroke': color[2],
                'text_color': color[3],
            }
            self._render(out, filename)
        # Star markers: one PNG per color.
        template = self._read_template('star-base.svg')
        for color in MARKER_COLORS:
            filename = os.path.join(marker_dir, '%s_star.png' % color[0])
            if os.path.exists(filename):
                continue
            out = template % {'fill': color[1], 'stroke': color[2]}
            self._render(out, filename)
|
import itertools
import subprocess
import os.path
import tempfile
import os
from optparse import make_option
from django.core.management.base import NoArgsCommand
from django.conf import settings
from molly.maps.osm import MARKER_COLORS, MARKER_RANGE
from molly.maps.osm.models import get_marker_dir
class Command(NoArgsCommand):
    """Render the numbered and starred map-marker PNGs from SVG templates."""
    option_list = NoArgsCommand.option_list + (
        make_option('--lazy',
            action='store_true',
            dest='lazy',
            default=False,
            help="Only generate makers if they don't already exist"),
    )
    def handle_noargs(self, **options):
        """Generate every marker image that is not already on disk.

        NOTE(review): existing files are always skipped, so --lazy has no
        effect ('options' is never consulted); the open().read() calls also
        leave file handles unclosed, and check_call uses shell=True with an
        interpolated command string -- candidates for cleanup.
        """
        template = open(os.path.join(os.path.dirname(__file__), 'markers', 'base.svg')).read()
        marker_dir = get_marker_dir()
        if not os.path.exists(marker_dir):
            os.makedirs(marker_dir)
        # Numbered markers: one PNG per (color, index) pair.
        for color, index in itertools.product(MARKER_COLORS, MARKER_RANGE):
            if os.path.exists(os.path.join(marker_dir, '%s_%d.png' % (color[0], index))):
                continue
            out = template % {
                'label': str(index),
                'fill': color[1],
                'stroke': color[2],
                'text_color': color[3],
            }
            # Fill the SVG template into a temp file, rasterise with ImageMagick.
            f, infile = tempfile.mkstemp()
            os.write(f, out)
            os.close(f)
            filename = os.path.join(marker_dir, '%s_%d.png' % (color[0], index))
            subprocess.check_call('convert -background none "%s" "%s"' % (infile, filename), shell=True)
            os.unlink(infile)
        # Star markers: one PNG per color.
        template = open(os.path.join(os.path.dirname(__file__), 'markers', 'star-base.svg')).read()
        for color in MARKER_COLORS:
            if os.path.exists(os.path.join(marker_dir, '%s_star.png' % color[0])):
                continue
            out = template % {'fill': color[1], 'stroke': color[2]}
            f, infile = tempfile.mkstemp()
            os.write(f, out)
            os.close(f)
            filename = os.path.join(marker_dir, '%s_star.png' % color[0])
            subprocess.check_call('convert -background none "%s" "%s"' % (infile, filename), shell=True)
            os.unlink(infile)
|
none
| 1
| 2.140195
| 2
|
|
src/quart/signals.py
|
Dunkledore/quart
| 3
|
6627217
|
from __future__ import annotations
from typing import Any, Callable, List, Optional, Tuple
from blinker import NamedSignal, Namespace
from .utils import ensure_coroutine
signals_available = True
class AsyncNamedSignal(NamedSignal):  # type: ignore
    """A blinker NamedSignal whose receivers are coroutine functions."""
    def __init__(self, name: str, doc: Optional[str] = None) -> None:
        super().__init__(name, doc)
    async def send(self, *sender: Any, **kwargs: Any) -> List[Tuple[Callable, Any]]:
        """Dispatch to all receivers and await each result in turn.

        blinker's send() returns (receiver, return_value) pairs; every
        connected handler here is a coroutine function (see connect), so
        each return value is a coroutine that must be awaited.
        """
        coroutines = super().send(*sender, **kwargs)
        result: List[Tuple[Callable, Any]] = []
        for handler, coroutine in coroutines:
            result.append((handler, await coroutine))
        return result
    def connect(self, receiver: Callable, *args: Any, **kwargs: Any) -> Callable:
        """Connect *receiver*, wrapping plain callables into coroutines."""
        handler = ensure_coroutine(receiver)
        if handler is not receiver and kwargs.get("weak", True):
            # Blinker will take a weakref to handler, which goes out
            # of scope with this method as it is a wrapper around the
            # receiver. Whereas we'd want it to go out of scope when
            # receiver does. Therefore we can place it on the receiver
            # function. (Ideally I'll think of a better way).
            receiver._quart_wrapper_func = handler  # type: ignore
        return super().connect(handler, *args, **kwargs)
class AsyncNamespace(Namespace):  # type: ignore
    """A signal namespace that hands out AsyncNamedSignal instances."""

    def signal(self, name: str, doc: Optional[str] = None) -> AsyncNamedSignal:
        """Return the signal called *name*, creating it on first use."""
        if name in self:
            return self[name]
        return self.setdefault(name, AsyncNamedSignal(name, doc))
# Module-level registry: all Quart lifecycle signals live in one namespace.
_signals = AsyncNamespace()
#: Called before a template is rendered, connected functions
# should have a signature of Callable[[Quart, Template, dict], None]
before_render_template = _signals.signal("before-render-template")
#: Called when a template has been rendered, connected functions
# should have a signature of Callable[[Quart, Template, dict], None]
template_rendered = _signals.signal("template-rendered")
#: Called just after the request context has been created, connected
# functions should have a signature of Callable[[Quart], None]
request_started = _signals.signal("request-started")
#: Called after a response is fully finalised, connected functions
# should have a signature of Callable[[Quart, Response], None]
request_finished = _signals.signal("request-finished")
#: Called as the request context is torn down, connected functions
# should have a signature of Callable[[Quart, Exception], None]
request_tearing_down = _signals.signal("request-tearing-down")
#: Called if there is an exception handling the request, connected
# functions should have a signature of Callable[[Quart, Exception], None]
got_request_exception = _signals.signal("got-request-exception")
#: Called just after the websocket context has been created, connected
# functions should have a signature of Callable[[Quart], None]
websocket_started = _signals.signal("websocket-started")
#: Called after a response is fully finalised, connected functions
# should have a signature of Callable[[Quart, Optional[Response]], None]
websocket_finished = _signals.signal("websocket-finished")
#: Called as the websocket context is torn down, connected functions
# should have a signature of Callable[[Quart, Exception], None]
websocket_tearing_down = _signals.signal("websocket-tearing-down")
#: Called if there is an exception handling the websocket, connected
# functions should have a signature of Callable[[Quart, Exception], None]
got_websocket_exception = _signals.signal("got-websocket-exception")
#: Called as the application context is torn down, connected functions
# should have a signature of Callable[[Quart, Exception], None]
appcontext_tearing_down = _signals.signal("appcontext-tearing-down")
#: Called when the app context is pushed, connected functions should
# have a signature of Callable[[Quart], None]
appcontext_pushed = _signals.signal("appcontext-pushed")
#: Called when the app context is popped, connected functions should
# have a signature of Callable[[Quart], None]
appcontext_popped = _signals.signal("appcontext-popped")
#: Called on a flash invocation, connected functions
# should have a signature of Callable[[Quart, str, str], None]
message_flashed = _signals.signal("message-flashed")
|
from __future__ import annotations
from typing import Any, Callable, List, Optional, Tuple
from blinker import NamedSignal, Namespace
from .utils import ensure_coroutine
signals_available = True
class AsyncNamedSignal(NamedSignal):  # type: ignore
    """A blinker NamedSignal whose receivers are coroutine functions."""
    def __init__(self, name: str, doc: Optional[str] = None) -> None:
        super().__init__(name, doc)
    async def send(self, *sender: Any, **kwargs: Any) -> List[Tuple[Callable, Any]]:
        """Dispatch to all receivers and await each result in turn."""
        coroutines = super().send(*sender, **kwargs)
        result: List[Tuple[Callable, Any]] = []
        for handler, coroutine in coroutines:
            result.append((handler, await coroutine))
        return result
    def connect(self, receiver: Callable, *args: Any, **kwargs: Any) -> Callable:
        """Connect *receiver*, wrapping plain callables into coroutines."""
        handler = ensure_coroutine(receiver)
        if handler is not receiver and kwargs.get("weak", True):
            # Blinker will take a weakref to handler, which goes out
            # of scope with this method as it is a wrapper around the
            # receiver. Whereas we'd want it to go out of scope when
            # receiver does. Therefore we can place it on the receiver
            # function. (Ideally I'll think of a better way).
            receiver._quart_wrapper_func = handler  # type: ignore
        return super().connect(handler, *args, **kwargs)
class AsyncNamespace(Namespace):  # type: ignore
    """A blinker :class:`Namespace` that hands out :class:`AsyncNamedSignal` objects."""

    def signal(self, name: str, doc: Optional[str] = None) -> AsyncNamedSignal:
        """Return the signal called *name*, creating it on first use."""
        if name in self:
            return self[name]
        return self.setdefault(name, AsyncNamedSignal(name, doc))
_signals = AsyncNamespace()
#: Called before a template is rendered, connected functions
# should have a signature of Callable[[Quart, Template, dict], None]
before_render_template = _signals.signal("before-render-template")
#: Called when a template has been rendered, connected functions
# should have a signature of Callable[[Quart, Template, dict], None]
template_rendered = _signals.signal("template-rendered")
#: Called just after the request context has been created, connected
# functions should have a signature of Callable[[Quart], None]
request_started = _signals.signal("request-started")
#: Called after a response is fully finalised, connected functions
# should have a signature of Callable[[Quart, Response], None]
request_finished = _signals.signal("request-finished")
#: Called as the request context is torn down, connected functions
# should have a signature of Callable[[Quart, Exception], None]
request_tearing_down = _signals.signal("request-tearing-down")
#: Called if there is an exception handling the request, connected
# functions should have a signature of Callable[[Quart, Exception], None]
got_request_exception = _signals.signal("got-request-exception")
#: Called just after the websocket context has been created, connected
# functions should have a signature of Callable[[Quart], None]
websocket_started = _signals.signal("websocket-started")
#: Called after a response is fully finalised, connected functions
# should have a signature of Callable[[Quart, Optional[Response]], None]
websocket_finished = _signals.signal("websocket-finished")
#: Called as the websocket context is torn down, connected functions
# should have a signature of Callable[[Quart, Exception], None]
websocket_tearing_down = _signals.signal("websocket-tearing-down")
#: Called if there is an exception handling the websocket, connected
# functions should have a signature of Callable[[Quart, Exception], None]
got_websocket_exception = _signals.signal("got-websocket-exception")
#: Called as the application context is torn down, connected functions
# should have a signature of Callable[[Quart, Exception], None]
appcontext_tearing_down = _signals.signal("appcontext-tearing-down")
#: Called when the app context is pushed, connected functions should
# have a signature of Callable[[Quart], None]
appcontext_pushed = _signals.signal("appcontext-pushed")
#: Called when the app context is popped, connected functions should
# have a signature of Callable[[Quart], None]
appcontext_popped = _signals.signal("appcontext-popped")
#: Called on a flash invocation, connected functions
# should have a signature of Callable[[Quart, str, str], None]
message_flashed = _signals.signal("message-flashed")
|
en
| 0.925232
|
# type: ignore # Blinker will take a weakref to handler, which goes out # of scope with this method as it is a wrapper around the # receiver. Whereas we'd want it to go out of scope when # receiver does. Therefore we can place it on the receiver # function. (Ideally I'll think of a better way). # type: ignore # type: ignore #: Called before a template is rendered, connection functions # should have a signature of Callable[[Quart, Template, dict], None] #: Called when a template has been rendered, connected functions # should have a signature of Callable[[Quart, Template, dict], None] #: Called just after the request context has been created, connected # functions should have a signature of Callable[[Quart], None] #: Called after a response is fully finalised, connected functions # should have a signature of Callable[[Quart, Response], None] #: Called as the request context is teared down, connected functions # should have a signature of Callable[[Quart, Exception], None] #: Called if there is an exception handling the request, connected # functions should have a signature of Callable[[Quart, Exception], None] #: Called just after the websocket context has been created, connected # functions should have a signature of Callable[[Quart], None] #: Called after a response is fully finalised, connected functions # should have a signature of Callable[[Quart, Optional[Response]], None] #: Called as the websocket context is teared down, connected functions # should have a signature of Callable[[Quart, Exception], None] #: Called if there is an exception handling the websocket, connected # functions should have a signature of Callable[[Quart, Exception], None] #: Called as the application context is teared down, connected functions # should have a signature of Callable[[Quart, Exception], None] #: Called when the app context is pushed, connected functions should # have a signature of Callable[[Quart], None] #: Called when the app context is poped, connected functions should 
# have a signature of Callable[[Quart], None] #: Called on a flash invocation, connection functions # should have a signature of Callable[[Quart, str, str], None]
| 2.271841
| 2
|
doconv/__init__.py
|
jmourelos/doconv
| 3
|
6627218
|
<reponame>jmourelos/doconv<filename>doconv/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Package metadata for doconv.
__author__ = '<NAME>'  # anonymised placeholder in this dataset export
__email__ = '<EMAIL>'  # anonymised placeholder in this dataset export
# Package version string.
VERSION = '0.1.7'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
__email__ = '<EMAIL>'
VERSION = '0.1.7'
|
en
| 0.352855
|
#!/usr/bin/env python # -*- coding: utf-8 -*-
| 1.031436
| 1
|
allauth/socialaccount/providers/spotify/provider.py
|
mina-gaid/scp
| 1
|
6627219
|
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
from allauth.socialaccount import app_settings
class SpotifyAccount(ProviderAccount):
    """Social account wrapper for Spotify; the default behaviour suffices."""
    pass
class SpotifyOAuth2Provider(OAuth2Provider):
    """django-allauth OAuth2 provider definition for Spotify."""

    id = 'spotify'
    name = 'Spotify'
    account_class = SpotifyAccount

    def extract_uid(self, data):
        """Return Spotify's stable user identifier from the profile payload."""
        return data['id']

    def extract_common_fields(self, data):
        """Map the Spotify profile payload onto allauth's common account fields."""
        return {
            'name': data.get('display_name'),
            'email': data.get('email'),
        }

    def get_default_scope(self):
        """Request the email-read scope only when the project wants user emails."""
        if app_settings.QUERY_EMAIL:
            return ['user-read-email']
        return []


providers.registry.register(SpotifyOAuth2Provider)
|
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
from allauth.socialaccount import app_settings
class SpotifyAccount(ProviderAccount):
pass
class SpotifyOAuth2Provider(OAuth2Provider):
id = 'spotify'
name = 'Spotify'
account_class = SpotifyAccount
def extract_uid(self, data):
return data['id']
def extract_common_fields(self, data):
return dict(name=data.get('display_name'),
email=data.get('email'))
def get_default_scope(self):
scope = []
if app_settings.QUERY_EMAIL:
scope.append('user-read-email')
return scope
providers.registry.register(SpotifyOAuth2Provider)
|
none
| 1
| 2.119276
| 2
|
|
nanome/util/stream.py
|
rramji/nanome-lib
| 0
|
6627220
|
from .enum import IntEnum
class StreamCreationError(IntEnum):
    """Result codes reported when a stream is created."""
    NoError = 0  # stream creation succeeded
    AtomNotFound = 1  # presumably a referenced atom was missing — confirm against the stream API
    UnsupportedStream = 2  # requested stream kind is not supported by the receiver (assumed from name)
class StreamInterruptReason(IntEnum):
    """Reasons an active stream stopped unexpectedly."""
    StreamNotFound = 0  # the stream no longer exists on the other side (assumed from name)
    Crashed = 1  # the stream terminated abnormally (assumed from name)
|
from .enum import IntEnum
class StreamCreationError(IntEnum):
NoError = 0
AtomNotFound = 1
UnsupportedStream = 2
class StreamInterruptReason(IntEnum):
StreamNotFound = 0
Crashed = 1
|
none
| 1
| 2.246752
| 2
|
|
scapy/test/tls/example_server.py
|
jreynders/BLESuite-1
| 0
|
6627221
|
<filename>scapy/test/tls/example_server.py
#!/usr/bin/env python
## This file is part of Scapy
## This program is published under a GPLv2 license
"""
Basic TLS server. A preferred ciphersuite may be provided as first argument.
For instance, "sudo ./server_simple.py c014" will start a server accepting
any TLS client connection. If provided, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
will be preferred to any other suite the client might propose.
"""
import os
import sys
# Put the in-tree scapy package ahead of any installed copy on sys.path.
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__),"../../"))
sys.path=[basedir]+sys.path
from scapy.layers.tls.automaton_srv import TLSServerAutomaton
# Optional first CLI argument: preferred ciphersuite as a hex code (e.g. c014).
if len(sys.argv) == 2:
    pcs = int(sys.argv[1], 16)
else:
    pcs = None
# Serve TLS using the test certificate/key shipped with the source tree.
t = TLSServerAutomaton(mycert=basedir+'/test/tls/pki/srv_cert.pem',
                       mykey=basedir+'/test/tls/pki/srv_key.pem',
                       preferred_ciphersuite=pcs)
t.run()
|
<filename>scapy/test/tls/example_server.py
#!/usr/bin/env python
## This file is part of Scapy
## This program is published under a GPLv2 license
"""
Basic TLS server. A preferred ciphersuite may be provided as first argument.
For instance, "sudo ./server_simple.py c014" will start a server accepting
any TLS client connection. If provided, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
will be preferred to any other suite the client might propose.
"""
import os
import sys
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__),"../../"))
sys.path=[basedir]+sys.path
from scapy.layers.tls.automaton_srv import TLSServerAutomaton
if len(sys.argv) == 2:
pcs = int(sys.argv[1], 16)
else:
pcs = None
t = TLSServerAutomaton(mycert=basedir+'/test/tls/pki/srv_cert.pem',
mykey=basedir+'/test/tls/pki/srv_key.pem',
preferred_ciphersuite=pcs)
t.run()
|
en
| 0.738863
|
#!/usr/bin/env python ## This file is part of Scapy ## This program is published under a GPLv2 license Basic TLS server. A preferred ciphersuite may be provided as first argument. For instance, "sudo ./server_simple.py c014" will start a server accepting any TLS client connection. If provided, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA will be preferred to any other suite the client might propose.
| 2.324031
| 2
|
apitest/api_test/service/publishService.py
|
willhuang1206/apitest
| 0
|
6627222
|
<reponame>willhuang1206/apitest
import logging
import threading
import ast
import traceback
import time
from datetime import datetime
from api_test.service.runService import RunService
from api_test.service.reportService import ReportService
from api_test.service.configService import getProjectConfigValueByName
from api_test.service import scheduleService
from api_test.models import AutomationTask
from api_test.common.paramUtil import ParamUtil
from api_test.common.jsonUtil import json
from api_test.models import ProjectConfig,Automation
class RunPublishTestThread (threading.Thread):
    """Background thread that runs the automation tests attached to a publish (release) event."""
    def __init__(self, publish, id,context):
        threading.Thread.__init__(self)
        self.publish = publish  # publish record: carries project, env, automations and email settings
        self.id = id  # identifier of the publish event, used in the report name
        self.context = context  # execution context forwarded to the automation run
        self.result={}  # filled with the aggregated run result once the thread completes
    def run(self):
        publishName="{name}{env}环境上线单{id}".format(name=self.publish.name,env=self.publish.env,id=self.id)
        # Optional per-project grace period before testing starts.
        publishTestWaitTime=int(getProjectConfigValueByName(self.publish.project.id,"PublishTestWaitTime",0))
        if publishTestWaitTime>0:
            logging.info("Wait {waitTime} seconds before publish test {publish}".format(waitTime=publishTestWaitTime,publish=publishName))
            time.sleep(publishTestWaitTime)
        logging.info("Run publish test {publish} at {runtime}".format(publish=publishName,runtime=datetime.strftime(datetime.now(),'%Y-%m-%d %H:%M:%S')))
        try:
            # `publish.automations` is stored as a stringified list of ids; evaluate it safely.
            automations=Automation.objects.filter(id__in=ast.literal_eval(self.publish.automations))
            if len(automations)>0:
                # Delegate the actual execution and block until the nested run thread finishes.
                thread=RunService.run_automation_publish(self.publish,self.id,automations,[],self.context)
                thread.join()
                self.result=thread.result
                if self.publish.sendEmail:
                    # `emails` may already be a list or a stringified list.
                    emails=self.publish.emails if isinstance(self.publish.emails,list) else ast.literal_eval(self.publish.emails)
                    self.publish.name=publishName
                    ReportService().sendTaskReport(self.publish,self.result,emails)
        except:
            # NOTE(review): bare except swallows everything (including SystemExit); consider narrowing.
            logging.error(traceback.format_exc())
        logging.info("Complete publish test {publish} at {runtime}".format(publish=publishName,runtime=datetime.strftime(datetime.now(),'%Y-%m-%d %H:%M:%S')))
class PublishService():
    """Entry point for triggering publish-time automation tests."""
    @staticmethod
    def run_test(publish,id,context):
        """Start the publish test in a background thread (fire-and-forget).

        Returns ``thread.result`` immediately after ``start()``; since the
        thread has normally not finished yet, this is the initial empty dict.
        NOTE(review): confirm callers do not expect the final results here.
        """
        thread = RunPublishTestThread(publish,id,context)
        thread.start()
        return thread.result
|
import logging
import threading
import ast
import traceback
import time
from datetime import datetime
from api_test.service.runService import RunService
from api_test.service.reportService import ReportService
from api_test.service.configService import getProjectConfigValueByName
from api_test.service import scheduleService
from api_test.models import AutomationTask
from api_test.common.paramUtil import ParamUtil
from api_test.common.jsonUtil import json
from api_test.models import ProjectConfig,Automation
class RunPublishTestThread (threading.Thread):
def __init__(self, publish, id,context):
threading.Thread.__init__(self)
self.publish = publish
self.id = id
self.context = context
self.result={}
def run(self):
publishName="{name}{env}环境上线单{id}".format(name=self.publish.name,env=self.publish.env,id=self.id)
publishTestWaitTime=int(getProjectConfigValueByName(self.publish.project.id,"PublishTestWaitTime",0))
if publishTestWaitTime>0:
logging.info("Wait {waitTime} seconds before publish test {publish}".format(waitTime=publishTestWaitTime,publish=publishName))
time.sleep(publishTestWaitTime)
logging.info("Run publish test {publish} at {runtime}".format(publish=publishName,runtime=datetime.strftime(datetime.now(),'%Y-%m-%d %H:%M:%S')))
try:
automations=Automation.objects.filter(id__in=ast.literal_eval(self.publish.automations))
if len(automations)>0:
thread=RunService.run_automation_publish(self.publish,self.id,automations,[],self.context)
thread.join()
self.result=thread.result
if self.publish.sendEmail:
emails=self.publish.emails if isinstance(self.publish.emails,list) else ast.literal_eval(self.publish.emails)
self.publish.name=publishName
ReportService().sendTaskReport(self.publish,self.result,emails)
except:
logging.error(traceback.format_exc())
logging.info("Complete publish test {publish} at {runtime}".format(publish=publishName,runtime=datetime.strftime(datetime.now(),'%Y-%m-%d %H:%M:%S')))
class PublishService():
@staticmethod
def run_test(publish,id,context):
thread = RunPublishTestThread(publish,id,context)
thread.start()
return thread.result
|
none
| 1
| 2.286841
| 2
|
|
Cas_1/Kinetic Energy/A_Kinetic_energy.py
|
Herpinemmanuel/Oceanography
| 1
|
6627223
|
<filename>Cas_1/Kinetic Energy/A_Kinetic_energy.py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import xgcm
import cartopy.crs as ccrs
from xmitgcm import open_mdsdataset
from matplotlib.mlab import bivariate_normal  # NOTE(review): unused here and removed in matplotlib >= 3.1
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# Location of the MITgcm run output to analyse.
dir0 = '/homedata/bderembl/runmit/test_southatlgyre'
ds0 = open_mdsdataset(dir0,iters='all',prefix=['U','V'])
grid = xgcm.Grid(ds0)
print(grid)
# Square the mask-weighted velocity components and interpolate them onto cell centres.
Kinetic_energy_X = grid.interp((ds0.U.where(ds0.hFacW>0)*ds0.hFacW)**2, 'X')
Kinetic_energy_Y = grid.interp((ds0.V.where(ds0.hFacS>0)*ds0.hFacS)**2, 'Y')
# NOTE(review): lacks the conventional 1/2 factor of kinetic energy — confirm intended.
Kinetic_energy = Kinetic_energy_X + Kinetic_energy_Y
print('Kinetic_energy')
print(Kinetic_energy)
i = 0
nz = 0  # vertical level to plot
# Render one PNG per time index (1..50), zero-padding the filename to three digits.
while (i < 50):
  i=i+1
  print(i)
  plt.figure(1)
  ax = plt.subplot(projection=ccrs.PlateCarree());
  Kinetic_energy[i,nz,:,:].plot.pcolormesh('XC', 'YC', ax=ax,vmin=0,norm=colors.PowerNorm(gamma=1./2.),vmax=10,cmap='ocean');
  plt.title('Kinetic Energy m²/s²')
  plt.text(5,5,i,ha='center',wrap=True)
  ax.coastlines()
  gl = ax.gridlines(draw_labels=True, alpha = 0.5, linestyle='--');
  gl.xlabels_top = False
  gl.ylabels_right = False
  gl.xformatter = LONGITUDE_FORMATTER
  gl.yformatter = LATITUDE_FORMATTER
  if (i < 10):
   plt.savefig('Kinetic Energy-'+'00'+str(i)+'.png')
   plt.clf()
  elif (i > 9) and (i < 100):
   plt.savefig('Kinetic Energy-'+'0'+str(i)+'.png')
   plt.clf()
  else:
   plt.savefig('Kinetic Energy-'+str(i)+'.png')
   plt.clf()
|
<filename>Cas_1/Kinetic Energy/A_Kinetic_energy.py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import xgcm
import cartopy.crs as ccrs
from xmitgcm import open_mdsdataset
from matplotlib.mlab import bivariate_normal
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
dir0 = '/homedata/bderembl/runmit/test_southatlgyre'
ds0 = open_mdsdataset(dir0,iters='all',prefix=['U','V'])
grid = xgcm.Grid(ds0)
print(grid)
Kinetic_energy_X = grid.interp((ds0.U.where(ds0.hFacW>0)*ds0.hFacW)**2, 'X')
Kinetic_energy_Y = grid.interp((ds0.V.where(ds0.hFacS>0)*ds0.hFacS)**2, 'Y')
Kinetic_energy = Kinetic_energy_X + Kinetic_energy_Y
print('Kinetic_energy')
print(Kinetic_energy)
i = 0
nz = 0
while (i < 50):
i=i+1
print(i)
plt.figure(1)
ax = plt.subplot(projection=ccrs.PlateCarree());
Kinetic_energy[i,nz,:,:].plot.pcolormesh('XC', 'YC', ax=ax,vmin=0,norm=colors.PowerNorm(gamma=1./2.),vmax=10,cmap='ocean');
plt.title('Kinetic Energy m²/s²')
plt.text(5,5,i,ha='center',wrap=True)
ax.coastlines()
gl = ax.gridlines(draw_labels=True, alpha = 0.5, linestyle='--');
gl.xlabels_top = False
gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
if (i < 10):
plt.savefig('Kinetic Energy-'+'00'+str(i)+'.png')
plt.clf()
elif (i > 9) and (i < 100):
plt.savefig('Kinetic Energy-'+'0'+str(i)+'.png')
plt.clf()
else:
plt.savefig('Kinetic Energy-'+str(i)+'.png')
plt.clf()
|
none
| 1
| 2.12316
| 2
|
|
aiida_quantumespresso/parsers/pw.py
|
giovannipizzi/aiida-quantumespresso
| 2
|
6627224
|
# -*- coding: utf-8 -*-
"""`Parser` implementation for the `PwCalculation` calculation job class."""
import traceback
import numpy
from aiida import orm
from aiida.common import exceptions
from aiida_quantumespresso.utils.mapping import get_logging_container
from .base import Parser
from .parse_raw.pw import reduce_symmetries
class PwParser(Parser):
"""`Parser` implementation for the `PwCalculation` calculation job class."""
    def parse(self, **kwargs):
        """Parse the retrieved files of a completed `PwCalculation` into output nodes.

        Two nodes that are expected are the default 'retrieved' `FolderData` node which will store the retrieved files
        permanently in the repository. The second required node is a filepath under the key `retrieved_temporary_files`
        which should contain the temporary retrieved files.
        """
        dir_with_bands = None
        # Exit codes are accumulated on the instance by the individual parse steps
        # and weighed against each other at the end of this method.
        self.exit_code_xml = None
        self.exit_code_stdout = None
        self.exit_code_parser = None
        try:
            settings = self.node.inputs.settings.get_dict()
        except exceptions.NotExistent:
            settings = {}
        # Look for optional settings input node and potential 'parser_options' dictionary within it
        parser_options = settings.get(self.get_parser_settings_key(), None)
        # Verify that the retrieved_temporary_folder is within the arguments if temporary files were specified
        if self.node.get_attribute('retrieve_temporary_list', None):
            try:
                dir_with_bands = kwargs['retrieved_temporary_folder']
            except KeyError:
                return self.exit(self.exit_codes.ERROR_NO_RETRIEVED_TEMPORARY_FOLDER)
        parameters = self.node.inputs.parameters.get_dict()
        # Parse the XML first so its raw data can inform the stdout parsing.
        parsed_xml, logs_xml = self.parse_xml(dir_with_bands, parser_options)
        parsed_stdout, logs_stdout = self.parse_stdout(parameters, parser_options, parsed_xml)
        parsed_bands = parsed_stdout.pop('bands', {})
        parsed_structure = parsed_stdout.pop('structure', {})
        parsed_trajectory = parsed_stdout.pop('trajectory', {})
        parsed_parameters = self.build_output_parameters(parsed_stdout, parsed_xml)
        # Append the last frame of some of the smaller trajectory arrays to the parameters for easy querying
        self.final_trajectory_frame_to_parameters(parsed_parameters, parsed_trajectory)
        # If the parser option 'all_symmetries' is False, we reduce the raw parsed symmetries to save space
        all_symmetries = False if parser_options is None else parser_options.get('all_symmetries', False)
        if not all_symmetries and 'cell' in parsed_structure:
            reduce_symmetries(parsed_parameters, parsed_structure, self.logger)
        structure = self.build_output_structure(parsed_structure)
        kpoints = self.build_output_kpoints(parsed_parameters, structure)
        bands = self.build_output_bands(parsed_bands, kpoints)
        trajectory = self.build_output_trajectory(parsed_trajectory, structure)
        # Determine whether the input kpoints were defined as a mesh or as an explicit list
        try:
            self.node.inputs.kpoints.get_kpoints()
        except AttributeError:
            input_kpoints_explicit = False
        else:
            input_kpoints_explicit = True
        # Only attach the `KpointsData` as output if there will be no `BandsData` output and inputs were defined as mesh
        if kpoints and not bands and not input_kpoints_explicit:
            self.out('output_kpoints', kpoints)
        if bands:
            self.out('output_band', bands)
        if trajectory:
            self.out('output_trajectory', trajectory)
        if not structure.is_stored:
            self.out('output_structure', structure)
        # Separate the atomic_occupations dictionary in its own node if it is present
        atomic_occupations = parsed_parameters.pop('atomic_occupations', None)
        if atomic_occupations:
            self.out('output_atomic_occupations', orm.Dict(dict=atomic_occupations))
        self.out('output_parameters', orm.Dict(dict=parsed_parameters))
        # Emit the logs returned by the XML and stdout parsing through the logger
        # If the calculation was an initialization run, reset the XML logs because they will contain a lot of verbose
        # warnings from the schema parser about incomplete data, but that is to be expected in an initialization run.
        if settings.get('ONLY_INITIALIZATION', False):
            logs_xml.pop('error')
        ignore = ['Error while parsing ethr.', 'DEPRECATED: symmetry with ibrav=0, use correct ibrav instead']
        self.emit_logs([logs_stdout, logs_xml], ignore=ignore)
        # First check for specific known problems that can cause a pre-mature termination of the calculation
        exit_code = self.validate_premature_exit(logs_stdout)
        if exit_code:
            return self.exit(exit_code)
        # If the both stdout and xml exit codes are set, there was a basic problem with both output files and there
        # is no need to investigate any further.
        if self.exit_code_stdout and self.exit_code_xml:
            return self.exit(self.exit_codes.ERROR_OUTPUT_FILES)
        if self.exit_code_stdout:
            return self.exit(self.exit_code_stdout)
        if self.exit_code_xml:
            return self.exit(self.exit_code_xml)
        # First determine issues that can occur for all calculation types. Note that the generic errors, that are
        # common to all types are done first. If a problem is found there, we return the exit code and don't continue
        for validator in [self.validate_electronic, self.validate_dynamics, self.validate_ionic]:
            exit_code = validator(trajectory, parsed_parameters, logs_stdout)
            if exit_code:
                return self.exit(exit_code)
def get_calculation_type(self):
"""Return the type of the calculation."""
return self.node.inputs.parameters.get_attribute('CONTROL', {}).get('calculation', 'scf')
def validate_premature_exit(self, logs):
"""Analyze problems that will cause a pre-mature termination of the calculation, controlled or not."""
if 'ERROR_OUT_OF_WALLTIME' in logs['error'] and 'ERROR_OUTPUT_STDOUT_INCOMPLETE' in logs['error']:
return self.exit_codes.ERROR_OUT_OF_WALLTIME_INTERRUPTED
for error_label in [
'ERROR_OUT_OF_WALLTIME',
'ERROR_CHARGE_IS_WRONG',
'ERROR_SYMMETRY_NON_ORTHOGONAL_OPERATION',
'ERROR_DEXX_IS_NEGATIVE',
'ERROR_COMPUTING_CHOLESKY',
'ERROR_NPOOLS_TOO_HIGH',
]:
if error_label in logs['error']:
return self.exit_codes.get(error_label)
def validate_electronic(self, trajectory, parameters, logs):
"""Analyze problems that are specific to `electronic` type calculations: i.e. `scf`, `nscf` and `bands`."""
if self.get_calculation_type() not in ['scf', 'nscf', 'bands']:
return
if 'ERROR_ELECTRONIC_CONVERGENCE_NOT_REACHED' in logs['error']:
return self.exit_codes.ERROR_ELECTRONIC_CONVERGENCE_NOT_REACHED
    def validate_dynamics(self, trajectory, parameters, logs):
        """Analyze problems that are specific to `dynamics` type calculations: i.e. `md` and `vc-md`.

        Currently no dynamics-specific checks are implemented; the method only
        short-circuits for non-dynamics calculation types.
        """
        if self.get_calculation_type() not in ['md', 'vc-md']:
            return
    def validate_ionic(self, trajectory, parameters, logs):
        """Analyze problems that are specific to `ionic` type calculations: i.e. `relax` and `vc-relax`.

        :param trajectory: the output trajectory built from the parsed data
        :param parameters: the parsed output parameters dictionary
        :param logs: logging container with `error` and `warning` message lists
        :return: an exit code if a problem is detected, otherwise None
        """
        if self.get_calculation_type() not in ['relax', 'vc-relax']:
            return
        electronic_convergence_reached = 'ERROR_ELECTRONIC_CONVERGENCE_NOT_REACHED' not in logs.error
        ionic_convergence_reached = 'ERROR_IONIC_CONVERGENCE_NOT_REACHED' not in logs.error
        bfgs_history_failure = 'ERROR_IONIC_CYCLE_BFGS_HISTORY_FAILURE' in logs.error
        maximum_ionic_steps_reached = 'ERROR_MAXIMUM_IONIC_STEPS_REACHED' in logs.warning
        final_scf = parameters.get('final_scf', False)
        # The electronic self-consistency cycle failed before reaching ionic convergence
        if not ionic_convergence_reached and not electronic_convergence_reached:
            return self.exit_codes.ERROR_IONIC_CYCLE_ELECTRONIC_CONVERGENCE_NOT_REACHED
        # Ionic convergence was not reached because maximum number of steps was exceeded
        if not ionic_convergence_reached and maximum_ionic_steps_reached:
            return self.exit_codes.ERROR_IONIC_CYCLE_EXCEEDED_NSTEP
        # BFGS fails twice in a row in which case QE will print that convergence is reached while it is not necessarily
        if bfgs_history_failure:
            # If electronic convergence was not reached, this had to have been a `vc-relax` where final SCF failed
            if not electronic_convergence_reached:
                return self.exit_codes.ERROR_IONIC_CYCLE_BFGS_HISTORY_AND_FINAL_SCF_FAILURE
            # If the forces and optionally stresses are already converged, consider the calculation successful
            if self.is_ionically_converged(trajectory):
                return
            return self.exit_codes.ERROR_IONIC_CYCLE_BFGS_HISTORY_FAILURE
        # Electronic convergence could not have been reached either during ionic relaxation or during final scf
        if not electronic_convergence_reached:
            if final_scf:
                return self.exit_codes.ERROR_IONIC_CONVERGENCE_REACHED_FINAL_SCF_FAILED
            return self.exit_codes.ERROR_IONIC_CYCLE_ELECTRONIC_CONVERGENCE_NOT_REACHED
        # Here we have no direct warnings from Quantum ESPRESSO that suggest something went wrong, but we better make
        # sure and double check manually that all forces (and optionally stresses) are converged.
        if not self.is_ionically_converged(trajectory):
            if self.is_ionically_converged(trajectory, except_final_scf=True):
                # The forces and stresses of ionic cycle are below threshold, but those of the final SCF exceed them.
                # This is not necessarily a problem since the calculation starts from scratch after the variable cell
                # relaxation and the forces and stresses can be slightly different. Still it is useful to distinguish
                # these calculations so we return a special exit code.
                return self.exit_codes.ERROR_IONIC_CONVERGENCE_REACHED_EXCEPT_IN_FINAL_SCF
            return self.exit_codes.ERROR_IONIC_CONVERGENCE_NOT_REACHED
    def is_ionically_converged(self, trajectory, except_final_scf=False):
        """Verify that the calculation was ionically converged.

        For a `relax` calculation this means the forces stored in the `trajectory` are all below the force convergence
        threshold which is retrieved from the input parameters. For a `vc-relax` calculation, the stress should also
        give a pressure that is below the pressure convergence threshold.

        :param trajectory: the output trajectory data
        :param except_final_scf: if True will return whether the calculation is converged except for the final scf.
        :raises RuntimeError: if the calculation type is neither `relax` nor `vc-relax`.
        """
        from aiida_quantumespresso.utils.defaults.calculation import pw
        from aiida_quantumespresso.utils.validation.trajectory import verify_convergence_trajectory
        relax_type = self.get_calculation_type()
        parameters = self.node.inputs.parameters.get_dict()
        threshold_forces = parameters.get('CONTROL', {}).get('forc_conv_thr', pw.forc_conv_thr)
        threshold_stress = parameters.get('CELL', {}).get('press_conv_thr', pw.press_conv_thr)
        external_pressure = parameters.get('CELL', {}).get('press', 0)
        # Through the `cell_dofree` the degrees of freedom of the cell can be constrained, which makes the threshold on
        # the stress hard to interpret. Therefore, unless the `cell_dofree` is set to the default `all` where the cell
        # is fully unconstrained, the stress is ignored even if an explicit `press_conv_thr` is specified in the inputs.
        constrained_cell = parameters.get('CELL', {}).get('cell_dofree', 'all') != 'all'
        if constrained_cell:
            threshold_stress = None
        if relax_type == 'relax':
            return verify_convergence_trajectory(trajectory, -1, *[threshold_forces, None])
        if relax_type == 'vc-relax':
            # Check both the last relaxation frame (-2) and the final scf frame (-1).
            values = [threshold_forces, threshold_stress, external_pressure]
            converged_relax = verify_convergence_trajectory(trajectory, -2, *values)
            converged_final = verify_convergence_trajectory(trajectory, -1, *values)
            return converged_relax and (converged_final or except_final_scf)
        raise RuntimeError(f'unknown relax_type: {relax_type}')
    def parse_xml(self, dir_with_bands=None, parser_options=None):
        """Parse the XML output file.

        :param dir_with_bands: absolute path to directory containing individual k-point XML files for old XML format.
        :param parser_options: optional dictionary with parser options
        :return: tuple of two dictionaries, first with raw parsed data and second with log messages
        """
        from .parse_xml.exceptions import XMLParseError, XMLUnsupportedFormatError
        from .parse_xml.pw.parse import parse_xml
        logs = get_logging_container()
        parsed_data = {}
        object_names = self.retrieved.list_object_names()
        xml_files = [xml_file for xml_file in self.node.process_class.xml_filenames if xml_file in object_names]
        if not xml_files:
            # A missing XML is only an error when the calculation did not opt out via `without_xml`.
            if not self.node.get_option('without_xml'):
                self.exit_code_xml = self.exit_codes.ERROR_OUTPUT_XML_MISSING
            return parsed_data, logs
        if len(xml_files) > 1:
            self.exit_code_xml = self.exit_codes.ERROR_OUTPUT_XML_MULTIPLE
            return parsed_data, logs
        try:
            with self.retrieved.open(xml_files[0]) as xml_file:
                parsed_data, logs = parse_xml(xml_file, dir_with_bands)
        except IOError:
            self.exit_code_xml = self.exit_codes.ERROR_OUTPUT_XML_READ
        except XMLParseError:
            self.exit_code_xml = self.exit_codes.ERROR_OUTPUT_XML_PARSE
        except XMLUnsupportedFormatError:
            self.exit_code_xml = self.exit_codes.ERROR_OUTPUT_XML_FORMAT
        except Exception:
            # Unknown parser failure: keep the traceback in the logs for debugging.
            logs.critical.append(traceback.format_exc())
            self.exit_code_xml = self.exit_codes.ERROR_UNEXPECTED_PARSER_EXCEPTION
        return parsed_data, logs
    def parse_stdout(self, parameters, parser_options=None, parsed_xml=None):
        """Parse the stdout output file.

        :param parameters: the input parameters dictionary
        :param parser_options: optional dictionary with parser options
        :param parsed_xml: the raw parsed data from the XML output
        :return: tuple of two dictionaries, first with raw parsed data and second with log messages
        """
        from aiida_quantumespresso.parsers.parse_raw.pw import parse_stdout
        logs = get_logging_container()
        parsed_data = {}
        filename_stdout = self.node.get_attribute('output_filename')
        if filename_stdout not in self.retrieved.list_object_names():
            self.exit_code_stdout = self.exit_codes.ERROR_OUTPUT_STDOUT_MISSING
            return parsed_data, logs
        try:
            stdout = self.retrieved.get_object_content(filename_stdout)
        except IOError:
            self.exit_code_stdout = self.exit_codes.ERROR_OUTPUT_STDOUT_READ
            return parsed_data, logs
        try:
            parsed_data, logs = parse_stdout(stdout, parameters, parser_options, parsed_xml)
        except Exception:
            # Unknown parser failure: keep the traceback in the logs for debugging.
            logs.critical.append(traceback.format_exc())
            self.exit_code_stdout = self.exit_codes.ERROR_UNEXPECTED_PARSER_EXCEPTION
        # If the stdout was incomplete, most likely the job was interrupted before it could cleanly finish, so the
        # output files are most likely corrupt and cannot be restarted from
        if 'ERROR_OUTPUT_STDOUT_INCOMPLETE' in logs['error']:
            self.exit_code_stdout = self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE
        # Under certain conditions, such as the XML missing or being incorrect, the structure data might be incomplete.
        # Since following code depends on it, we replace missing information taken from the input structure.
        structure = self.node.inputs.structure
        parsed_data.setdefault('structure', {}).setdefault('cell', {})
        if 'lattice_vectors' not in parsed_data['structure']['cell']:
            parsed_data['structure']['cell']['lattice_vectors'] = structure.cell
        if 'atoms' not in parsed_data['structure']['cell']:
            symbols = {s.kind_name: structure.get_kind(s.kind_name).symbol for s in structure.sites}
            parsed_data['structure']['cell']['atoms'] = [(symbols[s.kind_name], s.position) for s in structure.sites]
        return parsed_data, logs
@staticmethod
def build_output_parameters(parsed_stdout, parsed_xml):
    """Merge the raw parsed data from the stdout and XML output files into a single dictionary.

    Keys appearing in both dictionaries must carry identical values; the merged result gives
    precedence to the stdout entries, although conflicts should already have been resolved by
    the `parse_stdout` method.

    :param parsed_stdout: the raw parsed data dictionary from the stdout output file
    :param parsed_xml: the raw parsed data dictionary from the XML output file
    :return: the union of the two raw parsed data dictionaries
    :raises AssertionError: if a key occurs in both dictionaries with differing values
    """
    for key, value_stdout in parsed_stdout.items():
        if key in parsed_xml and value_stdout != parsed_xml[key]:
            raise AssertionError(
                '{} found in both dictionaries with different values: {} vs. {}'.format(
                    key, value_stdout, parsed_xml[key]
                )
            )

    # Later keys win, so stdout entries take precedence over the XML ones
    return {**parsed_xml, **parsed_stdout}
def build_output_structure(self, parsed_structure):
    """Build the output structure from the raw parsed data.

    :param parsed_structure: the dictionary with raw parsed structure data
    :return: a new `StructureData` created from the parsed data iff the calculation type produces a new
        structure and the parsed data contained a cell definition. In all other cases, the input structure
        will be returned.
    """
    from aiida_quantumespresso.parsers.parse_raw import convert_qe2aiida_structure

    input_structure = self.node.inputs.structure
    calculation = self.node.inputs.parameters.get_dict()['CONTROL']['calculation']

    # Only these calculation types can change the structure, and only if a cell was actually parsed
    if calculation in ('relax', 'vc-relax', 'md', 'vc-md') and 'cell' in parsed_structure:
        return convert_qe2aiida_structure(parsed_structure, input_structure)

    return input_structure
@staticmethod
def build_output_trajectory(parsed_trajectory, structure):
    """Build the output trajectory from the raw parsed trajectory data.

    .. note:: the entries that are consumed are popped from `parsed_trajectory`, mutating the passed dict.

    :param parsed_trajectory: the raw parsed trajectory data
    :param structure: the output structure, used as fallback when positions or cell were never printed
    :return: a `TrajectoryData` or None
    """
    fractional = False

    if 'atomic_positions_relax' in parsed_trajectory:
        positions = numpy.array(parsed_trajectory.pop('atomic_positions_relax'))
    elif 'atomic_fractionals_relax' in parsed_trajectory:
        fractional = True
        positions = numpy.array(parsed_trajectory.pop('atomic_fractionals_relax'))
    else:
        # The positions were never printed, the calculation did not change the structure
        positions = numpy.array([[site.position for site in structure.sites]])

    try:
        cells = numpy.array(parsed_trajectory.pop('lattice_vectors_relax'))
    except KeyError:
        # The cell is never printed, the calculation was at fixed cell
        cells = numpy.array([structure.cell])

    # Ensure there are as many frames for cell as positions, even when the calculation was done at fixed cell
    if len(cells) == 1 and len(positions) > 1:
        cells = numpy.array([cells[0]] * len(positions))

    if fractional:
        # convert positions to cartesian
        positions = numpy.einsum('ijk, ikm -> ijm', positions, cells)

    symbols = [str(site.kind_name) for site in structure.sites]
    stepids = numpy.arange(len(positions))

    trajectory = orm.TrajectoryData()
    trajectory.set_trajectory(
        stepids=stepids,
        cells=cells,
        symbols=symbols,
        positions=positions,
    )

    # Any remaining (unpopped) per-step properties are stored as extra arrays on the trajectory
    for key, value in parsed_trajectory.items():
        trajectory.set_array(key, numpy.array(value))

    return trajectory
def build_output_kpoints(self, parsed_parameters, structure):
    """Build the output kpoints from the raw parsed data.

    .. note:: the k-point related entries are popped from `parsed_parameters`, mutating the passed dict.

    :param parsed_parameters: the raw parsed data
    :param structure: the output structure whose cell is set on the resulting `KpointsData`
    :return: a `KpointsData` or None
    """
    k_points_list = parsed_parameters.pop('k_points', None)
    k_points_units = parsed_parameters.pop('k_points_units', None)
    k_points_weights_list = parsed_parameters.pop('k_points_weights', None)

    if k_points_list is None or k_points_weights_list is None:
        return None

    # The parsed k-points are expected in cartesian coordinates, in units of reciprocal angstrom
    if k_points_units != '1 / angstrom':
        self.logger.error('Error in kpoints units (should be cartesian)')
        self.exit_code_parser = self.exit_codes.ERROR_INVALID_KPOINT_UNITS
        return None

    kpoints = orm.KpointsData()
    kpoints.set_cell_from_structure(structure)
    kpoints.set_kpoints(k_points_list, cartesian=True, weights=k_points_weights_list)

    return kpoints
def build_output_bands(self, parsed_bands, parsed_kpoints=None):
    """Build the output bands from the raw parsed bands data.

    :param parsed_bands: the raw parsed bands data
    :param parsed_kpoints: the `KpointsData` to use for the bands
    :return: a `BandsData` or None
    """
    if not parsed_bands or not parsed_kpoints:
        return

    # In the case of input kpoints that define a list of k-points, i.e. along high-symmetry path, and explicit
    # labels, set those labels also on the output kpoints to be used for the bands. This will allow plotting
    # utilities to place k-point labels along the x-axis.
    try:
        self.node.inputs.kpoints.get_kpoints()
        parsed_kpoints.labels = self.node.inputs.kpoints.labels
    except (AttributeError, ValueError, TypeError):
        # AttributeError: input kpoints defines a mesh, not an explicit list
        # TypeError: inputs kpoints do not define any labels
        # ValueError: input kpoints labels are not commensurate with `parsed_kpoints`
        pass

    # Correct the occupation for nspin=1 calculations where Quantum ESPRESSO populates each band only halfway
    if len(parsed_bands['occupations']) > 1:
        occupations = parsed_bands['occupations']
    else:
        occupations = 2. * numpy.array(parsed_bands['occupations'][0])

    # A single spin channel is unwrapped to a flat array; two spin channels are kept nested
    if len(parsed_bands['bands']) > 1:
        bands_energies = parsed_bands['bands']
    else:
        bands_energies = parsed_bands['bands'][0]

    bands = orm.BandsData()
    bands.set_kpointsdata(parsed_kpoints)
    bands.set_bands(bands_energies, units=parsed_bands['bands_units'], occupations=occupations)

    return bands
@staticmethod
def get_parser_settings_key():
    """Return the key under which the optional parser options live in the `settings` input node.

    :return: the string 'parser_options'
    """
    return 'parser_options'
@staticmethod
def final_trajectory_frame_to_parameters(parameters, parsed_trajectory):
    """Copy the last frame of certain properties from the `TrajectoryData` to the outputs parameters.

    This makes these properties queryable. The `parameters` dictionary is updated in place.

    :param parameters: the output parameters dictionary, updated in place
    :param parsed_trajectory: the raw parsed trajectory data, mapping property names to per-step lists
    """
    # Only these scalar-per-step properties are promoted to the parameters output
    include_keys = {
        'energy',
        'energy_accuracy',
        'energy_ewald',
        'energy_hartree',
        'energy_hubbard',
        'energy_one_electron',
        'energy_threshold',
        'energy_vdw',
        'energy_xc',
        'energy_smearing',
        'energy_one_center_paw',
        'energy_est_exchange',
        'energy_fock',
        'scf_iterations',
        'fermi_energy',
        'total_force',
        'total_magnetization',
        'absolute_magnetization',
    }

    parameters.update({
        key: values[-1] for key, values in parsed_trajectory.items() if key in include_keys
    })
def get_extended_symmetries(self):
    """Return the extended dictionary of symmetries based on reduced symmetries stored in output parameters.

    The reduced symmetries store only an index (`symmetry_number`) into the table of known symmetry
    operations, so each entry is re-expanded here with the explicit name, rotation matrix and inversion flag.

    :return: a list of dictionaries, one per symmetry operation
    """
    from aiida_quantumespresso.parsers.parse_raw.pw import get_symmetry_mapping

    possible_symmetries = get_symmetry_mapping()
    parameters = self.node.get_outgoing(node_class=orm.Dict).get_node_by_label('output_parameters')

    symmetries_extended = []
    symmetries_reduced = parameters.get_dict()['symmetries']  # NOTE(review): original comment read "rimetti lo zero" ("put the zero back") — intent unclear, verify

    for element in symmetries_reduced:
        symmetry = {}

        # Copy over the optional keys that may or may not have been parsed for this symmetry
        for keys in ['t_rev', 'equivalent_ions', 'fractional_translation']:
            try:
                symmetry[keys] = element[keys]
            except KeyError:
                pass

        # expand the rest: look up the full operation by its index in the mapping table
        symmetry['name'] = possible_symmetries[element['symmetry_number']]['name']
        symmetry['rotation'] = possible_symmetries[element['symmetry_number']]['matrix']
        symmetry['inversion'] = possible_symmetries[element['symmetry_number']]['inversion']

        symmetries_extended.append(symmetry)

    return symmetries_extended
|
# -*- coding: utf-8 -*-
"""`Parser` implementation for the `PwCalculation` calculation job class."""
import traceback
import numpy
from aiida import orm
from aiida.common import exceptions
from aiida_quantumespresso.utils.mapping import get_logging_container
from .base import Parser
from .parse_raw.pw import reduce_symmetries
class PwParser(Parser):
"""`Parser` implementation for the `PwCalculation` calculation job class."""
def parse(self, **kwargs):
    """Parse the retrieved files of a completed `PwCalculation` into output nodes.

    Two nodes that are expected are the default 'retrieved' `FolderData` node which will store the retrieved files
    permanently in the repository. The second required node is a filepath under the key `retrieved_temporary_files`
    which should contain the temporary retrieved files.

    :return: an exit code if a problem was detected, None (success) otherwise
    """
    dir_with_bands = None

    # Exit codes set by the individual parsing stages; they are evaluated together after parsing completes
    self.exit_code_xml = None
    self.exit_code_stdout = None
    self.exit_code_parser = None

    try:
        settings = self.node.inputs.settings.get_dict()
    except exceptions.NotExistent:
        settings = {}

    # Look for optional settings input node and potential 'parser_options' dictionary within it
    parser_options = settings.get(self.get_parser_settings_key(), None)

    # Verify that the retrieved_temporary_folder is within the arguments if temporary files were specified
    if self.node.get_attribute('retrieve_temporary_list', None):
        try:
            dir_with_bands = kwargs['retrieved_temporary_folder']
        except KeyError:
            return self.exit(self.exit_codes.ERROR_NO_RETRIEVED_TEMPORARY_FOLDER)

    parameters = self.node.inputs.parameters.get_dict()
    parsed_xml, logs_xml = self.parse_xml(dir_with_bands, parser_options)
    parsed_stdout, logs_stdout = self.parse_stdout(parameters, parser_options, parsed_xml)

    # Pop the sub-dictionaries that are converted to dedicated output nodes below
    parsed_bands = parsed_stdout.pop('bands', {})
    parsed_structure = parsed_stdout.pop('structure', {})
    parsed_trajectory = parsed_stdout.pop('trajectory', {})
    parsed_parameters = self.build_output_parameters(parsed_stdout, parsed_xml)

    # Append the last frame of some of the smaller trajectory arrays to the parameters for easy querying
    self.final_trajectory_frame_to_parameters(parsed_parameters, parsed_trajectory)

    # If the parser option 'all_symmetries' is False, we reduce the raw parsed symmetries to save space
    all_symmetries = False if parser_options is None else parser_options.get('all_symmetries', False)

    if not all_symmetries and 'cell' in parsed_structure:
        reduce_symmetries(parsed_parameters, parsed_structure, self.logger)

    structure = self.build_output_structure(parsed_structure)
    kpoints = self.build_output_kpoints(parsed_parameters, structure)
    bands = self.build_output_bands(parsed_bands, kpoints)
    trajectory = self.build_output_trajectory(parsed_trajectory, structure)

    # Determine whether the input kpoints were defined as a mesh or as an explicit list
    try:
        self.node.inputs.kpoints.get_kpoints()
    except AttributeError:
        input_kpoints_explicit = False
    else:
        input_kpoints_explicit = True

    # Only attach the `KpointsData` as output if there will be no `BandsData` output and inputs were defined as mesh
    if kpoints and not bands and not input_kpoints_explicit:
        self.out('output_kpoints', kpoints)

    if bands:
        self.out('output_band', bands)

    if trajectory:
        self.out('output_trajectory', trajectory)

    # An unstored structure means a new one was built from the parsed data, so it is an actual output
    if not structure.is_stored:
        self.out('output_structure', structure)

    # Separate the atomic_occupations dictionary in its own node if it is present
    atomic_occupations = parsed_parameters.pop('atomic_occupations', None)
    if atomic_occupations:
        self.out('output_atomic_occupations', orm.Dict(dict=atomic_occupations))

    self.out('output_parameters', orm.Dict(dict=parsed_parameters))

    # Emit the logs returned by the XML and stdout parsing through the logger
    # If the calculation was an initialization run, reset the XML logs because they will contain a lot of verbose
    # warnings from the schema parser about incomplete data, but that is to be expected in an initialization run.
    if settings.get('ONLY_INITIALIZATION', False):
        logs_xml.pop('error')

    ignore = ['Error while parsing ethr.', 'DEPRECATED: symmetry with ibrav=0, use correct ibrav instead']
    self.emit_logs([logs_stdout, logs_xml], ignore=ignore)

    # First check for specific known problems that can cause a pre-mature termination of the calculation
    exit_code = self.validate_premature_exit(logs_stdout)
    if exit_code:
        return self.exit(exit_code)

    # If the both stdout and xml exit codes are set, there was a basic problem with both output files and there
    # is no need to investigate any further.
    if self.exit_code_stdout and self.exit_code_xml:
        return self.exit(self.exit_codes.ERROR_OUTPUT_FILES)

    if self.exit_code_stdout:
        return self.exit(self.exit_code_stdout)

    if self.exit_code_xml:
        return self.exit(self.exit_code_xml)

    # First determine issues that can occur for all calculation types. Note that the generic errors, that are
    # common to all types are done first. If a problem is found there, we return the exit code and don't continue
    for validator in [self.validate_electronic, self.validate_dynamics, self.validate_ionic]:
        exit_code = validator(trajectory, parsed_parameters, logs_stdout)
        if exit_code:
            return self.exit(exit_code)
def get_calculation_type(self):
    """Return the type of the calculation, defaulting to 'scf' when not explicitly set in the inputs."""
    control = self.node.inputs.parameters.get_attribute('CONTROL', {})
    return control.get('calculation', 'scf')
def validate_premature_exit(self, logs):
    """Analyze problems that will cause a pre-mature termination of the calculation, controlled or not.

    :param logs: the log container returned by the stdout parser
    :return: an exit code if a known premature-termination problem was detected, None otherwise
    """
    # A walltime kill that also truncated the output gets its own dedicated exit code
    if 'ERROR_OUT_OF_WALLTIME' in logs['error'] and 'ERROR_OUTPUT_STDOUT_INCOMPLETE' in logs['error']:
        return self.exit_codes.ERROR_OUT_OF_WALLTIME_INTERRUPTED

    known_failures = (
        'ERROR_OUT_OF_WALLTIME',
        'ERROR_CHARGE_IS_WRONG',
        'ERROR_SYMMETRY_NON_ORTHOGONAL_OPERATION',
        'ERROR_DEXX_IS_NEGATIVE',
        'ERROR_COMPUTING_CHOLESKY',
        'ERROR_NPOOLS_TOO_HIGH',
    )

    # Each of these labels maps one-to-one onto an exit code of the same name
    for label in known_failures:
        if label in logs['error']:
            return self.exit_codes.get(label)
def validate_electronic(self, trajectory, parameters, logs):
    """Analyze problems that are specific to `electronic` type calculations: i.e. `scf`, `nscf` and `bands`.

    :return: an exit code if electronic convergence failed, None otherwise
    """
    is_electronic = self.get_calculation_type() in ('scf', 'nscf', 'bands')

    if is_electronic and 'ERROR_ELECTRONIC_CONVERGENCE_NOT_REACHED' in logs['error']:
        return self.exit_codes.ERROR_ELECTRONIC_CONVERGENCE_NOT_REACHED
def validate_dynamics(self, trajectory, parameters, logs):
    """Analyze problems that are specific to `dynamics` type calculations: i.e. `md` and `vc-md`.

    Currently no dynamics-specific problems are checked for, so this always returns None.
    """
    if self.get_calculation_type() not in ('md', 'vc-md'):
        return
def validate_ionic(self, trajectory, parameters, logs):
    """Analyze problems that are specific to `ionic` type calculations: i.e. `relax` and `vc-relax`.

    :param trajectory: the output trajectory data
    :param parameters: the output parameters dictionary
    :param logs: the log container returned by the stdout parser
    :return: an exit code if an ionic-convergence problem was detected, None otherwise
    """
    if self.get_calculation_type() not in ['relax', 'vc-relax']:
        return

    electronic_convergence_reached = 'ERROR_ELECTRONIC_CONVERGENCE_NOT_REACHED' not in logs.error
    ionic_convergence_reached = 'ERROR_IONIC_CONVERGENCE_NOT_REACHED' not in logs.error
    bfgs_history_failure = 'ERROR_IONIC_CYCLE_BFGS_HISTORY_FAILURE' in logs.error
    maximum_ionic_steps_reached = 'ERROR_MAXIMUM_IONIC_STEPS_REACHED' in logs.warning
    final_scf = parameters.get('final_scf', False)

    # The electronic self-consistency cycle failed before reaching ionic convergence
    if not ionic_convergence_reached and not electronic_convergence_reached:
        return self.exit_codes.ERROR_IONIC_CYCLE_ELECTRONIC_CONVERGENCE_NOT_REACHED

    # Ionic convergence was not reached because maximum number of steps was exceeded
    if not ionic_convergence_reached and maximum_ionic_steps_reached:
        return self.exit_codes.ERROR_IONIC_CYCLE_EXCEEDED_NSTEP

    # BFGS fails twice in a row in which case QE will print that convergence is reached while it is not necessarily
    if bfgs_history_failure:

        # If electronic convergence was not reached, this had to have been a `vc-relax` where final SCF failed
        if not electronic_convergence_reached:
            return self.exit_codes.ERROR_IONIC_CYCLE_BFGS_HISTORY_AND_FINAL_SCF_FAILURE

        # If the forces and optionally stresses are already converged, consider the calculation successful
        if self.is_ionically_converged(trajectory):
            return

        return self.exit_codes.ERROR_IONIC_CYCLE_BFGS_HISTORY_FAILURE

    # Electronic convergence could not have been reached either during ionic relaxation or during final scf
    if not electronic_convergence_reached:

        if final_scf:
            return self.exit_codes.ERROR_IONIC_CONVERGENCE_REACHED_FINAL_SCF_FAILED

        return self.exit_codes.ERROR_IONIC_CYCLE_ELECTRONIC_CONVERGENCE_NOT_REACHED

    # Here we have no direct warnings from Quantum ESPRESSO that suggest something went wrong, but we better make
    # sure and double check manually that all forces (and optionally stresses) are converged.
    if not self.is_ionically_converged(trajectory):

        if self.is_ionically_converged(trajectory, except_final_scf=True):
            # The forces and stresses of ionic cycle are below threshold, but those of the final SCF exceed them.
            # This is not necessarily a problem since the calculation starts from scratch after the variable cell
            # relaxation and the forces and stresses can be slightly different. Still it is useful to distinguish
            # these calculations so we return a special exit code.
            return self.exit_codes.ERROR_IONIC_CONVERGENCE_REACHED_EXCEPT_IN_FINAL_SCF

        return self.exit_codes.ERROR_IONIC_CONVERGENCE_NOT_REACHED
def is_ionically_converged(self, trajectory, except_final_scf=False):
    """Verify that the calculation was ionically converged.

    For a `relax` calculation this means the forces stored in the `trajectory` are all below the force convergence
    threshold which is retrieved from the input parameters. For a `vc-relax` calculation, the stress should also
    give a pressure that is below the pressure convergence threshold.

    :param trajectory: the output trajectory data
    :param except_final_scf: if True will return whether the calculation is converged except for the final scf.
    :return: True if the calculation is considered ionically converged, False otherwise
    :raises RuntimeError: if the calculation type is neither `relax` nor `vc-relax`
    """
    from aiida_quantumespresso.utils.defaults.calculation import pw
    from aiida_quantumespresso.utils.validation.trajectory import verify_convergence_trajectory

    relax_type = self.get_calculation_type()
    parameters = self.node.inputs.parameters.get_dict()
    threshold_forces = parameters.get('CONTROL', {}).get('forc_conv_thr', pw.forc_conv_thr)
    threshold_stress = parameters.get('CELL', {}).get('press_conv_thr', pw.press_conv_thr)
    external_pressure = parameters.get('CELL', {}).get('press', 0)

    # Through the `cell_dofree` the degrees of freedom of the cell can be constrained, which makes the threshold on
    # the stress hard to interpret. Therefore, unless the `cell_dofree` is set to the default `all` where the cell
    # is fully unconstrained, the stress is ignored even if an explicit `press_conv_thr` is specified in the inputs.
    constrained_cell = parameters.get('CELL', {}).get('cell_dofree', 'all') != 'all'

    if constrained_cell:
        threshold_stress = None

    if relax_type == 'relax':
        # Only the forces of the last frame need to be converged; the stress threshold does not apply
        return verify_convergence_trajectory(trajectory, -1, *[threshold_forces, None])

    if relax_type == 'vc-relax':
        # Check the second-to-last frame (end of the ionic cycle) and the last frame (the final SCF that is
        # restarted from scratch on the relaxed cell); `except_final_scf` allows the latter to be waived.
        values = [threshold_forces, threshold_stress, external_pressure]
        converged_relax = verify_convergence_trajectory(trajectory, -2, *values)
        converged_final = verify_convergence_trajectory(trajectory, -1, *values)

        return converged_relax and (converged_final or except_final_scf)

    raise RuntimeError(f'unknown relax_type: {relax_type}')
def parse_xml(self, dir_with_bands=None, parser_options=None):
    """Parse the XML output file.

    Failures do not raise but are recorded in `self.exit_code_xml`, so that the caller can weigh them
    against the result of the stdout parsing.

    :param dir_with_bands: absolute path to directory containing individual k-point XML files for old XML format.
    :param parser_options: optional dictionary with parser options
    :return: tuple of two dictionaries, first with raw parsed data and second with log messages
    """
    from .parse_xml.exceptions import XMLParseError, XMLUnsupportedFormatError
    from .parse_xml.pw.parse import parse_xml

    logs = get_logging_container()
    parsed_data = {}

    object_names = self.retrieved.list_object_names()
    xml_files = [xml_file for xml_file in self.node.process_class.xml_filenames if xml_file in object_names]

    # A missing XML file is only an error if the calculation was not explicitly run without one
    if not xml_files:
        if not self.node.get_option('without_xml'):
            self.exit_code_xml = self.exit_codes.ERROR_OUTPUT_XML_MISSING
        return parsed_data, logs

    if len(xml_files) > 1:
        self.exit_code_xml = self.exit_codes.ERROR_OUTPUT_XML_MULTIPLE
        return parsed_data, logs

    try:
        with self.retrieved.open(xml_files[0]) as xml_file:
            parsed_data, logs = parse_xml(xml_file, dir_with_bands)
    except IOError:
        self.exit_code_xml = self.exit_codes.ERROR_OUTPUT_XML_READ
    except XMLParseError:
        self.exit_code_xml = self.exit_codes.ERROR_OUTPUT_XML_PARSE
    except XMLUnsupportedFormatError:
        self.exit_code_xml = self.exit_codes.ERROR_OUTPUT_XML_FORMAT
    except Exception:
        # Unexpected failures are recorded with the full traceback so they can be debugged from the logs
        logs.critical.append(traceback.format_exc())
        self.exit_code_xml = self.exit_codes.ERROR_UNEXPECTED_PARSER_EXCEPTION

    return parsed_data, logs
def parse_stdout(self, parameters, parser_options=None, parsed_xml=None):
    """Parse the stdout output file.

    Failures do not raise but are recorded in `self.exit_code_stdout`, so that the caller can weigh them
    against the result of the XML parsing.

    :param parameters: the input parameters dictionary
    :param parser_options: optional dictionary with parser options
    :param parsed_xml: the raw parsed data from the XML output
    :return: tuple of two dictionaries, first with raw parsed data and second with log messages
    """
    from aiida_quantumespresso.parsers.parse_raw.pw import parse_stdout

    logs = get_logging_container()
    parsed_data = {}

    filename_stdout = self.node.get_attribute('output_filename')

    if filename_stdout not in self.retrieved.list_object_names():
        self.exit_code_stdout = self.exit_codes.ERROR_OUTPUT_STDOUT_MISSING
        return parsed_data, logs

    try:
        stdout = self.retrieved.get_object_content(filename_stdout)
    except IOError:
        self.exit_code_stdout = self.exit_codes.ERROR_OUTPUT_STDOUT_READ
        return parsed_data, logs

    try:
        parsed_data, logs = parse_stdout(stdout, parameters, parser_options, parsed_xml)
    except Exception:
        # Unexpected failures are recorded with the full traceback so they can be debugged from the logs
        logs.critical.append(traceback.format_exc())
        self.exit_code_stdout = self.exit_codes.ERROR_UNEXPECTED_PARSER_EXCEPTION

    # If the stdout was incomplete, most likely the job was interrupted before it could cleanly finish, so the
    # output files are most likely corrupt and cannot be restarted from
    if 'ERROR_OUTPUT_STDOUT_INCOMPLETE' in logs['error']:
        self.exit_code_stdout = self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE

    # Under certain conditions, such as the XML missing or being incorrect, the structure data might be incomplete.
    # Since following code depends on it, we replace missing information taken from the input structure.
    structure = self.node.inputs.structure
    parsed_data.setdefault('structure', {}).setdefault('cell', {})

    if 'lattice_vectors' not in parsed_data['structure']['cell']:
        parsed_data['structure']['cell']['lattice_vectors'] = structure.cell

    if 'atoms' not in parsed_data['structure']['cell']:
        symbols = {s.kind_name: structure.get_kind(s.kind_name).symbol for s in structure.sites}
        parsed_data['structure']['cell']['atoms'] = [(symbols[s.kind_name], s.position) for s in structure.sites]

    return parsed_data, logs
@staticmethod
def build_output_parameters(parsed_stdout, parsed_xml):
    """Merge the raw parsed data from the stdout and XML output files into a single dictionary.

    Keys appearing in both dictionaries must carry identical values; the merged result gives
    precedence to the stdout entries, although conflicts should already have been resolved by
    the `parse_stdout` method.

    :param parsed_stdout: the raw parsed data dictionary from the stdout output file
    :param parsed_xml: the raw parsed data dictionary from the XML output file
    :return: the union of the two raw parsed data dictionaries
    :raises AssertionError: if a key occurs in both dictionaries with differing values
    """
    for key, value_stdout in parsed_stdout.items():
        if key in parsed_xml and value_stdout != parsed_xml[key]:
            raise AssertionError(
                '{} found in both dictionaries with different values: {} vs. {}'.format(
                    key, value_stdout, parsed_xml[key]
                )
            )

    # Later keys win, so stdout entries take precedence over the XML ones
    return {**parsed_xml, **parsed_stdout}
def build_output_structure(self, parsed_structure):
    """Build the output structure from the raw parsed data.

    :param parsed_structure: the dictionary with raw parsed structure data
    :return: a new `StructureData` created from the parsed data iff the calculation type produces a new
        structure and the parsed data contained a cell definition. In all other cases, the input structure
        will be returned.
    """
    from aiida_quantumespresso.parsers.parse_raw import convert_qe2aiida_structure

    input_structure = self.node.inputs.structure
    calculation = self.node.inputs.parameters.get_dict()['CONTROL']['calculation']

    # Only these calculation types can change the structure, and only if a cell was actually parsed
    if calculation in ('relax', 'vc-relax', 'md', 'vc-md') and 'cell' in parsed_structure:
        return convert_qe2aiida_structure(parsed_structure, input_structure)

    return input_structure
@staticmethod
def build_output_trajectory(parsed_trajectory, structure):
    """Build the output trajectory from the raw parsed trajectory data.

    .. note:: the entries that are consumed are popped from `parsed_trajectory`, mutating the passed dict.

    :param parsed_trajectory: the raw parsed trajectory data
    :param structure: the output structure, used as fallback when positions or cell were never printed
    :return: a `TrajectoryData` or None
    """
    fractional = False

    if 'atomic_positions_relax' in parsed_trajectory:
        positions = numpy.array(parsed_trajectory.pop('atomic_positions_relax'))
    elif 'atomic_fractionals_relax' in parsed_trajectory:
        fractional = True
        positions = numpy.array(parsed_trajectory.pop('atomic_fractionals_relax'))
    else:
        # The positions were never printed, the calculation did not change the structure
        positions = numpy.array([[site.position for site in structure.sites]])

    try:
        cells = numpy.array(parsed_trajectory.pop('lattice_vectors_relax'))
    except KeyError:
        # The cell is never printed, the calculation was at fixed cell
        cells = numpy.array([structure.cell])

    # Ensure there are as many frames for cell as positions, even when the calculation was done at fixed cell
    if len(cells) == 1 and len(positions) > 1:
        cells = numpy.array([cells[0]] * len(positions))

    if fractional:
        # convert positions to cartesian
        positions = numpy.einsum('ijk, ikm -> ijm', positions, cells)

    symbols = [str(site.kind_name) for site in structure.sites]
    stepids = numpy.arange(len(positions))

    trajectory = orm.TrajectoryData()
    trajectory.set_trajectory(
        stepids=stepids,
        cells=cells,
        symbols=symbols,
        positions=positions,
    )

    # Any remaining (unpopped) per-step properties are stored as extra arrays on the trajectory
    for key, value in parsed_trajectory.items():
        trajectory.set_array(key, numpy.array(value))

    return trajectory
def build_output_kpoints(self, parsed_parameters, structure):
    """Build the output kpoints from the raw parsed data.

    .. note:: the k-point related entries are popped from `parsed_parameters`, mutating the passed dict.

    :param parsed_parameters: the raw parsed data
    :param structure: the output structure whose cell is set on the resulting `KpointsData`
    :return: a `KpointsData` or None
    """
    k_points_list = parsed_parameters.pop('k_points', None)
    k_points_units = parsed_parameters.pop('k_points_units', None)
    k_points_weights_list = parsed_parameters.pop('k_points_weights', None)

    if k_points_list is None or k_points_weights_list is None:
        return None

    # The parsed k-points are expected in cartesian coordinates, in units of reciprocal angstrom
    if k_points_units != '1 / angstrom':
        self.logger.error('Error in kpoints units (should be cartesian)')
        self.exit_code_parser = self.exit_codes.ERROR_INVALID_KPOINT_UNITS
        return None

    kpoints = orm.KpointsData()
    kpoints.set_cell_from_structure(structure)
    kpoints.set_kpoints(k_points_list, cartesian=True, weights=k_points_weights_list)

    return kpoints
def build_output_bands(self, parsed_bands, parsed_kpoints=None):
    """Build the output bands from the raw parsed bands data.

    :param parsed_bands: the raw parsed bands data
    :param parsed_kpoints: the `KpointsData` to use for the bands
    :return: a `BandsData` or None
    """
    if not parsed_bands or not parsed_kpoints:
        return

    # In the case of input kpoints that define a list of k-points, i.e. along high-symmetry path, and explicit
    # labels, set those labels also on the output kpoints to be used for the bands. This will allow plotting
    # utilities to place k-point labels along the x-axis.
    try:
        self.node.inputs.kpoints.get_kpoints()
        parsed_kpoints.labels = self.node.inputs.kpoints.labels
    except (AttributeError, ValueError, TypeError):
        # AttributeError: input kpoints defines a mesh, not an explicit list
        # TypeError: inputs kpoints do not define any labels
        # ValueError: input kpoints labels are not commensurate with `parsed_kpoints`
        pass

    # Correct the occupation for nspin=1 calculations where Quantum ESPRESSO populates each band only halfway
    if len(parsed_bands['occupations']) > 1:
        occupations = parsed_bands['occupations']
    else:
        occupations = 2. * numpy.array(parsed_bands['occupations'][0])

    # A single spin channel is unwrapped to a flat array; two spin channels are kept nested
    if len(parsed_bands['bands']) > 1:
        bands_energies = parsed_bands['bands']
    else:
        bands_energies = parsed_bands['bands'][0]

    bands = orm.BandsData()
    bands.set_kpointsdata(parsed_kpoints)
    bands.set_bands(bands_energies, units=parsed_bands['bands_units'], occupations=occupations)

    return bands
@staticmethod
def get_parser_settings_key():
    """Return the key under which the optional parser options live in the `settings` input node.

    :return: the string 'parser_options'
    """
    return 'parser_options'
@staticmethod
def final_trajectory_frame_to_parameters(parameters, parsed_trajectory):
    """Copy the last frame of certain properties from the `TrajectoryData` to the outputs parameters.

    This makes these properties queryable. The `parameters` dictionary is updated in place.

    :param parameters: the output parameters dictionary, updated in place
    :param parsed_trajectory: the raw parsed trajectory data, mapping property names to per-step lists
    """
    # Only these scalar-per-step properties are promoted to the parameters output
    include_keys = {
        'energy',
        'energy_accuracy',
        'energy_ewald',
        'energy_hartree',
        'energy_hubbard',
        'energy_one_electron',
        'energy_threshold',
        'energy_vdw',
        'energy_xc',
        'energy_smearing',
        'energy_one_center_paw',
        'energy_est_exchange',
        'energy_fock',
        'scf_iterations',
        'fermi_energy',
        'total_force',
        'total_magnetization',
        'absolute_magnetization',
    }

    parameters.update({
        key: values[-1] for key, values in parsed_trajectory.items() if key in include_keys
    })
def get_extended_symmetries(self):
    """Return the extended dictionary of symmetries based on reduced symmetries stored in output parameters.

    The reduced symmetries store only an index (`symmetry_number`) into the table of known symmetry
    operations, so each entry is re-expanded here with the explicit name, rotation matrix and inversion flag.

    :return: a list of dictionaries, one per symmetry operation
    """
    from aiida_quantumespresso.parsers.parse_raw.pw import get_symmetry_mapping

    possible_symmetries = get_symmetry_mapping()
    parameters = self.node.get_outgoing(node_class=orm.Dict).get_node_by_label('output_parameters')

    symmetries_extended = []
    symmetries_reduced = parameters.get_dict()['symmetries']  # NOTE(review): original comment read "rimetti lo zero" ("put the zero back") — intent unclear, verify

    for element in symmetries_reduced:
        symmetry = {}

        # Copy over the optional keys that may or may not have been parsed for this symmetry
        for keys in ['t_rev', 'equivalent_ions', 'fractional_translation']:
            try:
                symmetry[keys] = element[keys]
            except KeyError:
                pass

        # expand the rest: look up the full operation by its index in the mapping table
        symmetry['name'] = possible_symmetries[element['symmetry_number']]['name']
        symmetry['rotation'] = possible_symmetries[element['symmetry_number']]['matrix']
        symmetry['inversion'] = possible_symmetries[element['symmetry_number']]['inversion']

        symmetries_extended.append(symmetry)

    return symmetries_extended
|
en
| 0.855484
|
# -*- coding: utf-8 -*- `Parser` implementation for the `PwCalculation` calculation job class. `Parser` implementation for the `PwCalculation` calculation job class. Parse the retrieved files of a completed `PwCalculation` into output nodes. Two nodes that are expected are the default 'retrieved' `FolderData` node which will store the retrieved files permanently in the repository. The second required node is a filepath under the key `retrieved_temporary_files` which should contain the temporary retrieved files. # Look for optional settings input node and potential 'parser_options' dictionary within it # Verify that the retrieved_temporary_folder is within the arguments if temporary files were specified # Append the last frame of some of the smaller trajectory arrays to the parameters for easy querying # If the parser option 'all_symmetries' is False, we reduce the raw parsed symmetries to save space # Determine whether the input kpoints were defined as a mesh or as an explicit list # Only attach the `KpointsData` as output if there will be no `BandsData` output and inputs were defined as mesh # Separate the atomic_occupations dictionary in its own node if it is present # Emit the logs returned by the XML and stdout parsing through the logger # If the calculation was an initialization run, reset the XML logs because they will contain a lot of verbose # warnings from the schema parser about incomplete data, but that is to be expected in an initialization run. # First check for specific known problems that can cause a pre-mature termination of the calculation # If the both stdout and xml exit codes are set, there was a basic problem with both output files and there # is no need to investigate any further. # First determine issues that can occurr for all calculation types. Note that the generic errors, that are # common to all types are done first. If a problem is found there, we return the exit code and don't continue Return the type of the calculation. 
Analyze problems that will cause a pre-mature termination of the calculation, controlled or not. Analyze problems that are specific to `electronic` type calculations: i.e. `scf`, `nscf` and `bands`. Analyze problems that are specific to `dynamics` type calculations: i.e. `md` and `vc-md`. Analyze problems that are specific to `ionic` type calculations: i.e. `relax` and `vc-relax`. # The electronic self-consistency cycle failed before reaching ionic convergence # Ionic convergence was not reached because maximum number of steps was exceeded # BFGS fails twice in a row in which case QE will print that convergence is reached while it is not necessarily # If electronic convergence was not reached, this had to have been a `vc-relax` where final SCF failed # If the forces and optionally stresses are already converged, consider the calculation successful # Electronic convergence could not have been reached either during ionic relaxation or during final scf # Here we have no direct warnings from Quantum ESPRESSO that suggest something went wrong, but we better make # sure and double check manually that all forces (and optionally stresses) are converged. # The forces and stresses of ionic cycle are below threshold, but those of the final SCF exceed them. # This is not necessarily a problem since the calculation starts from scratch after the variable cell # relaxation and the forces and stresses can be slightly different. Still it is useful to distinguish # these calculations so we return a special exit code. Verify that the calculation was ionically converged. For a `relax` calculation this means the forces stored in the `trajectory` are all below the force convergence threshold which is retrieved from the input parameters. For a `vc-relax` calculation, the stress should also give a pressure that is below the pressure convergence threshold. 
:param trajectory: the output trajectory data :param except_final_scf: if True will return whether the calculation is converged except for the final scf. # Through the `cell_dofree` the degrees of freedom of the cell can be constrained, which makes the threshold on # the stress hard to interpret. Therefore, unless the `cell_dofree` is set to the default `all` where the cell # is fully unconstrained, the stress is ignored even if an explicit `press_conv_thr` is specified in the inputs. Parse the XML output file. :param dir_with_bands: absolute path to directory containing individual k-point XML files for old XML format. :param parser_options: optional dictionary with parser options :return: tuple of two dictionaries, first with raw parsed data and second with log messages Parse the stdout output file. :param parameters: the input parameters dictionary :param parser_options: optional dictionary with parser options :param parsed_xml: the raw parsed data from the XML output :return: tuple of two dictionaries, first with raw parsed data and second with log messages # If the stdout was incomplete, most likely the job was interrupted before it could cleanly finish, so the # output files are most likely corrupt and cannot be restarted from # Under certain conditions, such as the XML missing or being incorrect, the structure data might be incomplete. # Since following code depends on it, we replace missing information taken from the input structure. Build the dictionary of output parameters from the raw parsed data. The output parameters are based on the union of raw parsed data from the XML and stdout output files. Currently, if both raw parsed data dictionaries contain the same key, the stdout version takes precedence, but this should not occur as the `parse_stdout` method should already have solved these conflicts. 
:param parsed_stdout: the raw parsed data dictionary from the stdout output file :param parsed_xml: the raw parsed data dictionary from the XML output file :return: the union of the two raw parsed data dictionaries Build the output structure from the raw parsed data. :param parsed_structure: the dictionary with raw parsed structure data :return: a new `StructureData` created from the parsed data iff the calculation type produces a new structure and the parsed data contained a cell definition. In all other cases, the input structure will be returned. Build the output trajectory from the raw parsed trajectory data. :param parsed_trajectory: the raw parsed trajectory data :return: a `TrajectoryData` or None # The positions were never printed, the calculation did not change the structure # The cell is never printed, the calculation was at fixed cell # Ensure there are as many frames for cell as positions, even when the calculation was done at fixed cell # convert positions to cartesian Build the output kpoints from the raw parsed data. :param parsed_parameters: the raw parsed data :return: a `KpointsData` or None Build the output bands from the raw parsed bands data. :param parsed_bands: the raw parsed bands data :param parsed_kpoints: the `KpointsData` to use for the bands :return: a `BandsData` or None # In the case of input kpoints that define a list of k-points, i.e. along high-symmetry path, and explicit # labels, set those labels also on the output kpoints to be used for the bands. This will allow plotting # utilities to place k-point labels along the x-axis. # AttributeError: input kpoints defines a mesh, not an explicit list # TypeError: inputs kpoints do not define any labels # ValueError: input kpoints labels are not commensurate with `parsed_kpoints` # Correct the occupation for nspin=1 calculations where Quantum ESPRESSO populates each band only halfway Return the key that contains the optional parser options in the `settings` input node. 
Copy the last frame of certain properties from the `TrajectoryData` to the outputs parameters. This makes these properties queryable. Return the extended dictionary of symmetries based on reduced symmetries stored in output parameters. # rimetti lo zero # expand the rest
| 2.300038
| 2
|
app/wqFull/dev/subset.py
|
fkwai/geolearn
| 0
|
6627225
|
from sklearn.preprocessing import QuantileTransformer, PowerTransformer
from hydroDL.data import usgs, gageII, gridMET, ntn, GLASS, transform, dbBasin
import numpy as np
import matplotlib.pyplot as plt
from hydroDL.post import axplot, figplot
from hydroDL import kPath, utils
import json
import os
import importlib
importlib.reload(axplot)
importlib.reload(figplot)
# Load the site list for the "weathering" study from the site-selection dict.
dirSel = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteSel')
dictSiteName = 'dictWeathering.json'
with open(os.path.join(dirSel, dictSiteName)) as f:
    dictSite = json.load(f)
siteNoLst = dictSite['k12']
# Build the daily basin data frame over the full study period.
# (The original assigned `dataName` twice and kept an unused `siteNoTemp`;
# both removed.)
dataName = 'weathering'
sd = '1982-01-01'
ed = '2018-12-31'
freq = 'D'
DF = dbBasin.DataFrameBasin.new(
    dataName, siteNoLst, sdStr=sd, edStr=ed, freq=freq)
importlib.reload(dbBasin)
# Temporal split: before / after 2010.
DF.saveSubset('B10', ed='2009-12-31')
DF.saveSubset('A10', sd='2010-01-01')
# pick by year
yrIn = np.arange(1985, 2020, 5).tolist()
t1 = dbBasin.func.pickByYear(DF.t, yrIn)
t2 = dbBasin.func.pickByYear(DF.t, yrIn, pick=False)
DF.createSubset('pkYr5', dateLst=t1)
DF.createSubset('rmYr5', dateLst=t2)
# pick by day
t1 = dbBasin.func.pickByDay(DF.t, dBase=5, dSel=1)
t2 = dbBasin.func.pickByDay(DF.t, dBase=5, dSel=1, pick=False)
DF.createSubset('pkD5', dateLst=t1)
DF.createSubset('rmD5', dateLst=t2)
# pick by random
t1 = dbBasin.func.pickRandT(DF.t, 0.2)
t2 = dbBasin.func.pickRandT(DF.t, 0.2, pick=False)
DF.createSubset('pkRT20', dateLst=t1)
DF.createSubset('rmRT20', dateLst=t2)
# plot the random 20% pick against its complement for the major-ion codes
codeSel = ['00915', '00925', '00930', '00935', '00940', '00945', '00955']
# Fix: the subsets created above are named 'pkRT20'/'rmRT20'; the original
# referenced non-existent subsets 'pkR20'/'rmR20' here.
d1 = dbBasin.DataModelBasin(DF, subset='pkRT20', varY=codeSel)
d2 = dbBasin.DataModelBasin(DF, subset='rmRT20', varY=codeSel)
k = 0
fig, axes = figplot.multiTS(
    d2.t, [d2.Y[:, k, :], d1.Y[:, k, :]], cLst='br', styLst='..')
fig.show()
|
from sklearn.preprocessing import QuantileTransformer, PowerTransformer
from hydroDL.data import usgs, gageII, gridMET, ntn, GLASS, transform, dbBasin
import numpy as np
import matplotlib.pyplot as plt
from hydroDL.post import axplot, figplot
from hydroDL import kPath, utils
import json
import os
import importlib
importlib.reload(axplot)
importlib.reload(figplot)
dirSel = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteSel')
dictSiteName = 'dictWeathering.json'
with open(os.path.join(dirSel, dictSiteName)) as f:
dictSite = json.load(f)
siteNoLst = dictSite['k12']
dataName = 'weathering'
sd = '1982-01-01'
ed = '2018-12-31'
dataName = 'weathering'
freq = 'D'
DF = dbBasin.DataFrameBasin.new(
dataName, siteNoLst, sdStr=sd, edStr=ed, freq=freq)
siteNoTemp = DF.siteNoLst[:5]
importlib.reload(dbBasin)
DF.saveSubset('B10', ed='2009-12-31')
DF.saveSubset('A10', sd='2010-01-01')
# pick by year
yrIn = np.arange(1985, 2020, 5).tolist()
t1 = dbBasin.func.pickByYear(DF.t, yrIn)
t2 = dbBasin.func.pickByYear(DF.t, yrIn, pick=False)
DF.createSubset('pkYr5', dateLst=t1)
DF.createSubset('rmYr5', dateLst=t2)
# pick by day
t1 = dbBasin.func.pickByDay(DF.t, dBase=5, dSel=1)
t2 = dbBasin.func.pickByDay(DF.t, dBase=5, dSel=1, pick=False)
DF.createSubset('pkD5', dateLst=t1)
DF.createSubset('rmD5', dateLst=t2)
# pick by random
t1 = dbBasin.func.pickRandT(DF.t, 0.2)
t2 = dbBasin.func.pickRandT(DF.t, 0.2, pick=False)
DF.createSubset('pkRT20', dateLst=t1)
DF.createSubset('rmRT20', dateLst=t2)
# plot
codeSel = ['00915', '00925', '00930', '00935', '00940', '00945', '00955']
d1 = dbBasin.DataModelBasin(DF, subset='pkR20', varY=codeSel)
d2 = dbBasin.DataModelBasin(DF, subset='rmR20', varY=codeSel)
k = 0
fig, axes = figplot.multiTS(
d2.t, [d2.Y[:, k, :], d1.Y[:, k, :]], cLst='br', styLst='..')
fig.show()
|
en
| 0.993654
|
# pick by year # pick by day # pick by random # plot
| 2.433495
| 2
|
day_3.py
|
giant995/advent-of-code-2021
| 0
|
6627226
|
<gh_stars>0
word_length = 12  # fixed width of each binary line in the diagnostic report
lines = []
# Read the puzzle input, one fixed-width binary string per line,
# stripping trailing newlines.
with open("day3.input") as file:
    for line in file:
        lines.append(line.rstrip())
def count_bits(bit_str, word_len=None):
    """Count '0'/'1' occurrences per bit column across a flat bit string.

    The string is treated as consecutive fixed-width words; character ``i``
    contributes to column ``i % word_len``.  Any character other than ``"0"``
    is counted as a one (matching the original behavior).

    bit_str  -- concatenation of fixed-width binary report lines
    word_len -- column width; defaults to the module-level ``word_length``
                (backward-compatible generalization of the hard-coded global)

    Returns a dict mapping column index -> ``{0: zero_count, 1: one_count}``.
    """
    if word_len is None:
        word_len = word_length  # preserve the original global-based default
    bits_count = {}
    for i, ch in enumerate(bit_str):
        column = bits_count.setdefault(i % word_len, {0: 0, 1: 0})
        column[0 if ch == "0" else 1] += 1
    return bits_count
# Part 1: gamma takes the most common bit of each column, epsilon the least.
data = ''.join(line.rstrip() for line in lines)  # flatten report to one bit string
gamma = ""
epsilon = ""
count = count_bits(data)
# NOTE(review): relies on dict insertion order (Python 3.7+) so columns are
# visited 0..word_length-1 — confirm the target interpreter guarantees this.
for key, value in count.items():
    if value[0] > value[1]:
        gamma += "0"
        epsilon += "1"
    else:
        gamma += "1"
        epsilon += "0"
print(f"power consumption: {int(gamma, 2) * int(epsilon, 2)}")
# Part 2: repeatedly filter candidates one bit column at a time.
# Oxygen generator keeps the MOST common bit per column (ties -> "1");
# CO2 scrubber keeps the LEAST common bit per column (ties -> "0").
idx = 0
oxygen = lines
while len(oxygen) != 1:
    oxygen_str = ''.join(report_line.rstrip() for report_line in oxygen)
    oxygen_bits_count = count_bits(oxygen_str)
    if oxygen_bits_count[idx % word_length][0] > oxygen_bits_count[idx % word_length][1]:
        oxygen = [report_line for report_line in oxygen if report_line[idx % word_length] == "0"]
    else:
        oxygen = [report_line for report_line in oxygen if report_line[idx % word_length] == "1"]
    idx += 1
# Fix: the scrubber filter must restart from bit position 0.  The original
# carried `idx` over from the oxygen loop, so the CO2 filtering started at
# whatever column the oxygen search ended on.
idx = 0
scrubber = lines
while len(scrubber) != 1:
    scrubber_str = ''.join(report_line.rstrip() for report_line in scrubber)
    scrubber_bits_count = count_bits(scrubber_str)
    if scrubber_bits_count[idx % word_length][0] > scrubber_bits_count[idx % word_length][1]:
        scrubber = [report_line for report_line in scrubber if report_line[idx % word_length] == "1"]
    else:
        scrubber = [report_line for report_line in scrubber if report_line[idx % word_length] == "0"]
    idx += 1
print(f"life support rating: {int(oxygen[0],2) * int(scrubber[0], 2)}")
|
word_length = 12
lines = []
with open("day3.input") as file:
for line in file:
lines.append(line.rstrip())
def count_bits(bit_str):
bits_count = {}
for i in range(len(bit_str)):
if bit_str[i] == "0":
bits_count.setdefault(i % word_length, {0: 0, 1: 0})[0] += 1
else:
bits_count.setdefault(i % word_length, {0: 0, 1: 0})[1] += 1
return bits_count
data = ''.join(line.rstrip() for line in lines)
gamma = ""
epsilon = ""
count = count_bits(data)
for key, value in count.items():
if value[0] > value[1]:
gamma += "0"
epsilon += "1"
else:
gamma += "1"
epsilon += "0"
print(f"power consumption: {int(gamma, 2) * int(epsilon, 2)}")
# Part 2
idx = 0
oxygen = lines
while len(oxygen) != 1:
oxygen_subset = []
oxygen_str = ''.join(report_line.rstrip() for report_line in oxygen)
oxygen_bits_count = count_bits(oxygen_str)
if oxygen_bits_count[idx % word_length][0] > oxygen_bits_count[idx % word_length][1]:
oxygen_subset = [report_line for report_line in oxygen if report_line[idx % word_length] == "0"]
else:
oxygen_subset = [report_line for report_line in oxygen if report_line[idx % word_length] == "1"]
oxygen = oxygen_subset
idx += 1
scrubber = lines
while len(scrubber) != 1:
scrubber_subset = []
scrubber_str = ''.join(report_line.rstrip() for report_line in scrubber)
scrubber_bits_count = count_bits(scrubber_str)
if scrubber_bits_count[idx % word_length][0] > scrubber_bits_count[idx % word_length][1]:
scrubber_subset = [report_line for report_line in scrubber if report_line[idx % word_length] == "1"]
else:
scrubber_subset = [report_line for report_line in scrubber if report_line[idx % word_length] == "0"]
scrubber = scrubber_subset
idx += 1
print(f"life support rating: {int(oxygen[0],2) * int(scrubber[0], 2)}")
|
none
| 1
| 3.26235
| 3
|
|
src/records.py
|
sbarbett/ssp-sdk-python
| 0
|
6627227
|
# Copyright 2017 NeuStar, Inc.All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
class Records:
    """Client for the ``/records`` sub-resource of a Blacklist or Whitelist.

    Thin wrapper that delegates every HTTP verb to the supplied connection
    object, always targeting ``<base_uri>/records``.
    """

    def __init__(self, connection, base_uri):
        self.connection = connection
        self.base_uri = "{}/records".format(base_uri)

    def get(self):
        """Find items belonging to a specific Blacklist or Whitelist."""
        return self.connection.get(self.base_uri)

    def post(self, records):
        """Add item(s) to a specific Blacklist or Whitelist.
        Argument:
        records -- A list of hosts to block.
        """
        payload = json.dumps({"records": records})
        return self.connection.post(self.base_uri, payload)

    def delete(self):
        """Delete item(s) from a specific Blacklist or Whitelist."""
        return self.connection.delete(self.base_uri)
|
# Copyright 2017 NeuStar, Inc.All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
class Records:
def __init__(self, connection, base_uri):
self.connection = connection
self.base_uri = base_uri+"/records"
def get(self):
"""Find items belonging to a specific Blacklist or Whitelist."""
return self.connection.get(self.base_uri)
def post(self, records):
"""Add item(s) to a specific Blacklist or Whitelist.
Argument:
records -- A list of hosts to block.
"""
properties = {"records": records}
return self.connection.post(self.base_uri, json.dumps(properties))
def delete(self):
"""Delete item(s) from a specific Blacklist or Whitelist."""
return self.connection.delete(self.base_uri)
|
en
| 0.834479
|
# Copyright 2017 NeuStar, Inc.All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Find items belonging to a specific Blacklist or Whitelist. Add item(s) to a specific Blacklist or Whitelist. Argument: records -- A list of hosts to block. Delete item(s) from a specific Blacklist or Whitelist.
| 2.583256
| 3
|
mobile/sample/python/sign.py
|
Fordring20/taobao
| 1
|
6627228
|
<reponame>Fordring20/taobao<filename>mobile/sample/python/sign.py
import time
import random
import string
import hashlib
class Sign:
    """Build the JS-API signature dict from a jsapi_ticket and page URL.

    Collects nonceStr / jsapi_ticket / timestamp / url, then SHA-1 signs the
    lexicographically key-sorted "key=value" query string.
    """

    def __init__(self, jsapi_ticket, url):
        self.ret = {
            'nonceStr': self.__create_nonce_str(),
            'jsapi_ticket': jsapi_ticket,
            'timestamp': self.__create_timestamp(),
            'url': url
        }

    def __create_nonce_str(self):
        # 15-char random alphanumeric nonce.  NOTE(review): `random` is not
        # cryptographically secure; `secrets.choice` would be safer here.
        return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(15))

    def __create_timestamp(self):
        return int(time.time())

    def sign(self):
        """Compute the signature, store it under ``ret['signature']`` and return ``ret``."""
        # Keys are sorted by their original (case-sensitive) names but emitted
        # lower-cased, exactly as in the original implementation.
        # Renamed the local from `string` to `raw`: it shadowed the imported
        # `string` module.
        raw = '&'.join(['%s=%s' % (key.lower(), self.ret[key]) for key in sorted(self.ret)])
        print(raw)  # debug output kept from the original (was a py2 print statement)
        # Fix: Python 3 — `print` is a function and sha1 requires bytes.
        self.ret['signature'] = hashlib.sha1(raw.encode('utf-8')).hexdigest()
        return self.ret
if __name__ == '__main__':
    # NOTE: the URL must be obtained dynamically at runtime, never hard-coded
    # (translated from the original Chinese comment).
    sign = Sign('jsapi_ticket', 'http://example.com')
    # Fix: Python 3 print function (was a py2 print statement).
    print(sign.sign())
|
import time
import random
import string
import hashlib
class Sign:
def __init__(self, jsapi_ticket, url):
self.ret = {
'nonceStr': self.__create_nonce_str(),
'jsapi_ticket': jsapi_ticket,
'timestamp': self.__create_timestamp(),
'url': url
}
def __create_nonce_str(self):
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(15))
def __create_timestamp(self):
return int(time.time())
def sign(self):
string = '&'.join(['%s=%s' % (key.lower(), self.ret[key]) for key in sorted(self.ret)])
print string
self.ret['signature'] = hashlib.sha1(string).hexdigest()
return self.ret
if __name__ == '__main__':
# 注意 URL 一定要动态获取,不能 hardcode
sign = Sign('jsapi_ticket', 'http://example.com')
print sign.sign()
|
zh
| 0.94028
|
# 注意 URL 一定要动态获取,不能 hardcode
| 2.951793
| 3
|
scripts/calculateDrivingRankings.py
|
dsbrown1331/safe-imitation-learning
| 1
|
6627229
|
import numpy as np
import matplotlib.pyplot as plt
import bound_methods
from numpy import nan, inf
# Rank driving evaluation policies by their average upper bound on policy
# loss, comparing the worst-case feature-count bound (WFCB) against a 95%
# value-at-risk bound computed from a Bayesian IRL MCMC chain.
# Results for experiment7_1; rewards are feasible in that all start states
# end up at goal within 25 steps.
# Experiment configuration — these values must match the file-name
# convention used when the result files were written.
sample_flag = 4
chain_len = 2000
mcmc_step = 0.01
alpha = 10
num_reps = 20
mc_numRollouts = 200
numDemos = [1]
demo_length = 100
delta_conf = 0.95
burn = 20   # samples dropped from the start of each chain
skip = 2    # thinning stride applied after burn-in
bounds = ["WFCB", "VaR 95"]
filePath = "data/driving/"
evalPolicyNames = ["right_safe", "on_road", "nasty"]
print("\t\t" + bounds[0] + "\t\t" + bounds[1])
for evalPolicyName in evalPolicyNames:
    data_row = [evalPolicyName]
    for bound_type in bounds:
        for numDemo in numDemos:
            predicted = []
            for rep in range(num_reps):
                filename = "driving_" + evalPolicyName + "_alpha" + str(alpha) + "_chain" + str(chain_len) + "_step" + str(mcmc_step) + "0000_L1sampleflag" + str(sample_flag) + "_demoLength" + str(demo_length) + "_mcRollout" + str(mc_numRollouts) + "_rep" + str(rep) + ".txt"
                # Fix: use a context manager so the handle is always closed;
                # the original leaked an open file every iteration.
                with open(filePath + filename, 'r') as f:
                    f.readline()                  # skip header comment
                    actual = float(f.readline())  # true performance ratio (not used in ranking)
                    f.readline()                  # skip '---' separator
                    wfcb = float(f.readline())    # worst-case feature count bound
                    f.readline()                  # skip '---' separator
                    samples = [float(line) for line in f]  # MCMC chain of losses
                # Burn-in and thin the chain before computing the bound.
                burned_samples = samples[burn::skip]
                if bound_type == "VaR 99":
                    upper_bnd = bound_methods.percentile_confidence_upper_bnd(burned_samples, 0.99, delta_conf)
                elif bound_type == "VaR 95":
                    upper_bnd = bound_methods.percentile_confidence_upper_bnd(burned_samples, 0.95, delta_conf)
                elif bound_type == "VaR 90":
                    upper_bnd = bound_methods.percentile_confidence_upper_bnd(burned_samples, 0.9, delta_conf)
                elif bound_type == "WFCB":
                    upper_bnd = wfcb
                predicted.append(upper_bnd)
        # Average bound over repetitions for this policy/bound combination.
        # (Dead accumulators from the original — tol, gamma, fmts, accuracies,
        # average_bound_error, true_perf_ratio, bound_error, worst_cases,
        # actuals, val — removed; none affected the output.)
        ave_evd_bound = np.mean(predicted)
        data_row.append(ave_evd_bound)
    print(data_row)
|
import numpy as np
import matplotlib.pyplot as plt
import bound_methods
from numpy import nan, inf
#plot results for experiment7_1
#rewards are feasible in that all start states end up at goal within 25 steps
sample_flag = 4
chain_len = 2000
mcmc_step = 0.01
alpha = 10
num_reps = 20
mc_numRollouts = 200
numDemos = [1]
tol = 0.0001
demo_length = 100;
gamma = 0.95
burn = 20
skip = 2
delta_conf = 0.95
bounds = ["WFCB", "VaR 95"]
fmts = ['o-','s--','^-.', '*:','>-','d--']
filePath = "data/driving/"
evalPolicyNames = ["right_safe","on_road","nasty"]
print("\t\t" + bounds[0] +"\t\t" + bounds[1])
for evalPolicyName in evalPolicyNames:
data_row = []
data_row.append(evalPolicyName)
for bound_type in bounds:
accuracies = []
average_bound_error = []
for numDemo in numDemos:
true_perf_ratio = []
predicted = []
bound_error = []
worst_cases = []
actuals = []
for rep in range(num_reps):
filename = "driving_" + evalPolicyName + "_alpha" + str(alpha) + "_chain" + str(chain_len) + "_step" + str(mcmc_step)+ "0000_L1sampleflag" + str(sample_flag) + "_demoLength" + str(demo_length)+ "_mcRollout" + str(mc_numRollouts) + "_rep" + str(rep)+ ".txt";
#print filename
f = open(filePath + filename,'r')
f.readline() #clear out comment from buffer
actual = (float(f.readline())) #get the true ratio
actuals.append(actual)
#print "actual", actual
f.readline() #clear out ---
wfcb = (float(f.readline())) #get the worst-case feature count bound
worst_cases.append(wfcb)
#print wfcb
f.readline() #clear out ---
samples = []
for line in f: #read in the mcmc chain
val = float(line)
samples.append(float(line))
#print samples
#burn
burned_samples = samples[burn::skip]
#print "max sample", np.max(burned_samples)
#compute confidence bound
if bound_type == "VaR 99":
upper_bnd = bound_methods.percentile_confidence_upper_bnd(burned_samples, 0.99, delta_conf)
elif bound_type == "VaR 95":
upper_bnd = bound_methods.percentile_confidence_upper_bnd(burned_samples, 0.95, delta_conf)
elif bound_type == "VaR 90":
upper_bnd = bound_methods.percentile_confidence_upper_bnd(burned_samples, 0.9, delta_conf)
elif bound_type == "WFCB":
upper_bnd = wfcb
#print "upper bound", upper_bnd
predicted.append(upper_bnd)
ave_evd_bound = np.mean(predicted)
data_row.append(ave_evd_bound)
print(data_row)
|
en
| 0.68628
|
#plot results for experiment7_1 #rewards are feasible in that all start states end up at goal within 25 steps #print filename #clear out comment from buffer #get the true ratio #print "actual", actual #clear out --- #get the worst-case feature count bound #print wfcb #clear out --- #read in the mcmc chain #print samples #burn #print "max sample", np.max(burned_samples) #compute confidence bound #print "upper bound", upper_bnd
| 2.532793
| 3
|
resolwe/permissions/tests/test_data.py
|
JureZmrzlikar/resolwe
| 0
|
6627230
|
# pylint: disable=missing-docstring
import shutil
import unittest
from datetime import timedelta
from django.utils.timezone import now
from guardian.shortcuts import remove_perm
from rest_framework import exceptions, status
from resolwe.flow.models import Collection, Data
from resolwe.flow.serializers import ContributorSerializer
from resolwe.flow.views import DataViewSet
from resolwe.test import ResolweAPITestCase
# strptime/strftime pattern: ISO-8601 with microseconds, no timezone suffix.
DATE_FORMAT = r"%Y-%m-%dT%H:%M:%S.%f"
# Canonical API error messages asserted against in the tests below.
MESSAGES = {
    "NOT_FOUND": "Not found.",
    # 'NO_PERMISSION': 'You do not have permission to perform this action.',
    "ONE_ID_REQUIRED": "Exactly one id required on create.",
}
class DataTestCase(ResolweAPITestCase):
    """Permission/API tests for the ``data`` endpoint (``DataViewSet``).

    Exercises list / detail / create / update / delete access for a user
    with permissions (``user1``), a user without (``user2``), the anonymous
    public user and the admin, against objects loaded from fixtures.
    """
    fixtures = [
        "users.yaml",
        "collections.yaml",
        "processes.yaml",
        "data.yaml",
        "permissions.yaml",
    ]
    def setUp(self):
        # Data pk=1 is the reference object; used later to check that
        # protected fields keep their fixture values after PATCH attempts.
        self.data1 = Data.objects.get(pk=1)
        self.resource_name = "data"
        self.viewset = DataViewSet
        # Valid POST payload; collection 1 and process 'test_process'
        # exist in the fixtures.
        self.data = {
            "name": "New data",
            "slug": "new_data",
            "collection": {"id": 1},
            "process": {"slug": "test_process"},
        }
        super().setUp()
        # Reindex data objects as they are loaded in fixtures.
        # TODO: Remove this when we get rid of fixtures.
        from resolwe.elastic.builder import index_builder
        index_builder.build()
    def tearDown(self):
        # Remove any files materialized for data locations during the test.
        for data in Data.objects.all():
            if data.location:
                shutil.rmtree(data.location.get_path(), ignore_errors=True)
        super().tearDown()
    def test_get_list(self):
        # user1 sees both fixture Data objects.
        resp = self._get_list(self.user1)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(len(resp.data), 2)
    def test_get_list_public_user(self):
        # The anonymous user sees only the public Data object.
        resp = self._get_list()
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(len(resp.data), 1)
    def test_get_list_admin(self):
        resp = self._get_list(self.admin)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(len(resp.data), 2)
    @unittest.skipIf(
        True,
        "since PR308: this test uses transactions, incompatible with the separated manager",
    )
    def test_post(self):
        # logged-in user w/ perms
        collection_n = Data.objects.count()
        resp = self._post(self.data, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        self.assertEqual(Data.objects.count(), collection_n + 1)
        # Server-side bookkeeping fields must be freshly stamped.
        d = Data.objects.get(pk=resp.data["id"])
        self.assertTrue(now() - d.modified < timedelta(seconds=1))
        self.assertTrue(now() - d.created < timedelta(seconds=1))
        self.assertEqual(d.status, "OK")
        self.assertTrue(now() - d.started < timedelta(seconds=1))
        self.assertTrue(now() - d.finished < timedelta(seconds=1))
        self.assertEqual(d.contributor_id, self.user1.pk)
    def test_post_invalid_fields(self):
        # Nonexistent collection/process ids must yield 400 with a
        # descriptive message and create nothing.
        data_n = Data.objects.count()
        self.data["collection"] = {"id": 42}
        resp = self._post(self.data, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(
            resp.data["collection"][0],
            "Invalid collection value: {'id': 42} - object does not exist.",
        )
        self.data["collection"] = {"id": 1}
        self.data["process"] = {"id": 42}
        resp = self._post(self.data, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(
            str(resp.data["process"][0]),
            "Invalid process value: {'id': 42} - object does not exist.",
        )
        self.assertEqual(Data.objects.count(), data_n)
    def test_post_no_perms(self):
        # Without edit permission on the target collection, create is denied.
        collection = Collection.objects.get(pk=1)
        remove_perm("edit_collection", self.user2, collection)
        data_count = Data.objects.count()
        resp = self._post(self.data, self.user2)
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(Data.objects.count(), data_count)
    def test_post_public_user(self):
        data_count = Data.objects.count()
        resp = self._post(self.data)
        # User has no permission to add Data object to the collection.
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(Data.objects.count(), data_count)
    def test_post_protected_fields(self):
        # Client-supplied values for read-only/processing fields must be
        # ignored on create and replaced by server-side defaults.
        date_now = now()
        self.data["created"] = date_now - timedelta(days=360)
        self.data["modified"] = date_now - timedelta(days=180)
        self.data["started"] = date_now - timedelta(days=180)
        self.data["finished"] = date_now - timedelta(days=90)
        self.data["checksum"] = "fake"
        self.data["status"] = "DE"
        self.data["process_progress"] = 2
        self.data["process_rc"] = 18
        self.data["process_info"] = "Spam"
        self.data["process_warning"] = "More spam"
        self.data["process_error"] = "Even more spam"
        self.data["contributor_id"] = 2
        resp = self._post(self.data, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        self.assertEqual(resp.data["started"], None)
        self.assertEqual(resp.data["finished"], None)
        self.assertEqual(
            resp.data["checksum"],
            "05bc76611c382a88817389019679f35cdb32ac65fe6662210805b588c30f71e6",
        )
        self.assertEqual(resp.data["status"], "RE")
        self.assertEqual(resp.data["process_progress"], 0)
        self.assertEqual(resp.data["process_rc"], None)
        self.assertEqual(resp.data["process_info"], [])
        self.assertEqual(resp.data["process_warning"], [])
        self.assertEqual(resp.data["process_error"], [])
        self.assertEqual(
            resp.data["contributor"],
            {
                "id": self.user1.pk,
                "username": self.user1.username,
                "first_name": self.user1.first_name,
                "last_name": self.user1.last_name,
            },
        )
    def test_post_contributor_numeric(self):
        # ContributorSerializer accepts a bare primary key.
        response = ContributorSerializer(
            ContributorSerializer().to_internal_value(self.user1.pk)
        ).data
        self.assertEqual(
            response,
            {
                "id": self.user1.pk,
                "username": self.user1.username,
                "first_name": self.user1.first_name,
                "last_name": self.user1.last_name,
            },
        )
    def test_post_contributor_dict(self):
        # ...or a dict containing the primary key.
        response = ContributorSerializer(
            ContributorSerializer().to_internal_value({"id": self.user1.pk})
        ).data
        self.assertEqual(
            response,
            {
                "id": self.user1.pk,
                "username": self.user1.username,
                "first_name": self.user1.first_name,
                "last_name": self.user1.last_name,
            },
        )
    def test_post_contributor_dict_extra_data(self):
        # Extra keys alongside the id are ignored, not applied.
        response = ContributorSerializer(
            ContributorSerializer().to_internal_value(
                {"id": self.user1.pk, "username": "ignored", "first_name": "ignored"}
            )
        ).data
        self.assertEqual(
            response,
            {
                "id": self.user1.pk,
                "username": self.user1.username,
                "first_name": self.user1.first_name,
                "last_name": self.user1.last_name,
            },
        )
    def test_post_contributor_dict_invalid(self):
        # A dict without an id is rejected.
        with self.assertRaises(exceptions.ValidationError):
            ContributorSerializer().to_internal_value(
                {"invalid-dictionary": True,}
            )
    def test_get_detail(self):
        # public user w/ perms
        resp = self._get_detail(1)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(resp.data["id"], 1)
        # user w/ permissions
        resp = self._get_detail(1, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        # Full serialized field set expected in the detail response.
        self.assertKeys(
            resp.data,
            [
                "slug",
                "name",
                "created",
                "modified",
                "contributor",
                "started",
                "finished",
                "checksum",
                "status",
                "process",
                "process_progress",
                "process_rc",
                "process_info",
                "process_warning",
                "process_error",
                "input",
                "output",
                "descriptor_schema",
                "descriptor",
                "id",
                "size",
                "scheduled",
                "current_user_permissions",
                "descriptor_dirty",
                "tags",
                "process_memory",
                "process_cores",
                "collection",
                "entity",
                "duplicated",
            ],
        )
        # user w/ public permissions
        resp = self._get_detail(1, self.user2)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertKeys(
            resp.data,
            [
                "slug",
                "name",
                "created",
                "modified",
                "contributor",
                "started",
                "finished",
                "checksum",
                "status",
                "process",
                "process_progress",
                "process_rc",
                "process_info",
                "process_warning",
                "process_error",
                "input",
                "output",
                "descriptor_schema",
                "descriptor",
                "id",
                "size",
                "scheduled",
                "current_user_permissions",
                "descriptor_dirty",
                "tags",
                "process_memory",
                "process_cores",
                "collection",
                "entity",
                "duplicated",
            ],
        )
    def test_get_detail_no_perms(self):
        # Missing view permission is reported as 404, not 403, to avoid
        # leaking object existence.
        # public user w/o permissions
        resp = self._get_detail(2)
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(resp.data["detail"], MESSAGES["NOT_FOUND"])
        # user w/o permissions
        resp = self._get_detail(2, self.user2)
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(resp.data["detail"], MESSAGES["NOT_FOUND"])
    def test_patch(self):
        data = {"name": "New data"}
        resp = self._patch(1, data, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.name, "New data")
    def test_patch_no_perms(self):
        data = {"name": "New data"}
        resp = self._patch(2, data, self.user2)
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
        d = Data.objects.get(pk=2)
        self.assertEqual(d.name, "Test data 2")
    def test_patch_public_user(self):
        data = {"name": "New data"}
        resp = self._patch(2, data)
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
        d = Data.objects.get(pk=2)
        self.assertEqual(d.name, "Test data 2")
    def test_patch_protected(self):
        # PATCHing a protected field returns 204 but must leave both the
        # field and `modified` untouched.
        date_now = now()
        # `created`
        resp = self._patch(1, {"created": date_now}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.created.isoformat(), self.data1.created.isoformat())
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `modified`
        resp = self._patch(1, {"modified": date_now - timedelta(days=180)}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `started`
        resp = self._patch(1, {"started": date_now}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.started.isoformat(), self.data1.started.isoformat())
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `finished`
        resp = self._patch(1, {"finished": date_now}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.finished.isoformat(), self.data1.finished.isoformat())
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `checksum`
        resp = self._patch(1, {"checksum": "fake"}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(
            d.checksum,
            "05bc76611c382a88817389019679f35cdb32ac65fe6662210805b588c30f71e6",
        )
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `status`
        resp = self._patch(1, {"status": "DE"}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.status, "OK")
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `process_progress`
        resp = self._patch(1, {"process_progress": 2}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.process_progress, 0)
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `process_rc`
        resp = self._patch(1, {"process_rc": 18}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.process_rc, None)
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `process_info`
        resp = self._patch(1, {"process_info": "Spam"}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.process_info, [])
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `process_warning`
        resp = self._patch(1, {"process_warning": "More spam"}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.process_warning, [])
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `process_error`
        resp = self._patch(1, {"process_error": "Even more spam"}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.process_error, [])
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `contributor`
        resp = self._patch(1, {"contributor": 2}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.contributor_id, 1)
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `process`
        resp = self._patch(1, {"process": 2}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.process_id, 1)
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
    def test_delete(self):
        resp = self._delete(1, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        query = Data.objects.filter(pk=1).exists()
        self.assertFalse(query)
    def test_delete_no_perms(self):
        # Delete without permission also hides existence via 404.
        resp = self._delete(2, self.user2)
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
        query = Data.objects.filter(pk=2).exists()
        self.assertTrue(query)
    def test_delete_public_user(self):
        resp = self._delete(2)
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
        query = Data.objects.filter(pk=2).exists()
        self.assertTrue(query)
|
# pylint: disable=missing-docstring
import shutil
import unittest
from datetime import timedelta
from django.utils.timezone import now
from guardian.shortcuts import remove_perm
from rest_framework import exceptions, status
from resolwe.flow.models import Collection, Data
from resolwe.flow.serializers import ContributorSerializer
from resolwe.flow.views import DataViewSet
from resolwe.test import ResolweAPITestCase
# strftime/strptime pattern for API datetimes.
# NOTE(review): not referenced in the visible tests — confirm before removing.
DATE_FORMAT = r"%Y-%m-%dT%H:%M:%S.%f"
# Canonical API error messages the tests below assert against.
MESSAGES = {
    "NOT_FOUND": "Not found.",
    # 'NO_PERMISSION': 'You do not have permission to perform this action.',
    "ONE_ID_REQUIRED": "Exactly one id required on create.",
}
class DataTestCase(ResolweAPITestCase):
    """API tests for the ``data`` endpoint (list/create/retrieve/patch/delete).

    Driven entirely by YAML fixtures: the tests assume ``pk=1`` is visible to
    ``user1`` (and publicly) while ``pk=2`` is hidden from ``user2`` and
    anonymous users.  Protected-field patches are expected to return 204
    without applying the change (see ``test_patch_protected``).
    """
    # Fixture files loaded for every test in this case.
    fixtures = [
        "users.yaml",
        "collections.yaml",
        "processes.yaml",
        "data.yaml",
        "permissions.yaml",
    ]
    def setUp(self):
        """Cache the reference Data object and the default POST payload."""
        self.data1 = Data.objects.get(pk=1)
        self.resource_name = "data"
        self.viewset = DataViewSet
        # Default valid payload used by the POST tests below.
        self.data = {
            "name": "New data",
            "slug": "new_data",
            "collection": {"id": 1},
            "process": {"slug": "test_process"},
        }
        super().setUp()
        # Reindex data objects as they are loaded in fixtures.
        # TODO: Remove this when we get rid of fixtures.
        from resolwe.elastic.builder import index_builder
        index_builder.build()
    def tearDown(self):
        """Remove on-disk data locations created by the fixtures/tests."""
        for data in Data.objects.all():
            if data.location:
                shutil.rmtree(data.location.get_path(), ignore_errors=True)
        super().tearDown()
    def test_get_list(self):
        """A logged-in user with permissions sees both fixture objects."""
        resp = self._get_list(self.user1)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(len(resp.data), 2)
    def test_get_list_public_user(self):
        """An anonymous user only sees the publicly shared object."""
        resp = self._get_list()
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(len(resp.data), 1)
    def test_get_list_admin(self):
        """An admin sees all objects."""
        resp = self._get_list(self.admin)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(len(resp.data), 2)
    @unittest.skipIf(
        True,
        "since PR308: this test uses transactions, incompatible with the separated manager",
    )
    def test_post(self):
        """POST creates a Data object and stamps timestamps/contributor."""
        # logged-in user w/ perms
        collection_n = Data.objects.count()
        resp = self._post(self.data, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        self.assertEqual(Data.objects.count(), collection_n + 1)
        d = Data.objects.get(pk=resp.data["id"])
        self.assertTrue(now() - d.modified < timedelta(seconds=1))
        self.assertTrue(now() - d.created < timedelta(seconds=1))
        self.assertEqual(d.status, "OK")
        self.assertTrue(now() - d.started < timedelta(seconds=1))
        self.assertTrue(now() - d.finished < timedelta(seconds=1))
        self.assertEqual(d.contributor_id, self.user1.pk)
    def test_post_invalid_fields(self):
        """POST with nonexistent collection/process ids fails with 400."""
        data_n = Data.objects.count()
        self.data["collection"] = {"id": 42}
        resp = self._post(self.data, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(
            resp.data["collection"][0],
            "Invalid collection value: {'id': 42} - object does not exist.",
        )
        self.data["collection"] = {"id": 1}
        self.data["process"] = {"id": 42}
        resp = self._post(self.data, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(
            str(resp.data["process"][0]),
            "Invalid process value: {'id': 42} - object does not exist.",
        )
        self.assertEqual(Data.objects.count(), data_n)
    def test_post_no_perms(self):
        """POST into a collection without edit permission is forbidden."""
        collection = Collection.objects.get(pk=1)
        remove_perm("edit_collection", self.user2, collection)
        data_count = Data.objects.count()
        resp = self._post(self.data, self.user2)
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(Data.objects.count(), data_count)
    def test_post_public_user(self):
        """Anonymous POST is forbidden."""
        data_count = Data.objects.count()
        resp = self._post(self.data)
        # User has no permission to add Data object to the collection.
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(Data.objects.count(), data_count)
    def test_post_protected_fields(self):
        """Server-managed fields in the POST payload are silently ignored."""
        date_now = now()
        self.data["created"] = date_now - timedelta(days=360)
        self.data["modified"] = date_now - timedelta(days=180)
        self.data["started"] = date_now - timedelta(days=180)
        self.data["finished"] = date_now - timedelta(days=90)
        self.data["checksum"] = "fake"
        self.data["status"] = "DE"
        self.data["process_progress"] = 2
        self.data["process_rc"] = 18
        self.data["process_info"] = "Spam"
        self.data["process_warning"] = "More spam"
        self.data["process_error"] = "Even more spam"
        self.data["contributor_id"] = 2
        resp = self._post(self.data, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        self.assertEqual(resp.data["started"], None)
        self.assertEqual(resp.data["finished"], None)
        # Checksum is computed server-side, not taken from the payload.
        self.assertEqual(
            resp.data["checksum"],
            "05bc76611c382a88817389019679f35cdb32ac65fe6662210805b588c30f71e6",
        )
        self.assertEqual(resp.data["status"], "RE")
        self.assertEqual(resp.data["process_progress"], 0)
        self.assertEqual(resp.data["process_rc"], None)
        self.assertEqual(resp.data["process_info"], [])
        self.assertEqual(resp.data["process_warning"], [])
        self.assertEqual(resp.data["process_error"], [])
        # Contributor is always the requesting user, not the payload value.
        self.assertEqual(
            resp.data["contributor"],
            {
                "id": self.user1.pk,
                "username": self.user1.username,
                "first_name": self.user1.first_name,
                "last_name": self.user1.last_name,
            },
        )
    def test_post_contributor_numeric(self):
        """ContributorSerializer accepts a bare primary key."""
        response = ContributorSerializer(
            ContributorSerializer().to_internal_value(self.user1.pk)
        ).data
        self.assertEqual(
            response,
            {
                "id": self.user1.pk,
                "username": self.user1.username,
                "first_name": self.user1.first_name,
                "last_name": self.user1.last_name,
            },
        )
    def test_post_contributor_dict(self):
        """ContributorSerializer accepts a dict with an ``id`` key."""
        response = ContributorSerializer(
            ContributorSerializer().to_internal_value({"id": self.user1.pk})
        ).data
        self.assertEqual(
            response,
            {
                "id": self.user1.pk,
                "username": self.user1.username,
                "first_name": self.user1.first_name,
                "last_name": self.user1.last_name,
            },
        )
    def test_post_contributor_dict_extra_data(self):
        """Extra keys beside ``id`` are ignored; real user data is returned."""
        response = ContributorSerializer(
            ContributorSerializer().to_internal_value(
                {"id": self.user1.pk, "username": "ignored", "first_name": "ignored"}
            )
        ).data
        self.assertEqual(
            response,
            {
                "id": self.user1.pk,
                "username": self.user1.username,
                "first_name": self.user1.first_name,
                "last_name": self.user1.last_name,
            },
        )
    def test_post_contributor_dict_invalid(self):
        """A dict without an ``id`` key is rejected."""
        with self.assertRaises(exceptions.ValidationError):
            ContributorSerializer().to_internal_value(
                {"invalid-dictionary": True,}
            )
    def test_get_detail(self):
        """GET detail returns the full serialized object for permitted users."""
        # public user w/ perms
        resp = self._get_detail(1)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(resp.data["id"], 1)
        # user w/ permissions
        resp = self._get_detail(1, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertKeys(
            resp.data,
            [
                "slug",
                "name",
                "created",
                "modified",
                "contributor",
                "started",
                "finished",
                "checksum",
                "status",
                "process",
                "process_progress",
                "process_rc",
                "process_info",
                "process_warning",
                "process_error",
                "input",
                "output",
                "descriptor_schema",
                "descriptor",
                "id",
                "size",
                "scheduled",
                "current_user_permissions",
                "descriptor_dirty",
                "tags",
                "process_memory",
                "process_cores",
                "collection",
                "entity",
                "duplicated",
            ],
        )
        # user w/ public permissions
        resp = self._get_detail(1, self.user2)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertKeys(
            resp.data,
            [
                "slug",
                "name",
                "created",
                "modified",
                "contributor",
                "started",
                "finished",
                "checksum",
                "status",
                "process",
                "process_progress",
                "process_rc",
                "process_info",
                "process_warning",
                "process_error",
                "input",
                "output",
                "descriptor_schema",
                "descriptor",
                "id",
                "size",
                "scheduled",
                "current_user_permissions",
                "descriptor_dirty",
                "tags",
                "process_memory",
                "process_cores",
                "collection",
                "entity",
                "duplicated",
            ],
        )
    def test_get_detail_no_perms(self):
        """GET detail of an unpermitted object returns 404, not 403."""
        # public user w/o permissions
        resp = self._get_detail(2)
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(resp.data["detail"], MESSAGES["NOT_FOUND"])
        # user w/o permissions
        resp = self._get_detail(2, self.user2)
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(resp.data["detail"], MESSAGES["NOT_FOUND"])
    def test_patch(self):
        """PATCH of a writable field by a permitted user is applied."""
        data = {"name": "New data"}
        resp = self._patch(1, data, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.name, "New data")
    def test_patch_no_perms(self):
        """PATCH without permissions returns 404 and changes nothing."""
        data = {"name": "New data"}
        resp = self._patch(2, data, self.user2)
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
        d = Data.objects.get(pk=2)
        self.assertEqual(d.name, "Test data 2")
    def test_patch_public_user(self):
        """Anonymous PATCH returns 404 and changes nothing."""
        data = {"name": "New data"}
        resp = self._patch(2, data)
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
        d = Data.objects.get(pk=2)
        self.assertEqual(d.name, "Test data 2")
    def test_patch_protected(self):
        """PATCH of server-managed fields returns 204 and leaves them untouched."""
        date_now = now()
        # `created`
        resp = self._patch(1, {"created": date_now}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.created.isoformat(), self.data1.created.isoformat())
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `modified`
        resp = self._patch(1, {"modified": date_now - timedelta(days=180)}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `started`
        resp = self._patch(1, {"started": date_now}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.started.isoformat(), self.data1.started.isoformat())
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `finished`
        resp = self._patch(1, {"finished": date_now}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.finished.isoformat(), self.data1.finished.isoformat())
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `checksum`
        resp = self._patch(1, {"checksum": "fake"}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(
            d.checksum,
            "05bc76611c382a88817389019679f35cdb32ac65fe6662210805b588c30f71e6",
        )
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `status`
        resp = self._patch(1, {"status": "DE"}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.status, "OK")
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `process_progress`
        resp = self._patch(1, {"process_progress": 2}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.process_progress, 0)
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `process_rc`
        resp = self._patch(1, {"process_rc": 18}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.process_rc, None)
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `process_info`
        resp = self._patch(1, {"process_info": "Spam"}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.process_info, [])
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `process_warning`
        resp = self._patch(1, {"process_warning": "More spam"}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.process_warning, [])
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `process_error`
        resp = self._patch(1, {"process_error": "Even more spam"}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.process_error, [])
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `contributor`
        resp = self._patch(1, {"contributor": 2}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.contributor_id, 1)
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
        # `process`
        resp = self._patch(1, {"process": 2}, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        d = Data.objects.get(pk=1)
        self.assertEqual(d.process_id, 1)
        self.assertEqual(d.modified.isoformat(), self.data1.modified.isoformat())
    def test_delete(self):
        """An owner's DELETE succeeds and removes the object."""
        resp = self._delete(1, self.user1)
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        query = Data.objects.filter(pk=1).exists()
        self.assertFalse(query)
    def test_delete_no_perms(self):
        """DELETE without permissions returns 404; the object survives."""
        resp = self._delete(2, self.user2)
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
        query = Data.objects.filter(pk=2).exists()
        self.assertTrue(query)
    def test_delete_public_user(self):
        """Anonymous DELETE returns 404; the object survives."""
        resp = self._delete(2)
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
        query = Data.objects.filter(pk=2).exists()
        self.assertTrue(query)
|
en
| 0.756224
|
# pylint: disable=missing-docstring # 'NO_PERMISSION': 'You do not have permission to perform this action.', # Reindex data objects as they are loaded in fixtures. # TODO: Remove this when we get rid of fixtures. # logged-in user w/ perms # User has no permission to add Data object to the collection. # public user w/ perms # user w/ permissions # user w/ public permissions # public user w/o permissions # user w/o permissions # `created` # `modified` # `started` # `finished` # `checksum` # `status` # `process_progress` # `process_rc` # `process_info` # `process_warning` # `process_error` # `contributor` # `process`
| 2.00811
| 2
|
gbkgrep.py
|
stevenjdunn/gbkgrep
| 0
|
6627231
|
#!/usr/bin/env python
"""Extract the nucleotide sequence of a gene from a GenBank file.

Finds every CDS feature whose ``locus_tag`` matches the requested tag and
prints it to stdout in FASTA format.
"""
import argparse

from Bio import SeqIO

# Version (fixed: was misspelled `_verion_` in the original).
__version__ = "0.1"


def print_matching_cds(genbank_path, locus_tag):
    """Print a FASTA record for each CDS in *genbank_path* matching *locus_tag*."""
    for record in SeqIO.parse(genbank_path, "genbank"):
        for feature in record.features or []:
            if feature.type != "CDS":
                continue
            # .get() guards against CDS features that carry no locus_tag
            # qualifier, which raised KeyError in the original.
            tags = feature.qualifiers.get("locus_tag", [])
            if tags and tags[0] == locus_tag:
                print(">%s\n%s\n" % (tags[0],
                                     feature.location.extract(record).seq))


def main():
    """Parse CLI arguments and run the extraction."""
    parser = argparse.ArgumentParser(description="A tool for extracting nucleotide sequence of a given gene from a genbank file.")
    parser.add_argument("-i", "--input", required=True, help="Locus tag")
    parser.add_argument("-g", "--genbank", required=True, help="Path to genbank file.")
    args = parser.parse_args()
    print_matching_cds(args.genbank, args.input)


# Guard so importing this module no longer triggers argument parsing.
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
import argparse
from Bio import SeqIO
# Version
_verion_= "0.1"
# Argparse Setup
parser = argparse.ArgumentParser(description="A tool for extracting nucleotide sequence of a given gene from a genbank file.")
parser.add_argument("-i", "--input", required=True, help="Locus tag")
parser.add_argument("-g", "--genbank", required=True, help="Path to genbank file.")
args = parser.parse_args()
input_tag = args.input
for entries in SeqIO.parse(args.genbank,"genbank"):
if entries.features:
for feature in entries.features:
if feature.type == "CDS":
if feature.qualifiers['locus_tag'][0] == input_tag:
print(">%s\n%s\n" % (
feature.qualifiers['locus_tag'][0],
feature.location.extract(entries).seq))
|
hu
| 0.084323
|
#!/usr/bin/env python # Version # Argparse Setup
| 3.152916
| 3
|
tests/expectations/mr-x-cat-hs-col-stderr.py
|
Crunch-io/crunch-cube
| 3
|
6627232
|
[
[
0.1028366,
0.06789606,
0.06522613,
float("NaN"),
0.03345903,
0.04066543,
float("NaN"),
0.02659733,
],
[
0.12475421,
0.07228711,
0.06460091,
float("NaN"),
0.0532943,
0.05249552,
float("NaN"),
0.03740326,
],
[
0.12056446,
0.07802173,
0.06767673,
float("NaN"),
0.05182406,
0.04935598,
float("NaN"),
0.03577725,
],
[
0.10249407,
0.05842565,
0.05076373,
float("NaN"),
0.03127232,
0.02435786,
float("NaN"),
0.01945794,
],
[
0.07421655,
0.03989992,
0.03532412,
float("NaN"),
0.03203276,
0.02927602,
float("NaN"),
0.02162477,
],
]
|
[
[
0.1028366,
0.06789606,
0.06522613,
float("NaN"),
0.03345903,
0.04066543,
float("NaN"),
0.02659733,
],
[
0.12475421,
0.07228711,
0.06460091,
float("NaN"),
0.0532943,
0.05249552,
float("NaN"),
0.03740326,
],
[
0.12056446,
0.07802173,
0.06767673,
float("NaN"),
0.05182406,
0.04935598,
float("NaN"),
0.03577725,
],
[
0.10249407,
0.05842565,
0.05076373,
float("NaN"),
0.03127232,
0.02435786,
float("NaN"),
0.01945794,
],
[
0.07421655,
0.03989992,
0.03532412,
float("NaN"),
0.03203276,
0.02927602,
float("NaN"),
0.02162477,
],
]
|
none
| 1
| 1.471997
| 1
|
|
snsapi/third/douban_client/client.py
|
hupili/snsapi
| 51
|
6627233
|
# -*- coding: utf-8 -*-
from api.pyoauth2 import Client, AccessToken
from .api import DoubanAPI
class DoubanClient(DoubanAPI):
    """OAuth2 client wrapper for the Douban API.

    Thin delegation layer over pyoauth2's ``Client``/``AccessToken``.
    Supports the authorization-code, resource-owner-password and
    refresh-token flows; the caller picks one via the ``auth_with_*``
    methods, after which ``self.access_token`` holds the credentials.
    """
    API_HOST = 'https://api.douban.com'
    AUTH_HOST = 'https://www.douban.com'
    TOKEN_URL = AUTH_HOST + '/service/auth2/token'
    AUTHORIZE_URL = AUTH_HOST + '/service/auth2/auth'
    def __init__(self, key, secret, redirect='', scope=''):
        # Starts unauthenticated (empty token); authenticate with one of
        # the auth_with_* methods before calling the API.
        self.redirect_uri = redirect
        self.scope = scope
        self.client = Client(key, secret,
                             site=self.API_HOST,
                             authorize_url=self.AUTHORIZE_URL,
                             token_url=self.TOKEN_URL)
        self.access_token = AccessToken(self.client, '')
    def __repr__(self):
        return '<DoubanClient OAuth2>'
    @property
    def authorize_url(self):
        """URL to redirect the user to for the authorization-code flow."""
        return self.client.auth_code.authorize_url(redirect_uri=self.redirect_uri, scope=self.scope)
    def auth_with_code(self, code):
        """Exchange an authorization code for an access token."""
        self.access_token = self.client.auth_code.get_token(code, redirect_uri=self.redirect_uri)
    def auth_with_token(self, token):
        """Adopt an existing access-token string directly."""
        self.access_token = AccessToken(self.client, token)
    def auth_with_password(self, username, password, **opt):
        """Authenticate via the resource-owner password grant."""
        self.access_token = self.client.password.get_token(username=username, password=password,
                                                           redirect_uri=self.redirect_uri, **opt)
    @property
    def token_code(self):
        """Current access-token string (falsy when unauthenticated)."""
        return self.access_token and self.access_token.token
    @property
    def refresh_token_code(self):
        """Refresh-token string if the current token has one, else None."""
        return getattr(self.access_token, 'refresh_token', None)
    def refresh_token(self, refresh_token):
        """Replace the current token with a fresh one obtained via *refresh_token*."""
        access_token = AccessToken(self.client, token='', refresh_token=refresh_token)
        self.access_token = access_token.refresh()
|
# -*- coding: utf-8 -*-
from api.pyoauth2 import Client, AccessToken
from .api import DoubanAPI
class DoubanClient(DoubanAPI):
API_HOST = 'https://api.douban.com'
AUTH_HOST = 'https://www.douban.com'
TOKEN_URL = AUTH_HOST + '/service/auth2/token'
AUTHORIZE_URL = AUTH_HOST + '/service/auth2/auth'
def __init__(self, key, secret, redirect='', scope=''):
self.redirect_uri = redirect
self.scope = scope
self.client = Client(key, secret,
site=self.API_HOST,
authorize_url=self.AUTHORIZE_URL,
token_url=self.TOKEN_URL)
self.access_token = AccessToken(self.client, '')
def __repr__(self):
return '<DoubanClient OAuth2>'
@property
def authorize_url(self):
return self.client.auth_code.authorize_url(redirect_uri=self.redirect_uri, scope=self.scope)
def auth_with_code(self, code):
self.access_token = self.client.auth_code.get_token(code, redirect_uri=self.redirect_uri)
def auth_with_token(self, token):
self.access_token = AccessToken(self.client, token)
def auth_with_password(self, username, password, **opt):
self.access_token = self.client.password.get_token(username=username, password=password,
redirect_uri=self.redirect_uri, **opt)
@property
def token_code(self):
return self.access_token and self.access_token.token
@property
def refresh_token_code(self):
return getattr(self.access_token, 'refresh_token', None)
def refresh_token(self, refresh_token):
access_token = AccessToken(self.client, token='', refresh_token=refresh_token)
self.access_token = access_token.refresh()
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 2.758913
| 3
|
app-packages/tensorflow/package/scripts/params.py
|
turningme/incubator-retired-slider
| 60
|
6627234
|
<filename>app-packages/tensorflow/package/scripts/params.py
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
# server configurations
# Flattens Slider app-config values (configurations/global) into module-level
# names consumed by the command scripts. All keys below must be present in
# the app configuration; missing keys raise at import time.
config = Script.get_config()
hadoop_conf = config['configurations']['global']['hadoop.conf']
yarn_cg_root = config['configurations']['global']['yarn.cgroup.root']
user_name = config['configurations']['global']['user.name']
registry_zk = config['configurations']['global']['zookeeper.quorum']
user_scripts_entry = config['configurations']['global']['user.scripts.entry']
user_checkpoint_prefix = config['configurations']['global']['user.checkpoint.prefix']
docker_image = config['configurations']['global']['docker.image']
app_root = config['configurations']['global']['app_root']
app_log_dir = config['configurations']['global']['app_log_dir']
pid_file = config['configurations']['global']['pid_file']
container_id = config['configurations']['global']['app_container_id']
ps_port = config['configurations']['global']['ps.port']
chiefworker_port = config['configurations']['global']['chiefworker.port']
worker_port = config['configurations']['global']['worker.port']
tensorboard_port = config['configurations']['global']['tensorboard.port']
# Role-name -> port mapping for the TensorFlow cluster components.
ports_dict = {"port.ps": ps_port,
              "port.chiefworker": chiefworker_port,
              "port.worker": worker_port,
              "port.tensorboard": tensorboard_port}
componentName = config['componentName']
service_name = config['serviceName']
hostname = config['hostname']
|
<filename>app-packages/tensorflow/package/scripts/params.py
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
# server configurations
config = Script.get_config()
hadoop_conf = config['configurations']['global']['hadoop.conf']
yarn_cg_root = config['configurations']['global']['yarn.cgroup.root']
user_name = config['configurations']['global']['user.name']
registry_zk = config['configurations']['global']['zookeeper.quorum']
user_scripts_entry = config['configurations']['global']['user.scripts.entry']
user_checkpoint_prefix = config['configurations']['global']['user.checkpoint.prefix']
docker_image = config['configurations']['global']['docker.image']
app_root = config['configurations']['global']['app_root']
app_log_dir = config['configurations']['global']['app_log_dir']
pid_file = config['configurations']['global']['pid_file']
container_id = config['configurations']['global']['app_container_id']
ps_port = config['configurations']['global']['ps.port']
chiefworker_port = config['configurations']['global']['chiefworker.port']
worker_port = config['configurations']['global']['worker.port']
tensorboard_port = config['configurations']['global']['tensorboard.port']
ports_dict = {"port.ps": ps_port,
"port.chiefworker": chiefworker_port,
"port.worker": worker_port,
"port.tensorboard": tensorboard_port}
componentName = config['componentName']
service_name = config['serviceName']
hostname = config['hostname']
|
en
| 0.851292
|
#!/usr/bin/env python Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # server configurations
| 1.472009
| 1
|
data/tfrecord_decoder.py
|
AaronGrainer/yolact-instance-segmentation
| 0
|
6627235
|
import tensorflow as tf
class TfExampleDecoder(object):
    """Decode serialized ``tf.Example`` protos into dense detection tensors.

    Produces the image plus ground-truth classes, is_crowd flags, bounding
    boxes and instance masks for a single example.
    """

    def __init__(self):
        # Feature spec for tf.io.parse_single_example. VarLen features come
        # back as SparseTensors and are densified in decode().
        self._keys_to_features = {
            'image/height': tf.io.FixedLenFeature([], dtype=tf.int64),
            'image/width': tf.io.FixedLenFeature([], dtype=tf.int64),
            'image/encoded': tf.io.FixedLenFeature([], dtype=tf.string),
            'image/object/bbox/xmin': tf.io.VarLenFeature(dtype=tf.float32),
            'image/object/bbox/xmax': tf.io.VarLenFeature(dtype=tf.float32),
            'image/object/bbox/ymin': tf.io.VarLenFeature(dtype=tf.float32),
            'image/object/bbox/ymax': tf.io.VarLenFeature(dtype=tf.float32),
            'image/object/class/label_id': tf.io.VarLenFeature(dtype=tf.int64),
            'image/object/is_crowd': tf.io.VarLenFeature(dtype=tf.int64),
            'image/object/mask': tf.io.VarLenFeature(dtype=tf.string),
        }

    def _decode_image(self, parsed_tensors):
        """Decode the JPEG bytes into an HxWx3 tensor (3-channel assumed)."""
        # BUGFIX: removed leftover debug print() calls from the decode path.
        image = tf.io.decode_jpeg(parsed_tensors['image/encoded'])
        image.set_shape([None, None, 3])
        return image

    def _decode_boxes(self, parsed_tensors):
        """Stack corner coordinates into an [N, 4] (ymin, xmin, ymax, xmax) tensor.

        NOTE(review): coordinates are passed through unchanged; despite the
        original "Denormalize the box here" comment, no denormalization
        happens in this method.
        """
        xmin = parsed_tensors['image/object/bbox/xmin']
        ymin = parsed_tensors['image/object/bbox/ymin']
        xmax = parsed_tensors['image/object/bbox/xmax']
        ymax = parsed_tensors['image/object/bbox/ymax']
        return tf.stack([ymin, xmin, ymax, xmax], axis=-1)

    def _decode_masks(self, parsed_tensors):
        """Decode PNG instance masks into a float32 [N, H, W] tensor."""
        def _decode_png_mask(png_bytes):
            mask = tf.squeeze(
                tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
            mask = tf.cast(mask, dtype=tf.float32)
            mask.set_shape([None, None])
            return mask
        height = parsed_tensors['image/height']
        width = parsed_tensors['image/width']
        masks = parsed_tensors['image/object/mask']
        # Empty examples yield a [0, H, W] tensor instead of failing map_fn.
        return tf.cond(
            pred=tf.greater(tf.size(input=masks), 0),
            true_fn=lambda: tf.map_fn(
                _decode_png_mask, masks, dtype=tf.float32),
            false_fn=lambda: tf.zeros([0, height, width], dtype=tf.float32))

    def decode(self, serialized_example):
        """Parse one serialized example and return a dict of dense tensors.

        Returns keys: image, height, width, gt_classes, gt_is_crowd,
        gt_bboxes, gt_masks.
        """
        parsed_tensors = tf.io.parse_single_example(
            serialized=serialized_example, features=self._keys_to_features)
        # Densify every sparse feature ('' for strings, 0 for numerics).
        for k in parsed_tensors:
            if isinstance(parsed_tensors[k], tf.SparseTensor):
                if parsed_tensors[k].dtype == tf.string:
                    parsed_tensors[k] = tf.sparse.to_dense(
                        parsed_tensors[k], default_value="")
                else:
                    parsed_tensors[k] = tf.sparse.to_dense(
                        parsed_tensors[k], default_value=0)
        image = self._decode_image(parsed_tensors)
        boxes = self._decode_boxes(parsed_tensors)
        masks = self._decode_masks(parsed_tensors)
        # BUGFIX: tf.shape() returns a 1-D tensor, so the original predicate
        # tf.greater(tf.shape(...), 0) was not the scalar tf.cond requires;
        # use tf.size, matching the pattern already used in _decode_masks.
        is_crowds = tf.cond(
            tf.greater(tf.size(parsed_tensors['image/object/is_crowd']), 0),
            lambda: tf.cast(
                parsed_tensors['image/object/is_crowd'], dtype=tf.bool),
            lambda: tf.zeros_like(parsed_tensors['image/object/class/label_id'], dtype=tf.bool))
        decoded_tensors = {
            'image': image,
            'height': parsed_tensors['image/height'],
            'width': parsed_tensors['image/width'],
            'gt_classes': parsed_tensors['image/object/class/label_id'],
            'gt_is_crowd': is_crowds,
            'gt_bboxes': boxes,
            'gt_masks': masks
        }
        return decoded_tensors
|
import tensorflow as tf
class TfExampleDecoder(object):
def __init__(self):
self._keys_to_features = {
'image/height': tf.io.FixedLenFeature([], dtype=tf.int64),
'image/width': tf.io.FixedLenFeature([], dtype=tf.int64),
'image/encoded': tf.io.FixedLenFeature([], dtype=tf.string),
'image/object/bbox/xmin': tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.io.VarLenFeature(dtype=tf.float32),
'image/object/class/label_id': tf.io.VarLenFeature(dtype=tf.int64),
'image/object/is_crowd': tf.io.VarLenFeature(dtype=tf.int64),
'image/object/mask': tf.io.VarLenFeature(dtype=tf.string),
}
def _decode_image(self, parsed_tensors):
print("parsed_tensors['image/encoded']:", parsed_tensors['image/encoded'])
image = tf.io.decode_jpeg(parsed_tensors['image/encoded'])
print("image:", image)
image.set_shape([None, None, 3])
return image
def _decode_boxes(self, parsed_tensors):
# Denormalize the box here
xmin = parsed_tensors['image/object/bbox/xmin']
ymin = parsed_tensors['image/object/bbox/ymin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymax = parsed_tensors['image/object/bbox/ymax']
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
def _decode_masks(self, parsed_tensors):
def _decode_png_mask(png_bytes):
mask = tf.squeeze(
tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
mask = tf.cast(mask, dtype=tf.float32)
mask.set_shape([None, None])
return mask
height = parsed_tensors['image/height']
width = parsed_tensors['image/width']
masks = parsed_tensors['image/object/mask']
return tf.cond(
pred=tf.greater(tf.size(input=masks), 0),
true_fn=lambda: tf.map_fn(
_decode_png_mask, masks, dtype=tf.float32),
false_fn=lambda: tf.zeros([0, height, width], dtype=tf.float32))
def decode(self, serialized_example):
print("serialized_example:", serialized_example)
parsed_tensors = tf.io.parse_single_example(
serialized=serialized_example, features=self._keys_to_features)
print("parsed_tensors:", parsed_tensors)
for k in parsed_tensors:
if isinstance(parsed_tensors[k], tf.SparseTensor):
if parsed_tensors[k].dtype == tf.string:
parsed_tensors[k] = tf.sparse.to_dense(
parsed_tensors[k], default_value="")
else:
parsed_tensors[k] = tf.sparse.to_dense(
parsed_tensors[k], default_value=0)
print("parsed_tensors:", parsed_tensors)
image = self._decode_image(parsed_tensors)
boxes = self._decode_boxes(parsed_tensors)
masks = self._decode_masks(parsed_tensors)
is_crowds = tf.cond(
tf.greater(tf.shape(parsed_tensors['image/object/is_crowd']), 0),
lambda: tf.cast(
parsed_tensors['image/object/is_crowd'], dtype=tf.bool),
lambda: tf.zeros_like(parsed_tensors['image/object/class/label_id'], dtype=tf.bool))
decoded_tensors = {
'image': image,
'height': parsed_tensors['image/height'],
'width': parsed_tensors['image/width'],
'gt_classes': parsed_tensors['image/object/class/label_id'],
'gt_is_crowd': is_crowds,
'gt_bboxes': boxes,
'gt_masks': masks
}
return decoded_tensors
|
en
| 0.583012
|
# Denormalize the box here
| 2.501764
| 3
|
eorde/eoColorbar.py
|
pletzer/eorde
| 0
|
6627236
|
import vtk
class Colorbar(object):
    """Pipeline element wrapping a vtkScalarBarActor bound to a lookup table."""

    def __init__(self, lookupTable, pos=(0.8, 0.2), size=14):
        self.lut = lookupTable
        bar = vtk.vtkScalarBarActor()
        bar.SetLookupTable(lookupTable)
        bar.SetPosition(pos)
        bar.GetLabelTextProperty().SetFontSize(size)
        self.actor = bar

    def update(self, key):
        """No-op: a colorbar has no per-frame state to refresh."""
        pass

    def getActor(self):
        """Return the underlying vtkScalarBarActor."""
        return self.actor
###############################################################################
def test():
    """Smoke test: render a standalone colorbar in a Scene."""
    from eorde.eoScene import Scene
    lookup = vtk.vtkLookupTable()
    lookup.SetHueRange(0.667, 0.)
    lookup.Build()
    bar = Colorbar(lookup)
    scene = Scene()
    scene.addPipelines([bar])
    scene.setBackground(0.9, 0.9, 0.9)
    scene.start()
if __name__ == '__main__':
    test()
|
import vtk
class Colorbar(object):
def __init__(self, lookupTable, pos=(0.8, 0.2), size=14):
self.lut = lookupTable
self.actor = vtk.vtkScalarBarActor()
self.actor.SetLookupTable(lookupTable)
self.actor.SetPosition(pos)
self.actor.GetLabelTextProperty().SetFontSize(size)
def update(self, key):
pass
def getActor(self):
return self.actor
###############################################################################
def test():
from eorde.eoScene import Scene
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.667, 0.)
lut.Build()
colorbar = Colorbar(lut)
s = Scene()
s.addPipelines([colorbar])
s.setBackground(0.9, 0.9, 0.9)
s.start()
if __name__ == '__main__':
test()
|
de
| 0.866588
|
###############################################################################
| 2.60358
| 3
|
contentcuration/manage.py
|
benjaoming/content-curation
| 0
|
6627237
|
<gh_stars>0
#!/usr/bin/env python
"""Django management entry point for the contentcuration project."""
import os
import sys
# Attach Python Cloud Debugger
if __name__ == "__main__":
    #import warnings
    #warnings.filterwarnings('ignore', message=r'Module .*? is being added to sys\.path', append=True)
    # Use the project settings unless the environment already overrides them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "contentcuration.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
#!/usr/bin/env python
import os
import sys
# Attach Python Cloud Debugger
if __name__ == "__main__":
#import warnings
#warnings.filterwarnings('ignore', message=r'Module .*? is being added to sys\.path', append=True)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "contentcuration.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
en
| 0.370089
|
#!/usr/bin/env python # Attach Python Cloud Debugger #import warnings #warnings.filterwarnings('ignore', message=r'Module .*? is being added to sys\.path', append=True)
| 1.500628
| 2
|
pred.py
|
huangtiansama/CNN-ORC
| 1
|
6627238
|
<filename>pred.py
import numpy as np
import tensorflow as tf
import cv2 as cv
from matplotlib import pyplot as plt

# Module-level accumulators shared across calls.
# NOTE(review): these grow on every pred() call; resetting per call would be safer.
x=[]  # resized input images fed to the network
q=[]  # predicted digit labels


def resize(img):
    """Resize every image in `img` to 30x46 and append it to the global `x`."""
    for i in img:
        x.append(cv.resize(i,(30,46)))


def pred(img):
    """Restore the saved CNN checkpoint and classify the images in `img`.

    Appends each predicted class index to the global list `q`, skipping
    class 10, and prints the accumulated predictions.
    """
    with tf.Session() as sess:
        resize(img)
        saver = tf.train.import_meta_graph('./save_mode-1707.meta')
        saver.restore(sess, tf.train.latest_checkpoint('./'))
        graph = tf.get_default_graph()
        y = graph.get_operation_by_name('pred').outputs[0]
        t=tf.nn.softmax(y)
        img_input=graph.get_operation_by_name('img_input').outputs[0]
        keep_prob = graph.get_operation_by_name('keep_prob').outputs[0]
        # keep_prob=1 disables dropout at inference time.
        for i in sess.run(t,feed_dict={img_input:x,keep_prob:1}):
            n=i.argmax(axis=0)
            # class 10 is presumably a "blank / no character" class — confirm
            if n!=10:
                q.append(n)
        print(q)
|
<filename>pred.py
import numpy as np
import tensorflow as tf
import cv2 as cv
from matplotlib import pyplot as plt
x=[]
q=[]
def resize(img):
for i in img:
x.append(cv.resize(i,(30,46)))
def pred(img):
with tf.Session() as sess:
resize(img)
saver = tf.train.import_meta_graph('./save_mode-1707.meta')
saver.restore(sess, tf.train.latest_checkpoint('./'))
graph = tf.get_default_graph()
y = graph.get_operation_by_name('pred').outputs[0]
t=tf.nn.softmax(y)
img_input=graph.get_operation_by_name('img_input').outputs[0]
keep_prob = graph.get_operation_by_name('keep_prob').outputs[0]
for i in sess.run(t,feed_dict={img_input:x,keep_prob:1}):
n=i.argmax(axis=0)
if n!=10:
q.append(n)
print(q)
|
none
| 1
| 2.397015
| 2
|
|
pyradi/data/images/plotSiemensStarxxx.py
|
grosskc/pyradi
| 43
|
6627239
|
<gh_stars>10-100
#code originally by <NAME>
#https://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system
# see also reference images at http://sipi.usc.edu/database/
import numpy as np
import Image
import pyradi.ryplot as ryplot
import pyradi.ryutils as ryutils

if __name__ == '__main__':
    # im = Image.open('lena512color.png')
    im = Image.open('600px-Siemens_star-blurred.png').convert('RGB')
    # im = im.convert('RGB')
    data = np.array(im)
    """Plots an image reprojected into polar coordinages with the origin
    at "origin" (a tuple of (x0, y0), defaults to the center of the image)"""
    print(data.shape)
    # origin=None lets reproject_image_into_polar default to the image centre.
    origin = None #(300,350)
    pim = ryplot.ProcessImage()
    polar_grid, r, theta = pim.reproject_image_into_polar(data, origin, False)
    # Side-by-side plot: original image and its polar reprojection.
    p = ryplot.Plotter(1,1,2)
    p.showImage(1, data, ptitle='Image')
    p.showImage(2, polar_grid, ptitle='Image in Polar Coordinates',
        xlabel='Angle',ylabel='Radial')
    p.saveFig('warpedStar.png')
    print('done')
|
#code originally by <NAME>
#https://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system
# see also reference images at http://sipi.usc.edu/database/
import numpy as np
import Image
import pyradi.ryplot as ryplot
import pyradi.ryutils as ryutils
if __name__ == '__main__':
# im = Image.open('lena512color.png')
im = Image.open('600px-Siemens_star-blurred.png').convert('RGB')
# im = im.convert('RGB')
data = np.array(im)
"""Plots an image reprojected into polar coordinages with the origin
at "origin" (a tuple of (x0, y0), defaults to the center of the image)"""
print(data.shape)
origin = None #(300,350)
pim = ryplot.ProcessImage()
polar_grid, r, theta = pim.reproject_image_into_polar(data, origin, False)
p = ryplot.Plotter(1,1,2)
p.showImage(1, data, ptitle='Image')
p.showImage(2, polar_grid, ptitle='Image in Polar Coordinates',
xlabel='Angle',ylabel='Radial')
p.saveFig('warpedStar.png')
print('done')
|
en
| 0.653597
|
#code originally by <NAME> #https://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system # see also reference images at http://sipi.usc.edu/database/ # im = Image.open('lena512color.png') # im = im.convert('RGB') Plots an image reprojected into polar coordinages with the origin at "origin" (a tuple of (x0, y0), defaults to the center of the image) #(300,350)
| 3.23075
| 3
|
awx/main/tests/functional/test_rbac_label.py
|
sumit-21/awx
| 17
|
6627240
|
# RBAC tests for Label access rules (organization membership, job-template
# read access, superuser and admin privileges).
import pytest

from awx.main.access import (
    LabelAccess,
)


@pytest.mark.django_db
def test_label_get_queryset_user(label, user):
    """An org member sees the org's labels in the queryset."""
    u = user('user', False)
    access = LabelAccess(u)
    label.organization.member_role.members.add(u)
    assert access.get_queryset().count() == 1


@pytest.mark.django_db
def test_label_get_queryset_su(label, user):
    """A superuser sees all labels."""
    access = LabelAccess(user('user', True))
    assert access.get_queryset().count() == 1


@pytest.mark.django_db
def test_label_read_access(label, user):
    """Read access is granted by org membership."""
    access = LabelAccess(user('user', False))
    assert not access.can_read(label)
    label.organization.member_role.members.add(user('user', False))
    assert access.can_read(label)


@pytest.mark.django_db
def test_label_jt_read_access(label, user, job_template):
    """Read access is also granted through a readable job template's labels."""
    access = LabelAccess(user('user', False))
    assert not access.can_read(label)
    job_template.read_role.members.add(user('user', False))
    job_template.labels.add(label)
    assert access.can_read(label)


@pytest.mark.django_db
def test_label_access_superuser(label, user):
    """A superuser can read, change and delete any label."""
    access = LabelAccess(user('admin', True))
    assert access.can_read(label)
    assert access.can_change(label, None)
    assert access.can_delete(label)


@pytest.mark.django_db
def test_label_access_admin(organization_factory):
    '''can_change because I am an admin of that org'''
    no_members = organization_factory("no_members")
    members = organization_factory("has_members",
                                   users=['admin'],
                                   labels=['test'])
    label = members.labels.test
    admin = members.users.admin
    members.organization.admin_role.members.add(admin)
    access = LabelAccess(admin)
    # Cannot move the label into an org the admin does not administer.
    assert not access.can_change(label, {'organization': no_members.organization.id})
    assert access.can_read(label)
    assert access.can_change(label, None)
    assert access.can_change(label, {'organization': members.organization.id})
    assert access.can_delete(label)


@pytest.mark.django_db
def test_label_access_user(label, user):
    """A plain org member can read and add, but not change or delete."""
    access = LabelAccess(user('user', False))
    label.organization.member_role.members.add(user('user', False))
    assert not access.can_add({'organization': None})
    assert not access.can_change(label, None)
    assert not access.can_delete(label)
    assert access.can_read(label)
    assert access.can_add({'organization': label.organization.id})
|
import pytest
from awx.main.access import (
LabelAccess,
)
@pytest.mark.django_db
def test_label_get_queryset_user(label, user):
u = user('user', False)
access = LabelAccess(u)
label.organization.member_role.members.add(u)
assert access.get_queryset().count() == 1
@pytest.mark.django_db
def test_label_get_queryset_su(label, user):
access = LabelAccess(user('user', True))
assert access.get_queryset().count() == 1
@pytest.mark.django_db
def test_label_read_access(label, user):
access = LabelAccess(user('user', False))
assert not access.can_read(label)
label.organization.member_role.members.add(user('user', False))
assert access.can_read(label)
@pytest.mark.django_db
def test_label_jt_read_access(label, user, job_template):
access = LabelAccess(user('user', False))
assert not access.can_read(label)
job_template.read_role.members.add(user('user', False))
job_template.labels.add(label)
assert access.can_read(label)
@pytest.mark.django_db
def test_label_access_superuser(label, user):
access = LabelAccess(user('admin', True))
assert access.can_read(label)
assert access.can_change(label, None)
assert access.can_delete(label)
@pytest.mark.django_db
def test_label_access_admin(organization_factory):
'''can_change because I am an admin of that org'''
no_members = organization_factory("no_members")
members = organization_factory("has_members",
users=['admin'],
labels=['test'])
label = members.labels.test
admin = members.users.admin
members.organization.admin_role.members.add(admin)
access = LabelAccess(admin)
assert not access.can_change(label, {'organization': no_members.organization.id})
assert access.can_read(label)
assert access.can_change(label, None)
assert access.can_change(label, {'organization': members.organization.id})
assert access.can_delete(label)
@pytest.mark.django_db
def test_label_access_user(label, user):
access = LabelAccess(user('user', False))
label.organization.member_role.members.add(user('user', False))
assert not access.can_add({'organization': None})
assert not access.can_change(label, None)
assert not access.can_delete(label)
assert access.can_read(label)
assert access.can_add({'organization': label.organization.id})
|
en
| 0.932368
|
can_change because I am an admin of that org
| 2.305351
| 2
|
aiodiscover/__init__.py
|
bdraco/aiodiscoverhosts
| 0
|
6627241
|
<filename>aiodiscover/__init__.py
# -*- coding: utf-8 -*-
"""Top-level package for Async Host discovery."""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
# Do not edit this string manually, always use bumpversion
# Details in CONTRIBUTING.md
__version__ = "1.4.5"
from .discovery import DiscoverHosts  # noqa: F401


def get_module_version():
    """Return the package version string."""
    return __version__
|
<filename>aiodiscover/__init__.py
# -*- coding: utf-8 -*-
"""Top-level package for Async Host discovery."""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
# Do not edit this string manually, always use bumpversion
# Details in CONTRIBUTING.md
__version__ = "1.4.5"
from .discovery import DiscoverHosts # noqa: F401
def get_module_version():
return __version__
|
en
| 0.697527
|
# -*- coding: utf-8 -*- Top-level package for Async Host discovery. # Do not edit this string manually, always use bumpversion # Details in CONTRIBUTING.md # noqa: F401
| 1.259166
| 1
|
airflow/include/spark_template.py
|
alecuba16/sdggroup_technical_test_spark_kafka_airflow
| 0
|
6627242
|
<reponame>alecuba16/sdggroup_technical_test_spark_kafka_airflow
import sys, csv
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, lit,array_except,array,when,array_except,current_timestamp
from pyspark.sql.types import StringType

# Shared SparkSession for the templated job (created or reused).
# NOTE(review): `array_except` is imported twice and several imports appear
# unused in this template — clean up if the template stays this small.
spark = SparkSession.builder.appName("spark_demo").getOrCreate()
|
import sys, csv
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, lit,array_except,array,when,array_except,current_timestamp
from pyspark.sql.types import StringType
spark = SparkSession.builder.appName("spark_demo").getOrCreate()
|
none
| 1
| 2.240636
| 2
|
|
tools/create_yml.py
|
rai-project/mxnet
| 7
|
6627243
|
<gh_stars>1-10
#!env python
"""Generate MLModelScope .yml manifests for every model under
detectionModelZoo/, using one existing manifest as a template."""
import os
import subprocess
import shutil
import hashlib
from ruamel.yaml import YAML

# Mapping from lowercase model-name terms to their canonical capitalisation.
term_dict = {'ssd': 'SSD', 'rcnn': 'RCNN', 'resnet': 'ResNet', 'resnet50': 'ResNet50', 'resnet101': 'ResNet101',
             'nas': 'NAS', 'coco': 'COCO', 'coco14': 'COCO14', 'rfcn': 'RFCN', 'mobilenet': 'MobileNet',
             'ssdlite': 'SSDLite', 'fpn': 'FPN', 'ppn': 'PPN'}
# use md5sum for graph file checksum
# NOTE(review): this instance is never used; hashlib.md5(graph_bytes) is
# created fresh below instead.
hash_md5 = hashlib.md5()
yaml = YAML()
yaml.default_flow_style = False
# sample yml as the base and load the structures inside the sample_yml
yml_dir = os.path.join(os.getcwd(), "../builtin_models")
sample_complete_name = 'ssd_mobilenet_v2_coco_2018_03_29'
sample_pretty_name = 'SSD_MobileNet_v2_COCO'
sample_yml = os.path.join(yml_dir, sample_pretty_name + ".yml")
with open(sample_yml, 'r') as stream:
    yml_data = yaml.load(stream)
# Get the clean name of each model
model_paths = subprocess.check_output(
    "ls -d detectionModelZoo/*/", shell=True).decode("utf-8")
model_paths = model_paths.split('\n')[:-1]
model_names = []
pretty_names = []
for i, model_path in enumerate(model_paths):
    model_name = model_path.split("/")[1]
    model_names.append(model_name)
    # Drop the trailing date components (e.g. _2018_03_29) before prettifying.
    terms = model_name.split("_")[:-3]
    for i in range(len(terms)):
        if terms[i] in term_dict.keys():
            terms[i] = term_dict[terms[i]]
        elif terms[i][0] == 'v':
            # version tokens like "v2" stay lowercase
            continue
        else:
            terms[i] = terms[i].capitalize()
    pretty_names.append('_'.join(terms))
# for i in range(len(model_paths)):
#     print(model_paths[i])
#     print(pretty_names[i])
last_model_name = sample_complete_name
for model_path, complete_name, pretty_name in zip(model_paths, model_names, pretty_names):
    # if the yml file already existed, continue to the next one
    new_yml = os.path.join(yml_dir, pretty_name + ".yml")
    if os.path.isfile(new_yml):
        print(pretty_name, "has already existed")
        continue
    else:
        print('Creating', pretty_name)
    # generate checksum with the model_path
    graph = os.path.join(model_path, 'frozen_inference_graph.pb')
    with open(graph, 'rb') as g:
        graph_bytes = g.read()
    checksum = hashlib.md5(graph_bytes).hexdigest()
    print(checksum)
    print(pretty_name)
    print(complete_name)
    # fill out the new yml file with model_name, pretty_name and checksum
    yml_data['name'] = pretty_name
    # Rewrite occurrences of the previously processed model's name so the
    # template fields always track the current model.
    yml_data['description'] = yml_data['description'].replace(
        last_model_name, complete_name)
    yml_data['model']['graph_path'] = yml_data['model']['graph_path'].replace(
        last_model_name, complete_name)
    yml_data['model']['graph_checksum'] = checksum
    yml_data['attributes']['manifest_author'] = '<NAME>'
    last_model_name = complete_name
    # Uncomment for sanity check
    # for key, item in yml_data.items():
    #     print(key, ":", item)
    with open(new_yml, 'w') as f:
        yaml.dump(yml_data, f)
|
#!env python
import os
import subprocess
import shutil
import hashlib
from ruamel.yaml import YAML
term_dict = {'ssd': 'SSD', 'rcnn': 'RCNN', 'resnet': 'ResNet', 'resnet50': 'ResNet50', 'resnet101': 'ResNet101',
'nas': 'NAS', 'coco': 'COCO', 'coco14': 'COCO14', 'rfcn': 'RFCN', 'mobilenet': 'MobileNet',
'ssdlite': 'SSDLite', 'fpn': 'FPN', 'ppn': 'PPN'}
# use md5sum for graph file checksum
hash_md5 = hashlib.md5()
yaml = YAML()
yaml.default_flow_style = False
# sample yml as the base and load the structures inside the sample_yml
yml_dir = os.path.join(os.getcwd(), "../builtin_models")
sample_complete_name = 'ssd_mobilenet_v2_coco_2018_03_29'
sample_pretty_name = 'SSD_MobileNet_v2_COCO'
sample_yml = os.path.join(yml_dir, sample_pretty_name + ".yml")
with open(sample_yml, 'r') as stream:
yml_data = yaml.load(stream)
# Get the clean name of each model
model_paths = subprocess.check_output(
"ls -d detectionModelZoo/*/", shell=True).decode("utf-8")
model_paths = model_paths.split('\n')[:-1]
model_names = []
pretty_names = []
for i, model_path in enumerate(model_paths):
model_name = model_path.split("/")[1]
model_names.append(model_name)
terms = model_name.split("_")[:-3]
for i in range(len(terms)):
if terms[i] in term_dict.keys():
terms[i] = term_dict[terms[i]]
elif terms[i][0] == 'v':
continue
else:
terms[i] = terms[i].capitalize()
pretty_names.append('_'.join(terms))
# for i in range(len(model_paths)):
# print(model_paths[i])
# print(pretty_names[i])
last_model_name = sample_complete_name
for model_path, complete_name, pretty_name in zip(model_paths, model_names, pretty_names):
# if the yml file already existed, continue to the next one
new_yml = os.path.join(yml_dir, pretty_name + ".yml")
if os.path.isfile(new_yml):
print(pretty_name, "has already existed")
continue
else:
print('Creating', pretty_name)
# generate checksum with the model_path
graph = os.path.join(model_path, 'frozen_inference_graph.pb')
with open(graph, 'rb') as g:
graph_bytes = g.read()
checksum = hashlib.md5(graph_bytes).hexdigest()
print(checksum)
print(pretty_name)
print(complete_name)
# fill out the new yml file with model_name, pretty_name and checksum
yml_data['name'] = pretty_name
yml_data['description'] = yml_data['description'].replace(
last_model_name, complete_name)
yml_data['model']['graph_path'] = yml_data['model']['graph_path'].replace(
last_model_name, complete_name)
yml_data['model']['graph_checksum'] = checksum
yml_data['attributes']['manifest_author'] = '<NAME>'
last_model_name = complete_name
# Uncomment for sanity check
# for key, item in yml_data.items():
# print(key, ":", item)
with open(new_yml, 'w') as f:
yaml.dump(yml_data, f)
|
en
| 0.738398
|
#!env python # use md5sum for graph file checksum # sample yml as the base and load the structures inside the sample_yml # Get the clean name of each model # for i in range(len(model_paths)): # print(model_paths[i]) # print(pretty_names[i]) # if the yml file already existed, continue to the next one # generate checksum with the model_path # fill out the new yml file with model_name, pretty_name and checksum # Uncomment for sanity check # for key, item in yml_data.items(): # print(key, ":", item)
| 2.180827
| 2
|
django/contrib/gis/tests/test_geoforms.py
|
kix/django
| 790
|
6627244
|
<gh_stars>100-1000
# Tests for django.contrib.gis.forms.GeometryField: defaults, SRID handling,
# null handling, geometry-type restriction and to_python conversion.
from django.forms import ValidationError
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.tests.utils import HAS_SPATIALREFSYS
from django.utils import unittest

if HAS_SPATIALREFSYS:
    from django.contrib.gis import forms
    from django.contrib.gis.geos import GEOSGeometry


@unittest.skipUnless(HAS_GDAL and HAS_SPATIALREFSYS, "GeometryFieldTest needs gdal support and a spatial database")
class GeometryFieldTest(unittest.TestCase):

    def test00_init(self):
        "Testing GeometryField initialization with defaults."
        fld = forms.GeometryField()
        for bad_default in ('blah', 3, 'FoO', None, 0):
            self.assertRaises(ValidationError, fld.clean, bad_default)

    def test01_srid(self):
        "Testing GeometryField with a SRID set."
        # Input that doesn't specify the SRID is assumed to be in the SRID
        # of the input field.
        fld = forms.GeometryField(srid=4326)
        geom = fld.clean('POINT(5 23)')
        self.assertEqual(4326, geom.srid)
        # Making the field in a different SRID from that of the geometry, and
        # asserting it transforms.
        fld = forms.GeometryField(srid=32140)
        tol = 0.0000001
        xform_geom = GEOSGeometry('POINT (951640.547328465 4219369.26171664)', srid=32140)
        # The cleaned geometry should be transformed to 32140.
        cleaned_geom = fld.clean('SRID=4326;POINT (-95.363151 29.763374)')
        self.assertTrue(xform_geom.equals_exact(cleaned_geom, tol))

    def test02_null(self):
        "Testing GeometryField's handling of null (None) geometries."
        # Form fields, by default, are required (`required=True`)
        fld = forms.GeometryField()
        self.assertRaises(forms.ValidationError, fld.clean, None)
        # Still not allowed if `null=False`.
        fld = forms.GeometryField(required=False, null=False)
        self.assertRaises(forms.ValidationError, fld.clean, None)
        # This will clean None as a geometry (See #10660).
        fld = forms.GeometryField(required=False)
        self.assertEqual(None, fld.clean(None))

    def test03_geom_type(self):
        "Testing GeometryField's handling of different geometry types."
        # By default, all geometry types are allowed.
        fld = forms.GeometryField()
        for wkt in ('POINT(5 23)', 'MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'LINESTRING(0 0, 1 1)'):
            self.assertEqual(GEOSGeometry(wkt), fld.clean(wkt))
        pnt_fld = forms.GeometryField(geom_type='POINT')
        self.assertEqual(GEOSGeometry('POINT(5 23)'), pnt_fld.clean('POINT(5 23)'))
        # a WKT for any other geom_type will be properly transformed by `to_python`
        self.assertEqual(GEOSGeometry('LINESTRING(0 0, 1 1)'), pnt_fld.to_python('LINESTRING(0 0, 1 1)'))
        # but rejected by `clean`
        self.assertRaises(forms.ValidationError, pnt_fld.clean, 'LINESTRING(0 0, 1 1)')

    def test04_to_python(self):
        """
        Testing to_python returns a correct GEOSGeometry object or
        a ValidationError
        """
        fld = forms.GeometryField()
        # to_python returns the same GEOSGeometry for a WKT
        for wkt in ('POINT(5 23)', 'MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'LINESTRING(0 0, 1 1)'):
            self.assertEqual(GEOSGeometry(wkt), fld.to_python(wkt))
        # but raises a ValidationError for any other string
        for wkt in ('POINT(5)', 'MULTI POLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'BLAH(0 0, 1 1)'):
            self.assertRaises(forms.ValidationError, fld.to_python, wkt)


def suite():
    """Build the test suite for this module."""
    s = unittest.TestSuite()
    s.addTest(unittest.makeSuite(GeometryFieldTest))
    return s


def run(verbosity=2):
    """Run the suite with a text test runner."""
    unittest.TextTestRunner(verbosity=verbosity).run(suite())


if __name__=="__main__":
    run()
|
from django.forms import ValidationError
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.tests.utils import HAS_SPATIALREFSYS
from django.utils import unittest
if HAS_SPATIALREFSYS:
from django.contrib.gis import forms
from django.contrib.gis.geos import GEOSGeometry
@unittest.skipUnless(HAS_GDAL and HAS_SPATIALREFSYS, "GeometryFieldTest needs gdal support and a spatial database")
class GeometryFieldTest(unittest.TestCase):
def test00_init(self):
"Testing GeometryField initialization with defaults."
fld = forms.GeometryField()
for bad_default in ('blah', 3, 'FoO', None, 0):
self.assertRaises(ValidationError, fld.clean, bad_default)
def test01_srid(self):
"Testing GeometryField with a SRID set."
# Input that doesn't specify the SRID is assumed to be in the SRID
# of the input field.
fld = forms.GeometryField(srid=4326)
geom = fld.clean('POINT(5 23)')
self.assertEqual(4326, geom.srid)
# Making the field in a different SRID from that of the geometry, and
# asserting it transforms.
fld = forms.GeometryField(srid=32140)
tol = 0.0000001
xform_geom = GEOSGeometry('POINT (951640.547328465 4219369.26171664)', srid=32140)
# The cleaned geometry should be transformed to 32140.
cleaned_geom = fld.clean('SRID=4326;POINT (-95.363151 29.763374)')
self.assertTrue(xform_geom.equals_exact(cleaned_geom, tol))
def test02_null(self):
"Testing GeometryField's handling of null (None) geometries."
# Form fields, by default, are required (`required=True`)
fld = forms.GeometryField()
self.assertRaises(forms.ValidationError, fld.clean, None)
# Still not allowed if `null=False`.
fld = forms.GeometryField(required=False, null=False)
self.assertRaises(forms.ValidationError, fld.clean, None)
# This will clean None as a geometry (See #10660).
fld = forms.GeometryField(required=False)
self.assertEqual(None, fld.clean(None))
def test03_geom_type(self):
"Testing GeometryField's handling of different geometry types."
# By default, all geometry types are allowed.
fld = forms.GeometryField()
for wkt in ('POINT(5 23)', 'MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'LINESTRING(0 0, 1 1)'):
self.assertEqual(GEOSGeometry(wkt), fld.clean(wkt))
pnt_fld = forms.GeometryField(geom_type='POINT')
self.assertEqual(GEOSGeometry('POINT(5 23)'), pnt_fld.clean('POINT(5 23)'))
# a WKT for any other geom_type will be properly transformed by `to_python`
self.assertEqual(GEOSGeometry('LINESTRING(0 0, 1 1)'), pnt_fld.to_python('LINESTRING(0 0, 1 1)'))
# but rejected by `clean`
self.assertRaises(forms.ValidationError, pnt_fld.clean, 'LINESTRING(0 0, 1 1)')
def test04_to_python(self):
"""
Testing to_python returns a correct GEOSGeometry object or
a ValidationError
"""
fld = forms.GeometryField()
# to_python returns the same GEOSGeometry for a WKT
for wkt in ('POINT(5 23)', 'MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'LINESTRING(0 0, 1 1)'):
self.assertEqual(GEOSGeometry(wkt), fld.to_python(wkt))
# but raises a ValidationError for any other string
for wkt in ('POINT(5)', 'MULTI POLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'BLAH(0 0, 1 1)'):
self.assertRaises(forms.ValidationError, fld.to_python, wkt)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(GeometryFieldTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
if __name__=="__main__":
run()
|
en
| 0.809754
|
# Input that doesn't specify the SRID is assumed to be in the SRID # of the input field. # Making the field in a different SRID from that of the geometry, and # asserting it transforms. # The cleaned geometry should be transformed to 32140. # Form fields, by default, are required (`required=True`) # Still not allowed if `null=False`. # This will clean None as a geometry (See #10660). # By default, all geometry types are allowed. # a WKT for any other geom_type will be properly transformed by `to_python` # but rejected by `clean` Testing to_python returns a correct GEOSGeometry object or a ValidationError # to_python returns the same GEOSGeometry for a WKT # but raises a ValidationError for any other string
| 2.172057
| 2
|
FirstCommit/venv/Firstcommit.py
|
Cyphersss/LearnGit
| 0
|
6627245
|
<reponame>Cyphersss/LearnGit
# Minimal first-commit demo: emit a marker string.
message = 'MYFirstCommit'
print(message)
|
print('MYFirstCommit')
|
none
| 1
| 0.809589
| 1
|
|
zero/drivers/opengl/__init__.py
|
jsa4000/OpenGL-Python
| 0
|
6627246
|
<filename>zero/drivers/opengl/__init__.py
from __future__ import absolute_import, division, print_function

# Public OpenGL driver surface: re-export the concrete driver classes so
# callers can import them directly from this package.
from .buffer import OpenGLBuffer
from .texture import OpenGLTexture
from .shader import OpenGLShader
from .render import OpenGLRender
|
<filename>zero/drivers/opengl/__init__.py
from __future__ import absolute_import, division, print_function
from .buffer import OpenGLBuffer
from .texture import OpenGLTexture
from .shader import OpenGLShader
from .render import OpenGLRender
|
none
| 1
| 1.068476
| 1
|
|
reroot_at_edge.py
|
merve-kilic/MinVar-Rooting
| 0
|
6627247
|
<filename>reroot_at_edge.py<gh_stars>0
#! /usr/bin/env python
# usage: python reroot_at_edge.py <tree_file> <head_node> <d2head> <out_file>
"""Reroot a phylogenetic tree on the edge above a named node, placing the
new root at a given distance from that (child) node."""
from Tree_extend import Tree_extend
#from sys import argv
from os.path import splitext
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-i','--input',required=True,help="input file")
parser.add_argument('-n','--hnode',required=True,help="the label of the head (child) node)")
parser.add_argument('-d','--distance',required=True,help="distance of the new root to head (child) node)")
parser.add_argument('-o','--outfile',required=False,help="specify output file")
args = vars(parser.parse_args())

tree_file = args["input"]
head = args["hnode"]
x = float(args["distance"])  # distance from the new root to the head node
if args["outfile"]:
    out_file = args["outfile"]
else:
    # None makes tree_as_newick fall back to its default output target.
    out_file = None
base_name,ext = splitext(tree_file)

a_tree = Tree_extend(tree_file=tree_file)
# Find the edge whose head node carries the requested label and reroot there,
# splitting the edge length so the root sits x away from the head.
for edge in a_tree.ddpTree.preorder_edge_iter():
    if (edge.head_node.label == head) or (edge.head_node.is_leaf() and edge.head_node.label == head):
        if (edge is not None) and edge.length:
            a_tree.reroot_at_edge(edge,edge.length-x,x)
        break
a_tree.tree_as_newick(outfile=out_file)
|
<filename>reroot_at_edge.py<gh_stars>0
#! /usr/bin/env python
# usage: python reroot_at_edge.py <tree_file> <head_node> <d2head> <out_file>
from Tree_extend import Tree_extend
#from sys import argv
from os.path import splitext
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-i','--input',required=True,help="input file")
parser.add_argument('-n','--hnode',required=True,help="the label of the head (child) node)")
parser.add_argument('-d','--distance',required=True,help="distance of the new root to head (child) node)")
parser.add_argument('-o','--outfile',required=False,help="specify output file")
args = vars(parser.parse_args())
tree_file = args["input"]
head = args["hnode"]
x = float(args["distance"])
if args["outfile"]:
out_file = args["outfile"]
else:
out_file = None
base_name,ext = splitext(tree_file)
a_tree = Tree_extend(tree_file=tree_file)
for edge in a_tree.ddpTree.preorder_edge_iter():
if (edge.head_node.label == head) or (edge.head_node.is_leaf() and edge.head_node.label == head):
if (edge is not None) and edge.length:
a_tree.reroot_at_edge(edge,edge.length-x,x)
break
a_tree.tree_as_newick(outfile=out_file)
|
en
| 0.265939
|
#! /usr/bin/env python # usage: python reroot_at_edge.py <tree_file> <head_node> <d2head> <out_file> #from sys import argv
| 2.751808
| 3
|
snake_engine/engine.py
|
JohnBee/SnakeAi
| 0
|
6627248
|
<gh_stars>0
from random import randint
class Engine:
def __init__(self, world_width=10, world_height=10):
'''
Initialise the snake engine where all game operations will take place.
:param world_width: Width of the game world the snake should roam
:param world_height: Height of the game world the snake should roam
'''
self.world_width = world_width
self.world_height = world_height
self.food = []
## Initialise the snake
self.snake = Snake(world_width // 2, world_height // 2, 4)
self.score = 0
self.game_end = False
# place the first piece of food
self.add_food()
def reset(self):
self.food = []
## Initialise the snake
self.snake = Snake(self.world_width // 2, self.world_height // 2, 4)
self.score = 0
self.game_end = False
# place the first piece of food
self.add_food()
def make_move(self, input_move):
old_head = (self.snake.head[0], self.snake.head[1])
if input_move == 0:
self.snake.move_forward(self)
elif input_move == 1:
self.snake.turn_left(self)
elif input_move == 2:
self.snake.turn_right(self)
# add food if it's been eaten
reward = 0
if not self.food:
self.score += 1
reward += 10
if not self.add_food():
self.game_end
# return reward for making this move
# if closer to food, increase reward, else decrease
new_head = (self.snake.head[0], self.snake.head[1])
food = (self.food[0][0], self.food[0][1])
# taxicab geometry
old_dist = abs(food[0] - old_head[0]) + abs(food[1] - old_head[1])
new_dist = abs(food[0] - new_head[0]) + abs(food[1] - new_head[1])
if new_dist < old_dist:
reward += 1
else:
reward -= 1
return reward
def export_game_state(self):
'''
Exports the game state
:return: a dictionary with set values representing the game state
'''
return {"score": self.score,
"world_width": self.world_width,
"world_height": self.world_height,
"food": self.food,
"snake_direction": self.snake.direction,
"snake_body": self.snake.body,
"snake_head": self.snake.head,
"snake_size": self.snake.length,
"game_end": self.game_end}
def import_game_state(self, game_state):
'''
Import a game state to load
:param game_state: a dictionary with the defined
:return: True or false depending on if it was successful in loading the game state
'''
try:
self.score = game_state["score"]
self.world_width = game_state["world_width"]
self.world_height = game_state["world_height"]
self.food = game_state["food"]
self.snake.body = game_state["snake_body"]
self.snake.head = game_state["snake_head"]
self.snake.direction = game_state["snake_direction"]
self.snake.length = game_state["snake_length"]
self.game_end = game_state["game_end"]
except KeyError as error:
print("Missing game state argument!")
print(error)
return False
return True
def add_food(self):
'''
Add food to the game world, possible locations are only where the snake isn't
:return: True or False depending if food was able to be added, if it false then the game must be complete.
'''
possible_locations = [(x, y) for x in range(self.world_width) for y in range(self.world_height)]
for s_not_possible in self.snake.body + self.food:
if s_not_possible in possible_locations:
possible_locations.remove(s_not_possible)
if not possible_locations:
return False
else:
# select a possible location
self.food.append(possible_locations[randint(0, len(possible_locations) - 1)])
return True
def output_world(self):
    '''
    Render the game world as a list of rows of single characters, suitable
    for an AI to parse or for printing.
    :return: list of list of str ("." empty, "o" food, "#" body,
             "> ^ < v" head by direction)
    '''
    head_chars = {0: ">", 1: "^", 2: "<", 3: "v"}
    grid = []
    for y in range(self.world_height):
        row = []
        for x in range(self.world_width):
            cell = (x, y)
            if cell in self.food:
                row.append("o")
            elif cell in self.snake.body:
                if cell == self.snake.body[0]:
                    # The head is drawn as an arrow showing its direction.
                    row.append(head_chars[self.snake.direction])
                else:
                    row.append("#")
            else:
                row.append(".")
        grid.append(row)
    return grid
def pretty_print_world(self):
    # Print each rendered row as space-separated characters.
    for row in self.output_world():
        print(" ".join(row))
class Snake:
    """The snake: a head position, a facing direction, and a body trail."""

    def __init__(self, pos_x=0, pos_y=0, length=3):
        # Start with a horizontal tail of the given length behind the head.
        self.head = (pos_x, pos_y)
        self.direction = 0  # 0 = right, 1 = up, 2 = left, 3 = down
        self.length = length
        self.body = self.gen_tail(pos_x, pos_y, self.length)

    def turn_left(self, engine):
        self.direction = (self.direction + 1) % 4
        self.move_forward(engine)

    def turn_right(self, engine):
        self.direction = (self.direction - 1) % 4
        self.move_forward(engine)

    def move_forward(self, engine):
        # Step one cell in the facing direction by prepending a new head.
        dx, dy = {0: (1, 0), 1: (0, -1), 2: (-1, 0), 3: (0, 1)}[self.direction]
        new_head = (self.head[0] + dx, self.head[1] + dy)
        self.body = [new_head] + self.body
        if new_head in engine.food:
            # Eat: keep the extra segment so the snake grows, and score.
            engine.food.remove(new_head)
            engine.score += 1
        else:
            # No food here, so drop the tail to keep the length unchanged.
            self.body.pop()
        self.head = new_head
        # Dead if the head overlaps any other body segment ...
        if self.body.count(self.head) > 1:
            engine.game_end = True
        # ... or has left the world bounds.
        if (self.head[0] < 0 or self.head[1] < 0
                or self.head[0] >= engine.world_width
                or self.head[1] >= engine.world_height):
            engine.game_end = True

    @staticmethod
    def gen_tail(head_x, head_y, length=3):
        # Cells stretching leftwards from the head, head cell included.
        return [(x, head_y) for x in range(head_x, head_x - length, -1)]
def play_game():
    '''Run an interactive console game loop until the snake dies.'''
    e = Engine()
    while not e.game_end:
        e.pretty_print_world()
        move = None
        while move is None:
            try:
                move = int(input("Enter 0 or 1 or 2 for no change, turn left or turn right: "))
            except ValueError:
                # Bug fix: non-numeric input used to crash the game with an
                # uncaught ValueError; re-prompt instead.
                print("Invalid move: please enter a number")
                continue
            if move in (0, 1, 2):
                e.make_move(move)
            else:
                print(f"Invalid move: {move}")
                move = None
# Entry point: start the interactive console game when run as a script.
if __name__ == "__main__":
    play_game()
|
from random import randint
class Engine:
    """Game engine for a grid-based snake game, playable by a human or an AI.

    Coordinates are (x, y) tuples; (0, 0) is the top-left corner.
    """

    def __init__(self, world_width=10, world_height=10):
        '''
        Initialise the snake engine where all game operations will take place.
        :param world_width: Width of the game world the snake should roam
        :param world_height: Height of the game world the snake should roam
        '''
        self.world_width = world_width
        self.world_height = world_height
        self.food = []
        # Initialise the snake in the middle of the world.
        self.snake = Snake(world_width // 2, world_height // 2, 4)
        self.score = 0
        self.game_end = False
        # Place the first piece of food.
        self.add_food()

    def reset(self):
        '''Restart the game with the same world dimensions.'''
        self.food = []
        self.snake = Snake(self.world_width // 2, self.world_height // 2, 4)
        self.score = 0
        self.game_end = False
        self.add_food()

    def make_move(self, input_move):
        '''
        Advance the game one step.
        :param input_move: 0 = keep going straight, 1 = turn left, 2 = turn right
        :return: the reward earned by this move (used by learning agents)
        '''
        old_head = (self.snake.head[0], self.snake.head[1])
        if input_move == 0:
            self.snake.move_forward(self)
        elif input_move == 1:
            self.snake.turn_left(self)
        elif input_move == 2:
            self.snake.turn_right(self)
        reward = 0
        # Replace eaten food; if no free cell remains, the board is full.
        if not self.food:
            self.score += 1
            reward += 10
            if not self.add_food():
                # Bug fix: the original read the bare expression
                # `self.game_end` here (a no-op) instead of assigning True.
                self.game_end = True
                # Also return early: with no food left the distance-based
                # shaping below would raise an IndexError.
                return reward
        # Reward shaping: +1 for moving closer to the food, -1 otherwise,
        # measured in taxicab (Manhattan) distance.
        new_head = (self.snake.head[0], self.snake.head[1])
        food = (self.food[0][0], self.food[0][1])
        old_dist = abs(food[0] - old_head[0]) + abs(food[1] - old_head[1])
        new_dist = abs(food[0] - new_head[0]) + abs(food[1] - new_head[1])
        if new_dist < old_dist:
            reward += 1
        else:
            reward -= 1
        return reward

    def export_game_state(self):
        '''
        Export the current game state.
        :return: a dictionary of values fully describing the game state
        '''
        return {"score": self.score,
                "world_width": self.world_width,
                "world_height": self.world_height,
                "food": self.food,
                "snake_direction": self.snake.direction,
                "snake_body": self.snake.body,
                "snake_head": self.snake.head,
                "snake_size": self.snake.length,
                "game_end": self.game_end}

    def import_game_state(self, game_state):
        '''
        Import a previously exported game state.
        :param game_state: a dictionary as produced by export_game_state
        :return: True if the state was loaded, False if a key was missing
        '''
        try:
            self.score = game_state["score"]
            self.world_width = game_state["world_width"]
            self.world_height = game_state["world_height"]
            self.food = game_state["food"]
            self.snake.body = game_state["snake_body"]
            self.snake.head = game_state["snake_head"]
            self.snake.direction = game_state["snake_direction"]
            # Bug fix: export writes "snake_size" but the old code only read
            # "snake_length", so a round trip always failed.  Accept both.
            if "snake_size" in game_state:
                self.snake.length = game_state["snake_size"]
            else:
                self.snake.length = game_state["snake_length"]
            self.game_end = game_state["game_end"]
        except KeyError as error:
            print("Missing game state argument!")
            print(error)
            return False
        return True

    def add_food(self):
        '''
        Add food at a random empty cell (one the snake or existing food
        does not occupy).
        :return: True if food was placed, False if the board is full, in
                 which case the game must be complete.
        '''
        # Set-based filtering replaces the old O(n^2) list.remove() loop.
        occupied = set(self.snake.body) | set(self.food)
        possible_locations = [(x, y)
                              for x in range(self.world_width)
                              for y in range(self.world_height)
                              if (x, y) not in occupied]
        if not possible_locations:
            return False
        self.food.append(possible_locations[randint(0, len(possible_locations) - 1)])
        return True

    def output_world(self):
        '''
        Render the world as a list of rows of single characters, suitable
        for an AI to parse or for printing.
        :return: list of list of str ("." empty, "o" food, "#" body,
                 "> ^ < v" head by direction)
        '''
        head_chars = {0: ">", 1: "^", 2: "<", 3: "v"}
        grid = []
        for y in range(self.world_height):
            row = []
            for x in range(self.world_width):
                cell = (x, y)
                if cell in self.food:
                    row.append("o")
                elif cell in self.snake.body:
                    if cell == self.snake.body[0]:
                        row.append(head_chars[self.snake.direction])
                    else:
                        row.append("#")
                else:
                    row.append(".")
            grid.append(row)
        return grid

    def pretty_print_world(self):
        '''Print the rendered world, one space-separated row per line.'''
        for row in self.output_world():
            print(" ".join(row))


class Snake:
    """The snake: a head position, a facing direction, and a body trail."""

    def __init__(self, pos_x=0, pos_y=0, length=3):
        # Start with a horizontal tail of the given length behind the head.
        self.head = (pos_x, pos_y)
        self.direction = 0  # 0 = right, 1 = up, 2 = left, 3 = down
        self.length = length
        self.body = self.gen_tail(pos_x, pos_y, self.length)

    def turn_left(self, engine):
        self.direction = (self.direction + 1) % 4
        self.move_forward(engine)

    def turn_right(self, engine):
        self.direction = (self.direction - 1) % 4
        self.move_forward(engine)

    def move_forward(self, engine):
        # Step one cell in the facing direction by prepending a new head.
        dx, dy = {0: (1, 0), 1: (0, -1), 2: (-1, 0), 3: (0, 1)}[self.direction]
        new_head = (self.head[0] + dx, self.head[1] + dy)
        self.body = [new_head] + self.body
        if new_head in engine.food:
            # Eat: keep the extra segment so the snake grows, and score.
            engine.food.remove(new_head)
            engine.score += 1
        else:
            # No food here, so drop the tail to keep the length unchanged.
            self.body.pop()
        self.head = new_head
        # Dead if the head overlaps any other body segment ...
        if self.body.count(self.head) > 1:
            engine.game_end = True
        # ... or has left the world bounds.
        if (self.head[0] < 0 or self.head[1] < 0
                or self.head[0] >= engine.world_width
                or self.head[1] >= engine.world_height):
            engine.game_end = True

    @staticmethod
    def gen_tail(head_x, head_y, length=3):
        # Cells stretching leftwards from the head, head cell included.
        return [(x, head_y) for x in range(head_x, head_x - length, -1)]
def play_game():
    '''Run an interactive console game loop until the snake dies.'''
    e = Engine()
    while not e.game_end:
        e.pretty_print_world()
        move = None
        while move is None:
            try:
                move = int(input("Enter 0 or 1 or 2 for no change, turn left or turn right: "))
            except ValueError:
                # Bug fix: non-numeric input used to crash the game with an
                # uncaught ValueError; re-prompt instead.
                print("Invalid move: please enter a number")
                continue
            if move in (0, 1, 2):
                e.make_move(move)
            else:
                print(f"Invalid move: {move}")
                move = None
# Entry point: start the interactive console game when run as a script.
if __name__ == "__main__":
    play_game()
|
en
| 0.886024
|
Initialise the snake engine where all game operations will take place. :param world_width: Width of the game world the snake should roam :param world_height: Height of the game world the snake should roam ## Initialise the snake # place the first piece of food ## Initialise the snake # place the first piece of food # add food if it's been eaten # return reward for making this move # if closer to food, increase reward, else decrease # taxicab geometry Exports the game state :return: a dictionary with set values representing the game state Import a game state to load :param game_state: a dictionary with the defined :return: True or false depending on if it was successful in loading the game state Add food to the game world, possible locations are only where the snake isn't :return: True or False depending if food was able to be added, if it false then the game must be complete. # select a possible location Output the game world as a list of list of characters to the parsed by AI or printed :return: # init tail of given length # 0 = right, 1 = up, 2 = left, 3 = down # check if head not on food then don't increase snake length # eat the food # check if dead # draw game state
| 3.734424
| 4
|
lang/py/test/test_tether_task.py
|
minond/avro
| 1
|
6627249
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import time
import unittest
import set_avro_test_path
class TestTetherTask(unittest.TestCase):
"""
TODO: We should validate the the server response by looking at stdout
"""
def test1(self):
"""
Test that the thether_task is working. We run the mock_tether_parent in a separate
subprocess
"""
from avro import tether
from avro import io as avio
from avro import schema
from avro.tether import HTTPRequestor,inputProtocol, find_port
import StringIO
import mock_tether_parent
from word_count_task import WordCountTask
task=WordCountTask()
proc=None
try:
# launch the server in a separate process
# env["AVRO_TETHER_OUTPUT_PORT"]=output_port
env=dict()
env["PYTHONPATH"]=':'.join(sys.path)
server_port=find_port()
pyfile=mock_tether_parent.__file__
proc=subprocess.Popen(["python", pyfile,"start_server","{0}".format(server_port)])
input_port=find_port()
print "Mock server started process pid={0}".format(proc.pid)
# Possible race condition? open tries to connect to the subprocess before the subprocess is fully started
# so we give the subprocess time to start up
time.sleep(1)
task.open(input_port,clientPort=server_port)
# TODO: We should validate that open worked by grabbing the STDOUT of the subproces
# and ensuring that it outputted the correct message.
#***************************************************************
# Test the mapper
task.configure(tether.TaskType.MAP,str(task.inschema),str(task.midschema))
# Serialize some data so we can send it to the input function
datum="This is a line of text"
writer = StringIO.StringIO()
encoder = avio.BinaryEncoder(writer)
datum_writer = avio.DatumWriter(task.inschema)
datum_writer.write(datum, encoder)
writer.seek(0)
data=writer.read()
# Call input to simulate calling map
task.input(data,1)
# Test the reducer
task.configure(tether.TaskType.REDUCE,str(task.midschema),str(task.outschema))
# Serialize some data so we can send it to the input function
datum={"key":"word","value":2}
writer = StringIO.StringIO()
encoder = avio.BinaryEncoder(writer)
datum_writer = avio.DatumWriter(task.midschema)
datum_writer.write(datum, encoder)
writer.seek(0)
data=writer.read()
# Call input to simulate calling reduce
task.input(data,1)
task.complete()
# try a status
task.status("Status message")
except Exception as e:
raise
finally:
# close the process
if not(proc is None):
proc.kill()
pass
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import time
import unittest
import set_avro_test_path
class TestTetherTask(unittest.TestCase):
"""
TODO: We should validate the the server response by looking at stdout
"""
def test1(self):
"""
Test that the thether_task is working. We run the mock_tether_parent in a separate
subprocess
"""
from avro import tether
from avro import io as avio
from avro import schema
from avro.tether import HTTPRequestor,inputProtocol, find_port
import StringIO
import mock_tether_parent
from word_count_task import WordCountTask
task=WordCountTask()
proc=None
try:
# launch the server in a separate process
# env["AVRO_TETHER_OUTPUT_PORT"]=output_port
env=dict()
env["PYTHONPATH"]=':'.join(sys.path)
server_port=find_port()
pyfile=mock_tether_parent.__file__
proc=subprocess.Popen(["python", pyfile,"start_server","{0}".format(server_port)])
input_port=find_port()
print "Mock server started process pid={0}".format(proc.pid)
# Possible race condition? open tries to connect to the subprocess before the subprocess is fully started
# so we give the subprocess time to start up
time.sleep(1)
task.open(input_port,clientPort=server_port)
# TODO: We should validate that open worked by grabbing the STDOUT of the subproces
# and ensuring that it outputted the correct message.
#***************************************************************
# Test the mapper
task.configure(tether.TaskType.MAP,str(task.inschema),str(task.midschema))
# Serialize some data so we can send it to the input function
datum="This is a line of text"
writer = StringIO.StringIO()
encoder = avio.BinaryEncoder(writer)
datum_writer = avio.DatumWriter(task.inschema)
datum_writer.write(datum, encoder)
writer.seek(0)
data=writer.read()
# Call input to simulate calling map
task.input(data,1)
# Test the reducer
task.configure(tether.TaskType.REDUCE,str(task.midschema),str(task.outschema))
# Serialize some data so we can send it to the input function
datum={"key":"word","value":2}
writer = StringIO.StringIO()
encoder = avio.BinaryEncoder(writer)
datum_writer = avio.DatumWriter(task.midschema)
datum_writer.write(datum, encoder)
writer.seek(0)
data=writer.read()
# Call input to simulate calling reduce
task.input(data,1)
task.complete()
# try a status
task.status("Status message")
except Exception as e:
raise
finally:
# close the process
if not(proc is None):
proc.kill()
pass
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
en
| 0.821786
|
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. TODO: We should validate the the server response by looking at stdout Test that the thether_task is working. We run the mock_tether_parent in a separate subprocess # launch the server in a separate process # env["AVRO_TETHER_OUTPUT_PORT"]=output_port # Possible race condition? open tries to connect to the subprocess before the subprocess is fully started # so we give the subprocess time to start up # TODO: We should validate that open worked by grabbing the STDOUT of the subproces # and ensuring that it outputted the correct message. #*************************************************************** # Test the mapper # Serialize some data so we can send it to the input function # Call input to simulate calling map # Test the reducer # Serialize some data so we can send it to the input function # Call input to simulate calling reduce # try a status # close the process
| 2.219666
| 2
|
intmcp/envs/pe/obs.py
|
RDLLab/i-ntmcp
| 0
|
6627250
|
<reponame>RDLLab/i-ntmcp
"""Observations for the Discrete Persuit Evasion problem
The field of vision for agents looks like this:
***
****
******
>*******
******
****
*
I.e. the width increases by 1 on each side for when distance d:
- d < 3 and d == 1
- d >= 3 and d % 3 == 0
With objects blocking the view
**
# ****#
****#
>****#
**#
"""
from typing import Tuple
from intmcp.model import Observation
from intmcp.envs.pe import grid as grid_lib
WallObs = Tuple[bool, bool, bool, bool]
class PEChaserObs(Observation):
    """A chaser observation for the Discrete Pursuit Evasion problem.

    Includes an observation of:
    - whether or not the runner was detected in the field of vision
    - whether or not the runner has been heard (i.e. the runner is within
      Manhattan distance of 2)
    - whether there is a wall in each of the cardinal directions
    """

    def __init__(self, walls: WallObs, seen: bool, heard: bool):
        self.walls = walls
        self.seen = seen
        self.heard = heard

    def _astuple(self):
        # Canonical field tuple shared by __str__, __eq__ and __hash__.
        return (self.walls, self.seen, self.heard)

    def __str__(self):
        walls, seen, heard = self._astuple()
        return f"<{walls}, {seen}, {heard}>"

    def __eq__(self, other):
        return self._astuple() == (other.walls, other.seen, other.heard)

    def __hash__(self):
        return hash(self._astuple())
class PERunnerObs(Observation):
    """A Runner observation for the Discrete Pursuit Evasion problem.

    Includes an observation of:
    - the runner's goal location
    - whether or not the chaser was detected in the field of vision
    - whether or not the chaser has been heard (i.e. the chaser is within
      Manhattan distance of 2)
    - whether there is a wall in each of the cardinal directions
    """

    def __init__(self,
                 walls: WallObs,
                 seen: bool,
                 heard: bool,
                 goal: grid_lib.Loc):
        self.walls = walls
        self.seen = seen
        self.heard = heard
        self.goal = goal

    def _astuple(self):
        # Canonical field tuple shared by __str__, __eq__ and __hash__.
        return (self.walls, self.seen, self.heard, self.goal)

    def __str__(self):
        walls, seen, heard, goal = self._astuple()
        return f"<{walls}, {seen}, {heard}, {goal}>"

    def __eq__(self, other):
        return self._astuple() == (other.walls, other.seen, other.heard, other.goal)

    def __hash__(self):
        return hash(self._astuple())
|
"""Observations for the Discrete Persuit Evasion problem
The field of vision for agents looks like this:
***
****
******
>*******
******
****
*
I.e. the width increases by 1 on each side for when distance d:
- d < 3 and d == 1
- d >= 3 and d % 3 == 0
With objects blocking the view
**
# ****#
****#
>****#
**#
"""
from typing import Tuple
from intmcp.model import Observation
from intmcp.envs.pe import grid as grid_lib
WallObs = Tuple[bool, bool, bool, bool]
class PEChaserObs(Observation):
    """A chaser observation for the Discrete Pursuit Evasion problem.

    Includes an observation of:
    - whether or not the runner was detected in the field of vision
    - whether or not the runner has been heard (i.e. the runner is within
      Manhattan distance of 2)
    - whether there is a wall in each of the cardinal directions
    """

    def __init__(self, walls: WallObs, seen: bool, heard: bool):
        self.walls = walls
        self.seen = seen
        self.heard = heard

    def _astuple(self):
        # Canonical field tuple shared by __str__, __eq__ and __hash__.
        return (self.walls, self.seen, self.heard)

    def __str__(self):
        walls, seen, heard = self._astuple()
        return f"<{walls}, {seen}, {heard}>"

    def __eq__(self, other):
        return self._astuple() == (other.walls, other.seen, other.heard)

    def __hash__(self):
        return hash(self._astuple())
class PERunnerObs(Observation):
    """A Runner observation for the Discrete Pursuit Evasion problem.

    Includes an observation of:
    - the runner's goal location
    - whether or not the chaser was detected in the field of vision
    - whether or not the chaser has been heard (i.e. the chaser is within
      Manhattan distance of 2)
    - whether there is a wall in each of the cardinal directions
    """

    def __init__(self,
                 walls: WallObs,
                 seen: bool,
                 heard: bool,
                 goal: grid_lib.Loc):
        self.walls = walls
        self.seen = seen
        self.heard = heard
        self.goal = goal

    def _astuple(self):
        # Canonical field tuple shared by __str__, __eq__ and __hash__.
        return (self.walls, self.seen, self.heard, self.goal)

    def __str__(self):
        walls, seen, heard, goal = self._astuple()
        return f"<{walls}, {seen}, {heard}, {goal}>"

    def __eq__(self, other):
        return self._astuple() == (other.walls, other.seen, other.heard, other.goal)

    def __hash__(self):
        return hash(self._astuple())
|
en
| 0.871265
|
Observations for the Discrete Persuit Evasion problem The field of vision for agents looks like this: *** **** ****** >******* ****** **** * I.e. the width increases by 1 on each side for when distance d: - d < 3 and d == 1 - d >= 3 and d % 3 == 0 With objects blocking the view ** # ****# ****# >****# **# A chaser observation for the Discrete Pursuit Evasion problem Includes an observation of: - whether or not the runner detected in field of vision - whether or not runner has been heard (i.e. runner is within Manhattan distance of 2) - whether there is a wall in each of the cardinal directions A Runner observation for the Discrete Pursuit Evasion problem Includes an observation of: - runners goal location - whether or not the chaser detected in field of vision - whether or not chaser has been heard (i.e. runner is within Manhattan distance of 2) - whether there is a wall in each of the cardinal directions
| 3.477554
| 3
|