hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c319f4f2f2fb60ea565490e1d1a2d9404c76ea7 | 8,074 | py | Python | lib/sqlalchemy/dialects/mysql/oursql.py | edelooff/sqlalchemy | 97d2a2091ed4caee1e19168d0db39e4d94a6d12f | [
"MIT"
] | 2 | 2020-02-19T17:50:50.000Z | 2021-02-10T02:52:41.000Z | lib/sqlalchemy/dialects/mysql/oursql.py | KonstantinKlepikov/sqlalchemy-1 | 2c34d2503a17316cae3282192405b9b9d60df6fe | [
"MIT"
] | null | null | null | lib/sqlalchemy/dialects/mysql/oursql.py | KonstantinKlepikov/sqlalchemy-1 | 2c34d2503a17316cae3282192405b9b9d60df6fe | [
"MIT"
] | 1 | 2021-06-13T01:55:35.000Z | 2021-06-13T01:55:35.000Z | # mysql/oursql.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+oursql
:name: OurSQL
:dbapi: oursql
:connectstring: mysql+oursql://<user>:<password>@<host>[:<port>]/<dbname>
:url: http://packages.python.org/oursql/
.. note::
The OurSQL MySQL dialect is legacy and is no longer supported upstream,
and is **not tested as part of SQLAlchemy's continuous integration**.
The recommended MySQL dialects are mysqlclient and PyMySQL.
Unicode
-------
Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.
"""
from .base import BIT
from .base import MySQLDialect
from .base import MySQLExecutionContext
from ... import types as sqltypes
from ... import util
class _oursqlBIT(BIT):
    """BIT variant for the oursql driver, which needs no result conversion."""

    def result_processor(self, dialect, coltype):
        """Return ``None``: oursql already converts MySQL BIT values itself."""
        return None
class MySQLExecutionContext_oursql(MySQLExecutionContext):
    """Execution context exposing oursql's "plain query" execution flag."""

    @property
    def plain_query(self):
        # True when the statement was issued through
        # Connection.execution_options(_oursql_plain_query=True).
        options = self.execution_options
        return options.get("_oursql_plain_query", False)
class MySQLDialect_oursql(MySQLDialect):
    """MySQL dialect for the (legacy) oursql DBAPI driver.

    oursql offers a "plain query" mode that bypasses its parameterized
    statement API; many methods below route statements through that mode
    via the ``_oursql_plain_query`` execution option, because some MySQL
    statements cannot be parameterized.
    """

    driver = "oursql"

    # On Python 2, oursql accepts unicode statements and bind values
    # directly, so advertise that capability there.
    if util.py2k:
        supports_unicode_binds = True
        supports_unicode_statements = True

    supports_native_decimal = True

    supports_sane_rowcount = True
    supports_sane_multi_rowcount = True
    execution_ctx_cls = MySQLExecutionContext_oursql

    # Map Time to itself (skipping MySQL-specific conversion) and BIT to
    # the no-op result processor defined above.
    colspecs = util.update_copy(
        MySQLDialect.colspecs, {sqltypes.Time: sqltypes.Time, BIT: _oursqlBIT}
    )

    @classmethod
    def dbapi(cls):
        # Import the DBAPI module lazily, only when the dialect is used.
        return __import__("oursql")

    def do_execute(self, cursor, statement, parameters, context=None):
        """Provide an implementation of
        *cursor.execute(statement, parameters)*."""

        if context and context.plain_query:
            # Plain-query mode: send the statement unparameterized.
            cursor.execute(statement, plain_query=True)
        else:
            cursor.execute(statement, parameters)

    def do_begin(self, connection):
        # BEGIN must not go through oursql's parameterized API.
        connection.cursor().execute("BEGIN", plain_query=True)

    def _xa_query(self, connection, query, xid):
        # Escape and single-quote the transaction id, interpolate it into
        # the XA statement, and run it in plain-query mode.
        if util.py2k:
            arg = connection.connection._escape_string(xid)
        else:
            # On py3 _escape_string operates on bytes; round-trip through
            # the connection's charset.
            charset = self._connection_charset
            arg = connection.connection._escape_string(
                xid.encode(charset)
            ).decode(charset)
        arg = "'%s'" % arg
        connection.execution_options(_oursql_plain_query=True).execute(
            query % arg
        )

    # Because mysql is bad, these methods have to be
    # reimplemented to use _PlainQuery. Basically, some queries
    # refuse to return any data if they're run through
    # the parameterized query API, or refuse to be parameterized
    # in the first place.
    def do_begin_twophase(self, connection, xid):
        self._xa_query(connection, "XA BEGIN %s", xid)

    def do_prepare_twophase(self, connection, xid):
        self._xa_query(connection, "XA END %s", xid)
        self._xa_query(connection, "XA PREPARE %s", xid)

    def do_rollback_twophase(
        self, connection, xid, is_prepared=True, recover=False
    ):
        # An unprepared branch must be ended before it can be rolled back.
        if not is_prepared:
            self._xa_query(connection, "XA END %s", xid)
        self._xa_query(connection, "XA ROLLBACK %s", xid)

    def do_commit_twophase(
        self, connection, xid, is_prepared=True, recover=False
    ):
        if not is_prepared:
            self.do_prepare_twophase(connection, xid)
        self._xa_query(connection, "XA COMMIT %s", xid)

    # Q: why didn't we need all these "plain_query" overrides earlier ?
    # am i on a newer/older version of OurSQL ?
    # Each reflection method below re-runs the base implementation on a
    # branched connection that has the plain-query option enabled.
    def has_table(self, connection, table_name, schema=None):
        return MySQLDialect.has_table(
            self,
            connection.connect().execution_options(_oursql_plain_query=True),
            table_name,
            schema,
        )

    def get_table_options(self, connection, table_name, schema=None, **kw):
        return MySQLDialect.get_table_options(
            self,
            connection.connect().execution_options(_oursql_plain_query=True),
            table_name,
            schema=schema,
            **kw
        )

    def get_columns(self, connection, table_name, schema=None, **kw):
        return MySQLDialect.get_columns(
            self,
            connection.connect().execution_options(_oursql_plain_query=True),
            table_name,
            schema=schema,
            **kw
        )

    def get_view_names(self, connection, schema=None, **kw):
        return MySQLDialect.get_view_names(
            self,
            connection.connect().execution_options(_oursql_plain_query=True),
            schema=schema,
            **kw
        )

    def get_table_names(self, connection, schema=None, **kw):
        return MySQLDialect.get_table_names(
            self,
            connection.connect().execution_options(_oursql_plain_query=True),
            schema,
        )

    def get_schema_names(self, connection, **kw):
        return MySQLDialect.get_schema_names(
            self,
            connection.connect().execution_options(_oursql_plain_query=True),
            **kw
        )

    def initialize(self, connection):
        return MySQLDialect.initialize(
            self, connection.execution_options(_oursql_plain_query=True)
        )

    def _show_create_table(
        self, connection, table, charset=None, full_name=None
    ):
        return MySQLDialect._show_create_table(
            self,
            connection.connect(
                close_with_result=True
            ).execution_options(_oursql_plain_query=True),
            table,
            charset,
            full_name,
        )

    def is_disconnect(self, e, connection, cursor):
        # ProgrammingError with no errno and a "...closed" message (not
        # about a cursor) indicates a dead connection; otherwise fall back
        # to the well-known MySQL disconnect error codes.
        if isinstance(e, self.dbapi.ProgrammingError):
            return (
                e.errno is None
                and "cursor" not in e.args[1]
                and e.args[1].endswith("closed")
            )
        else:
            return e.errno in (2006, 2013, 2014, 2045, 2055)

    def create_connect_args(self, url):
        """Translate a SQLAlchemy URL into oursql connect() arguments."""
        opts = url.translate_connect_args(
            database="db", username="user", password="passwd"
        )
        opts.update(url.query)

        util.coerce_kw_type(opts, "port", int)
        util.coerce_kw_type(opts, "compress", bool)
        util.coerce_kw_type(opts, "autoping", bool)
        util.coerce_kw_type(opts, "raise_on_warnings", bool)

        # default_charset=True means "let the server decide" (charset=None);
        # otherwise pass any explicit charset through as a string.
        util.coerce_kw_type(opts, "default_charset", bool)
        if opts.pop("default_charset", False):
            opts["charset"] = None
        else:
            util.coerce_kw_type(opts, "charset", str)
        opts["use_unicode"] = opts.get("use_unicode", True)
        util.coerce_kw_type(opts, "use_unicode", bool)

        # FOUND_ROWS must be set in CLIENT_FLAGS to enable
        # supports_sane_rowcount.
        opts.setdefault("found_rows", True)

        # Collect ssl_* query options into the nested "ssl" dict that
        # oursql expects (keys with the "ssl_" prefix stripped).
        ssl = {}
        for key in [
            "ssl_ca",
            "ssl_key",
            "ssl_cert",
            "ssl_capath",
            "ssl_cipher",
        ]:
            if key in opts:
                ssl[key[4:]] = opts[key]
                util.coerce_kw_type(ssl, key[4:], str)
                del opts[key]
        if ssl:
            opts["ssl"] = ssl

        # oursql takes all arguments as keywords.
        return [[], opts]

    def _extract_error_code(self, exception):
        return exception.errno

    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""

        return connection.connection.charset

    def _compat_fetchall(self, rp, charset=None):
        """oursql isn't super-broken like MySQLdb, yaaay."""
        return rp.fetchall()

    def _compat_fetchone(self, rp, charset=None):
        """oursql isn't super-broken like MySQLdb, yaaay."""
        return rp.fetchone()

    def _compat_first(self, rp, charset=None):
        return rp.first()
dialect = MySQLDialect_oursql
| 30.69962 | 78 | 0.624845 |
from .base import BIT
from .base import MySQLDialect
from .base import MySQLExecutionContext
from ... import types as sqltypes
from ... import util
class _oursqlBIT(BIT):
def result_processor(self, dialect, coltype):
return None
class MySQLExecutionContext_oursql(MySQLExecutionContext):
@property
def plain_query(self):
return self.execution_options.get("_oursql_plain_query", False)
class MySQLDialect_oursql(MySQLDialect):
driver = "oursql"
if util.py2k:
supports_unicode_binds = True
supports_unicode_statements = True
supports_native_decimal = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
execution_ctx_cls = MySQLExecutionContext_oursql
colspecs = util.update_copy(
MySQLDialect.colspecs, {sqltypes.Time: sqltypes.Time, BIT: _oursqlBIT}
)
@classmethod
def dbapi(cls):
return __import__("oursql")
def do_execute(self, cursor, statement, parameters, context=None):
if context and context.plain_query:
cursor.execute(statement, plain_query=True)
else:
cursor.execute(statement, parameters)
def do_begin(self, connection):
connection.cursor().execute("BEGIN", plain_query=True)
def _xa_query(self, connection, query, xid):
if util.py2k:
arg = connection.connection._escape_string(xid)
else:
charset = self._connection_charset
arg = connection.connection._escape_string(
xid.encode(charset)
).decode(charset)
arg = "'%s'" % arg
connection.execution_options(_oursql_plain_query=True).execute(
query % arg
)
# the parameterized query API, or refuse to be parameterized
# in the first place.
def do_begin_twophase(self, connection, xid):
self._xa_query(connection, "XA BEGIN %s", xid)
def do_prepare_twophase(self, connection, xid):
self._xa_query(connection, "XA END %s", xid)
self._xa_query(connection, "XA PREPARE %s", xid)
def do_rollback_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if not is_prepared:
self._xa_query(connection, "XA END %s", xid)
self._xa_query(connection, "XA ROLLBACK %s", xid)
def do_commit_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if not is_prepared:
self.do_prepare_twophase(connection, xid)
self._xa_query(connection, "XA COMMIT %s", xid)
# Q: why didn't we need all these "plain_query" overrides earlier ?
def has_table(self, connection, table_name, schema=None):
return MySQLDialect.has_table(
self,
connection.connect().execution_options(_oursql_plain_query=True),
table_name,
schema,
)
def get_table_options(self, connection, table_name, schema=None, **kw):
return MySQLDialect.get_table_options(
self,
connection.connect().execution_options(_oursql_plain_query=True),
table_name,
schema=schema,
**kw
)
def get_columns(self, connection, table_name, schema=None, **kw):
return MySQLDialect.get_columns(
self,
connection.connect().execution_options(_oursql_plain_query=True),
table_name,
schema=schema,
**kw
)
def get_view_names(self, connection, schema=None, **kw):
return MySQLDialect.get_view_names(
self,
connection.connect().execution_options(_oursql_plain_query=True),
schema=schema,
**kw
)
def get_table_names(self, connection, schema=None, **kw):
return MySQLDialect.get_table_names(
self,
connection.connect().execution_options(_oursql_plain_query=True),
schema,
)
def get_schema_names(self, connection, **kw):
return MySQLDialect.get_schema_names(
self,
connection.connect().execution_options(_oursql_plain_query=True),
**kw
)
def initialize(self, connection):
return MySQLDialect.initialize(
self, connection.execution_options(_oursql_plain_query=True)
)
def _show_create_table(
self, connection, table, charset=None, full_name=None
):
return MySQLDialect._show_create_table(
self,
connection.connect(
close_with_result=True
).execution_options(_oursql_plain_query=True),
table,
charset,
full_name,
)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.ProgrammingError):
return (
e.errno is None
and "cursor" not in e.args[1]
and e.args[1].endswith("closed")
)
else:
return e.errno in (2006, 2013, 2014, 2045, 2055)
def create_connect_args(self, url):
opts = url.translate_connect_args(
database="db", username="user", password="passwd"
)
opts.update(url.query)
util.coerce_kw_type(opts, "port", int)
util.coerce_kw_type(opts, "compress", bool)
util.coerce_kw_type(opts, "autoping", bool)
util.coerce_kw_type(opts, "raise_on_warnings", bool)
util.coerce_kw_type(opts, "default_charset", bool)
if opts.pop("default_charset", False):
opts["charset"] = None
else:
util.coerce_kw_type(opts, "charset", str)
opts["use_unicode"] = opts.get("use_unicode", True)
util.coerce_kw_type(opts, "use_unicode", bool)
opts.setdefault("found_rows", True)
ssl = {}
for key in [
"ssl_ca",
"ssl_key",
"ssl_cert",
"ssl_capath",
"ssl_cipher",
]:
if key in opts:
ssl[key[4:]] = opts[key]
util.coerce_kw_type(ssl, key[4:], str)
del opts[key]
if ssl:
opts["ssl"] = ssl
return [[], opts]
def _extract_error_code(self, exception):
return exception.errno
def _detect_charset(self, connection):
return connection.connection.charset
def _compat_fetchall(self, rp, charset=None):
return rp.fetchall()
def _compat_fetchone(self, rp, charset=None):
return rp.fetchone()
def _compat_first(self, rp, charset=None):
return rp.first()
dialect = MySQLDialect_oursql
| true | true |
1c319f8d1ab140c228b3c1a5c4b193ef76c61214 | 409 | py | Python | feder/main/views.py | efefre/feder | fdfe2f213266548fc40cea68ac72f739c8394b8e | [
"MIT"
] | null | null | null | feder/main/views.py | efefre/feder | fdfe2f213266548fc40cea68ac72f739c8394b8e | [
"MIT"
] | null | null | null | feder/main/views.py | efefre/feder | fdfe2f213266548fc40cea68ac72f739c8394b8e | [
"MIT"
] | null | null | null | from django.views.generic import TemplateView
from feder.monitorings.models import Monitoring
class HomeView(TemplateView):
    """Landing page listing the sixteen most recently created public monitorings."""

    template_name = "main/home.html"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        recent_public = Monitoring.objects.only_public().order_by("-created")
        context["monitoring_list"] = recent_public.all()[:16]
        return context
| 27.266667 | 76 | 0.679707 | from django.views.generic import TemplateView
from feder.monitorings.models import Monitoring
class HomeView(TemplateView):
template_name = "main/home.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["monitoring_list"] = (
Monitoring.objects.only_public().order_by("-created").all()[:16]
)
return context
| true | true |
1c319fcd462d92bc0a42f8dafa4cea5196fd6d6e | 401 | py | Python | blackjack/game/hand.py | fdpeiter/Blackjack21 | 8a43edd24900000afd771d3dab601437c4a06893 | [
"MIT"
] | null | null | null | blackjack/game/hand.py | fdpeiter/Blackjack21 | 8a43edd24900000afd771d3dab601437c4a06893 | [
"MIT"
] | null | null | null | blackjack/game/hand.py | fdpeiter/Blackjack21 | 8a43edd24900000afd771d3dab601437c4a06893 | [
"MIT"
# Point value of each card rank in blackjack; face cards count 10 and an
# Ace is always counted as 11 here (no soft/hard adjustment).
_RANKS = (
    "Two", "Three", "Four", "Five", "Six", "Seven", "Eight",
    "Nine", "Ten", "Jack", "Queen", "King", "Ace",
)
_POINTS = (2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 11)
values = dict(zip(_RANKS, _POINTS))


class Hand:
    """The cards a player is holding plus their running blackjack total."""

    def __init__(self):
        # Cards are kept in deal order; ``value`` is updated incrementally.
        self.cards = []
        self.value = 0

    def add_card(self, card):
        """Take *card* into the hand and add its rank's points to the total."""
        self.cards.append(card)
        self.value += values[card.rank]
| 15.423077 | 39 | 0.448878 | values = {
"Two": 2,
"Three": 3,
"Four": 4,
"Five": 5,
"Six": 6,
"Seven": 7,
"Eight": 8,
"Nine": 9,
"Ten": 10,
"Jack": 10,
"Queen": 10,
"King": 10,
"Ace": 11,
}
class Hand:
def __init__(self):
self.cards = []
self.value = 0
def add_card(self, card):
self.cards.append(card)
self.value += values[card.rank]
| true | true |
1c31a0a69f4670d7ddfa73dd10fc53c4461d6164 | 365 | py | Python | CursoEmVideo/ex114.py | EduardoArgenti/Python | 18b4578033d6eb6fb0ae2d6a8511a4a813856203 | [
"MIT"
] | null | null | null | CursoEmVideo/ex114.py | EduardoArgenti/Python | 18b4578033d6eb6fb0ae2d6a8511a4a813856203 | [
"MIT"
] | null | null | null | CursoEmVideo/ex114.py | EduardoArgenti/Python | 18b4578033d6eb6fb0ae2d6a8511a4a813856203 | [
"MIT"
# Check whether the site "pudim" (http://www.pudim.com.br) is reachable
# from the machine running this script.
import urllib
import urllib.request
# Import urllib.error explicitly instead of relying on the side effect of
# importing urllib.request (which happens to pull it in).
import urllib.error

try:
    site = urllib.request.urlopen('http://www.pudim.com.br')
except urllib.error.URLError:
    print('\033[1:31mErro: site inacessível.\033[0m')
else:
    print('\033[1:32mSucesso: site disponível.\033[0m')
    print(site.read())
    # Release the HTTP connection; the original leaked it.
    site.close()
import urllib
import urllib.request
try:
site = urllib.request.urlopen('http://www.pudim.com.br')
except urllib.error.URLError:
print('\033[1:31mErro: site inacessível.\033[0m')
else:
print('\033[1:32mSucesso: site disponível.\033[0m')
print(site.read()) | true | true |
1c31a15b5a6c2342045cc4fe641cdbe7458a338c | 4,261 | py | Python | scrapy/linkextractors/lxmlhtml.py | michaelgilmore/scrapy | 5a2a6bf6fc8861f00c0875659db11ba4d72406cd | [
"BSD-3-Clause"
] | 2 | 2015-05-27T02:06:18.000Z | 2015-05-27T02:06:37.000Z | scrapy/linkextractors/lxmlhtml.py | michaelgilmore/scrapy | 5a2a6bf6fc8861f00c0875659db11ba4d72406cd | [
"BSD-3-Clause"
] | null | null | null | scrapy/linkextractors/lxmlhtml.py | michaelgilmore/scrapy | 5a2a6bf6fc8861f00c0875659db11ba4d72406cd | [
"BSD-3-Clause"
] | 1 | 2020-11-01T20:40:01.000Z | 2020-11-01T20:40:01.000Z | """
Link extractor based on lxml.html
"""
import re
from six.moves.urllib.parse import urlparse, urljoin
import lxml.etree as etree
from scrapy.selector import Selector
from scrapy.link import Link
from scrapy.utils.misc import arg_to_iter
from scrapy.utils.python import unique as unique_list, str_to_unicode
from scrapy.linkextractors import FilteringLinkExtractor
from scrapy.utils.response import get_base_url
# from lxml/src/lxml/html/__init__.py
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
# Pre-compiled XPath returning the concatenated text content of an element.
_collect_string_content = etree.XPath("string()")
def _nons(tag):
    # Strip the XHTML namespace from an lxml tag name:
    # "{http://www.w3.org/1999/xhtml}a" -> "a".  Any other value is
    # returned untouched.
    # NOTE(review): ``basestring`` exists only on Python 2 -- this module
    # (see also ``unicode`` below) appears to target a py2 interpreter.
    if isinstance(tag, basestring):
        if tag[0] == '{' and tag[1:len(XHTML_NAMESPACE)+1] == XHTML_NAMESPACE:
            return tag.split('}')[-1]
    return tag
class LxmlParserLinkExtractor(object):
    """Low-level link extractor that walks an lxml-parsed document.

    ``tag`` / ``attr`` may be plain values or predicate callables;
    ``process`` is an optional transform applied to each attribute value
    (returning ``None`` from it drops the link).
    """

    def __init__(self, tag="a", attr="href", process=None, unique=False):
        # Plain values are normalized into predicate/identity callables so
        # the rest of the class only deals with callables.
        self.scan_tag = tag if callable(tag) else lambda t: t == tag
        self.scan_attr = attr if callable(attr) else lambda a: a == attr
        self.process_attr = process if callable(process) else lambda v: v
        self.unique = unique

    def _iter_links(self, document):
        # Yield (element, attribute name, attribute value) for every
        # attribute accepted by scan_attr on every element accepted by
        # scan_tag (tag names namespace-stripped via _nons).
        for el in document.iter(etree.Element):
            if not self.scan_tag(_nons(el.tag)):
                continue
            attribs = el.attrib
            for attrib in attribs:
                if not self.scan_attr(attrib):
                    continue
                yield (el, attrib, attribs[attrib])

    def _extract_links(self, selector, response_url, response_encoding, base_url):
        links = []
        # hacky way to get the underlying lxml parsed document
        for el, attr, attr_val in self._iter_links(selector._root):
            # pseudo lxml.html.HtmlElement.make_links_absolute(base_url)
            attr_val = urljoin(base_url, attr_val)
            url = self.process_attr(attr_val)
            if url is None:
                continue
            # NOTE(review): ``unicode`` is Python-2-only; this branch
            # assumes a py2 interpreter.
            if isinstance(url, unicode):
                url = url.encode(response_encoding)
            # to fix relative links after process_value
            url = urljoin(response_url, url)
            # Link text is the element's full string content; rel=nofollow
            # on the element is propagated to the Link object.
            link = Link(url, _collect_string_content(el) or u'',
                        nofollow=True if el.get('rel') == 'nofollow' else False)
            links.append(link)
        return unique_list(links, key=lambda link: link.url) \
            if self.unique else links

    def extract_links(self, response):
        # Public entry point: wrap the response in a Selector and delegate.
        html = Selector(response)
        base_url = get_base_url(response)
        return self._extract_links(html, response.url, response.encoding, base_url)

    def _process_links(self, links):
        """ Normalize and filter extracted links
        The subclass should override it if neccessary
        """
        # De-duplicate by URL when unique=True; otherwise pass through.
        links = unique_list(links, key=lambda link: link.url) if self.unique else links
        return links
class LxmlLinkExtractor(FilteringLinkExtractor):
    """Link extractor combining LxmlParserLinkExtractor with URL filtering."""

    def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(),
                 tags=('a', 'area'), attrs=('href',), canonicalize=True,
                 unique=True, process_value=None, deny_extensions=None, restrict_css=()):
        # Membership predicates over the accepted tag/attribute names.
        wanted_tags = set(arg_to_iter(tags))
        wanted_attrs = set(arg_to_iter(attrs))
        parser = LxmlParserLinkExtractor(
            tag=wanted_tags.__contains__,
            attr=wanted_attrs.__contains__,
            unique=unique,
            process=process_value,
        )
        super(LxmlLinkExtractor, self).__init__(
            parser,
            allow=allow,
            deny=deny,
            allow_domains=allow_domains,
            deny_domains=deny_domains,
            restrict_xpaths=restrict_xpaths,
            restrict_css=restrict_css,
            canonicalize=canonicalize,
            deny_extensions=deny_extensions,
        )

    def extract_links(self, response):
        """Return the unique, filtered links found in *response*."""
        selector = Selector(response)
        base_url = get_base_url(response)
        if self.restrict_xpaths:
            # Only look inside the regions selected by restrict_xpaths.
            documents = []
            for xpath in self.restrict_xpaths:
                documents.extend(selector.xpath(xpath))
        else:
            documents = [selector]
        collected = []
        for document in documents:
            found = self._extract_links(
                document, response.url, response.encoding, base_url
            )
            collected.extend(self._process_links(found))
        return unique_list(collected)
| 38.044643 | 96 | 0.64445 |
import re
from six.moves.urllib.parse import urlparse, urljoin
import lxml.etree as etree
from scrapy.selector import Selector
from scrapy.link import Link
from scrapy.utils.misc import arg_to_iter
from scrapy.utils.python import unique as unique_list, str_to_unicode
from scrapy.linkextractors import FilteringLinkExtractor
from scrapy.utils.response import get_base_url
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
_collect_string_content = etree.XPath("string()")
def _nons(tag):
if isinstance(tag, basestring):
if tag[0] == '{' and tag[1:len(XHTML_NAMESPACE)+1] == XHTML_NAMESPACE:
return tag.split('}')[-1]
return tag
class LxmlParserLinkExtractor(object):
def __init__(self, tag="a", attr="href", process=None, unique=False):
self.scan_tag = tag if callable(tag) else lambda t: t == tag
self.scan_attr = attr if callable(attr) else lambda a: a == attr
self.process_attr = process if callable(process) else lambda v: v
self.unique = unique
def _iter_links(self, document):
for el in document.iter(etree.Element):
if not self.scan_tag(_nons(el.tag)):
continue
attribs = el.attrib
for attrib in attribs:
if not self.scan_attr(attrib):
continue
yield (el, attrib, attribs[attrib])
def _extract_links(self, selector, response_url, response_encoding, base_url):
links = []
for el, attr, attr_val in self._iter_links(selector._root):
attr_val = urljoin(base_url, attr_val)
url = self.process_attr(attr_val)
if url is None:
continue
if isinstance(url, unicode):
url = url.encode(response_encoding)
url = urljoin(response_url, url)
link = Link(url, _collect_string_content(el) or u'',
nofollow=True if el.get('rel') == 'nofollow' else False)
links.append(link)
return unique_list(links, key=lambda link: link.url) \
if self.unique else links
def extract_links(self, response):
html = Selector(response)
base_url = get_base_url(response)
return self._extract_links(html, response.url, response.encoding, base_url)
def _process_links(self, links):
links = unique_list(links, key=lambda link: link.url) if self.unique else links
return links
class LxmlLinkExtractor(FilteringLinkExtractor):
def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(),
tags=('a', 'area'), attrs=('href',), canonicalize=True,
unique=True, process_value=None, deny_extensions=None, restrict_css=()):
tags, attrs = set(arg_to_iter(tags)), set(arg_to_iter(attrs))
tag_func = lambda x: x in tags
attr_func = lambda x: x in attrs
lx = LxmlParserLinkExtractor(tag=tag_func, attr=attr_func,
unique=unique, process=process_value)
super(LxmlLinkExtractor, self).__init__(lx, allow=allow, deny=deny,
allow_domains=allow_domains, deny_domains=deny_domains,
restrict_xpaths=restrict_xpaths, restrict_css=restrict_css,
canonicalize=canonicalize, deny_extensions=deny_extensions)
def extract_links(self, response):
html = Selector(response)
base_url = get_base_url(response)
if self.restrict_xpaths:
docs = [subdoc
for x in self.restrict_xpaths
for subdoc in html.xpath(x)]
else:
docs = [html]
all_links = []
for doc in docs:
links = self._extract_links(doc, response.url, response.encoding, base_url)
all_links.extend(self._process_links(links))
return unique_list(all_links)
| true | true |
1c31a18a0617523c1bb28a5dc58ba367b22931b1 | 596 | py | Python | idea/migrations/0003_auto_20170118_0445.py | andreyrobota/AndreyKosinskiy | 6258c4e90de791f721093545ec3cd9a9569155f2 | [
"MIT"
] | null | null | null | idea/migrations/0003_auto_20170118_0445.py | andreyrobota/AndreyKosinskiy | 6258c4e90de791f721093545ec3cd9a9569155f2 | [
"MIT"
] | null | null | null | idea/migrations/0003_auto_20170118_0445.py | andreyrobota/AndreyKosinskiy | 6258c4e90de791f721093545ec3cd9a9569155f2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-18 01:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Alters two fields on the 'ideauser' model:
    #   * idea_name -> CharField(max_length=200)
    #   * idea_text -> TextField()

    dependencies = [
        ('idea', '0002_auto_20170118_0359'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ideauser',
            name='idea_name',
            field=models.CharField(max_length=200),
        ),
        migrations.AlterField(
            model_name='ideauser',
            name='idea_text',
            field=models.TextField(),
        ),
    ]
| 22.923077 | 51 | 0.587248 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('idea', '0002_auto_20170118_0359'),
]
operations = [
migrations.AlterField(
model_name='ideauser',
name='idea_name',
field=models.CharField(max_length=200),
),
migrations.AlterField(
model_name='ideauser',
name='idea_text',
field=models.TextField(),
),
]
| true | true |
1c31a1bc387ebb0a412af0f080aed7fb1457d5f7 | 2,739 | py | Python | stellarobservatory/stellarbeat.py | andrenarchy/stellar-observatory | 0e1f6af4cdacae19248353f902686d8192130436 | [
"MIT"
] | 14 | 2019-05-29T09:45:00.000Z | 2021-04-22T20:11:15.000Z | stellarobservatory/stellarbeat.py | andrenarchy/stellar-observatory | 0e1f6af4cdacae19248353f902686d8192130436 | [
"MIT"
] | 10 | 2019-05-29T09:47:01.000Z | 2020-09-15T20:34:55.000Z | stellarobservatory/stellarbeat.py | andrenarchy/stellar-observatory | 0e1f6af4cdacae19248353f902686d8192130436 | [
"MIT"
] | 5 | 2019-05-29T07:33:02.000Z | 2021-11-24T18:46:03.000Z | """Fetch and process nodes"""
from typing import Any, Dict, List, Optional, Set, TypedDict, cast
import requests
from .utils.graph import Nodes
from .quorum_slice_definition import get_normalized_definition, Definition, Definitions
def get_nodes_from_stellarbeat():
    """Fetch the current node list from the stellarbeat.io public API."""
    response = requests.get('https://api.stellarbeat.io/v1/nodes')
    return response.json()
# Shape of a quorum set as returned by the stellarbeat API.  All keys are
# declared here, but the conversion code below treats 'validators' and
# 'innerQuorumSets' as optional at runtime (it membership-tests them).
QuorumSet = TypedDict('QuorumSet', {
    'threshold': int,
    'validators': List[str],
    # NOTE: use List['QuorumSet'] when https://github.com/python/mypy/issues/731 is fixed
    'innerQuorumSets': Any
})
def get_definition_from_stellarbeat_quorum_set(quorum_set: QuorumSet) -> Definition:
    """Turn a stellarbeat quorum set into a quorum slice definition.

    Missing ``validators`` / ``innerQuorumSets`` keys are treated as empty;
    the recursion mirrors stellarbeat's nested innerQuorumSets structure.
    """
    return {
        'threshold': quorum_set['threshold'],
        'nodes': set(quorum_set['validators']) if 'validators' in quorum_set else set(),
        # Fix: the fallback used to be ``set()``, which is inconsistent
        # with the populated branch (a list) and with the List type used
        # for children definitions.
        'children_definitions': [
            get_definition_from_stellarbeat_quorum_set(inner_quorum_set)
            for inner_quorum_set in quorum_set['innerQuorumSets']
        ] if 'innerQuorumSets' in quorum_set else []
    }
# Subset of a stellarbeat node payload used by this module.  'name' is
# Optional, and the consumers below additionally guard against the key
# being absent entirely.
StellarbeatNode = TypedDict('StellarbeatNode', {
    # https://github.com/PyCQA/pylint/issues/3882
    # pylint: disable=unsubscriptable-object
    'publicKey': str,
    'quorumSet': QuorumSet,
    'name': Optional[str]
})
def get_nodes_by_public_key(stellarbeat_nodes: List[StellarbeatNode]) -> Dict[str, StellarbeatNode]:
    """Index the given stellarbeat node dicts by their public key."""
    nodes_by_key = {}
    for node in stellarbeat_nodes:
        nodes_by_key[node['publicKey']] = node
    return nodes_by_key
def convert_stellarbeat_to_observatory(stellarbeat_nodes: List[StellarbeatNode]):
    """Derive (nodes, definitions_by_node, node_names) from stellarbeat nodes."""
    by_key = get_nodes_by_public_key(stellarbeat_nodes)
    nodes: Nodes = set(by_key)
    definitions_by_node: Definitions = {}
    node_names: Dict[str, str] = {}
    for key, node in by_key.items():
        # Normalize each node's quorum set into a quorum slice definition.
        raw_definition = get_definition_from_stellarbeat_quorum_set(node['quorumSet'])
        definitions_by_node[key] = get_normalized_definition(raw_definition, key)
        # Fall back to the public key when no display name is present.
        node_names[key] = cast(str, node['name'] if 'name' in node else key)
    return nodes, definitions_by_node, node_names
def convert_public_keys_to_names(nodes_by_public_key: Dict[str, StellarbeatNode],
                                 public_keys: Set[str]):
    """Map each public key to its node's display name, or to the key itself
    when the node has no ``name`` entry."""
    names = set()
    for public_key in public_keys:
        node = nodes_by_public_key[public_key]
        names.add(node['name'] if 'name' in node else public_key)
    return names
| 41.5 | 100 | 0.703907 | from typing import Any, Dict, List, Optional, Set, TypedDict, cast
import requests
from .utils.graph import Nodes
from .quorum_slice_definition import get_normalized_definition, Definition, Definitions
def get_nodes_from_stellarbeat():
return requests.get('https://api.stellarbeat.io/v1/nodes').json()
QuorumSet = TypedDict('QuorumSet', {
'threshold': int,
'validators': List[str],
'innerQuorumSets': Any
})
def get_definition_from_stellarbeat_quorum_set(quorum_set: QuorumSet) -> Definition:
return {
'threshold': quorum_set['threshold'],
'nodes': set(quorum_set['validators']) if 'validators' in quorum_set else set(),
'children_definitions': [
get_definition_from_stellarbeat_quorum_set(inner_quorum_set)
for inner_quorum_set in quorum_set['innerQuorumSets']
] if 'innerQuorumSets' in quorum_set else set()
}
StellarbeatNode = TypedDict('StellarbeatNode', {
# https://github.com/PyCQA/pylint/issues/3882
# pylint: disable=unsubscriptable-object
'publicKey': str,
'quorumSet': QuorumSet,
'name': Optional[str]
})
def get_nodes_by_public_key(stellarbeat_nodes: List[StellarbeatNode]) -> Dict[str, StellarbeatNode]:
return {node['publicKey']: node for node in stellarbeat_nodes}
def convert_stellarbeat_to_observatory(stellarbeat_nodes: List[StellarbeatNode]):
stellarbeat_nodes_by_public_key = get_nodes_by_public_key(stellarbeat_nodes)
nodes: Nodes = set(stellarbeat_nodes_by_public_key.keys())
definitions_by_node: Definitions = {
key: get_normalized_definition(
get_definition_from_stellarbeat_quorum_set(node['quorumSet']),
key
)
for key, node in stellarbeat_nodes_by_public_key.items()
}
node_names: Dict[str, str] = {
key: cast(str, node['name'] if 'name' in node else key)
for key, node in stellarbeat_nodes_by_public_key.items()
}
return nodes, definitions_by_node, node_names
def convert_public_keys_to_names(nodes_by_public_key: Dict[str, StellarbeatNode],
public_keys: Set[str]):
return {
nodes_by_public_key[public_key]['name'] if 'name' in nodes_by_public_key[public_key] \
else public_key \
for public_key in public_keys
}
| true | true |
1c31a2534a93662db3935f20a70cbcbb719c581c | 9,037 | py | Python | art_collections/hooks.py | mohsinalimat/art_collections | 95d2f3627e59c8229cee6c14f01f4c513fc86304 | [
"MIT"
] | null | null | null | art_collections/hooks.py | mohsinalimat/art_collections | 95d2f3627e59c8229cee6c14f01f4c513fc86304 | [
"MIT"
] | null | null | null | art_collections/hooks.py | mohsinalimat/art_collections | 95d2f3627e59c8229cee6c14f01f4c513fc86304 | [
"MIT"
] | 1 | 2022-02-02T19:52:54.000Z | 2022-02-02T19:52:54.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
from frappe import _
app_name = "art_collections"
app_title = "Art Collections"
app_publisher = "GreyCube Technologies"
app_description = "Customization for art collections"
app_icon = "octicon octicon-gift"
app_color = "violet"
app_email = "admin@greycube.in"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
app_include_css = ["/assets/css/pos_list.min.css"]
app_include_js = "/assets/art_collections/js/art_collections.js"
# include js, css files in header of web template
web_include_css = "/assets/art_collections/css/art_collections.css"
web_include_js = ["/assets/art_collections/js/shopping_cart.js"]
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
doctype_js = {
"Customer": "public/js/customer.js",
"Supplier": "public/js/supplier.js",
"Address": "public/js/address.js",
"Item": "public/js/item.js",
"Delivery Note": "public/js/delivery_note.js",
"Sales Order": "public/js/sales_order.js",
"Quotation": "public/js/quotation.js",
"Purchase Order": "public/js/purchase_order.js",
"Issue": "public/js/issue.js",
"Pricing Rule": "public/js/pricing_rule.js",
"POS Profile": "public/js/pos_profile.js",
"Supplier Quotation": "public/js/supplier_quotation.js",
"Pick List": "public/js/pick_list.js",
"Stock Entry": "public/js/stock_entry.js",
"Sales Invoice": "public/js/sales_invoice.js",
"Website Item": "public/js/website_item.js",
# "Request for Quotation": "public/js/request_for_quotation.js"
}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
doctype_list_js = {"Sales Order": "public/js/sales_order_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "art_collections.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "art_collections.install.before_install"
# after_install = "art_collections.install.after_install"
after_migrate = "art_collections.migrations.after_migrations"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "art_collections.notifications.get_notification_config"
on_session_creation = "art_collections.art_cart.set_wishlist_cart_count"
on_logout = "art_collections.art_cart.clear_wishlist_cart_count"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
doc_events = {
"Website Item": {
"on_change": "art_collections.website_item_controller.make_route_ascents_free",
"validate": "art_collections.website_item_controller.make_route_ascents_free",
},
"Item": {
"validate": "art_collections.item_controller.item_custom_validation",
# "before_insert": "art_collections.item_controller.set_custom_item_name",
"autoname": "art_collections.item_controller.set_item_code_for_pre_item",
},
"Pricing Rule": {
"on_update": "art_collections.api.update_flag_table_from_pricing_rule"
},
"Issue Type": {"autoname": "art_collections.api.autoname_issue_type"},
"Purchase Receipt": {
"validate": "art_collections.directive_controller.get_directive",
"on_submit": "art_collections.purchase_receipt_controller.purchase_receipt_custom_submit_logic",
},
"Sales Order": {
"validate": [
"art_collections.sales_order_controller.sales_order_custom_validation",
# "art_collections.sales_order_controller.update_total_saleable_qty",
# "art_collections.directive_controller.get_directive",
],
"on_submit": [
"art_collections.api.sales_order_from_shopping_cart",
"art_collections.controllers.excel.sales_order.on_submit_sales_order",
],
# "on_update": [
# "art_collections.sales_order_controller.sales_order_custom_validation",
# ],
},
"Purchase Order": {
"on_submit": [
"art_collections.purchase_order_controller.purchase_order_custom_on_submit",
"art_collections.controllers.excel.purchase_order.on_submit_purchase_order",
],
"on_update_after_submit": [
"art_collections.purchase_order_controller.purchase_order_custom_on_submit"
# "art_collections.purchase_order_controller.purchase_order_update_delivery_date_of_item",
# "art_collections.purchase_order_controller.purchase_order_update_schedule_date_of_item",
],
"validate": ["art_collections.purchase_order_controller.purchase_order_custom_validation",
"art_collections.directive_controller.get_directive"]
},
"Supplier Quotation": {
"validate": [
"art_collections.supplier_quotation_controller.supplier_quotation_custom_validation",
"art_collections.directive_controller.get_directive",
]
},
"Request for Quotation": {
"validate": [
"art_collections.request_for_quotation_controller.request_for_quotation_custom_validation",
"art_collections.directive_controller.get_directive",
],
"on_submit": "art_collections.controllers.excel.request_for_quotation_excel.on_submit_request_for_quotation",
},
"Address": {
"autoname": "art_collections.address_controller.set_address_title_based_on_customer",
"validate": "art_collections.address_controller.fetch_default_mode_of_payment",
},
"Quotation": {
"validate": "art_collections.directive_controller.get_directive",
"on_submit": "art_collections.controllers.excel.quotation_excel.on_submit_quotation",
},
"Delivery Note": {
"validate": "art_collections.directive_controller.get_directive",
"on_submit": "art_collections.controllers.excel.delivery_note_excel.on_submit_delivery_note",
},
"Sales Invoice": {
"validate": "art_collections.directive_controller.get_directive",
"on_submit": "art_collections.controllers.excel.sales_invoice_excel.on_submit_sales_invoice",
},
"Purchase Invoice": {
"validate": "art_collections.directive_controller.get_directive"
},
"Pick List": {"validate": "art_collections.directive_controller.get_directive"},
}
# Scheduled Tasks
# ---------------
scheduler_events = {
# "all": [
# "art_collections.tasks.all"
# ],
"cron": {
"15 00 * * *": [
"art_collections.item_controller.allow_order_still_stock_last",
]
},
"daily": ["art_collections.scheduler_task_controller.daily"]
# "hourly": [
# "art_collections.tasks.hourly"
# ],
# "weekly": [
# "art_collections.tasks.weekly"
# ]
# "monthly": [
# "art_collections.tasks.monthly"
# ]
}
# Testing
# -------
# before_tests = "art_collections.install.before_tests"
standard_portal_menu_items = [
{
"title": _("Manage Wish List Name"),
"route": "/wish-list-name",
"reference_doctype": "Wish List Name",
"role": "Customer",
}
]
# Overriding Whitelisted Methods
# ------------------------------
#
# Whitelisted-method overrides: route these erpnext e-commerce endpoints
# through the art_collections implementations.
override_whitelisted_methods = {
    # NOTE: keys must exactly match the dotted path clients call; the original
    # first key had a stray leading space, which silently disabled the override.
    "erpnext.e_commerce.shopping_cart.cart.update_cart": "art_collections.api.update_cart",
    "erpnext.e_commerce.shopping_cart.product_info.get_product_info_for_website": "art_collections.api.get_product_info_for_website",
}
fixtures = [
{"dt": "Workflow", "filters": [["name", "in", ["BDC"]]]},
{
"dt": "Notification",
"filters": [
[
"name",
"in",
[
"Payment Reminder For Escompte Eligible Customers",
"validate_inner_qty_for_sales_order",
],
]
],
},
{
"dt": "Property Setter",
"filters": [["name", "in", ["Sales Order-delivery_date-no_copy"]]],
},
]
jenv = {
"methods": [
"get_print_context_for_art_collectons_sales_order:art_collections.art_collections.print_format.art_so.get_print_context",
"get_print_context_for_art_collectons_purchase_order:art_collections.art_collections.print_format.po_art.get_print_context",
],
"filters": [],
}
| 34.492366 | 133 | 0.682749 |
from __future__ import unicode_literals
from . import __version__ as app_version
from frappe import _
app_name = "art_collections"
app_title = "Art Collections"
app_publisher = "GreyCube Technologies"
app_description = "Customization for art collections"
app_icon = "octicon octicon-gift"
app_color = "violet"
app_email = "admin@greycube.in"
app_license = "MIT"
app_include_css = ["/assets/css/pos_list.min.css"]
app_include_js = "/assets/art_collections/js/art_collections.js"
web_include_css = "/assets/art_collections/css/art_collections.css"
web_include_js = ["/assets/art_collections/js/shopping_cart.js"]
doctype_js = {
"Customer": "public/js/customer.js",
"Supplier": "public/js/supplier.js",
"Address": "public/js/address.js",
"Item": "public/js/item.js",
"Delivery Note": "public/js/delivery_note.js",
"Sales Order": "public/js/sales_order.js",
"Quotation": "public/js/quotation.js",
"Purchase Order": "public/js/purchase_order.js",
"Issue": "public/js/issue.js",
"Pricing Rule": "public/js/pricing_rule.js",
"POS Profile": "public/js/pos_profile.js",
"Supplier Quotation": "public/js/supplier_quotation.js",
"Pick List": "public/js/pick_list.js",
"Stock Entry": "public/js/stock_entry.js",
"Sales Invoice": "public/js/sales_invoice.js",
"Website Item": "public/js/website_item.js",
}
doctype_list_js = {"Sales Order": "public/js/sales_order_list.js"}
after_migrate = "art_collections.migrations.after_migrations"
on_session_creation = "art_collections.art_cart.set_wishlist_cart_count"
on_logout = "art_collections.art_cart.clear_wishlist_cart_count"
doc_events = {
"Website Item": {
"on_change": "art_collections.website_item_controller.make_route_ascents_free",
"validate": "art_collections.website_item_controller.make_route_ascents_free",
},
"Item": {
"validate": "art_collections.item_controller.item_custom_validation",
"autoname": "art_collections.item_controller.set_item_code_for_pre_item",
},
"Pricing Rule": {
"on_update": "art_collections.api.update_flag_table_from_pricing_rule"
},
"Issue Type": {"autoname": "art_collections.api.autoname_issue_type"},
"Purchase Receipt": {
"validate": "art_collections.directive_controller.get_directive",
"on_submit": "art_collections.purchase_receipt_controller.purchase_receipt_custom_submit_logic",
},
"Sales Order": {
"validate": [
"art_collections.sales_order_controller.sales_order_custom_validation",
],
"on_submit": [
"art_collections.api.sales_order_from_shopping_cart",
"art_collections.controllers.excel.sales_order.on_submit_sales_order",
],
},
"Purchase Order": {
"on_submit": [
"art_collections.purchase_order_controller.purchase_order_custom_on_submit",
"art_collections.controllers.excel.purchase_order.on_submit_purchase_order",
],
"on_update_after_submit": [
"art_collections.purchase_order_controller.purchase_order_custom_on_submit"
],
"validate": ["art_collections.purchase_order_controller.purchase_order_custom_validation",
"art_collections.directive_controller.get_directive"]
},
"Supplier Quotation": {
"validate": [
"art_collections.supplier_quotation_controller.supplier_quotation_custom_validation",
"art_collections.directive_controller.get_directive",
]
},
"Request for Quotation": {
"validate": [
"art_collections.request_for_quotation_controller.request_for_quotation_custom_validation",
"art_collections.directive_controller.get_directive",
],
"on_submit": "art_collections.controllers.excel.request_for_quotation_excel.on_submit_request_for_quotation",
},
"Address": {
"autoname": "art_collections.address_controller.set_address_title_based_on_customer",
"validate": "art_collections.address_controller.fetch_default_mode_of_payment",
},
"Quotation": {
"validate": "art_collections.directive_controller.get_directive",
"on_submit": "art_collections.controllers.excel.quotation_excel.on_submit_quotation",
},
"Delivery Note": {
"validate": "art_collections.directive_controller.get_directive",
"on_submit": "art_collections.controllers.excel.delivery_note_excel.on_submit_delivery_note",
},
"Sales Invoice": {
"validate": "art_collections.directive_controller.get_directive",
"on_submit": "art_collections.controllers.excel.sales_invoice_excel.on_submit_sales_invoice",
},
"Purchase Invoice": {
"validate": "art_collections.directive_controller.get_directive"
},
"Pick List": {"validate": "art_collections.directive_controller.get_directive"},
}
scheduler_events = {
"cron": {
"15 00 * * *": [
"art_collections.item_controller.allow_order_still_stock_last",
]
},
"daily": ["art_collections.scheduler_task_controller.daily"]
}
standard_portal_menu_items = [
{
"title": _("Manage Wish List Name"),
"route": "/wish-list-name",
"reference_doctype": "Wish List Name",
"role": "Customer",
}
]
override_whitelisted_methods = {
    # Fixed: the first key had a stray leading space, which made the dotted
    # path never match and silently disabled the override.
    "erpnext.e_commerce.shopping_cart.cart.update_cart": "art_collections.api.update_cart",
    "erpnext.e_commerce.shopping_cart.product_info.get_product_info_for_website": "art_collections.api.get_product_info_for_website",
}
fixtures = [
{"dt": "Workflow", "filters": [["name", "in", ["BDC"]]]},
{
"dt": "Notification",
"filters": [
[
"name",
"in",
[
"Payment Reminder For Escompte Eligible Customers",
"validate_inner_qty_for_sales_order",
],
]
],
},
{
"dt": "Property Setter",
"filters": [["name", "in", ["Sales Order-delivery_date-no_copy"]]],
},
]
jenv = {
"methods": [
"get_print_context_for_art_collectons_sales_order:art_collections.art_collections.print_format.art_so.get_print_context",
"get_print_context_for_art_collectons_purchase_order:art_collections.art_collections.print_format.po_art.get_print_context",
],
"filters": [],
}
| true | true |
1c31a2d5a144fba929befb65d383bd0f0157eab5 | 7,619 | py | Python | test/helpers.py | fdeoliveirag/luigi | e20e4306786fa5b5f3c99878c2c56a77f987e0b5 | [
"Apache-2.0"
] | 14,755 | 2015-01-01T09:33:34.000Z | 2022-03-31T15:38:39.000Z | test/helpers.py | yassineaboukir/luigi | cd998eace682370e89524dd9368c3c537692eb7b | [
"Apache-2.0"
] | 2,387 | 2015-01-01T09:16:13.000Z | 2022-03-12T13:55:43.000Z | test/helpers.py | yassineaboukir/luigi | cd998eace682370e89524dd9368c3c537692eb7b | [
"Apache-2.0"
] | 2,630 | 2015-01-02T06:11:32.000Z | 2022-03-27T22:11:20.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import itertools
import tempfile
import re
from contextlib import contextmanager
import luigi
import luigi.task_register
import luigi.cmdline_parser
from luigi.cmdline_parser import CmdlineParser
import os
import unittest
def skipOnTravisAndGithubActions(reason):
    """Skip the decorated test on Travis CI or GitHub Actions.

    The skip is suppressed entirely when OVERRIDE_SKIP_CI_TESTS is set.
    """
    if _override_skip_CI_tests():
        # Override requested: produce a decorator that never skips.
        condition, message = False, ""
    else:
        condition = _running_on_travis() or _running_on_github_actions()
        message = reason
    return unittest.skipIf(condition, message)
def skipOnGithubActions(reason):
    """Skip the decorated test when running under GitHub Actions."""
    on_github = _running_on_github_actions()
    return unittest.skipIf(on_github, reason)
def _running_on_travis():
return os.getenv('TRAVIS') == 'true'
def _running_on_github_actions():
return os.getenv('GITHUB_ACTIONS') == 'true'
def _override_skip_CI_tests():
return os.getenv('OVERRIDE_SKIP_CI_TESTS') == 'true'
class with_config:
    """
    Decorator that overrides luigi config settings for the duration of the
    wrapped callable, restoring the previous configuration afterwards.

    Stacked ``with_config`` decorators merge their settings; the decorator
    closest to the function wins on conflicting options. With
    ``replace_sections=True``, sections present in ``config`` replace the
    existing sections wholesale instead of merging option-by-option.

    .. code-block: python

        >>> import luigi.configuration
        >>> @with_config({'foo': {'bar': 'baz'}})
        ... def my_test():
        ...     print(luigi.configuration.get_config().get("foo", "bar"))
        ...
        >>> my_test()
        baz
    """

    def __init__(self, config, replace_sections=False):
        self.config = config
        self.replace_sections = replace_sections

    def _make_dict(self, old_dict):
        # Merge self.config into old_dict. With replace_sections, whole
        # sections are overwritten; otherwise options are merged per section.
        if self.replace_sections:
            old_dict.update(self.config)
            return old_dict

        # Preserve insertion order: existing sections first, then new ones.
        ordered_sections = dict.fromkeys(itertools.chain(old_dict, self.config))
        merged = {}
        for section in ordered_sections:
            section_settings = old_dict.get(section, {})
            section_settings.update(self.config.get(section, {}))
            merged[section] = section_settings
        return merged

    def __call__(self, fun):
        @functools.wraps(fun)
        def wrapper(*args, **kwargs):
            import luigi.configuration
            previous = luigi.configuration.LuigiConfigParser.instance()
            patched = luigi.configuration.LuigiConfigParser()
            # Swap in a fresh parser seeded with the merged settings.
            luigi.configuration.LuigiConfigParser._instance = patched
            snapshot = {section: dict(previous.items(section))
                        for section in previous.sections()}
            for section, settings in self._make_dict(snapshot).items():
                patched.add_section(section)
                for option, value in settings.items():
                    patched.set(section, option, value)
            try:
                return fun(*args, **kwargs)
            finally:
                # Always restore the original configuration.
                luigi.configuration.LuigiConfigParser._instance = previous
        return wrapper
class RunOnceTask(luigi.Task):
    """Task that reports itself complete only after it has been run once.

    Useful in scheduler/worker tests where a task must appear incomplete
    until a worker has actually executed it.
    """

    def __init__(self, *args, **kwargs):
        super(RunOnceTask, self).__init__(*args, **kwargs)
        # Completion flag; flipped to True by run(). Read externally by tests.
        self.comp = False

    def complete(self):
        # Luigi polls this to decide whether the task still needs to run.
        return self.comp

    def run(self):
        self.comp = True
# string subclass that matches arguments containing the specified substring
# for use in mock 'called_with' assertions
class StringContaining(str):
    """str subclass that compares equal to anything containing it.

    Intended as an argument matcher in mock ``assert_called_with``-style
    assertions: ``mock.assert_called_with(StringContaining("substring"))``.
    """

    def __eq__(self, candidate):
        # Containment check, not strict equality; works for substrings of a
        # string as well as membership in any container.
        is_contained = self in candidate
        return is_contained
class LuigiTestCase(unittest.TestCase):
    """
    Base test case that isolates luigi's global task state per test.

    Tasks registered within a test case will get unregistered in a finalizer.
    Instance caches are cleared before and after all runs.
    """

    def setUp(self):
        super(LuigiTestCase, self).setUp()
        # Snapshot the global task register so tasks defined inside the test
        # can be discarded again in tearDown().
        self._stashed_reg = luigi.task_register.Register._get_reg()
        luigi.task_register.Register.clear_instance_cache()

    def tearDown(self):
        # Restore the register snapshot taken in setUp().
        luigi.task_register.Register._set_reg(self._stashed_reg)
        super(LuigiTestCase, self).tearDown()
        luigi.task_register.Register.clear_instance_cache()

    def run_locally(self, args):
        """Run luigi end-to-end (command-line parsing, task-from-name
        instantiation, local scheduler) with the given args and return
        the run's exit status."""
        temp = CmdlineParser._instance
        try:
            # Clear the cached parser so luigi.run() parses `args` fresh.
            CmdlineParser._instance = None
            run_exit_status = luigi.run(['--local-scheduler', '--no-lock'] + args)
        finally:
            CmdlineParser._instance = temp
        return run_exit_status

    def run_locally_split(self, space_seperated_args):
        """Like run_locally(), but takes a single space-separated string."""
        return self.run_locally(space_seperated_args.split(' '))
class parsing:
    """
    Decorator that parses the given command line before running the test.

    The wrapped callable executes inside a global CmdlineParser instance
    built from ``cmds``.
    """

    def __init__(self, cmds):
        self.cmds = cmds

    def __call__(self, fun):
        @functools.wraps(fun)
        def wrapper(*args, **kwargs):
            parser_context = CmdlineParser.global_instance(self.cmds, allow_override=True)
            with parser_context:
                return fun(*args, **kwargs)
        return wrapper
def in_parse(cmds, deferred_computation):
    """Parse ``cmds`` through a global CmdlineParser and invoke
    ``deferred_computation`` with the resulting task object."""
    with CmdlineParser.global_instance(cmds) as cp:
        deferred_computation(cp.get_task_obj())
@contextmanager
def temporary_unloaded_module(python_file_contents):
    """Create an importable module from source bytes.

    Writes ``python_file_contents`` to a temporary ``.py`` file under
    ``test/`` and yields the module name under which it can be imported.
    The file is removed when the context exits.
    """
    temp_module_file = tempfile.NamedTemporaryFile(
        dir='test/',
        prefix="_test_time_generated_module",
        suffix='.py')
    with temp_module_file:
        temp_module_file.file.write(python_file_contents)
        temp_module_file.file.flush()
        # Strip the directory and ".py" suffix to get the importable name.
        name_match = re.search(r'/(_test_time_generated_module.*).py',
                               temp_module_file.name)
        yield name_match.group(1)
| 32.012605 | 88 | 0.628823 |
import functools
import itertools
import tempfile
import re
from contextlib import contextmanager
import luigi
import luigi.task_register
import luigi.cmdline_parser
from luigi.cmdline_parser import CmdlineParser
import os
import unittest
def skipOnTravisAndGithubActions(reason):
if _override_skip_CI_tests():
return unittest.skipIf(False, "")
return unittest.skipIf(_running_on_travis() or _running_on_github_actions(), reason)
def skipOnGithubActions(reason):
return unittest.skipIf(_running_on_github_actions(), reason)
def _running_on_travis():
return os.getenv('TRAVIS') == 'true'
def _running_on_github_actions():
return os.getenv('GITHUB_ACTIONS') == 'true'
def _override_skip_CI_tests():
return os.getenv('OVERRIDE_SKIP_CI_TESTS') == 'true'
class with_config:
def __init__(self, config, replace_sections=False):
self.config = config
self.replace_sections = replace_sections
def _make_dict(self, old_dict):
if self.replace_sections:
old_dict.update(self.config)
return old_dict
def get_section(sec):
old_sec = old_dict.get(sec, {})
new_sec = self.config.get(sec, {})
old_sec.update(new_sec)
return old_sec
all_sections = itertools.chain(old_dict.keys(), self.config.keys())
return {sec: get_section(sec) for sec in all_sections}
def __call__(self, fun):
@functools.wraps(fun)
def wrapper(*args, **kwargs):
import luigi.configuration
orig_conf = luigi.configuration.LuigiConfigParser.instance()
new_conf = luigi.configuration.LuigiConfigParser()
luigi.configuration.LuigiConfigParser._instance = new_conf
orig_dict = {k: dict(orig_conf.items(k)) for k in orig_conf.sections()}
new_dict = self._make_dict(orig_dict)
for section, settings in new_dict.items():
new_conf.add_section(section)
for name, value in settings.items():
new_conf.set(section, name, value)
try:
return fun(*args, **kwargs)
finally:
luigi.configuration.LuigiConfigParser._instance = orig_conf
return wrapper
class RunOnceTask(luigi.Task):
def __init__(self, *args, **kwargs):
super(RunOnceTask, self).__init__(*args, **kwargs)
self.comp = False
def complete(self):
return self.comp
def run(self):
self.comp = True
class StringContaining(str):
def __eq__(self, other_str):
return self in other_str
class LuigiTestCase(unittest.TestCase):
def setUp(self):
super(LuigiTestCase, self).setUp()
self._stashed_reg = luigi.task_register.Register._get_reg()
luigi.task_register.Register.clear_instance_cache()
def tearDown(self):
luigi.task_register.Register._set_reg(self._stashed_reg)
super(LuigiTestCase, self).tearDown()
luigi.task_register.Register.clear_instance_cache()
def run_locally(self, args):
temp = CmdlineParser._instance
try:
CmdlineParser._instance = None
run_exit_status = luigi.run(['--local-scheduler', '--no-lock'] + args)
finally:
CmdlineParser._instance = temp
return run_exit_status
def run_locally_split(self, space_seperated_args):
return self.run_locally(space_seperated_args.split(' '))
class parsing:
def __init__(self, cmds):
self.cmds = cmds
def __call__(self, fun):
@functools.wraps(fun)
def wrapper(*args, **kwargs):
with CmdlineParser.global_instance(self.cmds, allow_override=True):
return fun(*args, **kwargs)
return wrapper
def in_parse(cmds, deferred_computation):
with CmdlineParser.global_instance(cmds) as cp:
deferred_computation(cp.get_task_obj())
@contextmanager
def temporary_unloaded_module(python_file_contents):
with tempfile.NamedTemporaryFile(
dir='test/',
prefix="_test_time_generated_module",
suffix='.py') as temp_module_file:
temp_module_file.file.write(python_file_contents)
temp_module_file.file.flush()
temp_module_path = temp_module_file.name
temp_module_name = re.search(r'/(_test_time_generated_module.*).py',
temp_module_path).group(1)
yield temp_module_name
| true | true |
1c31a31b67708149f7b5b2556e57b895b3c63e9d | 13,023 | py | Python | select2/fields.py | aprovin/django-select2-forms | 929a0a7acf6f9222cbe0f8c9287bf7544d169868 | [
"BSD-2-Clause"
] | null | null | null | select2/fields.py | aprovin/django-select2-forms | 929a0a7acf6f9222cbe0f8c9287bf7544d169868 | [
"BSD-2-Clause"
] | null | null | null | select2/fields.py | aprovin/django-select2-forms | 929a0a7acf6f9222cbe0f8c9287bf7544d169868 | [
"BSD-2-Clause"
] | 1 | 2015-05-18T09:35:45.000Z | 2015-05-18T09:35:45.000Z | import django
from django import forms
from django.db import models
from django.core.exceptions import ImproperlyConfigured, ValidationError, FieldDoesNotExist
from django.forms.models import ModelChoiceIterator
from django.utils.encoding import force_str
from django.utils.functional import Promise
try:
from django.db.models.fields.related import lazy_related_operation
except ImportError:
lazy_related_operation = None
from django.db.models.fields.related import add_lazy_relation
else:
add_lazy_relation = None
from sortedm2m.fields import SortedManyToManyField
from sortedm2m.forms import SortedMultipleChoiceField
from .widgets import Select, SelectMultiple
__all__ = (
'Select2FieldMixin', 'Select2ModelFieldMixin', 'ChoiceField',
'MultipleChoiceField', 'ModelChoiceField', 'ModelMultipleChoiceField',
'ForeignKey', 'ManyToManyField',)
def compat_add_lazy_relation(cls, field, relation, operation):
    """Register a lazy-relation callback on whichever API this Django has.

    Older Django exposes ``add_lazy_relation``; newer Django replaces it
    with ``lazy_related_operation``, which calls back with a different
    argument order.
    """
    if add_lazy_relation is not None:
        # Legacy code path (Django < 1.9).
        return add_lazy_relation(cls, field, relation, operation)

    def reordered(local, related, field):
        # Adapt lazy_related_operation's callback signature to operation's.
        return operation(field, related, local)

    lazy_related_operation(reordered, cls, relation, field=field)
# True when running under Django 1.9+, where the related-field internals
# were renamed (rel -> remote_field, to -> model).
dj19 = bool(django.VERSION >= (1, 9))


def compat_rel(f):
    """Return ``f``'s relation descriptor across Django versions.

    Django >= 1.9 calls it ``remote_field``; older versions call it ``rel``.
    """
    return getattr(f, 'remote_field' if dj19 else 'rel')


def compat_rel_to(f):
    """Return the model targeted by ``f``'s relation across Django versions.

    Django >= 1.9 stores it on ``.model``; older versions on ``.to``.
    """
    return getattr(compat_rel(f), 'model' if dj19 else 'to')
class Select2FieldMixin(object):
    """Form-field mixin that routes select2-specific kwargs to the widget.

    Pops select2 options (``overlay``, ``js_options``, ``sortable``,
    ``ajax``) off the field kwargs and forwards them to the widget,
    instantiating the widget class if necessary. Non-Select widgets are
    replaced with the field's default widget.
    """

    def __init__(self, *args, **kwargs):
        widget_kwargs = {}
        # Child classes may pass pre-built widget kwargs (e.g. choices).
        if 'widget_kwargs' in kwargs:
            widget_kwargs.update(kwargs.pop('widget_kwargs'))
        for option in ('overlay', 'js_options', 'sortable', 'ajax'):
            if option in kwargs:
                widget_kwargs[option] = kwargs.pop(option)

        widget = kwargs.pop('widget', None)
        # Fall back to the class default unless a Select class/instance was given.
        if isinstance(widget, type):
            if not issubclass(widget, Select):
                widget = self.widget
        elif not isinstance(widget, Select):
            widget = self.widget

        # Instantiate widget classes with the collected widget kwargs.
        kwargs['widget'] = widget(**widget_kwargs) if isinstance(widget, type) else widget
        super(Select2FieldMixin, self).__init__(*args, **kwargs)
class ChoiceField(Select2FieldMixin, forms.ChoiceField):
    # Plain choice field rendered with the select2 single-select widget.
    widget = Select
class MultipleChoiceField(Select2FieldMixin, forms.MultipleChoiceField):
    """Multi-select choice field rendered with the select2 widget."""

    widget = SelectMultiple

    def __init__(self, *args, **kwargs):
        # Hand the choices to the widget explicitly. "widget_kwargs" is not a
        # standard form-field kwarg; Select2FieldMixin.__init__ pops it off.
        widget_kwargs = kwargs.get('widget_kwargs') or {}
        kwargs['widget_kwargs'] = widget_kwargs
        if 'choices' in kwargs:
            widget_kwargs['choices'] = kwargs['choices']
        super(MultipleChoiceField, self).__init__(*args, **kwargs)

    def has_changed(self, initial, data):
        widget = self.widget
        # Unwrap widget wrappers (e.g. admin RelatedFieldWidgetWrapper).
        if not isinstance(widget, SelectMultiple) and hasattr(widget, 'widget'):
            widget = widget.widget
        # Newer Django renamed _format_value to format_value; prefer the new name.
        try:
            format_value = widget.format_value
        except AttributeError:
            format_value = widget._format_value
        initial = format_value(initial)
        return super(MultipleChoiceField, self).has_changed(initial, data)
class Select2ModelFieldMixin(Select2FieldMixin):
    """Mixin for model-backed select2 form fields.

    Adds ajax-search configuration (``search_field``, ``case_sensitive``)
    and wires ``choices`` to a queryset-backed iterator instead of a
    static list.
    """

    search_field = None
    case_sensitive = False
    choice_iterator_cls = ModelChoiceIterator

    def __init__(self, search_field=None, case_sensitive=False, *args, **kwargs):
        if search_field is None and kwargs.get('ajax'):
            # Read the name from kwargs: self.name is not assigned until
            # below, so referencing self.name here (as the original code did)
            # raised AttributeError and masked the intended TypeError.
            raise TypeError(
                ("keyword argument 'search_field' is required for field "
                 "%s <%s>") % (kwargs.get('name'), self.__class__.__name__))
        self.search_field = search_field
        self.case_sensitive = case_sensitive
        self.name = kwargs.pop('name')
        self.model = kwargs.pop('model')
        self.choice_iterator_cls = kwargs.pop('choice_iterator_cls', self.choice_iterator_cls)
        super(Select2ModelFieldMixin, self).__init__(*args, **kwargs)

    def _get_choices(self):
        # Prefer explicitly assigned choices (set via the property setter);
        # otherwise lazily iterate the queryset.
        if hasattr(self, '_choices'):
            return self._choices
        return self.choice_iterator_cls(self)

    choices = property(_get_choices, forms.ChoiceField._set_choices)
class ModelChoiceField(Select2ModelFieldMixin, forms.ModelChoiceField):
    # Single-select model choice field rendered with the select2 widget.
    widget = Select

    def __init__(self, *args, **kwargs):
        super(ModelChoiceField, self).__init__(*args, **kwargs)
        # Back-reference so the widget can access field config (ajax, model).
        self.widget.field = self
class ModelMultipleChoiceField(Select2ModelFieldMixin, SortedMultipleChoiceField):
    """Multi-select model choice field that preserves selection order."""

    widget = SelectMultiple

    #: Instance of the field on the through table used for storing sort position
    sort_field = None

    def __init__(self, *args, **kwargs):
        self.sort_field = kwargs.pop('sort_field', self.sort_field)
        # Having a sort field implies the widget must support drag-sorting.
        if self.sort_field is not None:
            kwargs['sortable'] = True
        super(ModelMultipleChoiceField, self).__init__(*args, **kwargs)
        # Back-reference so the widget can access field config (ajax, model).
        self.widget.field = self

    def clean(self, value):
        """Validate the selected pks and return the matching objects.

        When a sort field is configured, the returned objects carry their
        original selection position and are sorted by it; otherwise the
        queryset is returned as-is.
        """
        if self.required and not value:
            raise ValidationError(self.error_messages['required'])
        elif not self.required and not value:
            return []
        # The select2 widget submits selections as one comma-separated string.
        if isinstance(value, str):
            value = value.split(',')
        if not isinstance(value, (list, tuple)):
            raise ValidationError(self.error_messages['list'])
        key = self.to_field_name or 'pk'
        # First pass: reject values that cannot even be coerced for a lookup.
        for pk in value:
            try:
                self.queryset.filter(**{key: pk})
            except ValueError:
                raise ValidationError(self.error_messages['invalid_pk_value'] % pk)
        qs = self.queryset.filter(**{
            ('%s__in' % key): value,
        })
        pks = set([force_str(getattr(o, key)) for o in qs])
        # Create a dictionary for storing the original order of the items
        # passed from the form
        pk_positions = {}
        for i, val in enumerate(value):
            pk = force_str(val)
            if pk not in pks:
                raise ValidationError(self.error_messages['invalid_choice'] % val)
            pk_positions[pk] = i
        if not self.sort_field:
            return qs
        else:
            # Iterate through the objects and set the sort field to its
            # position in the comma-separated request data. Then return
            # a list of objects sorted on the sort field.
            sort_value_field_name = self.sort_field.name
            objs = []
            for i, obj in enumerate(qs):
                pk = force_str(getattr(obj, key))
                setattr(obj, sort_value_field_name, pk_positions[pk])
                objs.append(obj)
            return sorted(objs, key=lambda obj: getattr(obj, sort_value_field_name))
class RelatedFieldMixin(object):
    """Model-field mixin that adds select2 options to related fields.

    Pops select2 configuration off the model-field kwargs, builds the
    select2 form field in formfield(), and validates the ajax
    ``search_field`` once the related model is known.
    """

    search_field = None
    js_options = None
    overlay = None
    case_sensitive = False
    ajax = False

    def __init__(self, *args, **kwargs):
        # Strip select2-specific kwargs before the Django field sees them.
        self.search_field = kwargs.pop('search_field', None)
        self.js_options = kwargs.pop('js_options', None)
        self.overlay = kwargs.pop('overlay', self.overlay)
        self.case_sensitive = kwargs.pop('case_sensitive', self.case_sensitive)
        self.ajax = kwargs.pop('ajax', self.ajax)
        super(RelatedFieldMixin, self).__init__(*args, **kwargs)

    def _get_queryset(self, db=None):
        # Default manager of the related model, honoring limit_choices_to.
        return compat_rel_to(self)._default_manager.using(db).complex_filter(
            compat_rel(self).limit_choices_to)

    @property
    def queryset(self):
        return self._get_queryset()

    def formfield(self, **kwargs):
        db = kwargs.pop('using', None)
        defaults = {
            'form_class': ModelChoiceField,
            'queryset': self._get_queryset(db),
            'js_options': self.js_options,
            'search_field': self.search_field,
            'ajax': self.ajax,
            'name': self.name,
            'model': self.model,
        }
        defaults.update(kwargs)
        if self.overlay is not None:
            defaults.update({'overlay': self.overlay})
        # If initial is passed in, it's a list of related objects, but the
        # MultipleChoiceField takes a list of IDs.
        if defaults.get('initial') is not None:
            initial = defaults['initial']
            if callable(initial):
                initial = initial()
            defaults['initial'] = [i._get_pk_val() for i in initial]
        # Skip the related-field formfield() machinery on purpose: go
        # straight to models.Field so our defaults are used verbatim.
        return models.Field.formfield(self, **defaults)

    def contribute_to_related_class(self, cls, related):
        # search_field is only needed (and only validated) for ajax fields.
        if not self.ajax:
            return super(RelatedFieldMixin, self).contribute_to_related_class(cls, related)
        if self.search_field is None:
            raise TypeError(
                ("keyword argument 'search_field' is required for field "
                 "'%(field_name)s' of model %(app_label)s.%(object_name)s") % {
                    'field_name': self.name,
                    'app_label': self.model._meta.app_label,
                    'object_name': self.model._meta.object_name})
        if not callable(self.search_field) and not isinstance(self.search_field, str):
            raise TypeError(
                ("keyword argument 'search_field' must be either callable or "
                 "string on field '%(field_name)s' of model "
                 "%(app_label)s.%(object_name)s") % {
                    'field_name': self.name,
                    'app_label': self.model._meta.app_label,
                    'object_name': self.model._meta.object_name})
        if isinstance(self.search_field, str):
            # Verify the named field exists on the related model.
            try:
                opts = related.parent_model._meta
            except AttributeError:
                # Django 1.8
                opts = related.model._meta
            try:
                opts.get_field(self.search_field)
            except FieldDoesNotExist:
                raise ImproperlyConfigured(
                    ("keyword argument 'search_field' references non-existent "
                     "field '%(search_field)s' in %(field_name)s of model "
                     "<%(app_label)s.%(object_name)s>") % {
                        'search_field': self.search_field,
                        'field_name': self.name,
                        'app_label': opts.app_label,
                        'object_name': opts.object_name})
        super(RelatedFieldMixin, self).contribute_to_related_class(cls, related)
class ForeignKey(RelatedFieldMixin, models.ForeignKey):
    """Drop-in ``models.ForeignKey`` whose form field uses the select2 widget."""

    def formfield(self, **kwargs):
        """Build the form field, defaulting ``to_field_name`` to the related field name."""
        # Explicit caller kwargs take precedence over the derived default.
        kwargs.setdefault('to_field_name', compat_rel(self).field_name)
        return super(ForeignKey, self).formfield(**kwargs)
class OneToOneField(RelatedFieldMixin, models.OneToOneField):
    """Drop-in ``models.OneToOneField`` whose form field uses the select2 widget."""

    def formfield(self, **kwargs):
        """Build the form field, defaulting ``to_field_name`` to the related field name."""
        # Explicit caller kwargs take precedence over the derived default.
        kwargs.setdefault('to_field_name', compat_rel(self).field_name)
        return super(OneToOneField, self).formfield(**kwargs)
class ManyToManyField(RelatedFieldMixin, SortedManyToManyField):
    """
    Select2-aware many-to-many field with optional drag-sort ordering,
    built on top of ``sortedm2m.SortedManyToManyField``.

    The public kwarg is ``sort_field`` (a field name on the through table);
    it is translated to sortedm2m's ``sort_value_field_name`` and implies
    ``sorted=True``.
    """
    #: Name of the field on the through table used for storing sort position
    sort_value_field_name = None
    #: Instance of the field on the through table used for storing sort position
    sort_field = None
    def __init__(self, *args, **kwargs):
        """Translate the public ``sort_field`` kwarg into sortedm2m's vocabulary."""
        if 'sort_field' in kwargs:
            kwargs['sort_value_field_name'] = kwargs.pop('sort_field')
        if 'sorted' not in kwargs:
            # Sorting is enabled implicitly whenever a sort field was given.
            kwargs['sorted'] = bool(kwargs.get('sort_value_field_name'))
        super(ManyToManyField, self).__init__(*args, **kwargs)
    def formfield(self, **kwargs):
        """Build the multi-select form field, passing the resolved sort field along."""
        defaults = {
            'form_class': ModelMultipleChoiceField,
            'sort_field': self.sort_field,
        }
        defaults.update(**kwargs)
        return super(ManyToManyField, self).formfield(**defaults)
    def contribute_to_class(self, cls, name):
        """
        Replace the descriptor with our custom descriptor, so that the
        position field (which is saved in the formfield clean()) gets saved
        """
        super(ManyToManyField, self).contribute_to_class(cls, name)
        if self.sorted:
            # Resolve the actual sort field instance on the through model.
            def resolve_sort_field(field, model, cls):
                model._sort_field_name = field.sort_value_field_name
                field.sort_field = model._meta.get_field(field.sort_value_field_name)
            if isinstance(compat_rel(self).through, str):
                # Through model is still a lazy string reference; defer until
                # Django has resolved it.
                compat_add_lazy_relation(cls, self, compat_rel(self).through, resolve_sort_field)
            else:
                resolve_sort_field(self, compat_rel(self).through, cls)
| 38.078947 | 97 | 0.63457 | import django
from django import forms
from django.db import models
from django.core.exceptions import ImproperlyConfigured, ValidationError, FieldDoesNotExist
from django.forms.models import ModelChoiceIterator
from django.utils.encoding import force_str
from django.utils.functional import Promise
try:
from django.db.models.fields.related import lazy_related_operation
except ImportError:
lazy_related_operation = None
from django.db.models.fields.related import add_lazy_relation
else:
add_lazy_relation = None
from sortedm2m.fields import SortedManyToManyField
from sortedm2m.forms import SortedMultipleChoiceField
from .widgets import Select, SelectMultiple
__all__ = (
'Select2FieldMixin', 'Select2ModelFieldMixin', 'ChoiceField',
'MultipleChoiceField', 'ModelChoiceField', 'ModelMultipleChoiceField',
'ForeignKey', 'ManyToManyField',)
def compat_add_lazy_relation(cls, field, relation, operation):
if add_lazy_relation is not None:
return add_lazy_relation(cls, field, relation, operation)
def function(local, related, field):
return operation(field, related, local)
lazy_related_operation(function, cls, relation, field=field)
dj19 = bool(django.VERSION >= (1, 9))
compat_rel = lambda f: getattr(f, 'remote_field' if dj19 else 'rel')
compat_rel_to = lambda f: getattr(compat_rel(f), 'model' if dj19 else 'to')
class Select2FieldMixin(object):
def __init__(self, *args, **kwargs):
widget_kwargs = {}
# along to the widget. This is unnecessary for model fields since the
# choices in that case are iterators wrapping the queryset.
if 'widget_kwargs' in kwargs:
widget_kwargs.update(kwargs.pop('widget_kwargs'))
widget_kwarg_keys = ['overlay', 'js_options', 'sortable', 'ajax']
for k in widget_kwarg_keys:
if k in kwargs:
widget_kwargs[k] = kwargs.pop(k)
widget = kwargs.pop('widget', None)
if isinstance(widget, type):
if not issubclass(widget, Select):
widget = self.widget
elif not isinstance(widget, Select):
widget = self.widget
if isinstance(widget, type):
kwargs['widget'] = widget(**widget_kwargs)
else:
kwargs['widget'] = widget
super(Select2FieldMixin, self).__init__(*args, **kwargs)
class ChoiceField(Select2FieldMixin, forms.ChoiceField):
widget = Select
class MultipleChoiceField(Select2FieldMixin, forms.MultipleChoiceField):
widget = SelectMultiple
def __init__(self, *args, **kwargs):
# Explicitly pass the choices kwarg to the widget. "widget_kwargs"
# is not a standard Django Form Field kwarg, but we pop it off in
# Select2FieldMixin.__init__
kwargs['widget_kwargs'] = kwargs.get('widget_kwargs') or {}
if 'choices' in kwargs:
kwargs['widget_kwargs']['choices'] = kwargs['choices']
super(MultipleChoiceField, self).__init__(*args, **kwargs)
def has_changed(self, initial, data):
widget = self.widget
if not isinstance(widget, SelectMultiple) and hasattr(widget, 'widget'):
widget = widget.widget
if hasattr(widget, 'format_value'):
initial = widget.format_value(initial)
else:
initial = widget._format_value(initial)
return super(MultipleChoiceField, self).has_changed(initial, data)
class Select2ModelFieldMixin(Select2FieldMixin):
search_field = None
case_sensitive = False
choice_iterator_cls = ModelChoiceIterator
def __init__(self, search_field=None, case_sensitive=False, *args, **kwargs):
if search_field is None and kwargs.get('ajax'):
raise TypeError(
("keyword argument 'search_field' is required for field "
"%s <%s>") % (self.name, self.__class__.__name__))
self.search_field = search_field
self.case_sensitive = case_sensitive
self.name = kwargs.pop('name')
self.model = kwargs.pop('model')
self.choice_iterator_cls = kwargs.pop('choice_iterator_cls', self.choice_iterator_cls)
super(Select2ModelFieldMixin, self).__init__(*args, **kwargs)
def _get_choices(self):
if hasattr(self, '_choices'):
return self._choices
return self.choice_iterator_cls(self)
choices = property(_get_choices, forms.ChoiceField._set_choices)
class ModelChoiceField(Select2ModelFieldMixin, forms.ModelChoiceField):
widget = Select
def __init__(self, *args, **kwargs):
super(ModelChoiceField, self).__init__(*args, **kwargs)
self.widget.field = self
class ModelMultipleChoiceField(Select2ModelFieldMixin, SortedMultipleChoiceField):
widget = SelectMultiple
#: Instance of the field on the through table used for storing sort position
sort_field = None
def __init__(self, *args, **kwargs):
self.sort_field = kwargs.pop('sort_field', self.sort_field)
if self.sort_field is not None:
kwargs['sortable'] = True
super(ModelMultipleChoiceField, self).__init__(*args, **kwargs)
self.widget.field = self
def clean(self, value):
if self.required and not value:
raise ValidationError(self.error_messages['required'])
elif not self.required and not value:
return []
if isinstance(value, str):
value = value.split(',')
if not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['list'])
key = self.to_field_name or 'pk'
for pk in value:
try:
self.queryset.filter(**{key: pk})
except ValueError:
raise ValidationError(self.error_messages['invalid_pk_value'] % pk)
qs = self.queryset.filter(**{
('%s__in' % key): value,
})
pks = set([force_str(getattr(o, key)) for o in qs])
# Create a dictionary for storing the original order of the items
# passed from the form
pk_positions = {}
for i, val in enumerate(value):
pk = force_str(val)
if pk not in pks:
raise ValidationError(self.error_messages['invalid_choice'] % val)
pk_positions[pk] = i
if not self.sort_field:
return qs
else:
# Iterate through the objects and set the sort field to its
# position in the comma-separated request data. Then return
# a list of objects sorted on the sort field.
sort_value_field_name = self.sort_field.name
objs = []
for i, obj in enumerate(qs):
pk = force_str(getattr(obj, key))
setattr(obj, sort_value_field_name, pk_positions[pk])
objs.append(obj)
return sorted(objs, key=lambda obj: getattr(obj, sort_value_field_name))
class RelatedFieldMixin(object):
search_field = None
js_options = None
overlay = None
case_sensitive = False
ajax = False
def __init__(self, *args, **kwargs):
self.search_field = kwargs.pop('search_field', None)
self.js_options = kwargs.pop('js_options', None)
self.overlay = kwargs.pop('overlay', self.overlay)
self.case_sensitive = kwargs.pop('case_sensitive', self.case_sensitive)
self.ajax = kwargs.pop('ajax', self.ajax)
super(RelatedFieldMixin, self).__init__(*args, **kwargs)
def _get_queryset(self, db=None):
return compat_rel_to(self)._default_manager.using(db).complex_filter(
compat_rel(self).limit_choices_to)
@property
def queryset(self):
return self._get_queryset()
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
defaults = {
'form_class': ModelChoiceField,
'queryset': self._get_queryset(db),
'js_options': self.js_options,
'search_field': self.search_field,
'ajax': self.ajax,
'name': self.name,
'model': self.model,
}
defaults.update(kwargs)
if self.overlay is not None:
defaults.update({'overlay': self.overlay})
# If initial is passed in, it's a list of related objects, but the
if defaults.get('initial') is not None:
initial = defaults['initial']
if callable(initial):
initial = initial()
defaults['initial'] = [i._get_pk_val() for i in initial]
return models.Field.formfield(self, **defaults)
def contribute_to_related_class(self, cls, related):
if not self.ajax:
return super(RelatedFieldMixin, self).contribute_to_related_class(cls, related)
if self.search_field is None:
raise TypeError(
("keyword argument 'search_field' is required for field "
"'%(field_name)s' of model %(app_label)s.%(object_name)s") % {
'field_name': self.name,
'app_label': self.model._meta.app_label,
'object_name': self.model._meta.object_name})
if not callable(self.search_field) and not isinstance(self.search_field, str):
raise TypeError(
("keyword argument 'search_field' must be either callable or "
"string on field '%(field_name)s' of model "
"%(app_label)s.%(object_name)s") % {
'field_name': self.name,
'app_label': self.model._meta.app_label,
'object_name': self.model._meta.object_name})
if isinstance(self.search_field, str):
try:
opts = related.parent_model._meta
except AttributeError:
opts = related.model._meta
try:
opts.get_field(self.search_field)
except FieldDoesNotExist:
raise ImproperlyConfigured(
("keyword argument 'search_field' references non-existent "
"field '%(search_field)s' in %(field_name)s of model "
"<%(app_label)s.%(object_name)s>") % {
'search_field': self.search_field,
'field_name': self.name,
'app_label': opts.app_label,
'object_name': opts.object_name})
super(RelatedFieldMixin, self).contribute_to_related_class(cls, related)
class ForeignKey(RelatedFieldMixin, models.ForeignKey):
def formfield(self, **kwargs):
defaults = {
'to_field_name': compat_rel(self).field_name,
}
defaults.update(**kwargs)
return super(ForeignKey, self).formfield(**defaults)
class OneToOneField(RelatedFieldMixin, models.OneToOneField):
def formfield(self, **kwargs):
defaults = {
'to_field_name': compat_rel(self).field_name,
}
defaults.update(**kwargs)
return super(OneToOneField, self).formfield(**defaults)
class ManyToManyField(RelatedFieldMixin, SortedManyToManyField):
sort_value_field_name = None
sort_field = None
def __init__(self, *args, **kwargs):
if 'sort_field' in kwargs:
kwargs['sort_value_field_name'] = kwargs.pop('sort_field')
if 'sorted' not in kwargs:
kwargs['sorted'] = bool(kwargs.get('sort_value_field_name'))
super(ManyToManyField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
defaults = {
'form_class': ModelMultipleChoiceField,
'sort_field': self.sort_field,
}
defaults.update(**kwargs)
return super(ManyToManyField, self).formfield(**defaults)
def contribute_to_class(self, cls, name):
super(ManyToManyField, self).contribute_to_class(cls, name)
if self.sorted:
def resolve_sort_field(field, model, cls):
model._sort_field_name = field.sort_value_field_name
field.sort_field = model._meta.get_field(field.sort_value_field_name)
if isinstance(compat_rel(self).through, str):
compat_add_lazy_relation(cls, self, compat_rel(self).through, resolve_sort_field)
else:
resolve_sort_field(self, compat_rel(self).through, cls)
| true | true |
1c31a329dd2b475f0506b43cbf573628ba087ccf | 5,633 | py | Python | BlueprintsApp/main/gui/forms/language_selection_form.py | PiCodingClub/BlueprintsEdu | f65ad2c3bf6f01acb26660505f6ceded0bee888f | [
"Apache-2.0"
] | null | null | null | BlueprintsApp/main/gui/forms/language_selection_form.py | PiCodingClub/BlueprintsEdu | f65ad2c3bf6f01acb26660505f6ceded0bee888f | [
"Apache-2.0"
] | null | null | null | BlueprintsApp/main/gui/forms/language_selection_form.py | PiCodingClub/BlueprintsEdu | f65ad2c3bf6f01acb26660505f6ceded0bee888f | [
"Apache-2.0"
] | null | null | null | from gui.forms.form import Form
import pygame as pg
from utils.gui_utils import Themes
from utils.string_utils import StringUtils
from utils.app_utils import Images
from utils.app_utils import DisplaySettings
from pygame.locals import *
from utils import logger_utils
class LanguageSelectionForm(Form):
def __init__(self, display, coords=None, size=None):
Form.__init__(self, display, coords, size)
self.lang_select = None
self.btn_drop_down = None
self.__logger = logger_utils.get_logger(__name__)
self.__lang = StringUtils.get_string(StringUtils.DEFAULT_LANGUAGE)
self.__lang_content = list()
self.__lang_counter = 0
self.__is_drop_down_pressed = False
self.__selected = False
def update_form(self, coords=None, size=None):
super().update_form(coords=coords, size=size)
def draw_form(self):
super().draw_form()
if self.visible:
font = pg.font.Font(Themes.DEFAULT_THEME.get("banner_font_style"), int(self.size[1] * .07))
txt = font.render(StringUtils.get_string("ID_LANGUAGE"), True, Themes.DEFAULT_THEME.get("font"))
rect_txt = txt.get_rect()
rect_txt.topleft = (int(self.coords[0] * 1.05), int(self.coords[1] * 1.05))
self.display.blit(txt, rect_txt)
self.lang_select = pg.Rect(
(0, int(rect_txt.bottom * 1.2)), (int(self.size[0] * .85), int(self.size[1] * .12)))
self.lang_select.centerx = self.get_rect().centerx
img = Images.get_icon(Images.DROP_DOWN)
img[1].midright = (
int(self.lang_select.right - DisplaySettings.get_size_by_key()[0] * .01),
int(self.lang_select.center[1]))
self.btn_drop_down = img[1]
pg.draw.rect(self.display, Themes.DEFAULT_THEME.get("text_area_background"), self.lang_select, 0)
self.display.blit(img[0], img[1])
font = pg.font.Font(Themes.DEFAULT_THEME.get("text_font_style"), int(self.lang_select.height * 0.6))
txt = font.render(self.__lang, True, Themes.DEFAULT_THEME.get("text_area_text"))
rect_txt = txt.get_rect()
rect_txt.center = self.lang_select.center
self.display.blit(txt, rect_txt)
self.draw_drop_down()
def draw_drop_down(self):
if self.__is_drop_down_pressed:
self.__lang_content.clear()
for pos in range(self.__lang_counter, len(StringUtils.LANGUAGES), 1):
if (pos - self.__lang_counter) < 3:
rect = pg.Rect((self.lang_select.x, int(self.lang_select.y +
self.lang_select.height * (
(pos - self.__lang_counter) + 1))),
self.lang_select.size)
font = pg.font.Font(Themes.DEFAULT_THEME.get("text_font_style"),
int(self.lang_select.height * 0.6))
txt = font.render(StringUtils.LANGUAGES[pos][1], True, Themes.DEFAULT_THEME.get("text_area_text"))
rect_txt = txt.get_rect()
rect_txt.center = rect.center
self.__lang_content.append([rect, txt, rect_txt])
for i in range(0, len(self.__lang_content), 1):
pg.draw.rect(self.display, Themes.DEFAULT_THEME.get(
"text_area_background"), self.__lang_content[i][0], 0)
self.display.blit(
self.__lang_content[i][1], self.__lang_content[i][2])
def check_menu_pressed(self, pos):
if self.__is_drop_down_pressed:
for i in range(0, len(self.__lang_content), 1):
if self.__lang_content[i][0].collidepoint(pos) == 1:
self.__lang = StringUtils.LANGUAGES[i + self.__lang_counter][1]
self.__is_drop_down_pressed = False
self.__selected = True
def check_form_events(self, event):
super().check_form_events(event)
if event.type == MOUSEBUTTONUP:
if event.button != 4 and event.button != 5:
pos = pg.mouse.get_pos()
self.check_menu_pressed(pos)
if self.btn_drop_down.collidepoint(pos) == 1:
self.__logger.debug("DROP DOWN PRESSED")
if self.__is_drop_down_pressed:
self.__is_drop_down_pressed = False
else:
self.__is_drop_down_pressed = True
self.__lang_counter = 0
elif self.btn_apply.get_rect().collidepoint(pos) == 1:
for i in range(0, len(StringUtils.LANGUAGES), 1):
if self.__lang == StringUtils.LANGUAGES[i][1]:
StringUtils.set_language(StringUtils.LANGUAGES[i][0])
else:
self.__is_drop_down_pressed = False
elif event.type == MOUSEBUTTONDOWN:
if self.__is_drop_down_pressed:
if event.button == 4:
self.__lang_counter -= 1
elif event.button == 5 and len(StringUtils.LANGUAGES) > 3:
self.__lang_counter += 1
if self.__lang_counter < 0:
self.__lang_counter = 0
elif (len(StringUtils.LANGUAGES) > 3) and (self.__lang_counter > len(StringUtils.LANGUAGES) - 3):
self.__lang_counter = (len(StringUtils.LANGUAGES) - 3)
| 49.849558 | 118 | 0.573407 | from gui.forms.form import Form
import pygame as pg
from utils.gui_utils import Themes
from utils.string_utils import StringUtils
from utils.app_utils import Images
from utils.app_utils import DisplaySettings
from pygame.locals import *
from utils import logger_utils
class LanguageSelectionForm(Form):
def __init__(self, display, coords=None, size=None):
Form.__init__(self, display, coords, size)
self.lang_select = None
self.btn_drop_down = None
self.__logger = logger_utils.get_logger(__name__)
self.__lang = StringUtils.get_string(StringUtils.DEFAULT_LANGUAGE)
self.__lang_content = list()
self.__lang_counter = 0
self.__is_drop_down_pressed = False
self.__selected = False
def update_form(self, coords=None, size=None):
super().update_form(coords=coords, size=size)
def draw_form(self):
super().draw_form()
if self.visible:
font = pg.font.Font(Themes.DEFAULT_THEME.get("banner_font_style"), int(self.size[1] * .07))
txt = font.render(StringUtils.get_string("ID_LANGUAGE"), True, Themes.DEFAULT_THEME.get("font"))
rect_txt = txt.get_rect()
rect_txt.topleft = (int(self.coords[0] * 1.05), int(self.coords[1] * 1.05))
self.display.blit(txt, rect_txt)
self.lang_select = pg.Rect(
(0, int(rect_txt.bottom * 1.2)), (int(self.size[0] * .85), int(self.size[1] * .12)))
self.lang_select.centerx = self.get_rect().centerx
img = Images.get_icon(Images.DROP_DOWN)
img[1].midright = (
int(self.lang_select.right - DisplaySettings.get_size_by_key()[0] * .01),
int(self.lang_select.center[1]))
self.btn_drop_down = img[1]
pg.draw.rect(self.display, Themes.DEFAULT_THEME.get("text_area_background"), self.lang_select, 0)
self.display.blit(img[0], img[1])
font = pg.font.Font(Themes.DEFAULT_THEME.get("text_font_style"), int(self.lang_select.height * 0.6))
txt = font.render(self.__lang, True, Themes.DEFAULT_THEME.get("text_area_text"))
rect_txt = txt.get_rect()
rect_txt.center = self.lang_select.center
self.display.blit(txt, rect_txt)
self.draw_drop_down()
def draw_drop_down(self):
if self.__is_drop_down_pressed:
self.__lang_content.clear()
for pos in range(self.__lang_counter, len(StringUtils.LANGUAGES), 1):
if (pos - self.__lang_counter) < 3:
rect = pg.Rect((self.lang_select.x, int(self.lang_select.y +
self.lang_select.height * (
(pos - self.__lang_counter) + 1))),
self.lang_select.size)
font = pg.font.Font(Themes.DEFAULT_THEME.get("text_font_style"),
int(self.lang_select.height * 0.6))
txt = font.render(StringUtils.LANGUAGES[pos][1], True, Themes.DEFAULT_THEME.get("text_area_text"))
rect_txt = txt.get_rect()
rect_txt.center = rect.center
self.__lang_content.append([rect, txt, rect_txt])
for i in range(0, len(self.__lang_content), 1):
pg.draw.rect(self.display, Themes.DEFAULT_THEME.get(
"text_area_background"), self.__lang_content[i][0], 0)
self.display.blit(
self.__lang_content[i][1], self.__lang_content[i][2])
def check_menu_pressed(self, pos):
if self.__is_drop_down_pressed:
for i in range(0, len(self.__lang_content), 1):
if self.__lang_content[i][0].collidepoint(pos) == 1:
self.__lang = StringUtils.LANGUAGES[i + self.__lang_counter][1]
self.__is_drop_down_pressed = False
self.__selected = True
def check_form_events(self, event):
super().check_form_events(event)
if event.type == MOUSEBUTTONUP:
if event.button != 4 and event.button != 5:
pos = pg.mouse.get_pos()
self.check_menu_pressed(pos)
if self.btn_drop_down.collidepoint(pos) == 1:
self.__logger.debug("DROP DOWN PRESSED")
if self.__is_drop_down_pressed:
self.__is_drop_down_pressed = False
else:
self.__is_drop_down_pressed = True
self.__lang_counter = 0
elif self.btn_apply.get_rect().collidepoint(pos) == 1:
for i in range(0, len(StringUtils.LANGUAGES), 1):
if self.__lang == StringUtils.LANGUAGES[i][1]:
StringUtils.set_language(StringUtils.LANGUAGES[i][0])
else:
self.__is_drop_down_pressed = False
elif event.type == MOUSEBUTTONDOWN:
if self.__is_drop_down_pressed:
if event.button == 4:
self.__lang_counter -= 1
elif event.button == 5 and len(StringUtils.LANGUAGES) > 3:
self.__lang_counter += 1
if self.__lang_counter < 0:
self.__lang_counter = 0
elif (len(StringUtils.LANGUAGES) > 3) and (self.__lang_counter > len(StringUtils.LANGUAGES) - 3):
self.__lang_counter = (len(StringUtils.LANGUAGES) - 3)
| true | true |
1c31a49ea3f7362c00543ee23e7e4555e491096b | 5,448 | py | Python | mmdet/core/post_processing/bbox_nms.py | wobushishuiguo/Rotation-ship-detection | e49f2c7fd71d6f05b3d0fa6dd67ad751b306592e | [
"Apache-2.0"
] | 1 | 2021-11-17T16:07:14.000Z | 2021-11-17T16:07:14.000Z | mmdet/core/post_processing/bbox_nms.py | wobushishuiguo/Rotation-ship-detection | e49f2c7fd71d6f05b3d0fa6dd67ad751b306592e | [
"Apache-2.0"
] | null | null | null | mmdet/core/post_processing/bbox_nms.py | wobushishuiguo/Rotation-ship-detection | e49f2c7fd71d6f05b3d0fa6dd67ad751b306592e | [
"Apache-2.0"
] | null | null | null | import torch
from mmcv.ops.nms import batched_nms
from mmdet.core.bbox.iou_calculators import bbox_overlaps
def multiclass_nms(multi_bboxes,
                   multi_scores,
                   score_thr,
                   nms_cfg,
                   max_num=-1,
                   score_factors=None):
    """NMS for multi-class bboxes carrying two extra per-box ratio channels.

    Args:
        multi_bboxes (Tensor): shape (n, #class*6) or (n, 4); each 6-channel
            box is (x1, y1, x2, y2, ratio_1, ratio_2) and only the first 4
            values take part in the IoU computation.
        multi_scores (Tensor): shape (n, #class+1), where the last column
            contains scores of the background class, but this will be ignored.
        score_thr (float): bbox threshold, bboxes with scores lower than it
            will not be considered.
        nms_cfg (dict): NMS configuration forwarded to ``batched_nms``.
        max_num (int): if there are more than max_num bboxes after NMS,
            only top max_num will be kept.
        score_factors (Tensor): The factors multiplied to scores before
            applying NMS

    Returns:
        tuple: (dets, labels), tensors of shape (k, 7) and (k,). Each det is
        (x1, y1, x2, y2, score, ratio_1, ratio_2); labels are 0-based.
    """
    num_classes = multi_scores.size(1) - 1
    # exclude background category
    if multi_bboxes.shape[1] > 4:
        bboxes = multi_bboxes.view(multi_scores.size(0), -1, 6)
    else:
        # NOTE(review): this (n, 4) branch looks incompatible with the
        # 6-channel masked_select below — confirm callers always pass
        # 6-channel boxes before relying on it.
        bboxes = multi_bboxes[:, None].expand(
            multi_scores.size(0), num_classes, 4)
    scores = multi_scores[:, :-1]
    # filter out boxes with low scores
    valid_mask = scores > score_thr
    # We use masked_select for ONNX exporting purpose,
    # which is equivalent to bboxes = bboxes[valid_mask]
    # (TODO): as ONNX does not support repeat now,
    # we have to use this ugly code
    bboxes = torch.masked_select(
        bboxes,
        torch.stack((valid_mask, valid_mask, valid_mask, valid_mask, valid_mask, valid_mask),
                    -1)).view(-1, 6)
    if score_factors is not None:
        scores = scores * score_factors[:, None]
    scores = torch.masked_select(scores, valid_mask)
    labels = valid_mask.nonzero(as_tuple=False)[:, 1]
    if bboxes.numel() == 0:
        # No box survived the score threshold: return empty (0, 7) dets.
        bboxes = multi_bboxes.new_zeros((0, 7))
        labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)
        if torch.onnx.is_in_onnx_export():
            raise RuntimeError('[ONNX Error] Can not record NMS '
                               'as it has not been executed this time')
        return bboxes, labels
    # NMS on the geometric part only; the ratio channels are re-attached after.
    dets, keep = batched_nms(bboxes[:, 0:4], scores, labels, nms_cfg)
    ratioboxes = bboxes[:, 4:6]
    ratioboxes = ratioboxes[keep]
    dets = torch.cat([dets, ratioboxes], -1)
    if max_num > 0:
        dets = dets[:max_num]
        keep = keep[:max_num]
    return dets, labels[keep]
def fast_nms(multi_bboxes,
             multi_scores,
             multi_coeffs,
             score_thr,
             iou_thr,
             top_k,
             max_num=-1):
    """Fast NMS in `YOLACT <https://arxiv.org/abs/1904.02689>`_.

    Fast NMS allows already-removed detections to suppress other detections so
    that every instance can be decided to be kept or discarded in parallel,
    which is not possible in traditional NMS. This relaxation allows us to
    implement Fast NMS entirely in standard GPU-accelerated matrix operations.

    Args:
        multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
        multi_scores (Tensor): shape (n, #class+1), where the last column
            contains scores of the background class, but this will be ignored.
        multi_coeffs (Tensor): shape (n, #class*coeffs_dim).
        score_thr (float): bbox threshold, bboxes with scores lower than it
            will not be considered.
        iou_thr (float): IoU threshold to be considered as conflicted.
        top_k (int): if there are more than top_k bboxes before NMS,
            only top top_k will be kept.
        max_num (int): if there are more than max_num bboxes after NMS,
            only top max_num will be kept. If -1, keep all the bboxes.
            Default: -1.

    Returns:
        tuple: (bboxes, labels, coefficients), tensors of shape (k, 5), (k, 1),
        and (k, coeffs_dim). Labels are 0-based.
    """
    scores = multi_scores[:, :-1].t()  # [#class, n]
    # Per class, keep only the top_k highest-scoring candidates.
    scores, idx = scores.sort(1, descending=True)
    idx = idx[:, :top_k].contiguous()
    scores = scores[:, :top_k]  # [#class, topk]
    num_classes, num_dets = idx.size()
    boxes = multi_bboxes[idx.view(-1), :].view(num_classes, num_dets, 4)
    coeffs = multi_coeffs[idx.view(-1), :].view(num_classes, num_dets, -1)
    iou = bbox_overlaps(boxes, boxes)  # [#class, topk, topk]
    # Zero the lower triangle and diagonal: since rows are score-sorted,
    # iou_max[j] is then the max IoU of box j with any HIGHER-scoring box
    # of the same class.
    iou.triu_(diagonal=1)
    iou_max, _ = iou.max(dim=1)
    # Now just filter out the ones higher than the threshold
    keep = iou_max <= iou_thr
    # Second thresholding introduces 0.2 mAP gain at negligible time cost
    keep *= scores > score_thr
    # Assign each kept detection to its corresponding class
    classes = torch.arange(
        num_classes, device=boxes.device)[:, None].expand_as(keep)
    classes = classes[keep]
    boxes = boxes[keep]
    coeffs = coeffs[keep]
    scores = scores[keep]
    # Only keep the top max_num highest scores across all classes
    scores, idx = scores.sort(0, descending=True)
    if max_num > 0:
        idx = idx[:max_num]
        scores = scores[:max_num]
    classes = classes[idx]
    boxes = boxes[idx]
    coeffs = coeffs[idx]
    cls_dets = torch.cat([boxes, scores[:, None]], dim=1)
    return cls_dets, classes, coeffs
| 36.563758 | 93 | 0.623715 | import torch
from mmcv.ops.nms import batched_nms
from mmdet.core.bbox.iou_calculators import bbox_overlaps
def multiclass_nms(multi_bboxes,
multi_scores,
score_thr,
nms_cfg,
max_num=-1,
score_factors=None):
num_classes = multi_scores.size(1) - 1
if multi_bboxes.shape[1] > 4:
bboxes = multi_bboxes.view(multi_scores.size(0), -1, 6)
else:
bboxes = multi_bboxes[:, None].expand(
multi_scores.size(0), num_classes, 4)
scores = multi_scores[:, :-1]
valid_mask = scores > score_thr
bboxes = torch.masked_select(
bboxes,
torch.stack((valid_mask, valid_mask, valid_mask, valid_mask, valid_mask, valid_mask),
-1)).view(-1, 6)
if score_factors is not None:
scores = scores * score_factors[:, None]
scores = torch.masked_select(scores, valid_mask)
labels = valid_mask.nonzero(as_tuple=False)[:, 1]
if bboxes.numel() == 0:
bboxes = multi_bboxes.new_zeros((0, 7))
labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)
if torch.onnx.is_in_onnx_export():
raise RuntimeError('[ONNX Error] Can not record NMS '
'as it has not been executed this time')
return bboxes, labels
dets, keep = batched_nms(bboxes[:, 0:4], scores, labels, nms_cfg)
ratioboxes = bboxes[:, 4:6]
ratioboxes = ratioboxes[keep]
dets = torch.cat([dets, ratioboxes], -1)
if max_num > 0:
dets = dets[:max_num]
keep = keep[:max_num]
return dets, labels[keep]
def fast_nms(multi_bboxes,
multi_scores,
multi_coeffs,
score_thr,
iou_thr,
top_k,
max_num=-1):
scores = multi_scores[:, :-1].t() s, idx = scores.sort(1, descending=True)
idx = idx[:, :top_k].contiguous()
scores = scores[:, :top_k] ses, num_dets = idx.size()
boxes = multi_bboxes[idx.view(-1), :].view(num_classes, num_dets, 4)
coeffs = multi_coeffs[idx.view(-1), :].view(num_classes, num_dets, -1)
iou = bbox_overlaps(boxes, boxes) onal=1)
iou_max, _ = iou.max(dim=1)
keep = iou_max <= iou_thr
keep *= scores > score_thr
classes = torch.arange(
num_classes, device=boxes.device)[:, None].expand_as(keep)
classes = classes[keep]
boxes = boxes[keep]
coeffs = coeffs[keep]
scores = scores[keep]
scores, idx = scores.sort(0, descending=True)
if max_num > 0:
idx = idx[:max_num]
scores = scores[:max_num]
classes = classes[idx]
boxes = boxes[idx]
coeffs = coeffs[idx]
cls_dets = torch.cat([boxes, scores[:, None]], dim=1)
return cls_dets, classes, coeffs
| true | true |
1c31a51b2a4c958d1cf381597a29713110b80cf1 | 1,222 | py | Python | lya_data_structures.py | yishayv/lyacorr | deed114b4cadd4971caec68e2838a5fac39827b1 | [
"MIT"
] | 2 | 2017-03-21T14:18:35.000Z | 2020-03-30T20:51:33.000Z | lya_data_structures.py | yishayv/lyacorr | deed114b4cadd4971caec68e2838a5fac39827b1 | [
"MIT"
] | null | null | null | lya_data_structures.py | yishayv/lyacorr | deed114b4cadd4971caec68e2838a5fac39827b1 | [
"MIT"
class LyaForestTransmittance:
    """Container for one line of sight's Lyman-alpha forest transmittance data."""

    def __init__(self, ar_z, ar_transmittance, ar_pipeline_ivar, ar_fit):
        """
        a simple wrapper for holding lya-forest data.

        :type ar_z: np.array
        :type ar_transmittance: np.array
        :type ar_pipeline_ivar: np.array
        :type ar_fit: np.array
        """
        self.ar_z = ar_z
        self.ar_transmittance = ar_transmittance
        self.ar_ivar = ar_pipeline_ivar
        self.ar_fit = ar_fit
class LyaForestTransmittanceBinned:
    """Binned variant of the forest data: a pixel mask instead of a redshift axis."""

    def __init__(self, ar_mask, ar_transmittance, ar_pipeline_ivar):
        """
        a simple wrapper for holding lya-forest data.

        :type ar_mask: np.array
        :type ar_transmittance: np.array
        :type ar_pipeline_ivar: np.array
        """
        # One tuple assignment keeps the attribute wiring in a single place.
        self.ar_mask, self.ar_transmittance, self.ar_ivar = (
            ar_mask, ar_transmittance, ar_pipeline_ivar)
class LyaForestDeltaT:
    """Container for the delta-transmittance (flux fluctuation) of one line of sight."""

    def __init__(self, ar_z, ar_delta_t, ar_delta_t_ivar):
        """
        a simple wrapper for holding lya-forest data.

        :type ar_z: np.array
        :type ar_delta_t: np.array
        :type ar_delta_t_ivar: np.array
        """
        # One tuple assignment keeps the attribute wiring in a single place.
        self.ar_z, self.ar_delta_t, self.ar_ivar = (
            ar_z, ar_delta_t, ar_delta_t_ivar)
| 31.333333 | 73 | 0.640753 | class LyaForestTransmittance:
def __init__(self, ar_z, ar_transmittance, ar_pipeline_ivar, ar_fit):
self.ar_z = ar_z
self.ar_transmittance = ar_transmittance
self.ar_ivar = ar_pipeline_ivar
self.ar_fit = ar_fit
class LyaForestTransmittanceBinned:
def __init__(self, ar_mask, ar_transmittance, ar_pipeline_ivar):
self.ar_mask = ar_mask
self.ar_transmittance = ar_transmittance
self.ar_ivar = ar_pipeline_ivar
class LyaForestDeltaT:
def __init__(self, ar_z, ar_delta_t, ar_delta_t_ivar):
self.ar_z = ar_z
self.ar_delta_t = ar_delta_t
self.ar_ivar = ar_delta_t_ivar
| true | true |
1c31a7a52f12b45d81c7942f0e107db7e9523bc7 | 2,521 | py | Python | MLImageSegmentation/makeImageDataSet.py | StevenHuang2020/OpencvPython | 42cde4880a50f7b3917027e6359485d3569bf40f | [
"MIT"
] | null | null | null | MLImageSegmentation/makeImageDataSet.py | StevenHuang2020/OpencvPython | 42cde4880a50f7b3917027e6359485d3569bf40f | [
"MIT"
] | null | null | null | MLImageSegmentation/makeImageDataSet.py | StevenHuang2020/OpencvPython | 42cde4880a50f7b3917027e6359485d3569bf40f | [
"MIT"
] | null | null | null | import sys
sys.path.append('..')
import pandas as pd
from ImageBase import cannyImg, grayImg, loadGrayImg
from skimage.filters import roberts, sobel, scharr, prewitt, gaussian, laplace, farid, median
from FeatureExtract.imageFeatures import garborFeature
from mainImagePlot import plotImagList
def makeImageFeatures(img):
    """Build a per-pixel feature table for a 2D gray image.

    Each column is one filter response flattened to 1D: the raw pixels
    first, then the Gabor bank, then a set of classic edge/smoothing
    filters.  Returns a pandas DataFrame with one row per pixel.
    """
    df = pd.DataFrame()
    flat = img.reshape(-1)
    print(img.shape, flat.shape)
    df['Original Image'] = flat
    # First feature set: the Gabor filter bank.
    for (name, feature) in garborFeature(img):
        df[name] = feature.reshape(-1)
    # Classic edge detectors and smoothers, one column each.
    df['Canny Edge'] = cannyImg(img).reshape(-1)
    responses = [
        ('Roberts', roberts(img)),
        ('Sobel', sobel(img)),
        ('Scharr', scharr(img)),
        ('Prewitt', prewitt(img)),
        ('Gaussian s3', gaussian(img, sigma=3)),
        ('Gaussian s5', gaussian(img, sigma=5)),
        ('Gaussian s7', gaussian(img, sigma=7)),
        ('Lplace', laplace(img)),  # (sic: original column name)
        ('Farid', farid(img)),
        ('Median', median(img)),
    ]
    for label, response in responses:
        df[label] = response.reshape(-1)
    return df
def makeDb(img, mask, dbFile):
    """Write the training CSV: image features plus a 'Label' column.

    The label for each pixel comes from the flattened *mask* image.
    """
    table = makeImageFeatures(img)
    table['Label'] = mask.reshape(-1)
    print(table.head())
    table.to_csv(dbFile, index=False)
def loadDb(file):
    """Read a feature CSV produced by makeDb.

    Returns (X, Y): X is the feature DataFrame without the 'Label'
    column, Y is the label column values as an ndarray.
    """
    table = pd.read_csv(file)
    labels = table['Label'].values
    features = table.drop(columns=['Label'])
    return features, labels
def showImageFeatures(img):
    """Plot the Gabor feature bank of *img* as a grid of gray images."""
    gray = grayImg(img)
    images, names = [], []
    for (name, feature) in garborFeature(gray):
        images.append(feature)
        names.append(name)
    plotImagList(images, names, gray=True, title='', showticks=False)
def main():
    """Demo entry point: extract features for the sample image pair and save a CSV."""
    image_path = r'.\res\FudanPed00001.png'
    mask_path = r'.\res\FudanPed00001_mask.png'
    csv_path = r'.\res\FudanPed00001.csv'
    makeDb(loadGrayImg(image_path), loadGrayImg(mask_path), csv_path)


if __name__ == '__main__':
    main()
| 27.402174 | 93 | 0.645775 | import sys
sys.path.append('..')
import pandas as pd
from ImageBase import cannyImg, grayImg, loadGrayImg
from skimage.filters import roberts, sobel, scharr, prewitt, gaussian, laplace, farid, median
from FeatureExtract.imageFeatures import garborFeature
from mainImagePlot import plotImagList
def makeImageFeatures(img):
df = pd.DataFrame()
img1 = img.reshape(-1)
print(img.shape, img1.shape)
df['Original Image'] = img1
for (name, feature) in garborFeature(img):
df[name] = feature.reshape(-1)
df['Canny Edge'] = cannyImg(img).reshape(-1)
df['Roberts'] = roberts(img).reshape(-1)
df['Sobel'] = sobel(img).reshape(-1)
df['Scharr'] = scharr(img).reshape(-1)
df['Prewitt'] = prewitt(img).reshape(-1)
df['Gaussian s3'] = gaussian(img, sigma=3).reshape(-1)
df['Gaussian s5'] = gaussian(img, sigma=5).reshape(-1)
df['Gaussian s7'] = gaussian(img, sigma=7).reshape(-1)
df['Lplace'] = laplace(img).reshape(-1)
df['Farid'] = farid(img).reshape(-1)
df['Median'] = median(img).reshape(-1)
return df
def makeDb(img, mask, dbFile):
df = makeImageFeatures(img)
label = mask.reshape(-1)
df['Label'] = label
print(df.head())
df.to_csv(dbFile, index=False)
def loadDb(file):
df = pd.read_csv(file)
Y = df['Label'].values
X = df.drop(columns=['Label'])
return X, Y
def showImageFeatures(img):
img = grayImg(img)
ls, nameList = [], []
for (name, feature) in garborFeature(img):
ls.append(feature), nameList.append(name)
plotImagList(ls, nameList, gray=True, title='', showticks=False)
def main():
file = r'.\res\FudanPed00001.png'
maskFile = r'.\res\FudanPed00001_mask.png'
dbFile = r'.\res\FudanPed00001.csv'
img = loadGrayImg(file)
mask = loadGrayImg(maskFile)
makeDb(img, mask, dbFile)
if __name__ == '__main__':
main()
| true | true |
1c31a7c66db65397cfc105dab85a9c9a189b3059 | 25,804 | py | Python | bsz_gimp_lib.py | Beinsezii/bsz-gimp-plugins | bb735ddbf47fa4f2d383f4e359518c0809ab6e09 | [
"MIT"
] | 6 | 2020-12-03T14:50:32.000Z | 2022-02-04T03:15:44.000Z | bsz_gimp_lib.py | Beinsezii/bsz-gimp-plugins | bb735ddbf47fa4f2d383f4e359518c0809ab6e09 | [
"MIT"
] | null | null | null | bsz_gimp_lib.py | Beinsezii/bsz-gimp-plugins | bb735ddbf47fa4f2d383f4e359518c0809ab6e09 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Shared code between plugins.
Use python's help() for prettier help info.
"""
import gi
gi.require_version('Gimp', '3.0')
from gi.repository import Gimp
gi.require_version('Gegl', '0.4')
from gi.repository import Gegl
from gi.repository import GObject
from gi.repository import GLib
# from gi.repository import Gio
from abc import ABC, abstractmethod
# UI imports. Can't figure out a good way to only import these
# in INTERACTIVE mode while keeping ui stuff in the params.
gi.require_version('GimpUi', '3.0')
from gi.repository import GimpUi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk # noqa: F401
gi.require_version('Gdk', '3.0')
from gi.repository import Gdk
import sys
import os.path
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import bszgw
import threading
import time
def PDB(procedure: str, *args):
    # {{{
    """Call a GIMP PDB procedure with auto-converted positional args.

    Supported argument types are str, Gimp.RunMode, Gimp.Image and
    Gimp.Drawable; anything else raises ValueError.
    """
    values = Gimp.ValueArray.new(len(args))
    # Order matters: str is checked first.
    conversions = (
        (str, GObject.TYPE_STRING),
        (Gimp.RunMode, Gimp.RunMode),
        (Gimp.Image, Gimp.Image),
        (Gimp.Drawable, Gimp.Drawable),
    )
    for idx, arg in enumerate(args):
        for py_type, gtype in conversions:
            if isinstance(arg, py_type):
                break
        else:
            raise ValueError("PDB Type not supported")
        values.insert(idx, GObject.Value(gtype, arg))
    return Gimp.get_pdb().run_procedure_array(procedure, values)
    # }}}
# Human-readable label -> GEGL/SVG compositor operation name, for building
# blend-mode choosers (e.g. via ParamCombo).  Insertion order is the order
# the entries appear in the UI, so do not reorder.
GEGL_COMPOSITORS = {
    # {{{
    "Source": "svg:src",
    "Source-Atop": "svg:src-atop",
    "Source-In": "svg:src-in",
    "Source-Out": "svg:src-out",
    "Source-Over": "svg:src-over",
    "Destination": "svg:dst",
    "Destination-Atop": "svg:dst-atop",
    "Destination-In": "svg:dst-in",
    "Destination-Out": "svg:dst-out",
    "Destination-Over": "svg:dst-over",
    "Lighten": "svg:lighten",
    "Screen": "svg:screen",
    "Color-Dodge": "svg:color-dodge",
    "Add": "gegl:add",
    "Plus": "svg:plus",
    "Darken": "svg:darken",
    "Multiply": "gegl:multiply",
    "Color-Burn": "svg:color-burn",
    "Overlay": "svg:overlay",
    "Soft-Light": "gegl:soft-light",
    "Hard-Light": "svg:hard-light",
    "Difference": "svg:difference",
    "Exclusion": "svg:exclusion",
    "Subtract": "gegl:subtract",
    "Divide": "gegl:divide",
    "Gamma": "gegl:gamma",
    "Seamless-Clone-Compose": "gegl:seamless-clone-compose",
    "Weighted-Blend": "gegl:weighted-blend",
    "Clear": "svg:clear",
    "Xor": "svg:xor",
}  # }}}
class Param(ABC):
    # {{{
    """Base class for the parameter descriptors consumed by PlugIn.

    A Param bundles a default value, GObject property metadata and a
    lazily-built GTK widget.  Subclasses implement widget construction
    and the value <-> widget plumbing.
    """

    def __init__(self, name: str, value,
                 description: str = "", ui_preview: bool = True,
                 ui_column: int = 0, ui_row: int = 0,
                 ui_width: int = 1, ui_height: int = 1):
        self.name = name
        # An empty description falls back to the display name.
        self.description = description if description else name
        self.ui_preview = ui_preview
        self.ui_column = ui_column
        self.ui_row = ui_row
        self.ui_width = ui_width
        self.ui_height = ui_height
        self.value = value
        self._cached_widget = None

    @abstractmethod
    def create_widget(self):
        """Build and return a fresh widget for this parameter."""

    @abstractmethod
    def connect_changed(self, function: callable, *args):
        """Hook *function* up to the widget's value-changed signal."""

    def connect_preview(self, function: callable, *args):
        """Like connect_changed, but only when live preview is enabled.

        `pass`-style connect_changed implementations make this a no-op
        for widgets where previewing makes no sense.
        """
        if self.ui_preview:
            self.connect_changed(function, *args)

    def ui_reset(self):
        """Snap the widget back to the stored default value.

        Relies on the subclass's ui_value setter, so no per-class
        override is needed.
        """
        self.ui_value = self.value

    @property
    @abstractmethod
    def gproperty(self):
        """dict holding the GObject property spec for this parameter."""

    @property
    @abstractmethod
    def ui_value(self):
        """Current value shown in the widget (shadow of the widget's value)."""

    @ui_value.setter
    @abstractmethod
    def ui_value(self, new):
        pass

    @property
    def widget(self):
        """The parameter's widget, built on first access (read-only)."""
        if self._cached_widget is None:
            self._cached_widget = self.create_widget()
        return self._cached_widget
    # }}}
class ParamBool(Param):
    # {{{
    """Boolean parameter rendered as a BSZGW CheckButton."""

    def __init__(self, name: str, value: bool,
                 description: str = "", ui_preview: bool = True,
                 ui_column: int = 0, ui_row: int = 0,
                 ui_width: int = 1, ui_height: int = 1):
        super().__init__(name, value, description, ui_preview,
                         ui_column, ui_row, ui_width, ui_height)

    def create_widget(self):
        """Check button labelled with the parameter name."""
        button = bszgw.CheckButton(self.name, self.value)
        button.props.tooltip_text = self.description
        return button

    def connect_changed(self, function, *args):
        self.widget.connect_changed(function, *args)

    @property
    def gproperty(self):
        key = self.name.lower().replace(' ', '-')
        return {key: (bool, self.name, self.description, self.value,
                      GObject.ParamFlags.READWRITE)}

    @property
    def ui_value(self):
        return self.widget.value

    @ui_value.setter
    def ui_value(self, new):
        self.widget.value = new
    # }}}
class ParamCombo(Param):
    # {{{
    """Pick-one parameter rendered as a BSZGW ComboBox built from a dict."""

    def __init__(self, name: str, dictionary: dict, value,
                 description: str = "", ui_preview: bool = True,
                 ui_column: int = 0, ui_row: int = 0,
                 ui_width: int = 1, ui_height: int = 1):
        super().__init__(name, value, description, ui_preview,
                         ui_column, ui_row, ui_width, ui_height)
        # Mapping of display label -> value, fed to ComboBox.new_dict.
        self.dictionary = dictionary

    def create_widget(self):
        """Combo box listing the dictionary entries (labels only)."""
        combo = bszgw.ComboBox.new_dict(
            self.dictionary,
            self.value,
            show_ids=False,
        )
        combo.props.tooltip_text = self.description
        return combo

    def connect_changed(self, function, *args):
        self.widget.connect_changed(function, *args)

    @property
    def gproperty(self):
        key = self.name.lower().replace(' ', '-')
        return {key: (str, self.name, self.description, self.value,
                      GObject.ParamFlags.READWRITE)}

    @property
    def ui_value(self):
        return self.widget.value

    @ui_value.setter
    def ui_value(self, new):
        self.widget.value = new
    # }}}
class ParamNumber(Param):
    # {{{
    """Numeric (int or float) parameter rendered as a BSZGW SpinScale.

    AKA a cool slider.
    """

    def __init__(self, name: str, value: int, min, max,
                 description: str = "", ui_preview: bool = True,
                 ui_column: int = 0, ui_row: int = 0,
                 ui_width: int = 1, ui_height: int = 1,
                 integer: bool = False,
                 ui_step: int = 1, ui_logarithmic: bool = False):
        super().__init__(name, value, description, ui_preview,
                         ui_column, ui_row, ui_width, ui_height)
        self.min = min
        self.max = max
        self.integer = integer
        self.ui_step = ui_step
        self.ui_logarithmic = ui_logarithmic

    def create_widget(self):
        """Slider + spin button honoring the numeric limits."""
        scale = bszgw.SpinScale.new(
            value=self.value,
            min_value=self.min,
            max_value=self.max,
            step_increment=self.ui_step,
            page_increment=self.ui_step,
            label=self.name,
            # Integers get no decimal places in the spin display.
            digits=0 if self.integer else 2,
            logarithmic=self.ui_logarithmic
        )
        scale.props.tooltip_text = self.description
        return scale

    def connect_changed(self, function, *args):
        self.widget.connect_changed(function, *args)

    @property
    def gproperty(self):
        key = self.name.lower().replace(' ', '-')
        return {key: (int if self.integer else float,
                      self.name, self.description,
                      self.min, self.max, self.value,
                      GObject.ParamFlags.READWRITE)}

    @property
    def ui_value(self):
        return self.widget.value

    @ui_value.setter
    def ui_value(self, new):
        self.widget.value = new
    # }}}
class ParamNumberChain(Param):
    # {{{
    """Link widget tying two ParamNumbers together.

    While the check button is active, changing either linked
    ParamNumber copies its value onto the other.  Note chain ui columns
    are laid out separately from the regular parameter columns, so this
    is currently only visually good for chaining across columns.
    """

    def __init__(self, name: str, value: bool,
                 param1: ParamNumber, param2: ParamNumber,
                 description: str = "",
                 ui_column: int = 0, ui_row: int = 0,
                 ui_width: int = 1, ui_height: int = 1):
        # Chains never trigger previews themselves (ui_preview=False).
        super().__init__(name, value, description, False,
                         ui_column, ui_row, ui_width, ui_height)
        self.param1 = param1
        self.param2 = param2

    def create_widget(self):
        """Check button whose activation mirrors values between the params."""
        for src, dst in ((self.param1, self.param2),
                         (self.param2, self.param1)):
            src.widget.adjustment.connect(
                "value-changed", self.update, src, dst)
        # Currently Gimp.ChainButton() is borked, so a plain check
        # button stands in:
        # return GimpUi.ChainButton(active=self.value)
        button = bszgw.CheckButton("Link", self.value)
        button.props.tooltip_text = self.description
        return button

    def connect_changed(self, function, *args):
        # Nothing to forward: the chain reacts to its params instead.
        pass

    def update(self, widget, from_param, to_param):
        """Mirror from_param's value onto to_param while linked."""
        if self.widget.get_active():
            # Guard against signal ping-pong on logarithmic scales:
            # only write when the values actually differ.
            if to_param.ui_value != from_param.ui_value:
                to_param.ui_value = from_param.ui_value

    @property
    def gproperty(self):
        # Chains are UI-only; they register no PDB property.
        return None

    @property
    def ui_value(self):
        return self.widget.get_active()

    @ui_value.setter
    def ui_value(self, new):
        self.widget.set_active(new)
    # }}}
class ParamString(Param):
    # {{{
    """Text parameter rendered as a BSZGW Entry.

    Unlike the other params, live preview defaults to False, since
    re-rendering on every keystroke is rarely wanted.
    """

    def __init__(self, name: str, value: str,
                 description: str = "", ui_preview: bool = False,
                 ui_column: int = 0, ui_row: int = 0,
                 ui_width: int = 1, ui_height: int = 1,
                 ui_multiline: bool = False,
                 ui_min_width: int = 300, ui_min_height: int = 100):
        super(ParamString, self).__init__(name, value,
                                          description, ui_preview,
                                          ui_column, ui_row,
                                          ui_width, ui_height)
        self.ui_multiline = ui_multiline
        self.ui_min_width = ui_min_width
        self.ui_min_height = ui_min_height

    def connect_changed(self, function, *args):
        self.widget.connect_changed(function, *args)

    def create_widget(self):
        """Single- or multi-line text entry."""
        widget = bszgw.Entry(
            value=self.value,
            label=self.name,
            multi_line=self.ui_multiline,
            min_width=self.ui_min_width,
            min_height=self.ui_min_height
        )
        widget.props.tooltip_text = self.description
        return widget

    @property
    def gproperty(self):
        return {self.name.lower().replace(' ', '-'):
                (str,
                 self.name,
                 # Fix: previously passed self.name as the blurb (the
                 # author's "# desc?" marker); use self.description like
                 # every other Param subclass.  Backward-compatible, as
                 # description defaults to name in Param.__init__.
                 self.description,
                 self.value,
                 GObject.ParamFlags.READWRITE)
                }

    @property
    def ui_value(self):
        return self.widget.value

    @ui_value.setter
    def ui_value(self, new):
        self.widget.value = new
    # }}}
class PreviewThread(threading.Thread):
    # {{{
    """Debounce thread for live previews.

    Calls *function(\\*args)* once the last request_preview() call is at
    least 0.5 seconds old; polls every 0.1 seconds until stop().
    """

    def __init__(self, function, *args):
        super(PreviewThread, self).__init__()
        self.function = function
        self.args = args
        self.time = time.time()   # timestamp of the latest request
        self.active = True        # main-loop flag, cleared by stop()
        self.request = True       # pending-preview flag

    def run(self):
        """Poll loop started via thread.start(); don't call directly."""
        while self.active:
            time.sleep(0.1)
            quiet_long_enough = time.time() - self.time > 0.5
            if quiet_long_enough and self.request:
                self.function(*self.args)
                self.time = time.time()
                self.request = False

    def request_preview(self, *args):
        """Flag a preview and restart the 0.5 s debounce window.

        Extra args are accepted (and ignored) so this can be a signal
        callback.
        """
        self.request = True
        self.time = time.time()

    def stop(self, *args):
        """Exit the poll loop and wait for the thread to finish."""
        self.active = False
        self.join()
    # }}}
class PlugIn():
    # {{{
    """Automatically creates a gimp plugin UI from given Param classes.
    It's basically the old GimpFu but way cooler and more unstable.
    Check out one of my scripts that uses it and you'll instantly go
    \"ah it's like that\".

    In every run mode, `function` is called as
    function(image, drawable, *param_values) with one value per Param
    (ParamNumberChains excluded)."""
    # Get & save properties
    def __init__(self, name: str, function: callable, *params: Param,
                 description: str, alt_description: str = None,
                 gegl_preview: bool = True,
                 procedure_name: str = None, images: str = "RGB*",
                 path: str = "<Image>/Beinsezii/", icon=GimpUi.ICON_GEGL,
                 authors: str = "Beinsezii", copyright: str = None,
                 date: str = "2020"):
        # {{{
        """Collect plugin metadata and build the PDB Procedure class.

        procedure_name defaults to the menu name lowercased and
        hyphenated; alt_description defaults to description; copyright
        defaults to authors.
        """
        if not procedure_name:
            procedure_name = name.lower().replace(" ", "-")
        if not alt_description:
            alt_description = description
        if not copyright:
            copyright = authors
        # Merge each Param's GObject property spec; chains return None
        # and are skipped.
        gproperties = {}
        for param in params:
            gproperty = param.gproperty
            if gproperty:
                gproperties.update(gproperty)
        class Procedure(Gimp.PlugIn):
            # {{{
            """The generated pdb procedure stuff. Class inside a class.
            'Why not just have PlugIn inherit Gimp.PlugIn' you ask?
            because it doesn't FUKKEN work. Believe me I *really* dislike this solution,
            and I initially tried inheriting. The problem is the way gimp handles the class
            when you feed it. If you super the init, it just crashes, and if you don't,
            it works except for the fact that your entire init block is ignored,
            so self.name and everything get unset. It's like it runs an uninitiated
            PlugIn.run(), which I didn't even know was possible, since surely the
            self required by run() needs to go through PlugIn's __init__, right?
            If I figure out literally anything that's still an all-in-one builder solution
            and looks nicer I'll replace it ASAP."""
            # basically create a dict of parameters the plugin takes
            __gproperties__ = gproperties
            # GimpPlugIn virtual methods
            # Not completely sure how they work
            # Why do they have 'do_' in front
            # when it's never mentioned in the gir docs?
            # NOTE: 'self2' is used so the closure keeps the outer
            # PlugIn 'self' (e.g. self.run) visible inside this class.
            def do_query_procedures(self2):
                # {{{
                # This section can also be used to provide translations,
                # but I have no idea how it works or know any other languages
                # so I'm going to ignore that for now.
                # script name as it shows up in the PDB
                return [procedure_name]
                # }}}
            def do_create_procedure(self2, name2):
                # {{{
                # Will almost always be ImageProcedure using PLUGIN proctype
                procedure = Gimp.ImageProcedure.new(
                    self2, name2,
                    Gimp.PDBProcType.PLUGIN,
                    # name of function if something other than 'run'
                    self.run, None
                )
                # Supported colorspaces
                procedure.set_image_types(images)
                # Name in menu
                procedure.set_menu_label(name)
                # Icon. See Gimp-3.0.gir docs and gimp's icon folder for others
                # Usually plugins based on Gegl operations use ICON_GEGL
                # while the rest use ICON_SYSTEM_RUN
                procedure.set_icon_name(icon)
                # Location in the top menu, with <Image> being root
                procedure.add_menu_path(path)
                # Help text. First set is in-menu, second is PDB
                procedure.set_documentation(
                    description,
                    alt_description,
                    name2
                )
                # Maker man
                procedure.set_attribution(authors, copyright, date)
                # add the gproperties to the procedures
                for key in gproperties:
                    procedure.add_argument_from_property(self2, key)
                return procedure
                # }}}
            # }}}
        self.Procedure = Procedure
        self.name = name
        self.function = function
        self.params = params
        self.gegl_preview = gegl_preview
        # }}}
    # I decided to name the function called by the PDB procedure 'run'
    def run(self, procedure, run_mode, image, n_drawables, drawables, args, run_data):
        """PDB entry point: dispatch on run_mode and drive the GTK UI.

        Exactly one drawable is required; otherwise a CALLING_ERROR is
        returned.
        """
        # convert the ValueArray into a regular list
        if n_drawables != 1:
            error = GLib.Error.new_literal(
                Gimp.PlugIn.error_quark(),
                "Procedure '{}' only works with one drawable.".format(procedure.get_name()),
                0
            )
            return procedure.new_return_values(
                Gimp.PDBStatusType.CALLING_ERROR,
                error
            )
        else:
            drawable = drawables[0]
        # ValueArray.index(i) fetches the i-th value despite the name.
        args = [args.index(x) for x in range(args.length())]
        # if no params and therefore no widgets always run non-interactive
        if self.params == ():
            run_mode = Gimp.RunMode.NONINTERACTIVE
        # run_mode 'NONINTERACTIVE' is if another plugin calls it through PDB
        if run_mode == Gimp.RunMode.NONINTERACTIVE:
            self.function(image, drawable, *args)
        # run_mode 'WITH_LAST_VALS' is when you use Ctrl-F aka 'Repeat'
        # seems the gimp shelf isn't implemented yet?
        # Falls through to INTERACTIVE below after warning the user.
        if run_mode == Gimp.RunMode.WITH_LAST_VALS:
            # {{{
            PDB("gimp-message", "Repeate not supported yet")
            run_mode = Gimp.RunMode.INTERACTIVE
            # }}}
        # run_mode 'INTERACTIVE' means clicked in the menu
        if run_mode == Gimp.RunMode.INTERACTIVE:
            # puts all ui params into a list
            # ignors ui-specific params like chains
            def ui_vals():
                # {{{
                vals = []
                for param in self.params:
                    if not isinstance(param, ParamNumberChain):
                        vals.append(param.ui_value)
                return vals
                # }}}
            # final run and destroy app.
            # maybe it should only destroy if there's no preview?
            def run_fn(widget):
                # {{{
                preview_thread.stop()
                clear_preview()
                # One undo group so the whole plugin run is a single undo step.
                image.undo_group_start()
                self.function(image, drawable,
                              *ui_vals())
                image.undo_group_end()
                app.destroy()
                # }}}
            run_button = bszgw.Button("Run", run_fn)
            def reset_fn(widget):
                for param in self.params:
                    param.ui_reset()
            reset_button = bszgw.Button("Reset", reset_fn)
            Gegl.init(None)
            # Pristine copy of the drawable's pixels, restored by
            # clear_preview() to undo preview renders.
            self.buffer = drawable.get_buffer().dup()
            self.has_preview = False
            self.flush = False
            # if any preview layers, delete them and thaw
            # TODO: hide base layers when preview is up
            def clear_preview(*args):
                # {{{
                if self.has_preview:
                    # self.drawable.buffer = self.buffer
                    intersect, x, y, width, height = drawable.mask_intersect()
                    if intersect:
                        # Copy the saved buffer back over the drawable
                        # through a minimal Gegl graph.
                        Gegl.init(None)
                        tree = Gegl.Node()
                        target = drawable.get_buffer()
                        Input = tree.create_child("gegl:buffer-source")
                        Input.set_property("buffer", self.buffer)
                        Output = tree.create_child("gegl:write-buffer")
                        Output.set_property("buffer", target)
                        Input.link(Output)
                        Output.process()
                        if self.flush:
                            target.flush()
                            drawable.update(x, y, width, height)
                            Gimp.displays_flush()
                    self.has_preview = False
                while not image.undo_is_enabled():
                    image.undo_thaw()
                # }}}
            # if preview function, get new preview layer[s] from
            # self.preview_function and add them to self.preview_layers
            def preview_fn(*args):
                # {{{
                if self.gegl_preview:
                    clear_preview()
                    if preview_check.value:
                        # Freeze undo so preview renders don't pollute
                        # the undo history.
                        image.undo_freeze()
                        self.function(image, drawable, *ui_vals())
                        self.has_preview = True
                # }}}
            # creates preview_check, starts the live preview thread,
            # and has the widgets connect to function
            preview_thread = PreviewThread(preview_fn)
            preview_thread.start()
            if self.gegl_preview:
                # {{{
                preview_button = bszgw.Button("Update", preview_fn)
                preview_button.props.hexpand = True
                preview_check = bszgw.CheckButton("Preview", True)
                def onclick(*args):
                    self.flush = not preview_check.value
                preview_check.connect("clicked", onclick)
                preview_check.connect("clicked", preview_fn)
                for param in self.params:
                    param.connect_preview(preview_thread.request_preview)
                # }}}
            else:
                preview_button = None
                preview_check = None
            # creates buttons box to avoid attaching buttons directly.
            # reduces buggery with grid attach widths.
            # Creates the main grid using attach_all for collision detection.
            # Chains have separate columns since they're meant to be in-between
            # widgets connecting them.
            grid = bszgw.Grid()
            grid.props.margin = 10
            GC = bszgw.GridChild
            children = []
            max_off = 0
            for param in self.params:
                # Params occupy even columns; chains sit on the odd
                # column between their two linked params.
                col = param.ui_column * 2
                max_off = max(col, max_off)
                if isinstance(param, ParamNumberChain):
                    col += 1
                children.append(GC(param.widget,
                                   col_off=col, row_off=param.ui_row,
                                   width=param.ui_width,
                                   height=param.ui_height))
            buttons = bszgw.Grid()
            buttons.props.column_homogeneous = True
            # NOTE(review): when gegl_preview is False, preview_button and
            # preview_check are None yet still passed to attach_all_right —
            # presumably bszgw tolerates None children; verify.
            if max_off > 0 and self.gegl_preview:
                buttons.attach_all_right(preview_button, preview_check,
                                         reset_button, run_button)
            if self.gegl_preview:
                buttons = GC(buttons, col_off=max_off - 2, width=3)
            else:
                buttons.attach_all_right(preview_button, preview_check)
                buttons.attach_all_right(reset_button, run_button, row=1)
            grid.attach_all_down(*children, buttons)
            # create the app window with the grid
            app = bszgw.App(self.name, grid)
            # hints it as a pop-up instead of a full window.
            app.props.type_hint = Gdk.WindowTypeHint.DIALOG
            # clear preview on destroy
            def destroy_fn(*args):
                preview_thread.stop()
                self.flush = True
                clear_preview()
            app.connect("destroy", destroy_fn)
            # create preview before start
            app.launch()
        # Don't actually really know what this does but seems important
        return procedure.new_return_values(
            Gimp.PDBStatusType.SUCCESS, GLib.Error())
    # }}}
| 35.29959 | 92 | 0.55317 |
import gi
gi.require_version('Gimp', '3.0')
from gi.repository import Gimp
gi.require_version('Gegl', '0.4')
from gi.repository import Gegl
from gi.repository import GObject
from gi.repository import GLib
from abc import ABC, abstractmethod
# in INTERACTIVE mode while keeping ui stuff in the params.
gi.require_version('GimpUi', '3.0')
from gi.repository import GimpUi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk # noqa: F401
gi.require_version('Gdk', '3.0')
from gi.repository import Gdk
import sys
import os.path
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import bszgw
import threading
import time
def PDB(procedure: str, *args):
# {{{
argsv = Gimp.ValueArray.new(len(args))
for num, arg in enumerate(args):
if isinstance(arg, str):
gtype = GObject.TYPE_STRING
elif isinstance(arg, Gimp.RunMode):
gtype = Gimp.RunMode
elif isinstance(arg, Gimp.Image):
gtype = Gimp.Image
elif isinstance(arg, Gimp.Drawable):
gtype = Gimp.Drawable
else:
raise ValueError("PDB Type not supported")
argsv.insert(num, GObject.Value(gtype, arg))
return Gimp.get_pdb().run_procedure_array(procedure, argsv)
# }}}
GEGL_COMPOSITORS = {
# {{{
"Source": "svg:src",
"Source-Atop": "svg:src-atop",
"Source-In": "svg:src-in",
"Source-Out": "svg:src-out",
"Source-Over": "svg:src-over",
"Destination": "svg:dst",
"Destination-Atop": "svg:dst-atop",
"Destination-In": "svg:dst-in",
"Destination-Out": "svg:dst-out",
"Destination-Over": "svg:dst-over",
"Lighten": "svg:lighten",
"Screen": "svg:screen",
"Color-Dodge": "svg:color-dodge",
"Add": "gegl:add",
"Plus": "svg:plus",
"Darken": "svg:darken",
"Multiply": "gegl:multiply",
"Color-Burn": "svg:color-burn",
"Overlay": "svg:overlay",
"Soft-Light": "gegl:soft-light",
"Hard-Light": "svg:hard-light",
"Difference": "svg:difference",
"Exclusion": "svg:exclusion",
"Subtract": "gegl:subtract",
"Divide": "gegl:divide",
"Gamma": "gegl:gamma",
"Seamless-Clone-Compose": "gegl:seamless-clone-compose",
"Weighted-Blend": "gegl:weighted-blend",
"Clear": "svg:clear",
"Xor": "svg:xor",
} # }}}
class Param(ABC):
# {{{
def __init__(self, name: str, value,
description: str = "", ui_preview: bool = True,
ui_column: int = 0, ui_row: int = 0,
ui_width: int = 1, ui_height: int = 1):
self.name = name
if not description:
self.description = name
else:
self.description = description
self.ui_preview = ui_preview
self.ui_column = ui_column
self.ui_row = ui_row
self.ui_width = ui_width
self.ui_height = ui_height
self.value = value
self.__widget = None
def connect_preview(self, function: callable, *args):
if self.ui_preview:
self.connect_changed(function, *args if args else ())
@abstractmethod
def create_widget(self):
pass
@abstractmethod
def connect_changed(self, function: callable, *args):
pass
def ui_reset(self):
self.ui_value = self.value
@property
@abstractmethod
def gproperty(self):
pass
@property
@abstractmethod
def ui_value(self):
pass
@ui_value.setter
@abstractmethod
def ui_value(self, new):
pass
@property
def widget(self):
if self.__widget is None:
self.__widget = self.create_widget()
return self.__widget
# }}}
class ParamBool(Param):
# {{{
def __init__(self, name: str, value: bool,
description: str = "", ui_preview: bool = True,
ui_column: int = 0, ui_row: int = 0,
ui_width: int = 1, ui_height: int = 1):
super(ParamBool, self).__init__(name, value,
description, ui_preview,
ui_column, ui_row,
ui_width, ui_height)
def connect_changed(self, function, *args):
self.widget.connect_changed(function, *args)
def create_widget(self):
widget = bszgw.CheckButton(self.name, self.value)
widget.props.tooltip_text = self.description
return widget
@property
def gproperty(self):
return {self.name.lower().replace(' ', '-'):
(bool,
self.name,
self.description,
self.value,
GObject.ParamFlags.READWRITE)
}
@property
def ui_value(self):
return self.widget.value
@ui_value.setter
def ui_value(self, new):
self.widget.value = new
# }}}
class ParamCombo(Param):
# {{{
def __init__(self, name: str, dictionary: dict, value,
description: str = "", ui_preview: bool = True,
ui_column: int = 0, ui_row: int = 0,
ui_width: int = 1, ui_height: int = 1):
super(ParamCombo, self).__init__(name, value,
description, ui_preview,
ui_column, ui_row,
ui_width, ui_height)
self.dictionary = dictionary
def connect_changed(self, function, *args):
self.widget.connect_changed(function, *args)
def create_widget(self):
widget = bszgw.ComboBox.new_dict(
self.dictionary,
self.value,
show_ids=False,
)
widget.props.tooltip_text = self.description
return widget
@property
def gproperty(self):
return {self.name.lower().replace(' ', '-'):
(str,
self.name,
self.description,
self.value,
GObject.ParamFlags.READWRITE)
}
@property
def ui_value(self):
return self.widget.value
@ui_value.setter
def ui_value(self, new):
self.widget.value = new
# }}}
class ParamNumber(Param):
# {{{
def __init__(self, name: str, value: int, min, max,
description: str = "", ui_preview: bool = True,
ui_column: int = 0, ui_row: int = 0,
ui_width: int = 1, ui_height: int = 1,
integer: bool = False,
ui_step: int = 1, ui_logarithmic: bool = False):
super(ParamNumber, self).__init__(name, value,
description, ui_preview,
ui_column, ui_row,
ui_width, ui_height)
self.min = min
self.max = max
self.integer = integer
self.ui_step = ui_step
self.ui_logarithmic = ui_logarithmic
def connect_changed(self, function, *args):
self.widget.connect_changed(function, *args)
def create_widget(self):
widget = bszgw.SpinScale.new(
value=self.value,
min_value=self.min,
max_value=self.max,
step_increment=self.ui_step,
page_increment=self.ui_step,
label=self.name,
digits=0 if self.integer else 2,
logarithmic=self.ui_logarithmic
)
widget.props.tooltip_text = self.description
return widget
@property
def gproperty(self):
return {self.name.lower().replace(' ', '-'):
(int if self.integer else float,
self.name,
self.description,
self.min, self.max, self.value,
GObject.ParamFlags.READWRITE)
}
@property
def ui_value(self):
return self.widget.value
@ui_value.setter
def ui_value(self, new):
self.widget.value = new
# }}}
class ParamNumberChain(Param):
# {{{
def __init__(self, name: str, value: bool,
param1: ParamNumber, param2: ParamNumber,
description: str = "",
ui_column: int = 0, ui_row: int = 0,
ui_width: int = 1, ui_height: int = 1):
super(ParamNumberChain, self).__init__(name, value,
description, False,
ui_column, ui_row,
ui_width, ui_height)
self.param1 = param1
self.param2 = param2
def create_widget(self):
self.param1.widget.adjustment.connect(
"value-changed", self.update, self.param1, self.param2)
self.param2.widget.adjustment.connect(
"value-changed", self.update, self.param2, self.param1)
widget = bszgw.CheckButton("Link", self.value)
widget.props.tooltip_text = self.description
return widget
# # Currently Gimp.ChainButton() is borked
# return GimpUi.ChainButton(active=self.value)
def connect_changed(self, function, *args):
pass
def update(self, widget, from_param, to_param):
if self.widget.get_active():
# using logarithmic scales can cause an update-loop
# thus we *double* check that the values aren't identical
if to_param.ui_value != from_param.ui_value:
to_param.ui_value = from_param.ui_value
@property
def gproperty(self):
return None
@property
def ui_value(self):
return self.widget.get_active()
@ui_value.setter
def ui_value(self, new):
self.widget.set_active(new)
class ParamString(Param):
def __init__(self, name: str, value: str,
description: str = "", ui_preview: bool = False,
ui_column: int = 0, ui_row: int = 0,
ui_width: int = 1, ui_height: int = 1,
ui_multiline: bool = False,
ui_min_width: int = 300, ui_min_height: int = 100):
super(ParamString, self).__init__(name, value,
description, ui_preview,
ui_column, ui_row,
ui_width, ui_height)
self.ui_multiline = ui_multiline
self.ui_min_width = ui_min_width
self.ui_min_height = ui_min_height
def connect_changed(self, function, *args):
self.widget.connect_changed(function, *args)
def create_widget(self):
widget = bszgw.Entry(
value=self.value,
label=self.name,
multi_line=self.ui_multiline,
min_width=self.ui_min_width,
min_height=self.ui_min_height
)
widget.props.tooltip_text = self.description
return widget
@property
def gproperty(self):
return {self.name.lower().replace(' ', '-'):
(str,
self.name,
self.name,
self.value,
GObject.ParamFlags.READWRITE)
}
@property
def ui_value(self):
return self.widget.value
@ui_value.setter
def ui_value(self, new):
self.widget.value = new
class PreviewThread(threading.Thread):
    """Debouncing worker that rate-limits live-preview refreshes.

    Runs ``function(*args)`` roughly half a second after the most recent
    request_preview() call (trailing debounce, polled at 10 Hz), so widgets
    can emit change signals as fast as they like without flooding the
    potentially expensive preview function.
    """

    def __init__(self, function, *args):
        # daemon=True so a forgotten stop() can never keep the host
        # process alive after the dialog is gone.
        super(PreviewThread, self).__init__(daemon=True)
        self.function = function
        self.args = args
        self.time = time.time()   # timestamp of the last request/refresh
        self.active = True        # main-loop flag; cleared by stop()
        self.request = True       # True -> a refresh is pending

    def run(self):
        # Poll at 10 Hz; fire the preview only when a request is pending
        # and at least 0.5 s has passed since the most recent request.
        while self.active:
            time.sleep(0.1)
            if time.time() - self.time > 0.5 and self.request:
                self.function(*self.args)
                self.time = time.time()
                self.request = False

    def request_preview(self, *args):
        """Mark that a refresh is wanted; also resets the debounce timer.

        Extra args are accepted so this can be used directly as a GTK
        signal handler.
        """
        self.request = True
        self.time = time.time()

    def stop(self, *args):
        """Stop the polling loop and wait for the thread to finish.

        Safe to call even if the thread was never started — previously
        this raised RuntimeError from join().
        """
        self.active = False
        if self.is_alive():
            self.join()
class PlugIn():
    """Wraps a plain Python function into a complete GIMP 3 plug-in.

    Given Param descriptors, builds a Gimp.PlugIn subclass whose PDB
    procedure arguments mirror the params, and (when run interactively)
    auto-builds a GTK dialog with optional live on-canvas GEGL preview.
    """
    def __init__(self, name: str, function: callable, *params: Param,
                 description: str, alt_description: str = None,
                 gegl_preview: bool = True,
                 procedure_name: str = None, images: str = "RGB*",
                 path: str = "<Image>/Beinsezii/", icon=GimpUi.ICON_GEGL,
                 authors: str = "Beinsezii", copyright: str = None,
                 date: str = "2020"):
        """Builds and stores the Gimp.PlugIn subclass for this plug-in.

        Args:
            name: menu label and dialog title.
            function: callable(image, drawable, *param_values) doing the work.
            *params: Param descriptors, one PDB argument / widget each.
            description / alt_description: menu and PDB help text.
            gegl_preview: enable the live preview machinery.
            procedure_name: PDB name; defaults to `name` slugified.
            images: image types the procedure accepts.
            path / icon / authors / copyright / date: registration metadata.
        """
        # Derive optional metadata that wasn't supplied.
        if not procedure_name:
            procedure_name = name.lower().replace(" ", "-")
        if not alt_description:
            alt_description = description
        if not copyright:
            copyright = authors
        # Collect the GObject property specs contributed by each param;
        # chain params return None and are skipped.
        gproperties = {}
        for param in params:
            gproperty = param.gproperty
            if gproperty:
                gproperties.update(gproperty)

        class Procedure(Gimp.PlugIn):
            __gproperties__ = gproperties

            def do_query_procedures(self2):
                # {{{
                # This section can also be used to provide translations,
                # but I have no idea how it works or know any other languages
                # so I'm going to ignore that for now.
                return [procedure_name]

            def do_create_procedure(self2, name2):
                procedure = Gimp.ImageProcedure.new(
                    self2, name2,
                    Gimp.PDBProcType.PLUGIN,
                    self.run, None
                )
                procedure.set_image_types(images)
                procedure.set_menu_label(name)
                # Usually plugins based on Gegl operations use ICON_GEGL
                # while the rest use ICON_SYSTEM_RUN
                procedure.set_icon_name(icon)
                # Location in the top menu, with <Image> being root
                procedure.add_menu_path(path)
                # Help text. First set is in-menu, second is PDB
                procedure.set_documentation(
                    description,
                    alt_description,
                    name2
                )
                # Maker man
                procedure.set_attribution(authors, copyright, date)
                # add the gproperties to the procedures
                for key in gproperties:
                    procedure.add_argument_from_property(self2, key)
                return procedure
                # }}}
        # }}}
        self.Procedure = Procedure
        self.name = name
        self.function = function
        self.params = params
        self.gegl_preview = gegl_preview
        # }}}

    # I decided to name the function called by the PDB procedure 'run'
    def run(self, procedure, run_mode, image, n_drawables, drawables, args, run_data):
        """PDB entry point: dispatches on run_mode; interactive mode
        builds and launches the auto-generated dialog."""
        # Exactly one drawable is supported.
        if n_drawables != 1:
            error = GLib.Error.new_literal(
                Gimp.PlugIn.error_quark(),
                "Procedure '{}' only works with one drawable.".format(procedure.get_name()),
                0
            )
            return procedure.new_return_values(
                Gimp.PDBStatusType.CALLING_ERROR,
                error
            )
        else:
            drawable = drawables[0]
        # convert the ValueArray into a regular list
        args = [args.index(x) for x in range(args.length())]
        # if no params and therefore no widgets always run non-interactive
        if self.params == ():
            run_mode = Gimp.RunMode.NONINTERACTIVE
        # run_mode 'NONINTERACTIVE' is if another plugin calls it through PDB
        if run_mode == Gimp.RunMode.NONINTERACTIVE:
            self.function(image, drawable, *args)
        # run_mode 'WITH_LAST_VALS' is when you use Ctrl-F aka 'Repeat'
        # seems the gimp shelf isn't implemented yet?
        if run_mode == Gimp.RunMode.WITH_LAST_VALS:
            PDB("gimp-message", "Repeate not supported yet")
            run_mode = Gimp.RunMode.INTERACTIVE
        if run_mode == Gimp.RunMode.INTERACTIVE:

            def ui_vals():
                # Snapshot every widget's current value, skipping chain
                # params (they contribute no PDB argument).
                vals = []
                for param in self.params:
                    if not isinstance(param, ParamNumberChain):
                        vals.append(param.ui_value)
                return vals

            def run_fn(widget):
                # {{{
                # Commit: stop previewing, roll the canvas back, then run
                # the real function inside a single undo group.
                preview_thread.stop()
                clear_preview()
                image.undo_group_start()
                self.function(image, drawable,
                              *ui_vals())
                image.undo_group_end()
                app.destroy()
                # }}}
            run_button = bszgw.Button("Run", run_fn)

            def reset_fn(widget):
                # Restore every widget to its declared default value.
                for param in self.params:
                    param.ui_reset()
            reset_button = bszgw.Button("Reset", reset_fn)

            Gegl.init(None)
            # Keep an untouched copy of the drawable's pixels so previews
            # can be rolled back later.
            self.buffer = drawable.get_buffer().dup()
            self.has_preview = False
            self.flush = False

            # if any preview layers, delete them and thaw
            # TODO: hide base layers when preview is up
            def clear_preview(*args):
                # {{{
                if self.has_preview:
                    # self.drawable.buffer = self.buffer
                    intersect, x, y, width, height = drawable.mask_intersect()
                    if intersect:
                        # Copy the saved buffer back onto the drawable via
                        # a minimal GEGL source -> write-buffer graph.
                        Gegl.init(None)
                        tree = Gegl.Node()
                        target = drawable.get_buffer()
                        Input = tree.create_child("gegl:buffer-source")
                        Input.set_property("buffer", self.buffer)
                        Output = tree.create_child("gegl:write-buffer")
                        Output.set_property("buffer", target)
                        Input.link(Output)
                        Output.process()
                        if self.flush:
                            target.flush()
                        drawable.update(x, y, width, height)
                        Gimp.displays_flush()
                    self.has_preview = False
                # Re-enable undo if preview runs left it frozen.
                while not image.undo_is_enabled():
                    image.undo_thaw()
                # }}}

            # if preview function, get new preview layer[s] from
            # self.preview_function and add them to self.preview_layers
            def preview_fn(*args):
                # {{{
                if self.gegl_preview:
                    clear_preview()
                    if preview_check.value:
                        # Freeze undo so the preview run doesn't pollute
                        # the user's undo history.
                        image.undo_freeze()
                        self.function(image, drawable, *ui_vals())
                        self.has_preview = True
                # }}}

            # creates preview_check, starts the live preview thread,
            # and has the widgets connect to function
            preview_thread = PreviewThread(preview_fn)
            preview_thread.start()
            if self.gegl_preview:
                # {{{
                preview_button = bszgw.Button("Update", preview_fn)
                preview_button.props.hexpand = True
                preview_check = bszgw.CheckButton("Preview", True)

                def onclick(*args):
                    # Only flush the target buffer while previews are off.
                    self.flush = not preview_check.value
                preview_check.connect("clicked", onclick)
                preview_check.connect("clicked", preview_fn)
                for param in self.params:
                    param.connect_preview(preview_thread.request_preview)
                # }}}
            else:
                preview_button = None
                preview_check = None

            # creates buttons box to avoid attaching buttons directly.
            # reduces buggery with grid attach widths.
            # Creates the main grid using attach_all for collision detection.
            # Chains have separate columns since they're meant to be in-between
            grid = bszgw.Grid()
            grid.props.margin = 10
            GC = bszgw.GridChild
            children = []
            max_off = 0
            for param in self.params:
                # Params occupy even columns; chain widgets sit on the odd
                # column between the two params they link.
                col = param.ui_column * 2
                max_off = max(col, max_off)
                if isinstance(param, ParamNumberChain):
                    col += 1
                children.append(GC(param.widget,
                                   col_off=col, row_off=param.ui_row,
                                   width=param.ui_width,
                                   height=param.ui_height))

            buttons = bszgw.Grid()
            buttons.props.column_homogeneous = True
            # NOTE(review): indentation of this branch was reconstructed;
            # confirm against upstream that the GC wrap only applies to the
            # multi-column preview layout.
            if max_off > 0 and self.gegl_preview:
                buttons.attach_all_right(preview_button, preview_check,
                                         reset_button, run_button)
                if self.gegl_preview:
                    buttons = GC(buttons, col_off=max_off - 2, width=3)
            else:
                buttons.attach_all_right(preview_button, preview_check)
                buttons.attach_all_right(reset_button, run_button, row=1)
            grid.attach_all_down(*children, buttons)

            app = bszgw.App(self.name, grid)
            app.props.type_hint = Gdk.WindowTypeHint.DIALOG

            def destroy_fn(*args):
                # Window closed without Run: stop previewing and restore
                # the original pixels.
                preview_thread.stop()
                self.flush = True
                clear_preview()
            app.connect("destroy", destroy_fn)
            app.launch()
        return procedure.new_return_values(
            Gimp.PDBStatusType.SUCCESS, GLib.Error())
# }}}
| true | true |
1c31a9b4789e5bca40f40ecac816609a2e3661a5 | 6,527 | py | Python | backend/api/utils/profile_parse.py | hack4impact-uiuc/mentee | c56945db8051e798c7bf6703577a0e50a54b0d67 | [
"MIT"
] | 7 | 2020-10-03T22:45:38.000Z | 2021-10-02T09:54:40.000Z | backend/api/utils/profile_parse.py | hack4impact-uiuc/mentee | c56945db8051e798c7bf6703577a0e50a54b0d67 | [
"MIT"
] | 265 | 2020-10-01T20:06:27.000Z | 2022-02-27T12:18:55.000Z | backend/api/utils/profile_parse.py | hack4impact-uiuc/mentee | c56945db8051e798c7bf6703577a0e50a54b0d67 | [
"MIT"
] | 1 | 2020-10-06T19:57:37.000Z | 2020-10-06T19:57:37.000Z | from bson import ObjectId
from api.core import logger
from api.models import db, Education, Video, MentorProfile, MenteeProfile, Image
from api.utils.request_utils import imgur_client
from api.utils.constants import Account
def new_profile(data: dict = None, profile_type: int = -1):
    """Builds a new (unsaved) profile document from POST request data.

    Args:
        data (dict): POST body. Defaults to None; falsy data yields None.
            (Previously defaulted to a mutable ``{}`` — a classic Python
            pitfall; ``None`` behaves identically via the falsy check.)
        profile_type (int): Account.MENTOR or Account.MENTEE. Defaults to -1.

    Returns:
        MentorProfile, MenteeProfile, or None when the input is empty or
        the account type is not recognized.

    Raises:
        KeyError: if a required field for the given account type is missing.
    """
    if not data or profile_type == -1:
        return None

    # Local renamed from `new_profile` so it no longer shadows the function.
    profile = None
    if profile_type == Account.MENTOR:
        profile = MentorProfile(
            firebase_uid=data["firebase_uid"],
            name=data["name"],
            email=data["email"],
            professional_title=data["professional_title"],
            specializations=data["specializations"],
            offers_in_person=data["offers_in_person"],
            offers_group_appointments=data["offers_group_appointments"],
            email_notifications=data.get("email_notifications", True),
            text_notifications=data.get("text_notifications", False),
        )
        profile.website = data.get("website")
        profile.linkedin = data.get("linkedin")

        if "videos" in data:
            video_data = data.get("videos")
            profile.videos = [
                Video(
                    title=video["title"],
                    url=video["url"],
                    tag=video["tag"],
                    date_uploaded=video["date_uploaded"],
                )
                for video in video_data
            ]
    elif profile_type == Account.MENTEE:
        profile = MenteeProfile(
            firebase_uid=data["firebase_uid"],
            name=data["name"],
            # TODO: Change this to the actual email and remove default
            email=data.get("email", "email@gmail.com"),
            email_notifications=data.get("email_notifications", True),
            text_notifications=data.get("text_notifications", False),
            organization=data["organization"],
            age=data["age"],
            gender=data["gender"],
            is_private=data.get("is_private", True),
        )
        if "video" in data:
            video_data = data.get("video")
            profile.video = Video(
                title=video_data["title"],
                url=video_data["url"],
                tag=video_data["tag"],
                date_uploaded=video_data["date_uploaded"],
            )
    else:
        # There is not match with mentee/mentor
        return None

    # Fields shared by both account types.
    profile.languages = data["languages"]
    profile.biography = data.get("biography")
    profile.phone_number = data.get("phone_number")
    profile.location = data.get("location")

    if "education" in data:
        education_data = data.get("education")
        profile.education = [
            Education(
                education_level=education.get("education_level"),
                majors=education.get("majors"),
                school=education.get("school"),
                graduation_year=education.get("graduation_year"),
            )
            for education in education_data
        ]
    return profile
def edit_profile(data: dict = None, profile: object = None):
    """Applies PUT request fields onto an existing profile, in place.

    Args:
        data (dict): PUT body; missing keys keep the current field values.
            Defaults to None (previously a mutable ``{}`` default — the
            falsy check makes ``None`` behave identically).
        profile (MentorProfile | MenteeProfile): document to mutate.

    Returns:
        bool: True if the profile was edited, False when input is missing.
    """
    if not data or not profile:
        return False

    if isinstance(profile, MentorProfile):
        # Edit fields or keep original data if no added data
        profile.professional_title = data.get(
            "professional_title", profile.professional_title
        )
        profile.specializations = data.get("specializations", profile.specializations)
        profile.offers_group_appointments = data.get(
            "offers_group_appointments", profile.offers_group_appointments
        )
        profile.offers_in_person = data.get(
            "offers_in_person", profile.offers_in_person
        )
        profile.linkedin = data.get("linkedin", profile.linkedin)
        profile.website = data.get("website", profile.website)

        # Create video objects for each item in list
        if "videos" in data:
            video_data = data.get("videos")
            profile.videos = [
                Video(
                    title=video.get("title"),
                    url=video.get("url"),
                    tag=video.get("tag"),
                    date_uploaded=video.get("date_uploaded"),
                )
                for video in video_data
            ]
    elif isinstance(profile, MenteeProfile):
        profile.age = data.get("age", profile.age)
        profile.gender = data.get("gender", profile.gender)
        profile.organization = data.get("organization", profile.organization)
        profile.is_private = data.get("is_private", profile.is_private)
        if "video" in data:
            video_data = data.get("video")
            profile.video = Video(
                title=video_data.get("title"),
                url=video_data.get("url"),
                tag=video_data.get("tag"),
                date_uploaded=video_data.get("date_uploaded"),
            )

    # Fields shared by both account types.
    profile.name = data.get("name", profile.name)
    profile.location = data.get("location", profile.location)
    profile.email = data.get("email", profile.email)
    profile.phone_number = data.get("phone_number", profile.phone_number)
    profile.languages = data.get("languages", profile.languages)
    profile.biography = data.get("biography", profile.biography)
    profile.text_notifications = data.get(
        "text_notifications", profile.text_notifications
    )
    profile.email_notifications = data.get(
        "email_notifications", profile.email_notifications
    )

    # Create education object
    if "education" in data:
        education_data = data.get("education")
        profile.education = [
            Education(
                education_level=education.get("education_level"),
                majors=education.get("majors"),
                school=education.get("school"),
                graduation_year=education.get("graduation_year"),
            )
            for education in education_data
        ]
    return True
| 36.668539 | 86 | 0.597978 | from bson import ObjectId
from api.core import logger
from api.models import db, Education, Video, MentorProfile, MenteeProfile, Image
from api.utils.request_utils import imgur_client
from api.utils.constants import Account
def new_profile(data: dict = {}, profile_type: int = -1):
if not data or profile_type == -1:
return None
new_profile = None
if profile_type == Account.MENTOR:
new_profile = MentorProfile(
firebase_uid=data["firebase_uid"],
name=data["name"],
email=data["email"],
professional_title=data["professional_title"],
specializations=data["specializations"],
offers_in_person=data["offers_in_person"],
offers_group_appointments=data["offers_group_appointments"],
email_notifications=data.get("email_notifications", True),
text_notifications=data.get("text_notifications", False),
)
new_profile.website = data.get("website")
new_profile.linkedin = data.get("linkedin")
if "videos" in data:
video_data = data.get("videos")
new_profile.videos = [
Video(
title=video["title"],
url=video["url"],
tag=video["tag"],
date_uploaded=video["date_uploaded"],
)
for video in video_data
]
elif profile_type == Account.MENTEE:
new_profile = MenteeProfile(
firebase_uid=data["firebase_uid"],
name=data["name"],
email=data.get("email", "email@gmail.com"),
email_notifications=data.get("email_notifications", True),
text_notifications=data.get("text_notifications", False),
organization=data["organization"],
age=data["age"],
gender=data["gender"],
is_private=data.get("is_private", True),
)
if "video" in data:
video_data = data.get("video")
new_profile.video = Video(
title=video_data["title"],
url=video_data["url"],
tag=video_data["tag"],
date_uploaded=video_data["date_uploaded"],
)
else:
return None
new_profile.languages = data["languages"]
new_profile.biography = data.get("biography")
new_profile.phone_number = data.get("phone_number")
new_profile.location = data.get("location")
if "education" in data:
education_data = data.get("education")
new_profile.education = [
Education(
education_level=education.get("education_level"),
majors=education.get("majors"),
school=education.get("school"),
graduation_year=education.get("graduation_year"),
)
for education in education_data
]
return new_profile
def edit_profile(data: dict = {}, profile: object = None):
if not data or not profile:
return False
if isinstance(profile, MentorProfile):
profile.professional_title = data.get(
"professional_title", profile.professional_title
)
profile.specializations = data.get("specializations", profile.specializations)
profile.offers_group_appointments = data.get(
"offers_group_appointments", profile.offers_group_appointments
)
profile.offers_in_person = data.get(
"offers_in_person", profile.offers_in_person
)
profile.linkedin = data.get("linkedin", profile.linkedin)
profile.website = data.get("website", profile.website)
if "videos" in data:
video_data = data.get("videos")
profile.videos = [
Video(
title=video.get("title"),
url=video.get("url"),
tag=video.get("tag"),
date_uploaded=video.get("date_uploaded"),
)
for video in video_data
]
elif isinstance(profile, MenteeProfile):
profile.age = data.get("age", profile.age)
profile.gender = data.get("gender", profile.gender)
profile.organization = data.get("organization", profile.organization)
profile.is_private = data.get("is_private", profile.is_private)
if "video" in data:
video_data = data.get("video")
profile.video = Video(
title=video_data.get("title"),
url=video_data.get("url"),
tag=video_data.get("tag"),
date_uploaded=video_data.get("date_uploaded"),
)
profile.name = data.get("name", profile.name)
profile.location = data.get("location", profile.location)
profile.email = data.get("email", profile.email)
profile.phone_number = data.get("phone_number", profile.phone_number)
profile.languages = data.get("languages", profile.languages)
profile.biography = data.get("biography", profile.biography)
profile.text_notifications = data.get(
"text_notifications", profile.text_notifications
)
profile.email_notifications = data.get(
"email_notifications", profile.email_notifications
)
if "education" in data:
education_data = data.get("education")
profile.education = [
Education(
education_level=education.get("education_level"),
majors=education.get("majors"),
school=education.get("school"),
graduation_year=education.get("graduation_year"),
)
for education in education_data
]
return True
| true | true |
1c31ab96d1d8e4573b9657d3792bd880da3d50ec | 9,249 | py | Python | adversarial_text/data/data_utils.py | zhangyingying94/models | bf46247b4207698bbeb315d9086eb81662015359 | [
"Apache-2.0"
] | 5 | 2019-03-25T12:36:37.000Z | 2022-02-06T16:36:17.000Z | adversarial_text/data/data_utils.py | zhangyingying94/models | bf46247b4207698bbeb315d9086eb81662015359 | [
"Apache-2.0"
] | null | null | null | adversarial_text/data/data_utils.py | zhangyingying94/models | bf46247b4207698bbeb315d9086eb81662015359 | [
"Apache-2.0"
] | 3 | 2018-01-05T18:31:24.000Z | 2022-02-06T16:36:20.000Z | # Copyright 2017 Google, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for generating/preprocessing data for adversarial text models."""
import operator
import os
import random
import re
import tensorflow as tf
EOS_TOKEN = '</s>'  # end-of-sequence token string

# Data filenames
# Sequence Autoencoder
ALL_SA = 'all_sa.tfrecords'
TRAIN_SA = 'train_sa.tfrecords'
TEST_SA = 'test_sa.tfrecords'

# Language Model
ALL_LM = 'all_lm.tfrecords'
TRAIN_LM = 'train_lm.tfrecords'
TEST_LM = 'test_lm.tfrecords'

# Classification
TRAIN_CLASS = 'train_classification.tfrecords'
TEST_CLASS = 'test_classification.tfrecords'
VALID_CLASS = 'validate_classification.tfrecords'

# LM with bidirectional LSTM
TRAIN_REV_LM = 'train_reverse_lm.tfrecords'
TEST_REV_LM = 'test_reverse_lm.tfrecords'

# Classification with bidirectional LSTM
TRAIN_BD_CLASS = 'train_bidir_classification.tfrecords'
TEST_BD_CLASS = 'test_bidir_classification.tfrecords'
VALID_BD_CLASS = 'validate_bidir_classification.tfrecords'
class ShufflingTFRecordWriter(object):
  """TFRecord writer that buffers records and flushes them in random order.

  Records are held in memory until close(), at which point they are
  shuffled and written to disk in one pass.
  """

  def __init__(self, path):
    self._path = path
    self._records = []
    self._closed = False

  def write(self, record):
    """Buffers one serialized record; the writer must still be open."""
    assert not self._closed
    self._records.append(record)

  def close(self):
    """Shuffles the buffered records, writes them all, and marks closed."""
    assert not self._closed
    random.shuffle(self._records)
    with tf.python_io.TFRecordWriter(self._path) as writer:
      for rec in self._records:
        writer.write(rec)
    self._closed = True

  def __enter__(self):
    return self

  def __exit__(self, unused_type, unused_value, unused_traceback):
    self.close()
class Timestep(object):
  """Represents a single timestep in a SequenceWrapper.

  Wraps the three parallel proto Features (token, label, weight) for one
  position and exposes chainable setters.
  """

  def __init__(self, token, label, weight, multivalent_tokens=False):
    """Constructs Timestep from empty Features."""
    self._token_feat = token
    self._label_feat = label
    self._weight_feat = weight
    self._multivalent = multivalent_tokens
    # Seed defaults: univalent timesteps get a single 0 token slot;
    # label and weight always start as 0 / 0.0.
    if not self._multivalent:
      self._token_feat.int64_list.value.append(0)
    self._label_feat.int64_list.value.append(0)
    self._weight_feat.float_list.value.append(0.0)

  @property
  def token(self):
    if self._multivalent:
      raise TypeError('Timestep may contain multiple values; use `tokens`')
    return self._token_feat.int64_list.value[0]

  @property
  def tokens(self):
    return self._token_feat.int64_list.value

  @property
  def label(self):
    return self._label_feat.int64_list.value[0]

  @property
  def weight(self):
    return self._weight_feat.float_list.value[0]

  def set_token(self, token):
    if self._multivalent:
      raise TypeError('Timestep may contain multiple values; use `add_token`')
    self._token_feat.int64_list.value[0] = token
    return self

  def add_token(self, token):
    self._token_feat.int64_list.value.append(token)
    return self

  def set_label(self, label):
    self._label_feat.int64_list.value[0] = label
    return self

  def set_weight(self, weight):
    self._weight_feat.float_list.value[0] = weight
    return self

  def copy_from(self, timestep):
    """Copies token, label and weight from another univalent timestep."""
    return self.set_token(timestep.token).set_label(
        timestep.label).set_weight(timestep.weight)
class SequenceWrapper(object):
  """Wrapper around tf.SequenceExample for building timestep sequences.

  Maintains three parallel FeatureLists (token ids, labels, weights) plus
  a Python-side list of Timestep views over them.
  """

  # FeatureList keys inside the underlying SequenceExample.
  F_TOKEN_ID = 'token_id'
  F_LABEL = 'label'
  F_WEIGHT = 'weight'

  def __init__(self, multivalent_tokens=False):
    self._seq = tf.train.SequenceExample()
    self._feature_lists = self._seq.feature_lists.feature_list
    self._timesteps = []
    self._multivalent_tokens = multivalent_tokens

  @property
  def seq(self):
    """The underlying tf.train.SequenceExample proto."""
    return self._seq

  @property
  def multivalent_tokens(self):
    return self._multivalent_tokens

  def _features_for(self, key):
    # Shared accessor for the three parallel FeatureLists.
    return self._feature_lists[key].feature

  def add_timestep(self):
    """Appends a new default-initialized Timestep and returns it."""
    new_step = Timestep(
        self._features_for(SequenceWrapper.F_TOKEN_ID).add(),
        self._features_for(SequenceWrapper.F_LABEL).add(),
        self._features_for(SequenceWrapper.F_WEIGHT).add(),
        multivalent_tokens=self._multivalent_tokens)
    self._timesteps.append(new_step)
    return new_step

  def __iter__(self):
    return iter(self._timesteps)

  def __len__(self):
    return len(self._timesteps)

  def __getitem__(self, idx):
    return self._timesteps[idx]
def build_reverse_sequence(seq):
  """Builds a sequence that is the reverse of the input sequence."""
  rev = SequenceWrapper()
  # Reverse every timestep except the final one (e.g. the EOS marker),
  # which keeps its position at the end.
  for ts in reversed(seq[:-1]):
    rev.add_timestep().copy_from(ts)
  rev.add_timestep().copy_from(seq[-1])
  return rev
def build_bidirectional_seq(seq, rev_seq):
  """Zips forward and reverse sequences into one multivalent sequence."""
  merged = SequenceWrapper(multivalent_tokens=True)
  for fwd_ts, rev_ts in zip(seq, rev_seq):
    step = merged.add_timestep()
    step.add_token(fwd_ts.token)
    step.add_token(rev_ts.token)
  return merged
def build_lm_sequence(seq):
  """Builds language model sequence from input sequence.

  Args:
    seq: SequenceWrapper.

  Returns:
    SequenceWrapper with `seq` tokens copied over to output sequence tokens
    and labels (offset by 1, i.e. predict next token) with weights set to
    1.0.
  """
  lm_seq = SequenceWrapper()
  # Pair each token with its successor: input token_i, label token_{i+1}.
  for cur_ts, next_ts in zip(seq[:-1], seq[1:]):
    lm_seq.add_timestep().set_token(cur_ts.token).set_label(
        next_ts.token).set_weight(1.0)
  return lm_seq
def build_seq_ae_sequence(seq):
  """Builds seq_ae sequence from input sequence.

  Args:
    seq: SequenceWrapper.

  Returns:
    SequenceWrapper with `seq` inputs copied and concatenated, and with
    labels copied in on the right-hand (i.e. decoder) side with weights set
    to 1.0. The new sequence will have length `len(seq) * 2 - 1`, as the
    last timestep of the encoder section and the first step of the decoder
    section will overlap.
  """
  seq_ae_seq = SequenceWrapper()
  for i in range(len(seq) * 2 - 1):
    ts = seq_ae_seq.add_timestep()
    if i < len(seq) - 1:
      # Encoder: inputs only; label/weight stay at their 0 defaults so no
      # loss is computed on this half.
      ts.set_token(seq[i].token)
    elif i == len(seq) - 1:
      # Transition step: final encoder input doubles as the first decoder
      # step, predicting the first token of the original sequence.
      ts.set_token(seq[i].token)
      ts.set_label(seq[0].token)
      ts.set_weight(1.0)
    else:
      # Decoder: teacher-forced reconstruction; indices wrap modulo
      # len(seq) back onto the original positions, labeled with next token.
      ts.set_token(seq[i % len(seq)].token)
      ts.set_label(seq[(i + 1) % len(seq)].token)
      ts.set_weight(1.0)
  return seq_ae_seq
def build_labeled_sequence(seq, class_label, label_gain=False):
  """Builds labeled sequence from input sequence.

  Args:
    seq: SequenceWrapper.
    class_label: bool.
    label_gain: bool. If True, class_label will be put on every timestep and
      weight will increase linearly from 0 to 1.

  Returns:
    SequenceWrapper with `seq` copied in and `class_label` added as label to
    final timestep.
  """
  label_seq = SequenceWrapper(multivalent_tokens=seq.multivalent_tokens)

  # Copy sequence without labels
  seq_len = len(seq)
  final_timestep = None
  for i, timestep in enumerate(seq):
    label_timestep = label_seq.add_timestep()
    if seq.multivalent_tokens:
      # Bidirectional input: copy every token at this position.
      for token in timestep.tokens:
        label_timestep.add_token(token)
    else:
      label_timestep.set_token(timestep.token)
    if label_gain:
      # Label every step, ramping the loss weight linearly from 0 to 1.
      label_timestep.set_label(int(class_label))
      weight = 1.0 if seq_len < 2 else float(i) / (seq_len - 1)
      label_timestep.set_weight(weight)
    if i == (seq_len - 1):
      final_timestep = label_timestep

  # Edit final timestep to have class label and weight = 1.
  # NOTE(review): assumes `seq` is non-empty; an empty input would leave
  # final_timestep as None and raise AttributeError — confirm callers.
  final_timestep.set_label(int(class_label)).set_weight(1.0)
  return label_seq
def split_by_punct(segment):
  """Splits segment on runs of non-word chars, dropping empty/space tokens."""
  pieces = re.split(r'\W+', segment)
  return [piece for piece in pieces if piece and not piece.isspace()]
def sort_vocab_by_frequency(vocab_freq_map):
  """Sorts vocab_freq_map by count.

  Args:
    vocab_freq_map: dict<str term, int count>, vocabulary terms with counts.

  Returns:
    list<tuple<str term, int count>> sorted by count, descending.
  """
  return sorted(vocab_freq_map.items(),
                key=lambda term_count: term_count[1],
                reverse=True)
def write_vocab_and_frequency(ordered_vocab_freqs, output_dir):
  """Writes ordered_vocab_freqs into vocab.txt and vocab_freq.txt."""
  tf.gfile.MakeDirs(output_dir)
  vocab_path = os.path.join(output_dir, 'vocab.txt')
  freq_path = os.path.join(output_dir, 'vocab_freq.txt')
  # One term per line in vocab.txt, with its count on the matching line
  # of vocab_freq.txt.
  with open(vocab_path, 'w') as vocab_f, open(freq_path, 'w') as freq_f:
    for word, freq in ordered_vocab_freqs:
      vocab_f.write('{}\n'.format(word))
      freq_f.write('{}\n'.format(freq))
| 28.284404 | 80 | 0.711969 |
import operator
import os
import random
import re
import tensorflow as tf
EOS_TOKEN = '</s>'
ALL_SA = 'all_sa.tfrecords'
TRAIN_SA = 'train_sa.tfrecords'
TEST_SA = 'test_sa.tfrecords'
ALL_LM = 'all_lm.tfrecords'
TRAIN_LM = 'train_lm.tfrecords'
TEST_LM = 'test_lm.tfrecords'
TRAIN_CLASS = 'train_classification.tfrecords'
TEST_CLASS = 'test_classification.tfrecords'
VALID_CLASS = 'validate_classification.tfrecords'
TRAIN_REV_LM = 'train_reverse_lm.tfrecords'
TEST_REV_LM = 'test_reverse_lm.tfrecords'
TRAIN_BD_CLASS = 'train_bidir_classification.tfrecords'
TEST_BD_CLASS = 'test_bidir_classification.tfrecords'
VALID_BD_CLASS = 'validate_bidir_classification.tfrecords'
class ShufflingTFRecordWriter(object):
def __init__(self, path):
self._path = path
self._records = []
self._closed = False
def write(self, record):
assert not self._closed
self._records.append(record)
def close(self):
assert not self._closed
random.shuffle(self._records)
with tf.python_io.TFRecordWriter(self._path) as f:
for record in self._records:
f.write(record)
self._closed = True
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
self.close()
class Timestep(object):
def __init__(self, token, label, weight, multivalent_tokens=False):
self._token = token
self._label = label
self._weight = weight
self._multivalent_tokens = multivalent_tokens
self._fill_with_defaults()
@property
def token(self):
if self._multivalent_tokens:
raise TypeError('Timestep may contain multiple values; use `tokens`')
return self._token.int64_list.value[0]
@property
def tokens(self):
return self._token.int64_list.value
@property
def label(self):
return self._label.int64_list.value[0]
@property
def weight(self):
return self._weight.float_list.value[0]
def set_token(self, token):
if self._multivalent_tokens:
raise TypeError('Timestep may contain multiple values; use `add_token`')
self._token.int64_list.value[0] = token
return self
def add_token(self, token):
self._token.int64_list.value.append(token)
return self
def set_label(self, label):
self._label.int64_list.value[0] = label
return self
def set_weight(self, weight):
self._weight.float_list.value[0] = weight
return self
def copy_from(self, timestep):
self.set_token(timestep.token).set_label(timestep.label).set_weight(
timestep.weight)
return self
def _fill_with_defaults(self):
if not self._multivalent_tokens:
self._token.int64_list.value.append(0)
self._label.int64_list.value.append(0)
self._weight.float_list.value.append(0.0)
class SequenceWrapper(object):
F_TOKEN_ID = 'token_id'
F_LABEL = 'label'
F_WEIGHT = 'weight'
def __init__(self, multivalent_tokens=False):
self._seq = tf.train.SequenceExample()
self._flist = self._seq.feature_lists.feature_list
self._timesteps = []
self._multivalent_tokens = multivalent_tokens
@property
def seq(self):
return self._seq
@property
def multivalent_tokens(self):
return self._multivalent_tokens
@property
def _tokens(self):
return self._flist[SequenceWrapper.F_TOKEN_ID].feature
@property
def _labels(self):
return self._flist[SequenceWrapper.F_LABEL].feature
@property
def _weights(self):
return self._flist[SequenceWrapper.F_WEIGHT].feature
def add_timestep(self):
timestep = Timestep(
self._tokens.add(),
self._labels.add(),
self._weights.add(),
multivalent_tokens=self._multivalent_tokens)
self._timesteps.append(timestep)
return timestep
def __iter__(self):
for timestep in self._timesteps:
yield timestep
def __len__(self):
return len(self._timesteps)
def __getitem__(self, idx):
return self._timesteps[idx]
def build_reverse_sequence(seq):
reverse_seq = SequenceWrapper()
for timestep in reversed(seq[:-1]):
reverse_seq.add_timestep().copy_from(timestep)
reverse_seq.add_timestep().copy_from(seq[-1])
return reverse_seq
def build_bidirectional_seq(seq, rev_seq):
bidir_seq = SequenceWrapper(multivalent_tokens=True)
for forward_ts, reverse_ts in zip(seq, rev_seq):
bidir_seq.add_timestep().add_token(forward_ts.token).add_token(
reverse_ts.token)
return bidir_seq
def build_lm_sequence(seq):
lm_seq = SequenceWrapper()
for i, timestep in enumerate(seq[:-1]):
lm_seq.add_timestep().set_token(timestep.token).set_label(
seq[i + 1].token).set_weight(1.0)
return lm_seq
def build_seq_ae_sequence(seq):
seq_ae_seq = SequenceWrapper()
for i in range(len(seq) * 2 - 1):
ts = seq_ae_seq.add_timestep()
if i < len(seq) - 1:
ts.set_token(seq[i].token)
elif i == len(seq) - 1:
ts.set_token(seq[i].token)
ts.set_label(seq[0].token)
ts.set_weight(1.0)
else:
ts.set_token(seq[i % len(seq)].token)
ts.set_label(seq[(i + 1) % len(seq)].token)
ts.set_weight(1.0)
return seq_ae_seq
def build_labeled_sequence(seq, class_label, label_gain=False):
label_seq = SequenceWrapper(multivalent_tokens=seq.multivalent_tokens)
seq_len = len(seq)
final_timestep = None
for i, timestep in enumerate(seq):
label_timestep = label_seq.add_timestep()
if seq.multivalent_tokens:
for token in timestep.tokens:
label_timestep.add_token(token)
else:
label_timestep.set_token(timestep.token)
if label_gain:
label_timestep.set_label(int(class_label))
weight = 1.0 if seq_len < 2 else float(i) / (seq_len - 1)
label_timestep.set_weight(weight)
if i == (seq_len - 1):
final_timestep = label_timestep
final_timestep.set_label(int(class_label)).set_weight(1.0)
return label_seq
def split_by_punct(segment):
return [s for s in re.split(r'\W+', segment) if s and not s.isspace()]
def sort_vocab_by_frequency(vocab_freq_map):
return sorted(
vocab_freq_map.items(), key=operator.itemgetter(1), reverse=True)
def write_vocab_and_frequency(ordered_vocab_freqs, output_dir):
tf.gfile.MakeDirs(output_dir)
with open(os.path.join(output_dir, 'vocab.txt'), 'w') as vocab_f:
with open(os.path.join(output_dir, 'vocab_freq.txt'), 'w') as freq_f:
for word, freq in ordered_vocab_freqs:
vocab_f.write('{}\n'.format(word))
freq_f.write('{}\n'.format(freq))
| true | true |
1c31acb81b1d58970e393327abc0600198759062 | 184 | py | Python | graviteeio_cli/extensions/jinja_filters.py | Shaker5191/graviteeio-cli | 318748bb8e631743ea58afaee24333249ca3d227 | [
"Apache-2.0"
] | 12 | 2019-05-29T20:06:01.000Z | 2020-10-07T07:40:27.000Z | graviteeio_cli/extensions/jinja_filters.py | Shaker5191/graviteeio-cli | 318748bb8e631743ea58afaee24333249ca3d227 | [
"Apache-2.0"
] | 41 | 2019-11-04T18:18:18.000Z | 2021-04-22T16:12:51.000Z | graviteeio_cli/extensions/jinja_filters.py | gravitee-io/gravitee-cli | 8e3bf9f2c0c2873e0f6e67f8fcaf0d3b6c44b3ca | [
"Apache-2.0"
] | 6 | 2019-06-18T04:27:49.000Z | 2021-06-02T17:52:24.000Z | import yaml
def to_yaml(a, *args, **kw):
    """Serialize `a` to a flow-style YAML string without trailing whitespace."""
    dumped = yaml.safe_dump(a, default_flow_style=True)
    return dumped.rstrip()
def filter_loader(environment):
    """Register the custom `toyaml` filter on a Jinja environment."""
    environment.filters.update(toyaml=to_yaml)
| 18.4 | 62 | 0.722826 | import yaml
def to_yaml(a, *args, **kw):
return yaml.safe_dump(a, default_flow_style=True).rstrip()
def filter_loader(environment):
environment.filters['toyaml'] = to_yaml
| true | true |
1c31acd628192da6c64c8e9b339c87b49429fe77 | 2,621 | py | Python | src/genetics.py | NDHall/pyvovlExtension | 521c7cbd340c6f90e4b6d9d1a1da1282020c9225 | [
"BSD-2-Clause-FreeBSD"
] | 67 | 2015-05-17T21:22:08.000Z | 2022-03-16T09:59:05.000Z | src/genetics.py | NDHall/pyvovlExtension | 521c7cbd340c6f90e4b6d9d1a1da1282020c9225 | [
"BSD-2-Clause-FreeBSD"
] | 24 | 2016-01-12T18:02:25.000Z | 2021-12-16T14:09:25.000Z | src/genetics.py | NDHall/pyvovlExtension | 521c7cbd340c6f90e4b6d9d1a1da1282020c9225 | [
"BSD-2-Clause-FreeBSD"
] | 26 | 2015-01-30T20:50:17.000Z | 2022-01-11T14:11:33.000Z | #! /usr/bin/env python
##############################################################################
## pyvolve: Python platform for simulating evolutionary sequences.
##
## Written by Stephanie J. Spielman (stephanie.spielman@gmail.com)
##############################################################################
'''
This module contains various genetic definitions and mappings used throughout pyvolve.
'''
class Genetics():
    '''
    Molecular alphabet objects.
    '''

    def __init__(self):
        '''
        Set up internally-used genetic code lists.

        The codon list and the per-amino-acid codon groups are derived
        from the codon-to-amino-acid dictionary, so the three structures
        can never fall out of sync.
        '''
        self.pyrims = ["C", "T"]
        self.purines = ["A", "G"]
        self.nucleotides = ["A", "C", "G", "T"]
        self.amino_acids = ["A", "C", "D", "E", "F", "G", "H", "I", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "V", "W", "Y"]
        # canonical codon -> amino acid table (stop codons excluded)
        self.codon_dict = {"AAA":"K", "AAC":"N", "AAG":"K", "AAT":"N", "ACA":"T", "ACC":"T", "ACG":"T", "ACT":"T", "AGA":"R", "AGC":"S", "AGG":"R", "AGT":"S", "ATA":"I", "ATC":"I", "ATG":"M", "ATT":"I", "CAA":"Q", "CAC":"H", "CAG":"Q", "CAT":"H", "CCA":"P", "CCC":"P", "CCG":"P", "CCT":"P", "CGA":"R", "CGC":"R", "CGG":"R", "CGT":"R", "CTA":"L", "CTC":"L", "CTG":"L", "CTT":"L", "GAA":"E", "GAC":"D", "GAG":"E", "GAT":"D", "GCA":"A", "GCC":"A", "GCG":"A", "GCT":"A", "GGA":"G", "GGC":"G", "GGG":"G", "GGT":"G", "GTA":"V", "GTC":"V", "GTG":"V", "GTT":"V", "TAC":"Y", "TAT":"Y", "TCA":"S", "TCC":"S", "TCG":"S", "TCT":"S", "TGC":"C", "TGG":"W", "TGT":"C", "TTA":"L", "TTC":"F", "TTG":"L", "TTT":"F"}
        # all 61 coding codons, lexicographically ordered
        self.codons = sorted(self.codon_dict)
        # synonymous-codon groups, parallel to self.amino_acids
        self.genetic_code = [
            [codon for codon in self.codons if self.codon_dict[codon] == aa]
            for aa in self.amino_acids
        ]
        self.stop_codons = ["TAA", "TAG", "TGA"]
| 68.973684 | 699 | 0.395651 | true | true | |
1c31aef0f44271f91bc6d3c39f12903ee36bcf7e | 3,041 | py | Python | ikabot/function/searchForIslandSpaces.py | adaamz/ikabot | d243e612ba083a39f6efce15012d173aad693dc6 | [
"MIT"
] | null | null | null | ikabot/function/searchForIslandSpaces.py | adaamz/ikabot | d243e612ba083a39f6efce15012d173aad693dc6 | [
"MIT"
] | null | null | null | ikabot/function/searchForIslandSpaces.py | adaamz/ikabot | d243e612ba083a39f6efce15012d173aad693dc6 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import gettext
import traceback
import sys
from ikabot.config import *
from ikabot.helpers.botComm import *
from ikabot.helpers.gui import enter, enter
from ikabot.helpers.varios import wait
from ikabot.helpers.signals import setInfoSignal
from ikabot.helpers.pedirInfo import getIslandsIds
from ikabot.helpers.getJson import getIsland
from ikabot.helpers.process import set_child_mode
t = gettext.translation('searchForIslandSpaces',
localedir,
languages=languages,
fallback=True)
_ = t.gettext
def searchForIslandSpaces(session, event, stdin_fd):
    """Child-process entry point: watch islands and report city changes hourly.

    Parameters
    ----------
    session : ikabot.web.session.Session
    event : multiprocessing.Event
        set once initialisation is finished (or aborted)
    stdin_fd: int
        file descriptor reattached as stdin for this child process
    """
    sys.stdin = os.fdopen(stdin_fd)
    aborted = False
    try:
        if checkTelegramData(session) is False:
            aborted = True
        else:
            banner()
            print(_('I will search for new spaces each hour.'))
            enter()
    except KeyboardInterrupt:
        aborted = True
    if aborted:
        event.set()
        return

    set_child_mode(session)
    event.set()

    status_text = _('\nI search for new spaces each hour\n')
    setInfoSignal(session, status_text)
    try:
        do_it(session)
    except:
        # report any failure through the Telegram bot before exiting
        details = _('Error in:\n{}\nCause:\n{}').format(status_text, traceback.format_exc())
        sendToBot(session, details)
    finally:
        session.logout()
def do_it(session):
    """Scan every island the player occupies, once per hour, and report changes.

    Any city that disappeared or was newly founded since the previous scan
    is reported through the Telegram bot.

    Parameters
    ----------
    session : ikabot.web.session.Session
    """
    # last known non-empty city spaces of each island, keyed by island id
    cities_before_per_island = {}
    while True:
        # re-fetched on every pass: the user may colonize a new island
        islandsIds = getIslandsIds(session)
        for islandId in islandsIds:
            html = session.get(island_url + islandId)
            island = getIsland(html)

            # currently occupied city spaces on this island
            cities_now = [city_space for city_space in island['cities'] if city_space['type'] != 'empty']

            if islandId not in cities_before_per_island:
                # first scan of this island: only record the snapshot
                cities_before_per_island[islandId] = cities_now.copy()
                continue

            cities_before = cities_before_per_island[islandId]
            ids_now = {city['id'] for city in cities_now}
            ids_before = {city['id'] for city in cities_before}

            # someone disappeared
            for city_before in cities_before:
                if city_before['id'] not in ids_now:
                    msg = _('the city {} of the player {} disappeared in {} {}:{} {}').format(city_before['name'], city_before['Name'], materials_names[int(island['good'])], island['x'], island['y'], island['name'])
                    sendToBot(session, msg)

            # someone colonised
            for city_now in cities_now:
                if city_now['id'] not in ids_before:
                    msg = _('{} founded {} in {} {}:{} {}').format(city_now['Name'], city_now['name'], materials_names[int(island['good'])], island['x'], island['y'], island['name'])
                    sendToBot(session, msg)

            # bug fix: refresh the snapshot so each change is reported once,
            # not again on every subsequent hourly pass
            cities_before_per_island[islandId] = cities_now.copy()

        wait(1*60*60)
| 31.030612 | 201 | 0.692207 |
import time
import gettext
import traceback
import sys
from ikabot.config import *
from ikabot.helpers.botComm import *
from ikabot.helpers.gui import enter, enter
from ikabot.helpers.varios import wait
from ikabot.helpers.signals import setInfoSignal
from ikabot.helpers.pedirInfo import getIslandsIds
from ikabot.helpers.getJson import getIsland
from ikabot.helpers.process import set_child_mode
t = gettext.translation('searchForIslandSpaces',
localedir,
languages=languages,
fallback=True)
_ = t.gettext
def searchForIslandSpaces(session, event, stdin_fd):
sys.stdin = os.fdopen(stdin_fd)
try:
if checkTelegramData(session) is False:
event.set()
return
banner()
print(_('I will search for new spaces each hour.'))
enter()
except KeyboardInterrupt:
event.set()
return
set_child_mode(session)
event.set()
info = _('\nI search for new spaces each hour\n')
setInfoSignal(session, info)
try:
do_it(session)
except:
msg = _('Error in:\n{}\nCause:\n{}').format(info, traceback.format_exc())
sendToBot(session, msg)
finally:
session.logout()
def do_it(session):
cities_before_per_island = {}
while True:
islandsIds = getIslandsIds(session)
for islandId in islandsIds:
html = session.get(island_url + islandId)
island = getIsland(html)
cities_now = [city_space for city_space in island['cities'] if city_space['type'] != 'empty']
# save it and do nothing
if islandId not in cities_before_per_island:
cities_before_per_island[islandId] = cities_now.copy()
else:
cities_before = cities_before_per_island[islandId]
# someone disappeared
for city_before in cities_before:
if city_before['id'] not in [ city_now['id'] for city_now in cities_now ]:
# we didn't find the city_before in the cities_now
msg = _('the city {} of the player {} disappeared in {} {}:{} {}').format(city_before['name'], city_before['Name'], materials_names[int(island['good'])], island['x'], island['y'], island['name'])
sendToBot(session, msg)
for city_now in cities_now:
if city_now['id'] not in [ city_before['id'] for city_before in cities_before ]:
msg = _('{} founded {} in {} {}:{} {}').format(city_now['Name'], city_now['name'], materials_names[int(island['good'])], island['x'], island['y'], island['name'])
sendToBot(session, msg)
wait(1*60*60)
| true | true |
1c31af208eee8a287bd5a28a478bc5bf5a3d37ba | 1,010 | py | Python | plugins/jokey_plugin.py | personGithubAccount/WitheredBot | 2b25a1da7796e94ff8e54f58adbf2e07e46e8bd4 | [
"MIT"
] | 2 | 2021-11-10T21:39:34.000Z | 2021-11-11T13:43:40.000Z | plugins/jokey_plugin.py | personGithubAccount/WitheredBot | 2b25a1da7796e94ff8e54f58adbf2e07e46e8bd4 | [
"MIT"
] | null | null | null | plugins/jokey_plugin.py | personGithubAccount/WitheredBot | 2b25a1da7796e94ff8e54f58adbf2e07e46e8bd4 | [
"MIT"
] | null | null | null | from libs.embed import (Embed)
from discord.ext import (commands)
from requests import get
class Joke(commands.Cog):
    """Cog exposing a single `joke` command backed by jokeapi.dev."""

    JOKE_URL = 'https://v2.jokeapi.dev/joke/Programming,Miscellaneous,Dark,Pun,Spooky?type=single'

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def joke(self, ctx):
        """Fetch a random single-line joke and post it as an embed."""
        query = {'blacklistFlags': 'nsfw', 'type': 'single'}
        payload = get(self.JOKE_URL, params=query).json()
        if payload['error']:
            reply = Embed(
                title="Error",
                description="Api is unavailable"
            )
        else:
            reply = Embed(
                title=f"{payload['category']}",
                description=f"{payload['joke']}"
            )
        await ctx.send(reply.create)
def setup(bot) -> dict:
    """Plugin hook: instantiate and describe the Jokey cog."""
    return dict(
        Object=Joke(bot),
        name="Jokey",
        description="Gets a joke :wink:",
    )
| 27.297297 | 128 | 0.541584 | from libs.embed import (Embed)
from discord.ext import (commands)
from requests import get
class Joke(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def joke(self, ctx):
params: dict[str] = {}
params.update({'blacklistFlags': 'nsfw'})
params.update({'type': 'single'})
request = get('https://v2.jokeapi.dev/joke/Programming,Miscellaneous,Dark,Pun,Spooky?type=single', params=params).json()
if not request['error']:
joke = Embed(
title=f"{request['category']}",
description=f"{request['joke']}"
)
await ctx.send(joke.create)
else:
error = Embed(
title="Error",
description="Api is unavailable"
)
await ctx.send(error.create)
def setup(bot) -> dict:
return {
"Object": Joke(bot),
"name": "Jokey",
"description": "Gets a joke :wink:",
}
| true | true |
1c31af3a0709854f81377b508f9890e2b1bdd318 | 526 | py | Python | HackerRank/Interview Preparation Kit/Time Complexity Primality.py | will-data/Self-Study-WIL | 69d627c65130fcfa23f27f97948a20107bb33394 | [
"MIT"
] | 1 | 2020-04-11T09:51:54.000Z | 2020-04-11T09:51:54.000Z | HackerRank/Interview Preparation Kit/Time Complexity Primality.py | will-data/Self-Study-WIL | 69d627c65130fcfa23f27f97948a20107bb33394 | [
"MIT"
] | null | null | null | HackerRank/Interview Preparation Kit/Time Complexity Primality.py | will-data/Self-Study-WIL | 69d627c65130fcfa23f27f97948a20107bb33394 | [
"MIT"
] | null | null | null | #!/bin/python3
import math
import os
import random
import re
import sys
import math
# Complete the primality function below.
def primality(n):
if n == 1: return('Not prime')
for i in range(2, math.floor(n**(1/2))+1):
if n % i == 0: return ('Not prime')
return('Prime')
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
p = int(input())
for p_itr in range(p):
n = int(input())
result = primality(n)
fptr.write(result + '\n')
fptr.close()
| 17.533333 | 47 | 0.585551 |
import math
import os
import random
import re
import sys
import math
def primality(n):
if n == 1: return('Not prime')
for i in range(2, math.floor(n**(1/2))+1):
if n % i == 0: return ('Not prime')
return('Prime')
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
p = int(input())
for p_itr in range(p):
n = int(input())
result = primality(n)
fptr.write(result + '\n')
fptr.close()
| true | true |
1c31b0c679e5815c7a8a471eea9e2ac5d661ea89 | 5,219 | py | Python | backend/api/v1/groups/serializers.py | donicrazy/ChatApp | ab129a9c0706bbb972cbce43283ba6e06d144635 | [
"MIT"
] | null | null | null | backend/api/v1/groups/serializers.py | donicrazy/ChatApp | ab129a9c0706bbb972cbce43283ba6e06d144635 | [
"MIT"
] | 7 | 2021-03-19T04:47:13.000Z | 2022-01-13T02:02:46.000Z | backend/api/v1/groups/serializers.py | donicrazy/ChatApp | ab129a9c0706bbb972cbce43283ba6e06d144635 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from django.contrib.auth import get_user_model
from backend.api.v1.profiles.serializers import ProfileSerializer
from backend.groups.models import (ChatGroup, GroupMembership, GroupMessage,
GroupMessageInfo)
User = get_user_model()
class MemberSerializer(serializers.ModelSerializer):
    """Flat representation of the person<->group membership relation."""
    class Meta:
        model = GroupMembership
        fields = ("person", "group", "role", "date_joined")
class ProfileAsMemberSerializer(ProfileSerializer):
    """Profile serializer extended with the member's role and join date."""
    role = serializers.SerializerMethodField()
    date_joined = serializers.SerializerMethodField()

    @property
    def group_id(self):
        # the view injects the group's primary key through the context
        return self.context.get('group_id')

    def get_membership(self, obj):
        """Fetch the membership row linking this profile to the group."""
        return GroupMembership.objects.get(group__id=self.group_id, person=obj)

    def get_role(self, obj):
        return self.get_membership(obj).role

    def get_date_joined(self, obj):
        # serialized as a plain string, matching the original behavior
        return str(self.get_membership(obj).date_joined)

    class Meta:
        model = ProfileSerializer.Meta.model
        fields = ProfileSerializer.Meta.fields + ("role", "date_joined")
class GroupSerializer(serializers.ModelSerializer):
    """ Group Serializer"""
    messages = serializers.SerializerMethodField()
    last_message = serializers.SerializerMethodField()
    unread_count = serializers.SerializerMethodField()
    members = serializers.SerializerMethodField()
    # Queryset cached by get_messages() and reused by get_last_message();
    # NOTE(review): this relies on DRF evaluating the method fields in
    # declaration order (messages before last_message) — confirm before
    # reordering the field declarations above.
    messages_qs = None
    @property
    def user_id(self):
        # NOTE(review): checks the 'requests' context key but reads
        # 'request' — looks like a typo; verify against the callers.
        if self.context.get('requests'):
            return self.context.get('request').query_params.get('user_id')
        else:
            return self.context.get('user_id')
    def get_messages(self, obj):
        # default: every message of the group; optionally narrowed below
        self.messages_qs = obj.messages.all()
        if self.user_id:
            person = User.objects.get(id=self.user_id).profile
            if self.context.get('filter') == 'unread':
                self.messages_qs = person.group_messages.filter(
                    group=obj,
                    message_info__unread=True
                )
            elif self.context.get('filter') == 'stared':
                self.messages_qs = person.group_messages.filter(
                    group=obj,
                    message_info__stared=True
                )
        # full message bodies only when 'message_details' is requested;
        # otherwise an empty list is serialized (the queryset above is
        # still cached for get_last_message / unread counting)
        if self.context.get('message_details'):
            return GroupMessageSerializer(
                self.messages_qs,
                context={
                    "user_id": self.user_id
                },
                many=True
            ).data
        else:
            return GroupMessageSerializer(
                obj.messages.none(),
                context={
                    "user_id": self.user_id
                },
                many=True
            ).data
    def get_last_message(self, _):
        # uses the queryset cached by get_messages(); serializes None
        # (empty message) when the group has no messages
        message = None
        if len(self.messages_qs) > 0:
            message = self.messages_qs.last()
        return GroupMessageSerializer(
            message,
            context={
                "user_id": self.user_id
            }
        ).data
    def get_unread_count(self, obj):
        # returns None (not 0) when no user_id is supplied in the context
        if self.user_id:
            count = GroupMessageInfo.objects.filter(
                message__group=obj,
                person__id=self.user_id,
                unread=True
            ).count()
            return count
    def get_members(self, obj):
        # members serialized with their role/join date for this group
        return ProfileAsMemberSerializer(
            obj.members.all(),
            many=True,
            read_only=True,
            context={
                "group_id": obj.id
            }
        ).data
    class Meta:
        model = ChatGroup
        fields = (
            "name",
            "id",
            "slug",
            "img",
            "description",
            "members",
            "messages",
            "last_message",
            "unread_count"
        )
class GroupMessageSerializer(serializers.ModelSerializer):
    """ Message Serializer"""
    unread = serializers.SerializerMethodField()
    stared = serializers.SerializerMethodField()
    avatar = serializers.SerializerMethodField()
    sender_name = serializers.SerializerMethodField()
    # expose the owning group's id under the name the frontend expects
    chat_id = serializers.IntegerField(source="group.id")
    @property
    def user_id(self):
        # NOTE(review): checks the 'requests' context key but reads
        # 'request' — same apparent typo as GroupSerializer.user_id;
        # verify against the callers.
        if self.context.get('requests'):
            return self.context.get('request').query_params.get('user_id')
        else:
            return self.context.get('user_id')
    def get_message_info(self, obj):
        # per-recipient read/star flags for this message
        return GroupMessageInfo.objects.get(
            message=obj,
            person__id=self.user_id
        )
    def get_sender_name(self, obj):
        return obj.sender.user.username
    def get_avatar(self, obj):
        # returns None implicitly when the sender has no avatar
        if obj.sender.avatar:
            return obj.sender.avatar.url
    def get_unread(self, obj):
        return self.get_message_info(obj).unread
    def get_stared(self, obj):
        return self.get_message_info(obj).stared
    class Meta:
        model = GroupMessage
        fields = (
            "id",
            "sender",
            "sender_name",
            "avatar",
            "chat_id",
            "text",
            "date",
            "unread",
            "stared"
        )
| 28.210811 | 76 | 0.567733 | from rest_framework import serializers
from django.contrib.auth import get_user_model
from backend.api.v1.profiles.serializers import ProfileSerializer
from backend.groups.models import (ChatGroup, GroupMembership, GroupMessage,
GroupMessageInfo)
User = get_user_model()
class MemberSerializer(serializers.ModelSerializer):
class Meta:
model = GroupMembership
fields = ("person", "group", "role", "date_joined")
class ProfileAsMemberSerializer(ProfileSerializer):
role = serializers.SerializerMethodField()
date_joined = serializers.SerializerMethodField()
@property
def group_id(self):
return self.context.get('group_id')
def get_membership(self, obj):
return GroupMembership.objects.get(
group__id=self.group_id,
person=obj
)
def get_role(self, obj):
return self.get_membership(obj).role
def get_date_joined(self, obj):
date = self.get_membership(obj).date_joined
return str(date)
class Meta:
model = ProfileSerializer.Meta.model
fields = ProfileSerializer.Meta.fields + ("role", "date_joined")
class GroupSerializer(serializers.ModelSerializer):
messages = serializers.SerializerMethodField()
last_message = serializers.SerializerMethodField()
unread_count = serializers.SerializerMethodField()
members = serializers.SerializerMethodField()
messages_qs = None
@property
def user_id(self):
if self.context.get('requests'):
return self.context.get('request').query_params.get('user_id')
else:
return self.context.get('user_id')
def get_messages(self, obj):
self.messages_qs = obj.messages.all()
if self.user_id:
person = User.objects.get(id=self.user_id).profile
if self.context.get('filter') == 'unread':
self.messages_qs = person.group_messages.filter(
group=obj,
message_info__unread=True
)
elif self.context.get('filter') == 'stared':
self.messages_qs = person.group_messages.filter(
group=obj,
message_info__stared=True
)
if self.context.get('message_details'):
return GroupMessageSerializer(
self.messages_qs,
context={
"user_id": self.user_id
},
many=True
).data
else:
return GroupMessageSerializer(
obj.messages.none(),
context={
"user_id": self.user_id
},
many=True
).data
def get_last_message(self, _):
message = None
if len(self.messages_qs) > 0:
message = self.messages_qs.last()
return GroupMessageSerializer(
message,
context={
"user_id": self.user_id
}
).data
def get_unread_count(self, obj):
if self.user_id:
count = GroupMessageInfo.objects.filter(
message__group=obj,
person__id=self.user_id,
unread=True
).count()
return count
def get_members(self, obj):
return ProfileAsMemberSerializer(
obj.members.all(),
many=True,
read_only=True,
context={
"group_id": obj.id
}
).data
class Meta:
model = ChatGroup
fields = (
"name",
"id",
"slug",
"img",
"description",
"members",
"messages",
"last_message",
"unread_count"
)
class GroupMessageSerializer(serializers.ModelSerializer):
unread = serializers.SerializerMethodField()
stared = serializers.SerializerMethodField()
avatar = serializers.SerializerMethodField()
sender_name = serializers.SerializerMethodField()
chat_id = serializers.IntegerField(source="group.id")
@property
def user_id(self):
if self.context.get('requests'):
return self.context.get('request').query_params.get('user_id')
else:
return self.context.get('user_id')
def get_message_info(self, obj):
return GroupMessageInfo.objects.get(
message=obj,
person__id=self.user_id
)
def get_sender_name(self, obj):
return obj.sender.user.username
def get_avatar(self, obj):
if obj.sender.avatar:
return obj.sender.avatar.url
def get_unread(self, obj):
return self.get_message_info(obj).unread
def get_stared(self, obj):
return self.get_message_info(obj).stared
class Meta:
model = GroupMessage
fields = (
"id",
"sender",
"sender_name",
"avatar",
"chat_id",
"text",
"date",
"unread",
"stared"
)
| true | true |
1c31b1b16df1630363132b378573133e697d96b7 | 2,452 | py | Python | macdaily/cmd/logging.py | JarryShaw/MacDaily | 853b841dd1f1f7e6aae7bf2c305ff008bc76055c | [
"BSD-3-Clause"
] | 10 | 2018-09-20T19:57:56.000Z | 2021-11-14T18:28:10.000Z | macdaily/cmd/logging.py | JarryShaw/jsdaily | 3ca7aa7c75a12dc08ab44f78af2b089e1ed41d3d | [
"BSD-3-Clause"
] | 2 | 2020-05-31T08:49:47.000Z | 2021-12-28T16:57:42.000Z | macdaily/cmd/logging.py | JarryShaw/jsdaily | 3ca7aa7c75a12dc08ab44f78af2b089e1ed41d3d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import abc
import os
from macdaily.cls.command import Command
from macdaily.util.compat import pathlib
from macdaily.util.tools.print import print_info
class LoggingCommand(Command):
    """Base class for MacDaily ``logging`` sub-commands.

    Concrete subclasses supply the ``log`` file stem and ``ext`` suffix
    and implement ``_proc_logging`` for one package manager.
    """
    @property
    @abc.abstractmethod
    def log(self):
        # log file name stem; must be overridden by subclasses
        return NotImplemented
    @property
    @abc.abstractmethod
    def ext(self):
        # log file extension; must be overridden by subclasses
        return NotImplemented
    @property
    def cmd(self):
        # command name used in progress messages
        return 'logging'
    @property
    def act(self):
        # verb forms (imperative, past, participle) used in messages
        return ('log', 'logged', 'recorded')
    @property
    def job(self):
        return ('logging', 'logging')
    @property
    def sample(self):
        # full path of the log file this command produces
        return os.path.join(self._logroot, f'{self.log}{self.ext}')
    @property
    def packages(self):
        # logging commands track no package lists; placeholders only
        return NotImplemented
    @property
    def ignored(self):
        return NotImplemented
    @property
    def failed(self):
        return NotImplemented
    @property
    def notfound(self):
        return NotImplemented
    def __init__(self, namespace, filename, timeout, confirm,  # pylint: disable=super-init-not-called
                 askpass, password, disk_dir, brew_renew=None):
        # quiet/verbose flags derived from the parsed CLI namespace
        self._qflag = namespace.get('quiet', False)
        self._vflag = self._qflag or (not namespace.get('verbose', False))
        text = f'Running {self.cmd} command for {self.mode}'
        print_info(text, filename, redirect=self._qflag)
        # assign members
        self._file = filename
        self._timeout = timeout
        self._confirm = confirm
        self._askpass = askpass
        self._password = password
        self._disk_dir = disk_dir
        self._brew_renew = brew_renew
        # log root is two directories above the current log file
        self._logroot = str(pathlib.Path(filename).resolve().parents[1])
        # exit if no executable found
        if self._check_exec():
            # mainloop process
            self._pkg_args(namespace)
            self._loc_exec()
            self._run_proc()
        # remove temp vars
        [delattr(self, attr) for attr in filter(lambda s: s.startswith('_var_'), dir(self))]  # pylint: disable=expression-not-assigned
    def _pkg_args(self, namespace):
        # parse package-related arguments out of the namespace
        return self._parse_args(namespace)
    def _run_proc(self):
        # run the logging procedure once per located executable
        for path in self._exec:
            text = f'Using {self.name} executable {path!r}'
            print_info(text, self._file, redirect=self._qflag)
            self._proc_logging(path)
    @abc.abstractmethod
    def _proc_logging(self, path):
        # subclass hook: perform the actual logging for one executable
        pass
| 25.810526 | 135 | 0.625204 |
import abc
import os
from macdaily.cls.command import Command
from macdaily.util.compat import pathlib
from macdaily.util.tools.print import print_info
class LoggingCommand(Command):
@property
@abc.abstractmethod
def log(self):
return NotImplemented
@property
@abc.abstractmethod
def ext(self):
return NotImplemented
@property
def cmd(self):
return 'logging'
@property
def act(self):
return ('log', 'logged', 'recorded')
@property
def job(self):
return ('logging', 'logging')
@property
def sample(self):
return os.path.join(self._logroot, f'{self.log}{self.ext}')
@property
def packages(self):
return NotImplemented
@property
def ignored(self):
return NotImplemented
@property
def failed(self):
return NotImplemented
@property
def notfound(self):
return NotImplemented
def __init__(self, namespace, filename, timeout, confirm,
askpass, password, disk_dir, brew_renew=None):
self._qflag = namespace.get('quiet', False)
self._vflag = self._qflag or (not namespace.get('verbose', False))
text = f'Running {self.cmd} command for {self.mode}'
print_info(text, filename, redirect=self._qflag)
self._file = filename
self._timeout = timeout
self._confirm = confirm
self._askpass = askpass
self._password = password
self._disk_dir = disk_dir
self._brew_renew = brew_renew
self._logroot = str(pathlib.Path(filename).resolve().parents[1])
if self._check_exec():
self._pkg_args(namespace)
self._loc_exec()
self._run_proc()
[delattr(self, attr) for attr in filter(lambda s: s.startswith('_var_'), dir(self))]
def _pkg_args(self, namespace):
return self._parse_args(namespace)
def _run_proc(self):
for path in self._exec:
text = f'Using {self.name} executable {path!r}'
print_info(text, self._file, redirect=self._qflag)
self._proc_logging(path)
@abc.abstractmethod
def _proc_logging(self, path):
pass
| true | true |
1c31b332cd2f592824567873d1679df2de07496c | 3,592 | py | Python | CD_Installer.py | GascaK/CosmoDispatch | 2ffcf67116719d768db1215d11026c06e1188f11 | [
"MIT"
] | 1 | 2019-07-21T10:30:00.000Z | 2019-07-21T10:30:00.000Z | CD_Installer.py | GascaK/CosmoDispatch | 2ffcf67116719d768db1215d11026c06e1188f11 | [
"MIT"
] | null | null | null | CD_Installer.py | GascaK/CosmoDispatch | 2ffcf67116719d768db1215d11026c06e1188f11 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import tkinter as tk
from tkinter import ttk
class CDUpdate(tk.Tk):
    def __init__(self, update_file):
        """Cosmo Dispatch update/installer window.

        Parses the predefined CD_update.txt format and rewrites the
        application source files it describes.  The .txt container exists
        to work around mail filters that block .py attachments.

        Parameters
        ----------
        update_file : str
            File to load for the update, usually 'CD_update.txt'.
        """
        tk.Tk.__init__(self)
        frame = tk.Frame()
        frame.pack()
        entry = tk.Text(frame, width=50, height=10)
        entry.pack()

        location = ''          # current target file ('' = none selected yet)
        update_notes = False   # True while inside the [UpNotes]..[DnNotes] span
        try:
            with open(update_file, 'r') as u_file:
                for line in u_file:
                    if line[:9] == '[UpNotes]' or update_notes:
                        if line[:9] == '[UpNotes]':
                            update_notes = True
                            version = line[9:]
                            self.title(f'Update # {version}')
                            continue
                        elif line[:9] == '[DnNotes]':
                            update_notes = False
                            continue
                        # Display release notes in the text widget.
                        entry.insert(tk.END, line)
                    elif line[:4] == '[FL]':
                        # New target file: remove any stale copy first.
                        location = line[4:]
                        try:
                            os.remove(location[:-1])
                        except FileNotFoundError:
                            # nothing to delete - that is fine
                            continue
                        continue  # skip the location line itself
                    # Append payload lines once a target file is known.
                    # (fix: '!=' instead of the fragile 'is not' literal
                    # identity comparison, which is a SyntaxWarning in 3.8+)
                    if location != '':
                        self.run_update(location[:-1], line)
        except FileNotFoundError:
            print('File was not located. Verify CD_update.'
                  'txt was downloaded correctly')

    def run_update(self, location, line):
        """Append `line` to the file at `location`, creating it if needed.

        Parameters
        ----------
        location : str
            Path of the file to update.  A missing parent directory
            (delimited with backslashes) is created on demand.
        line : str
            Text to append to the file.
        """
        try:
            up_file = open(location, 'a')
        except FileNotFoundError:
            # Parent directory missing: create it, then create the file.
            cut = location.find('\\')
            if not os.path.exists(location[:cut]):
                print(f'Creating Directory: {location[:cut]}')
                os.makedirs(location[:cut])
            up_file = open(location, 'w')
        # fix: the old code wrote/closed in a `finally`, which raised a
        # confusing NameError (masking the real failure) whenever the
        # fallback open above also failed; `with` guarantees the close
        # only on a successfully opened handle.
        with up_file:
            up_file.write(line)
if __name__ == '__main__':
    # Launch the updater against the default payload file.
    cdi = CDUpdate('CD_update.txt')
    cdi.mainloop()
| 36.653061 | 76 | 0.505846 |
import os
import tkinter as tk
from tkinter import ttk
class CDUpdate(tk.Tk):
def __init__(self, update_file):
location = ''
tk.Tk.__init__(self)
frame = tk.Frame()
frame.pack()
entry = tk.Text(frame, width=50, height=10)
entry.pack()
update_notes = False
try:
with open(update_file, 'r') as u_file:
for line in u_file:
if line[:9] == '[UpNotes]' or update_notes == True:
if line[:9] == '[UpNotes]':
update_notes = True
version = line[9:]
self.title(f'Update # {version}')
continue
elif line[:9] == '[DnNotes]':
update_notes = False
continue
entry.insert(tk.END, line)
elif line[:4] == '[FL]':
location = line[4:]
try:
os.remove(location[:-1])
except FileNotFoundError:
continue
continue
if location is not '':
self.run_update(location[:-1], line)
except FileNotFoundError:
print('File was not located. Verify CD_update.'
'txt was downloaded correctly')
def run_update(self, location, line):
try:
up_file = open(location, 'a')
except FileNotFoundError:
result = location.find('\\')
if not os.path.exists(location[:result]):
print(f'Creating Directory: {location[:result]}')
os.makedirs(location[:result])
up_file = open(location, 'w')
finally:
up_file.write(line)
up_file.close()
if __name__ == '__main__':
cdi = CDUpdate('CD_update.txt')
cdi.mainloop()
| true | true |
1c31b46de7c9e75374ebbdfe1bd81e71b65d2e42 | 15,073 | py | Python | trader/client.py | donaldng/bitfinex-arbitrage | 1a36ea6f354fbf2954e4341ec75a90ad208f60d1 | [
"MIT"
] | 25 | 2017-10-22T14:56:02.000Z | 2022-03-10T00:30:00.000Z | client.py | donaldng/binance_listing | 1d1612ecb57225a3b3c0854721dd9bac4b3a140a | [
"MIT"
] | 1 | 2017-09-30T03:25:43.000Z | 2017-09-30T07:40:29.000Z | client.py | donaldng/binance_listing | 1d1612ecb57225a3b3c0854721dd9bac4b3a140a | [
"MIT"
] | 8 | 2017-10-25T15:02:55.000Z | 2021-05-14T09:20:09.000Z | from __future__ import absolute_import
import requests
import json
import base64
import hmac
import hashlib
import time
PROTOCOL = "https"
HOST = "api.bitfinex.com"
VERSION = "v1"
PATH_SYMBOLS = "symbols"
PATH_TICKER = "ticker/%s"
PATH_TODAY = "today/%s"
PATH_STATS = "stats/%s"
PATH_LENDBOOK = "lendbook/%s"
PATH_ORDERBOOK = "book/%s"
# HTTP request timeout in seconds
TIMEOUT = 5.0
class TradeClient:
    """
    Authenticated client for trading through the Bitfinex v1 API.

    Every request is signed with the account's key/secret pair: the JSON
    payload is base64-encoded and HMAC-SHA384 signed, then sent in the
    ``X-BFX-*`` headers as required by the Bitfinex authenticated-endpoint
    protocol.
    """

    def __init__(self, key, secret):
        self.URL = "{0:s}://{1:s}/{2:s}".format(PROTOCOL, HOST, VERSION)
        self.KEY = key
        self.SECRET = secret

    @property
    def _nonce(self):
        """
        Returns a nonce (microsecond timestamp as a string).
        Used in authentication to prevent request replay.
        """
        return str(time.time() * 1000000)

    def _sign_payload(self, payload):
        """Return the authentication headers for ``payload`` (a dict)."""
        j = json.dumps(payload)
        data = base64.standard_b64encode(j.encode('utf8'))
        h = hmac.new(self.SECRET.encode('utf8'), data, hashlib.sha384)
        signature = h.hexdigest()
        return {
            "X-BFX-APIKEY": self.KEY,
            "X-BFX-SIGNATURE": signature,
            "X-BFX-PAYLOAD": data
        }

    def _post(self, path, payload):
        """Sign ``payload``, POST it to ``path`` and return the decoded JSON."""
        signed_payload = self._sign_payload(payload)
        r = requests.post(self.URL + path, headers=signed_payload, verify=True)
        return r.json()

    def place_order(self, amount, price, side, ord_type, symbol='btcusd', exchange='bitfinex'):
        """
        Submit a new order.

        :param amount: order size (string or number, per Bitfinex API)
        :param price: limit price
        :param side: 'buy' or 'sell'
        :param ord_type: order type, e.g. 'exchange limit'
        :param symbol: trading pair, default 'btcusd'
        :param exchange: exchange name, default 'bitfinex'
        :return: the order dict on success, or the API error message string
            when the response carries no ``order_id``.
        """
        payload = {
            "request": "/v1/order/new",
            "nonce": self._nonce,
            "symbol": symbol,
            "amount": amount,
            "price": price,
            "exchange": exchange,
            "side": side,
            "type": ord_type
        }
        json_resp = self._post("/order/new", payload)
        # A bare ``except:`` used to swallow every error here; be explicit:
        # a missing key means the API returned an error payload instead.
        if 'order_id' not in json_resp:
            return json_resp['message']
        return json_resp

    def delete_order(self, order_id):
        """
        Cancel an order.

        :param order_id: id of the order to cancel
        :return: the order dict on success, or the API error message string.
        """
        payload = {
            "request": "/v1/order/cancel",
            "nonce": self._nonce,
            "order_id": order_id
        }
        json_resp = self._post("/order/cancel", payload)
        if 'avg_execution_price' not in json_resp:
            return json_resp['message']
        return json_resp

    def delete_all_orders(self):
        """
        Cancel all active orders.

        :return: the decoded API response.
        """
        payload = {
            "request": "/v1/order/cancel/all",
            "nonce": self._nonce,
        }
        return self._post("/order/cancel/all", payload)

    def status_order(self, order_id):
        """
        Get the status of an order: is it active, was it cancelled,
        to what extent has it been executed, etc.

        :param order_id: id of the order to inspect
        :return: the order dict on success, or the API error message string.
        """
        payload = {
            "request": "/v1/order/status",
            "nonce": self._nonce,
            "order_id": order_id
        }
        json_resp = self._post("/order/status", payload)
        if 'avg_execution_price' not in json_resp:
            return json_resp['message']
        return json_resp

    def active_orders(self):
        """Fetch the list of active orders."""
        payload = {
            "request": "/v1/orders",
            "nonce": self._nonce
        }
        return self._post("/orders", payload)

    def active_positions(self):
        """Fetch the list of active positions."""
        payload = {
            "request": "/v1/positions",
            "nonce": self._nonce
        }
        return self._post("/positions", payload)

    def claim_position(self, position_id):
        """
        Claim a position.

        :param position_id: id of the position to claim
        :return: the decoded API response.
        """
        payload = {
            "request": "/v1/position/claim",
            "nonce": self._nonce,
            "position_id": position_id
        }
        return self._post("/position/claim", payload)

    def past_trades(self, timestamp=0, symbol='btcusd'):
        """
        Fetch past trades.

        :param timestamp: only return trades after this time (default 0 = all)
        :param symbol: trading pair, default 'btcusd'
        :return: the decoded API response.
        """
        payload = {
            "request": "/v1/mytrades",
            "nonce": self._nonce,
            "symbol": symbol,
            "timestamp": timestamp
        }
        return self._post("/mytrades", payload)

    def place_offer(self, currency, amount, rate, period, direction):
        """
        Submit a new margin funding offer.

        :param currency: funding currency
        :param amount: offer size
        :param rate: annual rate
        :param period: duration in days
        :param direction: 'lend' or 'loan'
        :return: the decoded API response.
        """
        payload = {
            "request": "/v1/offer/new",
            "nonce": self._nonce,
            "currency": currency,
            "amount": amount,
            "rate": rate,
            "period": period,
            "direction": direction
        }
        return self._post("/offer/new", payload)

    def cancel_offer(self, offer_id):
        """
        Cancel a funding offer.

        :param offer_id: id of the offer to cancel
        :return: the decoded API response.
        """
        payload = {
            "request": "/v1/offer/cancel",
            "nonce": self._nonce,
            "offer_id": offer_id
        }
        return self._post("/offer/cancel", payload)

    def status_offer(self, offer_id):
        """
        Get the status of a funding offer.

        :param offer_id: id of the offer to inspect
        :return: the decoded API response.
        """
        payload = {
            "request": "/v1/offer/status",
            "nonce": self._nonce,
            "offer_id": offer_id
        }
        return self._post("/offer/status", payload)

    def active_offers(self):
        """Fetch the list of active funding offers."""
        payload = {
            "request": "/v1/offers",
            "nonce": self._nonce
        }
        return self._post("/offers", payload)

    def balances(self):
        """Fetch wallet balances."""
        payload = {
            "request": "/v1/balances",
            "nonce": self._nonce
        }
        return self._post("/balances", payload)

    def history(self, currency, since=0, until=9999999999, limit=500, wallet='exchange'):
        """
        View your balance ledger entries.

        :param currency: currency to look for
        :param since: Optional. Return only the history after this timestamp.
        :param until: Optional. Return only the history before this timestamp.
        :param limit: Optional. Limit the number of entries to return. Default is 500.
        :param wallet: Optional. Return only entries that took place in this wallet.
            Accepted inputs are: "trading", "exchange", "deposit".
        """
        payload = {
            "request": "/v1/history",
            "nonce": self._nonce,
            "currency": currency,
            "since": since,
            "until": until,
            "limit": limit,
            "wallet": wallet
        }
        return self._post("/history", payload)
class Client:
    """
    Client for the bitfinex.com public API.
    See https://www.bitfinex.com/pages/api for API documentation.
    """

    def server(self):
        """Return the API base URL, e.g. ``https://api.bitfinex.com/v1``."""
        return u"{0:s}://{1:s}/{2:s}".format(PROTOCOL, HOST, VERSION)

    def url_for(self, path, path_arg=None, parameters=None):
        """Build a full endpoint URL.

        :param path: endpoint path, optionally containing ``%s`` markers
            (such as PATH_TICKER)
        :param path_arg: value interpolated into ``path`` when given
        :param parameters: optional dict appended as a query string
        """
        # build the basic url
        url = "%s/%s" % (self.server(), path)
        # If there is a path_arg, interpolate it into the URL.  In this
        # case the provided path must contain string interpolation
        # characters, such as PATH_TICKER.
        if path_arg:
            url = url % (path_arg)
        # Append any parameters to the URL.
        if parameters:
            url = "%s?%s" % (url, self._build_parameters(parameters))
        return url

    def symbols(self):
        """
        GET /symbols

        curl https://api.bitfinex.com/v1/symbols
        ['btcusd','ltcusd','ltcbtc']
        """
        return self._get(self.url_for(PATH_SYMBOLS))

    def ticker(self, symbol):
        """
        GET /ticker/:symbol

        curl https://api.bitfinex.com/v1/ticker/btcusd
        {'ask': '562.9999', 'timestamp': '1395552290.70933607',
         'bid': '562.25', 'last_price': '562.25', 'mid': '562.62495'}

        All values are converted to floats.
        """
        data = self._get(self.url_for(PATH_TICKER, (symbol)))
        return self._convert_to_floats(data)

    def today(self, symbol):
        """
        GET /today/:symbol

        curl "https://api.bitfinex.com/v1/today/btcusd"
        {"low":"550.09","high":"572.2398","volume":"7305.33119836"}

        All values are converted to floats.
        """
        data = self._get(self.url_for(PATH_TODAY, (symbol)))
        return self._convert_to_floats(data)

    def stats(self, symbol):
        """
        GET /stats/:symbol

        curl https://api.bitfinex.com/v1/stats/btcusd
        [
            {"period":1,"volume":"7410.27250155"},
            {"period":7,"volume":"52251.37118006"},
            {"period":30,"volume":"464505.07753251"}
        ]

        ``period`` values become ints and ``volume`` values floats.
        """
        data = self._get(self.url_for(PATH_STATS, (symbol)))
        for period in data:
            for key, value in period.items():
                # Convert only the known keys.  The old code assigned a
                # stale ``new_value`` (or raised NameError) when the API
                # returned an unexpected key; unknown keys are now left
                # untouched.
                if key == 'period':
                    period[key] = int(value)
                elif key == 'volume':
                    period[key] = float(value)
        return data

    def lendbook(self, currency, parameters=None):
        """
        GET /lendbook/:currency — the full margin funding book.

        curl "https://api.bitfinex.com/v1/lendbook/btc"

        Optional parameters:
        limit_bids (int): Optional. Limit the number of bids (loan demands)
            returned. May be 0, in which case the array of bids is empty.
            Default is 50.
        limit_asks (int): Optional. Limit the number of asks (loan offers)
            returned. May be 0, in which case the array of asks is empty.
            Default is 50.

        ``rate``/``amount``/``timestamp`` become floats, ``period`` an int
        and ``frr`` a bool.
        """
        data = self._get(self.url_for(PATH_LENDBOOK, path_arg=currency, parameters=parameters))
        for lend_type in data.keys():
            for lend in data[lend_type]:
                for key, value in lend.items():
                    # Same stale-value fix as in ``stats``: unknown keys
                    # are left untouched.
                    if key in ['rate', 'amount', 'timestamp']:
                        lend[key] = float(value)
                    elif key == 'period':
                        lend[key] = int(value)
                    elif key == 'frr':
                        lend[key] = value == 'Yes'
        return data

    def order_book(self, symbol, parameters=None):
        """
        GET /book/:symbol — the order book for a pair.

        curl "https://api.bitfinex.com/v1/book/btcusd"
        {"bids":[{"price":"561.1101","amount":"0.985","timestamp":"1395557729.0"}],
         "asks":[{"price":"562.9999","amount":"0.985","timestamp":"1395557711.0"}]}

        Optional parameters:
        limit_bids (int): Optional. Limit the number of bids returned.
            May be 0. Default is 50.
        limit_asks (int): Optional. Limit the number of asks returned.
            May be 0. Default is 50.

        All entry fields are converted to floats.
        """
        data = self._get(self.url_for(PATH_ORDERBOOK, path_arg=symbol, parameters=parameters))
        for side in data.keys():
            for entry in data[side]:
                for key, value in entry.items():
                    entry[key] = float(value)
        return data

    def _convert_to_floats(self, data):
        """
        Convert all values in a flat dict to floats (in place) and return it.
        """
        for key, value in data.items():
            data[key] = float(value)
        return data

    def _get(self, url):
        """Perform a GET request with the module-level timeout; return JSON."""
        return requests.get(url, timeout=TIMEOUT).json()

    def _build_parameters(self, parameters):
        # Sort the keys so the query string is deterministic (dict order
        # is otherwise implementation-dependent, which made testing hard).
        return '&'.join("%s=%s" % (k, parameters[k]) for k in sorted(parameters))
import requests
import json
import base64
import hmac
import hashlib
import time
PROTOCOL = "https"
HOST = "api.bitfinex.com"
VERSION = "v1"
PATH_SYMBOLS = "symbols"
PATH_TICKER = "ticker/%s"
PATH_TODAY = "today/%s"
PATH_STATS = "stats/%s"
PATH_LENDBOOK = "lendbook/%s"
PATH_ORDERBOOK = "book/%s"
TIMEOUT = 5.0
class TradeClient:
def __init__(self, key, secret):
self.URL = "{0:s}://{1:s}/{2:s}".format(PROTOCOL, HOST, VERSION)
self.KEY = key
self.SECRET = secret
pass
@property
def _nonce(self):
return str(time.time() * 1000000)
def _sign_payload(self, payload):
j = json.dumps(payload)
data = base64.standard_b64encode(j.encode('utf8'))
h = hmac.new(self.SECRET.encode('utf8'), data, hashlib.sha384)
signature = h.hexdigest()
return {
"X-BFX-APIKEY": self.KEY,
"X-BFX-SIGNATURE": signature,
"X-BFX-PAYLOAD": data
}
def place_order(self, amount, price, side, ord_type, symbol='btcusd', exchange='bitfinex'):
payload = {
"request": "/v1/order/new",
"nonce": self._nonce,
"symbol": symbol,
"amount": amount,
"price": price,
"exchange": exchange,
"side": side,
"type": ord_type
}
signed_payload = self._sign_payload(payload)
r = requests.post(self.URL + "/order/new", headers=signed_payload, verify=True)
json_resp = r.json()
try:
json_resp['order_id']
except:
return json_resp['message']
return json_resp
def delete_order(self, order_id):
payload = {
"request": "/v1/order/cancel",
"nonce": self._nonce,
"order_id": order_id
}
signed_payload = self._sign_payload(payload)
r = requests.post(self.URL + "/order/cancel", headers=signed_payload, verify=True)
json_resp = r.json()
try:
json_resp['avg_execution_price']
except:
return json_resp['message']
return json_resp
def delete_all_orders(self):
payload = {
"request": "/v1/order/cancel/all",
"nonce": self._nonce,
}
signed_payload = self._sign_payload(payload)
r = requests.post(self.URL + "/order/cancel/all", headers=signed_payload, verify=True)
json_resp = r.json()
return json_resp
def status_order(self, order_id):
payload = {
"request": "/v1/order/status",
"nonce": self._nonce,
"order_id": order_id
}
signed_payload = self._sign_payload(payload)
r = requests.post(self.URL + "/order/status", headers=signed_payload, verify=True)
json_resp = r.json()
try:
json_resp['avg_execution_price']
except:
return json_resp['message']
return json_resp
def active_orders(self):
payload = {
"request": "/v1/orders",
"nonce": self._nonce
}
signed_payload = self._sign_payload(payload)
r = requests.post(self.URL + "/orders", headers=signed_payload, verify=True)
json_resp = r.json()
return json_resp
def active_positions(self):
payload = {
"request": "/v1/positions",
"nonce": self._nonce
}
signed_payload = self._sign_payload(payload)
r = requests.post(self.URL + "/positions", headers=signed_payload, verify=True)
json_resp = r.json()
return json_resp
def claim_position(self, position_id):
payload = {
"request": "/v1/position/claim",
"nonce": self._nonce,
"position_id": position_id
}
signed_payload = self._sign_payload(payload)
r = requests.post(self.URL + "/position/claim", headers=signed_payload, verify=True)
json_resp = r.json()
return json_resp
def past_trades(self, timestamp=0, symbol='btcusd'):
payload = {
"request": "/v1/mytrades",
"nonce": self._nonce,
"symbol": symbol,
"timestamp": timestamp
}
signed_payload = self._sign_payload(payload)
r = requests.post(self.URL + "/mytrades", headers=signed_payload, verify=True)
json_resp = r.json()
return json_resp
def place_offer(self, currency, amount, rate, period, direction):
payload = {
"request": "/v1/offer/new",
"nonce": self._nonce,
"currency": currency,
"amount": amount,
"rate": rate,
"period": period,
"direction": direction
}
signed_payload = self._sign_payload(payload)
r = requests.post(self.URL + "/offer/new", headers=signed_payload, verify=True)
json_resp = r.json()
return json_resp
def cancel_offer(self, offer_id):
payload = {
"request": "/v1/offer/cancel",
"nonce": self._nonce,
"offer_id": offer_id
}
signed_payload = self._sign_payload(payload)
r = requests.post(self.URL + "/offer/cancel", headers=signed_payload, verify=True)
json_resp = r.json()
return json_resp
def status_offer(self, offer_id):
payload = {
"request": "/v1/offer/status",
"nonce": self._nonce,
"offer_id": offer_id
}
signed_payload = self._sign_payload(payload)
r = requests.post(self.URL + "/offer/status", headers=signed_payload, verify=True)
json_resp = r.json()
return json_resp
def active_offers(self):
payload = {
"request": "/v1/offers",
"nonce": self._nonce
}
signed_payload = self._sign_payload(payload)
r = requests.post(self.URL + "/offers", headers=signed_payload, verify=True)
json_resp = r.json()
return json_resp
def balances(self):
payload = {
"request": "/v1/balances",
"nonce": self._nonce
}
signed_payload = self._sign_payload(payload)
r = requests.post(self.URL + "/balances", headers=signed_payload, verify=True)
json_resp = r.json()
return json_resp
def history(self, currency, since=0, until=9999999999, limit=500, wallet='exchange'):
payload = {
"request": "/v1/history",
"nonce": self._nonce,
"currency": currency,
"since": since,
"until": until,
"limit": limit,
"wallet": wallet
}
signed_payload = self._sign_payload(payload)
r = requests.post(self.URL + "/history", headers=signed_payload, verify=True)
json_resp = r.json()
return json_resp
class Client:
def server(self):
return u"{0:s}://{1:s}/{2:s}".format(PROTOCOL, HOST, VERSION)
def url_for(self, path, path_arg=None, parameters=None):
url = "%s/%s" % (self.server(), path)
if path_arg:
url = url % (path_arg)
if parameters:
url = "%s?%s" % (url, self._build_parameters(parameters))
return url
def symbols(self):
return self._get(self.url_for(PATH_SYMBOLS))
def ticker(self, symbol):
data = self._get(self.url_for(PATH_TICKER, (symbol)))
return self._convert_to_floats(data)
def today(self, symbol):
data = self._get(self.url_for(PATH_TODAY, (symbol)))
return self._convert_to_floats(data)
def stats(self, symbol):
data = self._get(self.url_for(PATH_STATS, (symbol)))
for period in data:
for key, value in period.items():
if key == 'period':
new_value = int(value)
elif key == 'volume':
new_value = float(value)
period[key] = new_value
return data
def lendbook(self, currency, parameters=None):
data = self._get(self.url_for(PATH_LENDBOOK, path_arg=currency, parameters=parameters))
for lend_type in data.keys():
for lend in data[lend_type]:
for key, value in lend.items():
if key in ['rate', 'amount', 'timestamp']:
new_value = float(value)
elif key == 'period':
new_value = int(value)
elif key == 'frr':
new_value = value == 'Yes'
lend[key] = new_value
return data
def order_book(self, symbol, parameters=None):
data = self._get(self.url_for(PATH_ORDERBOOK, path_arg=symbol, parameters=parameters))
for type_ in data.keys():
for list_ in data[type_]:
for key, value in list_.items():
list_[key] = float(value)
return data
def _convert_to_floats(self, data):
for key, value in data.items():
data[key] = float(value)
return data
def _get(self, url):
return requests.get(url, timeout=TIMEOUT).json()
def _build_parameters(self, parameters):
keys = list(parameters.keys())
keys.sort()
return '&'.join(["%s=%s" % (k, parameters[k]) for k in keys]) | true | true |
1c31b4a81218c83e6510e31d8999caf6d59eb188 | 411 | py | Python | mongodb_dialect/__init__.py | kitfactory/mongodb_dialect | 1746d2aca08be4b12adb74e3a6806f8d840f6382 | [
"MIT"
] | null | null | null | mongodb_dialect/__init__.py | kitfactory/mongodb_dialect | 1746d2aca08be4b12adb74e3a6806f8d840f6382 | [
"MIT"
] | null | null | null | mongodb_dialect/__init__.py | kitfactory/mongodb_dialect | 1746d2aca08be4b12adb74e3a6806f8d840f6382 | [
"MIT"
] | null | null | null | __version__ = '0.1.0'
from .connection import connect
from .exception import *
from . dialect import *
paramstyle = 'pyformat'
threadsafety = 2
__all__ = [
'MongoDBDialect',
'connect', 'apilevel', 'threadsafety', 'paramstyle',
'Warning', 'Error', 'InterfaceError', 'DatabaseError', 'DataError', 'OperationalError', 'IntegrityError',
'InternalError', 'ProgrammingError', 'NotSupportedError'
] | 27.4 | 109 | 0.705596 | __version__ = '0.1.0'
from .connection import connect
from .exception import *
from . dialect import *
paramstyle = 'pyformat'
threadsafety = 2
__all__ = [
'MongoDBDialect',
'connect', 'apilevel', 'threadsafety', 'paramstyle',
'Warning', 'Error', 'InterfaceError', 'DatabaseError', 'DataError', 'OperationalError', 'IntegrityError',
'InternalError', 'ProgrammingError', 'NotSupportedError'
] | true | true |
1c31b657f81ad541c8ada79849a28f80c42ac404 | 1,367 | py | Python | bluebottle/funding/migrations/0005_auto_20190604_1501.py | jayvdb/bluebottle | 305fea238e6aa831598a8b227223a1a2f34c4fcc | [
"BSD-3-Clause"
] | null | null | null | bluebottle/funding/migrations/0005_auto_20190604_1501.py | jayvdb/bluebottle | 305fea238e6aa831598a8b227223a1a2f34c4fcc | [
"BSD-3-Clause"
] | null | null | null | bluebottle/funding/migrations/0005_auto_20190604_1501.py | jayvdb/bluebottle | 305fea238e6aa831598a8b227223a1a2f34c4fcc | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-06-04 13:01
from __future__ import unicode_literals
import bluebottle.utils.fields
from decimal import Decimal
from django.db import migrations
import djmoney.models.fields
class Migration(migrations.Migration):
    """Alter the Donation/Funding money columns to 12-digit, EUR-only
    MoneyField / CurrencyField definitions (auto-generated by Django)."""
    dependencies = [
        ('funding', '0004_auto_20190604_1501'),
    ]
    operations = [
        # Donation.amount: MoneyField limited to EUR, 12 digits / 2 decimals,
        # defaulting to 0.0.
        migrations.AlterField(
            model_name='donation',
            name='amount',
            field=bluebottle.utils.fields.MoneyField(currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=Decimal('0.0'), max_digits=12),
        ),
        # Companion currency column for Donation.amount; fixed to EUR and
        # not user-editable.
        migrations.AlterField(
            model_name='donation',
            name='amount_currency',
            field=djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=3),
        ),
        # Funding.target: same EUR MoneyField shape as Donation.amount.
        migrations.AlterField(
            model_name='funding',
            name='target',
            field=bluebottle.utils.fields.MoneyField(currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=Decimal('0.0'), max_digits=12),
        ),
        # Companion currency column for Funding.target.
        migrations.AlterField(
            model_name='funding',
            name='target_currency',
            field=djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=3),
        ),
    ]
| 35.051282 | 149 | 0.62692 |
from __future__ import unicode_literals
import bluebottle.utils.fields
from decimal import Decimal
from django.db import migrations
import djmoney.models.fields
class Migration(migrations.Migration):
dependencies = [
('funding', '0004_auto_20190604_1501'),
]
operations = [
migrations.AlterField(
model_name='donation',
name='amount',
field=bluebottle.utils.fields.MoneyField(currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=Decimal('0.0'), max_digits=12),
),
migrations.AlterField(
model_name='donation',
name='amount_currency',
field=djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=3),
),
migrations.AlterField(
model_name='funding',
name='target',
field=bluebottle.utils.fields.MoneyField(currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=Decimal('0.0'), max_digits=12),
),
migrations.AlterField(
model_name='funding',
name='target_currency',
field=djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=3),
),
]
| true | true |
1c31b77eb6b97b7bc5571ee6bb86ac4981a313f9 | 750 | py | Python | tests/test_supersuit.py | happyCoderJDFJJ/pyrfuniverse | 8ddb6e0d8f113015ba820a327388a528a8b215c7 | [
"Apache-2.0"
] | null | null | null | tests/test_supersuit.py | happyCoderJDFJJ/pyrfuniverse | 8ddb6e0d8f113015ba820a327388a528a8b215c7 | [
"Apache-2.0"
] | null | null | null | tests/test_supersuit.py | happyCoderJDFJJ/pyrfuniverse | 8ddb6e0d8f113015ba820a327388a528a8b215c7 | [
"Apache-2.0"
] | null | null | null | from stable_baselines3.ppo import CnnPolicy
from stable_baselines3 import PPO, SAC
from stable_baselines3.common.monitor import Monitor
from pettingzoo.butterfly import pistonball_v4
import supersuit as ss
env = pistonball_v4.parallel_env(n_pistons=20, local_ratio=0, time_penalty=-0.1, continuous=True, random_drop=True, random_rotate=True, ball_mass=0.75, ball_friction=0.3, ball_elasticity=1.5, max_cycles=125)
env = ss.color_reduction_v0(env, mode='B')
env = ss.resize_v0(env, x_size=84, y_size=84)
env = ss.frame_stack_v1(env, 3)
env = ss.pettingzoo_env_to_vec_env_v0(env)
env = ss.concat_vec_envs_v0(env, 8, num_cpus=4, base_class='stable_baselines3')
model = PPO('CnnPolicy', env)
model.learn(total_timesteps=2000000)
model.save('policy')
| 39.473684 | 207 | 0.801333 | from stable_baselines3.ppo import CnnPolicy
from stable_baselines3 import PPO, SAC
from stable_baselines3.common.monitor import Monitor
from pettingzoo.butterfly import pistonball_v4
import supersuit as ss
env = pistonball_v4.parallel_env(n_pistons=20, local_ratio=0, time_penalty=-0.1, continuous=True, random_drop=True, random_rotate=True, ball_mass=0.75, ball_friction=0.3, ball_elasticity=1.5, max_cycles=125)
env = ss.color_reduction_v0(env, mode='B')
env = ss.resize_v0(env, x_size=84, y_size=84)
env = ss.frame_stack_v1(env, 3)
env = ss.pettingzoo_env_to_vec_env_v0(env)
env = ss.concat_vec_envs_v0(env, 8, num_cpus=4, base_class='stable_baselines3')
model = PPO('CnnPolicy', env)
model.learn(total_timesteps=2000000)
model.save('policy')
| true | true |
1c31b7f706635b11d0d43ccc58b346296e89beb9 | 252 | py | Python | Python/04. Sets/012. Check Subset.py | AnTeater515/HackerRank-Coding_Solutions | c5c1337ed6d70c6d7a1850b2a99aab10c285c290 | [
"MIT"
] | 1 | 2020-10-18T22:06:12.000Z | 2020-10-18T22:06:12.000Z | Python/04. Sets/012. Check Subset.py | AnTeater515/HackerRank-Coding-Solutions_Python3_C-_Oracle-SQL | c5c1337ed6d70c6d7a1850b2a99aab10c285c290 | [
"MIT"
] | null | null | null | Python/04. Sets/012. Check Subset.py | AnTeater515/HackerRank-Coding-Solutions_Python3_C-_Oracle-SQL | c5c1337ed6d70c6d7a1850b2a99aab10c285c290 | [
"MIT"
] | null | null | null | # Problem: https://www.hackerrank.com/challenges/py-check-subset/problem
# Score: 10
def _read_int_set():
    """Read one test-case set: discard the count line, parse the elements."""
    input()  # element count — not needed, the split gives it implicitly
    return set(map(int, input().split()))


for _ in range(int(input())):
    candidate = _read_int_set()
    container = _read_int_set()
    print(candidate <= container)
| 28 | 73 | 0.599206 |
for i in range(int(input())):
_, a = input(), set(map(int, input().split()))
_, b = input(), set(map(int, input().split()))
print(a.issubset(b))
| true | true |
1c31b82a678a1cc875ef2f4bf9fb114401204dc4 | 1,806 | py | Python | tools/pnnx/tests/ncnn/test_nn_ChannelShuffle.py | fzyzcjy/ncnn | 42e71609508fde1bd54d9d9de6ca5522ee3bcf37 | [
"BSD-3-Clause"
] | 14,886 | 2017-07-24T02:58:35.000Z | 2022-03-31T18:17:04.000Z | tools/pnnx/tests/ncnn/test_nn_ChannelShuffle.py | fzyzcjy/ncnn | 42e71609508fde1bd54d9d9de6ca5522ee3bcf37 | [
"BSD-3-Clause"
] | 3,361 | 2017-07-24T05:56:31.000Z | 2022-03-31T13:26:35.000Z | tools/pnnx/tests/ncnn/test_nn_ChannelShuffle.py | fzyzcjy/ncnn | 42e71609508fde1bd54d9d9de6ca5522ee3bcf37 | [
"BSD-3-Clause"
] | 3,786 | 2017-07-24T03:09:15.000Z | 2022-03-31T16:56:40.000Z | # Tencent is pleased to support the open source community by making ncnn available.
#
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
    """Runs each input through two cascaded channel shuffles
    (groups=2 followed by groups=16)."""

    def __init__(self):
        super(Model, self).__init__()
        self.shuffle_0 = nn.ChannelShuffle(2)
        self.shuffle_1 = nn.ChannelShuffle(16)

    def forward(self, x, y):
        # Both inputs go through the identical shuffle pipeline.
        outputs = [self.shuffle_1(self.shuffle_0(t)) for t in (x, y)]
        return outputs[0], outputs[1]
def test():
    """Trace the model, convert it via pnnx to ncnn, then compare the
    torch reference outputs against the ncnn inference results."""
    net = Model()
    net.eval()

    torch.manual_seed(0)
    inputs = (torch.rand(1, 64, 6, 8), torch.rand(1, 96, 7, 9))
    ref0, ref1 = net(*inputs)

    # export torchscript
    torch.jit.trace(net, inputs).save("test_nn_ChannelShuffle.pt")

    # torchscript to pnnx (which also emits the ncnn model + test harness)
    import os
    os.system("../../src/pnnx test_nn_ChannelShuffle.pt inputshape=[1,64,6,8],[1,96,7,9]")

    # ncnn inference via the generated module
    import test_nn_ChannelShuffle_ncnn
    out0, out1 = test_nn_ChannelShuffle_ncnn.test_inference()

    return torch.allclose(ref0, out0, 1e-4, 1e-4) and torch.allclose(ref1, out1, 1e-4, 1e-4)
if __name__ == "__main__":
    # Exit status 0 on success so CI can consume the result directly.
    exit(0 if test() else 1)
| 28.666667 | 91 | 0.669989 |
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.shuffle_0 = nn.ChannelShuffle(2)
self.shuffle_1 = nn.ChannelShuffle(16)
def forward(self, x, y):
x = self.shuffle_0(x)
x = self.shuffle_1(x)
y = self.shuffle_0(y)
y = self.shuffle_1(y)
return x, y
def test():
net = Model()
net.eval()
torch.manual_seed(0)
x = torch.rand(1, 64, 6, 8)
y = torch.rand(1, 96, 7, 9)
a0, a1 = net(x, y)
mod = torch.jit.trace(net, (x, y))
mod.save("test_nn_ChannelShuffle.pt")
import os
os.system("../../src/pnnx test_nn_ChannelShuffle.pt inputshape=[1,64,6,8],[1,96,7,9]")
import test_nn_ChannelShuffle_ncnn
b0, b1 = test_nn_ChannelShuffle_ncnn.test_inference()
return torch.allclose(a0, b0, 1e-4, 1e-4) and torch.allclose(a1, b1, 1e-4, 1e-4)
if __name__ == "__main__":
if test():
exit(0)
else:
exit(1)
| true | true |
1c31b952bea35ca4d28bfa63b911709bd56f3ded | 1,550 | py | Python | clone_repo/parse_repo_url.py | micktwomey/clone-repo | f3d64f286174df4e64d25d89c21105d9a2cb99cf | [
"MIT"
] | null | null | null | clone_repo/parse_repo_url.py | micktwomey/clone-repo | f3d64f286174df4e64d25d89c21105d9a2cb99cf | [
"MIT"
] | null | null | null | clone_repo/parse_repo_url.py | micktwomey/clone-repo | f3d64f286174df4e64d25d89c21105d9a2cb99cf | [
"MIT"
] | null | null | null | """Parse git/hd/other repo urls into a neat usable format
Tries to handle all the exotic repos, suitable for cloning.
"""
import dataclasses
import pathlib
import re
import typing
@dataclasses.dataclass
class RepoURL:
    """Normalised description of a repository location."""

    url: str      # the original, unmodified URL
    tool: str     # VCS command to use ("git" or "hg")
    host: str     # host name, possibly carrying a ":port" suffix
    group: str    # owning user/organisation ("" when absent)
    project: str  # repository name, stripped of ".git" and a trailing "/"


# One regex handles scp-style (git@host:group/proj), scheme URLs
# (https://host/group/proj) and ssh URLs with an embedded tool user.
_REMOTE_URL = re.compile(
    r"(?P<tool>[^:@]+)(://|@)(?P<tool_user>hg@|git@){0,1}(?P<host>[^/:]+):{0,1}(?P<port>[0-9]+){0,1}(/|:)((?P<group>[^/]+)/){0,1}(?P<project>.+)"
)


def parse_url(url: str) -> typing.Optional[RepoURL]:
    """Parses the given repo url; returns None when it cannot be parsed."""
    # Local paths and file:// URLs short-circuit to a synthetic record.
    if url.startswith("file://") or str(pathlib.Path(url).expanduser()).startswith("/"):
        return RepoURL(url, "git", "localhost", "file", pathlib.Path(url).name)

    match = _REMOTE_URL.match(url)
    if match is None:
        return None
    parts = match.groupdict()

    # Map schemes / transport names onto the actual VCS command.
    tool = parts["tool"]
    if tool == "ssh":
        tool = "hg" if parts.get("tool_user") == "hg@" else "git"
    if tool in ("keybase", "https", "man"):
        tool = "git"

    host = parts["host"]
    if host.startswith("hg."):
        # Hosts conventionally prefixed "hg." serve Mercurial repos.
        tool = "hg"
    port = parts.get("port")
    if port is not None:
        host = "{}:{}".format(host, port)
    if parts["tool"] == "keybase":
        host = "keybase"

    group = parts["group"] or ""

    # Drop a ".git" suffix and at most one trailing slash.
    project = re.split(r"\.git$", parts["project"])[0]
    if project.endswith("/"):
        project = project[:-1]

    return RepoURL(url, tool, host, group, project)
| 26.724138 | 150 | 0.554194 |
import dataclasses
import pathlib
import re
import typing
@dataclasses.dataclass
class RepoURL:
url: str
tool: str
host: str
group: str
project: str
def parse_url(url: str) -> typing.Optional[RepoURL]:
if url.startswith("file://") or str(pathlib.Path(url).expanduser()).startswith("/"):
return RepoURL(url, "git", "localhost", "file", pathlib.Path(url).name)
m = re.match(
r"(?P<tool>[^:@]+)(://|@)(?P<tool_user>hg@|git@){0,1}(?P<host>[^/:]+):{0,1}(?P<port>[0-9]+){0,1}(/|:)((?P<group>[^/]+)/){0,1}(?P<project>.+)",
url,
)
if m is None:
return None
matched = m.groupdict()
tool = matched["tool"]
tool_user = matched.get("tool_user", None)
if tool == "ssh":
if tool_user == "hg@":
tool = "hg"
else:
tool = "git"
if tool in ("keybase", "https", "man"):
tool = "git"
host = matched["host"]
if host.startswith("hg."):
tool = "hg"
port = matched.get("port", None)
if port is not None:
host = "{}:{}".format(host, port)
if matched["tool"] == "keybase":
host = "keybase"
group = matched["group"]
if group is None:
group = ""
project = re.split(r"\.git$", matched["project"])[0]
if project.endswith("/"):
project = project[:-1]
return RepoURL(url, tool, host, group, project)
| true | true |
1c31b9609ade94f7c39616819c2323dc09113224 | 4,729 | py | Python | src/boxes/datatypes/units.py | Peilonrayz/alphabet_learner | 13229e53215e3c050f106e00e34f90ca2d6fa256 | [
"MIT"
] | null | null | null | src/boxes/datatypes/units.py | Peilonrayz/alphabet_learner | 13229e53215e3c050f106e00e34f90ca2d6fa256 | [
"MIT"
] | null | null | null | src/boxes/datatypes/units.py | Peilonrayz/alphabet_learner | 13229e53215e3c050f106e00e34f90ca2d6fa256 | [
"MIT"
] | null | null | null | import enum
from dataclasses import dataclass
from typing import Optional, ClassVar, Dict
class UnitTypes(enum.Enum):
    """Dimension categories (CSS-style) a unit string can belong to.

    NONE marks abstract bases; concrete subclasses of Unit carry one of
    the other members so that an ambiguous unit string (e.g. '%') can be
    resolved per category.
    """
    NONE = 0
    FLEX = enum.auto()
    ANGLE = enum.auto()
    FREQUENCY = enum.auto()
    LENGTH = enum.auto()
    RESOLUTION = enum.auto()
    TIME = enum.auto()
@dataclass
class Unit:
    """A dimensional value (e.g. ``12px``) plus a registry of unit classes.

    Subclasses register themselves in ``UNITS`` keyed by ``(unit, type)``
    tuples via ``__init_subclass__``; :meth:`build` resolves a raw value
    and unit string back to the appropriate subclass.  The same unit
    string (e.g. ``'%'``) may legitimately be registered for several
    :class:`UnitTypes`, which is why the key includes the type.
    """

    value: float

    UNIT: ClassVar[Optional[str]] = None
    # Registry mapping (unit-string, UnitTypes) -> subclass.
    UNITS: ClassVar[Dict[str, "Unit"]] = {}
    TYPE: ClassVar[UnitTypes] = UnitTypes.NONE

    def __init_subclass__(cls, /, unit: Optional[str], type=None, **kwargs):
        super().__init_subclass__(**kwargs)
        cls.UNIT = unit
        cls.UNITS = {}
        # Inherit the type from the nearest typed ancestor when omitted.
        cls.TYPE = type = _get_type(cls) if type is None else type
        if unit is None:
            return
        # BUG FIX: the registry is keyed by (unit, type) tuples, so the
        # duplicate check must look up the same tuple — the old lookup by
        # bare ``unit`` never matched, making the check dead code.  The
        # error message was also missing its f-string prefix.
        cls2 = Unit.UNITS.get((unit, type))
        if cls2 is not None:
            raise ValueError(f"Multiple classes with unit {unit} - {cls} and {cls2}")
        # Register into every ancestor's registry (excluding object), so
        # lookups work from Unit or any intermediate base.
        for c in cls.__mro__[1:-1]:
            c.UNITS[(unit, type)] = cls

    @classmethod
    def build(cls, value: float, unit: Optional[str] = None, type: Optional[UnitTypes] = None):
        """Resolve ``value`` and ``unit`` to a registered subclass instance.

        A bare 0 with no unit maps to the shared ``ZERO`` sentinel; any
        other unit-less value is an error.  Raises ValueError for unknown
        or ambiguous (multi-type) units.
        """
        if unit is None:
            if value == 0:
                return ZERO
            else:
                raise ValueError("Unit must be specified with non-zero value")
        classes = _get_classes(
            cls.UNITS,
            unit,
            list(UnitTypes) if type is None else [type],
        )
        if len(classes) == 0:
            if type is None:
                raise ValueError(f"Unknown unit {unit!r}")
            else:
                raise ValueError(f"Unknown unit {unit!r} with type {type}")
        if len(classes) != 1:
            raise ValueError(f"Ambiguous unit {unit!r} please specify a type")
        return classes[0](value)

    def __str__(self):
        return f"{self.value}{'' if self.UNIT is None else self.UNIT}"
ZERO = Unit(0)
def _get_type(cls):
    """Return the TYPE inherited from the nearest ancestor that sets one,
    or UnitTypes.NONE when no ancestor does."""
    ancestor_types = (base.TYPE for base in cls.__mro__[1:-1])
    return next((t for t in ancestor_types if t is not UnitTypes.NONE), UnitTypes.NONE)
def _get_classes(units, unit, UnitTypes):
classes = []
for type in UnitTypes:
cls = units.get((unit, type))
if cls is not None:
classes.append(cls)
return classes
# --- Concrete unit classes -------------------------------------------------
# Each subclass registers itself with Unit via __init_subclass__ keyword
# arguments: ``unit`` is the literal suffix string, ``type`` the dimension
# category (inherited from the nearest typed ancestor when omitted).
class Dimension(Unit, unit=None):
    pass
# Flexible length fraction (CSS grid 'fr').
class Flex(Dimension, unit='fr', type=UnitTypes.FLEX):
    pass
# Angle units (CSS <angle>).
class Angle(Dimension, unit=None, type=UnitTypes.ANGLE):
    pass
class AngleDegree(Angle, unit='deg'):
    pass
class AngleRadian(Angle, unit='rad'):
    pass
class AngleGradian(Angle, unit='grad'):
    pass
class AngleTurn(Angle, unit='turn'):
    pass
# Frequency units (CSS <frequency>).
class Frequency(Dimension, unit=None, type=UnitTypes.FREQUENCY):
    pass
class FrequencyHertz(Frequency, unit='Hz'):
    pass
# NOTE(review): 'KhZ' is an unusual capitalisation (SI would be 'kHz') —
# confirm before renaming, existing callers may depend on this exact string.
class FrequencyKiloHertz(Frequency, unit='KhZ'):
    pass
# Length units (CSS <length>): font-relative, viewport-relative, absolute.
class Length(Dimension, unit=None, type=UnitTypes.LENGTH):
    pass
class LengthCap(Length, unit='cap'):
    pass
class LengthCh(Length, unit='ch'):
    pass
class LengthEm(Length, unit='em'):
    pass
class LengthEx(Length, unit='ex'):
    pass
class LengthIc(Length, unit='ic'):
    pass
class LengthLineHeight(Length, unit='lh'):
    pass
class LengthRootEm(Length, unit='rem'):
    pass
class LengthRootLineHeight(Length, unit='rlh'):
    pass
class LengthViewportHeight(Length, unit='vh'):
    pass
class LengthViewportWidth(Length, unit='vw'):
    pass
class LengthRootInline(Length, unit='vi'):
    pass
class LengthRootBlock(Length, unit='vb'):
    pass
class LengthViewportMin(Length, unit='vmin'):
    pass
class LengthViewportMax(Length, unit='vmax'):
    pass
class LengthPixel(Length, unit='px'):
    pass
class LengthCentiMeter(Length, unit='cm'):
    pass
class LengthMilliMeter(Length, unit='mm'):
    pass
class LengthQ(Length, unit='Q'):
    pass
class LengthInch(Length, unit='in'):
    pass
class LengthPica(Length, unit='pc'):
    pass
class LengthPoint(Length, unit='pt'):
    pass
# Resolution units (CSS <resolution>).
class Resolution(Dimension, unit=None, type=UnitTypes.RESOLUTION):
    pass
class ResolutionInch(Resolution, unit='dpi'):
    pass
class ResolutionCentiMeter(Resolution, unit='dpcm'):
    pass
class ResolutionPixel(Resolution, unit='dppx'):
    pass
# Time units (CSS <time>).
class Time(Dimension, unit=None, type=UnitTypes.TIME):
    pass
class TimeSeconds(Time, unit='s'):
    pass
class TimeMilliSeconds(Time, unit='ms'):
    pass
# Percentages: '%' is registered once per dimension category, so
# Unit.build needs an explicit ``type`` to disambiguate it.
class Percentage(Unit, unit=None):
    pass
class PercentageAngle(Percentage, unit='%', type=UnitTypes.ANGLE):
    pass
class PercentageFrequency(Percentage, unit='%', type=UnitTypes.FREQUENCY):
    pass
class PercentageLength(Percentage, unit='%', type=UnitTypes.LENGTH):
    pass
class PercentageTime(Percentage, unit='%', type=UnitTypes.TIME):
    pass
| 18.61811 | 93 | 0.635864 | import enum
from dataclasses import dataclass
from typing import Optional, ClassVar, Dict
class UnitTypes(enum.Enum):
NONE = 0
FLEX = enum.auto()
ANGLE = enum.auto()
FREQUENCY = enum.auto()
LENGTH = enum.auto()
RESOLUTION = enum.auto()
TIME = enum.auto()
@dataclass
class Unit:
value: float
UNIT: ClassVar[Optional[str]] = None
UNITS: ClassVar[Dict[str, "Unit"]] = {}
TYPE: ClassVar[UnitTypes] = UnitTypes.NONE
def __init_subclass__(cls, /, unit: Optional[str], type=None, **kwargs):
super().__init_subclass__(**kwargs)
cls.UNIT = unit
cls.UNITS = {}
cls.TYPE = type = _get_type(cls) if type is None else type
if unit is None:
return
cls2 = Unit.UNITS.get(unit)
if cls2 is not None:
raise ValueError("Multiple classes with unit {unit} - {cls} and {cls2}")
for c in cls.__mro__[1:-1]:
c.UNITS[(unit, type)] = cls
@classmethod
def build(cls, value: float, unit: Optional[str] = None, type: Optional[UnitTypes]=None):
if unit is None:
if value == 0:
return ZERO
else:
raise ValueError(f"Unit must be specified with non-zero value")
classes = _get_classes(
cls.UNITS,
unit,
list(UnitTypes) if type is None else [type],
)
if len(classes) == 0:
if type is None:
raise ValueError(f"Unknown unit {unit!r}")
else:
raise ValueError(f"Unknown unit {unit!r} with type {type}")
if len(classes) != 1:
raise ValueError(f"Ambiguous unit {unit!r} please specify a type")
return classes[0](value)
def __str__(self):
return f"{self.value}{'' if self.UNIT is None else self.UNIT}"
ZERO = Unit(0)
def _get_type(cls):
for _cls in cls.__mro__[1:-1]:
if _cls.TYPE is not UnitTypes.NONE:
return _cls.TYPE
return UnitTypes.NONE
def _get_classes(units, unit, UnitTypes):
classes = []
for type in UnitTypes:
cls = units.get((unit, type))
if cls is not None:
classes.append(cls)
return classes
class Dimension(Unit, unit=None):
pass
class Flex(Dimension, unit='fr', type=UnitTypes.FLEX):
pass
class Angle(Dimension, unit=None, type=UnitTypes.ANGLE):
pass
class AngleDegree(Angle, unit='deg'):
pass
class AngleRadian(Angle, unit='rad'):
pass
class AngleGradian(Angle, unit='grad'):
pass
class AngleTurn(Angle, unit='turn'):
pass
class Frequency(Dimension, unit=None, type=UnitTypes.FREQUENCY):
pass
class FrequencyHertz(Frequency, unit='Hz'):
pass
class FrequencyKiloHertz(Frequency, unit='KhZ'):
pass
class Length(Dimension, unit=None, type=UnitTypes.LENGTH):
pass
class LengthCap(Length, unit='cap'):
pass
class LengthCh(Length, unit='ch'):
pass
class LengthEm(Length, unit='em'):
pass
class LengthEx(Length, unit='ex'):
pass
class LengthIc(Length, unit='ic'):
pass
class LengthLineHeight(Length, unit='lh'):
pass
class LengthRootEm(Length, unit='rem'):
pass
class LengthRootLineHeight(Length, unit='rlh'):
pass
class LengthViewportHeight(Length, unit='vh'):
pass
class LengthViewportWidth(Length, unit='vw'):
pass
class LengthRootInline(Length, unit='vi'):
pass
class LengthRootBlock(Length, unit='vb'):
pass
class LengthViewportMin(Length, unit='vmin'):
pass
class LengthViewportMax(Length, unit='vmax'):
pass
class LengthPixel(Length, unit='px'):
pass
class LengthCentiMeter(Length, unit='cm'):
pass
class LengthMilliMeter(Length, unit='mm'):
pass
class LengthQ(Length, unit='Q'):
pass
class LengthInch(Length, unit='in'):
pass
class LengthPica(Length, unit='pc'):
pass
class LengthPoint(Length, unit='pt'):
pass
class Resolution(Dimension, unit=None, type=UnitTypes.RESOLUTION):
pass
class ResolutionInch(Resolution, unit='dpi'):
pass
class ResolutionCentiMeter(Resolution, unit='dpcm'):
pass
class ResolutionPixel(Resolution, unit='dppx'):
pass
class Time(Dimension, unit=None, type=UnitTypes.TIME):
pass
class TimeSeconds(Time, unit='s'):
pass
class TimeMilliSeconds(Time, unit='ms'):
pass
class Percentage(Unit, unit=None):
pass
class PercentageAngle(Percentage, unit='%', type=UnitTypes.ANGLE):
pass
class PercentageFrequency(Percentage, unit='%', type=UnitTypes.FREQUENCY):
pass
class PercentageLength(Percentage, unit='%', type=UnitTypes.LENGTH):
pass
class PercentageTime(Percentage, unit='%', type=UnitTypes.TIME):
pass
| true | true |
1c31bba40692dfe153c0db3dc2084f4d5e849a9e | 7,912 | py | Python | docs/conf.py | brendan1mcmanus/whartonfintech-v3 | 6de82024edfbf76e9fcf5dfce61ce528279bc888 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | brendan1mcmanus/whartonfintech-v3 | 6de82024edfbf76e9fcf5dfce61ce528279bc888 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | brendan1mcmanus/whartonfintech-v3 | 6de82024edfbf76e9fcf5dfce61ce528279bc888 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# whartonfintech documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'whartonfintech'
copyright = u"2015, Sudipta Bandyopadhyay"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'whartonfintechdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'whartonfintech.tex',
u'whartonfintech Documentation',
u"Sudipta Bandyopadhyay", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'whartonfintech', u'whartonfintech Documentation',
[u"Sudipta Bandyopadhyay"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'whartonfintech', u'whartonfintech Documentation',
u"Sudipta Bandyopadhyay", 'whartonfintech',
'Website for the first student-run FinTech initiative', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 32.162602 | 80 | 0.710566 |
import os
import sys
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'whartonfintech'
copyright = u"2015, Sudipta Bandyopadhyay"
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'whartonfintechdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'whartonfintech.tex',
u'whartonfintech Documentation',
u"Sudipta Bandyopadhyay", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'whartonfintech', u'whartonfintech Documentation',
[u"Sudipta Bandyopadhyay"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'whartonfintech', u'whartonfintech Documentation',
u"Sudipta Bandyopadhyay", 'whartonfintech',
'Website for the first student-run FinTech initiative', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| true | true |
1c31bbaacea3596ce294897cfb99e0b71ef1b8b6 | 3,104 | bzl | Python | asylo/bazel/sim_enclave.bzl | kevin405/mosl_vsgx_migration | 76ddd438c8caad1051ea9a7e2040bf6ccee996a2 | [
"Apache-2.0"
] | null | null | null | asylo/bazel/sim_enclave.bzl | kevin405/mosl_vsgx_migration | 76ddd438c8caad1051ea9a7e2040bf6ccee996a2 | [
"Apache-2.0"
] | null | null | null | asylo/bazel/sim_enclave.bzl | kevin405/mosl_vsgx_migration | 76ddd438c8caad1051ea9a7e2040bf6ccee996a2 | [
"Apache-2.0"
] | 1 | 2019-01-02T22:04:21.000Z | 2019-01-02T22:04:21.000Z | #
# Copyright 2018 Asylo authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Rule definitions for creating targets for simulated Asylo enclaves."""
load("@com_google_asylo_backend_provider//:enclave_info.bzl", "EnclaveInfo")
SimEnclaveInfo = provider()
def _reprovide_binary_with_enclave_info_impl(ctx):
return [
DefaultInfo(
files = ctx.attr.binary[DefaultInfo].files,
data_runfiles = ctx.attr.binary[DefaultInfo].data_runfiles,
default_runfiles = ctx.attr.binary[DefaultInfo].default_runfiles,
),
SimEnclaveInfo(),
EnclaveInfo(),
]
_reprovide_binary_with_enclave_info = rule(
implementation = _reprovide_binary_with_enclave_info_impl,
attrs = {
"binary": attr.label(mandatory = True),
},
)
def sim_enclave(
name,
deps = [],
**kwargs):
"""Build rule for creating a simulated enclave shared object file.
A rule like cc_binary, but builds name_simulated.so and provides
name as a target that may be consumed as an enclave in Asylo.
Creates two targets:
name: A binary that may be provided to an enclave loader's enclaves.
name_simulated.so: The underlying cc_binary which is reprovided as an
enclave target. If name has a ".so" suffix, then it
is replaced with "_simulated.so".
Args:
name: The simulated enclave target name.
deps: Dependencies for the cc_binary
**kwargs: cc_binary arguments.
"""
if not kwargs.pop("linkshared", True):
fail("A sim_enclave must be build with linkshared = True")
if not kwargs.pop("linkstatic", True):
fail("A sim_enclave must be build with linkstatic = True")
binary_name = name + "_simulated.so"
if ".so" in name:
binary_name = name.replace(".so", "_simulated.so", 1)
if "asylo" in native.package_name():
_workspace_name = "//asylo"
else:
_workspace_name = "@com_google_asylo//asylo"
native.cc_binary(
name = binary_name,
deps = deps + [
_workspace_name + "/platform/primitives:trusted_primitives",
_workspace_name + "/platform/primitives/sim:trusted_sim",
],
linkopts = ["-Wl,-Bsymbolic"],
linkshared = True,
features = ["mostly_static_linking_mode"],
linkstatic = False, # Allow the .so to be created, not .a.
**kwargs
)
_reprovide_binary_with_enclave_info(
name = name,
testonly = kwargs.get("testonly", 0),
binary = binary_name,
)
| 33.73913 | 77 | 0.659794 |
load("@com_google_asylo_backend_provider//:enclave_info.bzl", "EnclaveInfo")
SimEnclaveInfo = provider()
def _reprovide_binary_with_enclave_info_impl(ctx):
return [
DefaultInfo(
files = ctx.attr.binary[DefaultInfo].files,
data_runfiles = ctx.attr.binary[DefaultInfo].data_runfiles,
default_runfiles = ctx.attr.binary[DefaultInfo].default_runfiles,
),
SimEnclaveInfo(),
EnclaveInfo(),
]
_reprovide_binary_with_enclave_info = rule(
implementation = _reprovide_binary_with_enclave_info_impl,
attrs = {
"binary": attr.label(mandatory = True),
},
)
def sim_enclave(
name,
deps = [],
**kwargs):
if not kwargs.pop("linkshared", True):
fail("A sim_enclave must be build with linkshared = True")
if not kwargs.pop("linkstatic", True):
fail("A sim_enclave must be build with linkstatic = True")
binary_name = name + "_simulated.so"
if ".so" in name:
binary_name = name.replace(".so", "_simulated.so", 1)
if "asylo" in native.package_name():
_workspace_name = "//asylo"
else:
_workspace_name = "@com_google_asylo//asylo"
native.cc_binary(
name = binary_name,
deps = deps + [
_workspace_name + "/platform/primitives:trusted_primitives",
_workspace_name + "/platform/primitives/sim:trusted_sim",
],
linkopts = ["-Wl,-Bsymbolic"],
linkshared = True,
features = ["mostly_static_linking_mode"],
linkstatic = False,
**kwargs
)
_reprovide_binary_with_enclave_info(
name = name,
testonly = kwargs.get("testonly", 0),
binary = binary_name,
)
| true | true |
1c31bbda924e7db5495072a5358f0e9be6fe68ac | 36,308 | py | Python | test/segwit.py | MichaelHDesigns/lunarium | 720e8815b8ac775efcdd99423f17337ad96b0e8c | [
"MIT"
] | 7 | 2018-09-01T18:44:03.000Z | 2022-03-05T15:10:47.000Z | test/segwit.py | MichaelHDesigns/lunarium | 720e8815b8ac775efcdd99423f17337ad96b0e8c | [
"MIT"
] | 30 | 2018-08-17T05:18:52.000Z | 2021-02-06T13:43:41.000Z | test/segwit.py | MichaelHDesigns/lunarium | 720e8815b8ac775efcdd99423f17337ad96b0e8c | [
"MIT"
] | 18 | 2018-08-15T19:56:15.000Z | 2022-02-17T02:14:01.000Z | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test the SegWit changeover logic
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import sha256, ripemd160, CTransaction, CTxIn, COutPoint, CTxOut
from test_framework.address import script_to_p2sh, key_to_p2pkh
from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG
from io import BytesIO
from test_framework.mininode import FromHex
import time
NODE_0 = 0
NODE_1 = 1
NODE_2 = 2
WIT_V0 = 0
WIT_V1 = 1
def witness_script(version, pubkey):
if (version == 0):
pubkeyhash = bytes_to_hex_str(ripemd160(sha256(hex_str_to_bytes(pubkey))))
pkscript = "0014" + pubkeyhash
elif (version == 1):
# 1-of-1 multisig
scripthash = bytes_to_hex_str(sha256(hex_str_to_bytes("5121" + pubkey + "51ae")))
pkscript = "0020" + scripthash
else:
assert("Wrong version" == "0 or 1")
return pkscript
def addlength(script):
scriptlen = format(len(script)//2, 'x')
assert(len(scriptlen) == 2)
return scriptlen + script
def create_witnessprogram(version, node, utxo, pubkey, encode_p2sh, amount):
pkscript = witness_script(version, pubkey);
if (encode_p2sh):
p2sh_hash = bytes_to_hex_str(ripemd160(sha256(hex_str_to_bytes(pkscript))))
pkscript = "a914"+p2sh_hash+"87"
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]} )
coinbase = CTransaction()
DUMMY_P2SH = "8poAwF24PZJebPTtKJVyvAdFw1u7hX6uVX" # P2SH of "OP_1 OP_DROP"
outputs[DUMMY_P2SH] = amount
tx_to_witness = node.createrawtransaction(inputs,outputs)
#replace dummy output with our own
tx_to_witness = tx_to_witness[0:110] + addlength(pkscript) + tx_to_witness[-8:]
return tx_to_witness
def send_to_witness(version, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
tx_to_witness = create_witnessprogram(version, node, utxo, pubkey, encode_p2sh, amount)
if (sign):
signed = node.signrawtransaction(tx_to_witness)
assert("errors" not in signed or len(["errors"]) == 0)
return node.sendrawtransaction(signed["hex"])
else:
if (insert_redeem_script):
tx_to_witness = tx_to_witness[0:82] + addlength(insert_redeem_script) + tx_to_witness[84:]
return node.sendrawtransaction(tx_to_witness)
def getutxo(txid):
utxo = {}
utxo["vout"] = 0
utxo["txid"] = txid
return utxo
def find_unspent(node, min_value, max_value = 100000000):
for utxo in node.listunspent():
if utxo['amount'] >= min_value and utxo["amount"] <= max_value:
return utxo
class SegWitTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-logtimemicros", "-walletprematurewitness", "-rpcserialversion=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-logtimemicros", "-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-rpcserialversion=1"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-logtimemicros", "-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness"]))
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 1)
connect_nodes(self.nodes[0], 2)
self.is_network_split = False
self.sync_all()
def success_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("6.998"), sign, redeem_script)
block = node.setgenerate(True, 1)
assert_equal(len(node.getblock(block[0])["tx"]), 2)
sync_blocks(self.nodes)
def skip_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("6.998"), sign, redeem_script)
block = node.setgenerate(True, 1)
assert_equal(len(node.getblock(block[0])["tx"]), 1)
sync_blocks(self.nodes)
def fail_accept(self, node, txid, sign, redeem_script=""):
try:
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("6.998"), sign, redeem_script)
except JSONRPCException as exp:
assert(exp.error["code"] == -26)
else:
raise AssertionError("Tx should not have been accepted")
def fail_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("6.998"), sign, redeem_script)
try:
node.setgenerate(True, 1)
except JSONRPCException as exp:
assert(exp.error["code"] == -1)
else:
raise AssertionError("Created valid block when TestBlockValidity should have failed")
sync_blocks(self.nodes)
def run_test(self):
    """Exercise segwit activation via the spork system.

    Before activation: witness-format txs must be rejected from node 0's
    mempool and skipped when mining on node 2. After activation: the queued
    witness txs must mine, and the -rpcserialversion flag must control
    whether block/tx RPCs return witness or witness-stripped serializations.

    NOTE(review): the '# block N' markers are inherited from the upstream
    Bitcoin test this was adapted from and may be stale relative to the
    actual heights produced here — verify before relying on them.
    """
    # --- Setup: mine spendable coins and create witness outputs ----------
    self.nodes[0].setgenerate(True, 300) # block 161
    balance_presetup = self.nodes[0].getbalance()
    self.pubkey = []
    p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh
    wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness
    for i in range(3):
        newaddress = self.nodes[i].getnewaddress()
        self.pubkey.append(self.nodes[i].validateaddress(newaddress)["pubkey"])
        multiaddress = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]])
        self.nodes[i].addwitnessaddress(newaddress)
        self.nodes[i].addwitnessaddress(multiaddress)
        p2sh_ids.append([])
        wit_ids.append([])
        for v in range(2):
            p2sh_ids[i].append([])
            wit_ids[i].append([])
    # Five spends per (node, witness-version) pair, both bare and P2SH-wrapped.
    for i in range(5):
        for n in range(3):
            for v in range(2):
                utxo0 = find_unspent(self.nodes[0], 6, 7)
                wit_ids[n][v].append(send_to_witness(v, self.nodes[0], utxo0, self.pubkey[n], False, Decimal("6.999")))
                utxo1 = find_unspent(self.nodes[0], 6, 7)
                p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], utxo1, self.pubkey[n], True, Decimal("6.999")))
    self.nodes[0].setgenerate(True, 1) # block 163
    sync_blocks(self.nodes)
    # Make sure all nodes recognize the transactions as theirs
    assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*7 + 20*Decimal("6.999") + 7)
    assert_equal(self.nodes[1].getbalance(), 20*Decimal("6.999"))
    assert_equal(self.nodes[2].getbalance(), 20*Decimal("6.999"))
    self.nodes[0].setgenerate(True, 260) # block 423
    sync_blocks(self.nodes)
    # --- Pre-fork: node 0 must reject every witness-format tx ------------
    print("Verify default node can't accept any witness format txs before fork")
    # unsigned, no scriptsig
    self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], False)
    self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], False)
    self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], False)
    self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], False)
    # unsigned with redeem script
    self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], False, addlength(witness_script(0, self.pubkey[0])))
    self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], False, addlength(witness_script(1, self.pubkey[0])))
    # signed
    self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True)
    self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True)
    self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True)
    self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True)
    print("Verify witness txs are skipped for mining before the fork")
    self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) # block 424
    self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) # block 425
    self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) # block 426
    self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) # block 427
    # Lunarium: since witness won't be enabled until the fork, we don't care
    # TODO: An old node would see these txs without witnesses and be able to mine them
    print("Verify unsigned bare witness txs in versionbits-setting blocks are valid before the fork")
    self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][1], False) # block 428
    self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][1], False) # block 429
    print("Verify unsigned p2sh witness txs without a redeem script are invalid")
    self.fail_accept(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][1], False)
    self.fail_accept(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][1], False)
    # --- Activate segwit through the spork system ------------------------
    for node in self.nodes:
        node.spork("SPORK_17_SEGWIT_ACTIVATION", int(time.time() - 100))
    print("Verify previous witness txs skipped for mining can now be mined")
    assert_equal(len(self.nodes[2].getrawmempool()), 4)
    block = self.nodes[2].setgenerate(True, 1) # block 432 (first block with new rules; 432 = 144 * 3)
    sync_blocks(self.nodes)
    assert_equal(len(self.nodes[2].getrawmempool()), 0)
    segwit_tx_list = self.nodes[2].getblock(block[0])["tx"]
    assert_equal(len(segwit_tx_list), 5)
    # --- RPC serialization: node0 strips witnesses, node1/node2 keep them -
    print("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag")
    assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False))
    assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False))
    for i in range(len(segwit_tx_list)):
        tx = FromHex(CTransaction(), self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
        assert(self.nodes[2].getrawtransaction(segwit_tx_list[i]) != self.nodes[0].getrawtransaction(segwit_tx_list[i]))
        assert(self.nodes[1].getrawtransaction(segwit_tx_list[i], 0) == self.nodes[2].getrawtransaction(segwit_tx_list[i]))
        assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) != self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
        assert(self.nodes[1].getrawtransaction(segwit_tx_list[i]) == self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
        assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) == bytes_to_hex_str(tx.serialize_without_witness()))
    # --- Post-fork consensus checks --------------------------------------
    print("Verify witness txs without witness data are invalid after the fork")
    self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][2], False)
    self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][2], False)
    self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][2], False, addlength(witness_script(0, self.pubkey[2])))
    self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][2], False, addlength(witness_script(1, self.pubkey[2])))
    print("Verify default node can now use witness txs")
    self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) # block 432
    self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) # block 433
    self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) # block 434
    self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) # block 435
    # --- Wallet import behaviour -----------------------------------------
    print("Verify behaviour of importaddress, addwitnessaddress and listunspent")
    # Some public keys to be used later
    pubkeys = [
        "0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb
        "02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97
        "04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV
        "02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd
        "036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66
        "0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K
        "0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ
    ]
    # Import a compressed key and an uncompressed key, generate some multisig addresses
    self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn")
    uncompressed_spendable_address = ["yCGsx6q1rKBZfQTtjxMZwRyN1JMvYuASSt"]
    self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR")
    compressed_spendable_address = ["y2yJUeCHgppMaaT5SgCPgo8G7rYfBexA1v"]
    assert ((self.nodes[0].validateaddress(uncompressed_spendable_address[0])['iscompressed'] == False))
    assert ((self.nodes[0].validateaddress(compressed_spendable_address[0])['iscompressed'] == True))
    self.nodes[0].importpubkey(pubkeys[0])
    compressed_solvable_address = [key_to_p2pkh(pubkeys[0])]
    self.nodes[0].importpubkey(pubkeys[1])
    compressed_solvable_address.append(key_to_p2pkh(pubkeys[1]))
    self.nodes[0].importpubkey(pubkeys[2])
    uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])]
    # NOTE(review): the remainder of the upstream listunspent/addwitnessaddress
    # test battery (~230 lines) was disabled by commenting it out; that dead
    # code has been removed here. Recover it from the upstream Bitcoin Core
    # segwit functional test if it ever needs to be revived.
def mine_and_test_listunspent(self, script_list, ismine):
    """Create one tx paying 0.1 coin to every script in *script_list*, mine it,
    then check listunspent visibility.

    ismine == 2: every output must be spendable.
    ismine == 1: every output must be watch-only (seen but not spendable).
    otherwise : no output may appear at all.
    Returns the funding txid.
    """
    source = find_unspent(self.nodes[0], 50)
    funding = CTransaction()
    funding.vin.append(CTxIn(COutPoint(int('0x' + source['txid'], 0), source['vout'])))
    for spk in script_list:
        funding.vout.append(CTxOut(10000000, spk))
    funding.rehash()
    raw_hex = bytes_to_hex_str(funding.serialize_without_witness())
    signresults = self.nodes[0].signrawtransaction(raw_hex)['hex']
    txid = self.nodes[0].sendrawtransaction(signresults, True)
    self.nodes[0].setgenerate(True, 1)
    sync_blocks(self.nodes)
    watchcount = 0
    spendcount = 0
    for entry in self.nodes[0].listunspent():
        if entry['txid'] != txid:
            continue
        watchcount += 1
        if entry['spendable'] == True:
            spendcount += 1
    if ismine == 2:
        assert_equal(spendcount, len(script_list))
    elif ismine == 1:
        assert_equal(watchcount, len(script_list))
        assert_equal(spendcount, 0)
    else:
        assert_equal(watchcount, 0)
    return txid
def p2sh_address_to_script(self, v):
    """Given a validateaddress result *v* for a P2SH address, return the four
    standard encodings of its redeem script:
    [bare, P2SH, P2WSH, P2SH(P2WSH)]."""
    redeem = CScript(hex_str_to_bytes(v['hex']))
    p2sh = CScript(hex_str_to_bytes(v['scriptPubKey']))
    p2wsh = CScript([OP_0, sha256(redeem)])
    p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL])
    return [redeem, p2sh, p2wsh, p2sh_p2wsh]
def p2pkh_address_to_script(self, v):
    """Given a validateaddress result *v* for a P2PKH address, return its ten
    standard script encodings, in this fixed order:
    [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh,
     p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]."""
    pubkey = hex_str_to_bytes(v['pubkey'])

    def wrap_p2sh(script):
        # P2SH wrapper: OP_HASH160 <hash160(script)> OP_EQUAL
        return CScript([OP_HASH160, hash160(script), OP_EQUAL])

    p2pk = CScript([pubkey, OP_CHECKSIG])
    p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey']))
    p2wpkh = CScript([OP_0, hash160(pubkey)])
    p2wsh_p2pk = CScript([OP_0, sha256(p2pk)])
    p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)])
    return [p2wpkh, wrap_p2sh(p2wpkh), p2pk, p2pkh, wrap_p2sh(p2pk),
            wrap_p2sh(p2pkh), p2wsh_p2pk, p2wsh_p2pkh,
            wrap_p2sh(p2wsh_p2pk), wrap_p2sh(p2wsh_p2pkh)]
def create_and_mine_tx_from_txids(self, txids, success = True):
    """Build a single tx spending every output of every txid in *txids*,
    sign it with node 0's wallet, broadcast it, and mine a block.

    The `success` parameter is unused; it is kept for caller compatibility.
    """
    spender = CTransaction()
    for txid in txids:
        prev = CTransaction()
        prev.deserialize(BytesIO(hex_str_to_bytes(self.nodes[0].getrawtransaction(txid))))
        for out_idx in range(len(prev.vout)):
            spender.vin.append(CTxIn(COutPoint(int('0x' + txid, 0), out_idx)))
    # A single zero-value output with an empty script collects everything.
    spender.vout.append(CTxOut(0, CScript()))
    spender.rehash()
    signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(spender.serialize_without_witness()))['hex']
    self.nodes[0].sendrawtransaction(signresults, True)
    self.nodes[0].setgenerate(True, 1)
    sync_blocks(self.nodes)
# Script entry point: run the SegWit functional test suite.
if __name__ == '__main__':
    SegWitTest().main()
| 61.538983 | 203 | 0.68478 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import sha256, ripemd160, CTransaction, CTxIn, COutPoint, CTxOut
from test_framework.address import script_to_p2sh, key_to_p2pkh
from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG
from io import BytesIO
from test_framework.mininode import FromHex
import time
# Node indices used to index wit_ids/p2sh_ids in run_test.
NODE_0 = 0
NODE_1 = 1
NODE_2 = 2
# Witness program flavors built by witness_script():
# 0 -> "0014"+hash160(pubkey) (key-hash program),
# 1 -> "0020"+sha256(1-of-1 multisig) (script-hash program).
WIT_V0 = 0
WIT_V1 = 1
def witness_script(version, pubkey):
    """Return the hex scriptPubKey of a witness program paying to *pubkey*.

    version 0: OP_0 <hash160(pubkey)>                 ("0014" + 20-byte hash)
    version 1: OP_0 <sha256(1-of-1 multisig script)>  ("0020" + 32-byte hash)
    Any other version trips the assert below.
    """
    if version == 0:
        program = "0014" + bytes_to_hex_str(ripemd160(sha256(hex_str_to_bytes(pubkey))))
    elif version == 1:
        # "5121"+pubkey+"51ae" is OP_1 <pubkey> OP_1 OP_CHECKMULTISIG.
        program = "0020" + bytes_to_hex_str(sha256(hex_str_to_bytes("5121" + pubkey + "51ae")))
    else:
        # Deliberately-false comparison: fails the assert with a readable message.
        assert("Wrong version" == "0 or 1")
    return program
def addlength(script):
    """Prefix a hex-encoded *script* with its byte length as one hex byte.

    Only scripts whose length encodes to exactly two hex digits
    (16..255 bytes) are supported; anything else trips the assert.
    """
    length_byte = format(len(script) // 2, 'x')
    assert(len(length_byte) == 2)
    return length_byte + script
def create_witnessprogram(version, node, utxo, pubkey, encode_p2sh, amount):
    """Build a raw tx hex spending *utxo* to a witness program for *pubkey*.

    The tx is first created via createrawtransaction paying a dummy P2SH
    address, then the output script is spliced out and replaced with the
    witness program (optionally wrapped in P2SH). Returns the patched hex.

    NOTE: the splice offsets (110 / -8) assume the exact single-input,
    single-output layout produced above — do not reuse for other shapes.
    """
    # Fix: removed the stray semicolon and the dead, never-used
    # `coinbase = CTransaction()` local from the original.
    pkscript = witness_script(version, pubkey)
    if encode_p2sh:
        # Wrap in P2SH: a914 <hash160(program)> 87 == OP_HASH160 <h> OP_EQUAL.
        p2sh_hash = bytes_to_hex_str(ripemd160(sha256(hex_str_to_bytes(pkscript))))
        pkscript = "a914" + p2sh_hash + "87"
    inputs = [{"txid": utxo["txid"], "vout": utxo["vout"]}]
    DUMMY_P2SH = "8poAwF24PZJebPTtKJVyvAdFw1u7hX6uVX" # P2SH of "OP_1 OP_DROP"
    outputs = {DUMMY_P2SH: amount}
    tx_to_witness = node.createrawtransaction(inputs, outputs)
    # Replace the dummy output script with the (length-prefixed) witness program.
    tx_to_witness = tx_to_witness[0:110] + addlength(pkscript) + tx_to_witness[-8:]
    return tx_to_witness
def send_to_witness(version, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
    """Spend *utxo* to a witness program for *pubkey* and broadcast it.

    sign=True: wallet-sign the tx (asserting signing produced no errors).
    sign=False: optionally splice *insert_redeem_script* into the scriptSig
    and broadcast unsigned. Returns the txid from sendrawtransaction.
    """
    tx_to_witness = create_witnessprogram(version, node, utxo, pubkey, encode_p2sh, amount)
    if (sign):
        signed = node.signrawtransaction(tx_to_witness)
        # BUG FIX: original checked len(["errors"]) == 0, which measures a
        # literal one-element list (always 1) — signing errors were silently
        # ignored. Check the actual "errors" field of the sign result.
        assert("errors" not in signed or len(signed["errors"]) == 0)
        return node.sendrawtransaction(signed["hex"])
    else:
        if (insert_redeem_script):
            # Patch the redeem script into input 0's scriptSig (offsets assume
            # the fixed layout produced by create_witnessprogram).
            tx_to_witness = tx_to_witness[0:82] + addlength(insert_redeem_script) + tx_to_witness[84:]
        return node.sendrawtransaction(tx_to_witness)
def getutxo(txid):
    """Return a utxo dict referencing output 0 of *txid*."""
    return {"txid": txid, "vout": 0}
def find_unspent(node, min_value, max_value = 100000000):
    """Return the first wallet UTXO with min_value <= amount <= max_value.

    Returns None when no matching output exists.
    """
    for candidate in node.listunspent():
        if min_value <= candidate['amount'] <= max_value:
            return candidate
    return None
class SegWitTest(BitcoinTestFramework):
def setup_chain(self):
    # Start from a clean regtest chain with datadirs for three nodes.
    print("Initializing test directory "+self.options.tmpdir)
    initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self):
    # Three nodes with deliberately different policies:
    #   node0: -rpcserialversion=0 -> RPCs return witness-stripped serializations
    #   node1: legacy blockversion=4, -rpcserialversion=1 -> witness serializations
    #   node2: segwit-signalling blockversion (536870915 == 0x20000003)
    self.nodes = []
    self.nodes.append(start_node(0, self.options.tmpdir, ["-logtimemicros", "-walletprematurewitness", "-rpcserialversion=0"]))
    self.nodes.append(start_node(1, self.options.tmpdir, ["-logtimemicros", "-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-rpcserialversion=1"]))
    self.nodes.append(start_node(2, self.options.tmpdir, ["-logtimemicros", "-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness"]))
    # Connect the nodes in a triangle so blocks and txs relay everywhere.
    connect_nodes(self.nodes[1], 0)
    connect_nodes(self.nodes[2], 1)
    connect_nodes(self.nodes[0], 2)
    self.is_network_split = False
    self.sync_all()
def success_mine(self, node, txid, sign, redeem_script=""):
    # Spend txid to a witness output and mine; the new block must contain
    # the spend (coinbase + spend == 2 txs).
    send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("6.998"), sign, redeem_script)
    block = node.setgenerate(True, 1)
    assert_equal(len(node.getblock(block[0])["tx"]), 2)
    sync_blocks(self.nodes)
def skip_mine(self, node, txid, sign, redeem_script=""):
    # Spend txid to a witness output and mine; the spend must be left out
    # of the new block (coinbase only == 1 tx).
    send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("6.998"), sign, redeem_script)
    block = node.setgenerate(True, 1)
    assert_equal(len(node.getblock(block[0])["tx"]), 1)
    sync_blocks(self.nodes)
def fail_accept(self, node, txid, sign, redeem_script=""):
    # Spending txid to a witness output must be rejected from the mempool
    # with RPC error -26 (rejected by network rules).
    try:
        send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("6.998"), sign, redeem_script)
    except JSONRPCException as exp:
        assert(exp.error["code"] == -26)
    else:
        raise AssertionError("Tx should not have been accepted")
def fail_mine(self, node, txid, sign, redeem_script=""):
    # Spend txid to a witness output; block creation must then fail
    # (setgenerate surfaces the TestBlockValidity failure as code -1).
    send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("6.998"), sign, redeem_script)
    try:
        node.setgenerate(True, 1)
    except JSONRPCException as exp:
        assert(exp.error["code"] == -1)
    else:
        raise AssertionError("Created valid block when TestBlockValidity should have failed")
    sync_blocks(self.nodes)
def run_test(self):
self.nodes[0].setgenerate(True, 300)
balance_presetup = self.nodes[0].getbalance()
self.pubkey = []
p2sh_ids = []
wit_ids = []
for i in range(3):
newaddress = self.nodes[i].getnewaddress()
self.pubkey.append(self.nodes[i].validateaddress(newaddress)["pubkey"])
multiaddress = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]])
self.nodes[i].addwitnessaddress(newaddress)
self.nodes[i].addwitnessaddress(multiaddress)
p2sh_ids.append([])
wit_ids.append([])
for v in range(2):
p2sh_ids[i].append([])
wit_ids[i].append([])
for i in range(5):
for n in range(3):
for v in range(2):
utxo0 = find_unspent(self.nodes[0], 6, 7)
wit_ids[n][v].append(send_to_witness(v, self.nodes[0], utxo0, self.pubkey[n], False, Decimal("6.999")))
utxo1 = find_unspent(self.nodes[0], 6, 7)
p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], utxo1, self.pubkey[n], True, Decimal("6.999")))
self.nodes[0].setgenerate(True, 1)
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*7 + 20*Decimal("6.999") + 7)
assert_equal(self.nodes[1].getbalance(), 20*Decimal("6.999"))
assert_equal(self.nodes[2].getbalance(), 20*Decimal("6.999"))
self.nodes[0].setgenerate(True, 260)
sync_blocks(self.nodes)
print("Verify default node can't accept any witness format txs before fork")
# unsigned, no scriptsig
self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], False)
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], False)
# unsigned with redeem script
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], False, addlength(witness_script(0, self.pubkey[0])))
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], False, addlength(witness_script(1, self.pubkey[0])))
# signed
self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True)
self.fail_accept(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True)
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True)
self.fail_accept(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True)
print("Verify witness txs are skipped for mining before the fork")
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) #block 424
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) #block 426
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) #block 427
# Lunarium: since witness won't be enabled until the fork, we don't care
# TODO: An old node would see these txs without witnesses and be able to mine them
print("Verify unsigned bare witness txs in versionbits-setting blocks are valid before the fork")
self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][1], False) #block 428
self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][1], False) #block 429
print("Verify unsigned p2sh witness txs without a redeem script are invalid")
self.fail_accept(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][1], False)
self.fail_accept(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][1], False)
# enable segwit through spork system
for node in self.nodes:
node.spork("SPORK_17_SEGWIT_ACTIVATION", int(time.time() - 100))
print("Verify previous witness txs skipped for mining can now be mined")
assert_equal(len(self.nodes[2].getrawmempool()), 4)
block = self.nodes[2].setgenerate(True, 1) #block 432 (first block with new rules; 432 = 144 * 3)
sync_blocks(self.nodes)
assert_equal(len(self.nodes[2].getrawmempool()), 0)
segwit_tx_list = self.nodes[2].getblock(block[0])["tx"]
assert_equal(len(segwit_tx_list), 5)
print("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag")
assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False))
assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False))
for i in range(len(segwit_tx_list)):
tx = FromHex(CTransaction(), self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[2].getrawtransaction(segwit_tx_list[i]) != self.nodes[0].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i], 0) == self.nodes[2].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) != self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i]) == self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) == bytes_to_hex_str(tx.serialize_without_witness()))
print("Verify witness txs without witness data are invalid after the fork")
self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][2], False)
self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][2], False)
self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][2], False, addlength(witness_script(0, self.pubkey[2])))
self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][2], False, addlength(witness_script(1, self.pubkey[2])))
print("Verify default node can now use witness txs")
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) #block 432
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) #block 433
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 434
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 435
print("Verify behaviour of importaddress, addwitnessaddress and listunspent")
# Some public keys to be used later
pubkeys = [
"0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb
"02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97
"04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV
"02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd
"036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66
"0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K
"0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ
]
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn")
uncompressed_spendable_address = ["yCGsx6q1rKBZfQTtjxMZwRyN1JMvYuASSt"]
self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR")
compressed_spendable_address = ["y2yJUeCHgppMaaT5SgCPgo8G7rYfBexA1v"]
assert ((self.nodes[0].validateaddress(uncompressed_spendable_address[0])['iscompressed'] == False))
assert ((self.nodes[0].validateaddress(compressed_spendable_address[0])['iscompressed'] == True))
self.nodes[0].importpubkey(pubkeys[0])
compressed_solvable_address = [key_to_p2pkh(pubkeys[0])]
self.nodes[0].importpubkey(pubkeys[1])
compressed_solvable_address.append(key_to_p2pkh(pubkeys[1]))
self.nodes[0].importpubkey(pubkeys[2])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])]
# spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress
# spendable_after_importaddress = [] # These outputs should be seen after importaddress
# solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable
# unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress
# solvable_anytime = [] # These outputs should be solvable after importpubkey
# unseen_anytime = [] # These outputs should never be seen
# uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]]))
# uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]]))
# compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]]))
# uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]]))
# compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]]))
# compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], compressed_solvable_address[1]]))
# unknown_address = ["mtKKyoHabkk6e4ppT7NaM7THqPUt7AzPrT", "yF6LbHnfjfHt3HxwUfDj4RCQ1Meo7JkESV"]
# # Test multisig_without_privkey
# # We have 2 public keys without private keys, use addmultisigaddress to add to wallet.
# # Money sent to P2SH of multisig of this should only be seen after importaddress with the BASE58 P2SH address.
# multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])
# script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG])
# solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL]))
# for i in compressed_spendable_address:
# v = self.nodes[0].validateaddress(i)
# if (v['isscript']):
# [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# # bare and p2sh multisig with compressed keys should always be spendable
# spendable_anytime.extend([bare, p2sh])
# # P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress
# spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh])
# else:
# [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# # normal P2PKH and P2PK with compressed keys should always be spendable
# spendable_anytime.extend([p2pkh, p2pk])
# # P2SH_P2PK, P2SH_P2PKH, and witness with compressed keys are spendable after direct importaddress
# spendable_after_importaddress.extend([p2wpkh, p2sh_p2wpkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
# for i in uncompressed_spendable_address:
# v = self.nodes[0].validateaddress(i)
# if (v['isscript']):
# [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# # bare and p2sh multisig with uncompressed keys should always be spendable
# spendable_anytime.extend([bare, p2sh])
# # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
# unseen_anytime.extend([p2wsh, p2sh_p2wsh])
# else:
# [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# # normal P2PKH and P2PK with uncompressed keys should always be spendable
# spendable_anytime.extend([p2pkh, p2pk])
# # P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress
# spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# # witness with uncompressed keys are never seen
# unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
# for i in compressed_solvable_address:
# v = self.nodes[0].validateaddress(i)
# if (v['isscript']):
# # Multisig without private is not seen after addmultisigaddress, but seen after importaddress
# [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh])
# else:
# [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# # normal P2PKH and P2PK with compressed keys should always be seen
# solvable_anytime.extend([p2pkh, p2pk])
# # P2SH_P2PK, P2SH_P2PKH, and witness with compressed keys are seen after direct importaddress
# solvable_after_importaddress.extend([p2wpkh, p2sh_p2wpkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
# for i in uncompressed_solvable_address:
# v = self.nodes[0].validateaddress(i)
# if (v['isscript']):
# [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# # Base uncompressed multisig without private is not seen after addmultisigaddress, but seen after importaddress
# solvable_after_importaddress.extend([bare, p2sh])
# # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
# unseen_anytime.extend([p2wsh, p2sh_p2wsh])
# else:
# [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# # normal P2PKH and P2PK with uncompressed keys should always be seen
# solvable_anytime.extend([p2pkh, p2pk])
# # P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress
# solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# # witness with uncompressed keys are never seen
# unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
# op1 = CScript([OP_1])
# op0 = CScript([OP_0])
# # 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V
# unsolvable_address = ["mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V", "2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe", script_to_p2sh(op1), script_to_p2sh(op0)]
# unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D")
# unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG])
# unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)])
# p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL])
# p2wshop1 = CScript([OP_0, sha256(op1)])
# unsolvable_after_importaddress.append(unsolvablep2pkh)
# unsolvable_after_importaddress.append(unsolvablep2wshp2pkh)
# unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script
# unsolvable_after_importaddress.append(p2wshop1)
# unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided
# unsolvable_after_importaddress.append(p2shop0)
# spendable_txid = []
# solvable_txid = []
# spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2))
# solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1))
# self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0)
# importlist = []
# for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address:
# v = self.nodes[0].validateaddress(i)
# if (v['isscript']):
# bare = hex_str_to_bytes(v['hex'])
# importlist.append(bytes_to_hex_str(bare))
# importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(bare)])))
# else:
# pubkey = hex_str_to_bytes(v['pubkey'])
# p2pk = CScript([pubkey, OP_CHECKSIG])
# p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
# importlist.append(bytes_to_hex_str(p2pk))
# importlist.append(bytes_to_hex_str(p2pkh))
# importlist.append(bytes_to_hex_str(CScript([OP_0, hash160(pubkey)])))
# importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pk)])))
# importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pkh)])))
# importlist.append(bytes_to_hex_str(unsolvablep2pkh))
# importlist.append(bytes_to_hex_str(unsolvablep2wshp2pkh))
# importlist.append(bytes_to_hex_str(op1))
# importlist.append(bytes_to_hex_str(p2wshop1))
# for i in importlist:
# try:
# self.nodes[0].importaddress(i,"",False,True)
# except JSONRPCException as exp:
# assert_equal(exp.error["message"], "The wallet already contains the private key for this address or script")
# self.nodes[0].importaddress(script_to_p2sh(op0)) # import OP_0 as address only
# self.nodes[0].importaddress(multisig_without_privkey_address) # Test multisig_without_privkey
# spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
# solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
# self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
# self.mine_and_test_listunspent(unseen_anytime, 0)
# # addwitnessaddress should refuse to return a witness address if an uncompressed key is used or the address is
# # not in the wallet
# # note that no witness address should be returned by unsolvable addresses
# # the multisig_without_privkey_address will fail because its keys were not added with importpubkey
# for i in uncompressed_spendable_address + uncompressed_solvable_address + unknown_address + unsolvable_address + [multisig_without_privkey_address]:
# try:
# self.nodes[0].addwitnessaddress(i)
# except JSONRPCException as exp:
# assert_equal(exp.error["message"], "Public key or redeemscript not known to wallet, or the key is uncompressed")
# else:
# assert(False)
# for i in compressed_spendable_address + compressed_solvable_address:
# witaddress = self.nodes[0].addwitnessaddress(i)
# # addwitnessaddress should return the same address if it is a known P2SH-witness address
# assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
# spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
# solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
# self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
# self.mine_and_test_listunspent(unseen_anytime, 0)
# # Repeat some tests. This time we don't add witness scripts with importaddress
erialize_without_witness()))['hex']
txid = self.nodes[0].sendrawtransaction(signresults, True)
self.nodes[0].setgenerate(True, 1)
sync_blocks(self.nodes)
watchcount = 0
spendcount = 0
for i in self.nodes[0].listunspent():
if (i['txid'] == txid):
watchcount += 1
if (i['spendable'] == True):
spendcount += 1
if (ismine == 2):
assert_equal(spendcount, len(script_list))
elif (ismine == 1):
assert_equal(watchcount, len(script_list))
assert_equal(spendcount, 0)
else:
assert_equal(watchcount, 0)
return txid
def p2sh_address_to_script(self,v):
bare = CScript(hex_str_to_bytes(v['hex']))
p2sh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2wsh = CScript([OP_0, sha256(bare)])
p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL])
return([bare, p2sh, p2wsh, p2sh_p2wsh])
def p2pkh_address_to_script(self,v):
pubkey = hex_str_to_bytes(v['pubkey'])
p2wpkh = CScript([OP_0, hash160(pubkey)])
p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL])
p2sh_p2pkh = CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL])
p2wsh_p2pk = CScript([OP_0, sha256(p2pk)])
p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)])
p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL])
p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL])
return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]
    def create_and_mine_tx_from_txids(self, txids, success = True):
        """Build one tx spending every output of every txid in `txids`, then sign, broadcast and mine it.

        NOTE(review): the `success` parameter is never read in this body — confirm callers before removing.
        """
        tx = CTransaction()
        for i in txids:
            txtmp = CTransaction()
            txraw = self.nodes[0].getrawtransaction(i)
            f = BytesIO(hex_str_to_bytes(txraw))
            txtmp.deserialize(f)
            for j in range(len(txtmp.vout)):
                # int('0x'+i, 0) parses the txid hex string into the integer form COutPoint expects.
                tx.vin.append(CTxIn(COutPoint(int('0x'+i,0), j)))
        # Single zero-value output with an empty script; values are irrelevant for this test.
        tx.vout.append(CTxOut(0, CScript()))
        tx.rehash()
        signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
        self.nodes[0].sendrawtransaction(signresults, True)
        self.nodes[0].setgenerate(True, 1)
        sync_blocks(self.nodes)
# Entry point: run the SegWit functional test when invoked as a script.
if __name__ == '__main__':
    SegWitTest().main()
| true | true |
1c31bc0db543d1938a9073d536d060604a0f8615 | 38,282 | py | Python | mindspore/_checkparam.py | ATestGroup233/mindspore | 5d81221b5896cf7d7c6adb44daef28d92cb43352 | [
"Apache-2.0"
] | 1 | 2021-06-01T12:34:37.000Z | 2021-06-01T12:34:37.000Z | mindspore/_checkparam.py | ATestGroup233/mindspore | 5d81221b5896cf7d7c6adb44daef28d92cb43352 | [
"Apache-2.0"
] | null | null | null | mindspore/_checkparam.py | ATestGroup233/mindspore | 5d81221b5896cf7d7c6adb44daef28d92cb43352 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Check parameters."""
import re
import inspect
import math
from enum import Enum
from functools import reduce, wraps
from itertools import repeat, zip_longest
from collections import deque
from collections.abc import Iterable
import numpy as np
from mindspore import log as logger
from mindspore.common import dtype as mstype
class Rel(Enum):
    """Relational operators and range kinds used by the parameter validators."""
    # pairwise comparisons
    EQ = 1  # ==
    NE = 2  # !=
    LT = 3  # <
    LE = 4  # <=
    GT = 5  # >
    GE = 6  # >=
    # range membership; parentheses/brackets denote open/closed endpoints
    INC_NEITHER = 7   # (lower, upper)
    INC_LEFT = 8      # [lower, upper)
    INC_RIGHT = 9     # (lower, upper]
    INC_BOTH = 10     # [lower, upper]
    # collection membership
    IN = 11
    NOT_IN = 12

    @staticmethod
    def get_strs(rel):
        """Return the message template for `rel`, or '' when unknown."""
        return rel_strs.get(rel, "")

    @staticmethod
    def get_fns(rel):
        """Return the predicate implementing `rel`, or an always-False callable when unknown."""
        return rel_fns[rel] if rel in rel_fns else (lambda *args: False)
# Maps each Rel member to the predicate that implements it.
rel_fns = {
    # pairwise comparisons
    Rel.EQ: lambda a, b: a == b,
    Rel.NE: lambda a, b: a != b,
    Rel.LT: lambda a, b: a < b,
    Rel.LE: lambda a, b: a <= b,
    Rel.GT: lambda a, b: a > b,
    Rel.GE: lambda a, b: a >= b,
    # range membership
    Rel.INC_NEITHER: lambda v, lo, hi: lo < v < hi,
    Rel.INC_LEFT: lambda v, lo, hi: lo <= v < hi,
    Rel.INC_RIGHT: lambda v, lo, hi: lo < v <= hi,
    Rel.INC_BOTH: lambda v, lo, hi: lo <= v <= hi,
    # collection membership
    Rel.IN: lambda v, coll: v in coll,
    Rel.NOT_IN: lambda v, coll: v not in coll,
}
# Maps each Rel member to the format template used when composing error messages;
# scalar templates take one value, range templates take (lower, upper).
rel_strs = {
    # scalar compare
    Rel.EQ: "== {}",
    Rel.NE: "!= {}",
    Rel.LT: "< {}",
    Rel.LE: "<= {}",
    Rel.GT: "> {}",
    Rel.GE: ">= {}",
    # scalar range check
    Rel.INC_NEITHER: "({}, {})",
    Rel.INC_LEFT: "[{}, {})",
    Rel.INC_RIGHT: "({}, {}]",
    Rel.INC_BOTH: "[{}, {}]",
    # collection in, not in
    Rel.IN: "in {}",
    Rel.NOT_IN: "not in {}",
}
def _check_3d_int_or_tuple(arg_name, arg_value, prim_name, allow_five=False, ret_five=False,
                           greater_zero=True, third_one=False, three_input=False):
    """
    Checks whether an argument is a positive int or tuple with 3 or 5(when allow_five is True) positive int elements.

    Args:
        arg_name (str): attribute name used in error messages.
        arg_value: the value to validate; an int or a tuple of ints.
        prim_name (str): primitive name used in error messages.
        allow_five (bool): also accept 5-element tuples.
        ret_five (bool): return a 5-tuple (left-padded with 1s) instead of a 3-tuple.
        greater_zero (bool): require elements > 0; when False, >= 0 is accepted.
        third_one (bool): require the depth element (index -3 of the result) to be exactly 1.
        three_input (bool): when a tuple is given, it must have exactly 3 elements.

    Returns:
        tuple, the normalized 3- or 5-element tuple.

    Raises:
        ValueError: if the value fails any of the enabled checks.
    """
    def _raise_message(third_one_flag=False, three_input_flag=False):
        # Raise the most specific ValueError for the observed failure mode.
        if third_one_flag:
            raise ValueError(f"For '{prim_name}' the depth of attr '{arg_name}' should be 1, but got {ret_value[-3]}")
        if three_input_flag:
            raise ValueError(f"For '{prim_name}' attr '{arg_name}' should be an positive int number or a tuple of "
                             f"three positive int numbers, but got {arg_value}")
        raise ValueError(f"For '{prim_name}' attr '{arg_name}' should be an positive int number or a tuple of three "
                         f"{'or five ' if allow_five else ''}positive int numbers, but got {arg_value}")
    def _get_return_value():
        # Normalize int / 3-tuple / 5-tuple input into the requested output arity.
        if isinstance(arg_value, int):
            ret = (1, 1, arg_value, arg_value, arg_value) if ret_five else (arg_value, arg_value, arg_value)
        elif len(arg_value) == 3:
            ret = (1, 1, arg_value[0], arg_value[1], arg_value[2]) if ret_five else arg_value
        elif len(arg_value) == 5:
            if not allow_five:
                _raise_message()
            ret = arg_value if ret_five else (arg_value[1], arg_value[2], arg_value[3])
        else:
            _raise_message()
        return ret
    Validator.check_value_type(arg_name, arg_value, (int, tuple), prim_name)
    if three_input and isinstance(arg_value, tuple):
        if len(arg_value) != 3:
            _raise_message(three_input_flag=three_input)
    ret_value = _get_return_value()
    for item in ret_value:
        # bool is excluded explicitly because it subclasses int.
        if isinstance(item, int) and not isinstance(item, bool):
            if greater_zero and item > 0:
                continue
            if not greater_zero and item >= 0:
                continue
        _raise_message()
    if third_one:
        if ret_value[-3] != 1:
            _raise_message(third_one_flag=third_one)
    return tuple(ret_value)
def check_number(arg_value, value, rel, arg_type=int, arg_name=None, prim_name=None):
    """
    Check that `arg_value` is a finite `arg_type` satisfying relation `rel` against `value`.

    Args:
        arg_value: value under validation.
        value: reference value compared against `arg_value`.
        rel (Rel): comparison relation, e.g. Rel.GE.
        arg_type (type): required Python type; bool is rejected even though it subclasses int.
        arg_name (str): optional argument name for error messages.
        prim_name (str): optional primitive name for error messages.

    Returns:
        The validated `arg_value`.

    Raises:
        TypeError: if `arg_value` is not an `arg_type`, or is a bool.
        ValueError: if `arg_value` is inf/nan, or fails the relation.

    Example:
        - number = check_number(number, 0, Rel.GE, int, "number", None)  # number >= 0
    """
    rel_fn = Rel.get_fns(rel)
    prim_name = f'in `{prim_name}`' if prim_name else ''
    arg_name = f'`{arg_name}`' if arg_name else ''
    if not isinstance(arg_value, arg_type):
        raise TypeError(f'{arg_name} {prim_name} must be {arg_type.__name__}, but got `{type(arg_value).__name__}`')
    if math.isinf(arg_value) or math.isnan(arg_value) or np.isinf(arg_value) or np.isnan(arg_value):
        raise ValueError(f'{arg_name} {prim_name} must be legal value, but got `{arg_value}`.')
    # bool passes the isinstance check when arg_type is int, but is not accepted here.
    # (The original also re-tested `not isinstance(arg_value, arg_type)` at this point,
    # which is unreachable after the early TypeError above; that dead term is removed.)
    type_mismatch = isinstance(arg_value, bool)
    if type_mismatch or not rel_fn(arg_value, value):
        rel_str = Rel.get_strs(rel).format(value)
        type_except = TypeError if type_mismatch else ValueError
        raise type_except(f'{arg_name} {prim_name} should be an {arg_type.__name__} and must {rel_str}, '
                          f'but got `{arg_value}` with type `{type(arg_value).__name__}`.')
    return arg_value
def check_is_number(arg_value, arg_type, arg_name=None, prim_name=None):
    """
    Verify that `arg_value` is a finite instance of `arg_type` (bool rejected) and return it.

    Usage:
        - number = check_is_number(number, int)
        - number = check_is_number(number, int, "bias")
        - number = check_is_number(number, int, "bias", "bias_class")
    """
    prim_name = f'in \'{prim_name}\'' if prim_name else ''
    arg_name = f'\'{arg_name}\'' if arg_name else 'Input value'
    # Guard-first form: reject wrong types (including bool) before the finiteness check.
    if not isinstance(arg_value, arg_type) or isinstance(arg_value, bool):
        raise TypeError(f'{arg_name} {prim_name} must be {arg_type.__name__}, but got `{type(arg_value).__name__}`')
    if math.isinf(arg_value) or math.isnan(arg_value) or np.isinf(arg_value) or np.isnan(arg_value):
        raise ValueError(f'{arg_name} {prim_name} must be legal float, but got `{arg_value}`.')
    return arg_value
def check_number_range(arg_value, lower_limit, upper_limit, rel, value_type, arg_name=None, prim_name=None):
    """
    Method for checking whether a numeric value lies in the range described by `rel`.

    Usage:
        - number = check_number_range(number, 0.0, 1.0, Rel.INC_NEITHER, "number", float)  # number in (0.0, 1.0)
        - number = check_number_range(number, 0, 1, Rel.INC_NEITHER, "number", int)  # number in (0, 1)

    Raises:
        TypeError: if `arg_value` is not a `value_type` / numpy scalar-or-array, or is a bool.
        ValueError: if `arg_value` falls outside the requested range.
    """
    rel_fn = Rel.get_fns(rel)
    prim_name = f'in `{prim_name}`' if prim_name else ''
    arg_name = f'`{arg_name}`' if arg_name else ''
    # bool subclasses int, so it must be excluded explicitly.
    type_mismatch = not isinstance(arg_value, (np.ndarray, np.generic, value_type)) or isinstance(arg_value, bool)
    if type_mismatch:
        raise TypeError("{} {} must be `{}`, but got `{}`.".format(
            arg_name, prim_name, value_type.__name__, type(arg_value).__name__))
    if not rel_fn(arg_value, lower_limit, upper_limit):
        rel_str = Rel.get_strs(rel).format(lower_limit, upper_limit)
        raise ValueError("{} {} should be in range of {}, but got {:.3e} with type `{}`.".format(
            arg_name, prim_name, rel_str, arg_value, type(arg_value).__name__))
    return arg_value
class Validator:
"""validator for checking input parameters"""
@staticmethod
def check(arg_name, arg_value, value_name, value, rel=Rel.EQ, prim_name=None, excp_cls=ValueError):
"""
Method for judging relation between two int values or list/tuple made up of ints.
This method is not suitable for judging relation between floats, since it does not consider float error.
"""
rel_fn = Rel.get_fns(rel)
if not rel_fn(arg_value, value):
rel_str = Rel.get_strs(rel).format(f'{value_name}: {value}')
msg_prefix = f'For \'{prim_name}\' the' if prim_name else "The"
raise excp_cls(f'{msg_prefix} `{arg_name}` should be {rel_str}, but got {arg_value}.')
return arg_value
@staticmethod
def check_int(arg_value, value, rel, arg_name=None, prim_name=None):
"""
Checks input integer value `arg_value` compare to `value`.
Usage:
- number = check_int(number, 0, Rel.GE, "number", None) # number >= 0
"""
return check_number(arg_value, value, rel, int, arg_name, prim_name)
@staticmethod
def check_is_int(arg_value, arg_name=None, prim_name=None):
"""
Checks input value is float type or not.
Usage:
- number = check_is_int(number, int)
- number = check_is_int(number, int, "bias")
- number = check_is_int(number, int, "bias", "bias_class")
"""
return check_is_number(arg_value, int, arg_name, prim_name)
@staticmethod
def check_equal_int(arg_value, value, arg_name=None, prim_name=None):
"""
Checks input integer value `arg_value` compare to `value`.
Usage:
- number = check_int(number, 0, Rel.GE, "number", None) # number >= 0
"""
return check_number(arg_value, value, Rel.EQ, int, arg_name, prim_name)
@staticmethod
def check_positive_int(arg_value, arg_name=None, prim_name=None):
"""
Check argument is positive integer, which mean arg_value > 0.
Usage:
- number = check_positive_int(number)
- number = check_positive_int(number, "bias")
"""
return check_number(arg_value, 0, Rel.GT, int, arg_name, prim_name)
@staticmethod
def check_negative_int(arg_value, arg_name=None, prim_name=None):
"""
Check argument is negative integer, which mean arg_value < 0.
Usage:
- number = check_negative_int(number)
- number = check_negative_int(number, "bias")
"""
return check_number(arg_value, 0, Rel.LT, int, arg_name, prim_name)
@staticmethod
def check_non_positive_int(arg_value, arg_name=None, prim_name=None):
"""
Check argument is non-negative integer, which mean arg_value <= 0.
Usage:
- number = check_non_positive_int(number)
- number = check_non_positive_int(number, "bias")
"""
return check_number(arg_value, 0, Rel.LE, int, arg_name, prim_name)
@staticmethod
def check_non_negative_int(arg_value, arg_name=None, prim_name=None):
"""
Check argument is non-negative integer, which mean arg_value >= 0.
Usage:
- number = check_non_negative_int(number)
- number = check_non_negative_int(number, "bias")
"""
return check_number(arg_value, 0, Rel.GE, int, arg_name, prim_name)
@staticmethod
def check_float(arg_value, value, rel, arg_name=None, prim_name=None):
"""
Checks input float value `arg_value` compare to `value`.
Usage:
- number = check_float(number, 0.0, Rel.GE, "number", None) # number >= 0
"""
return check_number(arg_value, value, rel, float, arg_name, prim_name)
@staticmethod
def check_is_float(arg_value, arg_name=None, prim_name=None):
"""
Checks input value is float type or not.
Usage:
- number = check_is_float(number, int)
- number = check_is_float(number, int, "bias")
- number = check_is_float(number, int, "bias", "bias_class")
"""
return check_is_number(arg_value, float, arg_name, prim_name)
@staticmethod
def check_positive_float(arg_value, arg_name=None, prim_name=None):
"""
Check argument is positive float, which mean arg_value > 0.
Usage:
- number = check_positive_float(number)
- number = check_positive_float(number, "bias")
- number = check_positive_float(number, "bias", "bias_class")
"""
return check_number(arg_value, 0, Rel.GT, float, arg_name, prim_name)
@staticmethod
def check_negative_float(arg_value, arg_name=None, prim_name=None):
"""
Check argument is negative float, which mean arg_value < 0.
Usage:
- number = check_negative_float(number)
- number = check_negative_float(number, "bias")
"""
return check_number(arg_value, 0, Rel.LT, float, arg_name, prim_name)
@staticmethod
def check_non_positive_float(arg_value, arg_name=None, prim_name=None):
"""
Check argument is non-negative float, which mean arg_value <= 0.
Usage:
- number = check_non_positive_float(number)
- number = check_non_positive_float(number, "bias")
"""
return check_number(arg_value, 0, Rel.LE, float, arg_name, prim_name)
@staticmethod
def check_non_negative_float(arg_value, arg_name=None, prim_name=None):
"""
Check argument is non-negative float, which mean arg_value >= 0.
Usage:
- number = check_non_negative_float(number)
- number = check_non_negative_float(number, "bias")
"""
return check_number(arg_value, 0, Rel.GE, float, arg_name, prim_name)
@staticmethod
def check_number(arg_name, arg_value, value, rel, prim_name):
"""Number value judgment."""
rel_fn = Rel.get_fns(rel)
if not rel_fn(arg_value, value):
rel_str = Rel.get_strs(rel).format(value)
raise ValueError(f'For \'{prim_name}\' the `{arg_name}` must {rel_str}, but got {arg_value}.')
return arg_value
@staticmethod
def check_isinstance(arg_name, arg_value, classes):
"""Check arg isinstance of classes"""
if not isinstance(arg_value, classes):
raise ValueError(f'The `{arg_name}` should be isinstance of {classes}, but got {arg_value}.')
return arg_value
@staticmethod
def check_bool(arg_value, arg_name=None):
"""
Check argument is instance of bool.
Usage:
- has_bias = check_bool(has_bias)
- has_bias = check_bool(has_bias, "has_bias")
"""
if not isinstance(arg_value, bool):
arg_name = arg_name if arg_name else "Parameter"
raise TypeError(f'`{arg_name}` should be isinstance of bool, but got `{arg_value}`.')
return arg_value
@staticmethod
def check_int_range(arg_value, lower_limit, upper_limit, rel, arg_name=None, prim_name=None):
"""
Method for checking whether input value is in int range.
Usage:
- number = check_int_range(number, 0, 1, Rel.INC_NEITHER) # number in [0, 1]
- number = check_int_range(number, 0, 1, Rel.INC_NEITHER, "number") # number in [0, 1]
"""
return check_number_range(arg_value, lower_limit, upper_limit, rel, int, arg_name, prim_name)
@staticmethod
def check_float_range(arg_value, lower_limit, upper_limit, rel, arg_name=None, prim_name=None):
"""
Method for checking whether input value is in float range.
Usage:
- number = check_float_range(number, 0.0, 1.0, Rel.INC_NEITHER) # number in [0.0, 1.0]
- number = check_float_range(number, 0.0, 1.0, Rel.INC_NEITHER, "number") # number in [0.0, 1.0]
"""
return check_number_range(arg_value, lower_limit, upper_limit, rel, float, arg_name, prim_name)
@staticmethod
def check_string(arg_value, valid_values, arg_name=None, prim_name=None):
"""
Check whether string is in some value list.
Usage:
- method = check_string(method, ["string1", "string2", "string3"], "method")
"""
if isinstance(arg_value, str) and arg_value in valid_values:
return arg_value
arg_name = arg_name if arg_name else "Parameter"
msg_prefix = f'For \'{prim_name}\' the' if prim_name else "The"
raise ValueError(f'{msg_prefix} `{arg_name}` should be str and must be in `{valid_values}`,'
f' but got `{arg_value}`.')
@staticmethod
def check_str_by_regular(target, reg=None, flag=re.ASCII, prim_name=None):
if reg is None:
# Named string regular expression
reg = r"^\w+[0-9a-zA-Z\_\.]*$"
if re.match(reg, target, flag) is None:
prim_name = f'in `{prim_name}`' if prim_name else ""
raise ValueError("'{}' {} is illegal, it should be match regular'{}' by flags'{}'".format(
target, prim_name, reg, flag))
return True
@staticmethod
def check_file_name_by_regular(target, reg=None, flag=re.ASCII, prim_name=None):
"""Check whether file name is legitimate."""
if not isinstance(target, str):
raise ValueError("Args file_name {} must be string, please check it".format(target))
if target.endswith("\\") or target.endswith("/"):
raise ValueError("File name cannot be a directory path.")
if reg is None:
reg = r"^[0-9a-zA-Z\_\-\.\:\/\\]+$"
if re.match(reg, target, flag) is None:
prim_name = f'in `{prim_name}`' if prim_name else ""
raise ValueError("'{}' {} is illegal, it should be match regular'{}' by flags'{}'".format(
target, prim_name, reg, flag))
return True
@staticmethod
def check_pad_value_by_mode(pad_mode, padding, prim_name):
"""Validates value of padding according to pad_mode"""
if pad_mode != 'pad' and padding != 0:
raise ValueError(f"For '{prim_name}', padding must be zero when pad_mode is '{pad_mode}'.")
return padding
@staticmethod
def check_subclass(arg_name, type_, template_types, prim_name, addition_error_info=None):
"""Checks whether some type is subclass of another type"""
if not isinstance(template_types, Iterable):
template_types = (template_types,)
hit = False
for template_type in template_types:
if isinstance(template_type, mstype.Type):
if mstype.issubclass_(type_, template_type):
hit = True
break
elif type_ is template_type:
hit = True
break
if not hit:
if addition_error_info is None:
addition_error_info = ''
type_str = (type(type_).__name__ if isinstance(type_, (tuple, list)) else "") + str(type_)
raise TypeError(f'For \'{prim_name}\', the type of `{arg_name}` should be subclass'
f' of {", ".join((str(x) for x in template_types))}, but got {type_str}.'
f' {addition_error_info}')
@staticmethod
def check_const_input(arg_name, arg_value, prim_name):
"""Checks valid value."""
if arg_value is None:
raise ValueError(f'For \'{prim_name}\', the `{arg_name}` must be a const input, but got {arg_value}.')
return arg_value
@staticmethod
def check_types_same_and_valid(args, valid_values, prim_name):
"""Checks whether the types of inputs are the same and valid."""
def _check_type_valid(arg):
arg_key, arg_val = arg
elem_type = arg_val
Validator.check_subclass(arg_key, elem_type, valid_values, prim_name)
return (arg_key, elem_type)
def _check_types_same(arg1, arg2):
arg1_name, arg1_type = arg1
arg2_name, arg2_type = arg2
if arg1_type != arg2_type:
raise TypeError(f'For \'{prim_name}\', type of `{arg2_name}` should be same as `{arg1_name}`,'
f' but `{arg1_name}` with type {arg1_type} and `{arg2_name}` with type {arg2_type}.')
return arg1
elem_types = map(_check_type_valid, args.items())
reduce(_check_types_same, elem_types)
@staticmethod
def check_tensors_dtypes_same_and_valid(args, valid_dtypes, prim_name):
"""Checks whether the element types of input tensors are the same and valid."""
valid_dtypes = valid_dtypes if isinstance(valid_dtypes, Iterable) else [valid_dtypes]
tensor_types = [mstype.tensor_type(t) for t in valid_dtypes]
Validator.check_types_same_and_valid(args, tensor_types, prim_name)
@staticmethod
def check_tensor_dtype_valid(arg_name, arg_type, valid_dtypes, prim_name):
"""Checks whether the element types of input tensors are valid."""
valid_dtypes = valid_dtypes if isinstance(valid_dtypes, Iterable) else [valid_dtypes]
tensor_types = [mstype.tensor_type(t) for t in valid_dtypes]
Validator.check_subclass(arg_name, arg_type, tensor_types, prim_name)
    @staticmethod
    def check_scalar_or_tensor_types_same(args, valid_values, prim_name, allow_mix=False):
        """
        Checks whether the types of inputs are the same. If the input args are tensors, checks their element types.
        If `allow_mix` is True, Tensor(float32) and float32 are type compatible, otherwise an exception will be raised.
        """
        def _check_argument_type(arg):
            # Validate one (name, type) pair: tensors are reduced to their
            # element dtype before membership testing against valid_values.
            arg_key, arg_val = arg
            if isinstance(arg_val, type(mstype.tensor)):
                arg_val = arg_val.element_type()
            if not arg_val in valid_values:
                raise TypeError(f'For \'{prim_name}\', the `{arg_key}` should be in {valid_values},'
                                f' but `{arg_key}` is {arg_val}.')
            return arg
        def _check_types_same(arg1, arg2):
            # Pairwise comparator used by reduce(); decides how to unwrap
            # tensor element types based on `allow_mix`.
            arg1_name, arg1_type = arg1
            arg2_name, arg2_type = arg2
            except_flag = False
            if isinstance(arg1_type, type(mstype.tensor)) and isinstance(arg2_type, type(mstype.tensor)):
                # Both tensors: compare their element dtypes.
                arg1_type = arg1_type.element_type()
                arg2_type = arg2_type.element_type()
            elif not (isinstance(arg1_type, type(mstype.tensor)) or isinstance(arg2_type, type(mstype.tensor))):
                # Both scalars: compare directly.
                pass
            elif allow_mix:
                # One tensor + one scalar, mixing permitted: compare unwrapped types.
                arg1_type = arg1_type.element_type() if isinstance(arg1_type, type(mstype.tensor)) else arg1_type
                arg2_type = arg2_type.element_type() if isinstance(arg2_type, type(mstype.tensor)) else arg2_type
            else:
                # One tensor + one scalar, mixing forbidden: always an error.
                except_flag = True
            if except_flag or arg1_type != arg2_type:
                raise TypeError(f'For \'{prim_name}\' type of `{arg2_name}` should be same as `{arg1_name}`,'
                                f' but `{arg1_name}` is {arg1_type} and `{arg2_name}` is {arg2_type}.')
            return arg1
        reduce(_check_types_same, map(_check_argument_type, args.items()))
@staticmethod
def check_value_type(arg_name, arg_value, valid_types, prim_name=None):
"""Checks whether a value is instance of some types."""
valid_types = valid_types if isinstance(valid_types, Iterable) else (valid_types,)
def raise_error_msg():
"""func for raising error message when check failed"""
type_names = [t.__name__ if hasattr(t, '__name__') else str(t) for t in valid_types]
num_types = len(valid_types)
msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
raise TypeError(f'{msg_prefix} type of `{arg_name}` should be {"one of " if num_types > 1 else ""}'
f'{type_names if num_types > 1 else type_names[0]}, '
f'but got {arg_value} with type {type(arg_value).__name__}.')
# Notice: bool is subclass of int, so `check_value_type('x', True, [int])` will check fail, and
# `check_value_type('x', True, [bool, int])` will check pass
if isinstance(arg_value, bool) and bool not in tuple(valid_types):
raise_error_msg()
if not isinstance(arg_value, tuple(valid_types)):
raise_error_msg()
return arg_value
@staticmethod
def check_type_name(arg_name, arg_type, valid_types, prim_name):
"""Checks whether a type in some specified types"""
valid_types = valid_types if isinstance(valid_types, Iterable) else (valid_types,)
def raise_error_msg():
"""func for raising error message when check failed"""
type_names = [t.__name__ if hasattr(t, '__name__') else t for t in valid_types]
num_types = len(valid_types)
msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
raise TypeError(f"{msg_prefix} '{arg_name}' should be {'one of ' if num_types > 1 else ''}"
f"{type_names if num_types > 1 else type_names[0]}, "
f"but got {arg_type.__name__ if hasattr(arg_type, '__name__') else repr(arg_type)}.")
if isinstance(arg_type, type(mstype.tensor)):
arg_type = arg_type.element_type()
if arg_type not in valid_types:
raise_error_msg()
return arg_type
@staticmethod
def check_reduce_shape(ori_shape, shape, axis, prim_name):
"""Checks whether shape is ori_shape reduced on axis"""
axis = axis if isinstance(axis, Iterable) else (axis,)
exp_shape = [ori_shape[i] for i in range(len(ori_shape)) if i not in axis]
if list(shape) != exp_shape:
raise ValueError(f'For {prim_name}, {ori_shape} reduce on {axis} should be '
f'{tuple(exp_shape)}, but got {shape}.')
    @staticmethod
    def check_astype_dtype(dtype):
        """Check whether dtype is a valid input, and convert to mstype"""
        # Accepted names: everything in mstype.__dtype__ plus the generic
        # aliases "int"/"float"/"bool".
        all_types = mstype.__dtype__ + ["int", "float", "bool"]
        if isinstance(dtype, str):
            if dtype.lower() not in all_types:
                raise TypeError(f"`{dtype}` not understood.")
            # Route strings through numpy so aliases resolve to concrete dtypes
            # (e.g. "int" -> platform default int) before mapping to mstype.
            dtype = mstype.pytype_to_dtype(np.dtype(dtype.lower()))
        elif isinstance(dtype, type):
            dtype = mstype.pytype_to_dtype(dtype)
        elif not dtype in mstype.number_type + (mstype.bool_,):
            # Already an mstype object: must be a numeric or bool dtype.
            raise TypeError(f"`{dtype}` not understood.")
        return dtype
@staticmethod
def check_transpose_axis(axes, ndim):
"""Check the axis argument for tensor.transpose"""
if not axes or (len(axes) == 1 and axes[0] is None):
return tuple(range(ndim-1, -1, -1))
if len(axes) == 1:
perm = axes[0]
# if only one argument provided, it must be tuple or list
if isinstance(perm, list):
perm = tuple(perm)
else:
if not isinstance(perm, tuple):
raise TypeError(f"The `axes` should be a tuple/list, or series of int, but got {type(axes[0])}")
return perm
# if multiple arguments provided, it must be `ndim` number of ints
if len(axes) != ndim:
raise ValueError("The number of axes must equal to the dimension of tensor.")
return axes
@staticmethod
def check_reshape_shp(shp):
"""Check the shape argument for tensor.reshape"""
if len(shp) == 1:
new_shape = shp[0]
# if only one argument provided, it must be int, tuple or list
if isinstance(new_shape, int):
return shp
if isinstance(new_shape, list):
new_shape = tuple(new_shape)
else:
if not isinstance(new_shape, tuple):
raise TypeError(
f"The `shape` should be an int, or tuple/list, or series of int, but got {type(shp[0])}")
return new_shape
return shp
@staticmethod
def check_flatten_order(order):
"""Check flatten function input order"""
if not isinstance(order, str):
raise TypeError(f"The order variable should be a string, but got {type(order)}")
if order not in ('C', 'F'):
raise ValueError(f"only `C` and `F` are supported as order, but got {order}")
return order
@staticmethod
def check_swapaxes_axis(axes, ndim):
"""Check all the axes argument for tensor.swapaxes"""
if isinstance(axes, int):
Validator.check_axis_in_range(axes, ndim)
return axes % ndim
if isinstance(axes, (tuple, list)):
for axis in axes:
if not isinstance(axis, int):
raise TypeError(f"axis argument should be integer, but got {type(axis)}.")
Validator.check_axis_in_range(axis, ndim)
axes = tuple(map(lambda x: x % ndim, axes))
return axes
raise TypeError(f"axes should be integer, list or tuple for check, but got {type(axes)}.")
@staticmethod
def prepare_shape_for_squeeze(shape, axes):
"""
Creates the squeezed new shape based on the tensor and given axes.
Args:
shape (tuple): the shape of the tensor
axes Union[int, tuple(int), list(int)]: the axes with dimensions need to
be squeezed.
Returns:
new_shape(tuple): the shape with dimensions squeezed.
"""
new_shape = []
ndim = len(shape)
# Convert to set
if isinstance(axes, int):
if axes >= ndim or axes < -ndim:
raise ValueError(f"axis {axes} is out of bounds for tensor of dimension {ndim}")
axes = {axes}
elif isinstance(axes, (list, tuple)):
for axis in axes:
if axis >= ndim or axis < -ndim:
raise ValueError(f"axis {axis} is out of bounds for tensor of dimension {ndim}")
axes = set(axes)
else:
raise TypeError(f"only int, tuple and list are allowed for axes, but got {type(axes)}")
for idx, s in enumerate(shape):
if s != 1 or (idx not in axes) and (idx - ndim not in axes):
new_shape.append(s)
# if an axis is selected with shape entry greater than one, an error is raised.
if s != 1 and ((idx in axes) or (idx - ndim in axes)):
raise ValueError(f"axis {axes} has shape entry {s} > 1, cannot be squeezed.")
return tuple(new_shape)
@staticmethod
def check_axis_in_range(axis, ndim):
"""Checks axes are with the bounds of ndim"""
if not isinstance(axis, int):
raise TypeError(f'axes should be integers, not {type(axis)}')
if not -ndim <= axis < ndim:
raise ValueError(f'axis {axis} is out of bounds for array of dimension {ndim}')
return axis % ndim
@staticmethod
def check_axis_valid(axes, ndim):
"""
Checks axes are valid given ndim, and returns axes that can be passed
to the built-in operator (non-negative, int or tuple)
"""
if axes is None:
axes = tuple(range(ndim))
return axes
if isinstance(axes, (tuple, list)):
for axis in axes:
Validator.check_axis_in_range(axis, ndim)
axes = tuple(map(lambda x: x % ndim, axes))
if any(axes.count(el) > 1 for el in axes):
raise ValueError('duplicate value in "axis"')
return axes
Validator.check_axis_in_range(axes, ndim)
return (axes % ndim,)
@staticmethod
def max_(*args):
return max(*args)
@staticmethod
def min_(*args):
return min(*args)
@staticmethod
def expanded_shape(ndim, axis_size, axis):
"""
Returns a shape with size = 1 for all dimensions
except at axis.
"""
return tuple(axis_size if i == axis else 1 for i in range(ndim))
@staticmethod
def tuple_slice(tup, start, end):
"""get sliced tuple from start and end."""
return tup[start:end]
@staticmethod
def infer_out_shape(*shapes):
"""
Returns shape of output after broadcasting. Raises ValueError if shapes cannot be broadcast.
"""
shape_out = deque()
reversed_shapes = map(reversed, shapes)
for items in zip_longest(*reversed_shapes, fillvalue=1):
max_size = 0 if 0 in items else max(items)
if any(item not in (1, max_size) for item in items):
raise ValueError(f'operands could not be broadcast together with shapes {*shapes,}')
shape_out.appendleft(max_size)
return tuple(shape_out)
@staticmethod
def get_log2_size(size):
return math.ceil(math.log2(size))
@staticmethod
def check_axis_type(axis, type_int=True, type_tuple=True, type_list=True):
"""Check axis argument type."""
if type_int and isinstance(axis, int):
return True
if (type_tuple and isinstance(axis, tuple)) or (type_list and isinstance(axis, list)):
for ax in axis:
if not isinstance(ax, int):
raise TypeError(f"Each axis should be integer, but got {type(ax)} in {axis}.")
return True
type_str = ""
if type_int: type_str += "int, "
if type_tuple: type_str += "tuple, "
if type_list: type_str += "list, "
raise TypeError(f"Axis should be {type_str}but got {type(axis)}.")
@staticmethod
def check_and_canonicalize_axes(axes, ndim):
"""Check whether the types and values of input axes are valid."""
axes = axes if isinstance(axes, tuple) else (axes,)
new_axes = ()
for ax in axes:
if not isinstance(ax, int):
raise TypeError((f"Each axis should be integer, but got {type(ax)} in {axes}."))
if not -ndim <= ax < ndim:
raise ValueError(f'axis {ax} is out of bounds for array of dimension {ndim}')
ax = ax if ax >= 0 else ax + ndim
new_axes += (ax,)
return new_axes
def check_input_format(input_param):
    """Validate the data-format string; only "NCHW" is accepted."""
    if input_param != "NCHW":
        raise ValueError("The data format must be NCHW.")
    return input_param
def _expand_tuple(n_dimensions):
"""To expand a int number to tuple."""
def convert(m):
if not isinstance(m, tuple):
if isinstance(m, int) and not isinstance(m, bool):
return tuple(repeat(m, n_dimensions))
raise TypeError("Input type must be int or tuple[int].")
if not len(m) is n_dimensions:
raise TypeError("Input tuple dimension is incorrect.")
for i in m:
if not isinstance(i, int) or isinstance(i, bool):
raise TypeError("Incorrect type inside of a tuple, must be int!")
return m
return convert
def _check_data_type_valid(data, valid_type):
"""Check data type valid."""
if valid_type is None:
return data is None
if isinstance(data, valid_type):
if hasattr(data, 'size') and data.size == 0:
msg = "Please provide non-empty data."
logger.error(msg)
raise ValueError(msg)
return True
return False
def check_input_data(*data, data_class):
    """Recursively validate that every leaf element of `data` matches `data_class`."""
    for item in data:
        # Containers are walked recursively; only leaves are type-checked.
        if isinstance(item, (list, tuple)):
            for element in item:
                check_input_data(element, data_class=data_class)
        elif isinstance(item, dict):
            for element in item.values():
                check_input_data(element, data_class=data_class)
        else:
            if isinstance(data_class, (tuple, list)):
                ok = any(_check_data_type_valid(item, data_type) for data_type in data_class)
            else:
                ok = _check_data_type_valid(item, data_class)
            if not ok:
                data_class_str = tuple(i.__name__ if hasattr(i, '__name__') else i for i in data_class) \
                    if isinstance(data_class, (tuple, list)) else \
                    (data_class if data_class is None else data_class.__name__)
                raise ValueError(f'Please provide as model inputs either a single or '
                                 f'a tuple or a list or a dict of {data_class_str}, '
                                 f'but got part data type is {item if item is None else type(item).__name__}.')
def check_output_data(data):
    """Raise RuntimeError when the executor returned no data (None)."""
    if data is None:
        raise RuntimeError('Executor return data ' + str(data) + ', please check your net or input data.')
# Commonly used tuple expanders: replicate a single int into a
# 1/2/3-element tuple (e.g. kernel sizes and strides for 1D/2D/3D layers).
once = _expand_tuple(1)
twice = _expand_tuple(2)
triple = _expand_tuple(3)
def args_type_check(*type_args, **type_kwargs):
    """Decorator factory that checks the types of a function's call arguments.

    The expected types are declared positionally or by keyword, mirroring the
    decorated function's signature, e.g.::

        @args_type_check(x=int, name=str)
        def f(x, name): ...

    A ``None`` argument value always passes.

    Raises:
        TypeError: when a checked argument does not match its declared type.
    """
    def type_check(func):
        sig = inspect.signature(func)
        bound_types = sig.bind_partial(*type_args, **type_kwargs).arguments

        @wraps(func)
        def wrapper(*args, **kwargs):
            # Fix: the previous implementation rebound `bound_types` via
            # `nonlocal` on the first call, permanently mutating the closure.
            # Compute the effective type map locally on each call instead.
            expected = bound_types["kwargs"] if "kwargs" in bound_types else bound_types
            argument_dict = sig.bind(*args, **kwargs).arguments
            if "kwargs" in argument_dict:
                argument_dict = argument_dict["kwargs"]
            for name, value in argument_dict.items():
                if name in expected and value is not None and not isinstance(value, expected[name]):
                    raise TypeError('Argument {} must be {}'.format(name, expected[name]))
            return func(*args, **kwargs)
        return wrapper
    return type_check
| 40.812367 | 119 | 0.61094 |
import re
import inspect
import math
from enum import Enum
from functools import reduce, wraps
from itertools import repeat, zip_longest
from collections import deque
from collections.abc import Iterable
import numpy as np
from mindspore import log as logger
from mindspore.common import dtype as mstype
class Rel(Enum):
    """Numeric relations used by the range/value checking helpers."""
    EQ = 1          # ==
    NE = 2          # !=
    LT = 3          # <
    LE = 4          # <=
    GT = 5          # >
    GE = 6          # >=
    # Scalar range checks (lower, upper):
    INC_NEITHER = 7     # (lower, upper) — include neither bound
    INC_LEFT = 8        # [lower, upper) — include the left bound
    INC_RIGHT = 9       # (lower, upper] — include the right bound
    INC_BOTH = 10       # [lower, upper] — include both bounds
    # Collection membership:
    IN = 11
    NOT_IN = 12
    @staticmethod
    def get_strs(rel):
        """Return the message template for `rel` from `rel_strs` ("" if unknown)."""
        return rel_strs.get(rel, "")
    @staticmethod
    def get_fns(rel):
        """Return the predicate for `rel` from `rel_fns` (always-False if unknown)."""
        return rel_fns.get(rel, lambda *args: False)
# Predicate implementing each Rel member: range relations take
# (value, lower, upper); all others take (value, other).
rel_fns = {
    Rel.EQ: lambda x, y: x == y,
    Rel.NE: lambda x, y: x != y,
    Rel.LT: lambda x, y: x < y,
    Rel.LE: lambda x, y: x <= y,
    Rel.GT: lambda x, y: x > y,
    Rel.GE: lambda x, y: x >= y,
    Rel.INC_NEITHER: lambda x, lower, upper: (lower < x < upper),
    Rel.INC_LEFT: lambda x, lower, upper: (lower <= x < upper),
    Rel.INC_RIGHT: lambda x, lower, upper: (lower < x <= upper),
    Rel.INC_BOTH: lambda x, lower, upper: (lower <= x <= upper),
    Rel.IN: lambda x, y: x in y,
    Rel.NOT_IN: lambda x, y: x not in y,
}
# Human-readable format template for each Rel member, filled in with the
# bound value(s) when composing error messages.
rel_strs = {
    Rel.EQ: "== {}",
    Rel.NE: "!= {}",
    Rel.LT: "< {}",
    Rel.LE: "<= {}",
    Rel.GT: "> {}",
    Rel.GE: ">= {}",
    Rel.INC_NEITHER: "({}, {})",
    Rel.INC_LEFT: "[{}, {})",
    Rel.INC_RIGHT: "({}, {}]",
    Rel.INC_BOTH: "[{}, {}]",
    Rel.IN: "in {}",
    Rel.NOT_IN: "not in {}",
}
def _check_3d_int_or_tuple(arg_name, arg_value, prim_name, allow_five=False, ret_five=False,
                           greater_zero=True, third_one=False, three_input=False):
    """
    Check whether an argument is a positive int or a tuple of 3 (or 5, when
    `allow_five` is True) positive ints, and normalize it.

    Args:
        arg_name (str): argument name used in error messages.
        arg_value: the value to validate (int or tuple).
        prim_name (str): primitive name used in error messages.
        allow_five (bool): accept a 5-element tuple as input.
        ret_five (bool): return a 5-element tuple (padded with leading 1s).
        greater_zero (bool): require elements > 0 when True, >= 0 when False.
        third_one (bool): require the depth element (index -3) to be exactly 1.
        three_input (bool): require a tuple input to have exactly 3 elements.
    """
    def _raise_message(third_one_flag=False, three_input_flag=False):
        # Single place for all failure messages of this checker.
        if third_one_flag:
            raise ValueError(f"For '{prim_name}' the depth of attr '{arg_name}' should be 1, but got {ret_value[-3]}")
        if three_input_flag:
            raise ValueError(f"For '{prim_name}' attr '{arg_name}' should be an positive int number or a tuple of "
                             f"three positive int numbers, but got {arg_value}")
        raise ValueError(f"For '{prim_name}' attr '{arg_name}' should be an positive int number or a tuple of three "
                         f"{'or five ' if allow_five else ''}positive int numbers, but got {arg_value}")
    def _get_return_value():
        # Normalize `arg_value` into a 3- or 5-tuple depending on `ret_five`.
        if isinstance(arg_value, int):
            ret = (1, 1, arg_value, arg_value, arg_value) if ret_five else (arg_value, arg_value, arg_value)
        elif len(arg_value) == 3:
            ret = (1, 1, arg_value[0], arg_value[1], arg_value[2]) if ret_five else arg_value
        elif len(arg_value) == 5:
            if not allow_five:
                _raise_message()
            ret = arg_value if ret_five else (arg_value[1], arg_value[2], arg_value[3])
        else:
            _raise_message()
        return ret
    Validator.check_value_type(arg_name, arg_value, (int, tuple), prim_name)
    if three_input and isinstance(arg_value, tuple):
        if len(arg_value) != 3:
            _raise_message(three_input_flag=three_input)
    ret_value = _get_return_value()
    for item in ret_value:
        # Every element must be a (strictly) positive int; bool is excluded.
        if isinstance(item, int) and not isinstance(item, bool):
            if greater_zero and item > 0:
                continue
            if not greater_zero and item >= 0:
                continue
        _raise_message()
    if third_one:
        if ret_value[-3] != 1:
            _raise_message(third_one_flag=third_one)
    return tuple(ret_value)
def check_number(arg_value, value, rel, arg_type=int, arg_name=None, prim_name=None):
    """
    Check the relation `rel` between `arg_value` and `value` for a scalar of `arg_type`.

    Usage:
        - number = check_number(number, 0, Rel.GE, int, "number")  # number >= 0
    """
    rel_fn = Rel.get_fns(rel)
    prim_name = f'in `{prim_name}`' if prim_name else ''
    arg_name = f'`{arg_name}`' if arg_name else ''
    if isinstance(arg_value, arg_type):
        # Reject inf/nan even when the type itself is correct.
        if math.isinf(arg_value) or math.isnan(arg_value) or np.isinf(arg_value) or np.isnan(arg_value):
            raise ValueError(f'{arg_name} {prim_name} must be legal value, but got `{arg_value}`.')
    else:
        raise TypeError(f'{arg_name} {prim_name} must be {arg_type.__name__}, but got `{type(arg_value).__name__}`')
    # bool subclasses int, so a bool never passes an int check.
    type_mismatch = not isinstance(arg_value, arg_type) or isinstance(arg_value, bool)
    type_except = TypeError if type_mismatch else ValueError
    if type_mismatch or not rel_fn(arg_value, value):
        rel_str = Rel.get_strs(rel).format(value)
        raise type_except(f'{arg_name} {prim_name} should be an {arg_type.__name__} and must {rel_str}, '
                          f'but got `{arg_value}` with type `{type(arg_value).__name__}`.')
    return arg_value
def check_is_number(arg_value, arg_type, arg_name=None, prim_name=None):
    """
    Check that `arg_value` is a finite instance of `arg_type` (bool excluded).

    Usage:
        - number = check_is_number(number, int)
        - number = check_is_number(number, int, "bias")
    """
    prim_name = f'in \'{prim_name}\'' if prim_name else ''
    arg_name = f'\'{arg_name}\'' if arg_name else 'Input value'
    if isinstance(arg_value, arg_type) and not isinstance(arg_value, bool):
        # Reject inf/nan even when the type itself is correct.
        if math.isinf(arg_value) or math.isnan(arg_value) or np.isinf(arg_value) or np.isnan(arg_value):
            raise ValueError(f'{arg_name} {prim_name} must be legal float, but got `{arg_value}`.')
        return arg_value
    raise TypeError(f'{arg_name} {prim_name} must be {arg_type.__name__}, but got `{type(arg_value).__name__}`')
def check_number_range(arg_value, lower_limit, upper_limit, rel, value_type, arg_name=None, prim_name=None):
    """
    Check that `arg_value` of `value_type` lies in the range selected by `rel`.

    Usage:
        - number = check_number_range(number, 0.0, 1.0, Rel.INC_NEITHER, float)  # number in (0.0, 1.0)
    """
    rel_fn = Rel.get_fns(rel)
    prim_name = f'in `{prim_name}`' if prim_name else ''
    arg_name = f'`{arg_name}`' if arg_name else ''
    # bool subclasses int, so a bool never passes a numeric range check;
    # numpy scalars/arrays are accepted alongside plain value_type.
    type_mismatch = not isinstance(arg_value, (np.ndarray, np.generic, value_type)) or isinstance(arg_value, bool)
    if type_mismatch:
        raise TypeError("{} {} must be `{}`, but got `{}`.".format(
            arg_name, prim_name, value_type.__name__, type(arg_value).__name__))
    if not rel_fn(arg_value, lower_limit, upper_limit):
        rel_str = Rel.get_strs(rel).format(lower_limit, upper_limit)
        raise ValueError("{} {} should be in range of {}, but got {:.3e} with type `{}`.".format(
            arg_name, prim_name, rel_str, arg_value, type(arg_value).__name__))
    return arg_value
class Validator:
@staticmethod
def check(arg_name, arg_value, value_name, value, rel=Rel.EQ, prim_name=None, excp_cls=ValueError):
rel_fn = Rel.get_fns(rel)
if not rel_fn(arg_value, value):
rel_str = Rel.get_strs(rel).format(f'{value_name}: {value}')
msg_prefix = f'For \'{prim_name}\' the' if prim_name else "The"
raise excp_cls(f'{msg_prefix} `{arg_name}` should be {rel_str}, but got {arg_value}.')
return arg_value
@staticmethod
def check_int(arg_value, value, rel, arg_name=None, prim_name=None):
return check_number(arg_value, value, rel, int, arg_name, prim_name)
@staticmethod
def check_is_int(arg_value, arg_name=None, prim_name=None):
return check_is_number(arg_value, int, arg_name, prim_name)
@staticmethod
def check_equal_int(arg_value, value, arg_name=None, prim_name=None):
return check_number(arg_value, value, Rel.EQ, int, arg_name, prim_name)
@staticmethod
def check_positive_int(arg_value, arg_name=None, prim_name=None):
return check_number(arg_value, 0, Rel.GT, int, arg_name, prim_name)
@staticmethod
def check_negative_int(arg_value, arg_name=None, prim_name=None):
return check_number(arg_value, 0, Rel.LT, int, arg_name, prim_name)
@staticmethod
def check_non_positive_int(arg_value, arg_name=None, prim_name=None):
return check_number(arg_value, 0, Rel.LE, int, arg_name, prim_name)
@staticmethod
def check_non_negative_int(arg_value, arg_name=None, prim_name=None):
return check_number(arg_value, 0, Rel.GE, int, arg_name, prim_name)
@staticmethod
def check_float(arg_value, value, rel, arg_name=None, prim_name=None):
return check_number(arg_value, value, rel, float, arg_name, prim_name)
@staticmethod
def check_is_float(arg_value, arg_name=None, prim_name=None):
return check_is_number(arg_value, float, arg_name, prim_name)
@staticmethod
def check_positive_float(arg_value, arg_name=None, prim_name=None):
return check_number(arg_value, 0, Rel.GT, float, arg_name, prim_name)
@staticmethod
def check_negative_float(arg_value, arg_name=None, prim_name=None):
return check_number(arg_value, 0, Rel.LT, float, arg_name, prim_name)
@staticmethod
def check_non_positive_float(arg_value, arg_name=None, prim_name=None):
return check_number(arg_value, 0, Rel.LE, float, arg_name, prim_name)
@staticmethod
def check_non_negative_float(arg_value, arg_name=None, prim_name=None):
return check_number(arg_value, 0, Rel.GE, float, arg_name, prim_name)
@staticmethod
def check_number(arg_name, arg_value, value, rel, prim_name):
rel_fn = Rel.get_fns(rel)
if not rel_fn(arg_value, value):
rel_str = Rel.get_strs(rel).format(value)
raise ValueError(f'For \'{prim_name}\' the `{arg_name}` must {rel_str}, but got {arg_value}.')
return arg_value
@staticmethod
def check_isinstance(arg_name, arg_value, classes):
if not isinstance(arg_value, classes):
raise ValueError(f'The `{arg_name}` should be isinstance of {classes}, but got {arg_value}.')
return arg_value
@staticmethod
def check_bool(arg_value, arg_name=None):
if not isinstance(arg_value, bool):
arg_name = arg_name if arg_name else "Parameter"
raise TypeError(f'`{arg_name}` should be isinstance of bool, but got `{arg_value}`.')
return arg_value
@staticmethod
def check_int_range(arg_value, lower_limit, upper_limit, rel, arg_name=None, prim_name=None):
return check_number_range(arg_value, lower_limit, upper_limit, rel, int, arg_name, prim_name)
@staticmethod
def check_float_range(arg_value, lower_limit, upper_limit, rel, arg_name=None, prim_name=None):
return check_number_range(arg_value, lower_limit, upper_limit, rel, float, arg_name, prim_name)
@staticmethod
def check_string(arg_value, valid_values, arg_name=None, prim_name=None):
if isinstance(arg_value, str) and arg_value in valid_values:
return arg_value
arg_name = arg_name if arg_name else "Parameter"
msg_prefix = f'For \'{prim_name}\' the' if prim_name else "The"
raise ValueError(f'{msg_prefix} `{arg_name}` should be str and must be in `{valid_values}`,'
f' but got `{arg_value}`.')
@staticmethod
def check_str_by_regular(target, reg=None, flag=re.ASCII, prim_name=None):
if reg is None:
reg = r"^\w+[0-9a-zA-Z\_\.]*$"
if re.match(reg, target, flag) is None:
prim_name = f'in `{prim_name}`' if prim_name else ""
raise ValueError("'{}' {} is illegal, it should be match regular'{}' by flags'{}'".format(
target, prim_name, reg, flag))
return True
@staticmethod
def check_file_name_by_regular(target, reg=None, flag=re.ASCII, prim_name=None):
if not isinstance(target, str):
raise ValueError("Args file_name {} must be string, please check it".format(target))
if target.endswith("\\") or target.endswith("/"):
raise ValueError("File name cannot be a directory path.")
if reg is None:
reg = r"^[0-9a-zA-Z\_\-\.\:\/\\]+$"
if re.match(reg, target, flag) is None:
prim_name = f'in `{prim_name}`' if prim_name else ""
raise ValueError("'{}' {} is illegal, it should be match regular'{}' by flags'{}'".format(
target, prim_name, reg, flag))
return True
@staticmethod
def check_pad_value_by_mode(pad_mode, padding, prim_name):
if pad_mode != 'pad' and padding != 0:
raise ValueError(f"For '{prim_name}', padding must be zero when pad_mode is '{pad_mode}'.")
return padding
@staticmethod
def check_subclass(arg_name, type_, template_types, prim_name, addition_error_info=None):
if not isinstance(template_types, Iterable):
template_types = (template_types,)
hit = False
for template_type in template_types:
if isinstance(template_type, mstype.Type):
if mstype.issubclass_(type_, template_type):
hit = True
break
elif type_ is template_type:
hit = True
break
if not hit:
if addition_error_info is None:
addition_error_info = ''
type_str = (type(type_).__name__ if isinstance(type_, (tuple, list)) else "") + str(type_)
raise TypeError(f'For \'{prim_name}\', the type of `{arg_name}` should be subclass'
f' of {", ".join((str(x) for x in template_types))}, but got {type_str}.'
f' {addition_error_info}')
@staticmethod
def check_const_input(arg_name, arg_value, prim_name):
if arg_value is None:
raise ValueError(f'For \'{prim_name}\', the `{arg_name}` must be a const input, but got {arg_value}.')
return arg_value
@staticmethod
def check_types_same_and_valid(args, valid_values, prim_name):
def _check_type_valid(arg):
arg_key, arg_val = arg
elem_type = arg_val
Validator.check_subclass(arg_key, elem_type, valid_values, prim_name)
return (arg_key, elem_type)
def _check_types_same(arg1, arg2):
arg1_name, arg1_type = arg1
arg2_name, arg2_type = arg2
if arg1_type != arg2_type:
raise TypeError(f'For \'{prim_name}\', type of `{arg2_name}` should be same as `{arg1_name}`,'
f' but `{arg1_name}` with type {arg1_type} and `{arg2_name}` with type {arg2_type}.')
return arg1
elem_types = map(_check_type_valid, args.items())
reduce(_check_types_same, elem_types)
    @staticmethod
    def check_tensors_dtypes_same_and_valid(args, valid_dtypes, prim_name):
        """Check all tensors in `args` share one element dtype from `valid_dtypes`."""
        valid_dtypes = valid_dtypes if isinstance(valid_dtypes, Iterable) else [valid_dtypes]
        # Wrap element dtypes into tensor types before delegating.
        tensor_types = [mstype.tensor_type(t) for t in valid_dtypes]
        Validator.check_types_same_and_valid(args, tensor_types, prim_name)
    @staticmethod
    def check_tensor_dtype_valid(arg_name, arg_type, valid_dtypes, prim_name):
        """Check a single tensor's type against the allowed element dtypes."""
        valid_dtypes = valid_dtypes if isinstance(valid_dtypes, Iterable) else [valid_dtypes]
        # Wrap element dtypes into tensor types so check_subclass compares like-for-like.
        tensor_types = [mstype.tensor_type(t) for t in valid_dtypes]
        Validator.check_subclass(arg_name, arg_type, tensor_types, prim_name)
    @staticmethod
    def check_scalar_or_tensor_types_same(args, valid_values, prim_name, allow_mix=False):
        """Check scalar/tensor args have valid, mutually consistent types.

        With allow_mix=True a scalar type may be compared against a tensor's
        element type; otherwise mixing scalar and tensor args raises.
        """
        def _check_argument_type(arg):
            # Validate one (name, type) pair; tensors are checked by element type.
            arg_key, arg_val = arg
            if isinstance(arg_val, type(mstype.tensor)):
                arg_val = arg_val.element_type()
            if not arg_val in valid_values:
                raise TypeError(f'For \'{prim_name}\', the `{arg_key}` should be in {valid_values},'
                                f' but `{arg_key}` is {arg_val}.')
            return arg
        def _check_types_same(arg1, arg2):
            # Pairwise step for reduce(): unwrap tensor element types as allowed.
            arg1_name, arg1_type = arg1
            arg2_name, arg2_type = arg2
            except_flag = False
            if isinstance(arg1_type, type(mstype.tensor)) and isinstance(arg2_type, type(mstype.tensor)):
                arg1_type = arg1_type.element_type()
                arg2_type = arg2_type.element_type()
            elif not (isinstance(arg1_type, type(mstype.tensor)) or isinstance(arg2_type, type(mstype.tensor))):
                # Both scalars: compare as-is.
                pass
            elif allow_mix:
                arg1_type = arg1_type.element_type() if isinstance(arg1_type, type(mstype.tensor)) else arg1_type
                arg2_type = arg2_type.element_type() if isinstance(arg2_type, type(mstype.tensor)) else arg2_type
            else:
                # One side tensor, one side scalar, and mixing is disallowed.
                except_flag = True
            if except_flag or arg1_type != arg2_type:
                raise TypeError(f'For \'{prim_name}\' type of `{arg2_name}` should be same as `{arg1_name}`,'
                                f' but `{arg1_name}` is {arg1_type} and `{arg2_name}` is {arg2_type}.')
            return arg1
        reduce(_check_types_same, map(_check_argument_type, args.items()))
@staticmethod
def check_value_type(arg_name, arg_value, valid_types, prim_name=None):
valid_types = valid_types if isinstance(valid_types, Iterable) else (valid_types,)
def raise_error_msg():
type_names = [t.__name__ if hasattr(t, '__name__') else str(t) for t in valid_types]
num_types = len(valid_types)
msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
raise TypeError(f'{msg_prefix} type of `{arg_name}` should be {"one of " if num_types > 1 else ""}'
f'{type_names if num_types > 1 else type_names[0]}, '
f'but got {arg_value} with type {type(arg_value).__name__}.')
if isinstance(arg_value, bool) and bool not in tuple(valid_types):
raise_error_msg()
if not isinstance(arg_value, tuple(valid_types)):
raise_error_msg()
return arg_value
    @staticmethod
    def check_type_name(arg_name, arg_type, valid_types, prim_name):
        """Check `arg_type` (tensors unwrapped to element dtype) is in `valid_types`."""
        valid_types = valid_types if isinstance(valid_types, Iterable) else (valid_types,)
        def raise_error_msg():
            # Compose a readable listing of the accepted type names.
            type_names = [t.__name__ if hasattr(t, '__name__') else t for t in valid_types]
            num_types = len(valid_types)
            msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
            raise TypeError(f"{msg_prefix} '{arg_name}' should be {'one of ' if num_types > 1 else ''}"
                            f"{type_names if num_types > 1 else type_names[0]}, "
                            f"but got {arg_type.__name__ if hasattr(arg_type, '__name__') else repr(arg_type)}.")
        if isinstance(arg_type, type(mstype.tensor)):
            # Tensor types are validated via their element dtype.
            arg_type = arg_type.element_type()
        if arg_type not in valid_types:
            raise_error_msg()
        return arg_type
@staticmethod
def check_reduce_shape(ori_shape, shape, axis, prim_name):
axis = axis if isinstance(axis, Iterable) else (axis,)
exp_shape = [ori_shape[i] for i in range(len(ori_shape)) if i not in axis]
if list(shape) != exp_shape:
raise ValueError(f'For {prim_name}, {ori_shape} reduce on {axis} should be '
f'{tuple(exp_shape)}, but got {shape}.')
    @staticmethod
    def check_astype_dtype(dtype):
        """Validate and canonicalize an astype() dtype argument into an mstype dtype."""
        all_types = mstype.__dtype__ + ["int", "float", "bool"]
        if isinstance(dtype, str):
            if dtype.lower() not in all_types:
                raise TypeError(f"`{dtype}` not understood.")
            # Route string names through numpy for canonical spelling.
            dtype = mstype.pytype_to_dtype(np.dtype(dtype.lower()))
        elif isinstance(dtype, type):
            dtype = mstype.pytype_to_dtype(dtype)
        elif not dtype in mstype.number_type + (mstype.bool_,):
            # Already an mstype dtype is accepted; anything else is rejected.
            raise TypeError(f"`{dtype}` not understood.")
        return dtype
@staticmethod
def check_transpose_axis(axes, ndim):
if not axes or (len(axes) == 1 and axes[0] is None):
return tuple(range(ndim-1, -1, -1))
if len(axes) == 1:
perm = axes[0]
if isinstance(perm, list):
perm = tuple(perm)
else:
if not isinstance(perm, tuple):
raise TypeError(f"The `axes` should be a tuple/list, or series of int, but got {type(axes[0])}")
return perm
if len(axes) != ndim:
raise ValueError("The number of axes must equal to the dimension of tensor.")
return axes
@staticmethod
def check_reshape_shp(shp):
if len(shp) == 1:
new_shape = shp[0]
if isinstance(new_shape, int):
return shp
if isinstance(new_shape, list):
new_shape = tuple(new_shape)
else:
if not isinstance(new_shape, tuple):
raise TypeError(
f"The `shape` should be an int, or tuple/list, or series of int, but got {type(shp[0])}")
return new_shape
return shp
@staticmethod
def check_flatten_order(order):
if not isinstance(order, str):
raise TypeError(f"The order variable should be a string, but got {type(order)}")
if order not in ('C', 'F'):
raise ValueError(f"only `C` and `F` are supported as order, but got {order}")
return order
@staticmethod
def check_swapaxes_axis(axes, ndim):
if isinstance(axes, int):
Validator.check_axis_in_range(axes, ndim)
return axes % ndim
if isinstance(axes, (tuple, list)):
for axis in axes:
if not isinstance(axis, int):
raise TypeError(f"axis argument should be integer, but got {type(axis)}.")
Validator.check_axis_in_range(axis, ndim)
axes = tuple(map(lambda x: x % ndim, axes))
return axes
raise TypeError(f"axes should be integer, list or tuple for check, but got {type(axes)}.")
@staticmethod
def prepare_shape_for_squeeze(shape, axes):
new_shape = []
ndim = len(shape)
if isinstance(axes, int):
if axes >= ndim or axes < -ndim:
raise ValueError(f"axis {axes} is out of bounds for tensor of dimension {ndim}")
axes = {axes}
elif isinstance(axes, (list, tuple)):
for axis in axes:
if axis >= ndim or axis < -ndim:
raise ValueError(f"axis {axis} is out of bounds for tensor of dimension {ndim}")
axes = set(axes)
else:
raise TypeError(f"only int, tuple and list are allowed for axes, but got {type(axes)}")
for idx, s in enumerate(shape):
if s != 1 or (idx not in axes) and (idx - ndim not in axes):
new_shape.append(s)
if s != 1 and ((idx in axes) or (idx - ndim in axes)):
raise ValueError(f"axis {axes} has shape entry {s} > 1, cannot be squeezed.")
return tuple(new_shape)
@staticmethod
def check_axis_in_range(axis, ndim):
if not isinstance(axis, int):
raise TypeError(f'axes should be integers, not {type(axis)}')
if not -ndim <= axis < ndim:
raise ValueError(f'axis {axis} is out of bounds for array of dimension {ndim}')
return axis % ndim
@staticmethod
def check_axis_valid(axes, ndim):
if axes is None:
axes = tuple(range(ndim))
return axes
if isinstance(axes, (tuple, list)):
for axis in axes:
Validator.check_axis_in_range(axis, ndim)
axes = tuple(map(lambda x: x % ndim, axes))
if any(axes.count(el) > 1 for el in axes):
raise ValueError('duplicate value in "axis"')
return axes
Validator.check_axis_in_range(axes, ndim)
return (axes % ndim,)
    @staticmethod
    def max_(*args):
        """Thin wrapper over the built-in max()."""
        return max(*args)
    @staticmethod
    def min_(*args):
        """Thin wrapper over the built-in min()."""
        return min(*args)
@staticmethod
def expanded_shape(ndim, axis_size, axis):
return tuple(axis_size if i == axis else 1 for i in range(ndim))
    @staticmethod
    def tuple_slice(tup, start, end):
        """Return tup[start:end]."""
        return tup[start:end]
@staticmethod
def infer_out_shape(*shapes):
shape_out = deque()
reversed_shapes = map(reversed, shapes)
for items in zip_longest(*reversed_shapes, fillvalue=1):
max_size = 0 if 0 in items else max(items)
if any(item not in (1, max_size) for item in items):
raise ValueError(f'operands could not be broadcast together with shapes {*shapes,}')
shape_out.appendleft(max_size)
return tuple(shape_out)
    @staticmethod
    def get_log2_size(size):
        """Ceiling of log2(size)."""
        return math.ceil(math.log2(size))
@staticmethod
def check_axis_type(axis, type_int=True, type_tuple=True, type_list=True):
if type_int and isinstance(axis, int):
return True
if (type_tuple and isinstance(axis, tuple)) or (type_list and isinstance(axis, list)):
for ax in axis:
if not isinstance(ax, int):
raise TypeError(f"Each axis should be integer, but got {type(ax)} in {axis}.")
return True
type_str = ""
if type_int: type_str += "int, "
if type_tuple: type_str += "tuple, "
if type_list: type_str += "list, "
raise TypeError(f"Axis should be {type_str}but got {type(axis)}.")
@staticmethod
def check_and_canonicalize_axes(axes, ndim):
axes = axes if isinstance(axes, tuple) else (axes,)
new_axes = ()
for ax in axes:
if not isinstance(ax, int):
raise TypeError((f"Each axis should be integer, but got {type(ax)} in {axes}."))
if not -ndim <= ax < ndim:
raise ValueError(f'axis {ax} is out of bounds for array of dimension {ndim}')
ax = ax if ax >= 0 else ax + ndim
new_axes += (ax,)
return new_axes
def check_input_format(input_param):
    """Validate a data-format string; only 'NCHW' is supported."""
    if input_param != "NCHW":
        raise ValueError("The data format must be NCHW.")
    return input_param
def _expand_tuple(n_dimensions):
def convert(m):
if not isinstance(m, tuple):
if isinstance(m, int) and not isinstance(m, bool):
return tuple(repeat(m, n_dimensions))
raise TypeError("Input type must be int or tuple[int].")
if not len(m) is n_dimensions:
raise TypeError("Input tuple dimension is incorrect.")
for i in m:
if not isinstance(i, int) or isinstance(i, bool):
raise TypeError("Incorrect type inside of a tuple, must be int!")
return m
return convert
def _check_data_type_valid(data, valid_type):
if valid_type is None:
return data is None
if isinstance(data, valid_type):
if hasattr(data, 'size') and data.size == 0:
msg = "Please provide non-empty data."
logger.error(msg)
raise ValueError(msg)
return True
return False
def check_input_data(*data, data_class):
    """Recursively check that every item in `data` matches `data_class`.

    Lists/tuples and dict values are validated element-wise; `data_class`
    may be a single class, None, or a tuple/list of candidate classes.
    Raises ValueError naming the expected class(es) on mismatch.
    """
    for item in data:
        if isinstance(item, (list, tuple)):
            # Containers are validated element by element.
            for v in item:
                check_input_data(v, data_class=data_class)
        elif isinstance(item, dict):
            for v in item.values():
                check_input_data(v, data_class=data_class)
        else:
            if isinstance(data_class, (tuple, list)):
                # Accept the item if it matches any candidate class.
                ret = True in tuple(_check_data_type_valid(item, data_type) for data_type in data_class)
            else:
                ret = _check_data_type_valid(item, data_class)
            if not ret:
                # Build a readable name for the expected class(es).
                data_class_str = tuple(i.__name__ if hasattr(i, '__name__') else i for i in data_class) \
                    if isinstance(data_class, (tuple, list)) else \
                    (data_class if data_class is None else data_class.__name__)
                raise ValueError(f'Please provide as model inputs either a single or '
                                 f'a tuple or a list or a dict of {data_class_str}, '
                                 f'but got part data type is {item if item is None else type(item).__name__}.')
def check_output_data(data):
    """Raise RuntimeError if the executor returned None."""
    if data is not None:
        return
    raise RuntimeError('Executor return data ' + str(data) + ', please check your net or input data.')
# Pre-built converters: expand a bare int (or validate a tuple) to length 1/2/3.
once = _expand_tuple(1)
twice = _expand_tuple(2)
triple = _expand_tuple(3)
def args_type_check(*type_args, **type_kwargs):
    """Decorator factory: check call-argument types against the given spec.

    The type spec is bound positionally/by keyword to the wrapped function's
    signature. None-valued arguments always pass. A spec/argument entry named
    "kwargs" is unwrapped to its inner mapping before checking.
    """
    def type_check(func):
        sig = inspect.signature(func)
        bound_types = sig.bind_partial(*type_args, **type_kwargs).arguments

        @wraps(func)
        def wrapper(*args, **kwargs):
            # Work on a local alias: the original rebound the nonlocal
            # `bound_types`, mutating shared closure state across calls.
            expected = bound_types
            bound_values = sig.bind(*args, **kwargs)
            argument_dict = bound_values.arguments
            if "kwargs" in expected:
                expected = expected["kwargs"]
            if "kwargs" in argument_dict:
                argument_dict = argument_dict["kwargs"]
            for name, value in argument_dict.items():
                if name in expected:
                    if value is not None and not isinstance(value, expected[name]):
                        raise TypeError('Argument {} must be {}'.format(name, expected[name]))
            return func(*args, **kwargs)
        return wrapper
    return type_check
| true | true |
1c31bf028ea222d543c043876f5486296c5d330b | 769 | py | Python | var/spack/repos/builtin/packages/py-python-xmp-toolkit/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/py-python-xmp-toolkit/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/py-python-xmp-toolkit/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPythonXmpToolkit(PythonPackage):
    """Python XMP Toolkit for working with metadata."""
    homepage = "https://github.com/python-xmp-toolkit/python-xmp-toolkit"
    pypi = "python-xmp-toolkit/python-xmp-toolkit-2.0.1.tar.gz"
    # Pinned release with its sha256 checksum for reproducible fetches.
    version('2.0.1', sha256='f8d912946ff9fd46ed5c7c355aa5d4ea193328b3f200909ef32d9a28a1419a38')
    depends_on('python@2.6:2.7,3.3:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-pytz', type=('build', 'run'))
    # Native XMP library the Python bindings wrap.
    depends_on('exempi@2.2.0:', type=('build', 'run'))
| 36.619048 | 95 | 0.708713 |
from spack import *
class PyPythonXmpToolkit(PythonPackage):
    """Python XMP Toolkit for working with metadata."""
    homepage = "https://github.com/python-xmp-toolkit/python-xmp-toolkit"
    pypi = "python-xmp-toolkit/python-xmp-toolkit-2.0.1.tar.gz"
    # Pinned release with its sha256 checksum for reproducible fetches.
    version('2.0.1', sha256='f8d912946ff9fd46ed5c7c355aa5d4ea193328b3f200909ef32d9a28a1419a38')
    depends_on('python@2.6:2.7,3.3:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-pytz', type=('build', 'run'))
    # Native XMP library the Python bindings wrap.
    depends_on('exempi@2.2.0:', type=('build', 'run'))
| true | true |
1c31bf04d83632ae657e52862ea3133cde8a472f | 4,081 | py | Python | .venv/Lib/site-packages/docutils/examples.py | sirZer0/Replace | e59ad89c43f901d409215353a7403781fb689c7e | [
"MIT"
] | null | null | null | .venv/Lib/site-packages/docutils/examples.py | sirZer0/Replace | e59ad89c43f901d409215353a7403781fb689c7e | [
"MIT"
] | 1 | 2020-05-16T02:22:36.000Z | 2020-05-16T02:22:36.000Z | .venv/Lib/site-packages/docutils/examples.py | sirZer0/Replace | e59ad89c43f901d409215353a7403781fb689c7e | [
"MIT"
] | null | null | null | # $Id: examples.py 7320 2012-01-19 22:33:02Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This module contains practical examples of Docutils client code.
Importing this module from client code is not recommended; its contents are
subject to change in future Docutils releases. Instead, it is recommended
that you copy and paste the parts you need into your own code, modifying as
necessary.
"""
from docutils import core, io
def html_parts(
    input_string,
    source_path=None,
    destination_path=None,
    input_encoding="unicode",
    doctitle=True,
    initial_header_level=1,
):
    """Publish `input_string` as HTML and return the dict of document parts.

    Keys are part names; values are Unicode strings (encoding is left to
    the caller). `source_path`/`destination_path` aid diagnostics and
    relative-path resolution; `input_encoding` is "unicode" for
    already-decoded text; `doctitle` toggles lone-title promotion;
    `initial_header_level` sets the first <h*> level.
    """
    settings = {
        "input_encoding": input_encoding,
        "doctitle_xform": doctitle,
        "initial_header_level": initial_header_level,
    }
    return core.publish_parts(
        source=input_string,
        source_path=source_path,
        destination_path=destination_path,
        writer_name="html",
        settings_overrides=settings,
    )
def html_body(
    input_string,
    source_path=None,
    destination_path=None,
    input_encoding="unicode",
    output_encoding="unicode",
    doctitle=True,
    initial_header_level=1,
):
    """Publish `input_string` and return only the contents of <body>.

    With the default `output_encoding` of "unicode" a str is returned;
    otherwise the fragment is encoded to bytes. All other parameters are
    forwarded to `html_parts`.
    """
    fragment = html_parts(
        input_string=input_string,
        source_path=source_path,
        destination_path=destination_path,
        input_encoding=input_encoding,
        doctitle=doctitle,
        initial_header_level=initial_header_level,
    )["html_body"]
    if output_encoding == "unicode":
        return fragment
    return fragment.encode(output_encoding)
def internals(
    input_string,
    source_path=None,
    destination_path=None,
    input_encoding="unicode",
    settings_overrides=None,
):
    """Return (document tree, publisher) for exploring Docutils internals.

    Parameters are as for `html_parts`; `settings_overrides` is an optional
    mapping merged with the input-encoding override.
    """
    if settings_overrides:
        overrides = settings_overrides.copy()
    else:
        overrides = {}
    overrides["input_encoding"] = input_encoding
    # The null writer produces no output; we only want the parsed document.
    _, publisher = core.publish_programmatically(
        source_class=io.StringInput,
        source=input_string,
        source_path=source_path,
        destination_class=io.NullOutput,
        destination=None,
        destination_path=destination_path,
        reader=None,
        reader_name="standalone",
        parser=None,
        parser_name="restructuredtext",
        writer=None,
        writer_name="null",
        settings=None,
        settings_spec=None,
        settings_overrides=overrides,
        config_section=None,
        enable_exit_status=None,
    )
    return publisher.writer.document, publisher
| 30.684211 | 77 | 0.684146 |
from docutils import core, io
def html_parts(
input_string,
source_path=None,
destination_path=None,
input_encoding="unicode",
doctitle=True,
initial_header_level=1,
):
overrides = {
"input_encoding": input_encoding,
"doctitle_xform": doctitle,
"initial_header_level": initial_header_level,
}
parts = core.publish_parts(
source=input_string,
source_path=source_path,
destination_path=destination_path,
writer_name="html",
settings_overrides=overrides,
)
return parts
def html_body(
input_string,
source_path=None,
destination_path=None,
input_encoding="unicode",
output_encoding="unicode",
doctitle=True,
initial_header_level=1,
):
parts = html_parts(
input_string=input_string,
source_path=source_path,
destination_path=destination_path,
input_encoding=input_encoding,
doctitle=doctitle,
initial_header_level=initial_header_level,
)
fragment = parts["html_body"]
if output_encoding != "unicode":
fragment = fragment.encode(output_encoding)
return fragment
def internals(
input_string,
source_path=None,
destination_path=None,
input_encoding="unicode",
settings_overrides=None,
):
if settings_overrides:
overrides = settings_overrides.copy()
else:
overrides = {}
overrides["input_encoding"] = input_encoding
output, pub = core.publish_programmatically(
source_class=io.StringInput,
source=input_string,
source_path=source_path,
destination_class=io.NullOutput,
destination=None,
destination_path=destination_path,
reader=None,
reader_name="standalone",
parser=None,
parser_name="restructuredtext",
writer=None,
writer_name="null",
settings=None,
settings_spec=None,
settings_overrides=overrides,
config_section=None,
enable_exit_status=None,
)
return pub.writer.document, pub
| true | true |
1c31bf17913afdeb4bdb573c7ce2766dc07ba73a | 5,397 | py | Python | httpclient.py | Rikyyyyyyyyyy/Rikyyyyyyyyyy-CMPUT404-assignment-web-client- | 6cb9604474d833b46dd647b7ad0af4d15e17822f | [
"Apache-2.0"
] | null | null | null | httpclient.py | Rikyyyyyyyyyy/Rikyyyyyyyyyy-CMPUT404-assignment-web-client- | 6cb9604474d833b46dd647b7ad0af4d15e17822f | [
"Apache-2.0"
] | null | null | null | httpclient.py | Rikyyyyyyyyyy/Rikyyyyyyyyyy-CMPUT404-assignment-web-client- | 6cb9604474d833b46dd647b7ad0af4d15e17822f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
# Copyright 2016 Abram Hindle, https://github.com/tywtyw2002, and https://github.com/treedust
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Do not use urllib's HTTP GET and POST mechanisms.
# Write your own HTTP GET and POST
# The point is to understand what you have to send and get experience with it
import sys
import socket
import re
# you may use urllib to encode data appropriately
import urllib.parse
def help():
    """Print command-line usage for this script."""
    print("httpclient.py [GET/POST] [URL]\n")
class HTTPResponse(object):
    """Simple value object pairing an HTTP status code with a response body."""
    def __init__(self, code=200, body=""):
        # code: integer HTTP status; body: decoded response text.
        self.code = code
        self.body = body
class HTTPClient(object):
    """Minimal HTTP/1.1 client implemented directly on top of TCP sockets.

    Fixes over the original: POST no longer opens two connections (the
    first socket was leaked), and leftover debug prints were removed.
    """

    def connect(self, host, port):
        """Open a TCP connection to host:port with a 2 second receive timeout."""
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((host, port))
        self.socket.settimeout(2)
        return None

    def get_code(self, data):
        """Parse the numeric status code from the split header lines.

        `data` is the list produced by get_headers(); its first entry is
        the status line, e.g. 'HTTP/1.1 200 OK'.
        """
        code = data[0].split(' ')[1]
        return int(code)

    def get_headers(self, data):
        """Split the raw response text on CRLF into individual lines."""
        return data.split("\r\n")

    def get_body(self, data):
        """Return the text after the final CRLF (the message body)."""
        return data.split('\r\n')[-1]

    def sendall(self, data):
        """Send the request text and return the entire decoded response."""
        self.socket.sendall(data.encode('utf-8'))
        return self.recvall(self.socket)

    def close(self):
        """Close the underlying socket."""
        self.socket.close()

    def getHost(self, url):
        """Hostname component of `url` (no port, no scheme)."""
        return urllib.parse.urlparse(url).hostname

    def recvall(self, sock):
        """Read from `sock` until the peer closes or the 2 s timeout fires.

        The timeout path is expected with keep-alive servers that never
        close the connection; whatever was received is returned.
        """
        buffer = bytearray()
        done = False
        try:
            while not done:
                part = sock.recv(1024)
                if part:
                    buffer.extend(part)
                else:
                    done = not part
        except socket.timeout:
            pass
        return buffer.decode('utf-8')

    def GET(self, url, args=None):
        """Issue a GET request; returns HTTPResponse(code, raw response text)."""
        parsed = urllib.parse.urlparse(url)
        port = parsed.port if parsed.port else 80
        path = parsed.path if parsed.path else '/'
        self.connect(self.getHost(url), port)
        request = """GET {path} HTTP/1.1\r\nHost: {host}\r\n\r\n""".format(
            path=path, host=self.getHost(url))
        raw = self.sendall(request)
        code = self.get_code(self.get_headers(raw))
        self.close()
        return HTTPResponse(code, raw)

    def POST(self, url, args=None):
        """Issue a form-encoded POST; returns HTTPResponse(code, body text).

        `args` (a dict), when provided, is urlencoded into the request body.
        """
        parsed = urllib.parse.urlparse(url)
        port = parsed.port if parsed.port else 80
        path = parsed.path if parsed.path else '/'
        body = urllib.parse.urlencode(args) if args is not None else ""
        request = ("""POST {path} HTTP/1.1\r\nHost: {host}\r\nContent-Type: """
                   """application/x-www-form-urlencoded\r\nContent-Length: {length}\r\n\r\n{data}""").format(
            path=path, data=body, length=len(body), host=self.getHost(url))
        # Connect exactly once (the original connected twice, leaking a socket).
        self.connect(self.getHost(url), port)
        raw = self.sendall(request)
        headers = self.get_headers(raw)
        code = self.get_code(headers)
        self.close()
        return HTTPResponse(code, headers[-1])

    def command(self, url, command="GET", args=None):
        """Dispatch to POST when requested, otherwise GET."""
        if command == "POST":
            return self.POST(url, args)
        return self.GET(url, args)
if __name__ == "__main__":
    client = HTTPClient()
    command = "GET"
    if (len(sys.argv) <= 1):
        # No arguments: show usage and exit with an error status.
        help()
        sys.exit(1)
    elif (len(sys.argv) == 3):
        # Form: httpclient.py [GET/POST] [URL]
        print(client.command( sys.argv[2], sys.argv[1] ))
    else:
        # Single argument: treat it as the URL and default to GET.
        print(client.command( sys.argv[1] ))
| 30.664773 | 235 | 0.593293 |
# Write your own HTTP GET and POST
# The point is to understand what you have to send and get experience with it
import sys
import socket
import re
# you may use urllib to encode data appropriately
import urllib.parse
def help():
print("httpclient.py [GET/POST] [URL]\n")
class HTTPResponse(object):
def __init__(self, code=200, body=""):
self.code = code
self.body = body
class HTTPClient(object):
#def get_host_port(self,url):
def connect(self, host, port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((host, port))
self.socket.settimeout(2)
return None
def get_code(self, data):
code = data[0].split(' ')[1]
print("code333")
print(code)
return int(code)
def get_headers(self,data):
return data.split("\r\n")
def get_body(self, data):
return data.split('\r\n')[-1]
def sendall(self, data):
self.socket.sendall(data.encode('utf-8'))
return self.recvall(self.socket)
def close(self):
self.socket.close()
def getHost(self, url):
data = urllib.parse.urlparse(url)
return data.hostname
# read everything from the socket
def recvall(self, sock):
buffer = bytearray()
done = False
try:
while not done:
part = sock.recv(1024)
if (part):
buffer.extend(part)
else:
done = not part
except socket.timeout:
print("TimeOut")
return buffer.decode('utf-8')
def GET(self, url, args=None):
urlLine = urllib.parse.urlparse(url)
if urlLine.port:
urlPort = urlLine.port
else:
# if the urlLine.port is none we make it 80 as default
urlPort = 80
#now we get the port we can connect the host by the port
self.connect(self.getHost(url),urlPort)
if urlLine.path:
urlPath = urlLine.path
else:
# if it is none then we use '/' as default
urlPath = '/'
send_message = """GET {path} HTTP/1.1\r\nHost: {host}\r\n\r\n""".format(path=urlPath, host=self.getHost(url))
print("header6")
print(send_message)
urlData = self.sendall(send_message)
urlHeader = self.get_headers(urlData)
code = self.get_code(urlHeader)
self.close()
return HTTPResponse(code,urlData)
def POST(self, url, args=None):
urlBody = " "
#parse the url into 6 component
# kind of decode the url in specific form
urlLine = urllib.parse.urlparse(url)
# if urlLine.port is not none we using the port
if urlLine.port:
urlPort = urlLine.port
else:
# if the urlLine.port is none we make it 80 as default
urlPort = 80
#now we get the port we can connect the host by the port
self.connect(socket.gethostbyname(urlLine.hostname),urlPort)
if args != None:
urlBody = urllib.parse.urlencode(args)
else:
urlBody = ""
# try to get the path for url, if the one from urlparse is not none we use it
if urlLine.path:
urlPath = urlLine.path
else:
# if it is none then we use '/' as default
urlPath = '/'
# the message we are giubg
print("tester2")
urlLen = len(urlBody)
send_message = """POST {path} HTTP/1.1\r\nHost: {host}\r\nContent-Type: application/x-www-form-urlencoded\r\nContent-Length: {length}\r\n\r\n{data}""".format(path=urlPath, data=urlBody, length=urlLen, host=self.getHost(url))
self.connect(self.getHost(url), urlPort)
print("header99")
print(send_message)
urlData = self.sendall(send_message)
urlHeaders = self.get_headers(urlData)
body = urlHeaders[-1]
print("data999")
print(urlData)
code = self.get_code(urlHeaders)
self.close()
return HTTPResponse(code,body)
def command(self, url, command="GET", args=None):
if (command == "POST"):
return self.POST( url, args )
else:
return self.GET( url, args )
if __name__ == "__main__":
client = HTTPClient()
command = "GET"
if (len(sys.argv) <= 1):
help()
sys.exit(1)
elif (len(sys.argv) == 3):
print(client.command( sys.argv[2], sys.argv[1] ))
else:
print(client.command( sys.argv[1] ))
| true | true |
1c31bfa0b91d181c179596686bba312a5a272888 | 567 | py | Python | opencv-semantic-segmentation/additional_code.py | Gengarrr/RN-nuScenes | 00b0909db06439c489e06874f8f7aa1d3d3c9498 | [
"Apache-2.0"
] | null | null | null | opencv-semantic-segmentation/additional_code.py | Gengarrr/RN-nuScenes | 00b0909db06439c489e06874f8f7aa1d3d3c9498 | [
"Apache-2.0"
] | null | null | null | opencv-semantic-segmentation/additional_code.py | Gengarrr/RN-nuScenes | 00b0909db06439c489e06874f8f7aa1d3d3c9498 | [
"Apache-2.0"
] | null | null | null | # loop over each of the individual class IDs in the image
for classID in np.unique(classMap):
# build a binary mask for the current class and then use the mask
# to visualize all pixels in the image belonging to the class
print("[INFO] class: {}".format(CLASSES[classID]))
classMask = (mask == COLORS[classID]).astype("uint8") * 255
classMask = classMask[:, :, 0]
classOutput = cv2.bitwise_and(image, image, mask=classMask)
classMask = np.hstack([image, classOutput])
# show the output class visualization
cv2.imshow("Class Vis", classMask)
cv2.waitKey(0) | 40.5 | 66 | 0.731922 |
for classID in np.unique(classMap):
print("[INFO] class: {}".format(CLASSES[classID]))
classMask = (mask == COLORS[classID]).astype("uint8") * 255
classMask = classMask[:, :, 0]
classOutput = cv2.bitwise_and(image, image, mask=classMask)
classMask = np.hstack([image, classOutput])
cv2.imshow("Class Vis", classMask)
cv2.waitKey(0) | true | true |
1c31c08e34ba376884f8639f325d9d7d97731cba | 1,312 | py | Python | tests/mock_sender.py | Inrixia/pyais | b50fd4d75c687d71b3c70ee939ac9112cfec991e | [
"MIT"
] | null | null | null | tests/mock_sender.py | Inrixia/pyais | b50fd4d75c687d71b3c70ee939ac9112cfec991e | [
"MIT"
] | null | null | null | tests/mock_sender.py | Inrixia/pyais | b50fd4d75c687d71b3c70ee939ac9112cfec991e | [
"MIT"
] | null | null | null | import socket
import time
# Raw AIS (AIVDM) NMEA 0183 sentences replayed verbatim by the mock server.
MESSAGES = [
    b"!AIVDM,1,1,,B,133S0:0P00PCsJ:MECBR0gv:0D8N,0*7F",
    b"!AIVDM,1,1,,A,4h2=a@Quho;O306WMpMIK<Q00826,0*42",
    b"!AIVDM,1,1,,A,402M3b@000Htt0K0Q0R3T<700t24,0*52",
    b"!AIVDM,1,1,,A,1>qc9ww000OkfS@MMI5004R60<0B,0*31",
    b"!AIVDM,1,1,,A,13P<GAh01pwM`GPDdu>T8SDV0@2c,0*7D",
    b"!AIVDM,1,1,,A,133ma5P0000Cj9lMG484pbN60D<P,0*42",
    b"!AIVDM,1,1,,B,13aBKV5P0qPFeWJMakbGjgv820SM,0*6E",
    b"!AIVDM,1,1,,A,15Mvsu000aqSG3RF;B?A@0v4082c,0*60",
    b"!AIVDM,1,1,,A,13aI9EwP?w<tSF0l4Q@>4?wvPl6=,0*38",
    b"!AIVDM,1,1,,A,15NJIs0P0?JeI0RGBjbCCwv:282W,0*2E",
    b"!AIVDM,1,1,,A,15Mw<ePP00ISvvpA8Hi<Mwv6082J,0*45",
    b"!AIVDM,1,1,,A,15MooR0P0SJe;2>GC2pdQOv:282b,0*0C",
]
def udp_mock_server(host, port) -> None:
    """Send the sample AIS messages to (host, port) over UDP every 2 s, forever.

    Runs until interrupted; the socket is always closed on the way out.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # The original nested two `while True` loops; a single loop is equivalent.
        while True:
            print(f"Sending {len(MESSAGES)} messages all at once.")
            for msg in MESSAGES:
                sock.sendto(msg + b"\r\n", (host, port))
            time.sleep(2)
    finally:
        sock.close()
if __name__ == '__main__':
    # Default bind target for local testing.
    host = "127.0.0.1"
    port = 12346
    print(f"Starting Mock UDP server on {host}:{port}")
    udp_mock_server(host, port)
| 32.8 | 71 | 0.61128 | import socket
import time
MESSAGES = [
b"!AIVDM,1,1,,B,133S0:0P00PCsJ:MECBR0gv:0D8N,0*7F",
b"!AIVDM,1,1,,A,4h2=a@Quho;O306WMpMIK<Q00826,0*42",
b"!AIVDM,1,1,,A,402M3b@000Htt0K0Q0R3T<700t24,0*52",
b"!AIVDM,1,1,,A,1>qc9ww000OkfS@MMI5004R60<0B,0*31",
b"!AIVDM,1,1,,A,13P<GAh01pwM`GPDdu>T8SDV0@2c,0*7D",
b"!AIVDM,1,1,,A,133ma5P0000Cj9lMG484pbN60D<P,0*42",
b"!AIVDM,1,1,,B,13aBKV5P0qPFeWJMakbGjgv820SM,0*6E",
b"!AIVDM,1,1,,A,15Mvsu000aqSG3RF;B?A@0v4082c,0*60",
b"!AIVDM,1,1,,A,13aI9EwP?w<tSF0l4Q@>4?wvPl6=,0*38",
b"!AIVDM,1,1,,A,15NJIs0P0?JeI0RGBjbCCwv:282W,0*2E",
b"!AIVDM,1,1,,A,15Mw<ePP00ISvvpA8Hi<Mwv6082J,0*45",
b"!AIVDM,1,1,,A,15MooR0P0SJe;2>GC2pdQOv:282b,0*0C",
]
def udp_mock_server(host, port) -> None:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
while True:
while True:
print(f"Sending {len(MESSAGES)} messages all at once.")
for msg in MESSAGES:
sock.sendto(msg + b"\r\n", (host, port))
time.sleep(2)
finally:
sock.close()
if __name__ == '__main__':
host = "127.0.0.1"
port = 12346
print(f"Starting Mock UDP server on {host}:{port}")
udp_mock_server(host, port)
| true | true |
1c31c1230e25d2b77a8ade22d528f72fdb565b38 | 13,113 | py | Python | main.py | imtheohuang/HeytapTask | 21607c6d070a87faeb9229c763e87a389ec02eb0 | [
"MIT"
] | 306 | 2021-07-13T10:09:25.000Z | 2022-03-28T05:45:08.000Z | main.py | imtheohuang/HeytapTask | 21607c6d070a87faeb9229c763e87a389ec02eb0 | [
"MIT"
] | 65 | 2021-07-14T13:18:25.000Z | 2021-11-29T02:37:09.000Z | main.py | imtheohuang/HeytapTask | 21607c6d070a87faeb9229c763e87a389ec02eb0 | [
"MIT"
] | 173 | 2021-07-13T10:09:27.000Z | 2022-03-26T02:31:44.000Z | # -*- coding: utf-8 -*-
# @Time : 2021/8/17
# @Author : hwkxk(丶大K丶)
# @Email : k@hwkxk.cn
import requests,json,time,logging,traceback,os,random,notify,datetime,configparser
#用户登录全局变量
client = None
session = None
#日志基础配置
# 创建一个logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# 创建一个handler,用于写入日志文件
# w 模式会记住上次日志记录的位置
fh = logging.FileHandler('./log.txt', mode='a', encoding='utf-8')
fh.setFormatter(logging.Formatter("%(message)s"))
logger.addHandler(fh)
# 创建一个handler,输出到控制台
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter("[%(asctime)s]:%(levelname)s:%(message)s"))
logger.addHandler(ch)
#读取用户配置信息
def readConfig():
try:
#用户配置信息
global userconfig
userconfig = configparser.RawConfigParser()
path ="./config.ini"
userconfig.read(path,encoding="utf-8")
return userconfig
except Exception as e:
print(traceback.format_exc())
logging.error('1.请检查是否在目录下建立了config.ini')
# Fetch the member profile to check the login state.
def get_infouser(HT_cookies,HT_UA):
    """Open a requests session with the given cookie/User-Agent and verify login.

    Logs the member's real name on success.  Returns the module-global
    session object when the service answers code 200, otherwise False.
    """
    flag = False
    global session
    session = requests.Session()
    headers = {
        'Host': 'www.heytap.com',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Connection': 'keep-alive',
        'User-Agent': HT_UA,
        'Accept-Language': 'zh-cn',
        'Accept-Encoding': 'gzip, deflate, br',
        'cookie': HT_cookies
    }
    response = session.get('https://www.heytap.com/cn/oapi/users/web/member/info', headers=headers)
    response.encoding='utf-8'
    try:
        result = response.json()
        if result['code'] == 200:
            logger.info('==== 欢太商城任务 ====')
            logger.info('【登录成功】: ' + result['data']['realName'])
            flag = True
        else:
            logger.info('【登录失败】: ' + result['errorMessage'])
    except Exception as e:
        # A non-JSON body (e.g. an HTML error page) lands here.
        print(traceback.format_exc())
        logger.error('【登录】: 发生错误,原因为: ' + str(e))
    if flag:
        return session
    else:
        return False
# Task-center listing: returns the tasks and their completion state.
def taskCenter():
    """Fetch the store's task-center summary.

    Performs an authenticated GET against the credits/show endpoint using the
    module-global session/cookies and returns the decoded JSON payload.
    """
    request_headers = {
        'Host': 'store.oppo.com',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Connection': 'keep-alive',
        'User-Agent': HT_UserAgent,
        'Accept-Language': 'zh-cn',
        'Accept-Encoding': 'gzip, deflate, br',
        'cookie': HT_cookies,
        'referer':'https://store.oppo.com/cn/app/taskCenter/index'
    }
    reply = client.get('https://store.oppo.com/cn/oapi/credits/web/credits/show', headers=request_headers)
    return reply.json()
# Daily check-in.
# In-app location: APP → My → Check-in
def daySign_task():
    """Claim today's check-in reward if it has not been claimed yet.

    NOTE(review): ``qd`` is only bound when a gift entry matches today's
    date; if the service response contains no entry for today this raises
    UnboundLocalError — confirm the API always includes the current day.
    """
    try:
        dated = time.strftime("%Y-%m-%d")
        headers = {
            'Host': 'store.oppo.com',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Connection': 'keep-alive',
            'User-Agent': HT_UserAgent,
            'Accept-Language': 'zh-cn',
            'Accept-Encoding': 'gzip, deflate, br',
            'cookie': HT_cookies,
            'referer':'https://store.oppo.com/cn/app/taskCenter/index'
        }
        res = taskCenter()
        status = res['data']['userReportInfoForm']['status']
        # status == 0 means today's check-in has not been done yet.
        if status == 0:
            res = res['data']['userReportInfoForm']['gifts']
            # Find the gift entry for today's date.
            for data in res:
                if data['date'] == dated:
                    qd = data
            if qd['today'] == False:
                # Plain credit day: only the amount is posted.
                data = "amount=" + str(qd['credits'])
                res1 = client.post('https://store.oppo.com/cn/oapi/credits/web/report/immediately', headers=headers,data=data)
                res1 = res1.json()
                if res1['code'] == 200:
                    logger.info('【每日签到成功】: ' + res1['data']['message'])
                else:
                    logger.info('【每日签到失败】: ' + str(res1))
            else:
                print(str(qd['credits']),str(qd['type']),str(qd['gift']))
                # Gift day: include type/gift fields when the entry has a type.
                if len(str(qd['type'])) < 1 :
                    data = "amount=" + str(qd['credits'])
                else:
                    data = "amount=" + str(qd['credits']) + "&type=" + str(qd['type']) + "&gift=" + str(qd['gift'])
                res1 = client.post('https://store.oppo.com/cn/oapi/credits/web/report/immediately', headers=headers,data=data)
                res1 = res1.json()
                if res1['code'] == 200:
                    logger.info('【每日签到成功】: ' + res1['data']['message'])
                else:
                    logger.info('【每日签到失败】: ' + str(res1))
        else:
            logger.info('【每日签到】: 已经签到过了!' )
        time.sleep(1)
    except Exception as e:
        print(traceback.format_exc())
        logging.error('【每日签到】: 错误,原因为: ' + str(e))
# Browse goods: viewing the SKUs earns +20 credits.
# In-app location: APP → My → Check-in → Daily tasks → Browse goods
def daily_viewgoods():
    """Run the daily 'browse goods' task, then redeem its credit reward.

    Fix: the product-list URL contained mojibake "¤tPage" — an HTML-entity
    pass had turned "&curren" into "¤"; restored to "&currentPage".

    NOTE(review): ``qd`` is only bound when a task named 浏览商品 exists in
    the everyday list — confirm the API always returns it.
    """
    try:
        headers = {
            'clientPackage': 'com.oppo.store',
            'Host': 'msec.opposhop.cn',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Connection': 'keep-alive',
            'User-Agent': 'okhttp/3.12.12.200sp1',
            'Accept-Encoding': 'gzip',
            'cookie': HT_cookies,
        }
        res = taskCenter()
        res = res['data']['everydayList']
        for data in res:
            if data['name'] == '浏览商品':
                qd = data
        if qd['completeStatus'] == 0:
            # completeStatus 0: not done yet — browse a page of SKUs first.
            shopList = client.get('https://msec.opposhop.cn/goods/v1/SeckillRound/goods/115?pageSize=12&currentPage=1')
            res = shopList.json()
            if res['meta']['code'] == 200:
                for skuinfo in res['detail']:
                    skuid = skuinfo['skuid']
                    print('正在浏览商品ID:', skuid)
                    client.get('https://msec.opposhop.cn/goods/v1/info/sku?skuId='+ str(skuid), headers=headers)
                    time.sleep(5)  # mimic a human browsing pace
                res2 = cashingCredits(qd['marking'],qd['type'],qd['credits'])
                if res2 == True:
                    logger.info('【每日浏览商品】: ' + '任务完成!积分领取+' + str(qd['credits']))
                else:
                    logger.info('【每日浏览商品】: ' + "领取积分奖励出错!")
            else:
                logger.info('【每日浏览商品】: ' + '错误,获取商品列表失败')
        elif qd['completeStatus'] == 1:
            # completeStatus 1: done but unclaimed — just redeem the credits.
            res2 = cashingCredits(qd['marking'],qd['type'],qd['credits'])
            if res2 == True:
                logger.info('【每日浏览商品】: ' + '任务完成!积分领取+' + str(qd['credits']))
            else:
                logger.info('【每日浏览商品】: ' + '领取积分奖励出错!')
        else:
            logger.info('【每日浏览商品】: ' + '任务已完成!')
    except Exception as e:
        print(traceback.format_exc())
        logging.error('【每日浏览任务】: 错误,原因为: ' + str(e))
def daily_sharegoods():
    """Run the daily 'share goods to WeChat' task, then redeem its reward.

    Fires the pushTask endpoint once per remaining share, then calls
    cashingCredits.  NOTE(review): ``qd`` is only bound when a task named
    分享商品到微信 exists in the everyday list — confirm the API always
    returns it.
    """
    try:
        headers = {
            'clientPackage': 'com.oppo.store',
            'Host': 'msec.opposhop.cn',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Connection': 'keep-alive',
            'User-Agent': 'okhttp/3.12.12.200sp1',
            'Accept-Encoding': 'gzip',
            'cookie': HT_cookies,
        }
        daySignList = taskCenter()
        res = daySignList
        res = res['data']['everydayList']
        for data in res:
            if data['name'] == '分享商品到微信':
                qd = data
        if qd['completeStatus'] == 0:
            # Repeat the share ping until the required count is reached.
            count = qd['readCount']
            endcount = qd['times']
            while (count <= endcount):
                client.get('https://msec.opposhop.cn/users/vi/creditsTask/pushTask?marking=daily_sharegoods', headers=headers)
                count += 1
            res2 = cashingCredits(qd['marking'],qd['type'],qd['credits'])
            if res2 == True:
                logger.info('【每日分享商品】: ' + '任务完成!积分领取+' + str(qd['credits']))
            else:
                logger.info('【每日分享商品】: ' + '领取积分奖励出错!')
        elif qd['completeStatus'] == 1:
            # Task already done but reward unclaimed — just redeem.
            res2 = cashingCredits(qd['marking'],qd['type'],qd['credits'])
            if res2 == True:
                logger.info('【每日分享商品】: ' + '任务完成!积分领取+' + str(qd['credits']))
            else:
                logger.info('【每日分享商品】: ' + '领取积分奖励出错!')
        else:
            logger.info('【每日分享商品】: ' + '任务已完成!')
    except Exception as e:
        print(traceback.format_exc())
        logging.error('【每日分享商品】: 错误,原因为: ' + str(e))
def daily_viewpush():
    """Run the daily 'tap push message' task, then redeem its reward.

    Same pattern as daily_sharegoods but with marking=daily_viewpush.
    NOTE(review): the task was retired upstream on 2021-09-16 (see main());
    kept for reference.  ``qd`` is only bound when a task named 点推送消息
    exists in the everyday list.
    """
    try:
        headers = {
            'clientPackage': 'com.oppo.store',
            'Host': 'msec.opposhop.cn',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Connection': 'keep-alive',
            'User-Agent': 'okhttp/3.12.12.200sp1',
            'Accept-Encoding': 'gzip',
            'cookie': HT_cookies,
        }
        daySignList = taskCenter()
        res = daySignList
        res = res['data']['everydayList']
        for data in res:
            if data['name'] == '点推送消息':
                qd = data
        if qd['completeStatus'] == 0:
            # Repeat the push ping until the required count is reached.
            count = qd['readCount']
            endcount = qd['times']
            while (count <= endcount):
                client.get('https://msec.opposhop.cn/users/vi/creditsTask/pushTask?marking=daily_viewpush', headers=headers)
                count += 1
            res2 = cashingCredits(qd['marking'],qd['type'],qd['credits'])
            if res2 == True:
                logger.info('【每日点推送】: ' + '任务完成!积分领取+' + str(qd['credits']))
            else:
                logger.info('【每日点推送】: ' + '领取积分奖励出错!')
        elif qd['completeStatus'] == 1:
            # Task already done but reward unclaimed — just redeem.
            res2 = cashingCredits(qd['marking'],qd['type'],qd['credits'])
            if res2 == True:
                logger.info('【每日点推送】: ' + '任务完成!积分领取+' + str(qd['credits']))
            else:
                logger.info('【每日点推送】: ' + '领取积分奖励出错!')
        else:
            logger.info('【每日点推送】: ' + '任务已完成!')
    except Exception as e:
        print(traceback.format_exc())
        logging.error('【每日推送消息】: 错误,原因为: ' + str(e))
# Redeem the credit reward for a finished task.
def cashingCredits(info_marking,info_type,info_credits):
    """POST the task's marking/type/amount to the credits-cashing endpoint.

    Returns True when the service replies with code 200, False otherwise.
    """
    request_headers = {
        'Host': 'store.oppo.com',
        'clientPackage': 'com.oppo.store',
        'Accept': 'application/json, text/plain, */*',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Connection': 'keep-alive',
        'User-Agent': HT_UserAgent,
        'Accept-Language': 'zh-cn',
        'Accept-Encoding': 'gzip, deflate, br',
        'cookie': HT_cookies,
        'Origin': 'https://store.oppo.com',
        'X-Requested-With': 'com.oppo.store',
        'referer':'https://store.oppo.com/cn/app/taskCenter/index?us=gerenzhongxin&um=hudongleyuan&uc=renwuzhongxin'
    }
    form_body = "marking={}&type={}&amount={}".format(info_marking, info_type, info_credits)
    reply = client.post('https://store.oppo.com/cn/oapi/credits/web/credits/cashingCredits', data=form_body, headers=request_headers)
    return reply.json()['code'] == 200
# Entry point (signature shaped for use as a cloud-function handler).
def main(event, context):
    """Run every enabled daily Heytap task for the configured user, then push
    the run log through each configured notification channel."""
    users = readConfig()
    # Truncate the previous run's log.
    open('./log.txt',mode='w',encoding='utf-8')
    global client
    global HT_cookies
    global HT_UserAgent
    HT_cookies = users.get("config","cookies")
    HT_UserAgent = users.get("config","User-Agent")
    client = get_infouser(HT_cookies,HT_UserAgent)
    # Comment out any task you do not want to run.
    if client != False:
        daySign_task() # daily check-in
        daily_viewgoods() # daily product-browsing task
        daily_sharegoods() # daily product-sharing task
        #daily_viewpush() # daily push-message task, retired upstream on 2021-09-16
    if users.has_option("dingding", 'dingtalkWebhook'):
        notify.sendDing(users.get("dingding","dingtalkWebhook"),users.get("dingding","dingtalksecret")) # DingTalk push
    if users.has_option("telegramBot", 'tgToken'):
        notify.sendTg(users.get("telegramBot","tgToken"),users.get("telegramBot","tgUserId"),users.get("telegramBot","tghost")) # Telegram bot push
    if users.has_option("pushplus", 'pushplusToken'):
        notify.sendPushplus(users.get("pushplus","pushplusToken")) # push+ push
    if users.has_option("enterpriseWechat", 'id'):
        notify.sendWechat(users.get("enterpriseWechat","id"),users.get("enterpriseWechat","secret"),users.get("enterpriseWechat","agentld")) # Enterprise WeChat notification
    if users.has_option("IFTTT", 'apiKey'):
        notify.sendIFTTT(users.get("IFTTT","apiKey"),users.get("IFTTT","eventName")) # IFTTT push
    if users.has_option("Bark", 'Barkkey'):
        notify.sendBark(users.get("Bark","Barkkey"),users.get("Bark","Barksave")) # Bark push
# Script entry point.
if __name__ == '__main__':
    main("","")
| 39.260479 | 148 | 0.563487 |
import requests,json,time,logging,traceback,os,random,notify,datetime,configparser
client = None
session = None
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fh = logging.FileHandler('./log.txt', mode='a', encoding='utf-8')
fh.setFormatter(logging.Formatter("%(message)s"))
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter("[%(asctime)s]:%(levelname)s:%(message)s"))
logger.addHandler(ch)
def readConfig():
    """Parse ./config.ini into the module-global ``userconfig`` and return it.

    NOTE(review): on a parse error this logs and falls through, returning
    None implicitly — callers must handle that.
    """
    try:
        global userconfig
        userconfig = configparser.RawConfigParser()
        path ="./config.ini"
        userconfig.read(path,encoding="utf-8")
        return userconfig
    except Exception as e:
        print(traceback.format_exc())
        logging.error('1.请检查是否在目录下建立了config.ini')
def get_infouser(HT_cookies,HT_UA):
    """Open a requests session with the given cookie/User-Agent and verify login.

    Returns the module-global session when the member-info endpoint answers
    code 200, otherwise False.
    """
    flag = False
    global session
    session = requests.Session()
    headers = {
        'Host': 'www.heytap.com',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Connection': 'keep-alive',
        'User-Agent': HT_UA,
        'Accept-Language': 'zh-cn',
        'Accept-Encoding': 'gzip, deflate, br',
        'cookie': HT_cookies
    }
    response = session.get('https://www.heytap.com/cn/oapi/users/web/member/info', headers=headers)
    response.encoding='utf-8'
    try:
        result = response.json()
        if result['code'] == 200:
            logger.info('==== 欢太商城任务 ====')
            logger.info('【登录成功】: ' + result['data']['realName'])
            flag = True
        else:
            logger.info('【登录失败】: ' + result['errorMessage'])
    except Exception as e:
        # A non-JSON body (e.g. an HTML error page) lands here.
        print(traceback.format_exc())
        logger.error('【登录】: 发生错误,原因为: ' + str(e))
    if flag:
        return session
    else:
        return False
def taskCenter():
    """Fetch the store's task-center summary.

    Performs an authenticated GET against the credits/show endpoint using the
    module-global session/cookies and returns the decoded JSON payload.
    """
    request_headers = {
        'Host': 'store.oppo.com',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Connection': 'keep-alive',
        'User-Agent': HT_UserAgent,
        'Accept-Language': 'zh-cn',
        'Accept-Encoding': 'gzip, deflate, br',
        'cookie': HT_cookies,
        'referer':'https://store.oppo.com/cn/app/taskCenter/index'
    }
    reply = client.get('https://store.oppo.com/cn/oapi/credits/web/credits/show', headers=request_headers)
    return reply.json()
def daySign_task():
    """Claim today's check-in reward if it has not been claimed yet.

    NOTE(review): ``qd`` is only bound when a gift entry matches today's
    date; if the service response contains no entry for today this raises
    UnboundLocalError — confirm the API always includes the current day.
    """
    try:
        dated = time.strftime("%Y-%m-%d")
        headers = {
            'Host': 'store.oppo.com',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Connection': 'keep-alive',
            'User-Agent': HT_UserAgent,
            'Accept-Language': 'zh-cn',
            'Accept-Encoding': 'gzip, deflate, br',
            'cookie': HT_cookies,
            'referer':'https://store.oppo.com/cn/app/taskCenter/index'
        }
        res = taskCenter()
        status = res['data']['userReportInfoForm']['status']
        # status == 0 means today's check-in has not been done yet.
        if status == 0:
            res = res['data']['userReportInfoForm']['gifts']
            # Find the gift entry for today's date.
            for data in res:
                if data['date'] == dated:
                    qd = data
            if qd['today'] == False:
                data = "amount=" + str(qd['credits'])
                res1 = client.post('https://store.oppo.com/cn/oapi/credits/web/report/immediately', headers=headers,data=data)
                res1 = res1.json()
                if res1['code'] == 200:
                    logger.info('【每日签到成功】: ' + res1['data']['message'])
                else:
                    logger.info('【每日签到失败】: ' + str(res1))
            else:
                print(str(qd['credits']),str(qd['type']),str(qd['gift']))
                # Gift day: include type/gift fields when the entry has a type.
                if len(str(qd['type'])) < 1 :
                    data = "amount=" + str(qd['credits'])
                else:
                    data = "amount=" + str(qd['credits']) + "&type=" + str(qd['type']) + "&gift=" + str(qd['gift'])
                res1 = client.post('https://store.oppo.com/cn/oapi/credits/web/report/immediately', headers=headers,data=data)
                res1 = res1.json()
                if res1['code'] == 200:
                    logger.info('【每日签到成功】: ' + res1['data']['message'])
                else:
                    logger.info('【每日签到失败】: ' + str(res1))
        else:
            logger.info('【每日签到】: 已经签到过了!' )
        time.sleep(1)
    except Exception as e:
        print(traceback.format_exc())
        logging.error('【每日签到】: 错误,原因为: ' + str(e))
def daily_viewgoods():
    """Run the daily 'browse goods' task, then redeem its credit reward.

    Fix: the product-list URL contained mojibake "¤tPage" — an HTML-entity
    pass had turned "&curren" into "¤"; restored to "&currentPage".
    """
    try:
        headers = {
            'clientPackage': 'com.oppo.store',
            'Host': 'msec.opposhop.cn',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Connection': 'keep-alive',
            'User-Agent': 'okhttp/3.12.12.200sp1',
            'Accept-Encoding': 'gzip',
            'cookie': HT_cookies,
        }
        res = taskCenter()
        res = res['data']['everydayList']
        for data in res:
            if data['name'] == '浏览商品':
                qd = data
        if qd['completeStatus'] == 0:
            # Not done yet — browse a page of SKUs first.
            shopList = client.get('https://msec.opposhop.cn/goods/v1/SeckillRound/goods/115?pageSize=12&currentPage=1')
            res = shopList.json()
            if res['meta']['code'] == 200:
                for skuinfo in res['detail']:
                    skuid = skuinfo['skuid']
                    print('正在浏览商品ID:', skuid)
                    client.get('https://msec.opposhop.cn/goods/v1/info/sku?skuId='+ str(skuid), headers=headers)
                    time.sleep(5)  # mimic a human browsing pace
                res2 = cashingCredits(qd['marking'],qd['type'],qd['credits'])
                if res2 == True:
                    logger.info('【每日浏览商品】: ' + '任务完成!积分领取+' + str(qd['credits']))
                else:
                    logger.info('【每日浏览商品】: ' + "领取积分奖励出错!")
            else:
                logger.info('【每日浏览商品】: ' + '错误,获取商品列表失败')
        elif qd['completeStatus'] == 1:
            # Done but unclaimed — just redeem the credits.
            res2 = cashingCredits(qd['marking'],qd['type'],qd['credits'])
            if res2 == True:
                logger.info('【每日浏览商品】: ' + '任务完成!积分领取+' + str(qd['credits']))
            else:
                logger.info('【每日浏览商品】: ' + '领取积分奖励出错!')
        else:
            logger.info('【每日浏览商品】: ' + '任务已完成!')
    except Exception as e:
        print(traceback.format_exc())
        logging.error('【每日浏览任务】: 错误,原因为: ' + str(e))
def daily_sharegoods():
    """Run the daily 'share goods to WeChat' task, then redeem its reward.

    Fires the pushTask endpoint once per remaining share, then calls
    cashingCredits.
    """
    try:
        headers = {
            'clientPackage': 'com.oppo.store',
            'Host': 'msec.opposhop.cn',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Connection': 'keep-alive',
            'User-Agent': 'okhttp/3.12.12.200sp1',
            'Accept-Encoding': 'gzip',
            'cookie': HT_cookies,
        }
        daySignList = taskCenter()
        res = daySignList
        res = res['data']['everydayList']
        for data in res:
            if data['name'] == '分享商品到微信':
                qd = data
        if qd['completeStatus'] == 0:
            # Repeat the share ping until the required count is reached.
            count = qd['readCount']
            endcount = qd['times']
            while (count <= endcount):
                client.get('https://msec.opposhop.cn/users/vi/creditsTask/pushTask?marking=daily_sharegoods', headers=headers)
                count += 1
            res2 = cashingCredits(qd['marking'],qd['type'],qd['credits'])
            if res2 == True:
                logger.info('【每日分享商品】: ' + '任务完成!积分领取+' + str(qd['credits']))
            else:
                logger.info('【每日分享商品】: ' + '领取积分奖励出错!')
        elif qd['completeStatus'] == 1:
            # Task already done but reward unclaimed — just redeem.
            res2 = cashingCredits(qd['marking'],qd['type'],qd['credits'])
            if res2 == True:
                logger.info('【每日分享商品】: ' + '任务完成!积分领取+' + str(qd['credits']))
            else:
                logger.info('【每日分享商品】: ' + '领取积分奖励出错!')
        else:
            logger.info('【每日分享商品】: ' + '任务已完成!')
    except Exception as e:
        print(traceback.format_exc())
        logging.error('【每日分享商品】: 错误,原因为: ' + str(e))
def daily_viewpush():
    """Run the daily 'tap push message' task, then redeem its reward.

    Same pattern as daily_sharegoods but with marking=daily_viewpush.
    """
    try:
        headers = {
            'clientPackage': 'com.oppo.store',
            'Host': 'msec.opposhop.cn',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Connection': 'keep-alive',
            'User-Agent': 'okhttp/3.12.12.200sp1',
            'Accept-Encoding': 'gzip',
            'cookie': HT_cookies,
        }
        daySignList = taskCenter()
        res = daySignList
        res = res['data']['everydayList']
        for data in res:
            if data['name'] == '点推送消息':
                qd = data
        if qd['completeStatus'] == 0:
            # Repeat the push ping until the required count is reached.
            count = qd['readCount']
            endcount = qd['times']
            while (count <= endcount):
                client.get('https://msec.opposhop.cn/users/vi/creditsTask/pushTask?marking=daily_viewpush', headers=headers)
                count += 1
            res2 = cashingCredits(qd['marking'],qd['type'],qd['credits'])
            if res2 == True:
                logger.info('【每日点推送】: ' + '任务完成!积分领取+' + str(qd['credits']))
            else:
                logger.info('【每日点推送】: ' + '领取积分奖励出错!')
        elif qd['completeStatus'] == 1:
            # Task already done but reward unclaimed — just redeem.
            res2 = cashingCredits(qd['marking'],qd['type'],qd['credits'])
            if res2 == True:
                logger.info('【每日点推送】: ' + '任务完成!积分领取+' + str(qd['credits']))
            else:
                logger.info('【每日点推送】: ' + '领取积分奖励出错!')
        else:
            logger.info('【每日点推送】: ' + '任务已完成!')
    except Exception as e:
        print(traceback.format_exc())
        logging.error('【每日推送消息】: 错误,原因为: ' + str(e))
def cashingCredits(info_marking,info_type,info_credits):
    """POST the task's marking/type/amount to the credits-cashing endpoint.

    Returns True when the service replies with code 200, False otherwise.
    """
    request_headers = {
        'Host': 'store.oppo.com',
        'clientPackage': 'com.oppo.store',
        'Accept': 'application/json, text/plain, */*',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Connection': 'keep-alive',
        'User-Agent': HT_UserAgent,
        'Accept-Language': 'zh-cn',
        'Accept-Encoding': 'gzip, deflate, br',
        'cookie': HT_cookies,
        'Origin': 'https://store.oppo.com',
        'X-Requested-With': 'com.oppo.store',
        'referer':'https://store.oppo.com/cn/app/taskCenter/index?us=gerenzhongxin&um=hudongleyuan&uc=renwuzhongxin'
    }
    form_body = "marking={}&type={}&amount={}".format(info_marking, info_type, info_credits)
    reply = client.post('https://store.oppo.com/cn/oapi/credits/web/credits/cashingCredits', data=form_body, headers=request_headers)
    return reply.json()['code'] == 200
def main(event, context):
    """Run every enabled daily Heytap task for the configured user, then push
    the run log through each configured notification channel.

    Fix: the DingTalk conditional header had been truncated to
    ``ngding", 'dingtalkWebhook'):`` (extraction damage), which is a syntax
    error; restored from the commented copy of this file.
    """
    users = readConfig()
    # Truncate the previous run's log.
    open('./log.txt',mode='w',encoding='utf-8')
    global client
    global HT_cookies
    global HT_UserAgent
    HT_cookies = users.get("config","cookies")
    HT_UserAgent = users.get("config","User-Agent")
    client = get_infouser(HT_cookies,HT_UserAgent)
    if client != False:
        daySign_task()
        daily_viewgoods()
        daily_sharegoods()
    if users.has_option("dingding", 'dingtalkWebhook'):
        notify.sendDing(users.get("dingding","dingtalkWebhook"),users.get("dingding","dingtalksecret"))
    if users.has_option("telegramBot", 'tgToken'):
        notify.sendTg(users.get("telegramBot","tgToken"),users.get("telegramBot","tgUserId"),users.get("telegramBot","tghost"))
    if users.has_option("pushplus", 'pushplusToken'):
        notify.sendPushplus(users.get("pushplus","pushplusToken"))
    if users.has_option("enterpriseWechat", 'id'):
        notify.sendWechat(users.get("enterpriseWechat","id"),users.get("enterpriseWechat","secret"),users.get("enterpriseWechat","agentld"))
    if users.has_option("IFTTT", 'apiKey'):
        notify.sendIFTTT(users.get("IFTTT","apiKey"),users.get("IFTTT","eventName"))
    if users.has_option("Bark", 'Barkkey'):
        notify.sendBark(users.get("Bark","Barkkey"),users.get("Bark","Barksave"))
# Script entry point.
if __name__ == '__main__':
    main("","")
| true | true |
1c31c1d12e2620556ec42dcce135fa3807ea0e6f | 108 | py | Python | module1-entry point.py | dronovroman/gh2021 | bd01d55b20c02916f28213776224f31bd96ac479 | [
"BSD-3-Clause"
] | null | null | null | module1-entry point.py | dronovroman/gh2021 | bd01d55b20c02916f28213776224f31bd96ac479 | [
"BSD-3-Clause"
] | null | null | null | module1-entry point.py | dronovroman/gh2021 | bd01d55b20c02916f28213776224f31bd96ac479 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 20 23:29:59 2021
@author: roman
Module 1 - entry point
"""
| 12 | 35 | 0.601852 | true | true | |
1c31c270461e1b3f25e6ec99f42bacf1c46670f5 | 17,592 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/operations/_virtual_machine_images_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/operations/_virtual_machine_images_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/operations/_virtual_machine_images_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineImagesOperations(object):
"""VirtualMachineImagesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to issue the HTTP requests.
        self._client = client
        # Serializer/deserializer for request path/query values and bodies.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service-client configuration (provides e.g. subscription_id).
        self._config = config
def get(
self,
location, # type: str
publisher_name, # type: str
offer, # type: str
skus, # type: str
version, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.VirtualMachineImage"
"""Gets a virtual machine image.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name: A valid image publisher.
:type publisher_name: str
:param offer: A valid image publisher offer.
:type offer: str
:param skus: A valid image SKU.
:type skus: str
:param version: A valid image SKU version.
:type version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineImage, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_06_01.models.VirtualMachineImage
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualMachineImage"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'offer': self._serialize.url("offer", offer, 'str'),
'skus': self._serialize.url("skus", skus, 'str'),
'version': self._serialize.url("version", version, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}'} # type: ignore
def list(
self,
location, # type: str
publisher_name, # type: str
offer, # type: str
skus, # type: str
expand=None, # type: Optional[str]
top=None, # type: Optional[int]
orderby=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> List["models.VirtualMachineImageResource"]
"""Gets a list of all virtual machine image versions for the specified location, publisher, offer,
and SKU.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name: A valid image publisher.
:type publisher_name: str
:param offer: A valid image publisher offer.
:type offer: str
:param skus: A valid image SKU.
:type skus: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:param top:
:type top: int
:param orderby:
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2020_06_01.models.VirtualMachineImageResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["models.VirtualMachineImageResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'offer': self._serialize.url("offer", offer, 'str'),
'skus': self._serialize.url("skus", skus, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions'} # type: ignore
def list_offers(
self,
location, # type: str
publisher_name, # type: str
**kwargs # type: Any
):
# type: (...) -> List["models.VirtualMachineImageResource"]
"""Gets a list of virtual machine image offers for the specified location and publisher.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name: A valid image publisher.
:type publisher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2020_06_01.models.VirtualMachineImageResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["models.VirtualMachineImageResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.list_offers.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_offers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers'} # type: ignore
    def list_publishers(
        self,
        location, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> List["models.VirtualMachineImageResource"]
        """Gets a list of virtual machine image publishers for the specified Azure location.
        :param location: The name of a supported Azure region.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of VirtualMachineImageResource, or the result of cls(response)
        :rtype: list[~azure.mgmt.compute.v2020_06_01.models.VirtualMachineImageResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType[List["models.VirtualMachineImageResource"]]
        # Map the common ARM error status codes to typed azure-core exceptions;
        # callers may extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"
        # Construct URL
        url = self.list_publishers.metadata['url'] # type: ignore
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Send the GET synchronously through the client pipeline.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
        if cls:
            # Hand the raw response + deserialized body to the caller's hook.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list_publishers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers'} # type: ignore
    def list_skus(
        self,
        location, # type: str
        publisher_name, # type: str
        offer, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> List["models.VirtualMachineImageResource"]
        """Gets a list of virtual machine image SKUs for the specified location, publisher, and offer.
        :param location: The name of a supported Azure region.
        :type location: str
        :param publisher_name: A valid image publisher.
        :type publisher_name: str
        :param offer: A valid image publisher offer.
        :type offer: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of VirtualMachineImageResource, or the result of cls(response)
        :rtype: list[~azure.mgmt.compute.v2020_06_01.models.VirtualMachineImageResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType[List["models.VirtualMachineImageResource"]]
        # Map the common ARM error status codes to typed azure-core exceptions;
        # callers may extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"
        # Construct URL
        url = self.list_skus.metadata['url'] # type: ignore
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'offer': self._serialize.url("offer", offer, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Send the GET synchronously through the client pipeline.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
        if cls:
            # Hand the raw response + deserialized body to the caller's hook.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list_skus.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus'} # type: ignore
| 46.66313 | 221 | 0.659277 |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineImagesOperations(object):
    """VirtualMachineImagesOperations operations.

    Read-only operations for browsing the Azure virtual machine image
    catalogue (publishers, offers, SKUs, versions) for one location.
    Instances are created by the service client; do not instantiate
    this class directly.
    """
    # Alias so callers can reach the generated model classes through the
    # operations group, mirroring the generated SDK layout.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        # Keep references to the pipeline client, its configuration and
        # the (de)serializers; every operation below uses them.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def get(
        self,
        location,
        publisher_name,
        offer,
        skus,
        version,
        **kwargs
    ):
        """Gets a single virtual machine image.

        Returns the deserialized VirtualMachineImage, or the result of the
        optional ``cls`` response callback. Raises HttpResponseError for a
        non-200 status.
        """
        cls = kwargs.pop('cls', None)
        # Map common ARM error codes onto typed azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'offer': self._serialize.url("offer", offer, 'str'),
            'skus': self._serialize.url("skus", skus, 'str'),
            'version': self._serialize.url("version", version, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Run the request synchronously through the client pipeline.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('VirtualMachineImage', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}'}
    def list(
        self,
        location,
        publisher_name,
        offer,
        skus,
        expand=None,
        top=None,
        orderby=None,
        **kwargs
    ):
        """Gets a list of virtual machine image versions for the given
        location, publisher, offer and SKU.

        ``expand``, ``top`` and ``orderby`` map to the OData ``$expand``,
        ``$top`` and ``$orderby`` query options and are omitted when None.
        Returns a list of VirtualMachineImageResource (or ``cls(response)``).
        """
        cls = kwargs.pop('cls', None)
        # Map common ARM error codes onto typed azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"
        # Construct URL
        url = self.list.metadata['url']
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'offer': self._serialize.url("offer", offer, 'str'),
            'skus': self._serialize.url("skus", skus, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters (optional OData options only when supplied).
        query_parameters = {}
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        if top is not None:
            query_parameters['$top'] = self._serialize.query("top", top, 'int')
        if orderby is not None:
            query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Run the request synchronously through the client pipeline.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions'}
    def list_offers(
        self,
        location,
        publisher_name,
        **kwargs
    ):
        """Gets a list of virtual machine image offers for the given
        location and publisher.
        """
        cls = kwargs.pop('cls', None)
        # Map common ARM error codes onto typed azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"
        # Construct URL
        url = self.list_offers.metadata['url']
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Run the request synchronously through the client pipeline.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list_offers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers'}
    def list_publishers(
        self,
        location,
        **kwargs
    ):
        """Gets a list of virtual machine image publishers for the given
        Azure location.
        """
        cls = kwargs.pop('cls', None)
        # Map common ARM error codes onto typed azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"
        # Construct URL
        url = self.list_publishers.metadata['url']
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Run the request synchronously through the client pipeline.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list_publishers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers'}
    def list_skus(
        self,
        location,
        publisher_name,
        offer,
        **kwargs
    ):
        """Gets a list of virtual machine image SKUs for the given
        location, publisher and offer.
        """
        cls = kwargs.pop('cls', None)
        # Map common ARM error codes onto typed azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01"
        accept = "application/json"
        # Construct URL
        url = self.list_skus.metadata['url']
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'publisherName': self._serialize.url("publisher_name", publisher_name, 'str'),
            'offer': self._serialize.url("offer", offer, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Run the request synchronously through the client pipeline.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list_skus.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus'}
| true | true |
1c31c3da9136c83732f112cb3a9388021eb3870c | 393 | py | Python | Cartwheel/lib/Python26/Lib/site-packages/OpenGL/GL/EXT/texture_mirror_clamp.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | null | null | null | Cartwheel/lib/Python26/Lib/site-packages/OpenGL/GL/EXT/texture_mirror_clamp.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | null | null | null | Cartwheel/lib/Python26/Lib/site-packages/OpenGL/GL/EXT/texture_mirror_clamp.py | MontyThibault/centre-of-mass-awareness | 58778f148e65749e1dfc443043e9fc054ca3ff4d | [
"MIT"
] | null | null | null | '''OpenGL extension EXT.texture_mirror_clamp
This module customises the behaviour of the
OpenGL.raw.GL.EXT.texture_mirror_clamp to provide a more
Python-friendly API
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.EXT.texture_mirror_clamp import *
### END AUTOGENERATED SECTION | 32.75 | 57 | 0.819338 | from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.EXT.texture_mirror_clamp import *
| true | true |
1c31c470c48b8e919191977565ad6b3e548938d3 | 153 | py | Python | activecollab_digger/apps.py | kingsdigitallab/django-activecollab-digger | 508c31eb4a3fe9887aa9d3a86ea160f3bc1e60b0 | [
"MIT"
] | null | null | null | activecollab_digger/apps.py | kingsdigitallab/django-activecollab-digger | 508c31eb4a3fe9887aa9d3a86ea160f3bc1e60b0 | [
"MIT"
] | null | null | null | activecollab_digger/apps.py | kingsdigitallab/django-activecollab-digger | 508c31eb4a3fe9887aa9d3a86ea160f3bc1e60b0 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.apps import AppConfig
class ActivecollabDiggerConfig(AppConfig):
    """Django application configuration for the activecollab_digger app."""
    # Full dotted path Django uses to locate this application.
    name = 'activecollab_digger'
| 19.125 | 42 | 0.823529 | from __future__ import unicode_literals
from django.apps import AppConfig
class ActivecollabDiggerConfig(AppConfig):
name = 'activecollab_digger'
| true | true |
1c31c5189c1a2703669a7843fe0163d5c35cc3c0 | 370 | py | Python | tests/test_verdict.py | denverpost/trial-results | 9d56570658b71c3ac5cfab0abe7979678b432572 | [
"Unlicense",
"MIT"
] | null | null | null | tests/test_verdict.py | denverpost/trial-results | 9d56570658b71c3ac5cfab0abe7979678b432572 | [
"Unlicense",
"MIT"
] | 10 | 2015-07-14T22:13:25.000Z | 2016-07-05T20:25:38.000Z | tests/test_verdict.py | denverpost/trial-results | 9d56570658b71c3ac5cfab0abe7979678b432572 | [
"Unlicense",
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from spreadsheet import Sheet
from verdict import Verdict
def test_publish():
    """Publishing the verdict for a freshly-built worksheet succeeds."""
    worksheet = Sheet('test-sheet', 'worksheet-name')
    verdict = Verdict(worksheet)
    published = verdict.publish()
    assert published == True
| 24.666667 | 49 | 0.691892 |
from __future__ import unicode_literals
import pytest
from spreadsheet import Sheet
from verdict import Verdict
def test_publish():
    """Check that Verdict.publish() reports success for a test sheet."""
    sheet = Sheet('test-sheet', 'worksheet-name')
    publish = Verdict(sheet)
    publish_value = publish.publish()
    # publish() is expected to signal success by returning True.
    assert publish_value == True
| true | true |
1c31c6199865b682ec556fa6878dcf7a1d7cb9b4 | 30,617 | py | Python | kontranto_igra/game_logic.py | zd-mioc/Kontranto | 928f8e6ca1c8c136878d0dd036321053ab461049 | [
"MIT"
] | 1 | 2020-07-19T11:11:08.000Z | 2020-07-19T11:11:08.000Z | kontranto_igra/game_logic.py | zd-mioc/Kontranto | 928f8e6ca1c8c136878d0dd036321053ab461049 | [
"MIT"
] | null | null | null | kontranto_igra/game_logic.py | zd-mioc/Kontranto | 928f8e6ca1c8c136878d0dd036321053ab461049 | [
"MIT"
] | 1 | 2021-03-01T08:39:41.000Z | 2021-03-01T08:39:41.000Z | from kontranto_igra.models import Game, Move
from django.core.exceptions import ObjectDoesNotExist
from django.core.serializers.json import DjangoJSONEncoder
from django.utils import timezone
from random import choice
import string
import json
def BlackOrWhite(color):
    """Pick a colour relative to *color* (usable by either player).

    For the empty string a colour is drawn at random; for 'white' the
    answer is 'black'; anything else yields 'white'.
    """
    if color == '':
        return choice(['black', 'white'])
    return 'black' if color == 'white' else 'white'
def new_game_f(player_id):
    """Create a new Game row and seat *player_id* as its first player.

    The first player receives a random colour; the opposite colour's id
    field is left empty ("") so join_game_f can detect the free seat.

    :param player_id: identifier of the creating player.
    :return: dict with the waiting status, generated game id, the
        caller's id and the colour assigned to the caller.
    """
    # 10-character alphanumeric game identifier.
    game_id = "".join(choice(string.ascii_letters + string.digits) for i in range(10))
    color_1 = BlackOrWhite('')
    # Fill only the colour slot that was drawn; this replaces two
    # near-identical Game.objects.create calls from the original code.
    player_fields = {"white_player_id": "", "black_player_id": ""}
    player_fields[color_1 + "_player_id"] = player_id
    Game.objects.create(
        game=game_id,
        game_state="WAITING_FOR_SECOND_PLAYER",
        white_score=0,
        black_score=0,
        board=[["", "", "", ""], ["", "", "", ""], ["", "", "", ""], ["", "", "", ""]],
        **player_fields,
    )
    return {
        "status": "Waiting for second player",
        "game_id": game_id,
        "my_id": player_id,
        "my_color": color_1,
    }
def check_game_new(player_id):
    """Validate the preconditions for starting a new game.

    Returns {"status": "OK"} when *player_id* is non-empty, otherwise a
    dict carrying the (Croatian) error message.
    """
    invalid = player_id == ""
    return {"status": "Greska: player_id nije validan."} if invalid else {"status": "OK"}
def join_game_f(game_id, player_id):
    """Seat *player_id* as the second player of an existing game.

    The newcomer takes whichever colour slot is still empty (white has
    priority), the game switches to the INIT state, and a confirmation
    dict with the assigned colour is returned.
    """
    g = Game.objects.get(game=game_id)
    if g.white_player_id == "":
        color_2 = "white"
        g.white_player_id = player_id
    else:
        color_2 = "black"
        g.black_player_id = player_id
    g.game_state = "INIT"
    g.save()
    return {
        "status": "OK",
        "game_id": g.game,
        "my_id": player_id,
        "my_color": color_2,
    }
def check_game_join(game_id, player_id):
    """Validate the preconditions for joining an existing game.

    :param game_id: identifier of the game to join.
    :param player_id: identifier of the joining player.
    :return: {"status": "OK"} on success, otherwise a dict with the
        (Croatian) error message.
    """
    if player_id == "":
        return {"status": "Greska: player_id nije validan."}
    # BUGFIX: the original swallowed AttributeError with `pass` and then
    # used the unbound `g`, raising NameError. Catch only the lookup
    # failure and return early; anything else propagates normally.
    try:
        g = Game.objects.get(game=game_id)
    except ObjectDoesNotExist:
        return {"status": "Greska: ne postoji igra s tim game_id-em."}
    if g.game_state == "INIT":
        # The game already has both players.
        return {"status": "Greska: ta je igra vec pokrenuta."}
    if g.white_player_id == player_id or g.black_player_id == player_id:
        # The caller is already seated in this game.
        return {"status": "Greska: vec ste ukljuceni u tu igru."}
    return {"status": "OK"}
def game_state_f(game_id, my_color):
    """Return the current game state as a JSON string.

    :param game_id: identifier of the game to inspect.
    :param my_color: "white" or "black" — the caller's colour, used to
        pick the opponent's player id.
    :return: JSON with opponent_id, both scores and game_state, or a
        JSON error object when the game does not exist.
    """
    try:
        g = Game.objects.get(game = game_id)
        # The opponent is whichever colour the caller is not.
        if my_color == "white":
            opponent_id = g.black_player_id
        else:
            opponent_id = g.white_player_id
        # m = Move.objects.filter(game_id = g.id).order_by('-move_timestamp')[0]
        get_game_state_resp = {
            "opponent_id": opponent_id,
            "white_score": g.white_score,
            "black_score": g.black_score,
            "game_state": g.game_state
        }
        return json.dumps(get_game_state_resp) # cls=DjangoJSONEncoder would go in this call if timestamps were included
    except ObjectDoesNotExist:
        return json.dumps({"status": "Greska: ne postoji igra s tim game_id-em."})
def get_move_f(game_id, my_color, opponent_color, ntp, ncp):
    """Return (as JSON) both players' latest piece positions in board notation.

    ntp/ncp are the caller's new triangle/circle positions as "row col"
    strings; chr(97+col)+str(4-row) converts them to chess-style squares
    ("a1".."d4") on the 4x4 board. Which stored Move (newest [0] or the
    one before [1]) is read for each side depends on game_state —
    presumably so that while one player's move is pending, the other
    only sees the last *completed* pair of moves (TODO confirm).

    Keys in the response: ntp/ncp (caller's request, converted),
    otp/ocp (opponent's stored positions), ntp_m/ncp_m (caller's stored
    positions); all "null" while fewer than two moves exist.
    """
    # Convert "row col" strings into board notation, e.g. "0 2" -> "c4".
    ntp = chr(97+int(ntp[2]))+str(4-int(ntp[0]))
    ncp = chr(97+int(ncp[2]))+str(4-int(ncp[0]))
    g = Game.objects.get(game = game_id)
    try:
        # Probe for a second-newest move; IndexError here means the game
        # has fewer than two recorded moves, so there is nothing to show.
        m00 = Move.objects.filter(game_id=g.id).order_by('-move_timestamp')[1]
    except:
        return json.dumps({"ntp": ntp, "ncp": ncp, "otp": "null", "ocp": "null", "ntp_m": "null", "ncp_m": "null"})
    # Select the caller's stored move: newest [0] or previous [1]
    # depending on whose move the game is currently waiting for.
    if g.game_state=="WAITING_FOR_MOVE" or (my_color=="white" and g.game_state=="WAITING_FOR_WHITE_PLAYER_MOVE") or (my_color=="black" and g.game_state=="WAITING_FOR_BLACK_PLAYER_MOVE"):
        mm = Move.objects.filter(game_id=g.id, color=my_color).order_by('-move_timestamp')[0]
    elif (my_color=="white" and g.game_state=="WAITING_FOR_BLACK_PLAYER_MOVE") or (my_color=="black" and g.game_state=="WAITING_FOR_WHITE_PLAYER_MOVE"):
        mm = Move.objects.filter(game_id=g.id, color=my_color).order_by('-move_timestamp')[1]
    # Same selection, mirrored, for the opponent's stored move.
    if g.game_state=="WAITING_FOR_MOVE" or (my_color=="white" and g.game_state=="WAITING_FOR_BLACK_PLAYER_MOVE") or (my_color=="black" and g.game_state=="WAITING_FOR_WHITE_PLAYER_MOVE"):
        mo = Move.objects.filter(game_id=g.id, color=opponent_color).order_by('-move_timestamp')[0]
    elif (my_color=="black" and g.game_state=="WAITING_FOR_BLACK_PLAYER_MOVE") or (my_color=="white" and g.game_state=="WAITING_FOR_WHITE_PLAYER_MOVE"):
        mo = Move.objects.filter(game_id=g.id, color=opponent_color).order_by('-move_timestamp')[1]
    # Stored positions look like "[r, c]": index 1 is the row digit and
    # index 4 the column digit; convert to board notation as above.
    ntp_m = chr(97+int(mm.triangle_position[4]))+str(4-int(mm.triangle_position[1]))
    ncp_m = chr(97+int(mm.circle_position[4]))+str(4-int(mm.circle_position[1]))
    otp = chr(97+int(mo.triangle_position[4]))+str(4-int(mo.triangle_position[1]))
    ocp = chr(97+int(mo.circle_position[4]))+str(4-int(mo.circle_position[1]))
    return json.dumps({"ntp": ntp, "ncp": ncp, "otp": otp, "ocp": ocp, "ntp_m": ntp_m, "ncp_m": ncp_m})
# rotate is needed inside make_move when checking the split board.
def rotate(l):
    """Rotate a flat list of coordinate pairs in place.

    *l* holds consecutive (x, y) pairs; each pair is mapped to
    (y, 3 - x), a quarter-turn on the 4x4 board. Any trailing element
    of an odd-length list is left untouched (as before).

    The original enumerated four special cases for pairs of 1s and 2s,
    but each of those branches is exactly the general (x, y) -> (y, 3-x)
    mapping, so a single formula replaces the whole chain.
    """
    for i in range(0, len(l) - 1, 2):
        x, y = l[i], l[i + 1]
        l[i] = y
        l[i + 1] = 3 - x
def make_move (game_id, player_id, new_triangle_position, new_circle_position):
    """Validate and apply one move for a player; return a JSON status string.

    Positions arrive as indexable "[r, c]"-style values and are parsed
    positionally (items 0 and 2), so coordinates are assumed to be single
    digits on the 4x4 board.  On success the Game's board, scores and
    state are updated in the database and '{"status": "OK"}' is returned;
    on a rule violation a JSON error payload is returned, optionally
    carrying snap-back coordinates for the client.
    """
    new_triangle_position = [int(new_triangle_position[0]), int(new_triangle_position[2])]
    new_circle_position = [int(new_circle_position[0]), int(new_circle_position[2])]
    try:
        g=Game.objects.get(game=game_id)
    except ObjectDoesNotExist:
        return json.dumps({"status": "Greska: ne postoji igra s tim game_id-em."})
    # Reject moves while the game is not in a playable state.
    if g.game_state=="WAITING_FOR_SECOND_PLAYER":
        return json.dumps({"status": "Greska: nedostaje drugi igrac."})
    elif g.game_state=="OVER":
        return json.dumps({"status": "Greska: igra je gotova."})
    # Work out which side this player controls.
    if player_id==g.white_player_id:
        player_color="white"
    elif player_id==g.black_player_id:
        player_color="black"
    else:
        return json.dumps({"status": "Greska: player_id nije validan."})
    # Turn enforcement: a player may not move twice in a row.
    if player_color=="white" and g.game_state=="WAITING_FOR_BLACK_PLAYER_MOVE":
        return json.dumps({"status": "Greska: vec ste odigrali potez; cekajte potez crnog igraca."})
    elif player_color=="black" and g.game_state=="WAITING_FOR_WHITE_PLAYER_MOVE":
        return json.dumps({"status": "Greska: vec ste odigrali potez; cekajte potez bijelog igraca."})
    try:
        # Probe for two earlier moves in this game; the bare except
        # (normally an IndexError) means this is the opening move.
        m0=Move.objects.filter(game_id=g.id).order_by('-move_timestamp')[0]
        m00=Move.objects.filter(game_id=g.id).order_by('-move_timestamp')[1]
    except:
        # Opening move: the only rule is that both figures may not land on
        # the same square.  (The trailing `pass` is unreachable after the
        # return above; left as-is.)
        if new_triangle_position==new_circle_position:
            return json.dumps({"status": "Greska: ne mozete pomaknuti obje figure na isto polje"})
        pass
    else:
        triangle_position=new_triangle_position
        circle_position=new_circle_position
        # Fetch this colour's previous move so the client can snap the
        # figures back on error.
        # NOTE(review): this filter lacks game_id, so it reads the colour's
        # latest move across ALL games -- looks like a bug; confirm intent.
        move0=Move.objects.filter(color=player_color).order_by('-move_timestamp')[0]
        s=move0.triangle_position
        triangle0_position=[int(s[1]), int(s[4])]
        s=move0.circle_position
        circle0_position=[int(s[1]), int(s[4])]
        # "X" means "no snap-back needed"; a real coordinate marks the
        # figure that must return to its previous square on the client.
        error_resp = {"status": "Greska: ne mozete se pomaknuti na ponisteno polje.", "triangle0_position": "X", "circle0_position": "X"}
        error_resp["new_triangle_position"] = chr(97+int(new_triangle_position[1]))+str(4-int(new_triangle_position[0]))
        error_resp["new_circle_position"] = chr(97+int(new_circle_position[1]))+str(4-int(new_circle_position[0]))
        # Moving onto an annulled ("X"-marked) square is forbidden.
        if "X" in g.board[triangle_position[0]][triangle_position[1]]:
            error_resp["triangle0_position"] = chr(97+int(triangle0_position[1]))+str(4-int(triangle0_position[0]))
        if "X" in g.board[circle_position[0]][circle_position[1]]:
            error_resp["circle0_position"] = chr(97+int(circle0_position[1]))+str(4-int(circle0_position[0]))
        # Both figures on one square is forbidden as well.
        if new_triangle_position==new_circle_position:
            error_resp = {"status": "Greska: ne mozete pomaknuti obje figure na isto polje.", "triangle0_position": "X", "circle0_position": "X"}
            error_resp["triangle0_position"] = chr(97+int(triangle0_position[1]))+str(4-int(triangle0_position[0]))
            error_resp["circle0_position"] = chr(97+int(circle0_position[1]))+str(4-int(circle0_position[0]))
        if error_resp["triangle0_position"] != "X" or error_resp["circle0_position"] != "X":
            return json.dumps(error_resp)
    # Reject moves onto unreachable squares.  First check whether this is
    # the very start of the game (no prior moves in this game means every
    # square is reachable).
    try:
        m0=Move.objects.filter(game_id=g.id).order_by('-move_timestamp')[0]
        m00=Move.objects.filter(game_id=g.id).order_by('-move_timestamp')[1]
    except:
        pass
    else:
        # A figure may move at most one square in each direction.
        max_range=[-1, 0, 1]
        # Previous move of this colour gives the figures' current squares.
        # NOTE(review): again no game_id in the filter -- confirm intent.
        move0=Move.objects.filter(color=player_color).order_by('-move_timestamp')[0]
        s=move0.triangle_position
        triangle0_position=[int(s[1]), int(s[4])]
        s=move0.circle_position
        circle0_position=[int(s[1]), int(s[4])]
        error_resp = {"status": "Greska: ne mozete se pomaknuti na nedohvativo polje.", "triangle0_position": "X", "circle0_position": "X"}
        error_resp["new_triangle_position"] = chr(97+int(new_triangle_position[1]))+str(4-int(new_triangle_position[0]))
        error_resp["new_circle_position"] = chr(97+int(new_circle_position[1]))+str(4-int(new_circle_position[0]))
        if triangle_position[0]-triangle0_position[0] not in max_range:
            error_resp["triangle0_position"] = chr(97+int(triangle0_position[1]))+str(4-int(triangle0_position[0]))
        elif triangle_position[1]-triangle0_position[1] not in max_range:
            error_resp["triangle0_position"] = chr(97+int(triangle0_position[1]))+str(4-int(triangle0_position[0]))
        if circle_position[0]-circle0_position[0] not in max_range:
            error_resp["circle0_position"] = chr(97+int(circle0_position[1]))+str(4-int(circle0_position[0]))
        elif circle_position[1]-circle0_position[1] not in max_range:
            error_resp["circle0_position"] = chr(97+int(circle0_position[1]))+str(4-int(circle0_position[0]))
        if error_resp["triangle0_position"] != "X" or error_resp["circle0_position"] != "X":
            return json.dumps(error_resp)
    # TODO: handle the end-game case where the only reachable square is
    # already occupied by the player's other figure (would require a way
    # to take a figure out of play).
    m=Move.objects.create(game_id=g.id, color=player_color, triangle_position=new_triangle_position, circle_position=new_circle_position, move_timestamp=timezone.now())
    g.save()
    if g.game_state=="INIT" or g.game_state=="WAITING_FOR_MOVE":
        # First half of a round: record whose answer we are waiting for.
        if player_color=="white":
            g.game_state="WAITING_FOR_BLACK_PLAYER_MOVE"
        else:
            g.game_state="WAITING_FOR_WHITE_PLAYER_MOVE"
        g.save()
    elif g.game_state=="WAITING_FOR_BLACK_PLAYER_MOVE" or g.game_state=="WAITING_FOR_WHITE_PLAYER_MOVE":
        # Second half of a round: both players have now moved, so resolve
        # collisions, redraw the board and update the score.
        w_score=g.white_score
        b_score=g.black_score
        triangle_position=new_triangle_position
        circle_position=new_circle_position
        # The newest move is ours (just inserted above), so index 1 is the
        # opponent's move from the first half of this round.
        previous_move=Move.objects.filter(game_id=g.id).order_by('-move_timestamp')[1]
        s=previous_move.triangle_position
        triangle2_position=[int(s[1]), int(s[4])]
        s=previous_move.circle_position
        circle2_position=[int(s[1]), int(s[4])]
        # Collision resolution: same-figure collisions score for white,
        # cross-figure collisions score for black.
        collision="none"
        if triangle_position==triangle2_position and circle_position==circle2_position:
            collision="double_collision_same"
            w_score+=2
        elif triangle_position==circle2_position and circle_position==triangle2_position:
            collision="double_collision_different"
            b_score+=2
        elif triangle_position==triangle2_position:
            collision="triangle_triangle2"
            w_score+=1
        elif circle_position==circle2_position:
            collision="circle_circle2"
            w_score+=1
        elif triangle_position==circle2_position:
            collision="triangle_circle2"
            b_score+=1
        elif circle_position==triangle2_position:
            collision="circle_triangle2"
            b_score+=1
        # Clear the squares occupied two rounds ago, preserving any
        # annulled ("WX"/"BX") markers on them.
        try:
            m0=Move.objects.filter(game_id=g.id).order_by('-move_timestamp')[2]
            m00=Move.objects.filter(game_id=g.id).order_by('-move_timestamp')[3]
        except:
            pass
        else:
            s=m0.triangle_position
            triangle0_position=[int(s[1]), int(s[4])]
            s=m0.circle_position
            circle0_position=[int(s[1]), int(s[4])]
            s=m00.triangle_position
            triangle00_position=[int(s[1]), int(s[4])]
            s=m00.circle_position
            circle00_position=[int(s[1]), int(s[4])]
            if "WX" in g.board[triangle0_position[0]][triangle0_position[1]]:
                g.board[triangle0_position[0]][triangle0_position[1]]="WX"
            elif "BX" in g.board[triangle0_position[0]][triangle0_position[1]]:
                g.board[triangle0_position[0]][triangle0_position[1]]="BX"
            else:
                g.board[triangle0_position[0]][triangle0_position[1]]=""
            if "WX" in g.board[circle0_position[0]][circle0_position[1]]:
                g.board[circle0_position[0]][circle0_position[1]]="WX"
            elif "BX" in g.board[circle0_position[0]][circle0_position[1]]:
                g.board[circle0_position[0]][circle0_position[1]]="BX"
            else:
                g.board[circle0_position[0]][circle0_position[1]]=""
            if "WX" in g.board[triangle00_position[0]][triangle00_position[1]]:
                g.board[triangle00_position[0]][triangle00_position[1]]="WX"
            elif "BX" in g.board[triangle00_position[0]][triangle00_position[1]]:
                g.board[triangle00_position[0]][triangle00_position[1]]="BX"
            else:
                g.board[triangle00_position[0]][triangle00_position[1]]=""
            if "WX" in g.board[circle00_position[0]][circle00_position[1]]:
                g.board[circle00_position[0]][circle00_position[1]]="WX"
            elif "BX" in g.board[circle00_position[0]][circle00_position[1]]:
                g.board[circle00_position[0]][circle00_position[1]]="BX"
            else:
                g.board[circle00_position[0]][circle00_position[1]]=""
        # Redraw both players' figures, marking newly annulled squares.
        if collision=="double_collision_same":
            g.board[triangle_position[0]][triangle_position[1]]="WX,WT,BT"
            g.board[circle_position[0]][circle_position[1]]="WX,WC,BC"
        elif collision=="double_collision_different":
            g.board[triangle_position[0]][triangle_position[1]]="BX,WT,BC"
            g.board[circle_position[0]][circle_position[1]]="BX,WC,BT"
        elif player_color=="white":
            if collision=="none":
                g.board[triangle_position[0]][triangle_position[1]]="WT"
                g.board[circle_position[0]][circle_position[1]]="WC"
                g.board[triangle2_position[0]][triangle2_position[1]]="BT"
                g.board[circle2_position[0]][circle2_position[1]]="BC"
            elif collision=="triangle_triangle2":
                g.board[triangle_position[0]][triangle_position[1]]="WX,WT,BT"
                g.board[circle_position[0]][circle_position[1]]="WC"
                g.board[circle2_position[0]][circle2_position[1]]="BC"
            elif collision=="circle_circle2":
                g.board[circle_position[0]][circle_position[1]]="WX,WC,BC"
                g.board[triangle_position[0]][triangle_position[1]]="WT"
                g.board[triangle2_position[0]][triangle2_position[1]]="BT"
            elif collision=="triangle_circle2":
                g.board[triangle_position[0]][triangle_position[1]]="BX,WT,BC"
                g.board[circle_position[0]][circle_position[1]]="WC"
                g.board[triangle2_position[0]][triangle2_position[1]]="BT"
            elif collision=="circle_triangle2":
                g.board[circle_position[0]][circle_position[1]]="BX,WC,BT"
                g.board[triangle_position[0]][triangle_position[1]]="WT"
                g.board[circle2_position[0]][circle2_position[1]]="BC"
        else:
            if collision=="none":
                g.board[triangle_position[0]][triangle_position[1]]="BT"
                g.board[circle_position[0]][circle_position[1]]="BC"
                g.board[triangle2_position[0]][triangle2_position[1]]="WT"
                g.board[circle2_position[0]][circle2_position[1]]="WC"
            elif collision=="triangle_triangle2":
                g.board[triangle_position[0]][triangle_position[1]]="WX,WT,BT"
                g.board[circle_position[0]][circle_position[1]]="BC"
                g.board[circle2_position[0]][circle2_position[1]]="WC"
            elif collision=="circle_circle2":
                g.board[circle_position[0]][circle_position[1]]="WX,WC,BC"
                g.board[triangle_position[0]][triangle_position[1]]="BT"
                g.board[triangle2_position[0]][triangle2_position[1]]="WT"
            elif collision=="triangle_circle2":
                g.board[triangle_position[0]][triangle_position[1]]="BX,WC,BT"
                g.board[circle_position[0]][circle_position[1]]="BC"
                g.board[triangle2_position[0]][triangle2_position[1]]="WT"
            elif collision=="circle_triangle2":
                g.board[circle_position[0]][circle_position[1]]="BX,WT,BC"
                g.board[triangle_position[0]][triangle_position[1]]="BT"
                g.board[circle2_position[0]][circle2_position[1]]="WC"
        # NOTE: a large commented-out draft of a repeated-position rule
        # lived here; removed as dead code (see version control history).
        g.white_score=w_score
        g.black_score=b_score
        g.save()
        # First player to 9 points wins.
        if g.white_score==9 or g.black_score==9:
            g.game_state="OVER"
            g.save()
        # NOTE: a commented-out draft of a separated-board (locked-figure)
        # rule lived here; removed as dead code (see version control
        # history).
        if g.game_state!="OVER":
            g.game_state="WAITING_FOR_MOVE"
            g.save()
    return json.dumps({"status": "OK"})
| 57.877127 | 246 | 0.549727 | from kontranto_igra.models import Game, Move
from django.core.exceptions import ObjectDoesNotExist
from django.core.serializers.json import DjangoJSONEncoder
from django.utils import timezone
from random import choice
import string
import json
def BlackOrWhite(color):
    """Return the opposite colour; for the empty string pick one at random."""
    if color == '':
        return choice(['black', 'white'])
    return 'black' if color == 'white' else 'white'
def new_game_f(player_id):
    """Create a new game with a random 10-character id.

    The creator is assigned a random colour; the opposite seat is left
    empty until a second player joins.  Returns a response dict.
    """
    game_id = "".join(choice(string.ascii_letters + string.digits) for _ in range(10))
    creator_color = BlackOrWhite('')
    empty_board = [["", "", "", ""] for _ in range(4)]
    if creator_color == 'black':
        g = Game.objects.create(game=game_id, game_state="WAITING_FOR_SECOND_PLAYER",
                                white_score=0, black_score=0, board=empty_board,
                                black_player_id=player_id, white_player_id="")
    else:
        g = Game.objects.create(game=game_id, game_state="WAITING_FOR_SECOND_PLAYER",
                                white_score=0, black_score=0, board=empty_board,
                                white_player_id=player_id, black_player_id="")
    return {
        "status": "Waiting for second player",
        "game_id": game_id,
        "my_id": player_id,
        "my_color": creator_color,
    }
def check_game_new(player_id):
    """Reject empty player ids before creating a game; return a status dict."""
    if player_id == "":
        return {"status": "Greska: player_id nije validan."}
    return {"status": "OK"}
def join_game_f(game_id, player_id):
    """Attach a second player to an open game and start it.

    The joiner takes whichever seat is still empty (white has priority).
    Returns a response dict with the assigned colour.
    """
    g = Game.objects.get(game=game_id)
    if g.white_player_id == "":
        g.white_player_id = player_id
        joiner_color = "white"
    else:
        g.black_player_id = player_id
        joiner_color = "black"
    g.game_state = "INIT"
    g.save()
    return {
        "status": "OK",
        "game_id": g.game,
        "my_id": player_id,
        "my_color": joiner_color,
    }
def check_game_join(game_id, player_id):
    """Validate a join request; return a {"status": ...} dict.

    Rejects empty player ids, unknown games, games that already started,
    and players who are already part of the game.

    Bug fixed: the old code swallowed AttributeError around the Game
    lookup and then dereferenced the unbound variable ``g``, raising
    NameError instead of returning an error response.
    """
    if player_id == "":
        return {"status": "Greska: player_id nije validan."}
    try:
        g = Game.objects.get(game=game_id)
    except ObjectDoesNotExist:
        return {"status": "Greska: ne postoji igra s tim game_id-em."}
    if g.game_state == "INIT":
        return {"status": "Greska: ta je igra vec pokrenuta."}
    if g.white_player_id == player_id or g.black_player_id == player_id:
        return {"status": "Greska: vec ste ukljuceni u tu igru."}
    return {"status": "OK"}
def game_state_f(game_id, my_color):
    """Return a JSON snapshot of scores, game state and the opponent's id."""
    try:
        g = Game.objects.get(game=game_id)
    except ObjectDoesNotExist:
        return json.dumps({"status": "Greska: ne postoji igra s tim game_id-em."})
    opponent_id = g.black_player_id if my_color == "white" else g.white_player_id
    return json.dumps({
        "opponent_id": opponent_id,
        "white_score": g.white_score,
        "black_score": g.black_score,
        "game_state": g.game_state,
    })
def get_move_f(game_id, my_color, opponent_color, ntp, ncp):
    """Return a JSON bundle of both players' latest figure positions.

    ntp/ncp are the caller's new triangle/circle positions as indexable
    "[r, c]"-style values; they are echoed back converted to algebraic
    notation (column letter a-d plus row number 1-4 on the 4x4 board).
    """
    # Convert positions read positionally (row at index 0, column at
    # index 2) into algebraic notation, e.g. row 0 / col 0 -> "a4".
    ntp = chr(97+int(ntp[2]))+str(4-int(ntp[0]))
    ncp = chr(97+int(ncp[2]))+str(4-int(ncp[0]))
    g = Game.objects.get(game = game_id)
    try:
        # Probe for a second recorded move; the bare except (normally an
        # IndexError) means there is no opponent data to report yet.
        m00 = Move.objects.filter(game_id=g.id).order_by('-move_timestamp')[1]
    except:
        return json.dumps({"ntp": ntp, "ncp": ncp, "otp": "null", "ocp": "null", "ntp_m": "null", "ncp_m": "null"})
    # Depending on whose reply the game is waiting for, the relevant move
    # of each colour is either its newest ([0]) or the one before ([1]).
    # NOTE(review): if no branch matches the current game_state, mm/mo
    # stay unbound and a NameError follows below -- confirm the state
    # machine guarantees exactly one branch always matches.
    if g.game_state=="WAITING_FOR_MOVE" or (my_color=="white" and g.game_state=="WAITING_FOR_WHITE_PLAYER_MOVE") or (my_color=="black" and g.game_state=="WAITING_FOR_BLACK_PLAYER_MOVE"):
        mm = Move.objects.filter(game_id=g.id, color=my_color).order_by('-move_timestamp')[0]
    elif (my_color=="white" and g.game_state=="WAITING_FOR_BLACK_PLAYER_MOVE") or (my_color=="black" and g.game_state=="WAITING_FOR_WHITE_PLAYER_MOVE"):
        mm = Move.objects.filter(game_id=g.id, color=my_color).order_by('-move_timestamp')[1]
    if g.game_state=="WAITING_FOR_MOVE" or (my_color=="white" and g.game_state=="WAITING_FOR_BLACK_PLAYER_MOVE") or (my_color=="black" and g.game_state=="WAITING_FOR_WHITE_PLAYER_MOVE"):
        mo = Move.objects.filter(game_id=g.id, color=opponent_color).order_by('-move_timestamp')[0]
    elif (my_color=="black" and g.game_state=="WAITING_FOR_BLACK_PLAYER_MOVE") or (my_color=="white" and g.game_state=="WAITING_FOR_WHITE_PLAYER_MOVE"):
        mo = Move.objects.filter(game_id=g.id, color=opponent_color).order_by('-move_timestamp')[1]
    # Stored positions are "[r, c]"-style strings: character 1 holds the
    # row digit and character 4 holds the column digit.
    ntp_m = chr(97+int(mm.triangle_position[4]))+str(4-int(mm.triangle_position[1]))
    ncp_m = chr(97+int(mm.circle_position[4]))+str(4-int(mm.circle_position[1]))
    otp = chr(97+int(mo.triangle_position[4]))+str(4-int(mo.triangle_position[1]))
    ocp = chr(97+int(mo.circle_position[4]))+str(4-int(mo.circle_position[1]))
    return json.dumps({"ntp": ntp, "ncp": ncp, "otp": otp, "ocp": ocp, "ntp_m": ntp_m, "ncp_m": ncp_m})
def rotate(l):
    """Rotate 4x4-board coordinates 90 degrees, in place.

    ``l`` is a flat list of (row, col) pairs: [r0, c0, r1, c1, ...].
    Each pair (r, c) is mapped to (c, 3 - r), one quarter-turn of the
    4x4 grid.  Mutates ``l`` and returns None.

    Note: the original implementation special-cased the four centre
    squares, but every special case equals the general (r, c) -> (c, 3-r)
    mapping, so a single formula suffices.
    """
    for i in range(0, len(l) - 1, 2):
        r, c = l[i], l[i + 1]
        l[i], l[i + 1] = c, 3 - r
def make_move (game_id, player_id, new_triangle_position, new_circle_position):
    """Validate and apply one move for a player; return a JSON status string.

    Positions arrive as indexable "[r, c]"-style values and are parsed
    positionally (items 0 and 2), so coordinates are assumed to be single
    digits on the 4x4 board.  On success the Game's board, scores and
    state are updated in the database and '{"status": "OK"}' is returned;
    on a rule violation a JSON error payload is returned, optionally
    carrying snap-back coordinates for the client.

    Bug fixed: this version persists the white score again -- the line
    ``g.white_score=w_score`` had been dropped, so white's points were
    computed each round but never written back to the Game record.
    """
    new_triangle_position = [int(new_triangle_position[0]), int(new_triangle_position[2])]
    new_circle_position = [int(new_circle_position[0]), int(new_circle_position[2])]
    try:
        g=Game.objects.get(game=game_id)
    except ObjectDoesNotExist:
        return json.dumps({"status": "Greska: ne postoji igra s tim game_id-em."})
    # Reject moves while the game is not in a playable state.
    if g.game_state=="WAITING_FOR_SECOND_PLAYER":
        return json.dumps({"status": "Greska: nedostaje drugi igrac."})
    elif g.game_state=="OVER":
        return json.dumps({"status": "Greska: igra je gotova."})
    # Work out which side this player controls.
    if player_id==g.white_player_id:
        player_color="white"
    elif player_id==g.black_player_id:
        player_color="black"
    else:
        return json.dumps({"status": "Greska: player_id nije validan."})
    # Turn enforcement: a player may not move twice in a row.
    if player_color=="white" and g.game_state=="WAITING_FOR_BLACK_PLAYER_MOVE":
        return json.dumps({"status": "Greska: vec ste odigrali potez; cekajte potez crnog igraca."})
    elif player_color=="black" and g.game_state=="WAITING_FOR_WHITE_PLAYER_MOVE":
        return json.dumps({"status": "Greska: vec ste odigrali potez; cekajte potez bijelog igraca."})
    try:
        # Probe for two earlier moves in this game; the bare except
        # (normally an IndexError) means this is the opening move.
        m0=Move.objects.filter(game_id=g.id).order_by('-move_timestamp')[0]
        m00=Move.objects.filter(game_id=g.id).order_by('-move_timestamp')[1]
    except:
        # Opening move: the only rule is that both figures may not land on
        # the same square.
        if new_triangle_position==new_circle_position:
            return json.dumps({"status": "Greska: ne mozete pomaknuti obje figure na isto polje"})
    else:
        triangle_position=new_triangle_position
        circle_position=new_circle_position
        # Fetch this colour's previous move so the client can snap the
        # figures back on error.
        # NOTE(review): this filter lacks game_id, so it reads the colour's
        # latest move across ALL games -- looks like a bug; confirm intent.
        move0=Move.objects.filter(color=player_color).order_by('-move_timestamp')[0]
        s=move0.triangle_position
        triangle0_position=[int(s[1]), int(s[4])]
        s=move0.circle_position
        circle0_position=[int(s[1]), int(s[4])]
        # "X" means "no snap-back needed"; a real coordinate marks the
        # figure that must return to its previous square on the client.
        error_resp = {"status": "Greska: ne mozete se pomaknuti na ponisteno polje.", "triangle0_position": "X", "circle0_position": "X"}
        error_resp["new_triangle_position"] = chr(97+int(new_triangle_position[1]))+str(4-int(new_triangle_position[0]))
        error_resp["new_circle_position"] = chr(97+int(new_circle_position[1]))+str(4-int(new_circle_position[0]))
        # Moving onto an annulled ("X"-marked) square is forbidden.
        if "X" in g.board[triangle_position[0]][triangle_position[1]]:
            error_resp["triangle0_position"] = chr(97+int(triangle0_position[1]))+str(4-int(triangle0_position[0]))
        if "X" in g.board[circle_position[0]][circle_position[1]]:
            error_resp["circle0_position"] = chr(97+int(circle0_position[1]))+str(4-int(circle0_position[0]))
        # Both figures on one square is forbidden as well.
        if new_triangle_position==new_circle_position:
            error_resp = {"status": "Greska: ne mozete pomaknuti obje figure na isto polje.", "triangle0_position": "X", "circle0_position": "X"}
            error_resp["triangle0_position"] = chr(97+int(triangle0_position[1]))+str(4-int(triangle0_position[0]))
            error_resp["circle0_position"] = chr(97+int(circle0_position[1]))+str(4-int(circle0_position[0]))
        if error_resp["triangle0_position"] != "X" or error_resp["circle0_position"] != "X":
            return json.dumps(error_resp)
    # Reject moves onto unreachable squares.  First check whether this is
    # the very start of the game (no prior moves in this game means every
    # square is reachable).
    try:
        m0=Move.objects.filter(game_id=g.id).order_by('-move_timestamp')[0]
        m00=Move.objects.filter(game_id=g.id).order_by('-move_timestamp')[1]
    except:
        pass
    else:
        # A figure may move at most one square in each direction.
        max_range=[-1, 0, 1]
        # Previous move of this colour gives the figures' current squares.
        # NOTE(review): again no game_id in the filter -- confirm intent.
        move0=Move.objects.filter(color=player_color).order_by('-move_timestamp')[0]
        s=move0.triangle_position
        triangle0_position=[int(s[1]), int(s[4])]
        s=move0.circle_position
        circle0_position=[int(s[1]), int(s[4])]
        error_resp = {"status": "Greska: ne mozete se pomaknuti na nedohvativo polje.", "triangle0_position": "X", "circle0_position": "X"}
        error_resp["new_triangle_position"] = chr(97+int(new_triangle_position[1]))+str(4-int(new_triangle_position[0]))
        error_resp["new_circle_position"] = chr(97+int(new_circle_position[1]))+str(4-int(new_circle_position[0]))
        if triangle_position[0]-triangle0_position[0] not in max_range:
            error_resp["triangle0_position"] = chr(97+int(triangle0_position[1]))+str(4-int(triangle0_position[0]))
        elif triangle_position[1]-triangle0_position[1] not in max_range:
            error_resp["triangle0_position"] = chr(97+int(triangle0_position[1]))+str(4-int(triangle0_position[0]))
        if circle_position[0]-circle0_position[0] not in max_range:
            error_resp["circle0_position"] = chr(97+int(circle0_position[1]))+str(4-int(circle0_position[0]))
        elif circle_position[1]-circle0_position[1] not in max_range:
            error_resp["circle0_position"] = chr(97+int(circle0_position[1]))+str(4-int(circle0_position[0]))
        if error_resp["triangle0_position"] != "X" or error_resp["circle0_position"] != "X":
            return json.dumps(error_resp)
    # TODO: handle the end-game case where the only reachable square is
    # already occupied by the player's other figure (would require a way
    # to take a figure out of play).
    m=Move.objects.create(game_id=g.id, color=player_color, triangle_position=new_triangle_position, circle_position=new_circle_position, move_timestamp=timezone.now())
    g.save()
    if g.game_state=="INIT" or g.game_state=="WAITING_FOR_MOVE":
        # First half of a round: record whose answer we are waiting for.
        if player_color=="white":
            g.game_state="WAITING_FOR_BLACK_PLAYER_MOVE"
        else:
            g.game_state="WAITING_FOR_WHITE_PLAYER_MOVE"
        g.save()
    elif g.game_state=="WAITING_FOR_BLACK_PLAYER_MOVE" or g.game_state=="WAITING_FOR_WHITE_PLAYER_MOVE":
        # Second half of a round: both players have now moved, so resolve
        # collisions, redraw the board and update the score.
        w_score=g.white_score
        b_score=g.black_score
        triangle_position=new_triangle_position
        circle_position=new_circle_position
        # The newest move is ours (just inserted above), so index 1 is the
        # opponent's move from the first half of this round.
        previous_move=Move.objects.filter(game_id=g.id).order_by('-move_timestamp')[1]
        s=previous_move.triangle_position
        triangle2_position=[int(s[1]), int(s[4])]
        s=previous_move.circle_position
        circle2_position=[int(s[1]), int(s[4])]
        # Collision resolution: same-figure collisions score for white,
        # cross-figure collisions score for black.
        collision="none"
        if triangle_position==triangle2_position and circle_position==circle2_position:
            collision="double_collision_same"
            w_score+=2
        elif triangle_position==circle2_position and circle_position==triangle2_position:
            collision="double_collision_different"
            b_score+=2
        elif triangle_position==triangle2_position:
            collision="triangle_triangle2"
            w_score+=1
        elif circle_position==circle2_position:
            collision="circle_circle2"
            w_score+=1
        elif triangle_position==circle2_position:
            collision="triangle_circle2"
            b_score+=1
        elif circle_position==triangle2_position:
            collision="circle_triangle2"
            b_score+=1
        # Clear the squares occupied two rounds ago, preserving any
        # annulled ("WX"/"BX") markers on them.
        try:
            m0=Move.objects.filter(game_id=g.id).order_by('-move_timestamp')[2]
            m00=Move.objects.filter(game_id=g.id).order_by('-move_timestamp')[3]
        except:
            pass
        else:
            s=m0.triangle_position
            triangle0_position=[int(s[1]), int(s[4])]
            s=m0.circle_position
            circle0_position=[int(s[1]), int(s[4])]
            s=m00.triangle_position
            triangle00_position=[int(s[1]), int(s[4])]
            s=m00.circle_position
            circle00_position=[int(s[1]), int(s[4])]
            if "WX" in g.board[triangle0_position[0]][triangle0_position[1]]:
                g.board[triangle0_position[0]][triangle0_position[1]]="WX"
            elif "BX" in g.board[triangle0_position[0]][triangle0_position[1]]:
                g.board[triangle0_position[0]][triangle0_position[1]]="BX"
            else:
                g.board[triangle0_position[0]][triangle0_position[1]]=""
            if "WX" in g.board[circle0_position[0]][circle0_position[1]]:
                g.board[circle0_position[0]][circle0_position[1]]="WX"
            elif "BX" in g.board[circle0_position[0]][circle0_position[1]]:
                g.board[circle0_position[0]][circle0_position[1]]="BX"
            else:
                g.board[circle0_position[0]][circle0_position[1]]=""
            if "WX" in g.board[triangle00_position[0]][triangle00_position[1]]:
                g.board[triangle00_position[0]][triangle00_position[1]]="WX"
            elif "BX" in g.board[triangle00_position[0]][triangle00_position[1]]:
                g.board[triangle00_position[0]][triangle00_position[1]]="BX"
            else:
                g.board[triangle00_position[0]][triangle00_position[1]]=""
            if "WX" in g.board[circle00_position[0]][circle00_position[1]]:
                g.board[circle00_position[0]][circle00_position[1]]="WX"
            elif "BX" in g.board[circle00_position[0]][circle00_position[1]]:
                g.board[circle00_position[0]][circle00_position[1]]="BX"
            else:
                g.board[circle00_position[0]][circle00_position[1]]=""
        # Redraw both players' figures, marking newly annulled squares.
        if collision=="double_collision_same":
            g.board[triangle_position[0]][triangle_position[1]]="WX,WT,BT"
            g.board[circle_position[0]][circle_position[1]]="WX,WC,BC"
        elif collision=="double_collision_different":
            g.board[triangle_position[0]][triangle_position[1]]="BX,WT,BC"
            g.board[circle_position[0]][circle_position[1]]="BX,WC,BT"
        elif player_color=="white":
            if collision=="none":
                g.board[triangle_position[0]][triangle_position[1]]="WT"
                g.board[circle_position[0]][circle_position[1]]="WC"
                g.board[triangle2_position[0]][triangle2_position[1]]="BT"
                g.board[circle2_position[0]][circle2_position[1]]="BC"
            elif collision=="triangle_triangle2":
                g.board[triangle_position[0]][triangle_position[1]]="WX,WT,BT"
                g.board[circle_position[0]][circle_position[1]]="WC"
                g.board[circle2_position[0]][circle2_position[1]]="BC"
            elif collision=="circle_circle2":
                g.board[circle_position[0]][circle_position[1]]="WX,WC,BC"
                g.board[triangle_position[0]][triangle_position[1]]="WT"
                g.board[triangle2_position[0]][triangle2_position[1]]="BT"
            elif collision=="triangle_circle2":
                g.board[triangle_position[0]][triangle_position[1]]="BX,WT,BC"
                g.board[circle_position[0]][circle_position[1]]="WC"
                g.board[triangle2_position[0]][triangle2_position[1]]="BT"
            elif collision=="circle_triangle2":
                g.board[circle_position[0]][circle_position[1]]="BX,WC,BT"
                g.board[triangle_position[0]][triangle_position[1]]="WT"
                g.board[circle2_position[0]][circle2_position[1]]="BC"
        else:
            if collision=="none":
                g.board[triangle_position[0]][triangle_position[1]]="BT"
                g.board[circle_position[0]][circle_position[1]]="BC"
                g.board[triangle2_position[0]][triangle2_position[1]]="WT"
                g.board[circle2_position[0]][circle2_position[1]]="WC"
            elif collision=="triangle_triangle2":
                g.board[triangle_position[0]][triangle_position[1]]="WX,WT,BT"
                g.board[circle_position[0]][circle_position[1]]="BC"
                g.board[circle2_position[0]][circle2_position[1]]="WC"
            elif collision=="circle_circle2":
                g.board[circle_position[0]][circle_position[1]]="WX,WC,BC"
                g.board[triangle_position[0]][triangle_position[1]]="BT"
                g.board[triangle2_position[0]][triangle2_position[1]]="WT"
            elif collision=="triangle_circle2":
                g.board[triangle_position[0]][triangle_position[1]]="BX,WC,BT"
                g.board[circle_position[0]][circle_position[1]]="BC"
                g.board[triangle2_position[0]][triangle2_position[1]]="WT"
            elif collision=="circle_triangle2":
                g.board[circle_position[0]][circle_position[1]]="BX,WT,BC"
                g.board[triangle_position[0]][triangle_position[1]]="BT"
                g.board[circle2_position[0]][circle2_position[1]]="WC"
        # Persist both scores (the white assignment was previously missing).
        g.white_score=w_score
        g.black_score=b_score
        g.save()
        # First player to 9 points wins.
        if g.white_score==9 or g.black_score==9:
            g.game_state="OVER"
            g.save()
        if g.game_state!="OVER":
            g.game_state="WAITING_FOR_MOVE"
            g.save()
    return json.dumps({"status": "OK"})
| true | true |
1c31c78d0621c40bcec3cdd165869a70fcf880a5 | 3,092 | py | Python | smamp/convert_UA_to_AA.py | lukaselflein/smamp | 2a0a8ce36b16aedd4c6a2bb576ba959061ec4e7e | [
"MIT"
] | null | null | null | smamp/convert_UA_to_AA.py | lukaselflein/smamp | 2a0a8ce36b16aedd4c6a2bb576ba959061ec4e7e | [
"MIT"
] | 1 | 2019-03-29T13:44:53.000Z | 2019-03-29T16:17:40.000Z | smamp/convert_UA_to_AA.py | lukaselflein/smamp | 2a0a8ce36b16aedd4c6a2bb576ba959061ec4e7e | [
"MIT"
] | null | null | null | """ Change structure with implicit Hydrogen to one with explicitely defined H-atoms.
Copyright 2019 Simulation Lab
University of Freiburg
Author: Johannes Hoermann <johannes.hoermann@imtek.uni-freiburg.de>
Modified: Lukas Elflein <elfleinl@cs.uni-freiburg.de>
"""
import os
import ase.io
import sys
import warnings
import numpy as np
import parmed as pmd
from ase.data import atomic_numbers
from ase.neighborlist import NeighborList
from matscipy.neighbours import neighbour_list
from parmed import gromacs
from smamp.insertHbyList import insertHbyList
from smamp.tools import find
from smamp.tools import read_atom_numbers
def read_input_files(search_path='..'):
	"""Locate and read the united-atom input files (implicit H-atoms).

	Searches under *search_path* for an ``initial_structure`` folder
	containing a ``.pdb`` structure file and a ``.top`` GROMACS topology.

	Args:
		search_path: root directory handed to ``find`` (default ``'..'``).

	Returns:
		Tuple ``(ase_struct, pmd_struct, pmd_top)`` with the ASE structure,
		the ParmEd structure and the (unparametrized) GROMACS topology.

	Raises:
		RuntimeError: if either input file cannot be found.
	"""
	pdb_hits = find(path=search_path, folder_keyword='initial_structure', file_keyword='.pdb')
	top_hits = find(path=search_path, folder_keyword='initial_structure', file_keyword='.top')
	# Fail early with a clear message instead of an IndexError; the previous
	# post-hoc checks referenced an undefined name (input_dir) and could
	# never trigger anyway because ase.io.read/load_file raise first.
	if not pdb_hits:
		raise RuntimeError('structure file (.pdb) not found under {}'.format(search_path))
	if not top_hits:
		raise RuntimeError('topology file (.top) not found under {}'.format(search_path))
	pdb_file = pdb_hits[0]
	top_file = top_hits[0]
	ase_struct = ase.io.read(pdb_file)
	pmd_struct = pmd.load_file(pdb_file)
	# parametrize=False: only bonding/topology info is needed downstream.
	pmd_top = gromacs.GromacsTopologyFile(top_file, parametrize=False)
	return ase_struct, pmd_struct, pmd_top
def main(implicitHbondingPartners=None):
	"""Convert the united-atom snapshot into an all-atom structure.

	Args:
		implicitHbondingPartners: mapping of atom names to implicit
			hydrogen counts; read via ``read_atom_numbers()`` when None.
	"""
	if implicitHbondingPartners is None:
		# Default to the bundled hydrogen-count table.
		implicitHbondingPartners = read_atom_numbers()

	# Parsing the topology emits angle-type warnings that do not affect
	# the bonding information we need, so silence them.
	with warnings.catch_warnings():
		warnings.simplefilter('ignore')
		ase_struct, pmd_struct, pmd_top = read_input_files()

	pmd_top.strip(':SOL,CL')  # drop water and electrolyte from the system
	pmd_top.box = pmd_struct.box  # box info only lives in the structure file
	pmd_top.positions = pmd_struct.positions

	# Add the explicit hydrogens, logging the insertion details.
	print('Inserting explicit hydrogens, please wait ...')
	with open('insert_H.log', 'w') as logfile:
		new_ase_struct, new_pmd_top, _names, _residues = insertHbyList(
			ase_struct, pmd_top, implicitHbondingPartners,
			bond_length=1.0, debug=logfile)

	# ASE outputs.
	new_ase_struct.write('ase_pdbH.pdb')
	new_ase_struct.write('ase_pdbH.traj')
	# ParmEd output, then read it back as a quick sanity check.
	new_pmd_top.write_pdb('pmd_pdbH.pdb')
	test_pmd = pmd.load_file('pmd_pdbH.pdb')
	# new_pmd_top.write_psf('pmd_pdbH.psf')
	print('Done.')


if __name__ == '__main__':
	main()
| 36.376471 | 93 | 0.67238 |
import os
import ase.io
import sys
import warnings
import numpy as np
import parmed as pmd
from ase.data import atomic_numbers
from ase.neighborlist import NeighborList
from matscipy.neighbours import neighbour_list
from parmed import gromacs
from smamp.insertHbyList import insertHbyList
from smamp.tools import find
from smamp.tools import read_atom_numbers
def read_input_files():
ase_struct, pmd_top = None, None
pdb_file = find(path='..', folder_keyword='initial_structure', file_keyword='.pdb')[0]
top_file = find(path='..', folder_keyword='initial_structure', file_keyword='.top')[0]
ase_struct = ase.io.read(pdb_file)
pmd_struct = pmd.load_file(pdb_file)
pmd_top = gromacs.GromacsTopologyFile(top_file, parametrize=False)
if ase_struct is None:
raise RuntimeError('structure file (.pdb) not found in {}'.format(input_dir))
if pmd_top is None:
raise RuntimeError('topology file (.top) not found in {}'.format(input_dir))
return ase_struct, pmd_struct, pmd_top
def main(implicitHbondingPartners=None):
if implicitHbondingPartners is None:
implicitHbondingPartners = read_atom_numbers()
with warnings.catch_warnings():
warnings.simplefilter('ignore')
ase_struct, pmd_struct, pmd_top = read_input_files()
pmd_top.strip(':SOL,CL')
pmd_top.box = pmd_struct.box
pmd_top.positions = pmd_struct.positions
print('Inserting explicit hydrogens, please wait ...')
with open('insert_H.log', 'w') as logfile:
new_ase_struct, new_pmd_top, names, residues = insertHbyList(ase_struct,
pmd_top,
implicitHbondingPartners,
bond_length=1.0,
debug=logfile)
new_ase_struct.write('ase_pdbH.pdb')
new_ase_struct.write('ase_pdbH.traj')
new_pmd_top.write_pdb('pmd_pdbH.pdb')
test_pmd = pmd.load_file('pmd_pdbH.pdb')
# new_pmd_top.write_psf('pmd_pdbH.psf')
print('Done.')
if __name__ == '__main__':
main()
| true | true |
1c31c799afcd02d00be6c55536e837f3f97b10aa | 8,073 | py | Python | agrupar.py | Juane99/Leg-Recognition-CoppeliaSim | dad2ecde9dbc97965fbba07e1eb9d27458bff97b | [
"MIT"
] | 2 | 2021-07-03T17:02:51.000Z | 2021-11-03T09:54:58.000Z | agrupar.py | Juane99/Leg-Recognition-CoppeliaSim | dad2ecde9dbc97965fbba07e1eb9d27458bff97b | [
"MIT"
] | null | null | null | agrupar.py | Juane99/Leg-Recognition-CoppeliaSim | dad2ecde9dbc97965fbba07e1eb9d27458bff97b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Juan Emilio Martinez Manjon
Asignatura: Programacion Tecnica y Cientifica
Profesor: Eugenio Aguirre Molina
Curso: 2020/2021
"""
# agrupar.py
"""
Este fichero agrupa los puntos capturados desde el simulador en clusters.
Para saber cuantos puntos hay en cada cluster usaremos los parámetros
minpuntos, maxpuntos y umbral.
Después de hacer varias pruebas empíricas y analizando los resultados finales
he llegado a la conclusión de que los mejores parámetros para este problema
son:
- Minpuntos = 3
- Maxpuntos = 25
- Umbral = 0.05
"""
import parametros
import json
import os
from math import sqrt
from copy import copy
def main():
#Leemos los parámetros que ha introducido el usuario
minimo_puntos = parametros.params.getMinPuntos()
maximo_puntos = parametros.params.getMaxPuntos()
umbral = parametros.params.getUmbral()
###################
# FICHERO PIERNAS #
###################
numero_cluster = 0
ficheroPiernas=open("clustersPiernas.json", "w")
#Recorremos las carpetas con nombre positivo
for filename in os.listdir('.'):
if (os.path.isdir(filename) and "positivo" in filename):
objetos=[]
#Abrimos el archivo json de dentro de la carpeta
for file in os.listdir(filename):
if (file.endswith('json')):
a_abrir = file
with open(filename+'/'+a_abrir, 'r') as f:
for line in f:
objetos.append(json.loads(line))
iterTotalesDict=objetos[len(objetos)-1]
iterTotales=iterTotalesDict['Iteraciones totales']
#Vamos leyendo los puntos del fichero y metiendolos en el cluster
#dependiendo de los parametros maxPuntos, minPuntos y Umbral
for i in range(iterTotales):
x_elegidos = []
y_elegidos = []
puntosX=objetos[i+1]['PuntosX']
puntosY=objetos[i+1]['PuntosY']
#Recorremos los puntos de X e Y
x_anterior = puntosX[0]
y_anterior = puntosY[0]
for px, py in zip(puntosX,puntosY):
#Calculamos la distancia del punto
distancia = sqrt( ((px-x_anterior)*(px-x_anterior)) + ((py-y_anterior)*(py-y_anterior)))
#Si el punto leido pertenece a otro cluster
if (distancia > umbral or len(x_elegidos)+1 > maximo_puntos):
#Miramos si el cluster actual tiene un minimo de puntos
#para poder guardarlo en el json
if (len(x_elegidos) >= minimo_puntos):
cluster={"numero_cluster":numero_cluster,
"numero_puntos":len(x_elegidos),
"puntosX":copy(x_elegidos),
"puntosY":copy(y_elegidos)}
ficheroPiernas.write(json.dumps(cluster)+'\n')
numero_cluster += 1
x_elegidos.clear()
y_elegidos.clear()
#Añadimos el nuevo punto al cluster
x_elegidos.append(px)
y_elegidos.append(py)
x_anterior = px
y_anterior = py
#Si pasamos a la siguiente iteracion, miramos si podemos añadir
#al json los puntos que tenemos hasta ese momento
if (len(x_elegidos) >= minimo_puntos):
cluster={"numero_cluster":numero_cluster,
"numero_puntos":len(x_elegidos),
"puntosX":copy(x_elegidos),
"puntosY":copy(y_elegidos)}
ficheroPiernas.write(json.dumps(cluster)+'\n')
numero_cluster += 1
ficheroPiernas.close()
######################
# FICHERO NO PIERNAS #
######################
numero_cluster = 0
ficheroNoPiernas=open("clustersNoPiernas.json", "w")
#Recorremos las carpetas con nombre negativo
for filename in os.listdir('.'):
if (os.path.isdir(filename) and "negativo" in filename):
objetos=[]
#Abrimos el archivo json de dentro de la carpeta
for file in os.listdir(filename):
if (file.endswith('json')):
a_abrir = file
with open(filename+'/'+a_abrir, 'r') as f:
for line in f:
objetos.append(json.loads(line))
iterTotalesDict=objetos[len(objetos)-1]
iterTotales=iterTotalesDict['Iteraciones totales']
#Vamos leyendo los puntos del fichero y metiendolos en el cluster
#dependiendo de los parametros maxPuntos, minPuntos y Umbral
for i in range(iterTotales):
x_elegidos = []
y_elegidos = []
puntosX=objetos[i+1]['PuntosX']
puntosY=objetos[i+1]['PuntosY']
#Recorremos los puntos de X e Y
x_anterior = puntosX[0]
y_anterior = puntosY[0]
for px, py in zip(puntosX,puntosY):
#Calculamos la distancia del punto
distancia = sqrt( ((px-x_anterior)*(px-x_anterior)) + ((py-y_anterior)*(py-y_anterior)))
#Si el punto leido pertenece a otro cluster
if (distancia > umbral or len(x_elegidos)+1 > maximo_puntos):
#Miramos si el cluster actual tiene un minimo de puntos
#para poder guardarlo en el json
if (len(x_elegidos) >= minimo_puntos):
cluster={"numero_cluster":numero_cluster,
"numero_puntos":len(x_elegidos),
"puntosX":copy(x_elegidos),
"puntosY":copy(y_elegidos)}
ficheroNoPiernas.write(json.dumps(cluster)+'\n')
numero_cluster += 1
x_elegidos.clear()
y_elegidos.clear()
#Añadimos el nuevo punto al cluster
x_elegidos.append(px)
y_elegidos.append(py)
x_anterior = px
y_anterior = py
#Si pasamos a la siguiente iteracion, miramos si podemos añadir
#al json los puntos que tenemos hasta ese momento
if (len(x_elegidos) >= minimo_puntos):
cluster={"numero_cluster":numero_cluster,
"numero_puntos":len(x_elegidos),
"puntosX":copy(x_elegidos),
"puntosY":copy(y_elegidos)}
ficheroNoPiernas.write(json.dumps(cluster)+'\n')
numero_cluster += 1
ficheroNoPiernas.close() | 35.721239 | 110 | 0.464635 |
import parametros
import json
import os
from math import sqrt
from copy import copy
def main():
minimo_puntos = parametros.params.getMinPuntos()
maximo_puntos = parametros.params.getMaxPuntos()
umbral = parametros.params.getUmbral()
:
a_abrir = file
with open(filename+'/'+a_abrir, 'r') as f:
for line in f:
objetos.append(json.loads(line))
iterTotalesDict=objetos[len(objetos)-1]
iterTotales=iterTotalesDict['Iteraciones totales']
for i in range(iterTotales):
x_elegidos = []
y_elegidos = []
puntosX=objetos[i+1]['PuntosX']
puntosY=objetos[i+1]['PuntosY']
x_anterior = puntosX[0]
y_anterior = puntosY[0]
for px, py in zip(puntosX,puntosY):
distancia = sqrt( ((px-x_anterior)*(px-x_anterior)) + ((py-y_anterior)*(py-y_anterior)))
if (distancia > umbral or len(x_elegidos)+1 > maximo_puntos):
if (len(x_elegidos) >= minimo_puntos):
cluster={"numero_cluster":numero_cluster,
"numero_puntos":len(x_elegidos),
"puntosX":copy(x_elegidos),
"puntosY":copy(y_elegidos)}
ficheroPiernas.write(json.dumps(cluster)+'\n')
numero_cluster += 1
x_elegidos.clear()
y_elegidos.clear()
x_elegidos.append(px)
y_elegidos.append(py)
x_anterior = px
y_anterior = py
if (len(x_elegidos) >= minimo_puntos):
cluster={"numero_cluster":numero_cluster,
"numero_puntos":len(x_elegidos),
"puntosX":copy(x_elegidos),
"puntosY":copy(y_elegidos)}
ficheroPiernas.write(json.dumps(cluster)+'\n')
numero_cluster += 1
ficheroPiernas.close()
ine in f:
objetos.append(json.loads(line))
iterTotalesDict=objetos[len(objetos)-1]
iterTotales=iterTotalesDict['Iteraciones totales']
for i in range(iterTotales):
x_elegidos = []
y_elegidos = []
puntosX=objetos[i+1]['PuntosX']
puntosY=objetos[i+1]['PuntosY']
x_anterior = puntosX[0]
y_anterior = puntosY[0]
for px, py in zip(puntosX,puntosY):
distancia = sqrt( ((px-x_anterior)*(px-x_anterior)) + ((py-y_anterior)*(py-y_anterior)))
if (distancia > umbral or len(x_elegidos)+1 > maximo_puntos):
if (len(x_elegidos) >= minimo_puntos):
cluster={"numero_cluster":numero_cluster,
"numero_puntos":len(x_elegidos),
"puntosX":copy(x_elegidos),
"puntosY":copy(y_elegidos)}
ficheroNoPiernas.write(json.dumps(cluster)+'\n')
numero_cluster += 1
x_elegidos.clear()
y_elegidos.clear()
x_elegidos.append(px)
y_elegidos.append(py)
x_anterior = px
y_anterior = py
if (len(x_elegidos) >= minimo_puntos):
cluster={"numero_cluster":numero_cluster,
"numero_puntos":len(x_elegidos),
"puntosX":copy(x_elegidos),
"puntosY":copy(y_elegidos)}
ficheroNoPiernas.write(json.dumps(cluster)+'\n')
numero_cluster += 1
ficheroNoPiernas.close() | true | true |
1c31c8910abdd3dcc95e5c9b42e51f7b63ab7484 | 537 | py | Python | env/lib/python3.8/site-packages/plotly/validators/layout/polar/radialaxis/tickformatstop/_templateitemname.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/layout/polar/radialaxis/tickformatstop/_templateitemname.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/layout/polar/radialaxis/tickformatstop/_templateitemname.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for ``layout.polar.radialaxis.tickformatstop.templateitemname``."""

    def __init__(
        self,
        plotly_name="templateitemname",
        parent_name="layout.polar.radialaxis.tickformatstop",
        **kwargs
    ):
        # Apply the defaults only when the caller has not overridden them.
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault("role", "info")
        super(TemplateitemnameValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| 29.833333 | 78 | 0.638734 | import _plotly_utils.basevalidators
class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="templateitemname",
parent_name="layout.polar.radialaxis.tickformatstop",
**kwargs
):
super(TemplateitemnameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
| true | true |
1c31c9c604c90a89fb3ae0d97caf87d474ab4172 | 7,200 | py | Python | registry/tags.py | ewindisch/docker-registry | c95a344d03381e948336e36d45cda3f7be8002f7 | [
"Apache-2.0"
] | null | null | null | registry/tags.py | ewindisch/docker-registry | c95a344d03381e948336e36d45cda3f7be8002f7 | [
"Apache-2.0"
] | null | null | null | registry/tags.py | ewindisch/docker-registry | c95a344d03381e948336e36d45cda3f7be8002f7 | [
"Apache-2.0"
] | null | null | null |
import datetime
import logging
import re
import time
import flask
import simplejson as json
import signals
import storage
import toolkit
from .app import app
store = storage.load()
logger = logging.getLogger(__name__)
RE_USER_AGENT = re.compile('([^\s/]+)/([^\s/]+)')
@app.route('/v1/repositories/<path:repository>/properties', methods=['PUT'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def set_properties(namespace, repo):
    """Set repository access from a JSON body like ``{"access": "private"}``.

    Creates or removes the private-flag file accordingly; any other
    access value is a no-op.
    """
    logger.debug("[set_access] namespace={0}; repository={1}".format(namespace,
                 repo))
    data = None
    try:
        data = json.loads(flask.request.data)
    except json.JSONDecodeError:
        pass
    # Require a JSON object carrying an 'access' key; previously a missing
    # key raised KeyError (HTTP 500) instead of this 'Invalid data' error.
    if not data or not isinstance(data, dict) or 'access' not in data:
        return toolkit.api_error('Invalid data')
    private_flag_path = store.private_flag_path(namespace, repo)
    if data['access'] == 'private' and not store.is_private(namespace, repo):
        # Presence of the (empty) flag file marks the repository private.
        store.put_content(private_flag_path, '')
    elif data['access'] == 'public' and store.is_private(namespace, repo):
        store.remove(private_flag_path)
    return toolkit.response()
@app.route('/v1/repositories/<path:repository>/properties', methods=['GET'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def get_properties(namespace, repo):
    """Report whether the repository is 'private' or 'public'."""
    logger.debug("[get_access] namespace={0}; repository={1}".format(namespace,
                 repo))
    access = 'private' if store.is_private(namespace, repo) else 'public'
    return toolkit.response({'access': access})
@app.route('/v1/repositories/<path:repository>/tags', methods=['GET'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def get_tags(namespace, repository):
    """Return a mapping of tag name -> image id for the repository."""
    logger.debug("[get_tags] namespace={0}; repository={1}".format(namespace,
                 repository))
    tags = {}
    try:
        for fname in store.list_directory(store.tag_path(namespace,
                                                         repository)):
            basename = fname.rsplit('/', 1)[-1]
            if basename.startswith('tag_'):
                # Strip the 'tag_' filename prefix to recover the tag name.
                tags[basename[4:]] = store.get_content(fname)
    except OSError:
        return toolkit.api_error('Repository not found', 404)
    return toolkit.response(tags)
@app.route('/v1/repositories/<path:repository>/tags/<tag>', methods=['GET'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def get_tag(namespace, repository, tag):
    """Return the image id a single tag points to, or 404."""
    logger.debug("[get_tag] namespace={0}; repository={1}; tag={2}".format(
        namespace, repository, tag))
    try:
        data = store.get_content(store.tag_path(namespace, repository, tag))
    except IOError:
        return toolkit.api_error('Tag not found', 404)
    return toolkit.response(data)
# warning: this endpoint is deprecated in favor of tag-specific json
# implemented by get_repository_tag_json
@app.route('/v1/repositories/<path:repository>/json', methods=['GET'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def get_repository_json(namespace, repository):
    """Return repository-level metadata (legacy endpoint).

    Serves the metadata blob that put_tag mirrors here when the 'latest'
    tag is written; falls back to a default skeleton when none exists.
    """
    json_path = store.repository_json_path(namespace, repository)
    # Defaults served when no metadata file has been stored yet.
    data = {'last_update': None,
            'docker_version': None,
            'docker_go_version': None,
            'arch': 'amd64',
            'os': 'linux',
            'kernel': None}
    try:
        data = json.loads(store.get_content(json_path))
    except IOError:
        # We ignore the error, we'll serve the default json declared above
        pass
    return toolkit.response(data)
@app.route(
    '/v1/repositories/<path:repository>/tags/<tag>/json',
    methods=['GET'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def get_repository_tag_json(namespace, repository, tag):
    """Return the per-tag metadata written by put_tag.

    Falls back to a default skeleton when the tag has no stored metadata.
    """
    json_path = store.repository_tag_json_path(namespace, repository, tag)
    # Defaults served when no metadata file has been stored yet.
    data = {'last_update': None,
            'docker_version': None,
            'docker_go_version': None,
            'arch': 'amd64',
            'os': 'linux',
            'kernel': None}
    try:
        data = json.loads(store.get_content(json_path))
    except IOError:
        # We ignore the error, we'll serve the default json declared above
        pass
    return toolkit.response(data)
def create_tag_json(user_agent):
    """Build the JSON metadata blob stored next to a tag.

    Extracts "name/value" tokens from the Docker client's User-Agent
    header (e.g. "docker/0.6.5 go/go1.1 os/linux arch/AMD64") and stamps
    the current time as a Unix timestamp.
    """
    # An epoch delta on utcnow() yields a true UTC timestamp; the previous
    # time.mktime(utcnow().timetuple()) call interpreted the UTC tuple as
    # *local* time, skewing last_update by the server's UTC offset.
    epoch = datetime.datetime(1970, 1, 1)
    props = {
        'last_update': int(
            (datetime.datetime.utcnow() - epoch).total_seconds())
    }
    # One "name/value" pair per whitespace-separated token; the re module
    # caches the compiled pattern, so this costs the same as RE_USER_AGENT.
    ua = dict(re.findall(r'([^\s/]+)/([^\s/]+)', user_agent))
    if 'docker' in ua:
        props['docker_version'] = ua['docker']
    if 'go' in ua:
        props['docker_go_version'] = ua['go']
    # Platform fields are normalised to lower case.
    for k in ('arch', 'kernel', 'os'):
        if k in ua:
            props[k] = ua[k].lower()
    return json.dumps(props)
@app.route('/v1/repositories/<path:repository>/tags/<tag>',
           methods=['PUT'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def put_tag(namespace, repository, tag):
    """Point *tag* at an image id supplied as a JSON string in the body.

    Rejects bodies that are not a JSON string ('Invalid data') and image
    ids with no stored image json (404); otherwise stores the tag, emits
    the tag_created signal and records per-tag metadata.
    """
    logger.debug("[put_tag] namespace={0}; repository={1}; tag={2}".format(
        namespace, repository, tag))
    data = None
    try:
        data = json.loads(flask.request.data)
    except json.JSONDecodeError:
        pass
    # The body must decode to a string holding the image id.
    if not data or not isinstance(data, basestring):
        return toolkit.api_error('Invalid data')
    if not store.exists(store.image_json_path(data)):
        return toolkit.api_error('Image not found', 404)
    store.put_content(store.tag_path(namespace, repository, tag), data)
    sender = flask.current_app._get_current_object()
    signals.tag_created.send(sender, namespace=namespace,
                             repository=repository, tag=tag, value=data)
    # Write some meta-data about the repos
    ua = flask.request.headers.get('user-agent', '')
    data = create_tag_json(user_agent=ua)
    json_path = store.repository_tag_json_path(namespace, repository, tag)
    store.put_content(json_path, data)
    if tag == "latest":  # TODO(dustinlacewell) : deprecate this for v2
        # Mirror the metadata at repository level for the legacy
        # /json endpoint served by get_repository_json.
        json_path = store.repository_json_path(namespace, repository)
        store.put_content(json_path, data)
    return toolkit.response()
@app.route('/v1/repositories/<path:repository>/tags/<tag>',
           methods=['DELETE'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def delete_tag(namespace, repository, tag):
    """Remove a single tag and emit the tag_deleted signal.

    Responds 404 when removing the tag path raises OSError.  Note the
    signal send is inside the try block, so an OSError from a receiver
    would also map to 404.
    """
    logger.debug("[delete_tag] namespace={0}; repository={1}; tag={2}".format(
        namespace, repository, tag))
    try:
        store.remove(store.tag_path(namespace, repository, tag))
        sender = flask.current_app._get_current_object()
        signals.tag_deleted.send(sender, namespace=namespace,
                                 repository=repository, tag=tag)
    except OSError:
        return toolkit.api_error('Tag not found', 404)
    return toolkit.response()
@app.route('/v1/repositories/<path:repository>/tags',
           methods=['DELETE'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def delete_repository(namespace, repository):
    """Delete every tag of a repository by removing its tag directory.

    Responds 404 when the tag directory does not exist.
    """
    logger.debug("[delete_repository] namespace={0}; repository={1}".format(
        namespace, repository))
    try:
        store.remove(store.tag_path(namespace, repository))
        #TODO(samalba): Trigger tags_deleted signals
    except OSError:
        return toolkit.api_error('Repository not found', 404)
    return toolkit.response()
| 34.951456 | 79 | 0.663333 |
import datetime
import logging
import re
import time
import flask
import simplejson as json
import signals
import storage
import toolkit
from .app import app
store = storage.load()
logger = logging.getLogger(__name__)
RE_USER_AGENT = re.compile('([^\s/]+)/([^\s/]+)')
@app.route('/v1/repositories/<path:repository>/properties', methods=['PUT'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def set_properties(namespace, repo):
logger.debug("[set_access] namespace={0}; repository={1}".format(namespace,
repo))
data = None
try:
data = json.loads(flask.request.data)
except json.JSONDecodeError:
pass
if not data or not isinstance(data, dict):
return toolkit.api_error('Invalid data')
private_flag_path = store.private_flag_path(namespace, repo)
if data['access'] == 'private' and not store.is_private(namespace, repo):
store.put_content(private_flag_path, '')
elif data['access'] == 'public' and store.is_private(namespace, repo):
store.remove(private_flag_path)
return toolkit.response()
@app.route('/v1/repositories/<path:repository>/properties', methods=['GET'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def get_properties(namespace, repo):
logger.debug("[get_access] namespace={0}; repository={1}".format(namespace,
repo))
is_private = store.is_private(namespace, repo)
return toolkit.response({
'access': 'private' if is_private else 'public'
})
@app.route('/v1/repositories/<path:repository>/tags', methods=['GET'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def get_tags(namespace, repository):
logger.debug("[get_tags] namespace={0}; repository={1}".format(namespace,
repository))
data = {}
try:
for fname in store.list_directory(store.tag_path(namespace,
repository)):
tag_name = fname.split('/').pop()
if not tag_name.startswith('tag_'):
continue
data[tag_name[4:]] = store.get_content(fname)
except OSError:
return toolkit.api_error('Repository not found', 404)
return toolkit.response(data)
@app.route('/v1/repositories/<path:repository>/tags/<tag>', methods=['GET'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def get_tag(namespace, repository, tag):
logger.debug("[get_tag] namespace={0}; repository={1}; tag={2}".format(
namespace, repository, tag))
data = None
try:
data = store.get_content(store.tag_path(namespace, repository, tag))
except IOError:
return toolkit.api_error('Tag not found', 404)
return toolkit.response(data)
@app.route('/v1/repositories/<path:repository>/json', methods=['GET'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def get_repository_json(namespace, repository):
json_path = store.repository_json_path(namespace, repository)
data = {'last_update': None,
'docker_version': None,
'docker_go_version': None,
'arch': 'amd64',
'os': 'linux',
'kernel': None}
try:
data = json.loads(store.get_content(json_path))
except IOError:
pass
return toolkit.response(data)
@app.route(
'/v1/repositories/<path:repository>/tags/<tag>/json',
methods=['GET'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def get_repository_tag_json(namespace, repository, tag):
json_path = store.repository_tag_json_path(namespace, repository, tag)
data = {'last_update': None,
'docker_version': None,
'docker_go_version': None,
'arch': 'amd64',
'os': 'linux',
'kernel': None}
try:
data = json.loads(store.get_content(json_path))
except IOError:
# We ignore the error, we'll serve the default json declared above
pass
return toolkit.response(data)
def create_tag_json(user_agent):
props = {
'last_update': int(time.mktime(datetime.datetime.utcnow().timetuple()))
}
ua = dict(RE_USER_AGENT.findall(user_agent))
if 'docker' in ua:
props['docker_version'] = ua['docker']
if 'go' in ua:
props['docker_go_version'] = ua['go']
for k in ['arch', 'kernel', 'os']:
if k in ua:
props[k] = ua[k].lower()
return json.dumps(props)
@app.route('/v1/repositories/<path:repository>/tags/<tag>',
methods=['PUT'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def put_tag(namespace, repository, tag):
logger.debug("[put_tag] namespace={0}; repository={1}; tag={2}".format(
namespace, repository, tag))
data = None
try:
data = json.loads(flask.request.data)
except json.JSONDecodeError:
pass
if not data or not isinstance(data, basestring):
return toolkit.api_error('Invalid data')
if not store.exists(store.image_json_path(data)):
return toolkit.api_error('Image not found', 404)
store.put_content(store.tag_path(namespace, repository, tag), data)
sender = flask.current_app._get_current_object()
signals.tag_created.send(sender, namespace=namespace,
repository=repository, tag=tag, value=data)
ua = flask.request.headers.get('user-agent', '')
data = create_tag_json(user_agent=ua)
json_path = store.repository_tag_json_path(namespace, repository, tag)
store.put_content(json_path, data)
if tag == "latest":
json_path = store.repository_json_path(namespace, repository)
store.put_content(json_path, data)
return toolkit.response()
@app.route('/v1/repositories/<path:repository>/tags/<tag>',
methods=['DELETE'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def delete_tag(namespace, repository, tag):
logger.debug("[delete_tag] namespace={0}; repository={1}; tag={2}".format(
namespace, repository, tag))
try:
store.remove(store.tag_path(namespace, repository, tag))
sender = flask.current_app._get_current_object()
signals.tag_deleted.send(sender, namespace=namespace,
repository=repository, tag=tag)
except OSError:
return toolkit.api_error('Tag not found', 404)
return toolkit.response()
@app.route('/v1/repositories/<path:repository>/tags',
methods=['DELETE'])
@toolkit.parse_repository_name
@toolkit.requires_auth
def delete_repository(namespace, repository):
logger.debug("[delete_repository] namespace={0}; repository={1}".format(
namespace, repository))
try:
store.remove(store.tag_path(namespace, repository))
except OSError:
return toolkit.api_error('Repository not found', 404)
return toolkit.response()
| true | true |
1c31ca05aefdd7ae97611e51466681ff81e8955e | 3,133 | py | Python | project/models/project.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | project/models/project.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | project/models/project.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ---------------------------------------------------------------------
# Project models
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import operator
from threading import Lock
# Third-party modules
from django.db import models
import cachetools
# NOC modules
from noc.core.model.base import NOCModel
from noc.core.model.decorator import on_delete_check
from noc.core.model.fields import DocumentReferenceField
from noc.main.models.glyph import Glyph
from noc.main.models.remotesystem import RemoteSystem
from noc.core.bi.decorator import bi_sync
from noc.core.topology.types import ShapeOverlayPosition, ShapeOverlayForm
id_lock = Lock()
# @bi_sync keeps bi_id in sync with the BI subsystem; @on_delete_check
# refuses deletion while any of the listed models still references this
# project through its "project" field.
@bi_sync
@on_delete_check(
    check=[
        ("crm.Subscriber", "project"),
        ("crm.Supplier", "project"),
        ("dns.DNSZone", "project"),
        ("inv.Interface", "project"),
        ("inv.SubInterface", "project"),
        ("ip.Address", "project"),
        ("ip.Prefix", "project"),
        ("ip.VRF", "project"),
        ("peer.AS", "project"),
        ("peer.ASSet", "project"),
        ("peer.Peer", "project"),
        ("phone.PhoneNumber", "project"),
        ("phone.PhoneRange", "project"),
        ("sa.ManagedObject", "project"),
        ("vc.VC", "project"),
        ("vc.VPN", "project"),
        ("vc.VLAN", "project"),
    ]
)
class Project(NOCModel):
    """
    Projects are used to track investment projects expenses and profits
    """

    class Meta(object):
        verbose_name = "Project"
        verbose_name_plural = "Projects"
        app_label = "project"
        db_table = "project_project"

    # Unique project code; also used as the string representation.
    code = models.CharField("Code", max_length=256, unique=True)
    name = models.CharField("Name", max_length=256)
    description = models.TextField("Description", null=True, blank=True)
    # Optional topology-map shape overlay settings (glyph, position, form).
    shape_overlay_glyph = DocumentReferenceField(Glyph, null=True, blank=True)
    shape_overlay_position = models.CharField(
        "S.O. Position",
        max_length=2,
        choices=[(x.value, x.value) for x in ShapeOverlayPosition],
        null=True,
        blank=True,
    )
    shape_overlay_form = models.CharField(
        "S.O. Form",
        max_length=1,
        choices=[(x.value, x.value) for x in ShapeOverlayForm],
        null=True,
        blank=True,
    )
    # Integration with external NRI systems
    # Reference to remote system object has been imported from
    remote_system = DocumentReferenceField(RemoteSystem, null=True, blank=True)
    # Object id in remote system
    remote_id = models.CharField(max_length=64, null=True, blank=True)
    # Object id in BI
    bi_id = models.BigIntegerField(unique=True)

    # Shared get_by_id cache: up to 100 entries with a 60 s TTL,
    # guarded by the module-level id_lock.
    _id_cache = cachetools.TTLCache(100, ttl=60)

    def __str__(self):
        return self.code

    @classmethod
    @cachetools.cachedmethod(operator.attrgetter("_id_cache"), lock=lambda x: id_lock)
    def get_by_id(cls, id):
        """Fetch a Project by primary key (cached); None when not found."""
        p = Project.objects.filter(id=id)[:1]
        if p:
            return p[0]
        return None
| 31.646465 | 86 | 0.604532 |
import operator
from threading import Lock
from django.db import models
import cachetools
from noc.core.model.base import NOCModel
from noc.core.model.decorator import on_delete_check
from noc.core.model.fields import DocumentReferenceField
from noc.main.models.glyph import Glyph
from noc.main.models.remotesystem import RemoteSystem
from noc.core.bi.decorator import bi_sync
from noc.core.topology.types import ShapeOverlayPosition, ShapeOverlayForm
id_lock = Lock()
@bi_sync
@on_delete_check(
check=[
("crm.Subscriber", "project"),
("crm.Supplier", "project"),
("dns.DNSZone", "project"),
("inv.Interface", "project"),
("inv.SubInterface", "project"),
("ip.Address", "project"),
("ip.Prefix", "project"),
("ip.VRF", "project"),
("peer.AS", "project"),
("peer.ASSet", "project"),
("peer.Peer", "project"),
("phone.PhoneNumber", "project"),
("phone.PhoneRange", "project"),
("sa.ManagedObject", "project"),
("vc.VC", "project"),
("vc.VPN", "project"),
("vc.VLAN", "project"),
]
)
class Project(NOCModel):
class Meta(object):
verbose_name = "Project"
verbose_name_plural = "Projects"
app_label = "project"
db_table = "project_project"
code = models.CharField("Code", max_length=256, unique=True)
name = models.CharField("Name", max_length=256)
description = models.TextField("Description", null=True, blank=True)
shape_overlay_glyph = DocumentReferenceField(Glyph, null=True, blank=True)
shape_overlay_position = models.CharField(
"S.O. Position",
max_length=2,
choices=[(x.value, x.value) for x in ShapeOverlayPosition],
null=True,
blank=True,
)
shape_overlay_form = models.CharField(
"S.O. Form",
max_length=1,
choices=[(x.value, x.value) for x in ShapeOverlayForm],
null=True,
blank=True,
)
remote_system = DocumentReferenceField(RemoteSystem, null=True, blank=True)
remote_id = models.CharField(max_length=64, null=True, blank=True)
bi_id = models.BigIntegerField(unique=True)
_id_cache = cachetools.TTLCache(100, ttl=60)
def __str__(self):
return self.code
@classmethod
@cachetools.cachedmethod(operator.attrgetter("_id_cache"), lock=lambda x: id_lock)
def get_by_id(cls, id):
p = Project.objects.filter(id=id)[:1]
if p:
return p[0]
return None
| true | true |
1c31ca10d2ab9cbc594ee3845c8324feabaa2bfe | 28,237 | py | Python | manim/scene/scene.py | mrdon/manim | d023c76cdef915226bb9a1e41c20168959a02881 | [
"MIT"
] | null | null | null | manim/scene/scene.py | mrdon/manim | d023c76cdef915226bb9a1e41c20168959a02881 | [
"MIT"
] | null | null | null | manim/scene/scene.py | mrdon/manim | d023c76cdef915226bb9a1e41c20168959a02881 | [
"MIT"
] | null | null | null | """Basic canvas for animations."""
__all__ = ["Scene"]
import inspect
import random
import warnings
import platform
import copy
from tqdm import tqdm as ProgressDisplay
import numpy as np
from .. import config, logger
from ..animation.animation import Animation, Wait
from ..animation.transform import MoveToTarget, ApplyMethod
from ..camera.camera import Camera
from ..constants import *
from ..container import Container
from ..mobject.mobject import Mobject
from ..scene.scene_file_writer import SceneFileWriter
from ..utils.iterables import list_update, list_difference_update
from ..utils.hashing import get_hash_from_play_call, get_hash_from_wait_call
from ..utils.family import extract_mobject_family_members
from ..renderer.cairo_renderer import CairoRenderer
from ..utils.exceptions import EndSceneEarlyException
class Scene(Container):
"""A Scene is the canvas of your animation.
All of your own named Scenes will be subclasses of Scene, or other named
scenes.
Examples
--------
Override the construct() method to tell Manim what should go on in the
Scene.
.. code-block:: python
class MyScene(Scene):
def construct(self):
self.play(
Write(Text("Hello World!"))
)
Some important variables to note are:
camera: The camera object to be used for the scene.
file_writer : The object that writes the animations in the scene to a video file.
mobjects : The list of mobjects present in the scene.
foreground_mobjects : List of mobjects explicitly in the foreground.
random_seed: The seed with which all random operations are done.
"""
CONFIG = {
"camera_class": Camera,
"skip_animations": False,
"always_update_mobjects": False,
"random_seed": 0,
}
def __init__(self, renderer=None, **kwargs):
Container.__init__(self, **kwargs)
if renderer is None:
self.renderer = CairoRenderer(camera_class=self.camera_class)
else:
self.renderer = renderer
self.renderer.init(self)
self.mobjects = []
# TODO, remove need for foreground mobjects
self.foreground_mobjects = []
if self.random_seed is not None:
random.seed(self.random_seed)
np.random.seed(self.random_seed)
self.setup()
def render(self):
"""
Render this Scene.
"""
self.original_skipping_status = config["skip_animations"]
try:
self.construct()
except EndSceneEarlyException:
pass
self.tear_down()
# We have to reset these settings in case of multiple renders.
config["skip_animations"] = self.original_skipping_status
self.renderer.finish(self)
logger.info(
f"Rendered {str(self)}\nPlayed {self.renderer.num_plays} animations"
)
def setup(self):
"""
This is meant to be implemented by any scenes which
are comonly subclassed, and have some common setup
involved before the construct method is called.
"""
pass
def tear_down(self):
"""
This is meant to be implemented by any scenes which
are comonly subclassed, and have some common method
to be invoked before the scene ends.
"""
pass
def construct(self):
"""
The primary method for constructing (i.e adding content to)
the Scene.
"""
pass # To be implemented in subclasses
def __str__(self):
return self.__class__.__name__
def set_variables_as_attrs(self, *objects, **newly_named_objects):
"""
This method is slightly hacky, making it a little easier
for certain methods (typically subroutines of construct)
to share local variables.
"""
caller_locals = inspect.currentframe().f_back.f_locals
for key, value in list(caller_locals.items()):
for o in objects:
if value is o:
setattr(self, key, value)
for key, value in list(newly_named_objects.items()):
setattr(self, key, value)
return self
def get_attrs(self, *keys):
"""
Gets attributes of a scene given the attribute's identifier/name.
Parameters
----------
*keys : str
Name(s) of the argument(s) to return the attribute of.
Returns
-------
list
List of attributes of the passed identifiers.
"""
return [getattr(self, key) for key in keys]
def update_mobjects(self, dt):
"""
Begins updating all mobjects in the Scene.
Parameters
----------
dt: int or float
Change in time between updates. Defaults (mostly) to 1/frames_per_second
"""
for mobject in self.mobjects:
mobject.update(dt)
def should_update_mobjects(self):
"""
Returns True if any mobject in Scene is being updated
or if the scene has always_update_mobjects set to true.
Returns
-------
bool
"""
return self.always_update_mobjects or any(
[mob.has_time_based_updater() for mob in self.get_mobject_family_members()]
)
def get_top_level_mobjects(self):
"""
Returns all mobjects which are not submobjects.
Returns
-------
list
List of top level mobjects.
"""
# Return only those which are not in the family
# of another mobject from the scene
mobjects = self.get_mobjects()
families = [m.get_family() for m in mobjects]
def is_top_level(mobject):
num_families = sum([(mobject in family) for family in families])
return num_families == 1
return list(filter(is_top_level, mobjects))
def get_mobject_family_members(self):
"""
Returns list of family-members of all mobjects in scene.
If a Circle() and a VGroup(Rectangle(),Triangle()) were added,
it returns not only the Circle(), Rectangle() and Triangle(), but
also the VGroup() object.
Returns
-------
list
List of mobject family members.
"""
return extract_mobject_family_members(
self.mobjects, use_z_index=self.renderer.camera.use_z_index
)
def add(self, *mobjects):
"""
Mobjects will be displayed, from background to
foreground in the order with which they are added.
Parameters
---------
*mobjects : Mobject
Mobjects to add.
Returns
-------
Scene
The same scene after adding the Mobjects in.
"""
mobjects = [*mobjects, *self.foreground_mobjects]
self.restructure_mobjects(to_remove=mobjects)
self.mobjects += mobjects
return self
def add_mobjects_among(self, values):
"""
This is meant mostly for quick prototyping,
e.g. to add all mobjects defined up to a point,
call self.add_mobjects_among(locals().values())
"""
self.add(*filter(lambda m: isinstance(m, Mobject), values))
return self
def add_mobjects_from_animations(self, animations):
curr_mobjects = self.get_mobject_family_members()
for animation in animations:
# Anything animated that's not already in the
# scene gets added to the scene
mob = animation.mobject
if mob is not None and mob not in curr_mobjects:
self.add(mob)
curr_mobjects += mob.get_family()
def remove(self, *mobjects):
"""
Removes mobjects in the passed list of mobjects
from the scene and the foreground, by removing them
from "mobjects" and "foreground_mobjects"
Parameters
----------
*mobjects : Mobject
The mobjects to remove.
"""
for list_name in "mobjects", "foreground_mobjects":
self.restructure_mobjects(mobjects, list_name, False)
return self
def restructure_mobjects(
self, to_remove, mobject_list_name="mobjects", extract_families=True
):
"""
tl:wr
If your scene has a Group(), and you removed a mobject from the Group,
this dissolves the group and puts the rest of the mobjects directly
in self.mobjects or self.foreground_mobjects.
In cases where the scene contains a group, e.g. Group(m1, m2, m3), but one
of its submobjects is removed, e.g. scene.remove(m1), the list of mobjects
will be edited to contain other submobjects, but not m1, e.g. it will now
insert m2 and m3 to where the group once was.
Parameters
----------
to_remove : Mobject
The Mobject to remove.
mobject_list_name : str, optional
The list of mobjects ("mobjects", "foreground_mobjects" etc) to remove from.
extract_families : bool, optional
Whether the mobject's families should be recursively extracted.
Returns
-------
Scene
The Scene mobject with restructured Mobjects.
"""
if extract_families:
to_remove = extract_mobject_family_members(
to_remove, use_z_index=self.renderer.camera.use_z_index
)
_list = getattr(self, mobject_list_name)
new_list = self.get_restructured_mobject_list(_list, to_remove)
setattr(self, mobject_list_name, new_list)
return self
def get_restructured_mobject_list(self, mobjects, to_remove):
"""
Given a list of mobjects and a list of mobjects to be removed, this
filters out the removable mobjects from the list of mobjects.
Parameters
----------
mobjects : list
The Mobjects to check.
to_remove : list
The list of mobjects to remove.
Returns
-------
list
The list of mobjects with the mobjects to remove removed.
"""
new_mobjects = []
def add_safe_mobjects_from_list(list_to_examine, set_to_remove):
for mob in list_to_examine:
if mob in set_to_remove:
continue
intersect = set_to_remove.intersection(mob.get_family())
if intersect:
add_safe_mobjects_from_list(mob.submobjects, intersect)
else:
new_mobjects.append(mob)
add_safe_mobjects_from_list(mobjects, set(to_remove))
return new_mobjects
# TODO, remove this, and calls to this
def add_foreground_mobjects(self, *mobjects):
"""
Adds mobjects to the foreground, and internally to the list
foreground_mobjects, and mobjects.
Parameters
----------
*mobjects : Mobject
The Mobjects to add to the foreground.
Returns
------
Scene
The Scene, with the foreground mobjects added.
"""
self.foreground_mobjects = list_update(self.foreground_mobjects, mobjects)
self.add(*mobjects)
return self
def add_foreground_mobject(self, mobject):
"""
Adds a single mobject to the foreground, and internally to the list
foreground_mobjects, and mobjects.
Parameters
----------
mobject : Mobject
The Mobject to add to the foreground.
Returns
------
Scene
The Scene, with the foreground mobject added.
"""
return self.add_foreground_mobjects(mobject)
def remove_foreground_mobjects(self, *to_remove):
"""
Removes mobjects from the foreground, and internally from the list
foreground_mobjects.
Parameters
----------
*to_remove : Mobject
The mobject(s) to remove from the foreground.
Returns
------
Scene
The Scene, with the foreground mobjects removed.
"""
self.restructure_mobjects(to_remove, "foreground_mobjects")
return self
def remove_foreground_mobject(self, mobject):
"""
Removes a single mobject from the foreground, and internally from the list
foreground_mobjects.
Parameters
----------
mobject : Mobject
The mobject to remove from the foreground.
Returns
------
Scene
The Scene, with the foreground mobject removed.
"""
return self.remove_foreground_mobjects(mobject)
def bring_to_front(self, *mobjects):
"""
Adds the passed mobjects to the scene again,
pushing them to he front of the scene.
Parameters
----------
*mobjects : Mobject
The mobject(s) to bring to the front of the scene.
Returns
------
Scene
The Scene, with the mobjects brought to the front
of the scene.
"""
self.add(*mobjects)
return self
def bring_to_back(self, *mobjects):
"""
Removes the mobject from the scene and
adds them to the back of the scene.
Parameters
----------
*mobjects : Mobject
The mobject(s) to push to the back of the scene.
Returns
------
Scene
The Scene, with the mobjects pushed to the back
of the scene.
"""
self.remove(*mobjects)
self.mobjects = list(mobjects) + self.mobjects
return self
def clear(self):
"""
Removes all mobjects present in self.mobjects
and self.foreground_mobjects from the scene.
Returns
------
Scene
The Scene, with all of its mobjects in
self.mobjects and self.foreground_mobjects
removed.
"""
self.mobjects = []
self.foreground_mobjects = []
return self
def get_mobjects(self):
"""
Returns all the mobjects in self.mobjects
Returns
------
list
The list of self.mobjects .
"""
return list(self.mobjects)
def get_mobject_copies(self):
"""
Returns a copy of all mobjects present in
self.mobjects .
Returns
------
list
A list of the copies of all the mobjects
in self.mobjects
"""
return [m.copy() for m in self.mobjects]
def get_moving_mobjects(self, *animations):
"""
Gets all moving mobjects in the passed animation(s).
Parameters
----------
*animations : Animation
The animations to check for moving mobjects.
Returns
------
list
The list of mobjects that could be moving in
the Animation(s)
"""
# Go through mobjects from start to end, and
# as soon as there's one that needs updating of
# some kind per frame, return the list from that
# point forward.
animation_mobjects = [anim.mobject for anim in animations]
mobjects = self.get_mobject_family_members()
for i, mob in enumerate(mobjects):
update_possibilities = [
mob in animation_mobjects,
len(mob.get_family_updaters()) > 0,
mob in self.foreground_mobjects,
]
if any(update_possibilities):
return mobjects[i:]
return []
def get_moving_and_stationary_mobjects(self, animations):
moving_mobjects = self.get_moving_mobjects(*animations)
all_mobjects = list_update(self.mobjects, self.foreground_mobjects)
all_mobject_families = extract_mobject_family_members(
all_mobjects,
use_z_index=self.renderer.camera.use_z_index,
only_those_with_points=True,
)
moving_mobjects = self.get_moving_mobjects(*animations)
all_moving_mobject_families = extract_mobject_family_members(
moving_mobjects,
use_z_index=self.renderer.camera.use_z_index,
)
stationary_mobjects = list_difference_update(
all_mobject_families, all_moving_mobject_families
)
return all_moving_mobject_families, stationary_mobjects
def compile_play_args_to_animation_list(self, *args, **kwargs):
"""
Each arg can either be an animation, or a mobject method
followed by that methods arguments (and potentially follow
by a dict of kwargs for that method).
This animation list is built by going through the args list,
and each animation is simply added, but when a mobject method
s hit, a MoveToTarget animation is built using the args that
follow up until either another animation is hit, another method
is hit, or the args list runs out.
Parameters
----------
*args : Animation or method of a mobject, which is followed by that method's arguments
**kwargs : any named arguments like run_time or lag_ratio.
Returns
-------
list : list of animations with the parameters applied to them.
"""
animations = []
state = {
"curr_method": None,
"last_method": None,
"method_args": [],
}
def compile_method(state):
if state["curr_method"] is None:
return
mobject = state["curr_method"].__self__
if state["last_method"] and state["last_method"].__self__ is mobject:
animations.pop()
# method should already have target then.
else:
mobject.generate_target()
#
if len(state["method_args"]) > 0 and isinstance(
state["method_args"][-1], dict
):
method_kwargs = state["method_args"].pop()
else:
method_kwargs = {}
state["curr_method"].__func__(
mobject.target, *state["method_args"], **method_kwargs
)
animations.append(MoveToTarget(mobject))
state["last_method"] = state["curr_method"]
state["curr_method"] = None
state["method_args"] = []
for arg in args:
if isinstance(arg, Animation):
compile_method(state)
animations.append(arg)
elif inspect.ismethod(arg):
compile_method(state)
state["curr_method"] = arg
elif state["curr_method"] is not None:
state["method_args"].append(arg)
elif isinstance(arg, Mobject):
raise ValueError(
"""
I think you may have invoked a method
you meant to pass in as a Scene.play argument
"""
)
else:
raise ValueError("Invalid play arguments")
compile_method(state)
for animation in animations:
# This is where kwargs to play like run_time and rate_func
# get applied to all animations
animation.update_config(**kwargs)
return animations
def get_time_progression(
self, run_time, n_iterations=None, override_skip_animations=False
):
"""
You will hardly use this when making your own animations.
This method is for Manim's internal use.
Returns a CommandLine ProgressBar whose ``fill_time``
is dependent on the ``run_time`` of an animation,
the iterations to perform in that animation
and a bool saying whether or not to consider
the skipped animations.
Parameters
----------
run_time : float
The ``run_time`` of the animation.
n_iterations : int, optional
The number of iterations in the animation.
override_skip_animations : bool, optional
Whether or not to show skipped animations in the progress bar.
Returns
-------
ProgressDisplay
The CommandLine Progress Bar.
"""
if config["skip_animations"] and not override_skip_animations:
times = [run_time]
else:
step = 1 / self.renderer.camera.frame_rate
times = np.arange(0, run_time, step)
time_progression = ProgressDisplay(
times,
total=n_iterations,
leave=config["leave_progress_bars"],
ascii=True if platform.system() == "Windows" else None,
disable=not config["progress_bar"],
)
return time_progression
def get_animation_time_progression(self, animations):
"""
You will hardly use this when making your own animations.
This method is for Manim's internal use.
Uses :func:`~.get_time_progression` to obtain a
CommandLine ProgressBar whose ``fill_time`` is
dependent on the qualities of the passed Animation,
Parameters
----------
animations : List[:class:`~.Animation`, ...]
The list of animations to get
the time progression for.
Returns
-------
ProgressDisplay
The CommandLine Progress Bar.
"""
run_time = self.get_run_time(animations)
time_progression = self.get_time_progression(run_time)
time_progression.set_description(
"".join(
[
"Animation {}: ".format(self.renderer.num_plays),
str(animations[0]),
(", etc." if len(animations) > 1 else ""),
]
)
)
return time_progression
def get_wait_time_progression(self, duration, stop_condition):
"""
This method is used internally to obtain the CommandLine
Progressbar for when self.wait() is called in a scene.
Parameters
----------
duration : int or float
duration of wait time
stop_condition : function
The function which determines whether to continue waiting.
Returns
-------
ProgressBar
The CommandLine ProgressBar of the wait time
"""
if stop_condition is not None:
time_progression = self.get_time_progression(
duration,
n_iterations=-1, # So it doesn't show % progress
override_skip_animations=True,
)
time_progression.set_description(
"Waiting for {}".format(stop_condition.__name__)
)
else:
time_progression = self.get_time_progression(duration)
time_progression.set_description(
"Waiting {}".format(self.renderer.num_plays)
)
return time_progression
def get_run_time(self, animations):
"""
Gets the total run time for a list of animations.
Parameters
----------
animations : List[:class:`Animation`, ...]
A list of the animations whose total
``run_time`` is to be calculated.
Returns
-------
float
The total ``run_time`` of all of the animations in the list.
"""
return np.max([animation.run_time for animation in animations])
def play(self, *args, **kwargs):
self.renderer.play(self, *args, **kwargs)
def wait(self, duration=DEFAULT_WAIT_TIME, stop_condition=None):
self.play(Wait(duration=duration, stop_condition=stop_condition))
def wait_until(self, stop_condition, max_time=60):
"""
Like a wrapper for wait().
You pass a function that determines whether to continue waiting,
and a max wait time if that is never fulfilled.
Parameters
----------
stop_condition : function
The function whose boolean return value determines whether to continue waiting
max_time : int or float, optional
The maximum wait time in seconds, if the stop_condition is never fulfilled.
"""
self.wait(max_time, stop_condition=stop_condition)
def play_internal(self, *args, **kwargs):
"""
This method is used to prep the animations for rendering,
apply the arguments and parameters required to them,
render them, and write them to the video file.
Parameters
----------
*args : Animation or mobject with mobject method and params
**kwargs : named parameters affecting what was passed in *args e.g
run_time, lag_ratio etc.
"""
if len(args) == 0:
warnings.warn("Called Scene.play with no animations")
return
animations = self.compile_play_args_to_animation_list(*args, **kwargs)
if (
len(animations) == 1
and isinstance(animations[0], Wait)
and not self.should_update_mobjects()
):
self.add_static_frames(animations[0].duration)
return
moving_mobjects = None
static_mobjects = None
duration = None
stop_condition = None
time_progression = None
if len(animations) == 1 and isinstance(animations[0], Wait):
# TODO, be smart about setting a static image
# the same way Scene.play does
duration = animations[0].duration
stop_condition = animations[0].stop_condition
self.static_image = None
time_progression = self.get_wait_time_progression(duration, stop_condition)
else:
# Paint all non-moving objects onto the screen, so they don't
# have to be rendered every frame
(
moving_mobjects,
stationary_mobjects,
) = self.get_moving_and_stationary_mobjects(animations)
self.renderer.update_frame(self, mobjects=stationary_mobjects)
self.static_image = self.renderer.get_frame()
time_progression = self.get_animation_time_progression(animations)
for animation in animations:
animation.begin()
last_t = 0
for t in time_progression:
dt = t - last_t
last_t = t
for animation in animations:
animation.update_mobjects(dt)
alpha = t / animation.run_time
animation.interpolate(alpha)
self.update_mobjects(dt)
self.renderer.update_frame(self, moving_mobjects, self.static_image)
self.renderer.add_frame(self.renderer.get_frame())
if stop_condition is not None and stop_condition():
time_progression.close()
break
for animation in animations:
animation.finish()
animation.clean_up_from_scene(self)
def add_static_frames(self, duration):
self.renderer.update_frame(self)
dt = 1 / self.renderer.camera.frame_rate
self.renderer.add_frame(
self.renderer.get_frame(),
num_frames=int(duration / dt),
)
def add_sound(self, sound_file, time_offset=0, gain=None, **kwargs):
"""
This method is used to add a sound to the animation.
Parameters
----------
sound_file : str
The path to the sound file.
time_offset : int,float, optional
The offset in the sound file after which
the sound can be played.
gain :
"""
if config["skip_animations"]:
return
time = self.time + time_offset
self.renderer.file_writer.add_sound(sound_file, time, gain, **kwargs)
| 32.234018 | 94 | 0.5882 |
__all__ = ["Scene"]
import inspect
import random
import warnings
import platform
import copy
from tqdm import tqdm as ProgressDisplay
import numpy as np
from .. import config, logger
from ..animation.animation import Animation, Wait
from ..animation.transform import MoveToTarget, ApplyMethod
from ..camera.camera import Camera
from ..constants import *
from ..container import Container
from ..mobject.mobject import Mobject
from ..scene.scene_file_writer import SceneFileWriter
from ..utils.iterables import list_update, list_difference_update
from ..utils.hashing import get_hash_from_play_call, get_hash_from_wait_call
from ..utils.family import extract_mobject_family_members
from ..renderer.cairo_renderer import CairoRenderer
from ..utils.exceptions import EndSceneEarlyException
class Scene(Container):
CONFIG = {
"camera_class": Camera,
"skip_animations": False,
"always_update_mobjects": False,
"random_seed": 0,
}
def __init__(self, renderer=None, **kwargs):
Container.__init__(self, **kwargs)
if renderer is None:
self.renderer = CairoRenderer(camera_class=self.camera_class)
else:
self.renderer = renderer
self.renderer.init(self)
self.mobjects = []
self.foreground_mobjects = []
if self.random_seed is not None:
random.seed(self.random_seed)
np.random.seed(self.random_seed)
self.setup()
def render(self):
self.original_skipping_status = config["skip_animations"]
try:
self.construct()
except EndSceneEarlyException:
pass
self.tear_down()
config["skip_animations"] = self.original_skipping_status
self.renderer.finish(self)
logger.info(
f"Rendered {str(self)}\nPlayed {self.renderer.num_plays} animations"
)
def setup(self):
pass
def tear_down(self):
pass
def construct(self):
pass
def __str__(self):
return self.__class__.__name__
def set_variables_as_attrs(self, *objects, **newly_named_objects):
caller_locals = inspect.currentframe().f_back.f_locals
for key, value in list(caller_locals.items()):
for o in objects:
if value is o:
setattr(self, key, value)
for key, value in list(newly_named_objects.items()):
setattr(self, key, value)
return self
def get_attrs(self, *keys):
return [getattr(self, key) for key in keys]
def update_mobjects(self, dt):
for mobject in self.mobjects:
mobject.update(dt)
def should_update_mobjects(self):
return self.always_update_mobjects or any(
[mob.has_time_based_updater() for mob in self.get_mobject_family_members()]
)
def get_top_level_mobjects(self):
mobjects = self.get_mobjects()
families = [m.get_family() for m in mobjects]
def is_top_level(mobject):
num_families = sum([(mobject in family) for family in families])
return num_families == 1
return list(filter(is_top_level, mobjects))
def get_mobject_family_members(self):
return extract_mobject_family_members(
self.mobjects, use_z_index=self.renderer.camera.use_z_index
)
def add(self, *mobjects):
mobjects = [*mobjects, *self.foreground_mobjects]
self.restructure_mobjects(to_remove=mobjects)
self.mobjects += mobjects
return self
def add_mobjects_among(self, values):
self.add(*filter(lambda m: isinstance(m, Mobject), values))
return self
def add_mobjects_from_animations(self, animations):
curr_mobjects = self.get_mobject_family_members()
for animation in animations:
# scene gets added to the scene
mob = animation.mobject
if mob is not None and mob not in curr_mobjects:
self.add(mob)
curr_mobjects += mob.get_family()
def remove(self, *mobjects):
for list_name in "mobjects", "foreground_mobjects":
self.restructure_mobjects(mobjects, list_name, False)
return self
def restructure_mobjects(
self, to_remove, mobject_list_name="mobjects", extract_families=True
):
if extract_families:
to_remove = extract_mobject_family_members(
to_remove, use_z_index=self.renderer.camera.use_z_index
)
_list = getattr(self, mobject_list_name)
new_list = self.get_restructured_mobject_list(_list, to_remove)
setattr(self, mobject_list_name, new_list)
return self
def get_restructured_mobject_list(self, mobjects, to_remove):
new_mobjects = []
def add_safe_mobjects_from_list(list_to_examine, set_to_remove):
for mob in list_to_examine:
if mob in set_to_remove:
continue
intersect = set_to_remove.intersection(mob.get_family())
if intersect:
add_safe_mobjects_from_list(mob.submobjects, intersect)
else:
new_mobjects.append(mob)
add_safe_mobjects_from_list(mobjects, set(to_remove))
return new_mobjects
# TODO, remove this, and calls to this
def add_foreground_mobjects(self, *mobjects):
self.foreground_mobjects = list_update(self.foreground_mobjects, mobjects)
self.add(*mobjects)
return self
def add_foreground_mobject(self, mobject):
return self.add_foreground_mobjects(mobject)
def remove_foreground_mobjects(self, *to_remove):
self.restructure_mobjects(to_remove, "foreground_mobjects")
return self
def remove_foreground_mobject(self, mobject):
return self.remove_foreground_mobjects(mobject)
def bring_to_front(self, *mobjects):
self.add(*mobjects)
return self
def bring_to_back(self, *mobjects):
self.remove(*mobjects)
self.mobjects = list(mobjects) + self.mobjects
return self
def clear(self):
self.mobjects = []
self.foreground_mobjects = []
return self
def get_mobjects(self):
return list(self.mobjects)
def get_mobject_copies(self):
return [m.copy() for m in self.mobjects]
def get_moving_mobjects(self, *animations):
# Go through mobjects from start to end, and
# as soon as there's one that needs updating of
animation_mobjects = [anim.mobject for anim in animations]
mobjects = self.get_mobject_family_members()
for i, mob in enumerate(mobjects):
update_possibilities = [
mob in animation_mobjects,
len(mob.get_family_updaters()) > 0,
mob in self.foreground_mobjects,
]
if any(update_possibilities):
return mobjects[i:]
return []
def get_moving_and_stationary_mobjects(self, animations):
moving_mobjects = self.get_moving_mobjects(*animations)
all_mobjects = list_update(self.mobjects, self.foreground_mobjects)
all_mobject_families = extract_mobject_family_members(
all_mobjects,
use_z_index=self.renderer.camera.use_z_index,
only_those_with_points=True,
)
moving_mobjects = self.get_moving_mobjects(*animations)
all_moving_mobject_families = extract_mobject_family_members(
moving_mobjects,
use_z_index=self.renderer.camera.use_z_index,
)
stationary_mobjects = list_difference_update(
all_mobject_families, all_moving_mobject_families
)
return all_moving_mobject_families, stationary_mobjects
def compile_play_args_to_animation_list(self, *args, **kwargs):
animations = []
state = {
"curr_method": None,
"last_method": None,
"method_args": [],
}
def compile_method(state):
if state["curr_method"] is None:
return
mobject = state["curr_method"].__self__
if state["last_method"] and state["last_method"].__self__ is mobject:
animations.pop()
else:
mobject.generate_target()
if len(state["method_args"]) > 0 and isinstance(
state["method_args"][-1], dict
):
method_kwargs = state["method_args"].pop()
else:
method_kwargs = {}
state["curr_method"].__func__(
mobject.target, *state["method_args"], **method_kwargs
)
animations.append(MoveToTarget(mobject))
state["last_method"] = state["curr_method"]
state["curr_method"] = None
state["method_args"] = []
for arg in args:
if isinstance(arg, Animation):
compile_method(state)
animations.append(arg)
elif inspect.ismethod(arg):
compile_method(state)
state["curr_method"] = arg
elif state["curr_method"] is not None:
state["method_args"].append(arg)
elif isinstance(arg, Mobject):
raise ValueError(
"""
I think you may have invoked a method
you meant to pass in as a Scene.play argument
"""
)
else:
raise ValueError("Invalid play arguments")
compile_method(state)
for animation in animations:
animation.update_config(**kwargs)
return animations
def get_time_progression(
self, run_time, n_iterations=None, override_skip_animations=False
):
if config["skip_animations"] and not override_skip_animations:
times = [run_time]
else:
step = 1 / self.renderer.camera.frame_rate
times = np.arange(0, run_time, step)
time_progression = ProgressDisplay(
times,
total=n_iterations,
leave=config["leave_progress_bars"],
ascii=True if platform.system() == "Windows" else None,
disable=not config["progress_bar"],
)
return time_progression
def get_animation_time_progression(self, animations):
run_time = self.get_run_time(animations)
time_progression = self.get_time_progression(run_time)
time_progression.set_description(
"".join(
[
"Animation {}: ".format(self.renderer.num_plays),
str(animations[0]),
(", etc." if len(animations) > 1 else ""),
]
)
)
return time_progression
def get_wait_time_progression(self, duration, stop_condition):
if stop_condition is not None:
time_progression = self.get_time_progression(
duration,
n_iterations=-1,
override_skip_animations=True,
)
time_progression.set_description(
"Waiting for {}".format(stop_condition.__name__)
)
else:
time_progression = self.get_time_progression(duration)
time_progression.set_description(
"Waiting {}".format(self.renderer.num_plays)
)
return time_progression
def get_run_time(self, animations):
return np.max([animation.run_time for animation in animations])
def play(self, *args, **kwargs):
self.renderer.play(self, *args, **kwargs)
def wait(self, duration=DEFAULT_WAIT_TIME, stop_condition=None):
self.play(Wait(duration=duration, stop_condition=stop_condition))
def wait_until(self, stop_condition, max_time=60):
self.wait(max_time, stop_condition=stop_condition)
def play_internal(self, *args, **kwargs):
    """Drive the given animations frame by frame and emit frames to the renderer.

    A lone ``Wait`` with no mobject updaters is shortcut to repeated static
    frames.  Otherwise stationary mobjects are rendered once into a cached
    background image and only moving mobjects are re-rendered per frame.
    """
    if len(args) == 0:
        warnings.warn("Called Scene.play with no animations")
        return
    animations = self.compile_play_args_to_animation_list(*args, **kwargs)
    # Fast path: a pure wait with nothing updating can be emitted as
    # duplicated static frames instead of re-rendering every frame.
    if (
        len(animations) == 1
        and isinstance(animations[0], Wait)
        and not self.should_update_mobjects()
    ):
        self.add_static_frames(animations[0].duration)
        return
    moving_mobjects = None
    static_mobjects = None
    duration = None
    stop_condition = None
    time_progression = None
    if len(animations) == 1 and isinstance(animations[0], Wait):
        # TODO, be smart about setting a static image
        # the same way Scene.play does
        duration = animations[0].duration
        stop_condition = animations[0].stop_condition
        self.static_image = None
        time_progression = self.get_wait_time_progression(duration, stop_condition)
    else:
        # Paint all non-moving objects onto the screen once, so they don't
        # have to be re-rendered on every frame of the animation.
        (
            moving_mobjects,
            stationary_mobjects,
        ) = self.get_moving_and_stationary_mobjects(animations)
        self.renderer.update_frame(self, mobjects=stationary_mobjects)
        self.static_image = self.renderer.get_frame()
        time_progression = self.get_animation_time_progression(animations)
    for animation in animations:
        animation.begin()
    last_t = 0
    for t in time_progression:
        dt = t - last_t
        last_t = t
        for animation in animations:
            animation.update_mobjects(dt)
            # alpha is the normalized progress of this animation at time t
            alpha = t / animation.run_time
            animation.interpolate(alpha)
        self.update_mobjects(dt)
        self.renderer.update_frame(self, moving_mobjects, self.static_image)
        self.renderer.add_frame(self.renderer.get_frame())
        # An open-ended wait stops as soon as its condition is met.
        if stop_condition is not None and stop_condition():
            time_progression.close()
            break
    for animation in animations:
        animation.finish()
        animation.clean_up_from_scene(self)
def add_static_frames(self, duration):
    """Render the scene once and repeat that frame for ``duration`` seconds."""
    self.renderer.update_frame(self)
    # One frame period; duration/dt gives the number of frames to duplicate.
    dt = 1 / self.renderer.camera.frame_rate
    self.renderer.add_frame(
        self.renderer.get_frame(),
        num_frames=int(duration / dt),
    )
def add_sound(self, sound_file, time_offset=0, gain=None, **kwargs):
    """Schedule ``sound_file`` at the current scene time plus ``time_offset``.

    No-op while animations are being skipped.
    """
    if config["skip_animations"]:
        return
    time = self.time + time_offset
    self.renderer.file_writer.add_sound(sound_file, time, gain, **kwargs)
| true | true |
1c31ca64a96685a842ffc2ac7f588afc6a361e73 | 1,303 | py | Python | app/core/tests/test_admin.py | phelixdusengimana/django-backend-api-development | ce3b333b383a7d9f5aefb7335cad6be24b5dcf85 | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | phelixdusengimana/django-backend-api-development | ce3b333b383a7d9f5aefb7335cad6be24b5dcf85 | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | phelixdusengimana/django-backend-api-development | ce3b333b383a7d9f5aefb7335cad6be24b5dcf85 | [
"MIT"
] | null | null | null | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
    """Smoke tests for the custom user model pages in the Django admin."""

    def setUp(self):
        """Log an admin user into the test client and create a regular user."""
        self.client = Client()
        self.admin_user = get_user_model().objects.create_superuser(
            email="phelixdusengimana@gmail.com",
            password="password@123",
        )
        self.client.force_login(self.admin_user)
        self.user = get_user_model().objects.create_user(
            email="test@gmail.com",
            password="test123",
            name="Test user full name",
        )

    def test_users_listed(self):
        """Test that users are listed on page"""
        response = self.client.get(reverse("admin:core_user_changelist"))
        self.assertContains(response, self.user.name)
        self.assertContains(response, self.user.email)

    def test_user_change_page(self):
        """Test user edit page works."""
        change_url = reverse("admin:core_user_change", args=[self.user.id])
        response = self.client.get(change_url)
        self.assertEqual(response.status_code, 200)

    def test_create_user_page(self):
        """Test create user page works."""
        response = self.client.get(reverse('admin:core_user_add'))
        self.assertEqual(response.status_code, 200)
| 32.575 | 68 | 0.638526 | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email="phelixdusengimana@gmail.com",
password="password@123"
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email="test@gmail.com",
password="test123",
name="Test user full name"
)
def test_users_listed(self):
url = reverse("admin:core_user_changelist")
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
url = reverse("admin:core_user_change", args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| true | true |
1c31ca9c4fd078498dab4348fb8d043cf927f485 | 12,782 | py | Python | fast_knn_nmt/custom_fairseq/train/train.py | Crazy-Chick/fast-knn-nmt | 7336bbe0be1240e70d3c3ac71c4e7cfb4f4ea4ff | [
"Apache-2.0"
] | 22 | 2021-05-31T15:14:37.000Z | 2022-03-18T06:26:21.000Z | fast_knn_nmt/custom_fairseq/train/train.py | Crazy-Chick/fast-knn-nmt | 7336bbe0be1240e70d3c3ac71c4e7cfb4f4ea4ff | [
"Apache-2.0"
] | 3 | 2021-10-06T09:54:03.000Z | 2021-10-13T12:11:53.000Z | fast_knn_nmt/custom_fairseq/train/train.py | Crazy-Chick/fast-knn-nmt | 7336bbe0be1240e70d3c3ac71c4e7cfb4f4ea4ff | [
"Apache-2.0"
] | 4 | 2021-06-02T16:12:02.000Z | 2022-02-28T12:18:24.000Z | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import argparse
import logging
import math
import os
import random
import sys
import numpy as np
import torch
from fairseq import (
checkpoint_utils,
distributed_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq.data import iterators
from fairseq.logging import meters, metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairseq.trainer import Trainer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.train")
def main(args):
    """End-to-end training entry point.

    Builds the task, model, criterion and trainer from ``args``, optionally
    warm-starts part of the model from ``--pretrained_part``, restores the
    latest checkpoint, then trains epoch by epoch until the learning rate
    drops below ``args.min_lr`` or a stop condition fires.
    """
    utils.import_user_module(args)

    assert (
        args.max_tokens is not None or args.batch_size is not None
    ), "Must specify batch size either with --max-tokens or --batch-size"

    metrics.reset()

    np.random.seed(args.seed)
    utils.set_torch_seed(args.seed)

    if distributed_utils.is_master(args):
        checkpoint_utils.verify_checkpoint_directory(args.save_dir)

    # Print args
    logger.info(args)

    # Setup task, e.g., translation, language modeling, etc.
    task = tasks.setup_task(args)

    # Load valid dataset (we load training data below, based on the latest checkpoint)
    for valid_sub_split in args.valid_subset.split(","):
        task.load_dataset(valid_sub_split, combine=False, epoch=1)

    # Build model and criterion
    model = task.build_model(args)
    criterion = task.build_criterion(args)
    logger.info(model)
    logger.info("task: {} ({})".format(args.task, task.__class__.__name__))
    logger.info("model: {} ({})".format(args.arch, model.__class__.__name__))
    logger.info(
        "criterion: {} ({})".format(args.criterion, criterion.__class__.__name__)
    )
    logger.info(
        "num. model params: {} (num. trained: {})".format(
            sum(p.numel() for p in model.parameters()),
            sum(p.numel() for p in model.parameters() if p.requires_grad),
        )
    )

    # (optionally) Configure quantization
    if args.quantization_config_path is not None:
        quantizer = quantization_utils.Quantizer(
            config_path=args.quantization_config_path,
            max_epoch=args.max_epoch,
            max_update=args.max_update,
        )
    else:
        quantizer = None

    # Build trainer
    if args.model_parallel_size == 1:
        trainer = Trainer(args, task, model, criterion, quantizer)
    else:
        trainer = MegatronTrainer(args, task, model, criterion)

    logger.info(
        "training on {} devices (GPUs/TPUs)".format(args.distributed_world_size)
    )
    logger.info(
        "max tokens per GPU = {} and max sentences per GPU = {}".format(
            args.max_tokens, args.batch_size
        )
    )

    if args.pretrained_part:
        # Warm-start part of the model from another checkpoint; loaded
        # non-strictly so only overlapping parameter names are used.
        if not os.path.exists(args.pretrained_part):
            raise FileNotFoundError(f"pretrained part file {args.pretrained_part} does not exist.")
        logger.info(
            "use pretrained part ckpt at {}".format(args.pretrained_part)
        )
        state = checkpoint_utils.load_checkpoint_to_cpu(args.pretrained_part)
        trainer.get_model().load_state_dict(
            state["model"], strict=False, args=args,
        )

    # Load the latest checkpoint if one is available and restore the
    # corresponding train iterator
    extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
        args,
        trainer,
        # don't cache epoch iterators for sharded datasets
        disable_iterator_cache=task.has_sharded_data("train"),
    )

    # Train until the learning rate gets too small
    max_epoch = args.max_epoch or math.inf
    lr = trainer.get_lr()
    train_meter = meters.StopwatchMeter()
    train_meter.start()
    while lr > args.min_lr and epoch_itr.next_epoch_idx <= max_epoch:
        # train for one epoch
        valid_losses, should_stop = train(args, trainer, task, epoch_itr)
        if should_stop:
            break

        # only use first validation loss to update the learning rate
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])

        epoch_itr = trainer.get_train_iterator(
            epoch_itr.next_epoch_idx,
            # sharded data: get train iterator for next epoch
            load_dataset=task.has_sharded_data("train"),
            # don't cache epoch iterators for sharded datasets
            disable_iterator_cache=task.has_sharded_data("train"),
        )
    train_meter.stop()
    logger.info("done training in {:.1f} seconds".format(train_meter.sum))
def should_stop_early(args, valid_loss):
    """Return True when validation hasn't improved for ``args.patience`` runs.

    Best-so-far and the run counter are memoized as attributes on the
    function itself, so state persists across calls within a process.
    """
    # skip the check when no validation happened this epoch or patience is off
    if valid_loss is None or args.patience <= 0:
        return False

    def is_better(current, best):
        if args.maximize_best_checkpoint_metric:
            return current > best
        return current < best

    prev_best = getattr(should_stop_early, "best", None)
    if prev_best is None or is_better(valid_loss, prev_best):
        should_stop_early.best = valid_loss
        should_stop_early.num_runs = 0
        return False
    should_stop_early.num_runs += 1
    if should_stop_early.num_runs < args.patience:
        return False
    logger.info(
        "early stop since valid performance hasn't improved for last {} runs".format(
            args.patience
        )
    )
    return True
@metrics.aggregate("train")
def train(args, trainer, task, epoch_itr):
    """Train the model for one epoch and return validation losses."""
    # Initialize data iterator
    itr = epoch_itr.next_epoch_itr(
        fix_batches_to_gpus=args.fix_batches_to_gpus,
        shuffle=(epoch_itr.next_epoch_idx > args.curriculum),
    )
    # Per-epoch gradient-accumulation factor; the last listed value
    # repeats for all epochs beyond the end of the list.
    update_freq = (
        args.update_freq[epoch_itr.epoch - 1]
        if epoch_itr.epoch <= len(args.update_freq)
        else args.update_freq[-1]
    )
    itr = iterators.GroupedIterator(itr, update_freq)
    if getattr(args, "tpu", False):
        itr = utils.tpu_data_loader(itr)
    progress = progress_bar.progress_bar(
        itr,
        log_format=args.log_format,
        log_interval=args.log_interval,
        epoch=epoch_itr.epoch,
        tensorboard_logdir=(
            args.tensorboard_logdir if distributed_utils.is_master(args) else None
        ),
        default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
    )

    trainer.begin_epoch(epoch_itr.epoch)

    valid_losses = [None]
    valid_subsets = args.valid_subset.split(",")
    should_stop = False
    num_updates = trainer.get_num_updates()
    for i, samples in enumerate(progress):
        with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
            "train_step-%d" % i
        ):
            log_output = trainer.train_step(samples)

        if log_output is not None:  # not OOM, overflow, ...
            # log mid-epoch stats
            num_updates = trainer.get_num_updates()
            if num_updates % args.log_interval == 0:
                stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
                progress.log(stats, tag="train_inner", step=num_updates)

                # reset mid-epoch stats after each log interval
                # the end-of-epoch stats will still be preserved
                metrics.reset_meters("train_inner")

        end_of_epoch = not itr.has_next()
        valid_losses, should_stop = validate_and_save(
            args, trainer, task, epoch_itr, valid_subsets, end_of_epoch
        )

        if should_stop:
            break

    # log end-of-epoch stats
    logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
    stats = get_training_stats(metrics.get_smoothed_values("train"))
    progress.print(stats, tag="train", step=num_updates)

    # reset epoch-level meters
    metrics.reset_meters("train")
    return valid_losses, should_stop
def validate_and_save(args, trainer, task, epoch_itr, valid_subsets, end_of_epoch):
    """Possibly run validation and save a checkpoint at this training point.

    Returns ``(valid_losses, should_stop)`` where ``should_stop`` combines
    early stopping, the max-update cap and the wall-clock time limit.
    """
    num_updates = trainer.get_num_updates()
    max_update = args.max_update or math.inf
    do_save = (
        (end_of_epoch and epoch_itr.epoch % args.save_interval == 0)
        or num_updates >= max_update
        or (
            args.save_interval_updates > 0
            and num_updates > 0
            and num_updates % args.save_interval_updates == 0
            and num_updates >= args.validate_after_updates
        )
    )
    do_validate = (
        (not end_of_epoch and do_save)  # validate during mid-epoch saves
        or (end_of_epoch and epoch_itr.epoch % args.validate_interval == 0)
        or num_updates >= max_update
        or (
            args.validate_interval_updates > 0
            and num_updates > 0
            and num_updates % args.validate_interval_updates == 0
        )
    ) and not args.disable_validation

    # Validate
    valid_losses = [None]
    if do_validate:
        valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)

    # Stopping conditions
    should_stop = (
        should_stop_early(args, valid_losses[0])
        or num_updates >= max_update
        or (
            args.stop_time_hours > 0
            and trainer.cumulative_training_time() / (60 * 60) > args.stop_time_hours
        )
    )

    # Save checkpoint
    if do_save or should_stop:
        logger.info("begin save checkpoint")
        checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])

    return valid_losses, should_stop
def get_training_stats(stats):
    """Add rounded wall-clock seconds to the smoothed training stats."""
    stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0)
    return stats
def validate(args, trainer, task, epoch_itr, subsets):
    """Evaluate the model on the validation set(s) and return the losses."""
    if args.fixed_validation_seed is not None:
        # set fixed seed for every validation
        utils.set_torch_seed(args.fixed_validation_seed)

    trainer.begin_valid_epoch(epoch_itr.epoch)
    valid_losses = []
    for subset in subsets:
        logger.info('begin validation on "{}" subset'.format(subset))

        # Initialize data iterator
        itr = trainer.get_valid_iterator(subset).next_epoch_itr(shuffle=False)
        if getattr(args, "tpu", False):
            itr = utils.tpu_data_loader(itr)
        progress = progress_bar.progress_bar(
            itr,
            log_format=args.log_format,
            log_interval=args.log_interval,
            epoch=epoch_itr.epoch,
            prefix=f"valid on '{subset}' subset",
            tensorboard_logdir=(
                args.tensorboard_logdir if distributed_utils.is_master(args) else None
            ),
            default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
        )

        # create a new root metrics aggregator so validation metrics
        # don't pollute other aggregators (e.g., train meters)
        with metrics.aggregate(new_root=True) as agg:
            for sample in progress:
                trainer.valid_step(sample)

        # log validation stats
        stats = get_valid_stats(args, trainer, agg.get_smoothed_values())
        progress.print(stats, tag=subset, step=trainer.get_num_updates())

        # the configured metric (e.g. loss) is what drives early stopping
        valid_losses.append(stats[args.best_checkpoint_metric])
    return valid_losses
def get_valid_stats(args, trainer, stats):
    """Augment validation ``stats`` with the update count and best-so-far metric."""
    stats["num_updates"] = trainer.get_num_updates()
    if hasattr(checkpoint_utils.save_checkpoint, "best"):
        # Track the best metric value seen across all validations so far.
        if args.maximize_best_checkpoint_metric:
            pick_best = max
        else:
            pick_best = min
        stats["best_{0}".format(args.best_checkpoint_metric)] = pick_best(
            checkpoint_utils.save_checkpoint.best,
            stats[args.best_checkpoint_metric],
        )
    return stats
def cli_main(modify_parser=None):
    """Parse command-line args and launch (possibly distributed/profiled) training."""
    parser = options.get_training_parser()
    # Extra flag: non-strict warm start of part of the model (see main()).
    parser.add_argument("--pretrained_part", type=str, default="",
                        help="load pretrained ckpt unstrictly. This is useful when you only want to initialize part"
                        "of model using paramerters from another different model")
    args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
    if args.profile:
        # Wrap training in CUDA/NVTX profiler contexts when requested.
        with torch.cuda.profiler.profile():
            with torch.autograd.profiler.emit_nvtx():
                distributed_utils.call_main(args, main)
    else:
        distributed_utils.call_main(args, main)


if __name__ == "__main__":
    cli_main()
| 34.176471 | 116 | 0.657174 |
import argparse
import logging
import math
import os
import random
import sys
import numpy as np
import torch
from fairseq import (
checkpoint_utils,
distributed_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq.data import iterators
from fairseq.logging import meters, metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairseq.trainer import Trainer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.train")
def main(args):
utils.import_user_module(args)
assert (
args.max_tokens is not None or args.batch_size is not None
), "Must specify batch size either with --max-tokens or --batch-size"
metrics.reset()
np.random.seed(args.seed)
utils.set_torch_seed(args.seed)
if distributed_utils.is_master(args):
checkpoint_utils.verify_checkpoint_directory(args.save_dir)
logger.info(args)
task = tasks.setup_task(args)
for valid_sub_split in args.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
model = task.build_model(args)
criterion = task.build_criterion(args)
logger.info(model)
logger.info("task: {} ({})".format(args.task, task.__class__.__name__))
logger.info("model: {} ({})".format(args.arch, model.__class__.__name__))
logger.info(
"criterion: {} ({})".format(args.criterion, criterion.__class__.__name__)
)
logger.info(
"num. model params: {} (num. trained: {})".format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
)
)
if args.quantization_config_path is not None:
quantizer = quantization_utils.Quantizer(
config_path=args.quantization_config_path,
max_epoch=args.max_epoch,
max_update=args.max_update,
)
else:
quantizer = None
if args.model_parallel_size == 1:
trainer = Trainer(args, task, model, criterion, quantizer)
else:
trainer = MegatronTrainer(args, task, model, criterion)
logger.info(
"training on {} devices (GPUs/TPUs)".format(args.distributed_world_size)
)
logger.info(
"max tokens per GPU = {} and max sentences per GPU = {}".format(
args.max_tokens, args.batch_size
)
)
if args.pretrained_part:
if not os.path.exists(args.pretrained_part):
raise FileNotFoundError(f"pretrained part file {args.pretrained_part} does not exist.")
logger.info(
"use pretrained part ckpt at {}".format(args.pretrained_part)
)
state = checkpoint_utils.load_checkpoint_to_cpu(args.pretrained_part)
trainer.get_model().load_state_dict(
state["model"], strict=False, args=args,
)
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
args,
trainer,
disable_iterator_cache=task.has_sharded_data("train"),
)
# Train until the learning rate gets too small
max_epoch = args.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
while lr > args.min_lr and epoch_itr.next_epoch_idx <= max_epoch:
# train for one epoch
valid_losses, should_stop = train(args, trainer, task, epoch_itr)
if should_stop:
break
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=task.has_sharded_data("train"),
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
train_meter.stop()
logger.info("done training in {:.1f} seconds".format(train_meter.sum))
def should_stop_early(args, valid_loss):
if valid_loss is None:
return False
if args.patience <= 0:
return False
def is_better(a, b):
return a > b if args.maximize_best_checkpoint_metric else a < b
prev_best = getattr(should_stop_early, "best", None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= args.patience:
logger.info(
"early stop since valid performance hasn't improved for last {} runs".format(
args.patience
)
)
return True
else:
return False
@metrics.aggregate("train")
def train(args, trainer, task, epoch_itr):
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=args.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > args.curriculum),
)
update_freq = (
args.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(args.update_freq)
else args.update_freq[-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
trainer.begin_epoch(epoch_itr.epoch)
valid_losses = [None]
valid_subsets = args.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
for i, samples in enumerate(progress):
with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
"train_step-%d" % i
):
log_output = trainer.train_step(samples)
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % args.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
args, trainer, task, epoch_itr, valid_subsets, end_of_epoch
)
if should_stop:
break
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop
def validate_and_save(args, trainer, task, epoch_itr, valid_subsets, end_of_epoch):
num_updates = trainer.get_num_updates()
max_update = args.max_update or math.inf
do_save = (
(end_of_epoch and epoch_itr.epoch % args.save_interval == 0)
or num_updates >= max_update
or (
args.save_interval_updates > 0
and num_updates > 0
and num_updates % args.save_interval_updates == 0
and num_updates >= args.validate_after_updates
)
)
do_validate = (
(not end_of_epoch and do_save) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % args.validate_interval == 0)
or num_updates >= max_update
or (
args.validate_interval_updates > 0
and num_updates > 0
and num_updates % args.validate_interval_updates == 0
)
) and not args.disable_validation
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
# Stopping conditions
should_stop = (
should_stop_early(args, valid_losses[0])
or num_updates >= max_update
or (
args.stop_time_hours > 0
and trainer.cumulative_training_time() / (60 * 60) > args.stop_time_hours
)
)
# Save checkpoint
if do_save or should_stop:
logger.info("begin save checkpoint")
checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
return valid_losses, should_stop
def get_training_stats(stats):
stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0)
return stats
def validate(args, trainer, task, epoch_itr, subsets):
if args.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(args.fixed_validation_seed)
trainer.begin_valid_epoch(epoch_itr.epoch)
valid_losses = []
for subset in subsets:
logger.info('begin validation on "{}" subset'.format(subset))
# Initialize data iterator
itr = trainer.get_valid_iterator(subset).next_epoch_itr(shuffle=False)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for sample in progress:
trainer.valid_step(sample)
stats = get_valid_stats(args, trainer, agg.get_smoothed_values())
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[args.best_checkpoint_metric])
return valid_losses
def get_valid_stats(args, trainer, stats):
stats["num_updates"] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, "best"):
key = "best_{0}".format(args.best_checkpoint_metric)
best_function = max if args.maximize_best_checkpoint_metric else min
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best, stats[args.best_checkpoint_metric]
)
return stats
def cli_main(modify_parser=None):
parser = options.get_training_parser()
parser.add_argument("--pretrained_part", type=str, default="",
help="load pretrained ckpt unstrictly. This is useful when you only want to initialize part"
"of model using paramerters from another different model")
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
if args.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(args, main)
else:
distributed_utils.call_main(args, main)
if __name__ == "__main__":
cli_main()
| true | true |
1c31cb13529eef04215d2078b1a571c501de6798 | 17,719 | py | Python | corehq/apps/reports/standard/project_health.py | rochakchauhan/commcare-hq | aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236 | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/reports/standard/project_health.py | rochakchauhan/commcare-hq | aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236 | [
"BSD-3-Clause"
] | 1 | 2021-06-02T04:45:16.000Z | 2021-06-02T04:45:16.000Z | corehq/apps/reports/standard/project_health.py | rochakchauhan/commcare-hq | aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236 | [
"BSD-3-Clause"
] | null | null | null | import datetime
from collections import namedtuple
from itertools import chain
from django.db.models import Sum
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from memoized import memoized
from dimagi.ext import jsonobject
from dimagi.utils.dates import add_months
from corehq.apps.data_analytics.models import MALTRow
from corehq.apps.domain.models import Domain
from corehq.apps.es.groups import GroupES
from corehq.apps.es.users import UserES
from corehq.apps.hqwebapp.decorators import use_nvd3
from corehq.apps.locations.models import SQLLocation
from corehq.apps.reports.standard import ProjectReport
from corehq.apps.users.util import raw_username
def get_performance_threshold(domain_name):
    """Forms-per-month needed for a user to count as "performing".

    Falls back to 15 when the domain has no explicit threshold configured.
    """
    return Domain.get_by_name(domain_name).internal.performance_threshold or 15
class UserActivityStub(namedtuple('UserStub', ['user_id', 'username', 'num_forms_submitted',
                                               'is_performing', 'previous_stub', 'next_stub'])):
    """One user's activity in a single month, linked to adjacent months."""

    @property
    def is_active(self):
        """True when the user submitted at least one form this month."""
        return self.num_forms_submitted > 0

    @property
    def is_newly_performing(self):
        """True when performing this month but not in the previous one."""
        if not self.is_performing:
            return False
        previous = self.previous_stub
        return previous is None or not previous.is_performing

    @property
    def delta_forms(self):
        """Change in submissions relative to the previous month (0 baseline)."""
        if self.previous_stub is None:
            return self.num_forms_submitted
        return self.num_forms_submitted - self.previous_stub.num_forms_submitted

    @property
    def num_forms_submitted_next_month(self):
        """Submissions in the following month, or 0 when unknown."""
        following = self.next_stub
        return following.num_forms_submitted if following else 0

    @property
    def delta_forms_next_month(self):
        """Change in submissions going into the following month."""
        return self.num_forms_submitted_next_month - self.num_forms_submitted
class MonthlyPerformanceSummary(jsonobject.JsonObject):
    """Aggregated mobile-worker activity for one (domain, month) pair.

    Counts are computed in ``__init__`` from MALT rows; cross-month deltas
    rely on adjacent summaries being linked and ``finalize()`` being called.
    """
    month = jsonobject.DateProperty()
    domain = jsonobject.StringProperty()
    # Minimum forms-per-month for a user to count as "performing".
    performance_threshold = jsonobject.IntegerProperty()
    # Number of users with at least one form submitted this month.
    active = jsonobject.IntegerProperty()
    # Number of users at or above the performance threshold this month.
    performing = jsonobject.IntegerProperty()
def __init__(self, domain, month, selected_users, active_not_deleted_users,
             performance_threshold, previous_summary=None,
             delta_high_performers=0, delta_low_performers=0):
    """Compute per-month activity counts from aggregated MALT rows.

    ``selected_users`` optionally narrows the report to a subset;
    ``active_not_deleted_users`` is the caller's precomputed id list.
    """
    self._previous_summary = previous_summary
    self._next_summary = None
    self._is_final = None
    # Mobile workers only (including since-deleted ones), restricted to the
    # ids the caller considers active-and-not-deleted.
    base_queryset = MALTRow.objects.filter(
        domain_name=domain,
        month=month,
        user_type__in=['CommCareUser', 'CommCareUser-Deleted'],
        user_id__in=active_not_deleted_users,
    )
    if selected_users:
        base_queryset = base_queryset.filter(
            user_id__in=selected_users,
        )
    # Total forms submitted per user for the month.
    self._user_stat_from_malt = (base_queryset
                                 .values('user_id', 'username')
                                 .annotate(total_num_forms=Sum('num_of_forms')))

    num_performing_users = (self._user_stat_from_malt
                            .filter(total_num_forms__gte=performance_threshold)
                            .count())
    num_active_users = self._user_stat_from_malt.count()
    num_low_performing_user = num_active_users - num_performing_users

    # When a previous month is linked, recompute the deltas against it,
    # overriding whatever defaults were passed in.
    if self._previous_summary:
        delta_high_performers = num_performing_users - self._previous_summary.number_of_performing_users
        delta_low_performers = num_low_performing_user - self._previous_summary.number_of_low_performing_users

    super(MonthlyPerformanceSummary, self).__init__(
        month=month,
        domain=domain,
        performance_threshold=performance_threshold,
        active=num_active_users,
        total_users_by_month=0,
        percent_active=0,
        performing=num_performing_users,
        delta_high_performers=delta_high_performers,
        delta_low_performers=delta_low_performers,
    )
def set_next_month_summary(self, next_month_summary):
    """Link the following month's summary so stubs/deltas can look ahead."""
    self._next_summary = next_month_summary
def set_percent_active(self):
    """Recompute the month's total user count and the active fraction."""
    self.total_users_by_month = self.inactive + self.number_of_active_users
    if self.total_users_by_month:
        self.percent_active = float(self.number_of_active_users) / float(self.total_users_by_month)
    else:
        # no users at all this month: avoid dividing by zero
        self.percent_active = 0
@property
def number_of_performing_users(self):
    """Users at or above the performance threshold this month."""
    return self.performing
@property
def number_of_low_performing_users(self):
    """Active users below the performance threshold this month."""
    return self.active - self.performing
@property
def number_of_active_users(self):
    """Users with at least one form submitted this month."""
    return self.active
@property
@memoized
def inactive(self):
    """Number of dropouts: users counted previously but inactive this month."""
    dropouts = self.get_dropouts()
    return len(dropouts) if dropouts else 0
@property
def previous_month(self):
    """First day of the calendar month before ``self.month``."""
    prev_year, prev_month = add_months(self.month.year, self.month.month, -1)
    return datetime.datetime(prev_year, prev_month, 1)
@property
def delta_high_performing(self):
    """Change in performing-user count vs the previous month (0 baseline)."""
    if self._previous_summary:
        return self.number_of_performing_users - self._previous_summary.number_of_performing_users
    else:
        return self.number_of_performing_users
@property
def delta_high_performing_pct(self):
    """Percent change in performing users vs last month.

    Implicitly returns None when the delta is zero, there is no previous
    summary, or last month had no performing users.
    """
    if (self.delta_high_performing and self._previous_summary and
            self._previous_summary.number_of_performing_users):
        return self.delta_high_performing / float(self._previous_summary.number_of_performing_users) * 100
@property
def delta_low_performing(self):
    """Change in low-performing-user count vs the previous month (0 baseline)."""
    if self._previous_summary:
        return self.number_of_low_performing_users - self._previous_summary.number_of_low_performing_users
    else:
        return self.number_of_low_performing_users
@property
def delta_low_performing_pct(self):
    """Percent change in low-performing users vs last month.

    Implicitly returns None when the delta is zero, there is no previous
    summary, or last month had no low-performing users.
    """
    if self.delta_low_performing and self._previous_summary \
            and self._previous_summary.number_of_low_performing_users:
        return self.delta_low_performing / float(self._previous_summary.number_of_low_performing_users) * 100
@property
def delta_active(self):
    """Change in active-user count vs the previous month (0 baseline)."""
    return self.active - self._previous_summary.active if self._previous_summary else self.active
@property
def delta_active_pct(self):
    """Percent change in active users vs last month (None when undefined)."""
    if self.delta_active and self._previous_summary and self._previous_summary.active:
        return self.delta_active / float(self._previous_summary.active) * 100
@property
def delta_inactive(self):
    """Change in dropout count vs the previous month (0 baseline)."""
    return self.inactive - self._previous_summary.inactive if self._previous_summary else self.inactive
@property
def delta_inactive_pct(self):
    """Percent change in dropouts vs last month.

    When last month had zero dropouts, the raw delta times 100 is used
    instead of a ratio; None when the delta is zero or no previous summary.
    """
    if self.delta_inactive and self._previous_summary:
        if self._previous_summary.inactive == 0:
            return self.delta_inactive * 100.
        return self.delta_inactive / float(self._previous_summary.inactive) * 100
def _get_all_user_stubs(self):
return {
row['user_id']: UserActivityStub(
user_id=row['user_id'],
username=raw_username(row['username']),
num_forms_submitted=row['total_num_forms'],
is_performing=row['total_num_forms'] >= self.performance_threshold,
previous_stub=None,
next_stub=None,
) for row in self._user_stat_from_malt
}
    def finalize(self):
        """
        Before a summary is "finalized" certain fields can't be accessed.

        Specifically, _get_all_user_stubs_with_extra_data raises until this
        has been called, because it needs the next month's summary to have
        been attached first (see set_next_month_summary).
        """
        self._is_final = True
    @memoized
    def _get_all_user_stubs_with_extra_data(self):
        """Return this month's user stubs with previous/next month stubs
        linked in, plus zero-form stubs for users who were present last month
        but are missing this month. Implicitly returns None when there is no
        previous summary to compare against."""
        if not self._is_final:
            # intentionally fail-hard with developer-facing error
            raise Exception("User stubs accessed before finalized. "
                            "Please call finalize() before calling this method.")
        if self._previous_summary:
            previous_stubs = self._previous_summary._get_all_user_stubs()
            next_stubs = self._next_summary._get_all_user_stubs() if self._next_summary else {}
            user_stubs = self._get_all_user_stubs()
            ret = []
            # Re-create each of this month's stubs with its neighbor links
            # filled in (UserActivityStub is an immutable namedtuple).
            for user_stub in user_stubs.values():
                ret.append(UserActivityStub(
                    user_id=user_stub.user_id,
                    username=user_stub.username,
                    num_forms_submitted=user_stub.num_forms_submitted,
                    is_performing=user_stub.is_performing,
                    previous_stub=previous_stubs.get(user_stub.user_id),
                    next_stub=next_stubs.get(user_stub.user_id),
                ))
            # Users seen last month but absent this month get a synthetic
            # zero-submission stub so dropouts can be detected.
            for missing_user_id in set(previous_stubs.keys()) - set(user_stubs.keys()):
                previous_stub = previous_stubs[missing_user_id]
                ret.append(UserActivityStub(
                    user_id=previous_stub.user_id,
                    username=previous_stub.username,
                    num_forms_submitted=0,
                    is_performing=False,
                    previous_stub=previous_stub,
                    next_stub=next_stubs.get(missing_user_id),
                ))
            return ret
def get_unhealthy_users(self):
"""
Get a list of unhealthy users - defined as those who were "performing" last month
but are not this month (though are still active).
"""
if self._previous_summary:
unhealthy_users = [stub for stub in self._get_all_user_stubs_with_extra_data() if stub.is_active and not stub.is_performing]
return sorted(unhealthy_users, key=lambda stub: stub.delta_forms)
def get_dropouts(self):
"""
Get a list of dropout users - defined as those who were active last month
but are not active this month
"""
if self._previous_summary:
dropouts = [stub for stub in self._get_all_user_stubs_with_extra_data() if not stub.is_active]
return sorted(dropouts, key=lambda stub: stub.delta_forms)
def get_newly_performing(self):
"""
Get a list of "newly performing" users - defined as those who are "performing" this month
after not performing last month.
"""
if self._previous_summary:
dropouts = [stub for stub in self._get_all_user_stubs_with_extra_data() if stub.is_newly_performing]
return sorted(dropouts, key=lambda stub: -stub.delta_forms)
def build_worksheet(title, headers, rows):
    """Return a two-element [title, sheet] pair where sheet is the header
    row followed by all data rows, in order."""
    sheet = [headers]
    sheet += rows
    return [title, sheet]
class ProjectHealthDashboard(ProjectReport):
    """Report showing how many users were active/performing over the last
    several months, with worksheets for dropouts, low performers, and newly
    performing users."""
    slug = 'project_health'
    name = ugettext_lazy("Project Performance")
    report_template_path = "reports/async/project_health_dashboard.html"
    description = ugettext_lazy("A summary of the overall health of your project"
                                " based on how your users are doing over time.")
    fields = [
        'corehq.apps.reports.filters.location.LocationGroupFilter',
        'corehq.apps.reports.filters.dates.HiddenLastMonthDateFilter',
    ]
    exportable = True
    emailable = True

    @property
    @memoized
    def template_report(self):
        # Swap in the email-friendly template when rendered for email.
        if self.is_rendered_as_email:
            self.report_template_path = "reports/project_health/project_health_email.html"
        return super(ProjectHealthDashboard, self).template_report

    @use_nvd3
    def decorator_dispatcher(self, request, *args, **kwargs):
        # Wrapped only to pull in the nvd3 charting assets via @use_nvd3.
        super(ProjectHealthDashboard, self).decorator_dispatcher(request, *args, **kwargs)

    def get_number_of_months(self):
        # Number of months to summarize, from the 'months' query param;
        # falls back to 6 on a missing or non-integer value.
        try:
            return int(self.request.GET.get('months', 6))
        except ValueError:
            return 6

    def get_group_location_ids(self):
        # Non-empty values of the 'grouplocationfilter' query param.
        params = [_f for _f in self.request.GET.getlist('grouplocationfilter') if _f]
        return params

    def parse_group_location_params(self, param_ids):
        """Split combined filter ids into (location_ids, group_ids).

        Ids are prefixed 'g__' for groups and 'l__' for locations; selected
        locations are expanded to include all of their descendants.
        """
        locationids_param = []
        groupids_param = []
        if param_ids:
            for id in param_ids:
                if id.startswith("g__"):
                    groupids_param.append(id[3:])
                elif id.startswith("l__"):
                    loc = SQLLocation.by_location_id(id[3:])
                    if loc.get_descendants():
                        locationids_param.extend(loc.get_descendants().location_ids())
                    locationids_param.append(id[3:])
        return locationids_param, groupids_param

    def get_users_by_location_filter(self, location_ids):
        # User ids in this domain assigned to any of the given locations.
        return UserES().domain(self.domain).location(location_ids).values_list('_id', flat=True)

    def get_users_by_group_filter(self, group_ids):
        # For each matching group, the list of its member user ids
        # (a list of lists, flattened later in get_unique_users).
        return GroupES().domain(self.domain).group_ids(group_ids).values_list("users", flat=True)

    def get_unique_users(self, users_loc, users_group):
        # Union of location-based and (flattened) group-based user ids.
        if users_loc and users_group:
            return set(chain(*users_group)).union(users_loc)
        elif users_loc:
            return set(users_loc)
        else:
            return set(chain(*users_group))

    def get_users_by_filter(self):
        """Resolve the group/location filter params to a set of user ids.

        Empty set when no filter is selected, which downstream code treats
        as "no user filtering" (see MonthlyPerformanceSummary.__init__).
        """
        locationids_param, groupids_param = self.parse_group_location_params(self.get_group_location_ids())
        users_list_by_location = self.get_users_by_location_filter(locationids_param)
        users_list_by_group = self.get_users_by_group_filter(groupids_param)
        users_set = self.get_unique_users(users_list_by_location, users_list_by_group)
        return users_set

    def previous_months_summary(self, months=6):
        """Build chained MonthlyPerformanceSummary objects for the last
        `months` months plus one extra leading month (used only as the
        'previous' baseline and dropped from the returned list)."""
        now = datetime.datetime.utcnow()
        six_month_summary = []
        last_month_summary = None
        performance_threshold = get_performance_threshold(self.domain)
        filtered_users = self.get_users_by_filter()
        active_not_deleted_users = UserES().domain(self.domain).values_list("_id", flat=True)
        for i in range(-months, 1):
            year, month = add_months(now.year, now.month, i)
            month_as_date = datetime.date(year, month, 1)
            this_month_summary = MonthlyPerformanceSummary(
                domain=self.domain,
                performance_threshold=performance_threshold,
                month=month_as_date,
                previous_summary=last_month_summary,
                selected_users=filtered_users,
                active_not_deleted_users=active_not_deleted_users,
            )
            six_month_summary.append(this_month_summary)
            if last_month_summary is not None:
                last_month_summary.set_next_month_summary(this_month_summary)
            last_month_summary = this_month_summary
        # these steps have to be done in a second outer loop so that 'next month summary' is available
        # whenever it is needed
        for summary in six_month_summary:
            summary.finalize()
            summary.set_percent_active()
        return six_month_summary[1:]

    def export_summary(self, six_months):
        # One worksheet row per month with the headline counts.
        return build_worksheet(title="Six Month Performance Summary",
                               headers=['month', 'num_high_performing_users', 'num_low_performing_users',
                                        'total_active', 'total_inactive', 'total_num_users'],
                               rows=[[monthly_summary.month.isoformat(),
                                      monthly_summary.number_of_performing_users,
                                      monthly_summary.number_of_low_performing_users, monthly_summary.active,
                                      monthly_summary.inactive, monthly_summary.total_users_by_month]
                                     for monthly_summary in six_months])

    @property
    def export_table(self):
        """Worksheets for the Excel export: the month-by-month summary plus
        per-user detail sheets for last month's dropouts, low performers,
        and newly performing users."""
        previous_months_reports = self.previous_months_summary(self.get_number_of_months())
        # NOTE(review): [-2] is the last *complete* month; [-1] appears to be
        # the current, in-progress month -- confirm against callers.
        last_month = previous_months_reports[-2]
        header = ['user_id', 'username', 'last_month_forms', 'delta_last_month',
                  'this_month_forms', 'delta_this_month', 'is_performing']

        def extract_user_stat(user_list):
            # Flatten UserActivityStub objects into export rows.
            return [[user.user_id, user.username, user.num_forms_submitted, user.delta_forms,
                     user.num_forms_submitted_next_month, user.delta_forms_next_month,
                     user.is_performing] for user in user_list]

        return [
            self.export_summary(previous_months_reports),
            build_worksheet(title="Inactive Users", headers=header,
                            rows=extract_user_stat(last_month.get_dropouts())),
            build_worksheet(title=_("Low Performing Users"), headers=header,
                            rows=extract_user_stat(last_month.get_unhealthy_users())),
            build_worksheet(title=_("New Performing Users"), headers=header,
                            rows=extract_user_stat(last_month.get_newly_performing())),
        ]

    @property
    def template_context(self):
        context = super().template_context
        performance_threshold = get_performance_threshold(self.domain)
        prior_months_reports = self.previous_months_summary(self.get_number_of_months())
        six_months_reports = []
        for report in prior_months_reports:
            r = report.to_json()
            # inactive is a calculated property and this is transformed to json in
            # the template so we need to precompute here
            r.update({'inactive': report.inactive})
            six_months_reports.append(r)
        context.update({
            'six_months_reports': six_months_reports,
            'this_month': prior_months_reports[-1],
            'last_month': prior_months_reports[-2],
            'threshold': performance_threshold,
            'domain': self.domain,
        })
        return context
| 41.016204 | 136 | 0.661832 | import datetime
from collections import namedtuple
from itertools import chain
from django.db.models import Sum
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from memoized import memoized
from dimagi.ext import jsonobject
from dimagi.utils.dates import add_months
from corehq.apps.data_analytics.models import MALTRow
from corehq.apps.domain.models import Domain
from corehq.apps.es.groups import GroupES
from corehq.apps.es.users import UserES
from corehq.apps.hqwebapp.decorators import use_nvd3
from corehq.apps.locations.models import SQLLocation
from corehq.apps.reports.standard import ProjectReport
from corehq.apps.users.util import raw_username
def get_performance_threshold(domain_name):
return Domain.get_by_name(domain_name).internal.performance_threshold or 15
class UserActivityStub(namedtuple('UserStub', ['user_id', 'username', 'num_forms_submitted',
'is_performing', 'previous_stub', 'next_stub'])):
@property
def is_active(self):
return self.num_forms_submitted > 0
@property
def is_newly_performing(self):
return self.is_performing and (self.previous_stub is None or not self.previous_stub.is_performing)
@property
def delta_forms(self):
previous_forms = 0 if self.previous_stub is None else self.previous_stub.num_forms_submitted
return self.num_forms_submitted - previous_forms
@property
def num_forms_submitted_next_month(self):
return self.next_stub.num_forms_submitted if self.next_stub else 0
@property
def delta_forms_next_month(self):
return self.num_forms_submitted_next_month - self.num_forms_submitted
class MonthlyPerformanceSummary(jsonobject.JsonObject):
month = jsonobject.DateProperty()
domain = jsonobject.StringProperty()
performance_threshold = jsonobject.IntegerProperty()
active = jsonobject.IntegerProperty()
performing = jsonobject.IntegerProperty()
def __init__(self, domain, month, selected_users, active_not_deleted_users,
performance_threshold, previous_summary=None,
delta_high_performers=0, delta_low_performers=0):
self._previous_summary = previous_summary
self._next_summary = None
self._is_final = None
base_queryset = MALTRow.objects.filter(
domain_name=domain,
month=month,
user_type__in=['CommCareUser', 'CommCareUser-Deleted'],
user_id__in=active_not_deleted_users,
)
if selected_users:
base_queryset = base_queryset.filter(
user_id__in=selected_users,
)
self._user_stat_from_malt = (base_queryset
.values('user_id', 'username')
.annotate(total_num_forms=Sum('num_of_forms')))
num_performing_users = (self._user_stat_from_malt
.filter(total_num_forms__gte=performance_threshold)
.count())
num_active_users = self._user_stat_from_malt.count()
num_low_performing_user = num_active_users - num_performing_users
if self._previous_summary:
delta_high_performers = num_performing_users - self._previous_summary.number_of_performing_users
delta_low_performers = num_low_performing_user - self._previous_summary.number_of_low_performing_users
super(MonthlyPerformanceSummary, self).__init__(
month=month,
domain=domain,
performance_threshold=performance_threshold,
active=num_active_users,
total_users_by_month=0,
percent_active=0,
performing=num_performing_users,
delta_high_performers=delta_high_performers,
delta_low_performers=delta_low_performers,
)
def set_next_month_summary(self, next_month_summary):
self._next_summary = next_month_summary
def set_percent_active(self):
self.total_users_by_month = self.inactive + self.number_of_active_users
if self.total_users_by_month:
self.percent_active = float(self.number_of_active_users) / float(self.total_users_by_month)
else:
self.percent_active = 0
@property
def number_of_performing_users(self):
return self.performing
@property
def number_of_low_performing_users(self):
return self.active - self.performing
@property
def number_of_active_users(self):
return self.active
@property
@memoized
def inactive(self):
dropouts = self.get_dropouts()
return len(dropouts) if dropouts else 0
@property
def previous_month(self):
prev_year, prev_month = add_months(self.month.year, self.month.month, -1)
return datetime.datetime(prev_year, prev_month, 1)
@property
def delta_high_performing(self):
if self._previous_summary:
return self.number_of_performing_users - self._previous_summary.number_of_performing_users
else:
return self.number_of_performing_users
@property
def delta_high_performing_pct(self):
if (self.delta_high_performing and self._previous_summary and
self._previous_summary.number_of_performing_users):
return self.delta_high_performing / float(self._previous_summary.number_of_performing_users) * 100
@property
def delta_low_performing(self):
if self._previous_summary:
return self.number_of_low_performing_users - self._previous_summary.number_of_low_performing_users
else:
return self.number_of_low_performing_users
@property
def delta_low_performing_pct(self):
if self.delta_low_performing and self._previous_summary \
and self._previous_summary.number_of_low_performing_users:
return self.delta_low_performing / float(self._previous_summary.number_of_low_performing_users) * 100
@property
def delta_active(self):
return self.active - self._previous_summary.active if self._previous_summary else self.active
@property
def delta_active_pct(self):
if self.delta_active and self._previous_summary and self._previous_summary.active:
return self.delta_active / float(self._previous_summary.active) * 100
@property
def delta_inactive(self):
return self.inactive - self._previous_summary.inactive if self._previous_summary else self.inactive
@property
def delta_inactive_pct(self):
if self.delta_inactive and self._previous_summary:
if self._previous_summary.inactive == 0:
return self.delta_inactive * 100.
return self.delta_inactive / float(self._previous_summary.inactive) * 100
def _get_all_user_stubs(self):
return {
row['user_id']: UserActivityStub(
user_id=row['user_id'],
username=raw_username(row['username']),
num_forms_submitted=row['total_num_forms'],
is_performing=row['total_num_forms'] >= self.performance_threshold,
previous_stub=None,
next_stub=None,
) for row in self._user_stat_from_malt
}
def finalize(self):
self._is_final = True
@memoized
def _get_all_user_stubs_with_extra_data(self):
if not self._is_final:
raise Exception("User stubs accessed before finalized. "
"Please call finalize() before calling this method.")
if self._previous_summary:
previous_stubs = self._previous_summary._get_all_user_stubs()
next_stubs = self._next_summary._get_all_user_stubs() if self._next_summary else {}
user_stubs = self._get_all_user_stubs()
ret = []
for user_stub in user_stubs.values():
ret.append(UserActivityStub(
user_id=user_stub.user_id,
username=user_stub.username,
num_forms_submitted=user_stub.num_forms_submitted,
is_performing=user_stub.is_performing,
previous_stub=previous_stubs.get(user_stub.user_id),
next_stub=next_stubs.get(user_stub.user_id),
))
for missing_user_id in set(previous_stubs.keys()) - set(user_stubs.keys()):
previous_stub = previous_stubs[missing_user_id]
ret.append(UserActivityStub(
user_id=previous_stub.user_id,
username=previous_stub.username,
num_forms_submitted=0,
is_performing=False,
previous_stub=previous_stub,
next_stub=next_stubs.get(missing_user_id),
))
return ret
def get_unhealthy_users(self):
if self._previous_summary:
unhealthy_users = [stub for stub in self._get_all_user_stubs_with_extra_data() if stub.is_active and not stub.is_performing]
return sorted(unhealthy_users, key=lambda stub: stub.delta_forms)
def get_dropouts(self):
if self._previous_summary:
dropouts = [stub for stub in self._get_all_user_stubs_with_extra_data() if not stub.is_active]
return sorted(dropouts, key=lambda stub: stub.delta_forms)
def get_newly_performing(self):
if self._previous_summary:
dropouts = [stub for stub in self._get_all_user_stubs_with_extra_data() if stub.is_newly_performing]
return sorted(dropouts, key=lambda stub: -stub.delta_forms)
def build_worksheet(title, headers, rows):
worksheet = []
worksheet.append(headers)
worksheet.extend(rows)
return [
title,
worksheet
]
class ProjectHealthDashboard(ProjectReport):
slug = 'project_health'
name = ugettext_lazy("Project Performance")
report_template_path = "reports/async/project_health_dashboard.html"
description = ugettext_lazy("A summary of the overall health of your project"
" based on how your users are doing over time.")
fields = [
'corehq.apps.reports.filters.location.LocationGroupFilter',
'corehq.apps.reports.filters.dates.HiddenLastMonthDateFilter',
]
exportable = True
emailable = True
@property
@memoized
def template_report(self):
if self.is_rendered_as_email:
self.report_template_path = "reports/project_health/project_health_email.html"
return super(ProjectHealthDashboard, self).template_report
@use_nvd3
def decorator_dispatcher(self, request, *args, **kwargs):
super(ProjectHealthDashboard, self).decorator_dispatcher(request, *args, **kwargs)
def get_number_of_months(self):
try:
return int(self.request.GET.get('months', 6))
except ValueError:
return 6
def get_group_location_ids(self):
params = [_f for _f in self.request.GET.getlist('grouplocationfilter') if _f]
return params
def parse_group_location_params(self, param_ids):
locationids_param = []
groupids_param = []
if param_ids:
for id in param_ids:
if id.startswith("g__"):
groupids_param.append(id[3:])
elif id.startswith("l__"):
loc = SQLLocation.by_location_id(id[3:])
if loc.get_descendants():
locationids_param.extend(loc.get_descendants().location_ids())
locationids_param.append(id[3:])
return locationids_param, groupids_param
def get_users_by_location_filter(self, location_ids):
return UserES().domain(self.domain).location(location_ids).values_list('_id', flat=True)
def get_users_by_group_filter(self, group_ids):
return GroupES().domain(self.domain).group_ids(group_ids).values_list("users", flat=True)
def get_unique_users(self, users_loc, users_group):
if users_loc and users_group:
return set(chain(*users_group)).union(users_loc)
elif users_loc:
return set(users_loc)
else:
return set(chain(*users_group))
def get_users_by_filter(self):
locationids_param, groupids_param = self.parse_group_location_params(self.get_group_location_ids())
users_list_by_location = self.get_users_by_location_filter(locationids_param)
users_list_by_group = self.get_users_by_group_filter(groupids_param)
users_set = self.get_unique_users(users_list_by_location, users_list_by_group)
return users_set
def previous_months_summary(self, months=6):
now = datetime.datetime.utcnow()
six_month_summary = []
last_month_summary = None
performance_threshold = get_performance_threshold(self.domain)
filtered_users = self.get_users_by_filter()
active_not_deleted_users = UserES().domain(self.domain).values_list("_id", flat=True)
for i in range(-months, 1):
year, month = add_months(now.year, now.month, i)
month_as_date = datetime.date(year, month, 1)
this_month_summary = MonthlyPerformanceSummary(
domain=self.domain,
performance_threshold=performance_threshold,
month=month_as_date,
previous_summary=last_month_summary,
selected_users=filtered_users,
active_not_deleted_users=active_not_deleted_users,
)
six_month_summary.append(this_month_summary)
if last_month_summary is not None:
last_month_summary.set_next_month_summary(this_month_summary)
last_month_summary = this_month_summary
for summary in six_month_summary:
summary.finalize()
summary.set_percent_active()
return six_month_summary[1:]
def export_summary(self, six_months):
return build_worksheet(title="Six Month Performance Summary",
headers=['month', 'num_high_performing_users', 'num_low_performing_users',
'total_active', 'total_inactive', 'total_num_users'],
rows=[[monthly_summary.month.isoformat(),
monthly_summary.number_of_performing_users,
monthly_summary.number_of_low_performing_users, monthly_summary.active,
monthly_summary.inactive, monthly_summary.total_users_by_month]
for monthly_summary in six_months])
@property
def export_table(self):
previous_months_reports = self.previous_months_summary(self.get_number_of_months())
last_month = previous_months_reports[-2]
header = ['user_id', 'username', 'last_month_forms', 'delta_last_month',
'this_month_forms', 'delta_this_month', 'is_performing']
def extract_user_stat(user_list):
return [[user.user_id, user.username, user.num_forms_submitted, user.delta_forms,
user.num_forms_submitted_next_month, user.delta_forms_next_month,
user.is_performing] for user in user_list]
return [
self.export_summary(previous_months_reports),
build_worksheet(title="Inactive Users", headers=header,
rows=extract_user_stat(last_month.get_dropouts())),
build_worksheet(title=_("Low Performing Users"), headers=header,
rows=extract_user_stat(last_month.get_unhealthy_users())),
build_worksheet(title=_("New Performing Users"), headers=header,
rows=extract_user_stat(last_month.get_newly_performing())),
]
@property
def template_context(self):
context = super().template_context
performance_threshold = get_performance_threshold(self.domain)
prior_months_reports = self.previous_months_summary(self.get_number_of_months())
six_months_reports = []
for report in prior_months_reports:
r = report.to_json()
r.update({'inactive': report.inactive})
six_months_reports.append(r)
context.update({
'six_months_reports': six_months_reports,
'this_month': prior_months_reports[-1],
'last_month': prior_months_reports[-2],
'threshold': performance_threshold,
'domain': self.domain,
})
return context
| true | true |
1c31cb6a4d7b517d33ce839c490c9a39a16d59f9 | 2,925 | py | Python | tle/__main__.py | skittles1412/TLE | a7d7ac399602071bf2c559003466e95862ce2a00 | [
"MIT"
] | null | null | null | tle/__main__.py | skittles1412/TLE | a7d7ac399602071bf2c559003466e95862ce2a00 | [
"MIT"
] | null | null | null | tle/__main__.py | skittles1412/TLE | a7d7ac399602071bf2c559003466e95862ce2a00 | [
"MIT"
] | null | null | null | import argparse
import asyncio
import distutils.util
import logging
import os
import discord
from logging.handlers import TimedRotatingFileHandler
from os import environ
from pathlib import Path
import seaborn as sns
from discord.ext import commands
from matplotlib import pyplot as plt
from tle import constants
from tle.util import codeforces_common as cf_common
from tle.util import discord_common, font_downloader
def setup():
    """Prepare runtime state: data directories, logging, and plot styling."""
    # Ensure every directory the bot writes to exists up front.
    for directory in constants.ALL_DIRS:
        os.makedirs(directory, exist_ok=True)

    # Log to the console and to a file rotated daily (3 backups, UTC).
    file_handler = TimedRotatingFileHandler(
        constants.LOG_FILE_PATH, when='D', backupCount=3, utc=True)
    logging.basicConfig(
        format='{asctime}:{levelname}:{name}:{message}', style='{',
        datefmt='%d-%m-%Y %H:%M:%S', level=logging.INFO,
        handlers=[logging.StreamHandler(), file_handler])

    # Shared matplotlib/seaborn defaults for all plotting commands.
    plt.rcParams['figure.figsize'] = 7.0, 3.5
    sns.set()
    sns.set_style('darkgrid', {
        'axes.edgecolor': '#A0A0C5',
        'axes.spines.top': False,
        'axes.spines.right': False,
    })

    # Download fonts if necessary
    font_downloader.maybe_download()
def main():
    """Entry point: read configuration from the environment, wire up the
    Discord bot with all cogs, and run it until interrupted."""
    parser = argparse.ArgumentParser()
    # --nodb skips database initialization (passed through to cf_common).
    parser.add_argument('--nodb', action='store_true')
    args = parser.parse_args()

    token = environ.get('BOT_TOKEN')
    if not token:
        logging.error('Token required')
        return

    # Optional env toggle; parsed as a boolean string ("true"/"1"/...).
    allow_self_register = environ.get('ALLOW_DUEL_SELF_REGISTER')
    if allow_self_register:
        constants.ALLOW_DUEL_SELF_REGISTER = bool(distutils.util.strtobool(allow_self_register))

    setup()

    # The members intent is needed in addition to the defaults.
    intents = discord.Intents.default()
    intents.members = True
    bot = commands.Bot(command_prefix=commands.when_mentioned_or(';'), intents=intents)
    # Load every cog module in tle/cogs except the explicitly disallowed ones.
    cogs = [file.stem for file in Path('tle', 'cogs').glob('*.py')]
    disallowed_cogs = ['starboard']
    for extension in cogs:
        if extension not in disallowed_cogs:
            bot.load_extension(f'tle.cogs.{extension}')
    logging.info(f'Cogs loaded: {", ".join(bot.cogs)}')

    def no_dm_check(ctx):
        # Global check: reject any command invoked outside a guild.
        if ctx.guild is None:
            raise commands.NoPrivateMessage('Private messages not permitted.')
        return True

    # Restrict bot usage to inside guild channels only.
    bot.add_check(no_dm_check)

    # cf_common.initialize needs to run first, so it must be set as the bot's
    # on_ready event handler rather than an on_ready listener.
    @discord_common.on_ready_event_once(bot)
    async def init():
        await cf_common.initialize(args.nodb)
        asyncio.create_task(discord_common.presence(bot))

    bot.add_listener(discord_common.bot_error_handler, name='on_command_error')
    bot.run(token)
if __name__ == '__main__':
main()
| 31.117021 | 96 | 0.668718 | import argparse
import asyncio
import distutils.util
import logging
import os
import discord
from logging.handlers import TimedRotatingFileHandler
from os import environ
from pathlib import Path
import seaborn as sns
from discord.ext import commands
from matplotlib import pyplot as plt
from tle import constants
from tle.util import codeforces_common as cf_common
from tle.util import discord_common, font_downloader
def setup():
for path in constants.ALL_DIRS:
os.makedirs(path, exist_ok=True)
logging.basicConfig(format='{asctime}:{levelname}:{name}:{message}', style='{',
datefmt='%d-%m-%Y %H:%M:%S', level=logging.INFO,
handlers=[logging.StreamHandler(),
TimedRotatingFileHandler(constants.LOG_FILE_PATH, when='D',
backupCount=3, utc=True)])
plt.rcParams['figure.figsize'] = 7.0, 3.5
sns.set()
options = {
'axes.edgecolor': '#A0A0C5',
'axes.spines.top': False,
'axes.spines.right': False,
}
sns.set_style('darkgrid', options)
font_downloader.maybe_download()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--nodb', action='store_true')
args = parser.parse_args()
token = environ.get('BOT_TOKEN')
if not token:
logging.error('Token required')
return
allow_self_register = environ.get('ALLOW_DUEL_SELF_REGISTER')
if allow_self_register:
constants.ALLOW_DUEL_SELF_REGISTER = bool(distutils.util.strtobool(allow_self_register))
setup()
intents = discord.Intents.default()
intents.members = True
bot = commands.Bot(command_prefix=commands.when_mentioned_or(';'), intents=intents)
cogs = [file.stem for file in Path('tle', 'cogs').glob('*.py')]
disallowed_cogs = ['starboard']
for extension in cogs:
if extension not in disallowed_cogs:
bot.load_extension(f'tle.cogs.{extension}')
logging.info(f'Cogs loaded: {", ".join(bot.cogs)}')
def no_dm_check(ctx):
if ctx.guild is None:
raise commands.NoPrivateMessage('Private messages not permitted.')
return True
bot.add_check(no_dm_check)
# on_ready event handler rather than an on_ready listener.
@discord_common.on_ready_event_once(bot)
async def init():
await cf_common.initialize(args.nodb)
asyncio.create_task(discord_common.presence(bot))
bot.add_listener(discord_common.bot_error_handler, name='on_command_error')
bot.run(token)
if __name__ == '__main__':
main()
| true | true |
1c31cd8b355b3554479765522432b64185cead75 | 6,189 | py | Python | emission/storage/decorations/place_queries.py | trevor-wu/e-mission-server | 2e31986bd7c0faab7110b7eb69541b0b9eac62df | [
"BSD-3-Clause"
] | 21 | 2015-02-09T00:35:17.000Z | 2021-12-14T16:41:05.000Z | emission/storage/decorations/place_queries.py | trevor-wu/e-mission-server | 2e31986bd7c0faab7110b7eb69541b0b9eac62df | [
"BSD-3-Clause"
] | 672 | 2015-01-29T18:10:56.000Z | 2022-03-24T13:04:51.000Z | emission/storage/decorations/place_queries.py | trevor-wu/e-mission-server | 2e31986bd7c0faab7110b7eb69541b0b9eac62df | [
"BSD-3-Clause"
] | 110 | 2015-01-29T18:11:10.000Z | 2022-03-29T17:58:14.000Z | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
import emission.core.get_database as edb
import emission.core.wrapper.entry as ecwe
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.decorations.analysis_timeseries_queries as esda
def get_last_place_entry(key, user_id):
    """
    Return the user's chronologically last place entry for `key`, or None.

    Finding the max enter_ts directly would require scanning every place,
    which is expensive. Instead we exploit the fact that data is processed
    in chunks of trips: the last place has been created and entered but not
    yet exited, so it is the unique document with no data.exit_ts.
    """
    ts = esta.TimeSeries.get_time_series(user_id)
    place_doc = ts.analysis_timeseries_db.find_one({
        'user_id': user_id,
        'metadata.key': key,
        'data.exit_ts': {'$exists': False},
    })
    logging.debug("last place doc = %s" % place_doc)
    if place_doc is None:
        return None
    place_entry = ecwe.Entry(place_doc)
    # Sanity-check the invariant: an un-exited place carries no exit
    # metadata and no trip has started from it yet.
    assert('exit_ts' not in place_entry.data)
    assert('exit_fmt_time' not in place_entry.data)
    assert('starting_trip' not in place_entry.data)
    return place_entry
def get_first_place_entry(key, user_id):
    """
    Return the user's chronologically first place entry for `key`, or None.

    Mirror image of get_last_place_entry: the first place has been exited
    but never entered, so it is the unique document with no data.enter_ts.
    """
    ts = esta.TimeSeries.get_time_series(user_id)
    place_doc = ts.analysis_timeseries_db.find_one({
        'user_id': user_id,
        'metadata.key': key,
        'data.enter_ts': {'$exists': False},
    })
    logging.debug("first place doc = %s" % place_doc)
    if place_doc is None:
        return None
    place_entry = ecwe.Entry(place_doc)
    # Sanity-check the invariant: the first place carries no enter metadata
    # and no trip ends at it.
    assert('enter_ts' not in place_entry.data)
    assert('enter_fmt_time' not in place_entry.data)
    assert('ending_trip' not in place_entry.data)
    return place_entry
def get_last_place_before(place_key, reset_ts, user_id):
    """
    Unlike `get_last_place_entry` which returns the last place in the
    timeline, this returns the last place before a particular timestamp.
    Used to reset the pipeline, for example.

    To implement this, we can't just look for places before that timestamp,
    because then we will get a list. And we don't want to retrieve all of them
    and sort either.

    We can look for places that exit after that timestamp, but that will also
    give a list. But hopefully, a shorter list, so that we don't have to sort
    as much. I can't think of an alternative that doesn't require sorting.

    Oh wait! There is an alternative!
    We can look for the place that has an enter timestamp before the ts and an
    exit timestamp after, or a trip that has a start timestamp before the ts
    and an end timestamp after. We should only find one. And if we find the
    trip then the place is its start place.

    Note that these correspond to the two use cases in
    https://github.com/e-mission/e-mission-server/issues/333

    :raises ValueError: if no place or trip straddles reset_ts and the
        pipeline did not simply end before it.
    """
    trip_key_query = _get_trip_key_query(place_key)
    logging.debug("Looking for last place before %s" % reset_ts)
    ts = esta.TimeSeries.get_time_series(user_id)
    # NOTE(review): this fetches *every* place for the user purely for the
    # debug log below -- potentially expensive on large timelines.
    all_user_places = list(edb.get_analysis_timeseries_db().find(
        {"user_id": user_id, "metadata.key": place_key},
        {"_id": True, "data.enter_fmt_time": True, "data.exit_fmt_time": True}))
    logging.debug("all places for this user = %s" % all_user_places)
    # Case 1: a place straddles reset_ts (entered before, exited after).
    ret_place_doc = ts.analysis_timeseries_db.find_one({'user_id': user_id,
                                                        'metadata.key': place_key,
                                                        'data.exit_ts' : {'$gt': reset_ts},
                                                        'data.enter_ts': {'$lt': reset_ts}
                                                       })
    logging.debug("last place doc for user %s = %s" % (user_id, ret_place_doc))
    # Case 2: a trip straddles reset_ts (started before, ended after);
    # the answer is then that trip's start place.
    ret_trip_doc = ts.analysis_timeseries_db.find_one({'user_id': user_id,
                                                       'metadata.key': trip_key_query,
                                                       'data.end_ts' : {'$gt': reset_ts},
                                                       'data.start_ts': {'$lt': reset_ts}
                                                      })
    logging.debug("last trip doc for user %s = %s" % (user_id, ret_trip_doc))
    if ret_place_doc is None and ret_trip_doc is None:
        # Check to see if the pipeline ended before this
        last_place = get_last_place_entry(place_key, user_id)
        logging.debug("last_place = %s, reset_ts = %s" %
                      (last_place, reset_ts))
        if last_place is None:
            return None
        elif last_place.data.enter_ts is None:
            return None
        elif last_place.data.enter_ts < reset_ts:
            return last_place
        else:
            raise ValueError("No trip or place straddling time %s for user %s" %
                             (reset_ts, user_id))
    if ret_place_doc is None:
        assert ret_trip_doc is not None
        logging.info("ret_trip_doc start = %s, end = %s" %
                     (ret_trip_doc["data"]["start_fmt_time"],
                      ret_trip_doc["data"]["end_fmt_time"]))
        # Resolve the straddling trip to its start place.
        ret_place_doc = esda.get_entry(place_key, ret_trip_doc["data"]['start_place'])
    assert ret_place_doc is not None
    ret_place = ecwe.Entry(ret_place_doc)
    return ret_place
def _get_trip_key_query(place_key):
    """Map a place key to a query matching the corresponding trip keys.

    Untracked-time entries are included alongside trips because they also
    start and end at places.

    :param place_key: one of ``esda.CLEANED_PLACE_KEY`` or ``esda.RAW_PLACE_KEY``
    :return: a mongodb ``$in`` query dict matching the trip/untracked keys at
        the same processing stage as ``place_key``
    :raises RuntimeError: if ``place_key`` is not a recognized place key
    """
    if place_key == esda.CLEANED_PLACE_KEY:
        return {"$in": [esda.CLEANED_TRIP_KEY, esda.CLEANED_UNTRACKED_KEY]}
    elif place_key == esda.RAW_PLACE_KEY:
        return {"$in": [esda.RAW_TRIP_KEY, esda.RAW_UNTRACKED_KEY]}
    else:
        # Bug fix: `RuntimeException` is not a Python builtin (that is the
        # Java name), so the original line raised NameError instead of the
        # intended error. RuntimeError preserves the author's intent.
        raise RuntimeError("Invalid place key %s" % place_key)
| 46.886364 | 94 | 0.630473 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
import emission.core.get_database as edb
import emission.core.wrapper.entry as ecwe
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.decorations.analysis_timeseries_queries as esda
def get_last_place_entry(key, user_id):
    """Return the user's current (still open) place entry, or None.

    The last place in a user's timeline has no exit information, since the
    user has not left it yet, so we query for a document with no
    `data.exit_ts`. We use find_one, so this assumes at most one such open
    place exists per user/key — TODO confirm against the pipeline invariants.

    :param key: analysis timeseries key for places (raw or cleaned)
    :param user_id: the user whose timeline is searched
    :return: the matching entry wrapped in ecwe.Entry, or None if not found
    """
    ts = esta.TimeSeries.get_time_series(user_id)
    ret_place_doc = ts.analysis_timeseries_db.find_one({'user_id': user_id,
                                                        'metadata.key': key,
                                                        'data.exit_ts' : {'$exists': False}})
    logging.debug("last place doc = %s" % ret_place_doc)
    if ret_place_doc is None:
        return None
    ret_place = ecwe.Entry(ret_place_doc)
    # Sanity checks: an open place must have neither exit information nor a
    # trip starting from it
    assert('exit_ts' not in ret_place.data)
    assert('exit_fmt_time' not in ret_place.data)
    assert('starting_trip' not in ret_place.data)
    return ret_place
def get_first_place_entry(key, user_id):
    """Return the very first place entry in the user's timeline, or None.

    The first place has no enter information, since tracking started while
    the user was already there, so we query for a document with no
    `data.enter_ts`.

    :param key: analysis timeseries key for places (raw or cleaned)
    :param user_id: the user whose timeline is searched
    :return: the matching entry wrapped in ecwe.Entry, or None if not found
    """
    timeseries = esta.TimeSeries.get_time_series(user_id)
    first_place_query = {
        'user_id': user_id,
        'metadata.key': key,
        'data.enter_ts': {'$exists': False},
    }
    first_place_doc = timeseries.analysis_timeseries_db.find_one(first_place_query)
    logging.debug("first place doc = %s" % first_place_doc)
    if first_place_doc is None:
        return None
    first_place = ecwe.Entry(first_place_doc)
    # Sanity checks: a first place must have neither enter information nor a
    # trip ending at it
    assert 'enter_ts' not in first_place.data
    assert 'enter_fmt_time' not in first_place.data
    assert 'ending_trip' not in first_place.data
    return first_place
def get_last_place_before(place_key, reset_ts, user_id):
    """Return the place entry that "straddles" `reset_ts` for this user.

    We look for either a place with an enter timestamp before the ts and an
    exit timestamp after, or a trip that has a start timestamp before the ts
    and an end timestamp after. We should only find one. And if we find the
    trip then the place is its start place.
    Note that these correspond to the two use cases in
    https://github.com/e-mission/e-mission-server/issues/333

    :param place_key: analysis timeseries key for places (raw or cleaned)
    :param reset_ts: the timestamp that the returned place/trip must straddle
    :param user_id: the user whose timeline is searched
    :return: the straddling place entry, the last open place if the pipeline
        ended before `reset_ts`, or None if no usable place exists
    :raises ValueError: if the last place was entered after `reset_ts`, so
        nothing straddles the timestamp
    """
    trip_key_query = _get_trip_key_query(place_key)
    logging.debug("Looking for last place before %s" % reset_ts)
    ts = esta.TimeSeries.get_time_series(user_id)
    # Debugging aid: dump all of the user's places so mismatches are easy to
    # diagnose from the logs
    all_user_places = list(edb.get_analysis_timeseries_db().find(
        {"user_id": user_id, "metadata.key": place_key},
        {"_id": True, "data.enter_fmt_time": True, "data.exit_fmt_time": True}))
    logging.debug("all places for this user = %s" % all_user_places)
    # Case 1: a place that the user entered before reset_ts and left after it
    ret_place_doc = ts.analysis_timeseries_db.find_one({'user_id': user_id,
                                                        'metadata.key': place_key,
                                                        'data.exit_ts' : {'$gt': reset_ts},
                                                        'data.enter_ts': {'$lt': reset_ts}
                                                        })
    logging.debug("last place doc for user %s = %s" % (user_id, ret_place_doc))
    # Case 2: a trip that started before reset_ts and ended after it
    ret_trip_doc = ts.analysis_timeseries_db.find_one({'user_id': user_id,
                                                       'metadata.key': trip_key_query,
                                                       'data.end_ts' : {'$gt': reset_ts},
                                                       'data.start_ts': {'$lt': reset_ts}
                                                       })
    logging.debug("last trip doc for user %s = %s" % (user_id, ret_trip_doc))
    if ret_place_doc is None and ret_trip_doc is None:
        # Check to see if the pipeline ended before this
        last_place = get_last_place_entry(place_key, user_id)
        logging.debug("last_place = %s, reset_ts = %s" %
                      (last_place, reset_ts))
        if last_place is None:
            return None
        elif last_place.data.enter_ts is None:
            return None
        elif last_place.data.enter_ts < reset_ts:
            return last_place
        else:
            raise ValueError("No trip or place straddling time %s for user %s" %
                             (reset_ts, user_id))
    if ret_place_doc is None:
        assert ret_trip_doc is not None
        logging.info("ret_trip_doc start = %s, end = %s" %
                     (ret_trip_doc["data"]["start_fmt_time"],
                      ret_trip_doc["data"]["end_fmt_time"]))
        # The trip straddles reset_ts, so its start place is the last place
        # before reset_ts
        ret_place_doc = esda.get_entry(place_key, ret_trip_doc["data"]['start_place'])
    assert ret_place_doc is not None
    ret_place = ecwe.Entry(ret_place_doc)
    return ret_place
def _get_trip_key_query(place_key):
    """Map a place key to a query matching the corresponding trip keys.

    Untracked-time entries are included alongside trips because they also
    start and end at places.

    :param place_key: one of ``esda.CLEANED_PLACE_KEY`` or ``esda.RAW_PLACE_KEY``
    :return: a mongodb ``$in`` query dict matching the trip/untracked keys at
        the same processing stage as ``place_key``
    :raises RuntimeError: if ``place_key`` is not a recognized place key
    """
    if place_key == esda.CLEANED_PLACE_KEY:
        return {"$in": [esda.CLEANED_TRIP_KEY, esda.CLEANED_UNTRACKED_KEY]}
    elif place_key == esda.RAW_PLACE_KEY:
        return {"$in": [esda.RAW_TRIP_KEY, esda.RAW_UNTRACKED_KEY]}
    else:
        # Bug fix: `RuntimeException` is not a Python builtin, so this branch
        # previously raised NameError instead of the intended error.
        raise RuntimeError("Invalid place key %s" % place_key)
| true | true |
1c31ce27baf51df5e89dd769ec2e22576991ccaf | 4,007 | py | Python | tests/test_sbm.py | jernsting/nxt_gem | 407af32250ab00aa17c54729cf0483adc0f8a658 | [
"BSD-3-Clause"
] | null | null | null | tests/test_sbm.py | jernsting/nxt_gem | 407af32250ab00aa17c54729cf0483adc0f8a658 | [
"BSD-3-Clause"
] | null | null | null | tests/test_sbm.py | jernsting/nxt_gem | 407af32250ab00aa17c54729cf0483adc0f8a658 | [
"BSD-3-Clause"
] | null | null | null | """
Run the graph embedding methods on Karate graph and evaluate them on
graph reconstruction and visualization. Please copy the
gem/data/karate.edgelist to the working directory
"""
import os.path
import unittest
import networkx as nx
import pickle
import numpy as np
from gem.embedding.gf import GraphFactorization
from gem.embedding.hope import HOPE
from gem.embedding.lap import LaplacianEigenmaps
from gem.embedding.lle import LocallyLinearEmbedding
from gem.embedding.node2vec import node2vec
from gem.embedding.sdne import SDNE
from tests.fit_model import fit_model
class SBMTest(unittest.TestCase):
    """Regression tests: embed an SBM benchmark graph with each GEM method
    and compare the embedding against a stored reference in `smb_res/`."""

    def setUp(self) -> None:
        """Load the SBM graph and its node labels from the test fixtures."""
        # File that contains the edges. Format: source target
        # Optionally, you can add weights as third column: source target weight
        self.source_dir = os.path.dirname(os.path.abspath(__file__))
        file_prefix = os.path.join(self.source_dir, 'data/sbm.gpickle')
        # Load graph
        G = nx.read_gpickle(file_prefix)
        # convert G (networkx 1.x digraph) to networkx 2.x
        # NOTE: `G.node` / `G.edge` are networkx 1.x attributes; the pickled
        # fixture was written with that version.
        H = nx.DiGraph()
        H.add_nodes_from(G.node)
        for source_node in G.edge.keys():
            for target_node in G.edge[source_node].keys():
                H.add_edge(source_node, target_node)
        G = H
        try:
            node_colors = pickle.load(
                open(os.path.join(self.source_dir, 'data/sbm_node_labels.pickle'), 'rb')
            )
        except UnicodeDecodeError:
            # Fixture was pickled under Python 2; retry with latin1 decoding
            node_colors = pickle.load(
                open(os.path.join(self.source_dir, 'data/sbm_node_labels.pickle'), 'rb'), encoding='latin1'
            )
        # Convert the one-hot (sparse) label matrix to a flat list of label
        # indices, one per node
        node_colors_arr = [None] * node_colors.shape[0]
        for idx in range(node_colors.shape[0]):
            node_colors_arr[idx] = np.where(node_colors[idx, :].toarray() == 1)[1][0]
        self.node_colors_arr = node_colors_arr
        self.G = G

    def test_GraphFactorization(self):
        """Embed with Graph Factorization and compare to stored result."""
        model = GraphFactorization(d=128, max_iter=1000, eta=1 * 10**-4, regu=1.0, data_set='sbm')
        target = np.loadtxt(os.path.join(self.source_dir, 'smb_res/GraphFactorization.txt'))
        self.internal_model_test(model, target)

    def test_HOPE(self):
        """Embed with HOPE and compare to stored result."""
        model = HOPE(d=256, beta=0.01)
        target = np.loadtxt(os.path.join(self.source_dir, 'smb_res/HOPE.txt'))
        self.internal_model_test(model, target)

    def test_LaplacianEigenmaps(self):
        """Embed with Laplacian Eigenmaps and compare to stored result."""
        model = LaplacianEigenmaps(d=128)
        target = np.loadtxt(os.path.join(self.source_dir, 'smb_res/LaplacianEigenmaps.txt'))
        self.internal_model_test(model, target)

    def test_LocallyLinearEmbedding(self):
        """Embed with Locally Linear Embedding and compare to stored result."""
        model = LocallyLinearEmbedding(d=128)
        target = np.loadtxt(os.path.join(self.source_dir, 'smb_res/LocallyLinearEmbedding.txt'))
        self.internal_model_test(model, target)

    def test_node2vec(self):
        """Embed with node2vec; random walks make this stochastic, so a
        looser tolerance is used."""
        model = node2vec(d=182, max_iter=1, walk_len=80, num_walks=10, con_size=10, ret_p=1, inout_p=1, data_set='sbm')
        target = np.loadtxt(os.path.join(self.source_dir, 'smb_res/node2vec.txt'))
        self.internal_model_test(model, target, delta=.1)

    def test_SDNE(self):
        """Embed with SDNE; neural-net training is noisy, so the loosest
        tolerance is used."""
        model = SDNE(d=128, beta=5, alpha=1e-5, nu1=1e-6, nu2=1e-6, K=3, n_units=[500, 300, ], rho=0.3, n_iter=30,
                     xeta=0.001, n_batch=500, modelfile=['enc_model.json', 'dec_model.json'],
                     weightfile=['enc_weights.hdf5', 'dec_weights.hdf5'])
        target = np.loadtxt(os.path.join(self.source_dir, 'smb_res/SDNE.txt'))
        self.internal_model_test(model, target, delta=1)

    def internal_model_test(self, model, target, verbose: bool = False, delta: float = 1e-3):
        """Fit `model` on self.G and assert the mean deviation of its
        embedding from `target` is below `delta`.

        :param model: an unfitted GEM embedding model
        :param target: reference embedding loaded from `smb_res/`
        :param verbose: if True, print MAP and precision-curve diagnostics
        :param delta: tolerance on the mean element-wise deviation
        """
        MAP, prec_curv, err, err_baseline = fit_model(self.G, model)
        # ---------------------------------------------------------------------------------
        if verbose:
            print(("\tMAP: {} \t preccision curve: {}\n\n\n\n" + '-' * 100).format(MAP, prec_curv[:5]))
        self.assertTrue(abs(np.mean(target - model.get_embedding())) < delta)
| 42.178947 | 119 | 0.649114 | import os.path
import unittest
import networkx as nx
import pickle
import numpy as np
from gem.embedding.gf import GraphFactorization
from gem.embedding.hope import HOPE
from gem.embedding.lap import LaplacianEigenmaps
from gem.embedding.lle import LocallyLinearEmbedding
from gem.embedding.node2vec import node2vec
from gem.embedding.sdne import SDNE
from tests.fit_model import fit_model
class SBMTest(unittest.TestCase):
def setUp(self) -> None:
self.source_dir = os.path.dirname(os.path.abspath(__file__))
file_prefix = os.path.join(self.source_dir, 'data/sbm.gpickle')
G = nx.read_gpickle(file_prefix)
H = nx.DiGraph()
H.add_nodes_from(G.node)
for source_node in G.edge.keys():
for target_node in G.edge[source_node].keys():
H.add_edge(source_node, target_node)
G = H
try:
node_colors = pickle.load(
open(os.path.join(self.source_dir, 'data/sbm_node_labels.pickle'), 'rb')
)
except UnicodeDecodeError:
node_colors = pickle.load(
open(os.path.join(self.source_dir, 'data/sbm_node_labels.pickle'), 'rb'), encoding='latin1'
)
node_colors_arr = [None] * node_colors.shape[0]
for idx in range(node_colors.shape[0]):
node_colors_arr[idx] = np.where(node_colors[idx, :].toarray() == 1)[1][0]
self.node_colors_arr = node_colors_arr
self.G = G
def test_GraphFactorization(self):
model = GraphFactorization(d=128, max_iter=1000, eta=1 * 10**-4, regu=1.0, data_set='sbm')
target = np.loadtxt(os.path.join(self.source_dir, 'smb_res/GraphFactorization.txt'))
self.internal_model_test(model, target)
def test_HOPE(self):
model = HOPE(d=256, beta=0.01)
target = np.loadtxt(os.path.join(self.source_dir, 'smb_res/HOPE.txt'))
self.internal_model_test(model, target)
def test_LaplacianEigenmaps(self):
model = LaplacianEigenmaps(d=128)
target = np.loadtxt(os.path.join(self.source_dir, 'smb_res/LaplacianEigenmaps.txt'))
self.internal_model_test(model, target)
def test_LocallyLinearEmbedding(self):
model = LocallyLinearEmbedding(d=128)
target = np.loadtxt(os.path.join(self.source_dir, 'smb_res/LocallyLinearEmbedding.txt'))
self.internal_model_test(model, target)
def test_node2vec(self):
model = node2vec(d=182, max_iter=1, walk_len=80, num_walks=10, con_size=10, ret_p=1, inout_p=1, data_set='sbm')
target = np.loadtxt(os.path.join(self.source_dir, 'smb_res/node2vec.txt'))
self.internal_model_test(model, target, delta=.1)
def test_SDNE(self):
model = SDNE(d=128, beta=5, alpha=1e-5, nu1=1e-6, nu2=1e-6, K=3, n_units=[500, 300, ], rho=0.3, n_iter=30,
xeta=0.001, n_batch=500, modelfile=['enc_model.json', 'dec_model.json'],
weightfile=['enc_weights.hdf5', 'dec_weights.hdf5'])
target = np.loadtxt(os.path.join(self.source_dir, 'smb_res/SDNE.txt'))
self.internal_model_test(model, target, delta=1)
def internal_model_test(self, model, target, verbose: bool = False, delta: float = 1e-3):
MAP, prec_curv, err, err_baseline = fit_model(self.G, model)
if verbose:
print(("\tMAP: {} \t preccision curve: {}\n\n\n\n" + '-' * 100).format(MAP, prec_curv[:5]))
self.assertTrue(abs(np.mean(target - model.get_embedding())) < delta)
| true | true |
1c31d05f59c5c18d4835128551f94b0b2b968c51 | 493 | py | Python | comments/migrations/0011_auto_20210702_2215.py | montukv/CommentAPI | 3158f5b40cb21f192f292a3f3caae71d9f5d9020 | [
"MIT"
] | null | null | null | comments/migrations/0011_auto_20210702_2215.py | montukv/CommentAPI | 3158f5b40cb21f192f292a3f3caae71d9f5d9020 | [
"MIT"
] | null | null | null | comments/migrations/0011_auto_20210702_2215.py | montukv/CommentAPI | 3158f5b40cb21f192f292a3f3caae71d9f5d9020 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-07-02 16:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('comments', '0010_auto_20210702_2058'),
]
operations = [
migrations.RenameField(
model_name='page',
old_name='id',
new_name='idno',
),
migrations.RenameField(
model_name='usercomment',
old_name='id',
new_name='idno',
),
]
| 20.541667 | 48 | 0.545639 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('comments', '0010_auto_20210702_2058'),
]
operations = [
migrations.RenameField(
model_name='page',
old_name='id',
new_name='idno',
),
migrations.RenameField(
model_name='usercomment',
old_name='id',
new_name='idno',
),
]
| true | true |
1c31d17673c7ffdca7d04acaf3793863f9f7516d | 20,646 | py | Python | src/prefect/agent/ecs/agent.py | jspeis/prefect | 61154b21d2eaac21b7a597b6e9b63377ffffcb9a | [
"Apache-2.0"
] | 1 | 2022-03-19T06:11:03.000Z | 2022-03-19T06:11:03.000Z | src/prefect/agent/ecs/agent.py | jspeis/prefect | 61154b21d2eaac21b7a597b6e9b63377ffffcb9a | [
"Apache-2.0"
] | 1 | 2020-05-24T17:04:47.000Z | 2020-05-24T17:04:47.000Z | src/prefect/agent/ecs/agent.py | jspeis/prefect | 61154b21d2eaac21b7a597b6e9b63377ffffcb9a | [
"Apache-2.0"
] | null | null | null | import os
from copy import deepcopy
from typing import Iterable, Dict, Optional, Any
import slugify
import yaml
from prefect import config
from prefect.agent import Agent
from prefect.run_configs import ECSRun
from prefect.utilities.agent import get_flow_image, get_flow_run_command
from prefect.utilities.filesystems import read_bytes_from_path
from prefect.utilities.graphql import GraphQLResult
DEFAULT_TASK_DEFINITION_PATH = os.path.join(
os.path.dirname(__file__), "task_definition.yaml"
)
def merge_run_task_kwargs(opts1: dict, opts2: dict) -> dict:
    """Merge two `run_task_kwargs` dicts, given precedence to `opts2`.

    Values are merged with the following heuristics:

    - Anything outside of `overrides.containerOverrides` is merged directly,
      with precedence given to `opts2`
    - Dicts in the `overrides.containerOverrides` list are matched on their
      `"name"` fields, then merged directly (with precedence given to `opts2`).

    Args:
        - opts1 (dict): A dict of kwargs for `run_task`
        - opts2 (dict): A second dict of kwargs for `run_task`.

    Returns:
        - dict: A merged dict of kwargs
    """
    # Deep-copy the base options so callers' dicts are never mutated
    merged = deepcopy(opts1)

    # Top-level keys other than "overrides" merge directly
    merged.update({k: v for k, v in opts2.items() if k != "overrides"})

    new_overrides = opts2.get("overrides", {})
    if new_overrides:
        merged_overrides = merged.setdefault("overrides", {})
        # Keys inside "overrides" other than "containerOverrides" also
        # merge directly
        merged_overrides.update(
            {k: v for k, v in new_overrides.items() if k != "containerOverrides"}
        )

        # Container overrides pair up by "name": matching entries merge
        # key-by-key, unmatched entries are appended
        for new_entry in new_overrides.get("containerOverrides") or []:
            merged_containers = merged_overrides.setdefault("containerOverrides", [])
            for existing_entry in merged_containers:
                if existing_entry.get("name") == new_entry.get("name"):
                    existing_entry.update(new_entry)
                    break
            else:
                merged_containers.append(new_entry)

    return merged
class ECSAgent(Agent):
    """
    Agent which deploys flow runs as ECS tasks.

    Args:
        - agent_config_id (str, optional): An optional agent configuration ID
            that can be used to set configuration based on an agent from a
            backend API. If set all configuration values will be pulled from
            the backend agent configuration.
        - name (str, optional): An optional name to give this agent. Can also
            be set through the environment variable `PREFECT__CLOUD__AGENT__NAME`.
            Defaults to "agent".
        - labels (List[str], optional): A list of labels, which are arbitrary
            string identifiers used by Prefect Agents when polling for work.
        - env_vars (dict, optional): A dictionary of environment variables and
            values that will be set on each flow run that this agent submits
            for execution.
        - max_polls (int, optional): Maximum number of times the agent will
            poll Prefect Cloud for flow runs; defaults to infinite.
        - agent_address (str, optional): Address to serve internal api at.
            Currently this is just health checks for use by an orchestration
            layer. Leave blank for no api server (default).
        - no_cloud_logs (bool, optional): Disable logging to a Prefect backend
            for this agent and all deployed flow runs. Defaults to `False`.
        - task_definition_path (str, optional): Path to a task definition
            template to use when defining new tasks. If not provided, the
            default template will be used.
        - run_task_kwargs_path (str, optional): Path to a `yaml` file
            containing default kwargs to pass to `ECS.client.run_task`. May be
            a local path, or a remote path on e.g. `s3`.
        - aws_access_key_id (str, optional): AWS access key id for connecting
            the boto3 client. If not provided, will be loaded from your
            environment (via either the `AWS_ACCESS_KEY_ID` environment
            variable, or the `~/.aws/config` file). See
            [the boto3 credentials docs][1] for more information.
        - aws_secret_access_key (str, optional): AWS secret access key for
            connecting the boto3 client. If not provided, will be loaded from
            your environment (via either the `AWS_SECRET_ACCESS_KEY`
            environment variable, or the `~/.aws/config` file).
            See [the boto3 credentials docs][1] for more information.
        - aws_session_token (str, optional): AWS session key for connecting the
            boto3 client. If not provided, will be loaded from your environment
            (via either the `AWS_SESSION_TOKEN` environment variable, or the
            `~/.aws/config` file). See [the boto3 credentials docs][1] for more
            information.
        - region_name (str, optional): AWS region name to launch ECS tasks in.
            If not provided, will be loaded from your environment (via either
            the `AWS_DEFAULT_REGION` environment variable, or the
            `~/.aws/config` file). See [the boto3 configuration docs][2] for
            more information.
        - cluster (str, optional): The AWS cluster to use, defaults to
            `"default"` if not provided.
        - launch_type (str, optional): The launch type to use, either
            `"FARGATE"` (default) or `"EC2"`.
        - task_role_arn (str, optional): The default task role ARN to use when
            registering ECS tasks created by this agent.
        - botocore_config (dict, optional): Additional botocore configuration
            options to be passed to the boto3 client. See [the boto3
            configuration docs][2] for more information.

    [1]: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html

    [2]: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html
    """

    def __init__(  # type: ignore
        self,
        agent_config_id: str = None,
        name: str = None,
        labels: Iterable[str] = None,
        env_vars: dict = None,
        max_polls: int = None,
        agent_address: str = None,
        no_cloud_logs: bool = False,
        task_definition_path: str = None,
        run_task_kwargs_path: str = None,
        aws_access_key_id: str = None,
        aws_secret_access_key: str = None,
        aws_session_token: str = None,
        region_name: str = None,
        cluster: str = None,
        launch_type: str = None,
        task_role_arn: str = None,
        botocore_config: dict = None,
    ) -> None:
        super().__init__(
            agent_config_id=agent_config_id,
            name=name,
            labels=labels,
            env_vars=env_vars,
            max_polls=max_polls,
            agent_address=agent_address,
            no_cloud_logs=no_cloud_logs,
        )

        from botocore.config import Config
        from prefect.utilities.aws import get_boto_client

        self.cluster = cluster
        # ECS launch types are uppercase ("FARGATE"/"EC2"); normalize here
        self.launch_type = launch_type.upper() if launch_type else "FARGATE"
        self.task_role_arn = task_role_arn

        # Load boto configuration. We want to use the standard retry mode by
        # default (which isn't boto's default due to backwards compatibility).
        # The logic below lets the user override our default retry mode either
        # in `botocore_config` or in their aws config file.
        #
        # See https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html
        # for more info.
        boto_config = Config(**botocore_config or {})
        if not boto_config.retries:
            boto_config.retries = {"mode": "standard"}

        self.boto_kwargs = dict(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            aws_session_token=aws_session_token,
            region_name=region_name,
            config=boto_config,
        )  # type: Dict[str, Any]

        self.ecs_client = get_boto_client("ecs", **self.boto_kwargs)
        # Used to look up previously-registered task definitions by tag
        self.rgtag_client = get_boto_client(
            "resourcegroupstaggingapi", **self.boto_kwargs
        )

        # Load default task definition
        if not task_definition_path:
            task_definition_path = DEFAULT_TASK_DEFINITION_PATH
        try:
            self.task_definition = yaml.safe_load(
                read_bytes_from_path(task_definition_path)
            )
        except Exception:
            self.logger.error(
                "Failed to load default task definition from %r",
                task_definition_path,
                exc_info=True,
            )
            raise

        # Load default run_task kwargs
        if run_task_kwargs_path:
            try:
                self.run_task_kwargs = yaml.safe_load(
                    read_bytes_from_path(run_task_kwargs_path)
                )
            except Exception:
                self.logger.error(
                    "Failed to load default `run_task` kwargs from %r",
                    run_task_kwargs_path,
                    exc_info=True,
                )
                raise
        else:
            self.run_task_kwargs = {}

        # If `task_role_arn` is configured on the agent, add it to the default
        # template. The agent default `task_role_arn` is only applied if using
        # the agent's default template.
        if self.task_role_arn:
            self.task_definition["taskRoleArn"] = self.task_role_arn

        # If running on fargate, auto-configure `networkConfiguration` for the
        # user if they didn't configure it themselves.
        if self.launch_type == "FARGATE" and not self.run_task_kwargs.get(
            "networkConfiguration"
        ):
            self.run_task_kwargs[
                "networkConfiguration"
            ] = self.infer_network_configuration()

    def infer_network_configuration(self) -> dict:
        """Infer default values for `networkConfiguration`.

        This is called when running on `FARGATE` with no `networkConfiguration`
        specified in the default `run_task_kwargs`. This makes it easier to get
        setup, as usually the default is what you want.

        Uses the account's default VPC and all of its subnets, with a public
        IP assigned.

        Returns:
            - dict: Inferred `networkConfiguration`

        Raises:
            - ValueError: if no default VPC/subnets can be found
        """
        from prefect.utilities.aws import get_boto_client

        self.logger.debug("Inferring default `networkConfiguration`...")

        ec2 = get_boto_client("ec2", **self.boto_kwargs)
        vpcs = ec2.describe_vpcs(Filters=[{"Name": "isDefault", "Values": ["true"]}])[
            "Vpcs"
        ]
        if vpcs:
            vpc_id = vpcs[0]["VpcId"]
            subnets = ec2.describe_subnets(
                Filters=[{"Name": "vpc-id", "Values": [vpc_id]}]
            )["Subnets"]
            if subnets:
                config = {
                    "awsvpcConfiguration": {
                        "subnets": [s["SubnetId"] for s in subnets],
                        "assignPublicIp": "ENABLED",
                    }
                }
                self.logger.debug("Using networkConfiguration=%r", config)
                return config

        msg = (
            "Failed to infer default networkConfiguration, please explicitly "
            "configure using `--run-task-kwargs`"
        )
        self.logger.error(msg)
        raise ValueError(msg)

    def deploy_flow(self, flow_run: GraphQLResult) -> str:
        """
        Deploy a flow run as an ECS task.

        Reuses a previously-registered task definition when one matches the
        flow id/version tags; otherwise registers a new one.

        Args:
            - flow_run (GraphQLResult): A GraphQLResult flow run object

        Returns:
            - str: Information about the deployment

        Raises:
            - ValueError: if `run_task` reports failures instead of a started task
        """
        self.logger.info("Deploying flow run %r", flow_run.id)

        run_config = self._get_run_config(flow_run, ECSRun)
        assert isinstance(run_config, ECSRun)  # mypy

        taskdef_arn = self.get_task_definition_arn(flow_run, run_config)
        if taskdef_arn is None:
            # Register a new task definition
            self.logger.debug(
                "Registering new task definition for flow %s", flow_run.flow.id
            )
            taskdef = self.generate_task_definition(flow_run, run_config)
            resp = self.ecs_client.register_task_definition(**taskdef)
            taskdef_arn = resp["taskDefinition"]["taskDefinitionArn"]
            self.logger.debug(
                "Registered task definition %s for flow %s",
                taskdef_arn,
                flow_run.flow.id,
            )
        else:
            self.logger.debug(
                "Using task definition %s for flow %s", taskdef_arn, flow_run.flow.id
            )

        # Get kwargs to pass to run_task
        kwargs = self.get_run_task_kwargs(flow_run, run_config)

        resp = self.ecs_client.run_task(taskDefinition=taskdef_arn, **kwargs)

        # `run_task` reports per-task failures in `failures` rather than
        # raising, so check the response explicitly
        if resp.get("tasks"):
            task_arn = resp["tasks"][0]["taskArn"]
            self.logger.debug("Started task %r for flow run %r", task_arn, flow_run.id)
            return f"Task {task_arn}"

        raise ValueError(
            "Failed to start task for flow run {0}. Failures: {1}".format(
                flow_run.id, resp.get("failures")
            )
        )

    def get_task_definition_tags(self, flow_run: GraphQLResult) -> dict:
        """Get required task definition tags from a flow run.

        These tags uniquely identify the flow id/version pair, and are used
        to look up a previously registered task definition for reuse.

        Args:
            - flow_run (GraphQLResult): the flow run

        Returns:
            - dict: a dict of tags to use
        """
        return {
            "prefect:flow-id": flow_run.flow.id,
            "prefect:flow-version": str(flow_run.flow.version),
        }

    def get_task_definition_arn(
        self, flow_run: GraphQLResult, run_config: ECSRun
    ) -> Optional[str]:
        """Get an existing task definition ARN for a flow run.

        An explicit ARN on the run config wins; otherwise we search for a
        task definition tagged with this flow's id/version.

        Args:
            - flow_run (GraphQLResult): the flow run
            - run_config (ECSRun): The flow's run config

        Returns:
            - Optional[str]: the task definition ARN. Returns `None` if no
                existing definition is found.
        """
        if run_config.task_definition_arn is not None:
            return run_config.task_definition_arn

        tags = self.get_task_definition_tags(flow_run)

        from botocore.exceptions import ClientError

        try:
            res = self.rgtag_client.get_resources(
                TagFilters=[{"Key": k, "Values": [v]} for k, v in tags.items()],
                ResourceTypeFilters=["ecs:task-definition"],
            )
            if res["ResourceTagMappingList"]:
                return res["ResourceTagMappingList"][0]["ResourceARN"]
            return None
        except ClientError:
            # Any API error (e.g. missing tagging permissions) is treated as
            # "no existing definition"; a new one will be registered instead
            return None

    def generate_task_definition(
        self, flow_run: GraphQLResult, run_config: ECSRun
    ) -> Dict[str, Any]:
        """Generate an ECS task definition from a flow run

        Starts from (in order of precedence) the run config's inline task
        definition, its template path, or the agent's default template, then
        fills in the flow-specific family, tags, image, command, and env.

        Args:
            - flow_run (GraphQLResult): A flow run object
            - run_config (ECSRun): The flow's run config

        Returns:
            - dict: a dictionary representation of an ECS task definition
        """
        if run_config.task_definition:
            taskdef = deepcopy(run_config.task_definition)
        elif run_config.task_definition_path:
            self.logger.debug(
                "Loading task definition template from %r",
                run_config.task_definition_path,
            )
            template_bytes = read_bytes_from_path(run_config.task_definition_path)
            taskdef = yaml.safe_load(template_bytes)
        else:
            taskdef = deepcopy(self.task_definition)

        # Slugify the flow name, truncated so the full family name stays
        # within 255 characters
        slug = slugify.slugify(
            flow_run.flow.name,
            max_length=255 - len("prefect-"),
            word_boundary=True,
            save_order=True,
        )
        family = f"prefect-{slug}"

        tags = self.get_task_definition_tags(flow_run)

        taskdef["family"] = family

        # Flow id/version tags take precedence; template tags with other
        # keys are preserved
        taskdef_tags = [{"key": k, "value": v} for k, v in tags.items()]
        for entry in taskdef.get("tags", []):
            if entry["key"] not in tags:
                taskdef_tags.append(entry)
        taskdef["tags"] = taskdef_tags

        # Get the flow container (creating one if it doesn't already exist)
        containers = taskdef.setdefault("containerDefinitions", [])
        for container in containers:
            if container.get("name") == "flow":
                break
        else:
            container = {"name": "flow"}
            containers.append(container)

        # Set flow image
        container["image"] = image = get_flow_image(flow_run)

        # Set flow run command
        container["command"] = ["/bin/sh", "-c", get_flow_run_command(flow_run)]

        # Set taskRoleArn if configured
        if run_config.task_role_arn:
            taskdef["taskRoleArn"] = run_config.task_role_arn

        # Populate static environment variables from the following sources,
        # with precedence:
        # - Static environment variables, hardcoded below
        # - Values in the task definition template
        env = {
            "PREFECT__CLOUD__USE_LOCAL_SECRETS": "false",
            "PREFECT__CONTEXT__IMAGE": image,
            "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudFlowRunner",
            "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudTaskRunner",
        }
        container_env = [{"name": k, "value": v} for k, v in env.items()]
        for entry in container.get("environment", []):
            if entry["name"] not in env:
                container_env.append(entry)
        container["environment"] = container_env

        # Set resource requirements, if provided
        # Also ensure that cpu/memory are strings not integers
        if run_config.cpu:
            taskdef["cpu"] = str(run_config.cpu)
        elif "cpu" in taskdef:
            taskdef["cpu"] = str(taskdef["cpu"])
        if run_config.memory:
            taskdef["memory"] = str(run_config.memory)
        elif "memory" in taskdef:
            taskdef["memory"] = str(taskdef["memory"])

        return taskdef

    def get_run_task_kwargs(
        self, flow_run: GraphQLResult, run_config: ECSRun
    ) -> Dict[str, Any]:
        """Generate kwargs to pass to `ECS.client.run_task` for a flow run

        Layers the agent defaults, the run config's `run_task_kwargs`, and
        the per-run container environment overrides.

        Args:
            - flow_run (GraphQLResult): A flow run object
            - run_config (ECSRun): The flow's run config

        Returns:
            - dict: kwargs to pass to `ECS.client.run_task`
        """
        # Set agent defaults
        out = deepcopy(self.run_task_kwargs)
        if self.launch_type:
            out["launchType"] = self.launch_type
        if self.cluster:
            out["cluster"] = self.cluster

        # Apply run-config kwargs, if any
        if run_config.run_task_kwargs:
            out = merge_run_task_kwargs(out, run_config.run_task_kwargs)

        # Find or create the flow container overrides
        overrides = out.setdefault("overrides", {})
        container_overrides = overrides.setdefault("containerOverrides", [])
        for container in container_overrides:
            if container.get("name") == "flow":
                break
        else:
            container = {"name": "flow"}
            container_overrides.append(container)

        # Populate environment variables from the following sources,
        # with precedence:
        # - Dynamic values required for flow execution, hardcoded below
        # - Values set on the ECSRun object
        # - Values set using the `--env` CLI flag on the agent
        env = self.env_vars.copy()
        if run_config.env:
            env.update(run_config.env)
        env.update(
            {
                "PREFECT__CLOUD__API": config.cloud.api,
                "PREFECT__CONTEXT__FLOW_RUN_ID": flow_run.id,
                "PREFECT__CONTEXT__FLOW_ID": flow_run.flow.id,
                "PREFECT__LOGGING__LOG_TO_CLOUD": str(self.log_to_cloud).lower(),
                "PREFECT__CLOUD__AUTH_TOKEN": config.cloud.agent.auth_token,
                "PREFECT__CLOUD__AGENT__LABELS": str(self.labels),
            }
        )
        container_env = [{"name": k, "value": v} for k, v in env.items()]
        for entry in container.get("environment", []):
            if entry["name"] not in env:
                container_env.append(entry)
        container["environment"] = container_env

        return out
| 39.476099 | 98 | 0.608108 | import os
from copy import deepcopy
from typing import Iterable, Dict, Optional, Any
import slugify
import yaml
from prefect import config
from prefect.agent import Agent
from prefect.run_configs import ECSRun
from prefect.utilities.agent import get_flow_image, get_flow_run_command
from prefect.utilities.filesystems import read_bytes_from_path
from prefect.utilities.graphql import GraphQLResult
DEFAULT_TASK_DEFINITION_PATH = os.path.join(
os.path.dirname(__file__), "task_definition.yaml"
)
def merge_run_task_kwargs(opts1: dict, opts2: dict) -> dict:
out = deepcopy(opts1)
for k, v in opts2.items():
if k != "overrides":
out[k] = v
overrides = opts2.get("overrides", {})
if overrides:
out_overrides = out.setdefault("overrides", {})
for k, v in overrides.items():
if k != "containerOverrides":
out_overrides[k] = v
container_overrides = overrides.get("containerOverrides")
if container_overrides:
out_container_overrides = out_overrides.setdefault("containerOverrides", [])
for entry in container_overrides:
for out_entry in out_container_overrides:
if out_entry.get("name") == entry.get("name"):
out_entry.update(entry)
break
else:
out_container_overrides.append(entry)
return out
class ECSAgent(Agent):
def __init__(
self,
agent_config_id: str = None,
name: str = None,
labels: Iterable[str] = None,
env_vars: dict = None,
max_polls: int = None,
agent_address: str = None,
no_cloud_logs: bool = False,
task_definition_path: str = None,
run_task_kwargs_path: str = None,
aws_access_key_id: str = None,
aws_secret_access_key: str = None,
aws_session_token: str = None,
region_name: str = None,
cluster: str = None,
launch_type: str = None,
task_role_arn: str = None,
botocore_config: dict = None,
) -> None:
super().__init__(
agent_config_id=agent_config_id,
name=name,
labels=labels,
env_vars=env_vars,
max_polls=max_polls,
agent_address=agent_address,
no_cloud_logs=no_cloud_logs,
)
from botocore.config import Config
from prefect.utilities.aws import get_boto_client
self.cluster = cluster
self.launch_type = launch_type.upper() if launch_type else "FARGATE"
self.task_role_arn = task_role_arn
boto_config = Config(**botocore_config or {})
if not boto_config.retries:
boto_config.retries = {"mode": "standard"}
self.boto_kwargs = dict(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
region_name=region_name,
config=boto_config,
)
self.ecs_client = get_boto_client("ecs", **self.boto_kwargs)
self.rgtag_client = get_boto_client(
"resourcegroupstaggingapi", **self.boto_kwargs
)
if not task_definition_path:
task_definition_path = DEFAULT_TASK_DEFINITION_PATH
try:
self.task_definition = yaml.safe_load(
read_bytes_from_path(task_definition_path)
)
except Exception:
self.logger.error(
"Failed to load default task definition from %r",
task_definition_path,
exc_info=True,
)
raise
if run_task_kwargs_path:
try:
self.run_task_kwargs = yaml.safe_load(
read_bytes_from_path(run_task_kwargs_path)
)
except Exception:
self.logger.error(
"Failed to load default `run_task` kwargs from %r",
run_task_kwargs_path,
exc_info=True,
)
raise
else:
self.run_task_kwargs = {}
if self.task_role_arn:
self.task_definition["taskRoleArn"] = self.task_role_arn
# If running on fargate, auto-configure `networkConfiguration` for the
# user if they didn't configure it themselves.
if self.launch_type == "FARGATE" and not self.run_task_kwargs.get(
"networkConfiguration"
):
self.run_task_kwargs[
"networkConfiguration"
] = self.infer_network_configuration()
    def infer_network_configuration(self) -> dict:
        """Infer a default ``networkConfiguration`` block for FARGATE tasks.

        Looks up the account's *default* VPC and returns an
        ``awsvpcConfiguration`` listing all of that VPC's subnets with a
        public IP assigned.

        Returns:
            dict: a ``networkConfiguration`` mapping suitable for ``run_task``.

        Raises:
            ValueError: if no default VPC exists, or it has no subnets. In
                that case the user must configure networking explicitly via
                ``--run-task-kwargs``.
        """
        from prefect.utilities.aws import get_boto_client
        self.logger.debug("Inferring default `networkConfiguration`...")
        ec2 = get_boto_client("ec2", **self.boto_kwargs)
        # Only the account's default VPC is considered; custom VPCs must be
        # configured explicitly by the user.
        vpcs = ec2.describe_vpcs(Filters=[{"Name": "isDefault", "Values": ["true"]}])[
            "Vpcs"
        ]
        if vpcs:
            vpc_id = vpcs[0]["VpcId"]
            subnets = ec2.describe_subnets(
                Filters=[{"Name": "vpc-id", "Values": [vpc_id]}]
            )["Subnets"]
            if subnets:
                config = {
                    "awsvpcConfiguration": {
                        "subnets": [s["SubnetId"] for s in subnets],
                        "assignPublicIp": "ENABLED",
                    }
                }
                self.logger.debug("Using networkConfiguration=%r", config)
                return config
        # Reached when there is no default VPC or it contains no subnets.
        msg = (
            "Failed to infer default networkConfiguration, please explicitly "
            "configure using `--run-task-kwargs`"
        )
        self.logger.error(msg)
        raise ValueError(msg)
    def deploy_flow(self, flow_run: GraphQLResult) -> str:
        """Deploy a flow run as an ECS task.

        Reuses an existing registered task definition when one matching this
        flow's id/version tags (or an explicit ARN on the run config) is
        found; otherwise registers a new one, then starts the task.

        Args:
            flow_run: the flow run to deploy.

        Returns:
            str: a human-readable identifier of the form ``"Task <taskArn>"``.

        Raises:
            ValueError: if ECS reports no started tasks (the ``failures``
                from the API response are included in the message).
        """
        self.logger.info("Deploying flow run %r", flow_run.id)
        run_config = self._get_run_config(flow_run, ECSRun)
        # Narrow the type for static checkers; _get_run_config is expected to
        # return an ECSRun here -- presumably guaranteed upstream, TODO confirm.
        assert isinstance(run_config, ECSRun)
        taskdef_arn = self.get_task_definition_arn(flow_run, run_config)
        if taskdef_arn is None:
            # No cached/explicit definition -- register a fresh one for this
            # flow id/version.
            self.logger.debug(
                "Registering new task definition for flow %s", flow_run.flow.id
            )
            taskdef = self.generate_task_definition(flow_run, run_config)
            resp = self.ecs_client.register_task_definition(**taskdef)
            taskdef_arn = resp["taskDefinition"]["taskDefinitionArn"]
            self.logger.debug(
                "Registered task definition %s for flow %s",
                taskdef_arn,
                flow_run.flow.id,
            )
        else:
            self.logger.debug(
                "Using task definition %s for flow %s", taskdef_arn, flow_run.flow.id
            )
        kwargs = self.get_run_task_kwargs(flow_run, run_config)
        resp = self.ecs_client.run_task(taskDefinition=taskdef_arn, **kwargs)
        # run_task returns partial failures in "failures" rather than raising,
        # so check that at least one task actually started.
        if resp.get("tasks"):
            task_arn = resp["tasks"][0]["taskArn"]
            self.logger.debug("Started task %r for flow run %r", task_arn, flow_run.id)
            return f"Task {task_arn}"
        raise ValueError(
            "Failed to start task for flow run {0}. Failures: {1}".format(
                flow_run.id, resp.get("failures")
            )
        )
def get_task_definition_tags(self, flow_run: GraphQLResult) -> dict:
return {
"prefect:flow-id": flow_run.flow.id,
"prefect:flow-version": str(flow_run.flow.version),
}
    def get_task_definition_arn(
        self, flow_run: GraphQLResult, run_config: ECSRun
    ) -> Optional[str]:
        """Look up a previously registered task definition for this flow.

        An explicit ``task_definition_arn`` on the run config wins; otherwise
        the Resource Groups Tagging API is searched for a task definition
        carrying this flow's id/version tags.

        Returns:
            Optional[str]: the task definition ARN, or ``None`` if none is
            found (callers then register a new definition).
        """
        if run_config.task_definition_arn is not None:
            return run_config.task_definition_arn
        tags = self.get_task_definition_tags(flow_run)
        from botocore.exceptions import ClientError
        try:
            res = self.rgtag_client.get_resources(
                TagFilters=[{"Key": k, "Values": [v]} for k, v in tags.items()],
                ResourceTypeFilters=["ecs:task-definition"],
            )
            if res["ResourceTagMappingList"]:
                # Multiple matches are possible; the first is used.
                return res["ResourceTagMappingList"][0]["ResourceARN"]
            return None
        except ClientError:
            # Treat tagging-API errors the same as "not found" and fall back
            # to registering a new task definition.
            return None
    def generate_task_definition(
        self, flow_run: GraphQLResult, run_config: ECSRun
    ) -> Dict[str, Any]:
        """Build the ``register_task_definition`` payload for this flow run.

        The template is taken (in order of precedence) from the run config's
        inline ``task_definition``, its ``task_definition_path``, or the
        agent's default template, then customized with the flow's family
        name, tags, image, command, role, environment, and resources.

        Returns:
            Dict[str, Any]: kwargs for ``ecs_client.register_task_definition``.
        """
        if run_config.task_definition:
            taskdef = deepcopy(run_config.task_definition)
        elif run_config.task_definition_path:
            self.logger.debug(
                "Loading task definition template from %r",
                run_config.task_definition_path,
            )
            template_bytes = read_bytes_from_path(run_config.task_definition_path)
            taskdef = yaml.safe_load(template_bytes)
        else:
            # deepcopy so per-run mutations below never leak into the agent's
            # shared default template.
            taskdef = deepcopy(self.task_definition)
        # Family name must fit ECS's 255-char limit including the prefix.
        slug = slugify.slugify(
            flow_run.flow.name,
            max_length=255 - len("prefect-"),
            word_boundary=True,
            save_order=True,
        )
        family = f"prefect-{slug}"
        tags = self.get_task_definition_tags(flow_run)
        taskdef["family"] = family
        # Prefect's identifying tags take precedence over same-named template tags.
        taskdef_tags = [{"key": k, "value": v} for k, v in tags.items()]
        for entry in taskdef.get("tags", []):
            if entry["key"] not in tags:
                taskdef_tags.append(entry)
        taskdef["tags"] = taskdef_tags
        # Find the "flow" container in the template, creating it if missing
        # (for-else runs only when the loop found no match).
        containers = taskdef.setdefault("containerDefinitions", [])
        for container in containers:
            if container.get("name") == "flow":
                break
        else:
            container = {"name": "flow"}
            containers.append(container)
        # Set flow image
        container["image"] = image = get_flow_image(flow_run)
        # Set flow run command
        container["command"] = ["/bin/sh", "-c", get_flow_run_command(flow_run)]
        # Set taskRoleArn if configured
        if run_config.task_role_arn:
            taskdef["taskRoleArn"] = run_config.task_role_arn
        # Populate static environment variables from the following sources,
        # with precedence:
        # - Static environment variables, hardcoded below
        # - Values in the task definition template
        env = {
            "PREFECT__CLOUD__USE_LOCAL_SECRETS": "false",
            "PREFECT__CONTEXT__IMAGE": image,
            "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudFlowRunner",
            "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudTaskRunner",
        }
        container_env = [{"name": k, "value": v} for k, v in env.items()]
        for entry in container.get("environment", []):
            if entry["name"] not in env:
                container_env.append(entry)
        container["environment"] = container_env
        # Set resource requirements, if provided
        # Also ensure that cpu/memory are strings not integers
        if run_config.cpu:
            taskdef["cpu"] = str(run_config.cpu)
        elif "cpu" in taskdef:
            taskdef["cpu"] = str(taskdef["cpu"])
        if run_config.memory:
            taskdef["memory"] = str(run_config.memory)
        elif "memory" in taskdef:
            taskdef["memory"] = str(taskdef["memory"])
        return taskdef
    def get_run_task_kwargs(
        self, flow_run: GraphQLResult, run_config: ECSRun
    ) -> Dict[str, Any]:
        """Assemble keyword arguments for ``ecs_client.run_task``.

        Starts from the agent's configured ``run_task_kwargs``, merges in the
        run config's overrides, and injects the environment variables the
        flow-run container needs.

        Returns:
            Dict[str, Any]: kwargs to pass to ``run_task`` (the task
            definition ARN itself is supplied by the caller).
        """
        # Set agent defaults
        out = deepcopy(self.run_task_kwargs)
        if self.launch_type:
            out["launchType"] = self.launch_type
        if self.cluster:
            out["cluster"] = self.cluster
        # Apply run-config kwargs, if any
        if run_config.run_task_kwargs:
            out = merge_run_task_kwargs(out, run_config.run_task_kwargs)
        # Find or create the flow container overrides
        # (for-else appends a fresh entry only when no "flow" override exists).
        overrides = out.setdefault("overrides", {})
        container_overrides = overrides.setdefault("containerOverrides", [])
        for container in container_overrides:
            if container.get("name") == "flow":
                break
        else:
            container = {"name": "flow"}
            container_overrides.append(container)
        # Populate environment variables from the following sources,
        # with precedence:
        # - Dynamic values required for flow execution, hardcoded below
        # - Values set on the ECSRun object
        # - Values set using the `--env` CLI flag on the agent
        env = self.env_vars.copy()
        if run_config.env:
            env.update(run_config.env)
        env.update(
            {
                "PREFECT__CLOUD__API": config.cloud.api,
                "PREFECT__CONTEXT__FLOW_RUN_ID": flow_run.id,
                "PREFECT__CONTEXT__FLOW_ID": flow_run.flow.id,
                "PREFECT__LOGGING__LOG_TO_CLOUD": str(self.log_to_cloud).lower(),
                "PREFECT__CLOUD__AUTH_TOKEN": config.cloud.agent.auth_token,
                "PREFECT__CLOUD__AGENT__LABELS": str(self.labels),
            }
        )
        # Keep any pre-existing override entries that we did not set above.
        container_env = [{"name": k, "value": v} for k, v in env.items()]
        for entry in container.get("environment", []):
            if entry["name"] not in env:
                container_env.append(entry)
        container["environment"] = container_env
        return out
| true | true |
1c31d3cee567475068402dd59b3d0cd44159485b | 2,933 | py | Python | students/models.py | harshalgalgale/alumni | 52d8ecb8186abe5659f28a6555c069ec96f25269 | [
"Apache-2.0"
] | null | null | null | students/models.py | harshalgalgale/alumni | 52d8ecb8186abe5659f28a6555c069ec96f25269 | [
"Apache-2.0"
] | null | null | null | students/models.py | harshalgalgale/alumni | 52d8ecb8186abe5659f28a6555c069ec96f25269 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
# Create your models here.
# Choice lists for model fields: (stored value, human-readable label).
GENDER = [
    ('M', 'Male'),
    ('F', 'Female'),
]
DEGREE = [
    ('btech', 'B.Tech'),
    ('mtech', 'M.Tech'),
    ('phd', 'PhD'),
]
DEPARTMENT = [
    ('ape', 'APE'),
    ('fmp', 'FMP'),
    ('ide', 'IDE'),
    ('swce', 'SWCE'),
]
# NOTE(review): evaluated once at import time, so the upper bound goes stale
# in a long-lived process; not referenced by any field in this file -- confirm
# whether it is still needed.
YEAR_CHOICES = [(r, r) for r in range(1969, datetime.today().year + 1)]
# Lower bound used by the year validators on Student.
MIN_YEAR = 1900
def get_default_reg_no():
    """Default registration number: ``DRNo_<n>`` where n is the current row count.

    NOTE(review): count-based defaults can collide after deletions (two
    students may receive the same number) -- confirm this is acceptable.
    """
    count = Student.objects.all().count()
    return f'DRNo_{count}'


def current_year():
    """Return the current year.

    Used as a *callable* field default so the value is computed on every
    save; ``default=datetime.now().year`` would be frozen at import time.
    """
    return datetime.now().year


class Student(models.Model):
    """A registered student, unique per (reg_no, degree, reg_year)."""
    reg_no = models.CharField(help_text='Student id', max_length=20, default=get_default_reg_no)
    birth_date = models.DateField(null=True, blank=True)
    first_name = models.CharField(help_text='First name', max_length=150)
    middle_name = models.CharField(help_text='Middle name', max_length=150, null=True, blank=True)
    last_name = models.CharField(help_text='Last name', max_length=150)
    degree = models.CharField(help_text='Passing degree', choices=DEGREE, max_length=10)
    department = models.CharField(help_text='Department', choices=DEPARTMENT, max_length=10, null=True, blank=True)
    # NOTE(review): the MaxValueValidator bound is still computed at import
    # time, so it goes stale across a year boundary in a long-lived process.
    reg_year = models.IntegerField(help_text='Registration year',
                                   validators=[MinValueValidator(MIN_YEAR), MaxValueValidator(datetime.now().year)],
                                   default=current_year)
    pass_year = models.IntegerField(help_text='Graduation year',
                                    validators=[MinValueValidator(MIN_YEAR), MaxValueValidator(datetime.now().year)],
                                    default=current_year)
    gender = models.CharField(choices=GENDER, max_length=1, null=True, blank=True)

    class Meta:
        verbose_name = 'Registered Student'
        verbose_name_plural = 'Registered Students'
        ordering = ['reg_no', 'birth_date', 'first_name', 'last_name']
        unique_together = ['reg_no', 'degree', 'reg_year']

    def __str__(self):
        return f'{self.reg_no} : {self.get_short_name()} : {self.degree} : {self.reg_year}'

    def get_full_name(self):
        """Full name, skipping a missing middle name instead of rendering
        the literal string 'None'."""
        parts = [self.first_name, self.middle_name, self.last_name]
        return ' '.join(p for p in parts if p)

    def get_short_name(self):
        return f'{self.first_name} {self.last_name}'
# class PassedStudent(models.Model):
# student = models.OneToOneField(Student, on_delete=models.CASCADE)
# year = models.IntegerField(help_text='Passing year',
# validators=[MinValueValidator(MIN_YEAR), MaxValueValidator(datetime.now().year)],
# default=datetime.now().year)
#
# class Meta:
# verbose_name = 'Graduation'
# verbose_name_plural = 'Graduation'
# ordering = ['student']
#
# def __str__(self):
# return f'{self.student} : {self.year}'
| 38.090909 | 116 | 0.637572 | from datetime import datetime
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
GENDER = [
('M', 'Male'),
('F', 'Female'),
]
DEGREE = [
('btech', 'B.Tech'),
('mtech', 'M.Tech'),
('phd', 'PhD'),
]
DEPARTMENT = [
('ape', 'APE'),
('fmp', 'FMP'),
('ide', 'IDE'),
('swce', 'SWCE'),
]
YEAR_CHOICES = [(r, r) for r in range(1969, datetime.today().year + 1)]
MIN_YEAR = 1900
def get_default_reg_no():
count = Student.objects.all().count()
return f'DRNo_{count}'
class Student(models.Model):
reg_no = models.CharField(help_text='Student id', max_length=20, default=get_default_reg_no)
birth_date = models.DateField(null=True, blank=True)
first_name = models.CharField(help_text='First name', max_length=150)
middle_name = models.CharField(help_text='Middle name', max_length=150, null=True, blank=True)
last_name = models.CharField(help_text='Last name', max_length=150)
degree = models.CharField(help_text='Passing degree', choices=DEGREE, max_length=10)
department = models.CharField(help_text='Department', choices=DEPARTMENT, max_length=10, null=True, blank=True)
reg_year = models.IntegerField(help_text='Registration year',
validators=[MinValueValidator(MIN_YEAR), MaxValueValidator(datetime.now().year)],
default=datetime.now().year)
pass_year = models.IntegerField(help_text='Graduation year',
validators=[MinValueValidator(MIN_YEAR), MaxValueValidator(datetime.now().year)],
default=datetime.now().year)
gender = models.CharField(choices=GENDER, max_length=1, null=True, blank=True)
class Meta:
verbose_name = 'Registered Student'
verbose_name_plural = 'Registered Students'
ordering = ['reg_no', 'birth_date', 'first_name', 'last_name']
unique_together = ['reg_no', 'degree', 'reg_year']
def __str__(self):
return f'{self.reg_no} : {self.get_short_name()} : {self.degree} : {self.reg_year}'
def get_full_name(self):
full_name = f'{self.first_name} {self.middle_name} {self.last_name}'
return full_name.strip()
def get_short_name(self):
return f'{self.first_name} {self.last_name}'
| true | true |
1c31d430e14dd7191000e06cc94f70c42c3a4692 | 8,852 | py | Python | pyzscaler/zpa/app_segments.py | mitchos/pyZscaler | c4c83345bf26e2883b9555d20a90f9387a45e05e | [
"MIT"
] | 16 | 2021-07-09T00:20:31.000Z | 2022-02-17T19:29:26.000Z | pyzscaler/zpa/app_segments.py | mitchos/pyZscaler | c4c83345bf26e2883b9555d20a90f9387a45e05e | [
"MIT"
] | 62 | 2021-07-21T03:42:09.000Z | 2022-03-18T09:08:20.000Z | pyzscaler/zpa/app_segments.py | mitchos/pyZscaler | c4c83345bf26e2883b9555d20a90f9387a45e05e | [
"MIT"
] | 8 | 2021-09-11T08:14:53.000Z | 2022-03-25T20:14:41.000Z | from box import Box, BoxList
from restfly.endpoint import APIEndpoint
from pyzscaler.utils import Iterator, add_id_groups, convert_keys, snake_to_camel
class AppSegmentsAPI(APIEndpoint):
    """CRUD operations for ZPA application segments (``/application`` endpoints)."""
    # Params that need reformatting: simplified snake_case id-list arguments
    # mapped onto the API's nested camelCase object lists by add_id_groups.
    reformat_params = [
        ("clientless_app_ids", "clientlessApps"),
        ("server_group_ids", "serverGroups"),
    ]
    def list_segments(self, **kwargs) -> BoxList:
        """
        Retrieve all configured application segments.

        Returns:
            :obj:`BoxList`: List of application segments.

        Examples:
            >>> app_segments = zpa.app_segments.list_segments()

        """
        return BoxList(Iterator(self._api, "application", **kwargs))
    def get_segment(self, segment_id: str) -> Box:
        """
        Get information for an application segment.

        Args:
            segment_id (str):
                The unique identifier for the application segment.

        Returns:
            :obj:`Box`: The application segment resource record.

        Examples:
            >>> app_segment = zpa.app_segments.get_segment('99999')

        """
        return self._get(f"application/{segment_id}")
    def delete_segment(self, segment_id: str) -> int:
        """
        Delete an application segment.

        Args:
            segment_id (str):
                The unique identifier for the application segment.

        Returns:
            :obj:`int`: The HTTP status code of the delete operation.

        Examples:
            >>> zpa.app_segments.delete_segment('99999')

        """
        return self._delete(f"application/{segment_id}").status_code
    def add_segment(
        self,
        name: str,
        domain_names: list,
        segment_group_id: str,
        server_group_ids: list,
        tcp_ports: list = None,
        udp_ports: list = None,
        **kwargs,
    ) -> Box:
        """
        Create an application segment.

        Args:
            segment_group_id (str):
                The unique identifier for the segment group this application segment belongs to.
            udp_ports (:obj:`list` of :obj:`str`):
                List of udp port range pairs, e.g. ['35000', '35000'] for port 35000.
            tcp_ports (:obj:`list` of :obj:`str`):
                List of tcp port range pairs, e.g. ['22', '22'] for port 22-22, ['80', '100'] for 80-100.
            domain_names (:obj:`list` of :obj:`str`):
                List of domain names or IP addresses for the application segment.
            name (str):
                The name of the application segment.
            server_group_ids (:obj:`list` of :obj:`str`):
                The list of server group IDs that belong to this application segment.
            **kwargs:
                Optional keyword args.

        Keyword Args:
            bypass_type (str):
                The type of bypass for the Application Segment. Accepted values are `ALWAYS`, `NEVER` and `ON_NET`.
            clientless_app_ids (:obj:`list`):
                List of unique IDs for clientless apps to associate with this Application Segment.
            config_space (str):
                The config space for this Application Segment. Accepted values are `DEFAULT` and `SIEM`.
            default_idle_timeout (int):
                The Default Idle Timeout for the Application Segment.
            default_max_age (int):
                The Default Max Age for the Application Segment.
            description (str):
                Additional information about this Application Segment.
            double_encrypt (bool):
                Double Encrypt the Application Segment micro-tunnel.
            enabled (bool):
                Enable the Application Segment.
            health_check_type (str):
                Set the Health Check Type. Accepted values are `DEFAULT` and `NONE`.
            health_reporting (str):
                Set the Health Reporting. Accepted values are `NONE`, `ON_ACCESS` and `CONTINUOUS`.
            ip_anchored (bool):
                Enable IP Anchoring for this Application Segment.
            is_cname_enabled (bool):
                Enable CNAMEs for this Application Segment.
            passive_health_enabled (bool):
                Enable Passive Health Checks for this Application Segment.

        Returns:
            :obj:`Box`: The newly created application segment resource record.

        Examples:
            Add a new application segment for example.com, ports 8080-8085.

            >>> zpa.app_segments.add_segment('new_app_segment',
            ...    domain_names=['example.com'],
            ...    segment_group_id='99999',
            ...    tcp_ports=['8080', '8085'],
            ...    server_group_ids=['99999', '88888'])

        """
        # Initialise payload
        payload = {
            "name": name,
            "domainNames": domain_names,
            "tcpPortRanges": tcp_ports,
            "udpPortRanges": udp_ports,
            "segmentGroupId": segment_group_id,
            "serverGroups": [{"id": group_id} for group_id in server_group_ids],
        }
        # Map simplified id-list params onto the API's nested shapes.
        add_id_groups(self.reformat_params, kwargs, payload)
        # Add optional parameters to payload
        for key, value in kwargs.items():
            payload[snake_to_camel(key)] = value
        return self._post("application", json=payload)
    def update_segment(self, segment_id: str, **kwargs) -> Box:
        """
        Update an application segment.

        Args:
            segment_id (str):
                The unique identifier for the application segment.
            **kwargs:
                Optional params.

        Keyword Args:
            bypass_type (str):
                The type of bypass for the Application Segment. Accepted values are `ALWAYS`, `NEVER` and `ON_NET`.
            clientless_app_ids (:obj:`list`):
                List of unique IDs for clientless apps to associate with this Application Segment.
            config_space (str):
                The config space for this Application Segment. Accepted values are `DEFAULT` and `SIEM`.
            default_idle_timeout (int):
                The Default Idle Timeout for the Application Segment.
            default_max_age (int):
                The Default Max Age for the Application Segment.
            description (str):
                Additional information about this Application Segment.
            domain_names (:obj:`list` of :obj:`str`):
                List of domain names or IP addresses for the application segment.
            double_encrypt (bool):
                Double Encrypt the Application Segment micro-tunnel.
            enabled (bool):
                Enable the Application Segment.
            health_check_type (str):
                Set the Health Check Type. Accepted values are `DEFAULT` and `NONE`.
            health_reporting (str):
                Set the Health Reporting. Accepted values are `NONE`, `ON_ACCESS` and `CONTINUOUS`.
            ip_anchored (bool):
                Enable IP Anchoring for this Application Segment.
            is_cname_enabled (bool):
                Enable CNAMEs for this Application Segment.
            name (str):
                The name of the application segment.
            passive_health_enabled (bool):
                Enable Passive Health Checks for this Application Segment.
            segment_group_id (str):
                The unique identifier for the segment group this application segment belongs to.
            server_group_ids (:obj:`list` of :obj:`str`):
                The list of server group IDs that belong to this application segment.
            tcp_ports (:obj:`list` of :obj:`str`):
                List of tcp port range pairs, e.g. ['22', '22'] for port 22-22, ['80', '100'] for 80-100.
            udp_ports (:obj:`list` of :obj:`str`):
                List of udp port range pairs, e.g. ['35000', '35000'] for port 35000.

        Returns:
            :obj:`Box`: The updated application segment resource record, or
            ``None`` if the API did not respond with HTTP 204.

        Examples:
            Rename the application segment for example.com.

            >>> zpa.app_segments.update_segment('99999',
            ...    name='new_app_name')

        """
        # Set payload to value of existing record and recursively convert nested dict keys from snake_case
        # to camelCase.
        payload = convert_keys(self.get_segment(segment_id))
        # Reformat keys that we've simplified for our users
        add_id_groups(self.reformat_params, kwargs, payload)
        # Add optional parameters to payload
        for key, value in kwargs.items():
            payload[snake_to_camel(key)] = value
        resp = self._put(f"application/{segment_id}", json=payload).status_code
        # Return the object if it was updated successfully (implicitly
        # returns None on any other status code).
        if resp == 204:
            return self.get_segment(segment_id)
| 38.995595 | 115 | 0.585969 | from box import Box, BoxList
from restfly.endpoint import APIEndpoint
from pyzscaler.utils import Iterator, add_id_groups, convert_keys, snake_to_camel
class AppSegmentsAPI(APIEndpoint):
reformat_params = [
("clientless_app_ids", "clientlessApps"),
("server_group_ids", "serverGroups"),
]
def list_segments(self, **kwargs) -> BoxList:
return BoxList(Iterator(self._api, "application", **kwargs))
def get_segment(self, segment_id: str) -> Box:
return self._get(f"application/{segment_id}")
def delete_segment(self, segment_id: str) -> int:
return self._delete(f"application/{segment_id}").status_code
def add_segment(
self,
name: str,
domain_names: list,
segment_group_id: str,
server_group_ids: list,
tcp_ports: str = None,
udp_ports: str = None,
**kwargs,
) -> Box:
payload = {
"name": name,
"domainNames": domain_names,
"tcpPortRanges": tcp_ports,
"udpPortRanges": udp_ports,
"segmentGroupId": segment_group_id,
"serverGroups": [{"id": group_id} for group_id in server_group_ids],
}
add_id_groups(self.reformat_params, kwargs, payload)
for key, value in kwargs.items():
payload[snake_to_camel(key)] = value
return self._post("application", json=payload)
def update_segment(self, segment_id: str, **kwargs) -> Box:
payload = convert_keys(self.get_segment(segment_id))
add_id_groups(self.reformat_params, kwargs, payload)
# Add optional parameters to payload
for key, value in kwargs.items():
payload[snake_to_camel(key)] = value
resp = self._put(f"application/{segment_id}", json=payload).status_code
# Return the object if it was updated successfully
if resp == 204:
return self.get_segment(segment_id)
| true | true |
1c31d499886ee8af2326cc361c43981590242571 | 215 | py | Python | genome/genome/doctype/diseases/test_diseases.py | Havenir/genome | 249469e7743af7cab05938c13c2fbc1576b75233 | [
"MIT"
] | 1 | 2020-12-24T04:52:06.000Z | 2020-12-24T04:52:06.000Z | genome/genome/doctype/diseases/test_diseases.py | Havenir/genome | 249469e7743af7cab05938c13c2fbc1576b75233 | [
"MIT"
] | null | null | null | genome/genome/doctype/diseases/test_diseases.py | Havenir/genome | 249469e7743af7cab05938c13c2fbc1576b75233 | [
"MIT"
] | 4 | 2020-09-17T06:05:31.000Z | 2021-03-04T06:23:40.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Accurate Systems and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestDiseases(unittest.TestCase):
    """Placeholder test suite for the Diseases DocType; no tests implemented yet."""
| 19.545455 | 55 | 0.772093 |
from __future__ import unicode_literals
import frappe
import unittest
class TestDiseases(unittest.TestCase):
pass
| true | true |
1c31d63a94965d0f41ae0edd1005c4fcf1cd168f | 814 | py | Python | internal/deprecated/internal/preprocessing/patch_preprocess.py | lucasace/caer | e077a81e8d5bb3d38039ff9289a93996b1133411 | [
"MIT"
] | null | null | null | internal/deprecated/internal/preprocessing/patch_preprocess.py | lucasace/caer | e077a81e8d5bb3d38039ff9289a93996b1133411 | [
"MIT"
] | null | null | null | internal/deprecated/internal/preprocessing/patch_preprocess.py | lucasace/caer | e077a81e8d5bb3d38039ff9289a93996b1133411 | [
"MIT"
] | 1 | 2021-01-01T10:37:55.000Z | 2021-01-01T10:37:55.000Z | # _____ ______ _____
# / ____/ /\ | ____ | __ \
# | | / \ | |__ | |__) | Caer - Modern Computer Vision
# | | / /\ \ | __| | _ / Languages: Python, C, C++, Cuda
# | |___ / ____ \ | |____ | | \ \ http://github.com/jasmcaus/caer
# \_____\/_/ \_ \______ |_| \_\
# Licensed under the MIT License <http://opensource.org/licenses/MIT>
# SPDX-License-Identifier: MIT
# Copyright (c) 2020-2021 The Caer Authors <http://github.com/jasmcaus>
from ._patches import extract_patches_2d
__all__ = [
'PatchPreprocess'
]
class PatchPreprocess:
    # Extracts a single patch of size (height, width) from an image via
    # extract_patches_2d; presumably used for training-data augmentation --
    # TODO confirm against callers.
    def __init__(self, width, height):
        # Target patch dimensions in pixels.
        self.width = width
        self.height = height
    def patch_preprocess(self, image):
        # extract_patches_2d takes (height, width); max_patches=1 yields an
        # array containing one patch, so [0] unwraps it.
        return extract_patches_2d(image, (self.height, self.width), max_patches=1)[0] | 30.148148 | 85 | 0.593366 |
from ._patches import extract_patches_2d
__all__ = [
'PatchPreprocess'
]
class PatchPreprocess:
def __init__(self, width, height):
self.width = width
self.height = height
def patch_preprocess(self, image):
return extract_patches_2d(image, (self.height, self.width), max_patches=1)[0] | true | true |
1c31d678f356b6d1601126ce5cbf097e2f52d9ea | 4,956 | py | Python | be/.metadata/.plugins/org.eclipse.wildwebdeveloper.embedder.node/node-v14.15.4-win-x64/node_modules/npm/node_modules/node-gyp/gyp/tools/pretty_gyp.py | parksey/- | 9d68e32781e4880f7c7230831908e3a208fc1751 | [
"MIT"
] | 4 | 2020-11-25T12:51:46.000Z | 2022-03-23T15:58:08.000Z | be/.metadata/.plugins/org.eclipse.wildwebdeveloper.embedder.node/node-v14.15.4-win-x64/node_modules/npm/node_modules/node-gyp/gyp/tools/pretty_gyp.py | parksey/- | 9d68e32781e4880f7c7230831908e3a208fc1751 | [
"MIT"
] | null | null | null | be/.metadata/.plugins/org.eclipse.wildwebdeveloper.embedder.node/node-v14.15.4-win-x64/node_modules/npm/node_modules/node-gyp/gyp/tools/pretty_gyp.py | parksey/- | 9d68e32781e4880f7c7230831908e3a208fc1751 | [
"MIT"
] | 1 | 2020-11-25T12:51:49.000Z | 2020-11-25T12:51:49.000Z | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pretty-prints the contents of a GYP file."""
from __future__ import print_function
import sys
import re
# Regex to remove comments when we're counting braces.
COMMENT_RE = re.compile(r'\s*#.*')
# Regex to remove quoted strings when we're counting braces.
# It takes into account quoted quotes, and makes sure that the quotes match.
# NOTE: It does not handle quotes that span more than one line, or
# cases where an escaped quote is preceded by an escaped backslash.
# Named group 'q' captures the opening quote character; the lookbehind
# rejects a quote preceded by a single (unescaped) backslash.
QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
QUOTE_RE = re.compile(QUOTE_RE_STR)
def comment_replace(matchobj):
  """Replace the text after a '#' with an equal-length run of '#' chars."""
  prefix, hash_char, comment = matchobj.group(1, 2, 3)
  return "%s%s%s" % (prefix, hash_char, '#' * len(comment))
def mask_comments(input):
  """Mask comment text so we skip braces inside comments.

  Each '#' comment's text is replaced by an equal-length run of '#'
  characters (via comment_replace), preserving column positions.
  (The previous docstring was copy-pasted from mask_quotes and wrong.)
  """
  search_re = re.compile(r'(.*?)(#)(.*)')
  return [search_re.sub(comment_replace, line) for line in input]
def quote_replace(matchobj):
  """Replace quoted content with 'x' characters, keeping prefix and quotes."""
  prefix = matchobj.group(1)
  quote = matchobj.group(2)
  body = matchobj.group(3)
  return prefix + quote + 'x' * len(body) + quote
def mask_quotes(input):
  """Mask the quoted strings so we skip braces inside quoted strings."""
  # The (.*?) prefix group lets quote_replace re-emit the text before the
  # opening quote unchanged; QUOTE_RE_STR supplies groups 2 (quote) and 3
  # (quoted body).
  search_re = re.compile(r'(.*?)' + QUOTE_RE_STR)
  return [search_re.sub(quote_replace, line) for line in input]
def do_split(input, masked_input, search_re):
  """Split each line (and its mask, kept in lockstep) at every match of
  search_re, returning (lines, masked_lines).

  A literal two-character marker '\\n' is inserted at each split point and
  later used to break the line apart, so real newlines are never introduced.
  """
  marker = r'\n'  # literal backslash + 'n', not a newline
  out_lines = []
  out_masked = []
  for raw, masked in zip(input, masked_input):
    match = search_re.match(masked)
    while match:
      at = len(match.group(1))
      raw = raw[:at] + marker + raw[at:]
      masked = masked[:at] + marker + masked[at:]
      match = search_re.match(masked)
    out_lines.extend(raw.split(marker))
    out_masked.extend(masked.split(marker))
  return (out_lines, out_masked)
def split_double_braces(input):
  """Masks out the quotes and comments, and then splits appropriate
  lines (lines that match the double_*_brace re's above) before
  indenting them below.

  These are used to split lines which have multiple braces on them, so
  that the indentation looks prettier when all laid out (e.g. closing
  braces make a nice diagonal line).
  """
  double_open_brace_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])')
  double_close_brace_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])')
  # Mask quotes first, then comments, so braces inside either are ignored
  # when searching for split points.
  masked_input = mask_quotes(input)
  masked_input = mask_comments(masked_input)
  # Split at double-open braces first, then at double-close braces; note the
  # original (unmasked) lines are split, with the mask kept in lockstep.
  (output, mask_output) = do_split(input, masked_input, double_open_brace_re)
  (output, mask_output) = do_split(output, mask_output, double_close_brace_re)
  return output
def count_braces(line):
  """keeps track of the number of braces on a given line and returns the result.

  It starts at zero and subtracts for closed braces, and adds for open braces.
  Returns (count, after) where `after` tells the caller whether to apply the
  indentation change after printing this line rather than before it.
  """
  open_braces = ['[', '(', '{']
  close_braces = [']', ')', '}']
  closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$')
  cnt = 0
  # Remove comments and blank out quoted strings so braces inside them are
  # not counted.
  stripline = COMMENT_RE.sub(r'', line)
  stripline = QUOTE_RE.sub(r"''", stripline)
  for char in stripline:
    for brace in open_braces:
      if char == brace:
        cnt += 1
    for brace in close_braces:
      if char == brace:
        cnt -= 1
  # Net opens: indent only lines *after* this one.
  after = False
  if cnt > 0:
    after = True
  # This catches the special case of a closing brace having something
  # other than just whitespace ahead of it -- we don't want to
  # unindent that until after this line is printed so it stays with
  # the previous indentation level.
  if cnt < 0 and closing_prefix_re.match(stripline):
    after = True
  return (cnt, after)
def prettyprint_input(lines):
  """Does the main work of indenting the input based on the brace counts.

  Prints each line to stdout, re-indented by 2 spaces per open brace level.
  Comment-only lines pass through unchanged.
  (Removed the unused `last_line` variable, which was assigned but never read.)
  """
  indent = 0
  basic_offset = 2
  for line in lines:
    if COMMENT_RE.match(line):
      print(line)
    else:
      line = line.strip('\r\n\t ')  # Otherwise doesn't strip \r on Unix.
      if len(line) > 0:
        (brace_diff, after) = count_braces(line)
        if brace_diff != 0:
          if after:
            # Indentation change applies to the lines that follow.
            print(" " * (basic_offset * indent) + line)
            indent += brace_diff
          else:
            # Indentation change applies to this line itself.
            indent += brace_diff
            print(" " * (basic_offset * indent) + line)
        else:
          print(" " * (basic_offset * indent) + line)
      else:
        print("")
def main():
  """Read a GYP file (argv[1], or stdin if absent) and pretty-print it.

  Returns 0 on success; used as the process exit code.
  """
  if len(sys.argv) > 1:
    # Use a context manager so the file handle is closed (the original
    # open(...).read() leaked it).
    with open(sys.argv[1]) as in_file:
      data = in_file.read().splitlines()
  else:
    data = sys.stdin.read().splitlines()
  # Split up the double braces.
  lines = split_double_braces(data)
  # Indent and print the output.
  prettyprint_input(lines)
  return 0
if __name__ == '__main__':
  sys.exit(main())
| 31.367089 | 81 | 0.624496 |
from __future__ import print_function
import sys
import re
COMMENT_RE = re.compile(r'\s*
# Regex to remove quoted strings when we're counting braces.
QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
QUOTE_RE = re.compile(QUOTE_RE_STR)
def comment_replace(matchobj):
return matchobj.group(1) + matchobj.group(2) + '#' * len(matchobj.group(3))
def mask_comments(input):
search_re = re.compile(r'(.*?)(#)(.*)')
return [search_re.sub(comment_replace, line) for line in input]
def quote_replace(matchobj):
return "%s%s%s%s" % (matchobj.group(1),
matchobj.group(2),
'x'*len(matchobj.group(3)),
matchobj.group(2))
def mask_quotes(input):
search_re = re.compile(r'(.*?)' + QUOTE_RE_STR)
return [search_re.sub(quote_replace, line) for line in input]
def do_split(input, masked_input, search_re):
output = []
mask_output = []
for (line, masked_line) in zip(input, masked_input):
m = search_re.match(masked_line)
while m:
split = len(m.group(1))
line = line[:split] + r'\n' + line[split:]
masked_line = masked_line[:split] + r'\n' + masked_line[split:]
m = search_re.match(masked_line)
output.extend(line.split(r'\n'))
mask_output.extend(masked_line.split(r'\n'))
return (output, mask_output)
def split_double_braces(input):
double_open_brace_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])')
double_close_brace_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])')
masked_input = mask_quotes(input)
masked_input = mask_comments(masked_input)
(output, mask_output) = do_split(input, masked_input, double_open_brace_re)
(output, mask_output) = do_split(output, mask_output, double_close_brace_re)
return output
def count_braces(line):
open_braces = ['[', '(', '{']
close_braces = [']', ')', '}']
closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$')
cnt = 0
stripline = COMMENT_RE.sub(r'', line)
stripline = QUOTE_RE.sub(r"''", stripline)
for char in stripline:
for brace in open_braces:
if char == brace:
cnt += 1
for brace in close_braces:
if char == brace:
cnt -= 1
after = False
if cnt > 0:
after = True
# This catches the special case of a closing brace having something
# other than just whitespace ahead of it -- we don't want to
# unindent that until after this line is printed so it stays with
# the previous indentation level.
if cnt < 0 and closing_prefix_re.match(stripline):
after = True
return (cnt, after)
def prettyprint_input(lines):
indent = 0
basic_offset = 2
last_line = ""
for line in lines:
if COMMENT_RE.match(line):
print(line)
else:
line = line.strip('\r\n\t ') # Otherwise doesn't strip \r on Unix.
if len(line) > 0:
(brace_diff, after) = count_braces(line)
if brace_diff != 0:
if after:
print(" " * (basic_offset * indent) + line)
indent += brace_diff
else:
indent += brace_diff
print(" " * (basic_offset * indent) + line)
else:
print(" " * (basic_offset * indent) + line)
else:
print("")
last_line = line
def main():
if len(sys.argv) > 1:
data = open(sys.argv[1]).read().splitlines()
else:
data = sys.stdin.read().splitlines()
# Split up the double braces.
lines = split_double_braces(data)
# Indent and print the output.
prettyprint_input(lines)
return 0
if __name__ == '__main__':
sys.exit(main())
| true | true |
1c31d7df3aa58c7a4c130b8a6891261ed6481aed | 522 | py | Python | launch.py | fanzhe98/FaceRecoCamera | 3a5b5018e26e07a98036ca12e79d263e23e154f8 | [
"MIT"
] | null | null | null | launch.py | fanzhe98/FaceRecoCamera | 3a5b5018e26e07a98036ca12e79d263e23e154f8 | [
"MIT"
] | null | null | null | launch.py | fanzhe98/FaceRecoCamera | 3a5b5018e26e07a98036ca12e79d263e23e154f8 | [
"MIT"
] | 1 | 2018-10-20T02:09:45.000Z | 2018-10-20T02:09:45.000Z | from flask import Flask, render_template
import os
app = Flask(__name__)
@app.route("/")
def hello():
    """Render Test.html with portrait names (var1) and their web paths (var2).

    Scans ../yilun/static/Portrait/ for .jpg/.jpeg files on disk; the
    template paths are always emitted with a .jpg extension under
    /static/Portrait/.
    """
    portrait_dir = "../yilun/static/Portrait/"
    names = []
    web_paths = []
    for entry in os.listdir(portrait_dir):
        extension = os.path.splitext(entry)[1].lower()
        if extension not in ('.jpg', '.jpeg'):
            continue
        stem = entry.split(".")[0]
        names.append(stem)
        web_paths.append('/static/Portrait/' + stem + '.jpg')
    return render_template('Test.html', var1=names, var2=web_paths)
if __name__ == '__main__':
app.run()
| 23.727273 | 74 | 0.595785 | from flask import Flask, render_template
import os
app = Flask(__name__)
@app.route("/")
def hello():
DIRECTORY = "../yilun/static/Portrait/"
nameList = []
pathList = []
for f in os.listdir(DIRECTORY):
if os.path.splitext(f)[1].lower() in ('.jpg', '.jpeg'):
nameList.append(f.split(".")[0])
pathList.append('/static/Portrait/'+ f.split(".")[0] + '.jpg')
return render_template('Test.html',var1 = nameList, var2 = pathList)
if __name__ == '__main__':
app.run()
| true | true |
1c31d870a381eeddacc43630762e03a91786e791 | 1,980 | py | Python | tests/test_frequency.py | kyokley/git-hammer | e72aa0f8cee4ab5b30ba2502be936660c1385854 | [
"Apache-2.0"
] | 117 | 2019-01-21T09:40:26.000Z | 2022-01-19T10:04:09.000Z | tests/test_frequency.py | kyokley/git-hammer | e72aa0f8cee4ab5b30ba2502be936660c1385854 | [
"Apache-2.0"
] | 28 | 2019-01-21T15:26:41.000Z | 2021-04-12T22:13:01.000Z | tests/test_frequency.py | kyokley/git-hammer | e72aa0f8cee4ab5b30ba2502be936660c1385854 | [
"Apache-2.0"
] | 6 | 2019-05-12T07:03:23.000Z | 2020-04-29T06:17:14.000Z | import datetime
import unittest
from githammer import Frequency
class FrequencyTest(unittest.TestCase):
    """Tests for githammer's Frequency interval helpers.

    Exercises, for each of daily/weekly/monthly/yearly:
      * start_of_interval(): snap a timestamp back to its interval start;
      * next_instance(): step from one interval start to the next.
    All fixtures are timezone-aware UTC datetimes.
    """
    def setUp(self) -> None:
        # Echo the test id so interleaved stdout is attributable to a test.
        print()
        print(self.id())
        # Arbitrary mid-interval timestamp (not on any boundary).
        self.initial_date = datetime.datetime(2019, 10, 10, 10, 10, 10, tzinfo=datetime.timezone.utc)
        # Start of 2019 (year/month/day boundary).
        self.year_start_date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
        # First Monday of 2019 (the weekly expectations below imply
        # Monday-based weeks).
        self.year_start_week_date = datetime.datetime(2019, 1, 7, 0, 0, 0, tzinfo=datetime.timezone.utc)
    def test_correct_start_of_interval(self):
        """start_of_interval() truncates to midnight / Monday / the 1st / Jan 1."""
        self.assertEqual(Frequency.daily.start_of_interval(self.initial_date),
                         datetime.datetime(2019, 10, 10, 0, 0, 0, tzinfo=datetime.timezone.utc))
        self.assertEqual(Frequency.weekly.start_of_interval(self.initial_date),
                         datetime.datetime(2019, 10, 7, 0, 0, 0, tzinfo=datetime.timezone.utc))
        self.assertEqual(Frequency.monthly.start_of_interval(self.initial_date),
                         datetime.datetime(2019, 10, 1, 0, 0, 0, tzinfo=datetime.timezone.utc))
        self.assertEqual(Frequency.yearly.start_of_interval(self.initial_date),
                         datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc))
    def test_correct_next_instance(self):
        """next_instance() advances an interval start by one day/week/month/year."""
        self.assertEqual(Frequency.daily.next_instance(self.year_start_date),
                         datetime.datetime(2019, 1, 2, 0, 0, 0, tzinfo=datetime.timezone.utc))
        self.assertEqual(Frequency.weekly.next_instance(self.year_start_week_date),
                         datetime.datetime(2019, 1, 14, 0, 0, 0, tzinfo=datetime.timezone.utc))
        self.assertEqual(Frequency.monthly.next_instance(self.year_start_date),
                         datetime.datetime(2019, 2, 1, 0, 0, 0, tzinfo=datetime.timezone.utc))
        self.assertEqual(Frequency.yearly.next_instance(self.year_start_date),
                         datetime.datetime(2020, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc))
| 58.235294 | 104 | 0.664646 | import datetime
import unittest
from githammer import Frequency
class FrequencyTest(unittest.TestCase):
def setUp(self) -> None:
print()
print(self.id())
self.initial_date = datetime.datetime(2019, 10, 10, 10, 10, 10, tzinfo=datetime.timezone.utc)
self.year_start_date = datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
self.year_start_week_date = datetime.datetime(2019, 1, 7, 0, 0, 0, tzinfo=datetime.timezone.utc)
def test_correct_start_of_interval(self):
self.assertEqual(Frequency.daily.start_of_interval(self.initial_date),
datetime.datetime(2019, 10, 10, 0, 0, 0, tzinfo=datetime.timezone.utc))
self.assertEqual(Frequency.weekly.start_of_interval(self.initial_date),
datetime.datetime(2019, 10, 7, 0, 0, 0, tzinfo=datetime.timezone.utc))
self.assertEqual(Frequency.monthly.start_of_interval(self.initial_date),
datetime.datetime(2019, 10, 1, 0, 0, 0, tzinfo=datetime.timezone.utc))
self.assertEqual(Frequency.yearly.start_of_interval(self.initial_date),
datetime.datetime(2019, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc))
def test_correct_next_instance(self):
self.assertEqual(Frequency.daily.next_instance(self.year_start_date),
datetime.datetime(2019, 1, 2, 0, 0, 0, tzinfo=datetime.timezone.utc))
self.assertEqual(Frequency.weekly.next_instance(self.year_start_week_date),
datetime.datetime(2019, 1, 14, 0, 0, 0, tzinfo=datetime.timezone.utc))
self.assertEqual(Frequency.monthly.next_instance(self.year_start_date),
datetime.datetime(2019, 2, 1, 0, 0, 0, tzinfo=datetime.timezone.utc))
self.assertEqual(Frequency.yearly.next_instance(self.year_start_date),
datetime.datetime(2020, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc))
| true | true |
1c31d8afda384495bd9a283d63ddb431e592d18f | 157 | py | Python | mdpreview/util.py | kevr/vim-mdpreview | c6b0baeeacfceb279e1323307095c0c2f9d2de8d | [
"MIT"
] | 1 | 2021-07-07T00:51:19.000Z | 2021-07-07T00:51:19.000Z | mdpreview/util.py | kevr/vim-mdpreview | c6b0baeeacfceb279e1323307095c0c2f9d2de8d | [
"MIT"
] | null | null | null | mdpreview/util.py | kevr/vim-mdpreview | c6b0baeeacfceb279e1323307095c0c2f9d2de8d | [
"MIT"
] | null | null | null | import os
# User's home directory. NOTE(review): assumes HOME is set -- if it is not,
# `home` is None and the os.path.join below raises; confirm intended behavior.
home = os.environ.get("HOME")
# mdpreview data directory: MDPREVIEW_PATH overrides the default of
# ~/.local/share/mdpreview.
share = os.environ.get("MDPREVIEW_PATH",
                       os.path.join(home, ".local", "share", "mdpreview"))
| 26.166667 | 74 | 0.585987 | import os
home = os.environ.get("HOME")
share = os.environ.get("MDPREVIEW_PATH",
os.path.join(home, ".local", "share", "mdpreview"))
| true | true |
1c31db7e429a217f515b4d7c307a3c801a19678c | 4,746 | py | Python | src/executor/SegmentSplitter.py | AutoDash/AutoDash | 3924795a04159f80ea3b65b2172747babd15f35f | [
"Apache-2.0"
] | 3 | 2020-02-12T01:24:46.000Z | 2020-02-13T00:50:46.000Z | src/executor/SegmentSplitter.py | AutoDash/AutoDash | 3924795a04159f80ea3b65b2172747babd15f35f | [
"Apache-2.0"
] | 32 | 2020-02-20T10:20:56.000Z | 2022-02-10T01:42:46.000Z | src/executor/SegmentSplitter.py | AutoDash/AutoDash | 3924795a04159f80ea3b65b2172747babd15f35f | [
"Apache-2.0"
] | 1 | 2020-02-22T02:47:19.000Z | 2020-02-22T02:47:19.000Z | from ..data.VideoItem import VideoItem
from ..data.VideoFile import VideoFile
from .iExecutor import iExecutor
from ..signals.SkipSignal import SkipSignal
import re
import numpy as np
class SegmentSplitter(iExecutor):
    """Executor that cuts a labelled video into fixed-length clips.

    Positive clips end at each collision location; negative clips tile the
    remaining collision-free stretches. Clip boundaries are frame indices
    relative to ``metadata.start_i``.
    """
    def __init__(self, *parents, clip_length='5s', length_threshold='3s'):
        """
        Args:
            parents: upstream executors, forwarded to iExecutor.
            clip_length: target clip duration, e.g. '5s', '1m30s', '1h'.
            length_threshold: minimum video-time spacing required between
                consecutive positive clips.
        """
        super().__init__(*parents)
        self.clip_len_s = SegmentSplitter.parse_time(clip_length)
        self.len_thresh_s = SegmentSplitter.parse_time(length_threshold)
    def split_segment(self, item):
        """Split one item's metadata into per-clip metadata clones.

        Returns a list of cloned metadata objects, one per segment, with
        bb_fields cropped to the segment and ids suffixed '-<index>'.

        Raises:
            SkipSignal: if the item has no collision locations or no boxes.
        """
        metadata = iExecutor.get_metadata(item)
        video = VideoFile(item.filepath)
        # First we find the length of BBs
        bbs = metadata.bb_fields.get_bbs_as_arrs()
        collision_locations = metadata.bb_fields.collision_locations
        if len(collision_locations) < 1:
            raise SkipSignal("Item has no collision_locations")
        if len(bbs) == 0:
            raise SkipSignal("Item has no bounding boxes")
        # Default the clip window to the whole video when unset.
        if metadata.start_i is None:
            metadata.start_i = 0
        if metadata.end_i is None:
            metadata.end_i = video.true_length
        # Structured dtype: one record per bounding box observation.
        # NOTE(review): np.int / np.bool are deprecated aliases removed in
        # NumPy 1.24+; migrating to np.int_ / np.bool_ should be confirmed.
        dtype = [
            ('frame', np.int),
            ('id', np.int),
            ('class', object),
            ('x1', np.int),
            ('y1', np.int),
            ('x2', np.int),
            ('y2', np.int),
            ('has_collision', np.bool),
        ]
        bbs = np.array(bbs, dtype=dtype)
        collision_locations = np.sort(collision_locations)
        # Frames that actually carry at least one bounding box.
        frames = np.unique(bbs['frame'])
        segments = [ ]
        segments += self.create_positives(collision_locations, frames, metadata, video)
        segments += self.create_negatives(segments, collision_locations, frames, metadata, video)
        items = [ ]
        for idx, (begin, end) in enumerate(segments):
            item = metadata.clone()
            item.bb_fields.crop_range(begin, end)
            item.id = metadata.id + f'-{idx}'
            # Segment bounds are relative to the original start offset.
            item.start_i = begin + metadata.start_i
            item.end_i = end + metadata.start_i
            items.append(item)
        return items
    def create_positives(self, ALs, frames, metadata, video):
        """Build one clip-length positive segment ending at each collision.

        Args:
            ALs: sorted collision frame indices (relative to start_i).
            frames: sorted unique frame indices that have bounding boxes.

        Returns:
            List of (begin_frame, end_frame) tuples, snapped to `frames`.
        """
        cover = [ ]
        begin = 0
        for al in ALs:
            # Earliest frame a new positive may end at, given the spacing
            # threshold from the previous segment's end.
            min_end = video.get_frame_after_time_elapsed(begin + metadata.start_i, self.len_thresh_s * 1000)
            # Check for minimum range
            if al + metadata.start_i < min_end:
                continue
            # Walk back one clip length from the collision to find the start.
            begin = video.get_frame_after_time_elapsed(metadata.start_i + al, -self.clip_len_s * 1000)
            begin = max(0, begin - metadata.start_i)
            it_begin = np.searchsorted(frames, begin)
            it_end = np.searchsorted(frames, al)
            it_end = min(frames.shape[0] - 1, it_end) # Prevent out of index access for ALs with no BBs
            # Add coverage
            cover.append((frames[it_begin], frames[it_end]))
            begin = frames[it_end]
        return cover
    def create_negatives(self, positive_cover, ALs, frames, metadata, video):
        """Tile clip-length negative segments into the gaps between positives.

        Args:
            positive_cover: (begin, end) frame ranges already used by positives.
            ALs: collision locations. NOTE(review): unused in this method.

        Returns:
            List of (begin_index, end_index) tuples. NOTE(review): unlike
            create_positives these are built from searchsorted indices /
            raw frame numbers, not values snapped via frames[] -- confirm
            the intended units match.
        """
        cover = [ ]
        begin = 0
        end = frames[-1]
        # Sentinel range so the stretch after the last positive is tiled too.
        for prange in positive_cover + [(end, end)]:
            end, next_begin = prange
            it_begin = np.searchsorted(frames, begin)
            it_end = np.searchsorted(frames, end)
            # Gap duration in seconds between this gap's start and end.
            total_delta = video.get_time_delta(begin + metadata.start_i, end + metadata.start_i) / 1000
            n_covers = int(total_delta / self.clip_len_s)
            begin = next_begin
            if n_covers < 1:
                continue
            # NOTE(review): `delta` is computed but never used.
            delta = (it_end - it_begin) / n_covers
            cover_frames = [ it_begin ]
            # Step forward one clip length at a time from the gap start.
            for _ in range(0, n_covers):
                next_frame = video.get_frame_after_time_elapsed(cover_frames[-1], self.clip_len_s * 1000)
                cover_frames.append(next_frame)
            cover += [ (int(cover_frames[i]), int(cover_frames[i+1]))
                    for i in range(0, n_covers) ]
        return cover
    def parse_time(time):
        """Parse an '<H>h<M>m<S>s' duration string into whole seconds.

        Each component is optional (e.g. '5s', '1m30s', '2h').

        NOTE(review): defined without @staticmethod -- this works when called
        through the class (as __init__ does) but not on instances; also the
        parameter name shadows the stdlib `time` module.

        Raises:
            ValueError: on unparsable input or a non-positive duration.
        """
        pattern = r'(?:(\d+)h)?(?:(\d+)m)?(?:(\d+)s)?'
        result = re.match(pattern, time)
        if not result:
            raise ValueError(f'Invalid time: {time}. Expected digit followed by [smh]')
        hours, minutes, seconds = result.groups()
        # Horner-style accumulation: ((h * 60) + m) * 60 + s.
        time_s = int(hours or 0)
        time_s *= 60
        time_s += int(minutes or 0)
        time_s *= 60
        time_s += int(seconds or 0)
        if time_s <= 0:
            raise ValueError(f'Invalid time: {time}. Expected a non-zero positive value')
        return time_s
    def run(self, item: VideoItem):
        """Split the item and wrap each clip's metadata in a VideoItem
        sharing the source file path. Returns a lazy map."""
        return map(
            lambda mdi: VideoItem(mdi, filepath=item.filepath),
            self.split_segment(item)
        )
| 40.913793 | 108 | 0.578592 | from ..data.VideoItem import VideoItem
from ..data.VideoFile import VideoFile
from .iExecutor import iExecutor
from ..signals.SkipSignal import SkipSignal
import re
import numpy as np
class SegmentSplitter(iExecutor):
def __init__(self, *parents, clip_length='5s', length_threshold='3s'):
super().__init__(*parents)
self.clip_len_s = SegmentSplitter.parse_time(clip_length)
self.len_thresh_s = SegmentSplitter.parse_time(length_threshold)
def split_segment(self, item):
metadata = iExecutor.get_metadata(item)
video = VideoFile(item.filepath)
bbs = metadata.bb_fields.get_bbs_as_arrs()
collision_locations = metadata.bb_fields.collision_locations
if len(collision_locations) < 1:
raise SkipSignal("Item has no collision_locations")
if len(bbs) == 0:
raise SkipSignal("Item has no bounding boxes")
if metadata.start_i is None:
metadata.start_i = 0
if metadata.end_i is None:
metadata.end_i = video.true_length
dtype = [
('frame', np.int),
('id', np.int),
('class', object),
('x1', np.int),
('y1', np.int),
('x2', np.int),
('y2', np.int),
('has_collision', np.bool),
]
bbs = np.array(bbs, dtype=dtype)
collision_locations = np.sort(collision_locations)
frames = np.unique(bbs['frame'])
segments = [ ]
segments += self.create_positives(collision_locations, frames, metadata, video)
segments += self.create_negatives(segments, collision_locations, frames, metadata, video)
items = [ ]
for idx, (begin, end) in enumerate(segments):
item = metadata.clone()
item.bb_fields.crop_range(begin, end)
item.id = metadata.id + f'-{idx}'
item.start_i = begin + metadata.start_i
item.end_i = end + metadata.start_i
items.append(item)
return items
def create_positives(self, ALs, frames, metadata, video):
cover = [ ]
begin = 0
for al in ALs:
min_end = video.get_frame_after_time_elapsed(begin + metadata.start_i, self.len_thresh_s * 1000)
if al + metadata.start_i < min_end:
continue
begin = video.get_frame_after_time_elapsed(metadata.start_i + al, -self.clip_len_s * 1000)
begin = max(0, begin - metadata.start_i)
it_begin = np.searchsorted(frames, begin)
it_end = np.searchsorted(frames, al)
it_end = min(frames.shape[0] - 1, it_end)
cover.append((frames[it_begin], frames[it_end]))
begin = frames[it_end]
return cover
def create_negatives(self, positive_cover, ALs, frames, metadata, video):
cover = [ ]
begin = 0
end = frames[-1]
for prange in positive_cover + [(end, end)]:
end, next_begin = prange
it_begin = np.searchsorted(frames, begin)
it_end = np.searchsorted(frames, end)
total_delta = video.get_time_delta(begin + metadata.start_i, end + metadata.start_i) / 1000
n_covers = int(total_delta / self.clip_len_s)
begin = next_begin
if n_covers < 1:
continue
delta = (it_end - it_begin) / n_covers
cover_frames = [ it_begin ]
for _ in range(0, n_covers):
next_frame = video.get_frame_after_time_elapsed(cover_frames[-1], self.clip_len_s * 1000)
cover_frames.append(next_frame)
cover += [ (int(cover_frames[i]), int(cover_frames[i+1]))
for i in range(0, n_covers) ]
return cover
def parse_time(time):
pattern = r'(?:(\d+)h)?(?:(\d+)m)?(?:(\d+)s)?'
result = re.match(pattern, time)
if not result:
raise ValueError(f'Invalid time: {time}. Expected digit followed by [smh]')
hours, minutes, seconds = result.groups()
time_s = int(hours or 0)
time_s *= 60
time_s += int(minutes or 0)
time_s *= 60
time_s += int(seconds or 0)
if time_s <= 0:
raise ValueError(f'Invalid time: {time}. Expected a non-zero positive value')
return time_s
def run(self, item: VideoItem):
return map(
lambda mdi: VideoItem(mdi, filepath=item.filepath),
self.split_segment(item)
)
| true | true |
1c31dbe58f6a8b1be16852af637bf05ffbc5739f | 1,277 | py | Python | lizard_ext/lizardcpre.py | BjrnJhsn/lizard | 5c3f02b67f72f70f4dbdbd2e97249e0ec20d40fa | [
"MIT"
] | 1,255 | 2015-01-07T20:24:45.000Z | 2022-03-31T02:39:50.000Z | lizard_ext/lizardcpre.py | BjrnJhsn/lizard | 5c3f02b67f72f70f4dbdbd2e97249e0ec20d40fa | [
"MIT"
] | 293 | 2015-01-05T14:31:16.000Z | 2022-03-24T18:12:16.000Z | lizard_ext/lizardcpre.py | sider/lizard | 61ad3c1f9989280dfd4157c337e70e08174f7c34 | [
"MIT"
] | 217 | 2015-01-07T20:24:49.000Z | 2022-03-30T19:20:21.000Z | '''
This is an extension of lizard,
It helps to deal with C code with preprocessors that
is hard to parse. It works by always ignoring the code
between #else and #end.
'''
class LizardExtension(object):  # pylint: disable=R0903
    """Lizard extension that blanks out hard-to-parse C preprocessor
    branches: tokens inside an ``#else`` branch are replaced by their
    newlines (keeping line numbers intact), and tokens directly guarded by
    an ``#elif`` are dropped. Non-C sources pass through untouched.
    """

    ordering_index = 0

    def __call__(self, tokens, reader):
        """Return `tokens` unchanged for non-C readers; otherwise a lazy
        token stream with conditional branches blanked out."""
        if "c" not in reader.ext:
            return tokens
        return self._strip_conditionals(tokens)

    def _strip_conditionals(self, tokens):
        # How many #else branches we are currently nested inside; > 0 means
        # the current tokens belong to skipped code.
        dead_else_depth = 0
        directive_stack = []
        for token in tokens:
            if token.startswith("#"):
                directive_stack.append(token)
                dead_else_depth += token.count("#else")
                if token.startswith("#endif"):
                    # Unwind back to the matching #if, undoing any #else
                    # counted along the way.
                    while directive_stack:
                        popped = directive_stack.pop()
                        dead_else_depth -= popped.count("#else")
                        if popped.startswith("#if"):
                            break
                # Emit only the directive's newlines, preserving numbering.
                yield from "\n" * token.count("\n")
            elif dead_else_depth:
                # Inside a skipped #else branch: keep the newlines only.
                yield from "\n" * token.count("\n")
            elif not (directive_stack and directive_stack[-1].startswith("#elif")):
                yield token
| 33.605263 | 73 | 0.477682 |
class LizardExtension(object):
ordering_index = 0
def __call__(self, tokens, reader):
def preprocess_tokens(tokens):
else_count = 0
if_stack = []
for token in tokens:
if token.startswith("#"):
if_stack.append(token)
else_count += token.count("#else")
if token.startswith("#endif"):
while if_stack:
last = if_stack.pop()
else_count -= last.count("#else")
if last.startswith("#if"):
break
for _ in range(token.count('\n')):
yield '\n'
elif else_count:
for _ in range(token.count('\n')):
yield '\n'
elif not (if_stack and if_stack[-1].startswith("#elif")):
yield token
if "c" not in reader.ext:
return tokens
return preprocess_tokens(tokens)
| true | true |
1c31dbec8103bddf1a2f3fe3c553d38c7d083aa5 | 4,020 | py | Python | src/si/util/im2col.py | Joao16am/si | 813ca373022fc5ee35eac69147b5567275718b46 | [
"Apache-2.0"
] | null | null | null | src/si/util/im2col.py | Joao16am/si | 813ca373022fc5ee35eac69147b5567275718b46 | [
"Apache-2.0"
] | null | null | null | src/si/util/im2col.py | Joao16am/si | 813ca373022fc5ee35eac69147b5567275718b46 | [
"Apache-2.0"
] | null | null | null | import numpy as np
def calc_pad_dims_2D(X_shape, out_dim, kernel_shape, stride):
    """Compute (top, bottom, left, right) zero-padding for a 2D convolution.

    Given an input of shape (n_ex, in_rows, in_cols, in_ch), the desired
    output spatial size, the kernel size and the stride, return the padding
    4-tuple that realizes the requested output; when the total padding is
    odd the extra pixel goes on the bottom/right.

    Raises:
        ValueError: on mistyped arguments or negative padding.
        AssertionError: if no symmetric-or-off-by-one padding can realize
            `out_dim`.
    """
    for value, label in ((X_shape, "X_shape"), (out_dim, "out_dim"),
                         (kernel_shape, "kernel_shape")):
        if not isinstance(value, tuple):
            raise ValueError("`{}` must be of type tuple".format(label))
    if not isinstance(stride, int):
        raise ValueError("`stride` must be of type int")

    fr, fc = kernel_shape
    out_rows, out_cols = out_dim
    _, in_rows, in_cols, _ = X_shape

    # Symmetric padding needed on each side (truncated toward zero).
    pr = int((stride * (out_rows - 1) + fr - in_rows) / 2)
    pc = int((stride * (out_cols - 1) + fc - in_cols) / 2)

    def _asymmetric(pad, target, in_size, field):
        # Output extent actually produced by symmetric padding `pad`.
        realized = int(1 + (in_size + 2 * pad - field) / stride)
        if realized == target:
            return pad, pad
        if realized == target - 1:
            # One pixel short: add the extra padding on the bottom/right.
            return pad, pad + 1
        raise AssertionError

    pr1, pr2 = _asymmetric(pr, out_rows, in_rows, fr)
    pc1, pc2 = _asymmetric(pc, out_cols, in_cols, fc)

    if any(np.array([pr1, pr2, pc1, pc2]) < 0):
        raise ValueError(
            "Padding cannot be less than 0. Got: {}".format((pr1, pr2, pc1, pc2))
        )
    return (pr1, pr2, pc1, pc2)
def pad2D(X, pad, kernel_shape=None, stride=None):
    """Zero-pad the row/column dimensions of a batch of 2D images.

    Args:
        X: input of shape (n_ex, in_rows, in_cols, in_ch).
        pad: int (uniform), 2-tuple (row, col), 4-tuple
            (row_top, row_bottom, col_left, col_right), or the string
            'same' (requires kernel_shape and stride).
        kernel_shape: (fr, fc); only used for 'same' padding.
        stride: convolution stride; only used for 'same' padding.

    Returns:
        (X_pad, p): the padded array and the realized padding 4-tuple.
    """
    p = pad
    if isinstance(p, int):
        p = (p, p, p, p)
    if isinstance(p, tuple):
        if len(p) == 2:
            p = (p[0], p[0], p[1], p[1])
        X_pad = np.pad(
            X,
            pad_width=((0, 0), (p[0], p[1]), (p[2], p[3]), (0, 0)),
            mode="constant",
            constant_values=0,
        )
    # Compute the correct padding dims for a 'same' convolution.
    # BUG FIX: np.pad must only run for numeric padding. Previously it ran
    # unconditionally, so p == "same" crashed inside np.pad (indexing the
    # string) and this branch was unreachable.
    if p == "same" and kernel_shape and stride is not None:
        p = calc_pad_dims_2D(
            X.shape, X.shape[1:3], kernel_shape, stride)
        X_pad, p = pad2D(X, p)
    return X_pad, p
def _im2col_indices(X_shape, fr, fc, p, s):
pr1, pr2, pc1, pc2 = p
n_ex, n_in, in_rows, in_cols = X_shape
out_rows = (in_rows + pr1 + pr2 - fr) // s + 1
out_cols = (in_cols + pc1 + pc2 - fc) // s + 1
if any([out_rows <= 0, out_cols <= 0]):
raise ValueError(
"Dimension mismatch during convolution: "
"out_rows = {}, out_cols = {}".format(out_rows, out_cols)
)
i0 = np.repeat(np.arange(fr), fc)
i0 = np.tile(i0, n_in)
i1 = s * np.repeat(np.arange(out_rows), out_cols)
j0 = np.tile(np.arange(fc), fr * n_in)
j1 = s * np.tile(np.arange(out_cols), out_rows)
i = i0.reshape(-1, 1) + i1.reshape(1, -1)
j = j0.reshape(-1, 1) + j1.reshape(1, -1)
k = np.repeat(np.arange(n_in), fr * fc).reshape(-1, 1)
return k, i, j
def im2col(X, W_shape, pad, stride):
    """Rearrange every sliding convolution window of X into a column.

    Args:
        X: input batch of shape (n_ex, in_rows, in_cols, in_ch).
        W_shape: kernel shape (fr, fc, n_in, n_out).
        pad: int, 2/4-tuple, or 'same' (forwarded to pad2D).
        stride: convolution stride.

    Returns:
        (X_col, p): column matrix of shape (fr * fc * n_in, n_windows) and
        the realized padding 4-tuple.
    """
    fr, fc, n_in, _ = W_shape
    n_ex, in_rows, in_cols, n_in = X.shape
    # Zero-pad, resolving 'same' padding against the kernel and stride.
    X_pad, realized_pad = pad2D(X, pad, W_shape[:2], stride=stride)
    # Work channels-first so the fancy indexing matches _im2col_indices.
    channels_first = X_pad.transpose(0, 3, 1, 2)
    k, i, j = _im2col_indices((n_ex, n_in, in_rows, in_cols), fr, fc,
                              realized_pad, stride)
    patches = channels_first[:, k, i, j]
    return patches.transpose(1, 2, 0).reshape(fr * fc * n_in, -1), realized_pad
def col2im(X_col, X_shape, W_shape, pad, stride):
    """Inverse of im2col: scatter-add column patches back into an image batch.

    Overlapping window contributions are summed via np.add.at, then the
    padding recorded in `pad` is stripped.

    Args:
        X_col: column matrix of shape (fr * fc * n_in, n_windows).
        X_shape: original (n_ex, in_rows, in_cols, n_in).
        W_shape: kernel shape (fr, fc, n_in, n_out).
        pad: padding 4-tuple (top, bottom, left, right).
        stride: convolution stride.

    Returns:
        Array of shape (n_ex, n_in, in_rows, in_cols) -- channels-first.
    """
    pad_top, pad_bottom, pad_left, pad_right = pad
    fr, fc, n_in, _ = W_shape
    n_ex, in_rows, in_cols, n_in = X_shape
    padded = np.zeros((n_ex, n_in, in_rows + pad_top + pad_bottom,
                       in_cols + pad_left + pad_right))
    k, i, j = _im2col_indices((n_ex, n_in, in_rows, in_cols), fr, fc, pad, stride)
    patches = X_col.reshape(n_in * fr * fc, -1, n_ex).transpose(2, 0, 1)
    # Unbuffered scatter-add: overlapping windows accumulate correctly.
    np.add.at(padded, (slice(None), k, i, j), patches)
    # Slice the padding back off; a zero pad maps to an open-ended slice.
    row_stop = -pad_bottom if pad_bottom else None
    col_stop = -pad_right if pad_right else None
    return padded[:, :, pad_top:row_stop, pad_left:col_stop]
| 30 | 81 | 0.574129 | import numpy as np
def calc_pad_dims_2D(X_shape, out_dim, kernel_shape, stride):
if not isinstance(X_shape, tuple):
raise ValueError("`X_shape` must be of type tuple")
if not isinstance(out_dim, tuple):
raise ValueError("`out_dim` must be of type tuple")
if not isinstance(kernel_shape, tuple):
raise ValueError("`kernel_shape` must be of type tuple")
if not isinstance(stride, int):
raise ValueError("`stride` must be of type int")
fr, fc = kernel_shape
out_rows, out_cols = out_dim
n_ex, in_rows, in_cols, in_ch = X_shape
pr = int((stride * (out_rows - 1) + fr - in_rows) / 2)
pc = int((stride * (out_cols - 1) + fc - in_cols) / 2)
out_rows1 = int(1 + (in_rows + 2 * pr - fr) / stride)
out_cols1 = int(1 + (in_cols + 2 * pc - fc) / stride)
pr1, pr2 = pr, pr
if out_rows1 == out_rows - 1:
pr1, pr2 = pr, pr + 1
elif out_rows1 != out_rows:
raise AssertionError
pc1, pc2 = pc, pc
if out_cols1 == out_cols - 1:
pc1, pc2 = pc, pc + 1
elif out_cols1 != out_cols:
raise AssertionError
if any(np.array([pr1, pr2, pc1, pc2]) < 0):
raise ValueError(
"Padding cannot be less than 0. Got: {}".format((pr1, pr2, pc1, pc2))
)
return (pr1, pr2, pc1, pc2)
def pad2D(X, pad, kernel_shape=None, stride=None):
p = pad
if isinstance(p, int):
p = (p, p, p, p)
if isinstance(p, tuple):
if len(p) == 2:
p = (p[0], p[0], p[1], p[1])
X_pad = np.pad(
X,
pad_width=((0, 0), (p[0], p[1]), (p[2], p[3]), (0, 0)),
mode="constant",
constant_values=0,
)
if p == "same" and kernel_shape and stride is not None:
p = calc_pad_dims_2D(
X.shape, X.shape[1:3], kernel_shape, stride)
X_pad, p = pad2D(X, p)
return X_pad, p
def _im2col_indices(X_shape, fr, fc, p, s):
pr1, pr2, pc1, pc2 = p
n_ex, n_in, in_rows, in_cols = X_shape
out_rows = (in_rows + pr1 + pr2 - fr) // s + 1
out_cols = (in_cols + pc1 + pc2 - fc) // s + 1
if any([out_rows <= 0, out_cols <= 0]):
raise ValueError(
"Dimension mismatch during convolution: "
"out_rows = {}, out_cols = {}".format(out_rows, out_cols)
)
i0 = np.repeat(np.arange(fr), fc)
i0 = np.tile(i0, n_in)
i1 = s * np.repeat(np.arange(out_rows), out_cols)
j0 = np.tile(np.arange(fc), fr * n_in)
j1 = s * np.tile(np.arange(out_cols), out_rows)
i = i0.reshape(-1, 1) + i1.reshape(1, -1)
j = j0.reshape(-1, 1) + j1.reshape(1, -1)
k = np.repeat(np.arange(n_in), fr * fc).reshape(-1, 1)
return k, i, j
def im2col(X, W_shape, pad, stride):
fr, fc, n_in, n_out = W_shape
s, p = stride, pad
n_ex, in_rows, in_cols, n_in = X.shape
X_pad, p = pad2D(X, p, W_shape[:2], stride=s)
pr1, pr2, pc1, pc2 = p
X_pad = X_pad.transpose(0, 3, 1, 2)
k, i, j = _im2col_indices((n_ex, n_in, in_rows, in_cols), fr, fc, p, s)
X_col = X_pad[:, k, i, j]
X_col = X_col.transpose(1, 2, 0).reshape(fr * fc * n_in, -1)
return X_col, p
def col2im(X_col, X_shape, W_shape, pad, stride):
s = stride
pr1, pr2, pc1, pc2 = pad
fr, fc, n_in, n_out = W_shape
n_ex, in_rows, in_cols, n_in = X_shape
X_pad = np.zeros((n_ex, n_in, in_rows + pr1 + pr2, in_cols + pc1 + pc2))
k, i, j = _im2col_indices((n_ex, n_in, in_rows, in_cols), fr, fc, pad, s)
X_col_reshaped = X_col.reshape(n_in * fr * fc, -1, n_ex)
X_col_reshaped = X_col_reshaped.transpose(2, 0, 1)
np.add.at(X_pad, (slice(None), k, i, j), X_col_reshaped)
pr2 = None if pr2 == 0 else -pr2
pc2 = None if pc2 == 0 else -pc2
return X_pad[:, :, pr1:pr2, pc1:pc2]
| true | true |
1c31dc1b2b97667d440a1d991f7121cb424b7e94 | 921 | py | Python | research/clustering/lossy_short_deck.py | keithlee96/pluribus-poker-AI | 15e52fe73dd09570e782dd0e7b9069865eb5823d | [
"MIT"
] | 113 | 2020-08-06T15:03:18.000Z | 2022-03-31T01:56:34.000Z | research/clustering/lossy_short_deck.py | jumbokun/pluribus-poker-AI | 15e52fe73dd09570e782dd0e7b9069865eb5823d | [
"MIT"
] | null | null | null | research/clustering/lossy_short_deck.py | jumbokun/pluribus-poker-AI | 15e52fe73dd09570e782dd0e7b9069865eb5823d | [
"MIT"
] | 42 | 2020-08-17T15:51:30.000Z | 2022-03-31T17:10:44.000Z | """
Simple script for converting card combos and clusters into a dictionary where a tuple of cards are the keys
and the cluster id is the value
Cd into clustering to run, it will attempt to drop off files in clustering/data
"""
import dill as pickle
from tqdm import tqdm
with open("data/information_abstraction.pkl", "rb") as file:
data = pickle.load(file)
if __name__ == "__main__":
rounds = ["flop", "turn", "river"]
for round in rounds:
print(f"Getting indices for {round}")
card_combos = data[round]
clusters = data["_" + round + "_clusters"]
lossy_lookup = {}
for i, card_combo in enumerate(tqdm(card_combos)):
lossy_lookup[tuple(card_combo)] = clusters[i]
location = "data/" + round + "_lossy.pkl"
with open(location, "wb") as file:
pickle.dump(lossy_lookup, file)
print(f"Dumped {round} data to {location}")
| 35.423077 | 107 | 0.652552 | import dill as pickle
from tqdm import tqdm
with open("data/information_abstraction.pkl", "rb") as file:
data = pickle.load(file)
if __name__ == "__main__":
rounds = ["flop", "turn", "river"]
for round in rounds:
print(f"Getting indices for {round}")
card_combos = data[round]
clusters = data["_" + round + "_clusters"]
lossy_lookup = {}
for i, card_combo in enumerate(tqdm(card_combos)):
lossy_lookup[tuple(card_combo)] = clusters[i]
location = "data/" + round + "_lossy.pkl"
with open(location, "wb") as file:
pickle.dump(lossy_lookup, file)
print(f"Dumped {round} data to {location}")
| true | true |
1c31dc2bad37aea0744fcab5ba2f10c4bd5e2092 | 3,820 | py | Python | history.py | SWang848/DCRAC | 695d83063cf484cd54d7744c8c719fde94c3cde5 | [
"Apache-2.0"
] | null | null | null | history.py | SWang848/DCRAC | 695d83063cf484cd54d7744c8c719fde94c3cde5 | [
"Apache-2.0"
] | null | null | null | history.py | SWang848/DCRAC | 695d83063cf484cd54d7744c8c719fde94c3cde5 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import time
import cv2
try:
from PIL import Image
except:
import Image
from utils import *
class History():
    """
    Manages frame history

    Keeps a rolling window of the last `length` processed observation frames
    together with the action taken before each frame. Frames are stored as
    uint8 arrays of shape `im_shape`; previous actions as int8, where -1
    marks "no history yet" and -2 marks "no previous action".
    """
    def __init__(self, length, im_shape, nb_action):
        """
        Args:
            length: How many frames should be stored in the history
            im_shape: Target size to crop to, im_shape = (WIDTH,HEIGHT,CHANNEL)
            nb_action: Number of possible actions.
                NOTE(review): currently unused by this class -- confirm.
        """
        # assert len(im_shape) == 3
        if len(im_shape) == 3:
            self.im_shape = im_shape
            # A single-channel target means frames get a grayscale conversion.
            self.black_and_white = True if im_shape[2] == 1 else False
        else:
            # Non-3D observations (e.g. feature vectors) are stored untouched.
            self.im_shape = im_shape
            self.black_and_white = False
        self.length = length
        self.history_o = None       # frame buffer, shape (length,) + im_shape
        self.history_a_prev = None  # previous-action buffer, shape (length,)
        self.reset()
    def reset(self):
        """Reset the history of observation and action
        """
        self.history_o = np.zeros((self.length, ) + self.im_shape, dtype=np.uint8)
        # self.history_a_prev = np.zeros((self.length, ), dtype=np.int8)
        # action '-1' means None
        self.history_a_prev = -np.ones((self.length, ), dtype=np.int8)
    def reset_with_raw_frame(self, raw_frame, action_prev=None, fill=False):
        """Fill the history with a raw frame

        When `fill` is true the whole observation buffer is filled with this
        frame; otherwise the frame is only appended to a cleared history.
        Returns the (history_o, history_a_prev) pair.
        """
        self.reset()
        # Action '-2' means "no action"; downstream it is presumably encoded
        # as an all-zero one-hot of shape (nb_objective) -- confirm.
        action_prev = -2 if action_prev is None else action_prev
        if fill:
            self.add_raw_frame(raw_frame, action_prev)
            return self.fill_with_last_frame()
        else:
            return self.add_raw_frame(raw_frame, action_prev)
    def add_raw_frame(self, raw_frame, action_prev, save=False):
        """Adds a new frame to the history

        Shifts both buffers left one slot and writes the processed frame and
        its preceding action into the newest (last) slot.
        Returns the (history_o, history_a_prev) pair.
        """
        self.history_o = np.roll(self.history_o, -1, axis=0)
        self.history_o[-1] = self.process_frame(raw_frame, save=save)
        self.history_a_prev = np.roll(self.history_a_prev, -1, axis=0)
        self.history_a_prev[-1] = action_prev
        return self.history_o, self.history_a_prev
    def fill_with_last_frame(self):
        """
        Fills the state with the latest experienced frame

        Only the observation buffer is duplicated; the action history keeps
        its reset values.
        """
        for i in range(len(self.history_o)-1):
            self.history_o[i] = self.history_o[-1]
            # self.history_a_prev[i] = self.history_a_prev[-1]
        return self.history_o, self.history_a_prev
    def process_frame(self, raw_frame, save=False, filename=None):
        """Processes a frame by resizing and cropping as necessary and then
        converting to grayscale
        Arguments:
            raw_frame {np.array} -- Raw pixels
        Keyword Arguments:
            save {bool} -- Whether to save the converted frame to disk (default: {False})
            filename {str} -- Filename to save it to (default: {None})
                NOTE(review): `filename` is never forwarded to save_image, so
                a timestamped default name is always used -- confirm intent.
        Returns:
            np.array -- The processed frame
        """
        # Non-image observations pass through untouched.
        if len(self.im_shape) < 3:
            return raw_frame
        if self.black_and_white:
            raw_frame = cv2.cvtColor(raw_frame,cv2.COLOR_RGB2GRAY)
        # cv2.resize takes dsize as (width, height) -- assumes im_shape is
        # ordered (WIDTH, HEIGHT, CHANNEL) as documented in __init__.
        cropped = cv2.resize(raw_frame, dsize=self.im_shape[:2], interpolation=cv2.INTER_AREA)
        cropped = cropped.reshape(self.im_shape)
        if save:
            self.save_image(cropped)
        return cropped
    def save_image(self, frame, filename=None):
        """Write `frame` to disk as a PNG; defaults to a timestamped name
        under ./output/imgs/. Grayscale frames are saved in mode 'L',
        otherwise mode 'RGB'."""
        if filename is None:
            filename = "./output/imgs/"+str(time.time())+".png"
        if self.black_and_white:
            frame = frame.reshape(self.im_shape[:2])
            img = Image.fromarray(frame, mode='L')
            img.save(filename)
        else:
            img = Image.fromarray(frame, mode='RGB')
            img.save(filename)
import time
import cv2
try:
from PIL import Image
except:
import Image
from utils import *
class History():
def __init__(self, length, im_shape, nb_action):
if len(im_shape) == 3:
self.im_shape = im_shape
self.black_and_white = True if im_shape[2] == 1 else False
else:
self.im_shape = im_shape
self.black_and_white = False
self.length = length
self.history_o = None
self.history_a_prev = None
self.reset()
def reset(self):
self.history_o = np.zeros((self.length, ) + self.im_shape, dtype=np.uint8)
self.history_a_prev = -np.ones((self.length, ), dtype=np.int8)
def reset_with_raw_frame(self, raw_frame, action_prev=None, fill=False):
self.reset()
action_prev = -2 if action_prev is None else action_prev
if fill:
self.add_raw_frame(raw_frame, action_prev)
return self.fill_with_last_frame()
else:
return self.add_raw_frame(raw_frame, action_prev)
def add_raw_frame(self, raw_frame, action_prev, save=False):
self.history_o = np.roll(self.history_o, -1, axis=0)
self.history_o[-1] = self.process_frame(raw_frame, save=save)
self.history_a_prev = np.roll(self.history_a_prev, -1, axis=0)
self.history_a_prev[-1] = action_prev
return self.history_o, self.history_a_prev
def fill_with_last_frame(self):
for i in range(len(self.history_o)-1):
self.history_o[i] = self.history_o[-1]
return self.history_o, self.history_a_prev
def process_frame(self, raw_frame, save=False, filename=None):
if len(self.im_shape) < 3:
return raw_frame
if self.black_and_white:
raw_frame = cv2.cvtColor(raw_frame,cv2.COLOR_RGB2GRAY)
cropped = cv2.resize(raw_frame, dsize=self.im_shape[:2], interpolation=cv2.INTER_AREA)
cropped = cropped.reshape(self.im_shape)
if save:
self.save_image(cropped)
return cropped
def save_image(self, frame, filename=None):
if filename is None:
filename = "./output/imgs/"+str(time.time())+".png"
if self.black_and_white:
frame = frame.reshape(self.im_shape[:2])
img = Image.fromarray(frame, mode='L')
img.save(filename)
else:
img = Image.fromarray(frame, mode='RGB')
img.save(filename) | true | true |
1c31dd354d5a42d55075a5ccdd336af3ea50688c | 456 | py | Python | CRM_project/accounts/urls.py | rzhvn1/Military_CRM | 64856ebba988453148c0360d1e6f73cd0950f9e5 | [
"MIT"
] | null | null | null | CRM_project/accounts/urls.py | rzhvn1/Military_CRM | 64856ebba988453148c0360d1e6f73cd0950f9e5 | [
"MIT"
] | null | null | null | CRM_project/accounts/urls.py | rzhvn1/Military_CRM | 64856ebba988453148c0360d1e6f73cd0950f9e5 | [
"MIT"
] | null | null | null | from django.urls import path, include
from .views import *
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
# router.register('dossier', DossierModelViewSet, basename='dossier')
router.register('register', RegisterViewSet, basename="register")
urlpatterns = [
path('', include(router.urls)),
path('login/', AuthView.as_view(), name="authorization"),
path('dossier/', DossierModelViewSet.as_view(), name = 'dossier')
] | 35.076923 | 69 | 0.741228 | from django.urls import path, include
from .views import *
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register('register', RegisterViewSet, basename="register")
urlpatterns = [
path('', include(router.urls)),
path('login/', AuthView.as_view(), name="authorization"),
path('dossier/', DossierModelViewSet.as_view(), name = 'dossier')
] | true | true |
1c31dd3666a6e69efaca05e0c7374cf000e8afd4 | 4,336 | py | Python | tests/primitives.py | plotdevice/plotdevice | 598f66a19cd58b8cfea8295024998b322ed66adf | [
"MIT"
] | 110 | 2015-01-17T03:22:51.000Z | 2022-02-12T06:04:27.000Z | tests/primitives.py | Jason-Cooke/plotdevice | 598f66a19cd58b8cfea8295024998b322ed66adf | [
"MIT"
] | 38 | 2015-01-02T01:06:59.000Z | 2021-10-05T06:34:42.000Z | tests/primitives.py | Jason-Cooke/plotdevice | 598f66a19cd58b8cfea8295024998b322ed66adf | [
"MIT"
] | 17 | 2015-04-28T17:29:03.000Z | 2021-07-11T21:26:25.000Z | # encoding: utf-8
import unittest
from . import PlotDeviceTestCase, reference
from plotdevice import *
class PrimitivesTests(PlotDeviceTestCase):
@reference('primitives/primitives-arc.png')
def test_primitives_arc(self):
# tut/Primitives (1)
size(150, 75)
arc(75,25, 25)
@reference('primitives/primitives-square.png')
def test_primitives_square(self):
# tut/Primitives (2)
size(150, 75)
poly(75,25, 25)
@reference('primitives/primitives-poly.png')
def test_primitives_poly(self):
# tut/Primitives (3)
size(150, 75)
poly(75,25, 25, sides=6)
@reference('primitives/primitives-star.png')
def test_primitives_star(self):
# tut/Primitives (4)
size(150, 75)
poly(75,25, 25, points=5)
@reference('primitives/primitives-arrow.png')
def test_primitives_arrow(self):
# tut/Primitives (5)
size(150, 75)
arrow(75,25, 50)
@reference('primitives/primitives-oval.png')
def test_primitives_oval(self):
# tut/Primitives (6)
size(150, 75)
oval(75,25, 50,50)
@reference('primitives/primitives-rect.png')
def test_primitives_rect(self):
# tut/Primitives (7)
size(150, 75)
rect(75,25, 50,50)
@reference('primitives/primitives-image.png')
def test_primitives_image(self):
# tut/Primitives (8)
size(150, 77)
image("tests/_in/triforce.png", 75,25)
@reference('primitives/primitives-text.png')
def test_primitives_text(self):
# tut/Primitives (9)
size(150, 75)
text("xyzzy", 75,25)
@reference('primitives/arc-simple.png')
def test_arc_simple(self):
# ref/Primitives/commands/arc()
size(125, 125)
fill(.9)
arc(125,125, 125)
fill(.2)
arc(40,40, 20)
@reference('primitives/arc.png')
def test_arc(self):
# ref/Primitives/commands/arc()
size(125, 125)
nofill()
stroke(.2)
arc(60,60, 40, range=180)
arc(60,60, 30, range=90, ccw=True)
stroke('red')
arc(60,60, 20, range=270, close=True)
@reference('primitives/superfolia.jpg')
def test_superfolia(self):
# ref/Primitives/commands/image()
size(135, 135)
image("tests/_in/superfolia.jpg", 0, 0)
@reference('primitives/line.jpg')
def test_line(self):
# ref/Primitives/commands/line()
size(125, 125)
pen(2)
stroke(0.2)
line(10, 20, 80, 80)
@reference('primitives/oval.jpg')
def test_oval(self):
# ref/Primitives/commands/oval()
size(125, 125)
fill(0.2)
oval(10,20, 40,40)
@reference('primitives/poly-sides.png')
def test_poly_sides(self):
# ref/Primitives/commands/poly()
size(125, 125)
fill(.2)
poly(30,30, 20)
poly(80,30, 20, sides=5)
poly(30,80, 20, sides=6)
poly(80,80, 20, sides=8)
@reference('primitives/poly-points.png')
def test_poly_points(self):
# ref/Primitives/commands/poly()
size(125, 125)
fill(.2)
poly(30,30, 20, points=5)
poly(80,30, 20, points=6)
poly(30,80, 20, points=8)
poly(80,80, 20, points=12)
@reference('primitives/rect.jpg')
def test_rect(self):
# ref/Primitives/commands/rect()
size(125, 125)
fill(0.2)
rect(10, 20, 60, 40)
@reference('primitives/text.png')
def test_text(self):
# ref/Primitives/commands/text()
size(125, 125)
fill(0.2)
font("Helvetica", 20)
text("hello", 10,50)
text("goodbye", 10,70, italic=True)
@reference('primitives/arrow.jpg')
def test_arrow(self):
# ref/Primitives/compat/arrow()
size(125, 125)
fill(0.2)
arrow(50, 50, 50)
rotate(180)
fill('red')
arrow(50, 50, 50)
@reference('primitives/star.png')
def test_star(self):
# ref/Primitives/compat/star()
size(125, 125)
fill(.75)
star(50,50, 16, 50,25)
fill(0.2)
star(50,50, 8, 50)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(PrimitivesTests))
return suite
| 26.278788 | 52 | 0.573801 |
import unittest
from . import PlotDeviceTestCase, reference
from plotdevice import *
class PrimitivesTests(PlotDeviceTestCase):
@reference('primitives/primitives-arc.png')
def test_primitives_arc(self):
size(150, 75)
arc(75,25, 25)
@reference('primitives/primitives-square.png')
def test_primitives_square(self):
size(150, 75)
poly(75,25, 25)
@reference('primitives/primitives-poly.png')
def test_primitives_poly(self):
size(150, 75)
poly(75,25, 25, sides=6)
@reference('primitives/primitives-star.png')
def test_primitives_star(self):
size(150, 75)
poly(75,25, 25, points=5)
@reference('primitives/primitives-arrow.png')
def test_primitives_arrow(self):
size(150, 75)
arrow(75,25, 50)
@reference('primitives/primitives-oval.png')
def test_primitives_oval(self):
size(150, 75)
oval(75,25, 50,50)
@reference('primitives/primitives-rect.png')
def test_primitives_rect(self):
size(150, 75)
rect(75,25, 50,50)
@reference('primitives/primitives-image.png')
def test_primitives_image(self):
size(150, 77)
image("tests/_in/triforce.png", 75,25)
@reference('primitives/primitives-text.png')
def test_primitives_text(self):
size(150, 75)
text("xyzzy", 75,25)
@reference('primitives/arc-simple.png')
def test_arc_simple(self):
size(125, 125)
fill(.9)
arc(125,125, 125)
fill(.2)
arc(40,40, 20)
@reference('primitives/arc.png')
def test_arc(self):
size(125, 125)
nofill()
stroke(.2)
arc(60,60, 40, range=180)
arc(60,60, 30, range=90, ccw=True)
stroke('red')
arc(60,60, 20, range=270, close=True)
@reference('primitives/superfolia.jpg')
def test_superfolia(self):
size(135, 135)
image("tests/_in/superfolia.jpg", 0, 0)
@reference('primitives/line.jpg')
def test_line(self):
size(125, 125)
pen(2)
stroke(0.2)
line(10, 20, 80, 80)
@reference('primitives/oval.jpg')
def test_oval(self):
size(125, 125)
fill(0.2)
oval(10,20, 40,40)
@reference('primitives/poly-sides.png')
def test_poly_sides(self):
size(125, 125)
fill(.2)
poly(30,30, 20)
poly(80,30, 20, sides=5)
poly(30,80, 20, sides=6)
poly(80,80, 20, sides=8)
@reference('primitives/poly-points.png')
def test_poly_points(self):
size(125, 125)
fill(.2)
poly(30,30, 20, points=5)
poly(80,30, 20, points=6)
poly(30,80, 20, points=8)
poly(80,80, 20, points=12)
@reference('primitives/rect.jpg')
def test_rect(self):
size(125, 125)
fill(0.2)
rect(10, 20, 60, 40)
@reference('primitives/text.png')
def test_text(self):
size(125, 125)
fill(0.2)
font("Helvetica", 20)
text("hello", 10,50)
text("goodbye", 10,70, italic=True)
@reference('primitives/arrow.jpg')
def test_arrow(self):
size(125, 125)
fill(0.2)
arrow(50, 50, 50)
rotate(180)
fill('red')
arrow(50, 50, 50)
@reference('primitives/star.png')
def test_star(self):
size(125, 125)
fill(.75)
star(50,50, 16, 50,25)
fill(0.2)
star(50,50, 8, 50)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(PrimitivesTests))
return suite
| true | true |
1c31dd6a0b4063761667a82dffffdd16b7808469 | 333 | py | Python | foreshadow/concrete/__init__.py | adithyabsk/foreshadow | ca2e927c396ae0d61923b287d6e32e142f3ba96f | [
"Apache-2.0"
] | 25 | 2018-07-26T17:30:31.000Z | 2021-02-23T22:54:01.000Z | foreshadow/concrete/__init__.py | adithyabsk/foreshadow | ca2e927c396ae0d61923b287d6e32e142f3ba96f | [
"Apache-2.0"
] | 150 | 2018-11-02T18:09:12.000Z | 2020-05-15T01:01:35.000Z | foreshadow/concrete/__init__.py | adithyabsk/foreshadow | ca2e927c396ae0d61923b287d6e32e142f3ba96f | [
"Apache-2.0"
] | 1 | 2019-02-20T22:24:00.000Z | 2019-02-20T22:24:00.000Z | """All the concrete transformers provided by foreshadow."""
from foreshadow.concrete.externals import * # noqa: F403, F401
from foreshadow.concrete.externals import __all__ as e_all
from foreshadow.concrete.internals import * # noqa: F403, F401
from foreshadow.concrete.internals import __all__ as i_all
__all__ = i_all + e_all
| 33.3 | 63 | 0.78979 |
from foreshadow.concrete.externals import *
from foreshadow.concrete.externals import __all__ as e_all
from foreshadow.concrete.internals import *
from foreshadow.concrete.internals import __all__ as i_all
__all__ = i_all + e_all
| true | true |
1c31dd8a5d1d70e3cb9aa10c601dc19b9d8805e7 | 1,564 | py | Python | flexget/components/bittorrent/magnet_info_hash.py | metaMMA/Flexget | a38986422461d7935ead1e2b4ed4c88bcd0a90f5 | [
"MIT"
] | null | null | null | flexget/components/bittorrent/magnet_info_hash.py | metaMMA/Flexget | a38986422461d7935ead1e2b4ed4c88bcd0a90f5 | [
"MIT"
] | 1 | 2017-10-09T23:06:44.000Z | 2017-10-09T23:06:44.000Z | flexget/components/bittorrent/magnet_info_hash.py | metaMMA/Flexget | a38986422461d7935ead1e2b4ed4c88bcd0a90f5 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
import base64
import re
from flexget import plugin
from flexget.event import event
log = logging.getLogger('magnet_btih')
class MagnetBtih(object):
"""Sets torrent_info_hash from magnet url."""
schema = {'type': 'boolean'}
def on_task_metainfo(self, task, config):
if config is False:
return
for entry in task.all_entries:
if entry.get('torrent_info_hash'):
continue
for url in [entry['url']] + entry.get('urls', []):
if url.startswith('magnet:'):
# find base16 encoded
info_hash_search = re.search('btih:([0-9a-f]{40})', url, re.IGNORECASE)
if info_hash_search:
entry['torrent_info_hash'] = info_hash_search.group(1).upper()
break
# find base32 encoded
info_hash_search = re.search('btih:([2-7a-z]{32})', url, re.IGNORECASE)
if info_hash_search:
b32hash = info_hash_search.group(1).upper()
b16hash = base64.b16encode(base64.b32decode(b32hash))
entry['torrent_info_hash'] = b16hash.decode('ascii').upper()
break
@event('plugin.register')
def register_plugin():
plugin.register(MagnetBtih, 'magnet_btih', builtin=True, api_ver=2)
| 35.545455 | 91 | 0.582481 | from __future__ import unicode_literals, division, absolute_import
from builtins import *
import logging
import base64
import re
from flexget import plugin
from flexget.event import event
log = logging.getLogger('magnet_btih')
class MagnetBtih(object):
schema = {'type': 'boolean'}
def on_task_metainfo(self, task, config):
if config is False:
return
for entry in task.all_entries:
if entry.get('torrent_info_hash'):
continue
for url in [entry['url']] + entry.get('urls', []):
if url.startswith('magnet:'):
info_hash_search = re.search('btih:([0-9a-f]{40})', url, re.IGNORECASE)
if info_hash_search:
entry['torrent_info_hash'] = info_hash_search.group(1).upper()
break
info_hash_search = re.search('btih:([2-7a-z]{32})', url, re.IGNORECASE)
if info_hash_search:
b32hash = info_hash_search.group(1).upper()
b16hash = base64.b16encode(base64.b32decode(b32hash))
entry['torrent_info_hash'] = b16hash.decode('ascii').upper()
break
@event('plugin.register')
def register_plugin():
plugin.register(MagnetBtih, 'magnet_btih', builtin=True, api_ver=2)
| true | true |
1c31df6c495e7182d04ca6e28ef9d3d06610c7dd | 15,222 | py | Python | src/sage/schemes/toric/ideal.py | LaisRast/sage | 5fb2a6ea44400e469caee82748cf863ca0c5f724 | [
"BSL-1.0"
] | null | null | null | src/sage/schemes/toric/ideal.py | LaisRast/sage | 5fb2a6ea44400e469caee82748cf863ca0c5f724 | [
"BSL-1.0"
] | null | null | null | src/sage/schemes/toric/ideal.py | LaisRast/sage | 5fb2a6ea44400e469caee82748cf863ca0c5f724 | [
"BSL-1.0"
] | null | null | null | r"""
Toric ideals
A toric ideal (associated to an integer matrix `A`) is an ideal of the
form
.. MATH::
I_A = \left<
x^u - x^v
: u,v \in \ZZ_\geq^n
, u-v \in \ker(A)
\right>
In other words, it is an ideal generated by irreducible "binomials",
that is, differences of monomials without a common factor. Since the
Buchberger algorithm preserves this property, any Groebner basis is
then also generated by binomials.
EXAMPLES::
sage: A = matrix([[1,1,1],[0,1,2]])
sage: IA = ToricIdeal(A)
sage: IA.ker()
Free module of degree 3 and rank 1 over Integer Ring
User basis matrix:
[-1 2 -1]
sage: IA
Ideal (-z1^2 + z0*z2) of Multivariate Polynomial
Ring in z0, z1, z2 over Rational Field
Here, the "naive" ideal generated by `z_0 z_2 - z_1^2` does already
equal the toric ideal. But that is not true in general! For example,
this toric ideal ([Stu1997]_, Example 1.2) is the twisted
cubic and cannot be generated by `2=\dim \ker(A)` polynomials::
sage: A = matrix([[3,2,1,0],[0,1,2,3]])
sage: IA = ToricIdeal(A)
sage: IA.ker()
Free module of degree 4 and rank 2 over Integer Ring
User basis matrix:
[-1 1 1 -1]
[-1 2 -1 0]
sage: IA
Ideal (-z1*z2 + z0*z3, -z1^2 + z0*z2, z2^2 - z1*z3) of
Multivariate Polynomial Ring in z0, z1, z2, z3 over Rational Field
The following family of toric ideals is from Example 4.4 of
[Stu1997]_. One can show that `I_d` is generated by one
quadric and `d` binomials of degree `d`::
sage: I = lambda d: ToricIdeal(matrix([[1,1,1,1,1],[0,1,1,0,0],[0,0,1,1,d]]))
sage: I(2)
Ideal (-z3^2 + z0*z4,
z0*z2 - z1*z3,
z2*z3 - z1*z4) of
Multivariate Polynomial Ring in z0, z1, z2, z3, z4 over Rational Field
sage: I(3)
Ideal (-z3^3 + z0^2*z4,
z0*z2 - z1*z3,
z2*z3^2 - z0*z1*z4,
z2^2*z3 - z1^2*z4) of
Multivariate Polynomial Ring in z0, z1, z2, z3, z4 over Rational Field
sage: I(4)
Ideal (-z3^4 + z0^3*z4,
z0*z2 - z1*z3,
z2*z3^3 - z0^2*z1*z4,
z2^2*z3^2 - z0*z1^2*z4,
z2^3*z3 - z1^3*z4) of
Multivariate Polynomial Ring in z0, z1, z2, z3, z4 over Rational Field
Finally, the example in [SH1995b]_ ::
sage: A = matrix(ZZ, [ [15, 4, 14, 19, 2, 1, 10, 17],
....: [18, 11, 13, 5, 16, 16, 8, 19],
....: [11, 7, 8, 19, 15, 18, 14, 6],
....: [17, 10, 13, 17, 16, 14, 15, 18] ])
sage: IA = ToricIdeal(A) # long time
sage: IA.ngens() # long time
213
TESTS::
sage: A = matrix(ZZ, [[1, 1, 0, 0, -1, 0, 0, -1],
....: [0, 0, 1, 1, 0, -1, -1, 0],
....: [1, 0, 0, 1, 1, 1, 0, 0],
....: [1, 0, 0, 1, 0, 0, -1, -1]])
sage: IA = ToricIdeal(A)
sage: R = IA.ring()
sage: R.inject_variables()
Defining z0, z1, z2, z3, z4, z5, z6, z7
sage: IA == R.ideal([z4*z6-z5*z7, z2*z5-z3*z6, -z3*z7+z2*z4,
....: -z2*z6+z1*z7, z1*z4-z3*z6, z0*z7-z3*z6, -z1*z5+z0*z6, -z3*z5+z0*z4,
....: z0*z2-z1*z3]) # Computed with Maple 12
True
The next example first appeared in Example 12.7 in [Stu1995]_. It is also
used by the Maple 12 help system as example::
sage: A = matrix(ZZ, [[1, 2, 3, 4, 0, 1, 4, 5],
....: [2, 3, 4, 1, 1, 4, 5, 0],
....: [3, 4, 1, 2, 4, 5, 0, 1],
....: [4, 1, 2, 3, 5, 0, 1, 4]])
sage: IA = ToricIdeal(A, 'z1, z2, z3, z4, z5, z6, z7, z8')
sage: R = IA.ring()
sage: R.inject_variables()
Defining z1, z2, z3, z4, z5, z6, z7, z8
sage: IA == R.ideal([z4^4-z6*z8^3, z3^4-z5*z7^3, -z4^3+z2*z8^2,
....: z2*z4-z6*z8, -z4^2*z6+z2^2*z8, -z4*z6^2+z2^3, -z3^3+z1*z7^2,
....: z1*z3-z5*z7, -z3^2*z5+z1^2*z7, z1^3-z3*z5^2])
True
AUTHORS:
- Volker Braun (2011-01-03): Initial version
"""
# ****************************************************************************
# Copyright (C) 2010 Volker Braun <vbraun.name@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
# TODO:
# * Implement the Di Biase & Urbanke algorithm
# * Implement the Conti & Traverso algorithm (for educational purposes)
# * Cythonize the Buchberger algorithm for toric ideals
# * Use the (multiple) weighted homogeneity during Groebner basis computations
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.misc.misc_c import prod
from sage.matrix.constructor import matrix
from sage.rings.integer_ring import ZZ
from sage.rings.rational_field import QQ
from sage.rings.polynomial.multi_polynomial_ideal import MPolynomialIdeal
class ToricIdeal(MPolynomialIdeal):
r"""
This class represents a toric ideal defined by an integral matrix.
INPUT:
- ``A`` -- integer matrix. The defining matrix of the toric ideal.
- ``names`` -- string (optional). Names for the variables. By
default, this is ``'z'`` and the variables will be named ``z0``,
``z1``, ...
- ``base_ring`` -- a ring (optional). Default: `\QQ`. The base
ring of the ideal. A toric ideal uses only coefficients `\pm 1`.
- ``polynomial_ring`` -- a polynomial ring (optional). The
polynomial ring to construct the ideal in.
You may specify the ambient polynomial ring via the
``polynomial_ring`` parameter or via the ``names`` and
``base_ring`` parameter. A ``ValueError`` is raised if you
specify both.
- ``algorithm`` -- string (optional). The algorithm to use. For
now, must be ``'HostenSturmfels'`` which is the algorithm
proposed by Hosten and Sturmfels in [SH1995b]_.
EXAMPLES::
sage: A = matrix([[1,1,1],[0,1,2]])
sage: ToricIdeal(A)
Ideal (-z1^2 + z0*z2) of Multivariate Polynomial Ring
in z0, z1, z2 over Rational Field
First way of specifying the polynomial ring::
sage: ToricIdeal(A, names='x,y,z', base_ring=ZZ)
Ideal (-y^2 + x*z) of Multivariate Polynomial Ring
in x, y, z over Integer Ring
Second way of specifying the polynomial ring::
sage: R.<x,y,z> = ZZ[]
sage: ToricIdeal(A, polynomial_ring=R)
Ideal (-y^2 + x*z) of Multivariate Polynomial Ring
in x, y, z over Integer Ring
It is an error to specify both::
sage: ToricIdeal(A, names='x,y,z', polynomial_ring=R)
Traceback (most recent call last):
...
ValueError: you must not specify both variable names and a polynomial ring
"""
def __init__(self, A,
names='z', base_ring=QQ,
polynomial_ring=None,
algorithm='HostenSturmfels'):
r"""
Create an ideal and a multivariate polynomial ring containing it.
See the :mod:`module documentation
<sage.schemes.toric.ideal>` for an introduction to
toric ideals.
INPUT:
See the :class:`class-level documentation <ToricIdeal>` for
input values.
EXAMPLES::
sage: A = matrix([[1,1,1],[0,1,2]])
sage: ToricIdeal(A)
Ideal (-z1^2 + z0*z2) of Multivariate Polynomial Ring
in z0, z1, z2 over Rational Field
sage: ToricIdeal(A, names='x', base_ring=GF(101))
Ideal (-x1^2 + x0*x2) of Multivariate Polynomial Ring
in x0, x1, x2 over Finite Field of size 101
sage: ToricIdeal(A, names='x', base_ring=FractionField(QQ['t']))
Ideal (-x1^2 + x0*x2) of Multivariate Polynomial Ring
in x0, x1, x2 over Fraction Field of Univariate Polynomial Ring in t over Rational Field
"""
self._A = matrix(ZZ, A)
if polynomial_ring:
if (names!='z') or (base_ring is not QQ):
raise ValueError('you must not specify both variable names and a polynomial ring')
self._names = [str(_) for _ in polynomial_ring.gens()]
self._base_ring = polynomial_ring.base_ring()
ring = polynomial_ring
else:
self._names = names
self._base_ring = base_ring
ring = self._init_ring('degrevlex')
if algorithm=='HostenSturmfels':
ideal = self._ideal_HostenSturmfels()
else:
raise ValueError(f'algorithm = {algorithm} is not known')
gens = [ring(x) for x in ideal.gens()]
MPolynomialIdeal.__init__(self, ring, gens, coerce=False)
def A(self):
"""
Return the defining matrix.
OUTPUT:
An integer matrix.
EXAMPLES::
sage: A = matrix([[1,1,1],[0,1,2]])
sage: IA = ToricIdeal(A)
sage: IA.A()
[1 1 1]
[0 1 2]
"""
return self._A
def ker(self):
"""
Return the kernel of the defining matrix.
OUTPUT:
The kernel of ``self.A()``.
EXAMPLES::
sage: A = matrix([[1,1,1],[0,1,2]])
sage: IA = ToricIdeal(A)
sage: IA.ker()
Free module of degree 3 and rank 1 over Integer Ring
User basis matrix:
[-1 2 -1]
"""
if '_ker' in self.__dict__:
return self._ker
self._ker = self.A().right_kernel(basis='LLL')
return self._ker
def nvariables(self):
r"""
Return the number of variables of the ambient polynomial ring.
OUTPUT:
Integer. The number of columns of the defining matrix
:meth:`A`.
EXAMPLES::
sage: A = matrix([[1,1,1],[0,1,2]])
sage: IA = ToricIdeal(A)
sage: IA.nvariables()
3
"""
return self.A().ncols()
def _init_ring(self, term_order):
r"""
Construct the ambient polynomial ring.
INPUT:
- ``term_order`` -- string. The order of the variables, for
example ``'neglex'`` and ``'degrevlex'``.
OUTPUT:
A polynomial ring with the given term order.
.. NOTE::
Reverse lexicographic ordering is equivalent to negative
lexicographic order with the reversed list of
variables. We are using the latter in the implementation
of the Hosten/Sturmfels algorithm.
EXAMPLES::
sage: A = matrix([[1,1,1],[0,1,2]])
sage: IA = ToricIdeal(A)
sage: R = IA._init_ring('neglex'); R
Multivariate Polynomial Ring in z0, z1, z2 over Rational Field
sage: R.term_order()
Negative lexicographic term order
sage: R.inject_variables()
Defining z0, z1, z2
sage: z0 < z1 and z1 < z2
True
"""
return PolynomialRing(self._base_ring, self._names,
self.nvariables(), order=term_order)
def _naive_ideal(self, ring):
r"""
Return the "naive" subideal.
INPUT:
- ``ring`` -- the ambient ring of the ideal.
OUTPUT:
A subideal of the toric ideal in the polynomial ring ``ring``.
EXAMPLES::
sage: A = matrix([[1,1,1],[0,1,2]])
sage: IA = ToricIdeal(A)
sage: IA.ker()
Free module of degree 3 and rank 1 over Integer Ring
User basis matrix:
[-1 2 -1]
sage: IA._naive_ideal(IA.ring())
Ideal (z1^2 - z0*z2) of Multivariate Polynomial Ring in z0, z1, z2 over Rational Field
"""
x = ring.gens()
binomials = []
for row in self.ker().matrix().rows():
xpos = prod(x[i]**max( row[i],0) for i in range(0,len(x)))
xneg = prod(x[i]**max(-row[i],0) for i in range(0,len(x)))
binomials.append(xpos - xneg)
return ring.ideal(binomials)
def _ideal_quotient_by_variable(self, ring, ideal, n):
r"""
Return the ideal quotient `(J:x_n^\infty)`.
INPUT:
- ``ring`` -- the ambient polynomial ring in neglex order.
- ``ideal`` -- the ideal `J`.
- ``n`` -- Integer. The index of the next variable to divide by.
OUTPUT:
The ideal quotient `(J:x_n^\infty)`.
ALGORITHM:
Proposition 4 of [SH1995b]_.
EXAMPLES::
sage: A = lambda d: matrix([[1,1,1,1,1],[0,1,1,0,0],[0,0,1,1,d]])
sage: IA = ToricIdeal(A(3))
sage: R = PolynomialRing(QQ, 5, 'z', order='neglex')
sage: J0 = IA._naive_ideal(R)
sage: IA._ideal_quotient_by_variable(R, J0, 0)
Ideal (z2*z3^2 - z0*z1*z4, z1*z3 - z0*z2,
z2^2*z3 - z1^2*z4, z1^3*z4 - z0*z2^3)
of Multivariate Polynomial Ring in z0, z1, z2, z3, z4 over Rational Field
"""
N = self.nvariables()
y = list(ring.gens())
x = [ y[i-n] for i in range(N) ]
y_to_x = dict(zip(x,y))
x_to_y = dict(zip(y,x))
# swap variables such that the n-th variable becomes the last one
J = ideal.subs(y_to_x)
# TODO: Can we use the weighted homogeneity with respect to
# the rows of self.A() when computing the Groebner basis, see
# [SH1995b]?
basis = J.groebner_basis()
# x_n = y[0] # the cheapest variable in the revlex order
def subtract(e, power):
l = list(e)
return tuple([l[0]-power] + l[1:])
def divide_by_x_n(p):
d_old = p.dict()
power = min([ e[0] for e in d_old.keys() ])
d_new = dict((subtract(exponent, power), coefficient)
for exponent, coefficient in d_old.items())
return p.parent()(d_new)
basis = [divide_by_x_n(b) for b in basis]
quotient = ring.ideal(basis)
return quotient.subs(x_to_y)
def _ideal_HostenSturmfels(self):
r"""
Compute the toric ideal by Hosten and Sturmfels' algorithm.
OUTPUT:
The toric ideal as an ideal in the polynomial ring
``self.ring()``.
EXAMPLES::
sage: A = matrix([[3,2,1,0],[0,1,2,3]])
sage: IA = ToricIdeal(A); IA
Ideal (-z1*z2 + z0*z3, -z1^2 + z0*z2, z2^2 - z1*z3)
of Multivariate Polynomial Ring in z0, z1, z2, z3 over Rational Field
sage: R = IA.ring()
sage: IA == IA._ideal_HostenSturmfels()
True
TESTS::
sage: I_2x2 = identity_matrix(ZZ,2)
sage: ToricIdeal(I_2x2)
Ideal (0) of Multivariate Polynomial Ring in z0, z1 over Rational Field
"""
ring = self._init_ring('neglex')
J = self._naive_ideal(ring)
if J.is_zero():
return J
for i in range(0,self.nvariables()):
J = self._ideal_quotient_by_variable(ring, J, i)
return J
| 32.87689 | 100 | 0.549074 |
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.misc.misc_c import prod
from sage.matrix.constructor import matrix
from sage.rings.integer_ring import ZZ
from sage.rings.rational_field import QQ
from sage.rings.polynomial.multi_polynomial_ideal import MPolynomialIdeal
class ToricIdeal(MPolynomialIdeal):
def __init__(self, A,
names='z', base_ring=QQ,
polynomial_ring=None,
algorithm='HostenSturmfels'):
self._A = matrix(ZZ, A)
if polynomial_ring:
if (names!='z') or (base_ring is not QQ):
raise ValueError('you must not specify both variable names and a polynomial ring')
self._names = [str(_) for _ in polynomial_ring.gens()]
self._base_ring = polynomial_ring.base_ring()
ring = polynomial_ring
else:
self._names = names
self._base_ring = base_ring
ring = self._init_ring('degrevlex')
if algorithm=='HostenSturmfels':
ideal = self._ideal_HostenSturmfels()
else:
raise ValueError(f'algorithm = {algorithm} is not known')
gens = [ring(x) for x in ideal.gens()]
MPolynomialIdeal.__init__(self, ring, gens, coerce=False)
def A(self):
return self._A
def ker(self):
if '_ker' in self.__dict__:
return self._ker
self._ker = self.A().right_kernel(basis='LLL')
return self._ker
def nvariables(self):
return self.A().ncols()
def _init_ring(self, term_order):
return PolynomialRing(self._base_ring, self._names,
self.nvariables(), order=term_order)
def _naive_ideal(self, ring):
x = ring.gens()
binomials = []
for row in self.ker().matrix().rows():
xpos = prod(x[i]**max( row[i],0) for i in range(0,len(x)))
xneg = prod(x[i]**max(-row[i],0) for i in range(0,len(x)))
binomials.append(xpos - xneg)
return ring.ideal(binomials)
def _ideal_quotient_by_variable(self, ring, ideal, n):
N = self.nvariables()
y = list(ring.gens())
x = [ y[i-n] for i in range(N) ]
y_to_x = dict(zip(x,y))
x_to_y = dict(zip(y,x))
J = ideal.subs(y_to_x)
basis = J.groebner_basis()
l = list(e)
return tuple([l[0]-power] + l[1:])
def divide_by_x_n(p):
d_old = p.dict()
power = min([ e[0] for e in d_old.keys() ])
d_new = dict((subtract(exponent, power), coefficient)
for exponent, coefficient in d_old.items())
return p.parent()(d_new)
basis = [divide_by_x_n(b) for b in basis]
quotient = ring.ideal(basis)
return quotient.subs(x_to_y)
def _ideal_HostenSturmfels(self):
ring = self._init_ring('neglex')
J = self._naive_ideal(ring)
if J.is_zero():
return J
for i in range(0,self.nvariables()):
J = self._ideal_quotient_by_variable(ring, J, i)
return J
| true | true |
1c31e113234907191663c0d92055af47b297a82a | 4,808 | py | Python | utils.py | Wendy-Xiao/ext_summ_disco_tree_attn | 5ff99c8260350c677e140b02521c75ac03d16673 | [
"MIT"
] | 6 | 2020-11-14T04:31:31.000Z | 2021-12-31T02:18:56.000Z | utils.py | Wendy-Xiao/ext_summ_disco_tree_attn | 5ff99c8260350c677e140b02521c75ac03d16673 | [
"MIT"
] | 1 | 2020-12-16T07:08:11.000Z | 2020-12-16T19:46:29.000Z | utils.py | Wendy-Xiao/ext_summ_disco_tree_attn | 5ff99c8260350c677e140b02521c75ac03d16673 | [
"MIT"
] | 1 | 2022-02-16T13:12:56.000Z | 2022-02-16T13:12:56.000Z | from collections import Counter
from pathlib import Path
from random import random
import rouge_papier_v2
import pandas as pd
import re
import numpy as np
import os
import json
import torch
import os
import subprocess
# import matplotlib.pyplot as plt
# Utility functions
def get_posweight(inputs_dir):
inputs_dir = Path(inputs_dir)
all_files = [path for path in inputs_dir.glob("*.pt")]
total_num=0
total_pos=0
for i in range(10):
data = torch.load(all_files[i])
for d in data:
total_num+=len(d['d_labels'][0])
total_pos+=sum(d['d_labels'][0])
print('Compute pos weight done! There are %d sentences in total, with %d sentences as positive'%(total_num,total_pos))
return torch.FloatTensor([(total_num-total_pos)/float(total_pos)])
def make_file_list(input_dir,file_list_file):
of = open(file_list_file,'r')
file_list = of.readlines()
of.close()
f_list = [Path(input_dir+'/'+f.strip()+'.json') for f in file_list]
return f_list
def get_all_text(train_input_dir):
if isinstance(train_input_dir,list):
file_l = train_input_dir
else:
train_input = Path(train_input_dir)
file_l = [path for path in train_input.glob("*.json")]
all_tokens = []
for f in file_l:
with f.open() as of:
d = json.load(of)
tokens = [t for sent in d['inputs'] for t in (sent['tokens']+['<eos>'])]
all_tokens.append(tokens)
return all_tokens
def build_word2ind(utt_l, vocabularySize):
word_counter = Counter([word for utt in utt_l for word in utt])
print('%d words found!'%(len(word_counter)))
vocabulary = ["<UNK>"] + [e[0] for e in word_counter.most_common(vocabularySize)]
word2index = {word:index for index,word in enumerate(vocabulary)}
global EOS_INDEX
EOS_INDEX = word2index['<eos>']
return word2index
# Build embedding matrix by importing the pretrained glove
def getEmbeddingMatrix(gloveDir, word2index, embedding_dim):
'''Refer to the official baseline model provided by SemEval.'''
embeddingsIndex = {}
# Load the embedding vectors from ther GloVe file
with open(os.path.join(gloveDir, 'glove.6B.300d.txt'), encoding="utf8") as f:
for line in f:
values = line.split()
word = values[0]
embeddingVector = np.asarray(values[1:], dtype='float32')
embeddingsIndex[word] = embeddingVector
# Minimum word index of any word is 1.
embeddingMatrix = np.zeros((len(word2index) , embedding_dim))
for word, i in word2index.items():
embeddingVector = embeddingsIndex.get(word)
if embeddingVector is not None:
# words not found in embedding index will be all-zeros.
embeddingMatrix[i] = embeddingVector
return embeddingMatrix
def get_rouge(hyp_pathlist, ref_pathlist, config_path= './config'):
path_data = []
uttnames = []
for i in range(len(hyp_pathlist)):
path_data.append([hyp_pathlist[i], [ref_pathlist[i]]])
uttnames.append(os.path.splitext(hyp_pathlist[i])[0].split('/')[-1])
config_text = rouge_papier_v2.util.make_simple_config_text(path_data)
config_path = config_path
of = open(config_path,'w')
of.write(config_text)
of.close()
uttnames.append('Average')
df,avgfs,conf = rouge_papier_v2.compute_rouge(
config_path, max_ngram=2, lcs=True,
remove_stopwords=False,stemmer=True,set_length = False, return_conf=True)
df['data_ids'] = pd.Series(np.array(uttnames),index =df.index)
avg = df.iloc[-1:].to_dict("records")[0]
c = conf.to_dict("records")
# if lcs:
# print(c)
print("Rouge-1 r score: %f, Rouge-1 p score: %f, Rouge-1 f-score: %f, 95-conf(%f-%f)"%(\
avg['rouge-1-r'],avg['rouge-1-p'],avg['rouge-1-f'],c[0]['lower_conf_f'],c[0]['upper_conf_f']))
print("Rouge-2 r score:%f, Rouge-1 p score: %f, Rouge-2 f-score:%f, 95-conf(%f-%f)"%(\
avg['rouge-2-r'],avg['rouge-2-p'],avg['rouge-2-f'],c[1]['lower_conf_f'],c[1]['upper_conf_f']))
print("Rouge-L r score:%f, Rouge-1 p score: %f, Rouge-L f-score:%f, 95-conf(%f-%f)"%(\
avg['rouge-L-r'],avg['rouge-L-p'],avg['rouge-L-f'],c[2]['lower_conf_f'],c[2]['upper_conf_f']))
return avgfs[1],df
if __name__ == '__main__':
# oracle_path = '/scratch/wenxiao/pubmed/oracle/test/'
# abstract_path = '/scratch/wenxiao/pubmed/human-abstracts/test/'
# lead_path = '/scratch/wenxiao/pubmed/lead/test/'
oracle_path = '/ubc/cs/research/nlp/wenxiao/official_code/test_hyp/oracle-bigpatent_a/'
lead_path = '/ubc/cs/research/nlp/wenxiao/official_code/test_hyp/lead-bigpatent_a/'
abstract_path = '/scratch/wenxiao/bigpatent/bigPatentData_splitted/a/human-abstracts/test/'
d = Path(oracle_path)
uttnames = [str(path.stem) for path in d.glob("*.txt")]
lead_pathlist = []
oracle_pathlist = []
ref_pathlist = []
for n in uttnames:
lead_pathlist.append(lead_path+n+'.txt')
oracle_pathlist.append(oracle_path+n+'.txt')
ref_pathlist.append(abstract_path+n+'.txt')
get_meteor(oracle_pathlist,ref_pathlist,'oracle')
get_meteor(lead_pathlist,ref_pathlist,'lead')
| 35.614815 | 119 | 0.720674 | from collections import Counter
from pathlib import Path
from random import random
import rouge_papier_v2
import pandas as pd
import re
import numpy as np
import os
import json
import torch
import os
import subprocess
def get_posweight(inputs_dir):
inputs_dir = Path(inputs_dir)
all_files = [path for path in inputs_dir.glob("*.pt")]
total_num=0
total_pos=0
for i in range(10):
data = torch.load(all_files[i])
for d in data:
total_num+=len(d['d_labels'][0])
total_pos+=sum(d['d_labels'][0])
print('Compute pos weight done! There are %d sentences in total, with %d sentences as positive'%(total_num,total_pos))
return torch.FloatTensor([(total_num-total_pos)/float(total_pos)])
def make_file_list(input_dir,file_list_file):
of = open(file_list_file,'r')
file_list = of.readlines()
of.close()
f_list = [Path(input_dir+'/'+f.strip()+'.json') for f in file_list]
return f_list
def get_all_text(train_input_dir):
if isinstance(train_input_dir,list):
file_l = train_input_dir
else:
train_input = Path(train_input_dir)
file_l = [path for path in train_input.glob("*.json")]
all_tokens = []
for f in file_l:
with f.open() as of:
d = json.load(of)
tokens = [t for sent in d['inputs'] for t in (sent['tokens']+['<eos>'])]
all_tokens.append(tokens)
return all_tokens
def build_word2ind(utt_l, vocabularySize):
    """Build a word->index vocabulary from tokenized utterances.

    Index 0 is reserved for ``"<UNK>"``; the next slots hold the
    ``vocabularySize`` most frequent words.  As a side effect the module-level
    ``EOS_INDEX`` is set to the index of ``'<eos>'`` (which must therefore make
    the frequency cut, otherwise a KeyError is raised — same as the original).
    """
    counts = Counter(word for utt in utt_l for word in utt)
    print('%d words found!'%(len(counts)))
    vocabulary = ["<UNK>"]
    for word, _ in counts.most_common(vocabularySize):
        vocabulary.append(word)
    word2index = {}
    for index, word in enumerate(vocabulary):
        word2index[word] = index
    global EOS_INDEX
    EOS_INDEX = word2index['<eos>']
    return word2index
def getEmbeddingMatrix(gloveDir, word2index, embedding_dim):
    """Build a (vocab_size, embedding_dim) GloVe embedding matrix.

    Reads ``glove.6B.300d.txt`` from ``gloveDir`` (one token followed by its
    vector components per line).  Rows of the result are indexed by
    ``word2index``; words absent from the GloVe file keep all-zero rows.
    """
    vectors = {}
    glove_path = os.path.join(gloveDir, 'glove.6B.300d.txt')
    with open(glove_path, encoding="utf8") as handle:
        for line in handle:
            parts = line.split()
            vectors[parts[0]] = np.asarray(parts[1:], dtype='float32')
    matrix = np.zeros((len(word2index), embedding_dim))
    for word, idx in word2index.items():
        vec = vectors.get(word)
        if vec is not None:
            matrix[idx] = vec
    return matrix
def get_rouge(hyp_pathlist, ref_pathlist, config_path='./config'):
    """Score hypothesis files against reference files with ROUGE-1/2/L.

    ``hyp_pathlist`` / ``ref_pathlist`` are parallel lists of file paths (one
    summary per file).  A rouge_papier_v2 config is written to ``config_path``
    and scored with stemming, no stopword removal, up to bigrams plus LCS.

    Returns ``(avgfs[1], df)`` — avgfs[1] is presumably the average ROUGE-2
    f-score (TODO confirm against rouge_papier_v2), df is the per-utterance
    score table with a ``data_ids`` column.
    """
    path_data = []
    uttnames = []
    for i in range(len(hyp_pathlist)):
        path_data.append([hyp_pathlist[i], [ref_pathlist[i]]])
        # Utterance id = hypothesis file name without directory or extension.
        uttnames.append(os.path.splitext(hyp_pathlist[i])[0].split('/')[-1])
    config_text = rouge_papier_v2.util.make_simple_config_text(path_data)
    with open(config_path, 'w') as of:
        of.write(config_text)
    # compute_rouge appends an aggregate row at the end; label it 'Average'.
    uttnames.append('Average')
    df, avgfs, conf = rouge_papier_v2.compute_rouge(
        config_path, max_ngram=2, lcs=True,
        remove_stopwords=False, stemmer=True, set_length=False, return_conf=True)
    df['data_ids'] = pd.Series(np.array(uttnames), index=df.index)
    avg = df.iloc[-1:].to_dict("records")[0]
    c = conf.to_dict("records")
    print("Rouge-1 r score: %f, Rouge-1 p score: %f, Rouge-1 f-score: %f, 95-conf(%f-%f)" % (
        avg['rouge-1-r'], avg['rouge-1-p'], avg['rouge-1-f'], c[0]['lower_conf_f'], c[0]['upper_conf_f']))
    # BUGFIX: the two lines below previously labeled the ROUGE-2 / ROUGE-L
    # precision values as "Rouge-1 p score".
    print("Rouge-2 r score:%f, Rouge-2 p score: %f, Rouge-2 f-score:%f, 95-conf(%f-%f)" % (
        avg['rouge-2-r'], avg['rouge-2-p'], avg['rouge-2-f'], c[1]['lower_conf_f'], c[1]['upper_conf_f']))
    print("Rouge-L r score:%f, Rouge-L p score: %f, Rouge-L f-score:%f, 95-conf(%f-%f)" % (
        avg['rouge-L-r'], avg['rouge-L-p'], avg['rouge-L-f'], c[2]['lower_conf_f'], c[2]['upper_conf_f']))
    return avgfs[1], df
if __name__ == '__main__':
    # Hard-coded paths to system outputs (oracle / lead baselines) and the
    # human-written reference abstracts for the bigpatent "a" split.
    oracle_path = '/ubc/cs/research/nlp/wenxiao/official_code/test_hyp/oracle-bigpatent_a/'
    lead_path = '/ubc/cs/research/nlp/wenxiao/official_code/test_hyp/lead-bigpatent_a/'
    abstract_path = '/scratch/wenxiao/bigpatent/bigPatentData_splitted/a/human-abstracts/test/'
    d = Path(oracle_path)
    # One utterance name per oracle summary; used to pair hyp/ref files by stem.
    uttnames = [str(path.stem) for path in d.glob("*.txt")]
    lead_pathlist = []
    oracle_pathlist = []
    ref_pathlist = []
    for n in uttnames:
        lead_pathlist.append(lead_path+n+'.txt')
        oracle_pathlist.append(oracle_path+n+'.txt')
        ref_pathlist.append(abstract_path+n+'.txt')
    # NOTE(review): get_meteor is not defined anywhere in this file, so these
    # calls raise NameError as written — presumably get_rouge (above) or an
    # external METEOR helper was intended.  TODO confirm.
    get_meteor(oracle_pathlist,ref_pathlist,'oracle')
    get_meteor(lead_pathlist,ref_pathlist,'lead')
| true | true |
1c31e28c29cf19ed0c6c02d83971db1f3eb61dc1 | 3,107 | py | Python | tests/unit/scenarios/ironic/test_nodes.py | RSE-Cambridge/rally-openstack | 32bbc091bbce1db625a2fc22da28b32718befa13 | [
"Apache-2.0"
] | null | null | null | tests/unit/scenarios/ironic/test_nodes.py | RSE-Cambridge/rally-openstack | 32bbc091bbce1db625a2fc22da28b32718befa13 | [
"Apache-2.0"
] | null | null | null | tests/unit/scenarios/ironic/test_nodes.py | RSE-Cambridge/rally-openstack | 32bbc091bbce1db625a2fc22da28b32718befa13 | [
"Apache-2.0"
] | 1 | 2018-12-10T12:31:27.000Z | 2018-12-10T12:31:27.000Z | # Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally import exceptions
from rally_openstack.scenarios.ironic import nodes
from tests.unit import test
class IronicNodesTestCase(test.ScenarioTestCase):
    """Unit tests for the Ironic node scenarios (create/list, create/delete)."""

    def test_create_and_list_node(self):
        # Minimal stand-in for an Ironic node: only .name matters to the
        # scenario's "created node is listed" assertion.
        class Node(object):
            def __init__(self, name):
                self.name = name

        scenario = nodes.CreateAndListNode(self.context)
        scenario._create_node = mock.Mock(return_value=Node("node_obj1"))
        scenario._list_nodes = mock.Mock(
            return_value=[Node(name)
                          for name in ("node_obj1", "node_obj2", "node_obj3")])
        driver = "foo"
        properties = "fake_prop"
        # The scenario splits kwargs: list filters go to _list_nodes, any
        # extra parameters are forwarded to _create_node.
        fake_params = {
            "sort_dir": "foo1",
            "associated": "foo2",
            "detail": True,
            "maintenance": "foo5",
            "fake_parameter1": "foo7"
        }

        # Positive case:
        scenario.run(driver, properties, **fake_params)
        scenario._create_node.assert_called_once_with(driver, properties,
                                                      fake_parameter1="foo7")
        scenario._list_nodes.assert_called_once_with(
            sort_dir="foo1", associated="foo2", detail=True,
            maintenance="foo5")

        # Negative case: created node not in the list of available nodes
        scenario._create_node = mock.Mock(uuid="foooo")
        self.assertRaises(exceptions.RallyAssertionError,
                          scenario.run, driver, properties, **fake_params)
        scenario._create_node.assert_called_with(driver, properties,
                                                 fake_parameter1="foo7")
        scenario._list_nodes.assert_called_with(
            sort_dir="foo1", associated="foo2", detail=True,
            maintenance="foo5")

    def test_create_and_delete_node(self):
        # _delete_node must receive exactly the node object _create_node
        # returned.
        fake_node = mock.Mock(uuid="fake_uuid")
        scenario = nodes.CreateAndDeleteNode(self.context)
        scenario._create_node = mock.Mock(return_value=fake_node)
        scenario._delete_node = mock.Mock()
        driver = "fake"
        properties = "fake_prop"
        scenario.run(driver, properties, fake_parameter1="fake1",
                     fake_parameter2="fake2")
        scenario._create_node.assert_called_once_with(
            driver, properties, fake_parameter1="fake1",
            fake_parameter2="fake2")
        scenario._delete_node.assert_called_once_with(
            scenario._create_node.return_value)
| 37.890244 | 79 | 0.635661 |
import mock
from rally import exceptions
from rally_openstack.scenarios.ironic import nodes
from tests.unit import test
class IronicNodesTestCase(test.ScenarioTestCase):
def test_create_and_list_node(self):
class Node(object):
def __init__(self, name):
self.name = name
scenario = nodes.CreateAndListNode(self.context)
scenario._create_node = mock.Mock(return_value=Node("node_obj1"))
scenario._list_nodes = mock.Mock(
return_value=[Node(name)
for name in ("node_obj1", "node_obj2", "node_obj3")])
driver = "foo"
properties = "fake_prop"
fake_params = {
"sort_dir": "foo1",
"associated": "foo2",
"detail": True,
"maintenance": "foo5",
"fake_parameter1": "foo7"
}
scenario.run(driver, properties, **fake_params)
scenario._create_node.assert_called_once_with(driver, properties,
fake_parameter1="foo7")
scenario._list_nodes.assert_called_once_with(
sort_dir="foo1", associated="foo2", detail=True,
maintenance="foo5")
scenario._create_node = mock.Mock(uuid="foooo")
self.assertRaises(exceptions.RallyAssertionError,
scenario.run, driver, properties, **fake_params)
scenario._create_node.assert_called_with(driver, properties,
fake_parameter1="foo7")
scenario._list_nodes.assert_called_with(
sort_dir="foo1", associated="foo2", detail=True,
maintenance="foo5")
def test_create_and_delete_node(self):
fake_node = mock.Mock(uuid="fake_uuid")
scenario = nodes.CreateAndDeleteNode(self.context)
scenario._create_node = mock.Mock(return_value=fake_node)
scenario._delete_node = mock.Mock()
driver = "fake"
properties = "fake_prop"
scenario.run(driver, properties, fake_parameter1="fake1",
fake_parameter2="fake2")
scenario._create_node.assert_called_once_with(
driver, properties, fake_parameter1="fake1",
fake_parameter2="fake2")
scenario._delete_node.assert_called_once_with(
scenario._create_node.return_value)
| true | true |
1c31e295e8c14d94e58e8f3701d4b9e685c4ce8f | 486 | py | Python | django-loaders/loaders/migrations/0003_auto_20171211_2042.py | nabbisen/python-django2-tutorials | c6fd75366587f936e22293e02e1c1c1e374adf2a | [
"MIT"
] | null | null | null | django-loaders/loaders/migrations/0003_auto_20171211_2042.py | nabbisen/python-django2-tutorials | c6fd75366587f936e22293e02e1c1c1e374adf2a | [
"MIT"
] | null | null | null | django-loaders/loaders/migrations/0003_auto_20171211_2042.py | nabbisen/python-django2-tutorials | c6fd75366587f936e22293e02e1c1c1e374adf2a | [
"MIT"
] | null | null | null | # Generated by Django 2.0 on 2017-12-11 11:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 2.0): redefines Agency.commission_form as a
    # ForeignKey to loaders.CommissionForm with CASCADE delete behavior.

    dependencies = [
        ('loaders', '0002_auto_20171211_1732'),
    ]

    operations = [
        migrations.AlterField(
            model_name='agency',
            name='commission_form',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='loaders.CommissionForm'),
        ),
    ]
| 24.3 | 110 | 0.652263 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('loaders', '0002_auto_20171211_1732'),
]
operations = [
migrations.AlterField(
model_name='agency',
name='commission_form',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='loaders.CommissionForm'),
),
]
| true | true |
1c31e2a3e3db553efe6f011e3997b03fa5efba1c | 1,180 | py | Python | tests/test_cli_serve_server.py | mwatts/datasette | 8c401ee0f054de2f568c3a8302c9223555146407 | [
"Apache-2.0"
] | 5,978 | 2017-11-13T21:59:52.000Z | 2022-03-31T12:10:42.000Z | tests/test_cli_serve_server.py | mwatts/datasette | 8c401ee0f054de2f568c3a8302c9223555146407 | [
"Apache-2.0"
] | 1,482 | 2017-11-13T21:19:43.000Z | 2022-03-31T07:45:26.000Z | tests/test_cli_serve_server.py | mwatts/datasette | 8c401ee0f054de2f568c3a8302c9223555146407 | [
"Apache-2.0"
] | 439 | 2017-11-13T22:03:30.000Z | 2022-03-30T08:00:39.000Z | import httpx
import pytest
import socket
@pytest.mark.serial
def test_serve_localhost_http(ds_localhost_http_server):
    # The fixture presumably starts a Datasette server on plain HTTP port
    # 8041 — TODO confirm against the fixture definition.
    response = httpx.get("http://localhost:8041/_memory.json")
    # Subset check: the JSON may carry extra keys; these three must match.
    assert {
        "database": "_memory",
        "path": "/_memory",
        "tables": [],
    }.items() <= response.json().items()
@pytest.mark.serial
def test_serve_localhost_https(ds_localhost_https_server):
    # Fixture yields a (server, client_cert) pair; the cert is passed to
    # verify= so TLS verification succeeds (presumably a test certificate —
    # confirm against the fixture).
    _, client_cert = ds_localhost_https_server
    response = httpx.get("https://localhost:8042/_memory.json", verify=client_cert)
    # Subset check on the in-memory database's metadata.
    assert {
        "database": "_memory",
        "path": "/_memory",
        "tables": [],
    }.items() <= response.json().items()
@pytest.mark.serial
@pytest.mark.skipif(
    not hasattr(socket, "AF_UNIX"), reason="Requires socket.AF_UNIX support"
)
def test_serve_unix_domain_socket(ds_unix_domain_socket_server):
    # Route HTTP over the Unix domain socket provided by the fixture; the
    # "localhost" host in the URL is nominal since no TCP connection is made.
    _, uds = ds_unix_domain_socket_server
    transport = httpx.HTTPTransport(uds=uds)
    client = httpx.Client(transport=transport)
    response = client.get("http://localhost/_memory.json")
    # Subset check on the in-memory database's metadata.
    assert {
        "database": "_memory",
        "path": "/_memory",
        "tables": [],
    }.items() <= response.json().items()
| 28.780488 | 83 | 0.666102 | import httpx
import pytest
import socket
@pytest.mark.serial
def test_serve_localhost_http(ds_localhost_http_server):
response = httpx.get("http://localhost:8041/_memory.json")
assert {
"database": "_memory",
"path": "/_memory",
"tables": [],
}.items() <= response.json().items()
@pytest.mark.serial
def test_serve_localhost_https(ds_localhost_https_server):
_, client_cert = ds_localhost_https_server
response = httpx.get("https://localhost:8042/_memory.json", verify=client_cert)
assert {
"database": "_memory",
"path": "/_memory",
"tables": [],
}.items() <= response.json().items()
@pytest.mark.serial
@pytest.mark.skipif(
not hasattr(socket, "AF_UNIX"), reason="Requires socket.AF_UNIX support"
)
def test_serve_unix_domain_socket(ds_unix_domain_socket_server):
_, uds = ds_unix_domain_socket_server
transport = httpx.HTTPTransport(uds=uds)
client = httpx.Client(transport=transport)
response = client.get("http://localhost/_memory.json")
assert {
"database": "_memory",
"path": "/_memory",
"tables": [],
}.items() <= response.json().items()
| true | true |
1c31e2b3fe42291dc91cfb70aee5b32968fa8ba2 | 70,017 | py | Python | src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/_help.py | stefanb995/azure-cli | d1c0c406d3ec42e496770fc3f81f53de06e4b18c | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/_help.py | stefanb995/azure-cli | d1c0c406d3ec42e496770fc3f81f53de06e4b18c | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/_help.py | stefanb995/azure-cli | d1c0c406d3ec42e496770fc3f81f53de06e4b18c | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps
# pylint: disable=line-too-long, too-many-lines
helps['disk'] = """
type: group
short-summary: Manage Azure Managed Disks.
long-summary: >4
Azure Virtual Machines use disks as a place to store an operating system, applications, and data.
All Azure virtual machines have at least two disks: An operating system disk, and a temporary disk.
The operating system disk is created from an image, and both the operating system disk and the image are actually virtual hard disks (VHDs)
stored in an Azure storage account. Virtual machines also can have one or more data disks, that are also stored as VHDs.
Azure Managed and Unmanaged Data Disks have a maximum size of 4095 GB (with the exception of larger disks in preview). Azure Unmanaged Disks also have a maximum capacity of 4095 GB.
For more information, see:
- Azure Disks - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/about-disks-and-vhds and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/about-disks-and-vhds.
- Larger Managed Disks in Public Preview - https://azure.microsoft.com/en-us/blog/introducing-the-public-preview-of-larger-managed-disks-sizes/
- Ultra SSD Managed Disks in Public Preview - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/disks-ultra-ssd
"""
helps['disk create'] = """
type: command
short-summary: Create a managed disk.
examples:
- name: Create a managed disk by importing from a blob uri.
text: >
az disk create -g MyResourceGroup -n MyDisk --source https://vhd1234.blob.core.windows.net/vhds/osdisk1234.vhd
- name: Create an empty managed disk.
text: >
az disk create -g MyResourceGroup -n MyDisk --size-gb 10
- name: Create a managed disk by copying an existing disk or snapshot.
text: >
az disk create -g MyResourceGroup -n MyDisk2 --source MyDisk
- name: Create a disk in an availability zone in the region of "East US 2"
text: >
az disk create -n MyDisk -g MyResourceGroup --size-gb 10 --location eastus2 --zone 1
"""
helps['disk delete'] = """
type: command
short-summary: Delete a managed disk.
examples:
- name: Delete a managed disk. (autogenerated)
text: az disk delete --name MyManagedDisk --resource-group MyResourceGroup
crafted: true
"""
helps['disk grant-access'] = """
type: command
short-summary: Grant a resource read access to a managed disk.
examples:
- name: Grant a resource read access to a managed disk. (autogenerated)
text: az disk grant-access --duration-in-seconds 3600 --name MyManagedDisk --resource-group MyResourceGroup
crafted: true
"""
helps['disk list'] = """
type: command
short-summary: List managed disks.
"""
helps['disk revoke-access'] = """
type: command
short-summary: Revoke a resource's read access to a managed disk.
"""
helps['disk update'] = """
type: command
short-summary: Update a managed disk.
examples:
- name: Update a managed disk. (autogenerated)
text: az disk update --name MyManagedDisk --resource-group MyResourceGroup --size-gb 20
crafted: true
"""
helps['disk wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of a managed disk is met.
"""
helps['image'] = """
type: group
short-summary: Manage custom virtual machine images.
"""
helps['image create'] = """
type: command
short-summary: Create a custom Virtual Machine Image from managed disks or snapshots.
examples:
- name: Create an image from an existing disk.
text: |
az image create -g MyResourceGroup -n image1 --os-type Linux \\
--source /subscriptions/db5eb68e-73e2-4fa8-b18a-0123456789999/resourceGroups/rg1/providers/Microsoft.Compute/snapshots/s1
- name: Create an image by capturing an existing generalized virtual machine in the same resource group.
text: az image create -g MyResourceGroup -n image1 --source MyVm1
"""
helps['image list'] = """
type: command
short-summary: List custom VM images.
"""
helps['image update'] = """
type: command
short-summary: Update custom VM images.
examples:
- name: Add or update tags.
text: az image update -n ImageName -g ResourceGroup --tags tag1=val1 tag2=val2
- name: Remove all tags.
text: az image update -n ImageName -g resourceGroup --tags
"""
helps['sig'] = """
type: group
short-summary: Manage shared image galleries.
"""
helps['sig create'] = """
type: command
short-summary: Create a shared image gallery.
examples:
  - name: Create a shared image gallery. (autogenerated)
    text: az sig create --gallery-name MyGallery --resource-group MyResourceGroup
    crafted: true
"""
helps['sig image-definition'] = """
type: group
short-summary: Create an image definition.
"""
helps['sig image-definition create'] = """
type: command
short-summary: Create a gallery image definition.
examples:
  - name: Create a linux image definition
    text: |
        az sig image-definition create -g MyResourceGroup --gallery-name MyGallery --gallery-image-definition MyImage --publisher GreatPublisher --offer GreatOffer --sku GreatSku --os-type linux
"""
helps['sig image-definition update'] = """
type: command
short-summary: Update a shared image definition.
"""
helps['sig image-version'] = """
type: group
short-summary: Create a new version from an image definition.
"""
helps['sig image-version create'] = """
type: command
short-summary: Create a new image version.
long-summary: This operation might take a long time depending on the number of replicated regions. Using "--no-wait" is advised.
examples:
  - name: Add a new image version
    text: |
        az sig image-version create -g MyResourceGroup --gallery-name MyGallery --gallery-image-definition MyImage --gallery-image-version 1.0.0 --managed-image /subscriptions/00000000-0000-0000-0000-00000000xxxx/resourceGroups/imageGroups/providers/images/MyManagedImage
  - name: Add a new image version replicated across multiple regions with different replication counts each. Eastus2 will have its replica count set to the default replica count.
    text: |
        az sig image-version create -g MyResourceGroup --gallery-name MyGallery \\
        --gallery-image-definition MyImage --gallery-image-version 1.0.0 \\
        --managed-image image-name --target-regions eastus2 ukwest=3 southindia=2
  - name: Add a new image version and don't wait on it. Later you can invoke "az sig image-version wait" command when ready to create a vm from the gallery image version
    text: |
        az sig image-version create --no-wait -g MyResourceGroup --gallery-name MyGallery \\
        --gallery-image-definition MyImage --gallery-image-version 1.0.0 \\
        --managed-image imageInTheSameResourceGroup
"""
helps['sig image-version update'] = """
type: command
short-summary: Update a shared image version.
examples:
  - name: Replicate to a new set of regions
    text: |
        az sig image-version update -g MyResourceGroup --gallery-name MyGallery --gallery-image-definition MyImage --gallery-image-version 1.0.0 --target-regions westcentralus=2 eastus2
  - name: Replicate to one more region
    text: |
        az sig image-version update -g MyResourceGroup --gallery-name MyGallery --gallery-image-definition MyImage --gallery-image-version 1.0.0 --add publishingProfile.targetRegions name=westcentralus
"""
helps['sig image-version wait'] = """
type: command
short-summary: Wait for an image version related operation.
examples:
  - name: Wait until an image version gets updated
    text: |
        az sig image-version wait --updated -g MyResourceGroup --gallery-name MyGallery --gallery-image-definition MyImage --gallery-image-version 1.0.0
"""
helps['sig list'] = """
type: command
short-summary: List shared image galleries.
"""
helps['sig update'] = """
type: command
short-summary: Update a shared image gallery.
"""
helps['snapshot'] = """
type: group
short-summary: Manage point-in-time copies of managed disks, native blobs, or other snapshots.
"""
helps['snapshot create'] = """
type: command
short-summary: Create a snapshot.
examples:
- name: Create a snapshot by importing from a blob uri.
text: >
az snapshot create -g MyResourceGroup -n MySnapshot --source https://vhd1234.blob.core.windows.net/vhds/osdisk1234.vhd
- name: Create an empty snapshot.
text: az snapshot create -g MyResourceGroup -n MySnapshot --size-gb 10
- name: Create a snapshot by copying an existing disk in the same resource group.
text: az snapshot create -g MyResourceGroup -n MySnapshot2 --source MyDisk
"""
helps['snapshot grant-access'] = """
type: command
short-summary: Grant read access to a snapshot.
examples:
- name: Grant read access to a snapshot. (autogenerated)
text: az snapshot grant-access --duration-in-seconds 3600 --name MySnapshot --resource-group MyResourceGroup
crafted: true
"""
helps['snapshot list'] = """
type: command
short-summary: List snapshots.
"""
helps['snapshot revoke-access'] = """
type: command
short-summary: Revoke read access to a snapshot.
examples:
- name: Revoke read access to a snapshot. (autogenerated)
text: az snapshot revoke-access --name MySnapshot --resource-group MyResourceGroup
crafted: true
"""
helps['snapshot update'] = """
type: command
short-summary: Update a snapshot.
"""
helps['snapshot wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of a snapshot is met.
"""
helps['vm'] = """
type: group
short-summary: Manage Linux or Windows virtual machines.
"""
helps['vm availability-set'] = """
type: group
short-summary: Group resources into availability sets.
long-summary: >
To provide redundancy to an application, it is recommended to group two or more virtual machines in an availability set.
This configuration ensures that during either a planned or unplanned maintenance event, at least one virtual machine
will be available.
"""
helps['vm availability-set convert'] = """
type: command
short-summary: Convert an Azure Availability Set to contain VMs with managed disks.
examples:
  - name: Convert an availability set to use managed disks by name.
    text: az vm availability-set convert -g MyResourceGroup -n MyAvSet
  - name: Convert an availability set to use managed disks by ID.
    text: >
        az vm availability-set convert --ids $(az vm availability-set list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm availability-set create'] = """
type: command
short-summary: Create an Azure Availability Set.
long-summary: 'For more information, see https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-manage-availability.'
examples:
- name: Create an availability set.
text: az vm availability-set create -n MyAvSet -g MyResourceGroup --platform-fault-domain-count 2 --platform-update-domain-count 2
"""
helps['vm availability-set delete'] = """
type: command
short-summary: Delete an availability set.
examples:
- name: Delete an availability set.
text: az vm availability-set delete -n MyAvSet -g MyResourceGroup
"""
helps['vm availability-set list'] = """
type: command
short-summary: List availability sets.
examples:
- name: List availability sets.
text: az vm availability-set list -g MyResourceGroup
"""
helps['vm availability-set list-sizes'] = """
type: command
short-summary: List VM sizes for an availability set.
examples:
- name: List VM sizes for an availability set.
text: az vm availability-set list-sizes -n MyAvSet -g MyResourceGroup
"""
helps['vm availability-set show'] = """
type: command
short-summary: Get information for an availability set.
examples:
- name: Get information about an availability set.
text: az vm availability-set show -n MyAvSet -g MyResourceGroup
"""
helps['vm availability-set update'] = """
type: command
short-summary: Update an Azure Availability Set.
examples:
- name: Update an availability set.
text: az vm availability-set update -n MyAvSet -g MyResourceGroup
- name: Update an availability set tag.
text: az vm availability-set update -n MyAvSet -g MyResourceGroup --set tags.foo=value
- name: Remove an availability set tag.
text: az vm availability-set update -n MyAvSet -g MyResourceGroup --remove tags.foo
"""
helps['vm boot-diagnostics'] = """
type: group
short-summary: Troubleshoot the startup of an Azure Virtual Machine.
long-summary: Use this feature to troubleshoot boot failures for custom or platform images.
"""
helps['vm boot-diagnostics disable'] = """
type: command
short-summary: Disable the boot diagnostics on a VM.
examples:
- name: Disable boot diagnostics on all VMs in a resource group.
text: >
az vm boot-diagnostics disable --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm boot-diagnostics enable'] = """
type: command
short-summary: Enable the boot diagnostics on a VM.
parameters:
- name: --storage
short-summary: Name or URI of a storage account (e.g. https://your_storage_account_name.blob.core.windows.net/)
examples:
- name: Enable boot diagnostics on all VMs in a resource group.
text: >
az vm boot-diagnostics enable --storage https://mystor.blob.core.windows.net/ --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
- name: Enable the boot diagnostics on a VM. (autogenerated)
text: az vm boot-diagnostics enable --name MyVirtualMachine --resource-group MyResourceGroup --storage https://mystor.blob.core.windows.net/
crafted: true
"""
helps['vm boot-diagnostics get-boot-log'] = """
type: command
short-summary: Get the boot diagnostics log from a VM.
examples:
- name: Get diagnostics logs for all VMs in a resource group.
text: >
az vm boot-diagnostics get-boot-log --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
- name: Get the boot diagnostics log from a VM. (autogenerated)
text: az vm boot-diagnostics get-boot-log --name MyVirtualMachine --resource-group MyResourceGroup
crafted: true
"""
helps['vm capture'] = """
type: command
short-summary: Capture information for a stopped VM.
long-summary: 'For an end-to-end tutorial, see https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-capture-image'
parameters:
- name: --vhd-name-prefix
type: string
short-summary: The VHD name prefix specify for the VM disks.
- name: --storage-container
short-summary: The storage account container name in which to save the disks.
- name: --overwrite
short-summary: Overwrite the existing disk file.
examples:
- name: Deallocate, generalize, and capture a stopped virtual machine.
text: |
az vm deallocate -g MyResourceGroup -n MyVm
az vm generalize -g MyResourceGroup -n MyVm
az vm capture -g MyResourceGroup -n MyVm --vhd-name-prefix MyPrefix
- name: Deallocate, generalize, and capture multiple stopped virtual machines.
text: |
vms_ids=$(az vm list -g MyResourceGroup --query "[].id" -o tsv)
az vm deallocate --ids {vms_ids}
az vm generalize --ids {vms_ids}
az vm capture --ids {vms_ids} --vhd-name-prefix MyPrefix
"""
helps['vm convert'] = """
type: command
short-summary: Convert a VM with unmanaged disks to use managed disks.
examples:
- name: Convert a VM with unmanaged disks to use managed disks.
text: az vm convert -g MyResourceGroup -n MyVm
- name: Convert all VMs with unmanaged disks in a resource group to use managed disks.
text: >
az vm convert --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm create'] = """
type: command
short-summary: Create an Azure Virtual Machine.
long-summary: 'For an end-to-end tutorial, see https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-quick-create-cli.'
parameters:
- name: --image
type: string
short-summary: >
The name of the operating system image as a URN alias, URN, custom image name or ID, or VHD blob URI.
This parameter is required unless using `--attach-os-disk.` Valid URN format: "Publisher:Offer:Sku:Version".
populator-commands:
- az vm image list
- az vm image show
- name: --ssh-key-value
short-summary: The SSH public key or public key file path.
examples:
- name: Create a default Ubuntu VM with automatic SSH authentication.
text: >
az vm create -n MyVm -g MyResourceGroup --image UbuntuLTS
- name: Create a default RedHat VM with automatic SSH authentication using an image URN.
text: >
az vm create -n MyVm -g MyResourceGroup --image RedHat:RHEL:7-RAW:7.4.2018010506
- name: Create a default Windows Server VM with a private IP address.
text: >
az vm create -n MyVm -g MyResourceGroup --public-ip-address "" --image Win2012R2Datacenter
- name: Create a VM from a custom managed image.
text: >
az vm create -g MyResourceGroup -n MyVm --image MyImage
- name: Create a VM by attaching to a managed operating system disk.
text: >
az vm create -g MyResourceGroup -n MyVm --attach-os-disk MyOsDisk --os-type linux
- name: 'Create an Ubuntu Linux VM using a cloud-init script for configuration. See: https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init.'
text: >
az vm create -g MyResourceGroup -n MyVm --image debian --custom-data MyCloudInitScript.yml
- name: Create a Debian VM with SSH key authentication and a public DNS entry, located on an existing virtual network and availability set.
text: |
az vm create -n MyVm -g MyResourceGroup --image debian --vnet-name MyVnet --subnet subnet1 \\
--availability-set MyAvailabilitySet --public-ip-address-dns-name MyUniqueDnsName \\
--ssh-key-value @key-file
- name: Create a simple Ubuntu Linux VM with a public IP address, DNS entry, two data disks (10GB and 20GB), and then generate ssh key pairs.
text: |
az vm create -n MyVm -g MyResourceGroup --public-ip-address-dns-name MyUniqueDnsName \\
--image ubuntults --data-disk-sizes-gb 10 20 --size Standard_DS2_v2 \\
--generate-ssh-keys
- name: Create a Debian VM using Key Vault secrets.
text: >
az keyvault certificate create --vault-name vaultname -n cert1 \\
-p "$(az keyvault certificate get-default-policy)"
secrets=$(az keyvault secret list-versions --vault-name vaultname \\
-n cert1 --query "[?attributes.enabled].id" -o tsv)
vm_secrets=$(az vm secret format -s "$secrets")
az vm create -g group-name -n vm-name --admin-username deploy \\
--image debian --secrets "$vm_secrets"
- name: Create a CentOS VM with a system assigned identity. The VM will have a 'Contributor' role with access to a storage account.
text: >
az vm create -n MyVm -g rg1 --image centos --assign-identity --scope /subscriptions/99999999-1bf0-4dda-aec3-cb9272f09590/MyResourceGroup/myRG/providers/Microsoft.Storage/storageAccounts/storage1
- name: Create a debian VM with a user assigned identity.
text: >
az vm create -n MyVm -g rg1 --image debian --assign-identity /subscriptions/99999999-1bf0-4dda-aec3-cb9272f09590/resourcegroups/myRG/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myID
- name: Create a debian VM with both system and user assigned identity.
text: >
az vm create -n MyVm -g rg1 --image debian --assign-identity [system] /subscriptions/99999999-1bf0-4dda-aec3-cb9272f09590/resourcegroups/myRG/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myID
- name: Create a VM in an availability zone in the current resource group's region
supported-profiles: latest
text: >
az vm create -n MyVm -g MyResourceGroup --image Centos --zone 1
"""
helps['vm deallocate'] = """
type: command
short-summary: Deallocate a VM.
long-summary: 'For an end-to-end tutorial, see https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-capture-image'
examples:
- name: Deallocate, generalize, and capture a stopped virtual machine.
text: |
az vm deallocate -g MyResourceGroup -n MyVm
az vm generalize -g MyResourceGroup -n MyVm
az vm capture -g MyResourceGroup -n MyVm --vhd-name-prefix MyPrefix
- name: Deallocate, generalize, and capture multiple stopped virtual machines.
text: |
vms_ids=$(az vm list -g MyResourceGroup --query "[].id" -o tsv)
az vm deallocate --ids {vms_ids}
az vm generalize --ids {vms_ids}
az vm capture --ids {vms_ids} --vhd-name-prefix MyPrefix
"""
helps['vm delete'] = """
type: command
short-summary: Delete a VM.
examples:
- name: Delete a VM without a prompt for confirmation.
text: >
az vm delete -g MyResourceGroup -n MyVm --yes
- name: Delete all VMs in a resource group.
text: >
az vm delete --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm diagnostics'] = """
type: group
short-summary: Configure the Azure Virtual Machine diagnostics extension.
"""
helps['vm diagnostics get-default-config'] = """
type: command
short-summary: Get the default configuration settings for a VM.
examples:
- name: Get the default diagnostics for a Linux VM and override the storage account name and the VM resource ID.
text: |
az vm diagnostics get-default-config \\
| sed "s#__DIAGNOSTIC_STORAGE_ACCOUNT__#MyStorageAccount#g" \\
| sed "s#__VM_OR_VMSS_RESOURCE_ID__#MyVmResourceId#g"
- name: Get the default diagnostics for a Windows VM.
text: >
az vm diagnostics get-default-config --is-windows-os
"""
helps['vm diagnostics set'] = """
type: command
short-summary: Configure the Azure VM diagnostics extension.
examples:
- name: Set up default diagnostics on a Linux VM for Azure Portal VM metrics graphs and syslog collection.
text: |
# Set the following 3 parameters first.
my_resource_group={Resource group name containing your Linux VM and the storage account}
my_linux_vm={Your Azure Linux VM name}
my_diagnostic_storage_account={Your Azure storage account for storing VM diagnostic data}
my_vm_resource_id=$(az vm show -g $my_resource_group -n $my_linux_vm --query "id" -o tsv)
default_config=$(az vm diagnostics get-default-config \\
| sed "s#__DIAGNOSTIC_STORAGE_ACCOUNT__#$my_diagnostic_storage_account#g" \\
| sed "s#__VM_OR_VMSS_RESOURCE_ID__#$my_vm_resource_id#g")
storage_sastoken=$(az storage account generate-sas \\
--account-name $my_diagnostic_storage_account --expiry 2037-12-31T23:59:00Z \\
--permissions wlacu --resource-types co --services bt -o tsv)
protected_settings="{'storageAccountName': '$my_diagnostic_storage_account', \\
'storageAccountSasToken': '$storage_sastoken'}"
az vm diagnostics set --settings "$default_config" \\
--protected-settings "$protected_settings" \\
--resource-group $my_resource_group --vm-name $my_linux_vm
- name: Set up default diagnostics on a Windows VM.
text: |
# Set the following 3 parameters first.
my_resource_group={Resource group name containing your Windows VM and the storage account}
my_windows_vm={Your Azure Windows VM name}
my_diagnostic_storage_account={Your Azure storage account for storing VM diagnostic data}
my_vm_resource_id=$(az vm show -g $my_resource_group -n $my_windows_vm --query "id" -o tsv)
default_config=$(az vm diagnostics get-default-config --is-windows-os \\
| sed "s#__DIAGNOSTIC_STORAGE_ACCOUNT__#$my_diagnostic_storage_account#g" \\
| sed "s#__VM_OR_VMSS_RESOURCE_ID__#$my_vm_resource_id#g")
# Please use the same options, the WAD diagnostic extension has strict
# expectations of the sas token's format. Set the expiry as desired.
storage_sastoken=$(az storage account generate-sas \\
--account-name $my_diagnostic_storage_account --expiry 2037-12-31T23:59:00Z \\
--permissions acuw --resource-types co --services bt --https-only --output tsv)
protected_settings="{'storageAccountName': '$my_diagnostic_storage_account', \\
'storageAccountSasToken': '$storage_sastoken'}"
az vm diagnostics set --settings "$default_config" \\
--protected-settings "$protected_settings" \\
--resource-group $my_resource_group --vm-name $my_windows_vm
# # Alternatively, if the WAD extension has issues parsing the sas token,
# # one can use a storage account key instead.
storage_account_key=$(az storage account keys list --account-name {my_storage_account} \\
--query [0].value -o tsv)
protected_settings="{'storageAccountName': '$my_diagnostic_storage_account', \\
'storageAccountKey': '$storage_account_key'}"
"""
helps['vm disk'] = """
type: group
short-summary: Manage the managed data disks attached to a VM.
long-summary: >4
Azure Virtual Machines use disks as a place to store an operating system, applications, and data.
All Azure virtual machines have at least two disks: An operating system disk, and a temporary disk.
The operating system disk is created from an image, and both the operating system disk and the image are actually virtual hard disks (VHDs)
stored in an Azure storage account. Virtual machines also can have one or more data disks, that are also stored as VHDs.
Azure Managed and Unmanaged Data Disks have a maximum size of 4095 GB (with the exception of larger disks in preview). Azure Unmanaged Disks also have a maximum capacity of 4095 GB.
For more information, see:
- Azure Disks - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/about-disks-and-vhds and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/about-disks-and-vhds.
- Larger Managed Disks in Public Preview - https://azure.microsoft.com/en-us/blog/introducing-the-public-preview-of-larger-managed-disks-sizes/
- Ultra SSD Managed Disks in Public Preview - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/disks-ultra-ssd
"""
helps['vm disk attach'] = """
type: command
short-summary: Attach a managed persistent disk to a VM.
long-summary: This allows for the preservation of data, even if the VM is reprovisioned due to maintenance or resizing.
examples:
- name: Attach a new default sized (1023 GB) managed data disk to a VM.
text: az vm disk attach -g MyResourceGroup --vm-name MyVm --name disk_name --new
"""
helps['vm disk detach'] = """
type: command
short-summary: Detach a managed disk from a VM.
examples:
- name: Detach a data disk from a VM.
text: >
az vm disk detach -g MyResourceGroup --vm-name MyVm --name disk_name
"""
# Help metadata for the `az vm encryption` command group (YAML consumed by knack).
# Fixed: a stray trailing '"' after the URL rendered a broken link in help output.
helps['vm encryption'] = """
type: group
short-summary: "Manage encryption of VM disks."
long-summary: |
    For more information, see:
      https://docs.microsoft.com/en-us/azure/security/azure-security-disk-encryption-overview
"""
helps['vm encryption disable'] = """
type: command
short-summary: Disable disk encryption on the OS disk and/or data disks. Decrypt mounted disks.
long-summary: |
For Linux VMs, disabling encryption is only permitted on data volumes.
For Windows VMS, disabling encryption is permitted on both OS and data volumes.
examples:
- name: Disable disk encryption on the OS disk and/or data disks. (autogenerated)
text: az vm encryption disable --name MyVirtualMachine --resource-group MyResourceGroup --volume-type DATA
crafted: true
"""
helps['vm encryption enable'] = """
type: command
short-summary: "Enable disk encryption on the OS disk and/or data disks. Encrypt mounted disks."
long-summary: |
Note that Azure Active Directory / service principal arguments are unnecessary for vm encryption. The older version of Azure Disk Encryption required AAD arguments.
For more information, see:
https://docs.microsoft.com/en-us/azure/security/azure-security-disk-encryption-overview
parameters:
- name: --aad-client-id
short-summary: Client ID of an AAD app with permissions to write secrets to the key vault.
- name: --aad-client-secret
short-summary: Client secret of the AAD app with permissions to write secrets to the key vault.
- name: --aad-client-cert-thumbprint
short-summary: Thumbprint of the AAD app certificate with permissions to write secrets to the key vault.
examples:
- name: encrypt a VM using a key vault in the same resource group
text: >
az vm encryption enable -g MyResourceGroup -n MyVm --disk-encryption-keyvault MyVault
"""
helps['vm encryption show'] = """
type: command
short-summary: Show encryption status.
examples:
- name: Show encryption status. (autogenerated)
text: az vm encryption show --name MyVirtualMachine --resource-group MyResourceGroup
crafted: true
"""
helps['vm extension'] = """
type: group
short-summary: Manage extensions on VMs.
long-summary: >
Extensions are small applications that provide post-deployment configuration and automation tasks on Azure virtual machines.
For example, if a virtual machine requires software installation, anti-virus protection, or Docker configuration, a VM extension
can be used to complete these tasks. Extensions can be bundled with a new virtual machine deployment or run against any existing system.
"""
helps['vm extension delete'] = """
type: command
short-summary: Remove an extension attached to a VM.
examples:
- name: Use a VM name and extension to delete an extension from a VM.
text: az vm extension delete -g MyResourceGroup --vm-name MyVm -n extension_name
- name: Delete extensions with IDs containing the string "MyExtension" from a VM.
text: >
az vm extension delete --ids \\
$(az resource list --query "[?contains(name, 'MyExtension')].id" -o tsv)
"""
helps['vm extension image'] = """
type: group
short-summary: Find the available VM extensions for a subscription and region.
"""
helps['vm extension image list'] = """
type: command
short-summary: List the information on available extensions.
examples:
- name: List the unique publishers for extensions.
text: az vm extension image list --query "[].publisher" -o tsv | sort -u
- name: Find extensions with "Docker" in the name.
text: az vm extension image list --query "[].name" -o tsv | sort -u | grep Docker
- name: List extension names where the publisher name starts with "Microsoft.Azure.App".
text: |
az vm extension image list --query \\
"[?starts_with(publisher, 'Microsoft.Azure.App')].publisher" \\
-o tsv | sort -u | xargs -I{} az vm extension image list-names --publisher {} -l westus
"""
helps['vm extension image list-names'] = """
type: command
short-summary: List the names of available extensions.
examples:
- name: Find Docker extensions by publisher and location.
text: >
az vm extension image list-names --publisher Microsoft.Azure.Extensions \\
-l westus --query "[?starts_with(name, 'Docker')]"
- name: Find CustomScript extensions by publisher and location.
text: >
az vm extension image list-names --publisher Microsoft.Azure.Extensions \\
-l westus --query "[?starts_with(name, 'Custom')]"
"""
helps['vm extension image list-versions'] = """
type: command
short-summary: List the versions for available extensions.
examples:
- name: Find the available versions for the Docker extension.
text: >
az vm extension image list-versions --publisher Microsoft.Azure.Extensions \\
-l westus -n DockerExtension -otable
"""
helps['vm extension image show'] = """
type: command
short-summary: Display information for an extension.
examples:
- name: Show the CustomScript extension version 2.0.2.
text: >
az vm extension image show -l westus -n CustomScript \\
--publisher Microsoft.Azure.Extensions --version 2.0.2
- name: Show the latest version of the Docker extension.
text: >
publisher=Microsoft.Azure.Extensions
extension=DockerExtension
location=westus
latest=$(az vm extension image list-versions \\
--publisher {publisher} -l {location} -n {extension} \\
--query "[].name" -o tsv | sort | tail -n 1)
az vm extension image show -l {location} \\
--publisher {publisher} -n {extension} --version {latest}
"""
helps['vm extension list'] = """
type: command
short-summary: List the extensions attached to a VM.
examples:
- name: List attached extensions to a named VM.
text: az vm extension list -g MyResourceGroup --vm-name MyVm
"""
helps['vm extension set'] = """
type: command
short-summary: Set extensions for a VM.
long-summary: Get extension details from `az vm extension image list`.
examples:
- name: Add a user account to a Linux VM.
text: |
az vm extension set -n VMAccessForLinux --publisher Microsoft.OSTCExtensions --version 1.4 \\
--vm-name MyVm --resource-group MyResourceGroup \\
--protected-settings '{"username":"user1", "ssh_key":"ssh_rsa ..."}'
parameters:
- name: --name -n
populator-commands:
- az vm extension image list
"""
helps['vm extension show'] = """
type: command
short-summary: Display information about extensions attached to a VM.
examples:
- name: Use VM name and extension name to show the extensions attached to a VM.
text: az vm extension show -g MyResourceGroup --vm-name MyVm -n extension_name
"""
helps['vm extension wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of a virtual machine extension is met.
"""
helps['vm generalize'] = """
type: command
short-summary: Mark a VM as generalized, allowing it to be imaged for multiple deployments.
long-summary: 'For an end-to-end tutorial, see https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-capture-image'
examples:
- name: Deallocate, generalize, and capture a stopped virtual machine.
text: |
az vm deallocate -g MyResourceGroup -n MyVm
az vm generalize -g MyResourceGroup -n MyVm
az vm capture -g MyResourceGroup -n MyVm --vhd-name-prefix MyPrefix
- name: Deallocate, generalize, and capture multiple stopped virtual machines.
text: |
vms_ids=$(az vm list -g MyResourceGroup --query "[].id" -o tsv)
az vm deallocate --ids {vms_ids}
az vm generalize --ids {vms_ids}
az vm capture --ids {vms_ids} --vhd-name-prefix MyPrefix
"""
helps['vm get-instance-view'] = """
type: command
short-summary: Get instance information about a VM.
examples:
- name: Use a resource group and name to get instance view information of a VM.
text: az vm get-instance-view -g MyResourceGroup -n MyVm
- name: Get instance views for all VMs in a resource group.
text: >
az vm get-instance-view --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm identity'] = """
type: group
short-summary: manage service identities of a VM
"""
helps['vm identity assign'] = """
type: command
short-summary: Enable managed service identity on a VM.
long-summary: This is required to authenticate and interact with other Azure services using bearer tokens.
examples:
- name: Enable the system assigned identity on a VM with the 'Reader' role.
text: az vm identity assign -g MyResourceGroup -n MyVm --role Reader --scope /subscriptions/db5eb68e-73e2-4fa8-b18a-0123456789999/resourceGroups/MyResourceGroup
- name: Enable the system assigned identity and a user assigned identity on a VM.
text: az vm identity assign -g MyResourceGroup -n MyVm --role Reader --identities [system] myAssignedId
"""
helps['vm identity remove'] = """
type: command
short-summary: Remove managed service identities from a VM.
examples:
- name: Remove the system assigned identity
text: az vm identity remove -g MyResourceGroup -n MyVm
- name: Remove a user assigned identity
text: az vm identity remove -g MyResourceGroup -n MyVm --identities readerId
- name: Remove 2 identities which are in the same resource group with the VM
text: az vm identity remove -g MyResourceGroup -n MyVm --identities readerId writerId
- name: Remove the system assigned identity and a user identity
text: az vm identity remove -g MyResourceGroup -n MyVm --identities [system] readerId
"""
helps['vm identity show'] = """
type: command
short-summary: display VM's managed identity info.
examples:
- name: display VM's managed identity info. (autogenerated)
text: az vm identity show --name MyVirtualMachine --resource-group MyResourceGroup
crafted: true
"""
helps['vm image'] = """
type: group
short-summary: Information on available virtual machine images.
"""
# Help metadata for `az vm image accept-terms` (YAML consumed by knack).
# Fixed: "Marketplace term" -> "Marketplace terms" (grammar, two occurrences).
helps['vm image accept-terms'] = """
type: command
short-summary: Accept Azure Marketplace terms so that the image can be used to create VMs
examples:
  - name: Accept Azure Marketplace terms so that the image can be used to create VMs. (autogenerated)
    text: az vm image accept-terms --urn publisher:offer:sku:version
    crafted: true
"""
helps['vm image list'] = """
type: command
short-summary: List the VM/VMSS images available in the Azure Marketplace.
parameters:
- name: --all
short-summary: Retrieve image list from live Azure service rather using an offline image list
- name: --offer -f
short-summary: Image offer name, partial name is accepted
- name: --publisher -p
short-summary: Image publisher name, partial name is accepted
- name: --sku -s
short-summary: Image sku name, partial name is accepted
examples:
- name: List all available images.
text: az vm image list --all
- name: List all offline cached CentOS images.
text: az vm image list -f CentOS
- name: List all CentOS images.
text: az vm image list -f CentOS --all
"""
# Help metadata for `az vm image list-offers` (YAML consumed by knack).
# Fixed: "OpenLocic" typo (the example command itself uses publisher OpenLogic),
# and the populator command `az vm list-publishers`, which does not exist — the
# sibling `vm image list-skus` entry correctly uses `az vm image list-publishers`.
helps['vm image list-offers'] = """
type: command
short-summary: List the VM image offers available in the Azure Marketplace.
parameters:
  - name: --publisher -p
    populator-commands:
      - az vm image list-publishers
examples:
  - name: List all offers from Microsoft in the West US region.
    text: az vm image list-offers -l westus -p MicrosoftWindowsServer
  - name: List all offers from OpenLogic in the West US region.
    text: az vm image list-offers -l westus -p OpenLogic
"""
helps['vm image list-publishers'] = """
type: command
short-summary: List the VM image publishers available in the Azure Marketplace.
examples:
- name: List all publishers in the West US region.
text: az vm image list-publishers -l westus
- name: List all publishers with names starting with "Open" in westus.
text: az vm image list-publishers -l westus --query "[?starts_with(name, 'Open')]"
"""
helps['vm image list-skus'] = """
type: command
short-summary: List the VM image SKUs available in the Azure Marketplace.
parameters:
- name: --publisher -p
populator-commands:
- az vm image list-publishers
examples:
- name: List all skus available for CentOS published by OpenLogic in the West US region.
text: az vm image list-skus -l westus -f CentOS -p OpenLogic
"""
helps['vm image show'] = """
type: command
short-summary: Get the details for a VM image available in the Azure Marketplace.
examples:
- name: Show information for the latest available CentOS image from OpenLogic.
text: >
latest=$(az vm image list -p OpenLogic -s 7.3 --all --query \\
"[?offer=='CentOS'].version" -o tsv | sort -u | tail -n 1)
az vm image show -l westus -f CentOS -p OpenLogic --sku 7.3 --version {latest}
"""
helps['vm list'] = """
type: command
short-summary: List details of Virtual Machines.
long-summary: 'For more information on querying information about Virtual Machines, see https://docs.microsoft.com/en-us/cli/azure/query-az-cli2'
examples:
- name: List all VMs.
text: az vm list
- name: List all VMs by resource group.
text: az vm list -g MyResourceGroup
- name: List all VMs by resource group with details.
text: az vm list -g MyResourceGroup -d
"""
helps['vm list-ip-addresses'] = """
type: command
short-summary: List IP addresses associated with a VM.
examples:
- name: Get the IP addresses for a VM.
text: az vm list-ip-addresses -g MyResourceGroup -n MyVm
- name: Get IP addresses for all VMs in a resource group.
text: >
az vm list-ip-addresses --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm list-sizes'] = """
type: command
short-summary: List available sizes for VMs.
examples:
- name: List the available VM sizes in the West US region.
text: az vm list-sizes -l westus
"""
helps['vm list-skus'] = """
type: command
short-summary: Get details for compute-related resource SKUs.
long-summary: This command incorporates subscription level restriction, offering the most accurate information.
examples:
- name: List all SKUs in the West US region.
text: az vm list-skus -l westus
- name: List all available vm sizes in the East US2 region which support availability zone.
text: az vm list-skus -l eastus2 --zone
- name: List all available vm sizes in the East US2 region which support availability zone with name like "standard_ds1...".
text: az vm list-skus -l eastus2 --zone --size standard_ds1
- name: List availability set related sku information in The West US region.
text: az vm list-skus -l westus --resource-type availabilitySets
"""
helps['vm list-usage'] = """
type: command
short-summary: List available usage resources for VMs.
examples:
- name: Get the compute resource usage for the West US region.
text: az vm list-usage -l westus
"""
helps['vm list-vm-resize-options'] = """
type: command
short-summary: List available resizing options for VMs.
examples:
- name: List all available VM sizes for resizing.
text: az vm list-vm-resize-options -g MyResourceGroup -n MyVm
- name: List available sizes for all VMs in a resource group.
text: >
az vm list-vm-resize-options --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm nic'] = """
type: group
short-summary: Manage network interfaces. See also `az network nic`.
long-summary: >
A network interface (NIC) is the interconnection between a VM and the underlying software
network. For more information, see https://docs.microsoft.com/azure/virtual-network/virtual-network-network-interface-overview.
"""
helps['vm nic add'] = """
type: command
short-summary: Add existing NICs to a VM.
examples:
- name: Add two NICs to a VM.
text: az vm nic add -g MyResourceGroup --vm-name MyVm --nics nic_name1 nic_name2
"""
helps['vm nic list'] = """
type: command
short-summary: List the NICs available on a VM.
examples:
- name: List all of the NICs on a VM.
text: az vm nic list -g MyResourceGroup --vm-name MyVm
"""
helps['vm nic remove'] = """
type: command
short-summary: Remove NICs from a VM.
examples:
- name: Remove two NICs from a VM.
text: az vm nic remove -g MyResourceGroup --vm-name MyVm --nics nic_name1 nic_name2
"""
helps['vm nic set'] = """
type: command
short-summary: Configure settings of a NIC attached to a VM.
examples:
- name: Set a NIC on a VM to be the primary interface.
text: az vm nic set -g MyResourceGroup --vm-name MyVm --nic nic_name1 nic_name2 --primary-nic nic_name2
"""
helps['vm nic show'] = """
type: command
short-summary: Display information for a NIC attached to a VM.
examples:
- name: Show details of a NIC on a VM.
text: az vm nic show -g MyResourceGroup --vm-name MyVm --nic nic_name1
"""
helps['vm open-port'] = """
type: command
short-summary: Opens a VM to inbound traffic on specified ports.
long-summary: >
Adds a security rule to the network security group (NSG) that is attached to the VM's
network interface (NIC) or subnet. The existing NSG will be used or a new one will be
created. The rule name is 'open-port-{port}' and will overwrite an existing rule with
this name. For multi-NIC VMs, or for more fine-grained control, use the appropriate
network commands directly (nsg rule create, etc).
examples:
- name: Open all ports on a VM to inbound traffic.
text: az vm open-port -g MyResourceGroup -n MyVm --port '*'
- name: Open a range of ports on a VM to inbound traffic with the highest priority.
text: az vm open-port -g MyResourceGroup -n MyVm --port 80-100 --priority 100
- name: Open all ports for all VMs in a resource group.
text: >
az vm open-port --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv) --port '*'
"""
helps['vm redeploy'] = """
type: command
short-summary: Redeploy an existing VM.
examples:
- name: Redeploy a VM.
text: az vm redeploy -g MyResourceGroup -n MyVm
- name: Redeploy all VMs in a resource group.
text: >
az vm redeploy --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm resize'] = """
type: command
short-summary: Update a VM's size.
parameters:
- name: --size
type: string
short-summary: The VM size.
populator-commands:
- az vm list-vm-resize-options
examples:
- name: Resize a VM.
text: az vm resize -g MyResourceGroup -n MyVm --size Standard_DS3_v2
- name: Resize all VMs in a resource group.
text: >
az vm resize --size Standard_DS3_v2 --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm restart'] = """
type: command
short-summary: Restart VMs.
examples:
- name: Restart a VM.
text: az vm restart -g MyResourceGroup -n MyVm
- name: Restart all VMs in a resource group.
text: >
az vm restart --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm run-command'] = """
type: group
short-summary: Manage run commands on a Virtual Machine.
long-summary: 'For more information, see https://docs.microsoft.com/en-us/azure/virtual-machines/windows/run-command or https://docs.microsoft.com/en-us/azure/virtual-machines/linux/run-command.'
"""
helps['vm run-command invoke'] = """
type: command
short-summary: Execute a specific run command on a vm.
parameters:
- name: --command-id
type: string
short-summary: The command id
populator-commands:
- az vm run-command list
examples:
- name: install nginx on a vm
text: az vm run-command invoke -g MyResourceGroup -n MyVm --command-id RunShellScript --scripts "sudo apt-get update && sudo apt-get install -y nginx"
- name: invoke command with parameters
text: az vm run-command invoke -g MyResourceGroup -n MyVm --command-id RunShellScript --scripts 'echo $1 $2' --parameters hello world
"""
helps['vm run-command show'] = """
type: command
parameters:
- name: --command-id
type: string
short-summary: The command id
populator-commands:
- az vm run-command list
"""
helps['vm secret'] = """
type: group
short-summary: Manage VM secrets.
"""
helps['vm secret add'] = """
type: command
short-summary: Add a secret to a VM.
"""
helps['vm secret format'] = """
type: command
short-summary: Transform secrets into a form that can be used by VMs and VMSSes.
parameters:
- name: --secrets -s
long-summary: >
The command will attempt to resolve the vault ID for each secret. If it is unable to do so,
specify the vault ID to use for *all* secrets using: --keyvault NAME --resource-group NAME | --keyvault ID.
examples:
- name: Create a self-signed certificate with the default policy, and add it to a virtual machine.
text: >
az keyvault certificate create --vault-name vaultname -n cert1 \\
-p "$(az keyvault certificate get-default-policy)"
secrets=$(az keyvault secret list-versions --vault-name vaultname \\
-n cert1 --query "[?attributes.enabled].id" -o tsv)
vm_secrets=$(az vm secret format -s "$secrets")
az vm create -g group-name -n vm-name --admin-username deploy \\
--image debian --secrets "$vm_secrets"
"""
helps['vm secret list'] = """
type: command
short-summary: List secrets on a VM.
"""
helps['vm secret remove'] = """
type: command
short-summary: Remove a secret from a VM.
"""
helps['vm show'] = """
type: command
short-summary: Get the details of a VM.
examples:
- name: Show information about a VM.
text: az vm show -g MyResourceGroup -n MyVm -d
- name: Get the details for all VMs in a resource group.
text: >
az vm show -d --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm start'] = """
type: command
short-summary: Start a stopped VM.
examples:
- name: Start a stopped VM.
text: az vm start -g MyResourceGroup -n MyVm
- name: Start all VMs in a resource group.
text: >
az vm start --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm stop'] = """
type: command
short-summary: Power off (stop) a running VM.
long-summary: The VM will continue to be billed. To avoid this, you can deallocate the VM through "az vm deallocate"
examples:
- name: Power off (stop) a running VM.
text: az vm stop -g MyResourceGroup -n MyVm
- name: Stop all VMs in a resource group.
text: >
az vm stop --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm unmanaged-disk'] = """
type: group
short-summary: Manage the unmanaged data disks attached to a VM.
long-summary: >4
Azure Virtual Machines use disks as a place to store an operating system, applications, and data.
All Azure virtual machines have at least two disks: An operating system disk, and a temporary disk.
The operating system disk is created from an image, and both the operating system disk and the image are actually virtual hard disks (VHDs)
stored in an Azure storage account. Virtual machines also can have one or more data disks, that are also stored as VHDs.
Azure Managed and Unmanaged Data Disks have a maximum size of 4095 GB (with the exception of larger disks in preview). Azure Unmanaged Disks also have a maximum capacity of 4095 GB.
For more information, see:
- Azure Disks - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/about-disks-and-vhds and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/about-disks-and-vhds.
- Larger Managed Disks in Public Preview - https://azure.microsoft.com/en-us/blog/introducing-the-public-preview-of-larger-managed-disks-sizes/
- Ultra SSD Managed Disks in Public Preview - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/disks-ultra-ssd
"""
helps['vm unmanaged-disk attach'] = """
type: command
short-summary: Attach an unmanaged persistent disk to a VM.
long-summary: This allows for the preservation of data, even if the VM is reprovisioned due to maintenance or resizing.
examples:
- name: Attach a new default sized (1023 GB) unmanaged data disk to a VM.
text: az vm unmanaged-disk attach -g MyResourceGroup --vm-name MyVm --new
- name: Attach an existing data disk to a VM as unmanaged.
text: >
az vm unmanaged-disk attach -g MyResourceGroup --vm-name MyVm \\
--vhd-uri https://mystorage.blob.core.windows.net/vhds/d1.vhd
"""
helps['vm unmanaged-disk detach'] = """
type: command
short-summary: Detach an unmanaged disk from a VM.
examples:
- name: Detach a data disk from a VM.
text: >
az vm unmanaged-disk detach -g MyResourceGroup --vm-name MyVm -n disk_name
"""
helps['vm unmanaged-disk list'] = """
type: command
short-summary: List unmanaged disks of a VM.
examples:
- name: List the unmanaged disks attached to a VM.
text: az vm unmanaged-disk list -g MyResourceGroup --vm-name MyVm
- name: List unmanaged disks with names containing the string "data_disk".
text: >
az vm unmanaged-disk list -g MyResourceGroup --vm-name MyVm \\
--query "[?contains(name, 'data_disk')]" --output table
"""
helps['vm update'] = """
type: command
short-summary: Update the properties of a VM.
long-summary: Update VM objects and properties using paths that correspond to 'az vm show'.
examples:
- name: Add or update a tag.
text: az vm update -n name -g group --set tags.tagName=tagValue
- name: Remove a tag.
text: az vm update -n name -g group --remove tags.tagName
- name: Set the primary NIC of a VM.
text: az vm update -n name -g group --set networkProfile.networkInterfaces[1].primary=false networkProfile.networkInterfaces[0].primary=true
- name: Add a new non-primary NIC to a VM.
text: az vm update -n name -g group --add networkProfile.networkInterfaces primary=false id={NIC_ID}
- name: Remove the fourth NIC from a VM.
text: az vm update -n name -g group --remove networkProfile.networkInterfaces 3
"""
helps['vm user'] = """
type: group
short-summary: Manage user accounts for a VM.
"""
helps['vm user delete'] = """
type: command
short-summary: Delete a user account from a VM.
examples:
- name: Delete a user account.
text: az vm user delete -u username -n MyVm -g MyResourceGroup
- name: Delete a user on all VMs in a resource group.
text: >
az vm user delete -u username --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm user reset-ssh'] = """
type: command
short-summary: Reset the SSH configuration on a VM.
long-summary: >
The extension will restart the SSH service, open the SSH port on your VM, and reset the SSH configuration to default values. The user account (name, password, and SSH keys) are not changed.
examples:
- name: Reset the SSH configuration.
text: az vm user reset-ssh -n MyVm -g MyResourceGroup
- name: Reset the SSH server on all VMs in a resource group.
text: >
az vm user reset-ssh --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
# Help metadata for `az vm user update` (YAML consumed by knack).
# Fixed: both Linux examples used the garbled shell snippet `"$({ ~/.ssh/id_rsa.pub)"`,
# which is invalid bash; the intended idiom is `$(< file)` (command substitution that
# expands to the file's contents), matching how --ssh-key-value takes a key value.
helps['vm user update'] = """
type: command
short-summary: Update a user account.
parameters:
  - name: --ssh-key-value
    short-summary: SSH public key file value or public key file path
examples:
  - name: Update a Windows user account.
    text: az vm user update -u username -p password -n MyVm -g MyResourceGroup
  - name: Update a Linux user account.
    text: az vm user update -u username --ssh-key-value "$(< ~/.ssh/id_rsa.pub)" -n MyVm -g MyResourceGroup
  - name: Update a user on all VMs in a resource group.
    text: >
        az vm user update -u username --ssh-key-value "$(< ~/.ssh/id_rsa.pub)" --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
# Help strings (YAML) for `az vm wait`: block the CLI until a VM condition
# (e.g. --created, --deleted) is met.
helps['vm wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the VM is met.
examples:
- name: Wait until a VM is created.
text: az vm wait -g MyResourceGroup -n MyVm --created
- name: Wait until all VMs in a resource group are deleted.
text: >
az vm wait --deleted --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
# Help strings (YAML) for the `az vmss` group and `az vmss create`.
# The `vmss create` entry carries the long example catalogue (load balancers,
# cloud-init, Key Vault secrets, managed identities, availability zones).
helps['vmss'] = """
type: group
short-summary: Manage groupings of virtual machines in an Azure Virtual Machine Scale Set (VMSS).
"""
helps['vmss create'] = """
type: command
short-summary: Create an Azure Virtual Machine Scale Set.
long-summary: 'For an end-to-end tutorial, see https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-linux-create-cli.'
parameters:
- name: --image
type: string
short-summary: >
The name of the operating system image as a URN alias, URN, custom image name or ID, or VHD blob URI.
Valid URN format: "Publisher:Offer:Sku:Version".
populator-commands:
- az vm image list
- az vm image show
examples:
- name: Create a Windows VM scale set with 5 instances, a load balancer, a public IP address, and a 2GB data disk.
text: >
az vmss create -n MyVmss -g MyResourceGroup --instance-count 5 --image Win2016Datacenter --data-disk-sizes-gb 2
- name: Create a Linux VM scale set with an auto-generated ssh key pair, a public IP address, a DNS entry, an existing load balancer, and an existing virtual network.
text: |
az vmss create -n MyVmss -g MyResourceGroup --public-ip-address-dns-name my-globally-dns-name \\
--load-balancer MyLoadBalancer --vnet-name MyVnet --subnet MySubnet --image UbuntuLTS \\
--generate-ssh-keys
- name: Create a Linux VM scale set from a custom image using the default existing public SSH key.
text: >
az vmss create -n MyVmss -g MyResourceGroup --image MyImage
- name: Create a Linux VM scale set with a load balancer and custom DNS servers. Each VM has a public-ip address and a custom domain name.
text: >
az vmss create -n MyVmss -g MyResourceGroup --image centos \\
--public-ip-per-vm --vm-domain-name myvmss --dns-servers 10.0.0.6 10.0.0.5
- name: 'Create a Linux VM scale set using a cloud-init script for configuration. See: https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init'
text: >
az vmss create -g MyResourceGroup -n MyVmss --image debian --custom-data MyCloudInitScript.yml
- name: Create a Debian VM scaleset using Key Vault secrets.
text: >
az keyvault certificate create --vault-name vaultname -n cert1 \\
-p "$(az keyvault certificate get-default-policy)"
secrets=$(az keyvault secret list-versions --vault-name vaultname \\
-n cert1 --query "[?attributes.enabled].id" -o tsv)
vm_secrets=$(az vm secret format -s "$secrets")
az vmss create -g group-name -n vm-name --admin-username deploy \\
--image debian --secrets "$vm_secrets"
- name: Create a VM scaleset with system assigned identity. The VM will have a 'Contributor' Role with access to a storage account.
text: >
az vmss create -n MyVmss -g MyResourceGroup --image centos --assign-identity --scope /subscriptions/99999999-1bf0-4dda-aec3-cb9272f09590/MyResourceGroup/myRG/providers/Microsoft.Storage/storageAccounts/storage1
- name: Create a debian VM scaleset with a user assigned identity.
text: >
az vmss create -n MyVmss -g rg1 --image debian --assign-identity /subscriptions/99999999-1bf0-4dda-aec3-cb9272f09590/resourcegroups/myRG/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myID
- name: Create a debian VM scaleset with both system and user assigned identity.
text: >
az vmss create -n MyVmss -g rg1 --image debian --assign-identity [system] /subscriptions/99999999-1bf0-4dda-aec3-cb9272f09590/resourcegroups/myRG/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myID
- name: Create a single zone VM scaleset in the current resource group's region
supported-profiles: latest
text: >
az vmss create -n MyVmss -g MyResourceGroup --image Centos --zones 1
"""
# Help strings (YAML) for `az vmss` lifecycle/diagnostics/disk/encryption commands.
# FIXES to user-facing text:
#  - `vmss encryption enable` long-summary previously repeated the phrase
#    "For more information, see:" twice back-to-back; deduplicated.
#  - `vmss encryption disable` example name read "disable encryption a VMSS";
#    corrected to "disable encryption on a VMSS".
helps['vmss deallocate'] = """
type: command
short-summary: Deallocate VMs within a VMSS.
"""
helps['vmss delete-instances'] = """
type: command
short-summary: Delete VMs within a VMSS.
"""
helps['vmss diagnostics'] = """
type: group
short-summary: Configure the Azure Virtual Machine Scale Set diagnostics extension.
"""
helps['vmss diagnostics get-default-config'] = """
type: command
short-summary: Show the default config file which defines data to be collected.
"""
helps['vmss diagnostics set'] = """
type: command
short-summary: Enable diagnostics on a VMSS.
"""
helps['vmss disk'] = """
type: group
short-summary: Manage data disks of a VMSS.
"""
helps['vmss disk attach'] = """
type: command
short-summary: Attach managed data disks to a scale set or its instances.
"""
helps['vmss disk detach'] = """
type: command
short-summary: Detach managed data disks from a scale set or its instances.
"""
helps['vmss encryption'] = """
type: group
short-summary: "(PREVIEW) Manage encryption of VMSS."
long-summary: "For more information, see: https://docs.microsoft.com/en-us/azure/security/azure-security-disk-encryption-overview"
"""
helps['vmss encryption disable'] = """
type: command
short-summary: Disable the encryption on a VMSS with managed disks.
examples:
- name: disable encryption on a VMSS
text: >
az vmss encryption disable -g MyResourceGroup -n MyVm
"""
helps['vmss encryption enable'] = """
type: command
short-summary: "Encrypt a VMSS with managed disks."
long-summary: "For more information, see: https://docs.microsoft.com/en-us/azure/security/azure-security-disk-encryption-overview"
examples:
- name: encrypt a VM scale set using a key vault in the same resource group
text: >
az vmss encryption enable -g MyResourceGroup -n MyVmss --disk-encryption-keyvault MyVault
"""
helps['vmss encryption show'] = """
type: command
short-summary: Show encryption status.
"""
# Help strings (YAML) for `az vmss extension ...`, `az vmss get-instance-view`,
# and `az vmss identity ...` (managed service identity) commands.
helps['vmss extension'] = """
type: group
short-summary: Manage extensions on a VM scale set.
"""
helps['vmss extension delete'] = """
type: command
short-summary: Delete an extension from a VMSS.
"""
helps['vmss extension image'] = """
type: group
short-summary: Find the available VM extensions for a subscription and region.
"""
helps['vmss extension image list'] = """
type: command
short-summary: List the information on available extensions.
examples:
- name: List the unique publishers for extensions.
text: az vmss extension image list --query "[].publisher" -o tsv | sort -u
- name: Find extensions with "Docker" in the name.
text: az vmss extension image list --query "[].name" -o tsv | sort -u | grep Docker
- name: List extension names where the publisher name starts with "Microsoft.Azure.App".
text: |
az vmss extension image list --query \\
"[?starts_with(publisher, 'Microsoft.Azure.App')].publisher" \\
-o tsv | sort -u | xargs -I{} az vmss extension image list-names --publisher {} -l westus
"""
helps['vmss extension list'] = """
type: command
short-summary: List extensions associated with a VMSS.
examples:
- name: List extensions associated with a VMSS. (autogenerated)
text: az vmss extension list --resource-group MyResourceGroup --vmss-name MyVmss
crafted: true
"""
helps['vmss extension set'] = """
type: command
short-summary: Add an extension to a VMSS or update an existing extension.
long-summary: Get extension details from `az vmss extension image list`.
parameters:
- name: --name -n
populator-commands:
- az vm extension image list
examples:
- name: >
Set an extension which depends on two previously set extensions. That is, When a VMSS instance is created or reimaged, the customScript extension will be provisioned only after all extensions that it depends on have been provisioned. The extension need not depend on the other extensions for pre-requisite configurations.
text: >
az vmss extension set --vmss-name my-vmss --name customScript --resource-group my-group \\
--version 2.0 --publisher Microsoft.Azure.Extensions \\
--provision-after-extensions NetworkWatcherAgentLinux VMAccessForLinux \\
--settings '{"commandToExecute": "echo testing"}'
"""
helps['vmss extension show'] = """
type: command
short-summary: Show details on a VMSS extension.
"""
helps['vmss get-instance-view'] = """
type: command
short-summary: View an instance of a VMSS.
parameters:
- name: --instance-id
short-summary: A VM instance ID or "*" to list instance view for all VMs in a scale set.
examples:
- name: View an instance of a VMSS. (autogenerated)
text: az vmss get-instance-view --name MyScaleSet --resource-group MyResourceGroup
crafted: true
"""
helps['vmss identity'] = """
type: group
short-summary: manage service identities of a VM scaleset.
"""
helps['vmss identity assign'] = """
type: command
short-summary: Enable managed service identity on a VMSS.
long-summary: This is required to authenticate and interact with other Azure services using bearer tokens.
examples:
- name: Enable system assigned identity on a VMSS with the 'Owner' role.
text: az vmss identity assign -g MyResourceGroup -n MyVmss --role Owner --scope /subscriptions/db5eb68e-73e2-4fa8-b18a-0123456789999/resourceGroups/MyResourceGroup
"""
helps['vmss identity remove'] = """
type: command
short-summary: (PREVIEW) Remove user assigned identities from a VM scaleset.
examples:
- name: Remove system assigned identity
text: az vmss identity remove -g MyResourceGroup -n MyVmss
- name: Remove 2 identities which are in the same resource group with the VM scaleset
text: az vmss identity remove -g MyResourceGroup -n MyVmss --identities readerId writerId
- name: Remove system assigned identity and a user identity
text: az vmss identity remove -g MyResourceGroup -n MyVmss --identities [system] readerId
"""
helps['vmss identity show'] = """
type: command
short-summary: display VM scaleset's managed identity info.
"""
# Help strings (YAML) for `az vmss list*`, `nic`, `reimage`, `restart`,
# `rolling-upgrade`, `run-command`, and `scale` commands.
helps['vmss list'] = """
type: command
short-summary: List VMSS.
"""
helps['vmss list-instance-connection-info'] = """
type: command
short-summary: Get the IP address and port number used to connect to individual VM instances within a set.
examples:
- name: Get the IP address and port number used to connect to individual VM instances within a set. (autogenerated)
text: az vmss list-instance-connection-info --name MyScaleSet --resource-group MyResourceGroup
crafted: true
"""
helps['vmss list-instance-public-ips'] = """
type: command
short-summary: List public IP addresses of VM instances within a set.
examples:
- name: List public IP addresses of VM instances within a set. (autogenerated)
text: az vmss list-instance-public-ips --name MyScaleSet --resource-group MyResourceGroup
crafted: true
"""
helps['vmss nic'] = """
type: group
short-summary: Manage network interfaces of a VMSS.
"""
helps['vmss reimage'] = """
type: command
short-summary: Reimage VMs within a VMSS.
parameters:
- name: --instance-id
short-summary: VM instance ID. If missing, reimage all instances.
"""
helps['vmss restart'] = """
type: command
short-summary: Restart VMs within a VMSS.
"""
helps['vmss rolling-upgrade'] = """
type: group
short-summary: (PREVIEW) Manage rolling upgrades.
"""
helps['vmss run-command'] = """
type: group
short-summary: Manage run commands on a Virtual Machine Scale Set.
long-summary: 'For more information, see https://docs.microsoft.com/en-us/azure/virtual-machines/windows/run-command or https://docs.microsoft.com/en-us/azure/virtual-machines/linux/run-command.'
"""
helps['vmss run-command invoke'] = """
type: command
short-summary: Execute a specific run command on a Virtual Machine Scale Set instance.
parameters:
- name: --command-id
type: string
short-summary: The command id
populator-commands:
- az vmss run-command list
- name: --instance-id
short-summary: Scale set VM instance id.
examples:
- name: install nginx on a VMSS instance
text: az vmss run-command invoke -g MyResourceGroup -n MyVMSS --command-id RunShellScript \\ --instance-id 0 --scripts "sudo apt-get update && sudo apt-get install -y nginx"
- name: invoke a run-command with parameters on a VMSS instance
text: az vmss run-command invoke -g MyResourceGroup -n MyVMSS --command-id RunShellScript \\ --instance-id 4 --scripts 'echo $1 $2' --parameters hello world
- name: 'invoke command on all VMSS instances using the VMSS instance resource IDs. Note: "@-" expands to stdin.'
text: |
az vmss list-instances -n MyVMSS -g ova-test --query "[].id" --output tsv | \\
az vmss run-command invoke --scripts 'echo $1 $2' --parameters hello world \\
--command-id RunShellScript --ids @-
"""
helps['vmss run-command show'] = """
type: command
parameters:
- name: --command-id
type: string
short-summary: The command id
populator-commands:
- az vmss run-command list
"""
helps['vmss scale'] = """
type: command
short-summary: Change the number of VMs within a VMSS.
parameters:
- name: --new-capacity
short-summary: Number of VMs in the VMSS.
examples:
- name: Change the number of VMs within a VMSS. (autogenerated)
text: az vmss scale --name MyScaleSet --new-capacity 6 --resource-group MyResourceGroup
crafted: true
"""
# Help strings (YAML) for `az vmss show/start/stop/update/update-instances/wait`.
helps['vmss show'] = """
type: command
short-summary: Get details on VMs within a VMSS.
parameters:
- name: --instance-id
short-summary: VM instance ID. If missing, show the VMSS.
examples:
- name: Get details on VMs within a VMSS. (autogenerated)
text: az vmss show --name MyScaleSet --resource-group MyResourceGroup
crafted: true
"""
helps['vmss start'] = """
type: command
short-summary: Start VMs within a VMSS.
"""
helps['vmss stop'] = """
type: command
short-summary: Power off (stop) VMs within a VMSS.
long-summary: The VMs will continue to be billed. To avoid this, you can deallocate VM instances within a VMSS through "az vmss deallocate"
"""
helps['vmss update'] = """
type: command
short-summary: Update a VMSS.
examples:
- name: Update a VMSS. (autogenerated)
text: az vmss update --name MyScaleSet --resource-group MyResourceGroup --set virtualMachineProfile.storageProfile.imageReference.version=16.04.201801090
crafted: true
"""
helps['vmss update-instances'] = """
type: command
short-summary: Upgrade VMs within a VMSS.
examples:
- name: Upgrade VMs within a VMSS. (autogenerated)
text: az vmss update-instances --instance-ids 1 --name MyScaleSet --resource-group MyResourceGroup
crafted: true
"""
helps['vmss wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of a scale set is met.
"""
# (removed stray extraction residue: "| 39.006685 | 329 | 0.707185 |" — table
# cells from a dataset join, not part of this module; kept as a comment so the
# file stays syntactically valid)
from knack.help_files import helps
# Help strings (YAML) for the `az disk` command group (Azure Managed Disks):
# create/delete/grant-access/list/revoke-access/update/wait.
helps['disk'] = """
type: group
short-summary: Manage Azure Managed Disks.
long-summary: >4
Azure Virtual Machines use disks as a place to store an operating system, applications, and data.
All Azure virtual machines have at least two disks: An operating system disk, and a temporary disk.
The operating system disk is created from an image, and both the operating system disk and the image are actually virtual hard disks (VHDs)
stored in an Azure storage account. Virtual machines also can have one or more data disks, that are also stored as VHDs.
Azure Managed and Unmanaged Data Disks have a maximum size of 4095 GB (with the exception of larger disks in preview). Azure Unmanaged Disks also have a maximum capacity of 4095 GB.
For more information, see:
- Azure Disks - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/about-disks-and-vhds and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/about-disks-and-vhds.
- Larger Managed Disks in Public Preview - https://azure.microsoft.com/en-us/blog/introducing-the-public-preview-of-larger-managed-disks-sizes/
- Ultra SSD Managed Disks in Public Preview - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/disks-ultra-ssd
"""
helps['disk create'] = """
type: command
short-summary: Create a managed disk.
examples:
- name: Create a managed disk by importing from a blob uri.
text: >
az disk create -g MyResourceGroup -n MyDisk --source https://vhd1234.blob.core.windows.net/vhds/osdisk1234.vhd
- name: Create an empty managed disk.
text: >
az disk create -g MyResourceGroup -n MyDisk --size-gb 10
- name: Create a managed disk by copying an existing disk or snapshot.
text: >
az disk create -g MyResourceGroup -n MyDisk2 --source MyDisk
- name: Create a disk in an availability zone in the region of "East US 2"
text: >
az disk create -n MyDisk -g MyResourceGroup --size-gb 10 --location eastus2 --zone 1
"""
helps['disk delete'] = """
type: command
short-summary: Delete a managed disk.
examples:
- name: Delete a managed disk. (autogenerated)
text: az disk delete --name MyManagedDisk --resource-group MyResourceGroup
crafted: true
"""
helps['disk grant-access'] = """
type: command
short-summary: Grant a resource read access to a managed disk.
examples:
- name: Grant a resource read access to a managed disk. (autogenerated)
text: az disk grant-access --duration-in-seconds 3600 --name MyManagedDisk --resource-group MyResourceGroup
crafted: true
"""
helps['disk list'] = """
type: command
short-summary: List managed disks.
"""
helps['disk revoke-access'] = """
type: command
short-summary: Revoke a resource's read access to a managed disk.
"""
helps['disk update'] = """
type: command
short-summary: Update a managed disk.
examples:
- name: Update a managed disk. (autogenerated)
text: az disk update --name MyManagedDisk --resource-group MyResourceGroup --size-gb 20
crafted: true
"""
helps['disk wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of a managed disk is met.
"""
# Help strings (YAML) for the `az image` command group (custom VM images).
helps['image'] = """
type: group
short-summary: Manage custom virtual machine images.
"""
helps['image create'] = """
type: command
short-summary: Create a custom Virtual Machine Image from managed disks or snapshots.
examples:
- name: Create an image from an existing disk.
text: |
az image create -g MyResourceGroup -n image1 --os-type Linux \\
--source /subscriptions/db5eb68e-73e2-4fa8-b18a-0123456789999/resourceGroups/rg1/providers/Microsoft.Compute/snapshots/s1
- name: Create an image by capturing an existing generalized virtual machine in the same resource group.
text: az image create -g MyResourceGroup -n image1 --source MyVm1
"""
helps['image list'] = """
type: command
short-summary: List custom VM images.
"""
helps['image update'] = """
type: command
short-summary: Update custom VM images.
examples:
- name: Add or update tags.
text: az image update -n ImageName -g ResourceGroup --tags tag1=val1 tag2=val2
- name: Remove all tags.
text: az image update -n ImageName -g resourceGroup --tags
"""
# Help strings (YAML) for the `az sig` command group (Shared Image Gallery).
# FIXES to user-facing text (typos only; structure unchanged):
#  - "share image gallery" -> "shared image gallery" (create/list/update/examples)
#  - "defintion" / "defintiion" -> "definition"
#  - "creat a new image version" -> "create a new image version"
#  - 'Use "--no-wait" is advised' -> 'Using "--no-wait" is advised.'
#  - "it's replica count" -> "its replica count"
#  - "wait for an image version gets updated" -> "wait until an image version gets updated"
helps['sig'] = """
type: group
short-summary: manage shared image gallery
"""
helps['sig create'] = """
type: command
short-summary: create a shared image gallery.
examples:
- name: create a shared image gallery. (autogenerated)
text: az sig create --gallery-name MyGallery --resource-group MyResourceGroup
crafted: true
"""
helps['sig image-definition'] = """
type: group
short-summary: create an image definition
"""
helps['sig image-definition create'] = """
type: command
short-summary: create a gallery image definition
examples:
- name: Create a Linux image definition
text: |
az sig image-definition create -g MyResourceGroup --gallery-name MyGallery --gallery-image-definition MyImage --publisher GreatPublisher --offer GreatOffer --sku GreatSku --os-type linux
"""
helps['sig image-definition update'] = """
type: command
short-summary: update a shared image definition.
"""
helps['sig image-version'] = """
type: group
short-summary: create a new version from an image definition
"""
helps['sig image-version create'] = """
type: command
short-summary: create a new image version
long-summary: this operation might take a long time depending on the replicate region number. Using "--no-wait" is advised.
examples:
- name: Add a new image version
text: |
az sig image-version create -g MyResourceGroup --gallery-name MyGallery --gallery-image-definition MyImage --gallery-image-version 1.0.0 --managed-image /subscriptions/00000000-0000-0000-0000-00000000xxxx/resourceGroups/imageGroups/providers/images/MyManagedImage
- name: Add a new image version replicated across multiple regions with different replication counts each. Eastus2 will have its replica count set to the default replica count.
text: |
az sig image-version create -g MyResourceGroup --gallery-name MyGallery \\
--gallery-image-definition MyImage --gallery-image-version 1.0.0 \\
--managed-image image-name --target-regions eastus2 ukwest=3 southindia=2
- name: Add a new image version and don't wait on it. Later you can invoke "az sig image-version wait" command when ready to create a vm from the gallery image version
text: |
az sig image-version create --no-wait -g MyResourceGroup --gallery-name MyGallery \\
--gallery-image-definition MyImage --gallery-image-version 1.0.0 \\
--managed-image imageInTheSameResourceGroup
"""
helps['sig image-version update'] = """
type: command
short-summary: update a shared image version
examples:
- name: Replicate to a new set of regions
text: |
az sig image-version update -g MyResourceGroup --gallery-name MyGallery --gallery-image-definition MyImage --gallery-image-version 1.0.0 --target-regions westcentralus=2 eastus2
- name: Replicate to one more region
text: |
az sig image-version update -g MyResourceGroup --gallery-name MyGallery --gallery-image-definition MyImage --gallery-image-version 1.0.0 --add publishingProfile.targetRegions name=westcentralus
"""
helps['sig image-version wait'] = """
type: command
short-summary: wait for image version related operation
examples:
- name: wait until an image version gets updated
text: |
az sig image-version wait --updated -g MyResourceGroup --gallery-name MyGallery --gallery-image-definition MyImage --gallery-image-version 1.0.0
"""
helps['sig list'] = """
type: command
short-summary: list shared image galleries.
"""
helps['sig update'] = """
type: command
short-summary: update a shared image gallery.
"""
# Help strings (YAML) for the `az snapshot` command group (point-in-time copies
# of managed disks / blobs / other snapshots).
helps['snapshot'] = """
type: group
short-summary: Manage point-in-time copies of managed disks, native blobs, or other snapshots.
"""
helps['snapshot create'] = """
type: command
short-summary: Create a snapshot.
examples:
- name: Create a snapshot by importing from a blob uri.
text: >
az snapshot create -g MyResourceGroup -n MySnapshot --source https://vhd1234.blob.core.windows.net/vhds/osdisk1234.vhd
- name: Create an empty snapshot.
text: az snapshot create -g MyResourceGroup -n MySnapshot --size-gb 10
- name: Create a snapshot by copying an existing disk in the same resource group.
text: az snapshot create -g MyResourceGroup -n MySnapshot2 --source MyDisk
"""
helps['snapshot grant-access'] = """
type: command
short-summary: Grant read access to a snapshot.
examples:
- name: Grant read access to a snapshot. (autogenerated)
text: az snapshot grant-access --duration-in-seconds 3600 --name MySnapshot --resource-group MyResourceGroup
crafted: true
"""
helps['snapshot list'] = """
type: command
short-summary: List snapshots.
"""
helps['snapshot revoke-access'] = """
type: command
short-summary: Revoke read access to a snapshot.
examples:
- name: Revoke read access to a snapshot. (autogenerated)
text: az snapshot revoke-access --name MySnapshot --resource-group MyResourceGroup
crafted: true
"""
helps['snapshot update'] = """
type: command
short-summary: Update a snapshot.
"""
helps['snapshot wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of a snapshot is met.
"""
# Help strings (YAML) for the top-level `az vm` group and the
# `az vm availability-set` sub-group.
helps['vm'] = """
type: group
short-summary: Manage Linux or Windows virtual machines.
"""
helps['vm availability-set'] = """
type: group
short-summary: Group resources into availability sets.
long-summary: >
To provide redundancy to an application, it is recommended to group two or more virtual machines in an availability set.
This configuration ensures that during either a planned or unplanned maintenance event, at least one virtual machine
will be available.
"""
# Help strings (YAML) for `az vm availability-set convert`.
# FIX: example name previously read "availabiity" — corrected to "availability".
helps['vm availability-set convert'] = """
type: command
short-summary: Convert an Azure Availability Set to contain VMs with managed disks.
examples:
- name: Convert an availability set to use managed disks by name.
text: az vm availability-set convert -g MyResourceGroup -n MyAvSet
- name: Convert an availability set to use managed disks by ID.
text: >
az vm availability-set convert --ids $(az vm availability-set list -g MyResourceGroup --query "[].id" -o tsv)
"""
# Help strings (YAML) for `az vm availability-set` CRUD commands
# (create/delete/list/list-sizes/show/update).
helps['vm availability-set create'] = """
type: command
short-summary: Create an Azure Availability Set.
long-summary: 'For more information, see https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-manage-availability.'
examples:
- name: Create an availability set.
text: az vm availability-set create -n MyAvSet -g MyResourceGroup --platform-fault-domain-count 2 --platform-update-domain-count 2
"""
helps['vm availability-set delete'] = """
type: command
short-summary: Delete an availability set.
examples:
- name: Delete an availability set.
text: az vm availability-set delete -n MyAvSet -g MyResourceGroup
"""
helps['vm availability-set list'] = """
type: command
short-summary: List availability sets.
examples:
- name: List availability sets.
text: az vm availability-set list -g MyResourceGroup
"""
helps['vm availability-set list-sizes'] = """
type: command
short-summary: List VM sizes for an availability set.
examples:
- name: List VM sizes for an availability set.
text: az vm availability-set list-sizes -n MyAvSet -g MyResourceGroup
"""
helps['vm availability-set show'] = """
type: command
short-summary: Get information for an availability set.
examples:
- name: Get information about an availability set.
text: az vm availability-set show -n MyAvSet -g MyResourceGroup
"""
helps['vm availability-set update'] = """
type: command
short-summary: Update an Azure Availability Set.
examples:
- name: Update an availability set.
text: az vm availability-set update -n MyAvSet -g MyResourceGroup
- name: Update an availability set tag.
text: az vm availability-set update -n MyAvSet -g MyResourceGroup --set tags.foo=value
- name: Remove an availability set tag.
text: az vm availability-set update -n MyAvSet -g MyResourceGroup --remove tags.foo
"""
# Help strings (YAML) for `az vm boot-diagnostics` (disable/enable/get-boot-log).
helps['vm boot-diagnostics'] = """
type: group
short-summary: Troubleshoot the startup of an Azure Virtual Machine.
long-summary: Use this feature to troubleshoot boot failures for custom or platform images.
"""
helps['vm boot-diagnostics disable'] = """
type: command
short-summary: Disable the boot diagnostics on a VM.
examples:
- name: Disable boot diagnostics on all VMs in a resource group.
text: >
az vm boot-diagnostics disable --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm boot-diagnostics enable'] = """
type: command
short-summary: Enable the boot diagnostics on a VM.
parameters:
- name: --storage
short-summary: Name or URI of a storage account (e.g. https://your_storage_account_name.blob.core.windows.net/)
examples:
- name: Enable boot diagnostics on all VMs in a resource group.
text: >
az vm boot-diagnostics enable --storage https://mystor.blob.core.windows.net/ --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
- name: Enable the boot diagnostics on a VM. (autogenerated)
text: az vm boot-diagnostics enable --name MyVirtualMachine --resource-group MyResourceGroup --storage https://mystor.blob.core.windows.net/
crafted: true
"""
helps['vm boot-diagnostics get-boot-log'] = """
type: command
short-summary: Get the boot diagnostics log from a VM.
examples:
- name: Get diagnostics logs for all VMs in a resource group.
text: >
az vm boot-diagnostics get-boot-log --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
- name: Get the boot diagnostics log from a VM. (autogenerated)
text: az vm boot-diagnostics get-boot-log --name MyVirtualMachine --resource-group MyResourceGroup
crafted: true
"""
# Help strings (YAML) for `az vm capture` and `az vm convert`.
# FIX: the --vhd-name-prefix parameter summary previously read
# "The VHD name prefix specify for the VM disks." — corrected grammar to
# "The VHD name prefix specified for the VM disks."
helps['vm capture'] = """
type: command
short-summary: Capture information for a stopped VM.
long-summary: 'For an end-to-end tutorial, see https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-capture-image'
parameters:
- name: --vhd-name-prefix
type: string
short-summary: The VHD name prefix specified for the VM disks.
- name: --storage-container
short-summary: The storage account container name in which to save the disks.
- name: --overwrite
short-summary: Overwrite the existing disk file.
examples:
- name: Deallocate, generalize, and capture a stopped virtual machine.
text: |
az vm deallocate -g MyResourceGroup -n MyVm
az vm generalize -g MyResourceGroup -n MyVm
az vm capture -g MyResourceGroup -n MyVm --vhd-name-prefix MyPrefix
- name: Deallocate, generalize, and capture multiple stopped virtual machines.
text: |
vms_ids=$(az vm list -g MyResourceGroup --query "[].id" -o tsv)
az vm deallocate --ids {vms_ids}
az vm generalize --ids {vms_ids}
az vm capture --ids {vms_ids} --vhd-name-prefix MyPrefix
"""
helps['vm convert'] = """
type: command
short-summary: Convert a VM with unmanaged disks to use managed disks.
examples:
- name: Convert a VM with unmanaged disks to use managed disks.
text: az vm convert -g MyResourceGroup -n MyVm
- name: Convert all VMs with unmanaged disks in a resource group to use managed disks.
text: >
az vm convert --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm create'] = """
type: command
short-summary: Create an Azure Virtual Machine.
long-summary: 'For an end-to-end tutorial, see https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-quick-create-cli.'
parameters:
- name: --image
type: string
short-summary: >
The name of the operating system image as a URN alias, URN, custom image name or ID, or VHD blob URI.
This parameter is required unless using `--attach-os-disk.` Valid URN format: "Publisher:Offer:Sku:Version".
populator-commands:
- az vm image list
- az vm image show
- name: --ssh-key-value
short-summary: The SSH public key or public key file path.
examples:
- name: Create a default Ubuntu VM with automatic SSH authentication.
text: >
az vm create -n MyVm -g MyResourceGroup --image UbuntuLTS
- name: Create a default RedHat VM with automatic SSH authentication using an image URN.
text: >
az vm create -n MyVm -g MyResourceGroup --image RedHat:RHEL:7-RAW:7.4.2018010506
- name: Create a default Windows Server VM with a private IP address.
text: >
az vm create -n MyVm -g MyResourceGroup --public-ip-address "" --image Win2012R2Datacenter
- name: Create a VM from a custom managed image.
text: >
az vm create -g MyResourceGroup -n MyVm --image MyImage
- name: Create a VM by attaching to a managed operating system disk.
text: >
az vm create -g MyResourceGroup -n MyVm --attach-os-disk MyOsDisk --os-type linux
- name: 'Create an Ubuntu Linux VM using a cloud-init script for configuration. See: https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init.'
text: >
az vm create -g MyResourceGroup -n MyVm --image debian --custom-data MyCloudInitScript.yml
- name: Create a Debian VM with SSH key authentication and a public DNS entry, located on an existing virtual network and availability set.
text: |
az vm create -n MyVm -g MyResourceGroup --image debian --vnet-name MyVnet --subnet subnet1 \\
--availability-set MyAvailabilitySet --public-ip-address-dns-name MyUniqueDnsName \\
--ssh-key-value @key-file
- name: Create a simple Ubuntu Linux VM with a public IP address, DNS entry, two data disks (10GB and 20GB), and then generate ssh key pairs.
text: |
az vm create -n MyVm -g MyResourceGroup --public-ip-address-dns-name MyUniqueDnsName \\
--image ubuntults --data-disk-sizes-gb 10 20 --size Standard_DS2_v2 \\
--generate-ssh-keys
- name: Create a Debian VM using Key Vault secrets.
text: >
az keyvault certificate create --vault-name vaultname -n cert1 \\
-p "$(az keyvault certificate get-default-policy)"
secrets=$(az keyvault secret list-versions --vault-name vaultname \\
-n cert1 --query "[?attributes.enabled].id" -o tsv)
vm_secrets=$(az vm secret format -s "$secrets")
az vm create -g group-name -n vm-name --admin-username deploy \\
--image debian --secrets "$vm_secrets"
- name: Create a CentOS VM with a system assigned identity. The VM will have a 'Contributor' role with access to a storage account.
text: >
        az vm create -n MyVm -g rg1 --image centos --assign-identity --scope /subscriptions/99999999-1bf0-4dda-aec3-cb9272f09590/resourceGroups/myRG/providers/Microsoft.Storage/storageAccounts/storage1
- name: Create a debian VM with a user assigned identity.
text: >
az vm create -n MyVm -g rg1 --image debian --assign-identity /subscriptions/99999999-1bf0-4dda-aec3-cb9272f09590/resourcegroups/myRG/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myID
- name: Create a debian VM with both system and user assigned identity.
text: >
az vm create -n MyVm -g rg1 --image debian --assign-identity [system] /subscriptions/99999999-1bf0-4dda-aec3-cb9272f09590/resourcegroups/myRG/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myID
- name: Create a VM in an availability zone in the current resource group's region
supported-profiles: latest
text: >
az vm create -n MyVm -g MyResourceGroup --image Centos --zone 1
"""
helps['vm deallocate'] = """
type: command
short-summary: Deallocate a VM.
long-summary: 'For an end-to-end tutorial, see https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-capture-image'
examples:
- name: Deallocate, generalize, and capture a stopped virtual machine.
text: |
az vm deallocate -g MyResourceGroup -n MyVm
az vm generalize -g MyResourceGroup -n MyVm
az vm capture -g MyResourceGroup -n MyVm --vhd-name-prefix MyPrefix
- name: Deallocate, generalize, and capture multiple stopped virtual machines.
text: |
vms_ids=$(az vm list -g MyResourceGroup --query "[].id" -o tsv)
az vm deallocate --ids {vms_ids}
az vm generalize --ids {vms_ids}
az vm capture --ids {vms_ids} --vhd-name-prefix MyPrefix
"""
helps['vm delete'] = """
type: command
short-summary: Delete a VM.
examples:
- name: Delete a VM without a prompt for confirmation.
text: >
az vm delete -g MyResourceGroup -n MyVm --yes
- name: Delete all VMs in a resource group.
text: >
az vm delete --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm diagnostics'] = """
type: group
short-summary: Configure the Azure Virtual Machine diagnostics extension.
"""
helps['vm diagnostics get-default-config'] = """
type: command
short-summary: Get the default configuration settings for a VM.
examples:
- name: Get the default diagnostics for a Linux VM and override the storage account name and the VM resource ID.
text: |
az vm diagnostics get-default-config \\
| sed "s#__DIAGNOSTIC_STORAGE_ACCOUNT__#MyStorageAccount#g" \\
| sed "s#__VM_OR_VMSS_RESOURCE_ID__#MyVmResourceId#g"
- name: Get the default diagnostics for a Windows VM.
text: >
az vm diagnostics get-default-config --is-windows-os
"""
helps['vm diagnostics set'] = """
type: command
short-summary: Configure the Azure VM diagnostics extension.
examples:
- name: Set up default diagnostics on a Linux VM for Azure Portal VM metrics graphs and syslog collection.
text: |
# Set the following 3 parameters first.
my_resource_group={Resource group name containing your Linux VM and the storage account}
my_linux_vm={Your Azure Linux VM name}
my_diagnostic_storage_account={Your Azure storage account for storing VM diagnostic data}
my_vm_resource_id=$(az vm show -g $my_resource_group -n $my_linux_vm --query "id" -o tsv)
default_config=$(az vm diagnostics get-default-config \\
| sed "s#__DIAGNOSTIC_STORAGE_ACCOUNT__#$my_diagnostic_storage_account#g" \\
| sed "s#__VM_OR_VMSS_RESOURCE_ID__#$my_vm_resource_id#g")
storage_sastoken=$(az storage account generate-sas \\
--account-name $my_diagnostic_storage_account --expiry 2037-12-31T23:59:00Z \\
--permissions wlacu --resource-types co --services bt -o tsv)
protected_settings="{'storageAccountName': '$my_diagnostic_storage_account', \\
'storageAccountSasToken': '$storage_sastoken'}"
az vm diagnostics set --settings "$default_config" \\
--protected-settings "$protected_settings" \\
--resource-group $my_resource_group --vm-name $my_linux_vm
- name: Set up default diagnostics on a Windows VM.
text: |
# Set the following 3 parameters first.
my_resource_group={Resource group name containing your Windows VM and the storage account}
my_windows_vm={Your Azure Windows VM name}
my_diagnostic_storage_account={Your Azure storage account for storing VM diagnostic data}
my_vm_resource_id=$(az vm show -g $my_resource_group -n $my_windows_vm --query "id" -o tsv)
default_config=$(az vm diagnostics get-default-config --is-windows-os \\
| sed "s#__DIAGNOSTIC_STORAGE_ACCOUNT__#$my_diagnostic_storage_account#g" \\
| sed "s#__VM_OR_VMSS_RESOURCE_ID__#$my_vm_resource_id#g")
# Please use the same options, the WAD diagnostic extension has strict
# expectations of the sas token's format. Set the expiry as desired.
storage_sastoken=$(az storage account generate-sas \\
--account-name $my_diagnostic_storage_account --expiry 2037-12-31T23:59:00Z \\
--permissions acuw --resource-types co --services bt --https-only --output tsv)
protected_settings="{'storageAccountName': '$my_diagnostic_storage_account', \\
'storageAccountSasToken': '$storage_sastoken'}"
az vm diagnostics set --settings "$default_config" \\
--protected-settings "$protected_settings" \\
--resource-group $my_resource_group --vm-name $my_windows_vm
# # Alternatively, if the WAD extension has issues parsing the sas token,
# # one can use a storage account key instead.
storage_account_key=$(az storage account keys list --account-name {my_storage_account} \\
--query [0].value -o tsv)
protected_settings="{'storageAccountName': '$my_diagnostic_storage_account', \\
'storageAccountKey': '$storage_account_key'}"
"""
helps['vm disk'] = """
type: group
short-summary: Manage the managed data disks attached to a VM.
long-summary: >4
Azure Virtual Machines use disks as a place to store an operating system, applications, and data.
All Azure virtual machines have at least two disks: An operating system disk, and a temporary disk.
The operating system disk is created from an image, and both the operating system disk and the image are actually virtual hard disks (VHDs)
stored in an Azure storage account. Virtual machines also can have one or more data disks, that are also stored as VHDs.
Azure Managed and Unmanaged Data Disks have a maximum size of 4095 GB (with the exception of larger disks in preview). Azure Unmanaged Disks also have a maximum capacity of 4095 GB.
For more information, see:
- Azure Disks - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/about-disks-and-vhds and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/about-disks-and-vhds.
- Larger Managed Disks in Public Preview - https://azure.microsoft.com/en-us/blog/introducing-the-public-preview-of-larger-managed-disks-sizes/
- Ultra SSD Managed Disks in Public Preview - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/disks-ultra-ssd
"""
helps['vm disk attach'] = """
type: command
short-summary: Attach a managed persistent disk to a VM.
long-summary: This allows for the preservation of data, even if the VM is reprovisioned due to maintenance or resizing.
examples:
- name: Attach a new default sized (1023 GB) managed data disk to a VM.
text: az vm disk attach -g MyResourceGroup --vm-name MyVm --name disk_name --new
"""
helps['vm disk detach'] = """
type: command
short-summary: Detach a managed disk from a VM.
examples:
- name: Detach a data disk from a VM.
text: >
az vm disk detach -g MyResourceGroup --vm-name MyVm --name disk_name
"""
# Help entry for the `az vm encryption` command group (knack YAML help schema).
# Fix: the long-summary URL previously ended with a stray double quote that was
# rendered verbatim in `az vm encryption --help` output, corrupting the link.
helps['vm encryption'] = """
type: group
short-summary: "Manage encryption of VM disks."
long-summary: |
    For more information, see:
    https://docs.microsoft.com/en-us/azure/security/azure-security-disk-encryption-overview
"""
helps['vm encryption disable'] = """
type: command
short-summary: Disable disk encryption on the OS disk and/or data disks. Decrypt mounted disks.
long-summary: |
For Linux VMs, disabling encryption is only permitted on data volumes.
For Windows VMS, disabling encryption is permitted on both OS and data volumes.
examples:
- name: Disable disk encryption on the OS disk and/or data disks. (autogenerated)
text: az vm encryption disable --name MyVirtualMachine --resource-group MyResourceGroup --volume-type DATA
crafted: true
"""
helps['vm encryption enable'] = """
type: command
short-summary: "Enable disk encryption on the OS disk and/or data disks. Encrypt mounted disks."
long-summary: |
Note that Azure Active Directory / service principal arguments are unnecessary for vm encryption. The older version of Azure Disk Encryption required AAD arguments.
For more information, see:
https://docs.microsoft.com/en-us/azure/security/azure-security-disk-encryption-overview
parameters:
- name: --aad-client-id
short-summary: Client ID of an AAD app with permissions to write secrets to the key vault.
- name: --aad-client-secret
short-summary: Client secret of the AAD app with permissions to write secrets to the key vault.
- name: --aad-client-cert-thumbprint
short-summary: Thumbprint of the AAD app certificate with permissions to write secrets to the key vault.
examples:
- name: encrypt a VM using a key vault in the same resource group
text: >
az vm encryption enable -g MyResourceGroup -n MyVm --disk-encryption-keyvault MyVault
"""
helps['vm encryption show'] = """
type: command
short-summary: Show encryption status.
examples:
- name: Show encryption status. (autogenerated)
text: az vm encryption show --name MyVirtualMachine --resource-group MyResourceGroup
crafted: true
"""
helps['vm extension'] = """
type: group
short-summary: Manage extensions on VMs.
long-summary: >
Extensions are small applications that provide post-deployment configuration and automation tasks on Azure virtual machines.
For example, if a virtual machine requires software installation, anti-virus protection, or Docker configuration, a VM extension
can be used to complete these tasks. Extensions can be bundled with a new virtual machine deployment or run against any existing system.
"""
helps['vm extension delete'] = """
type: command
short-summary: Remove an extension attached to a VM.
examples:
- name: Use a VM name and extension to delete an extension from a VM.
text: az vm extension delete -g MyResourceGroup --vm-name MyVm -n extension_name
- name: Delete extensions with IDs containing the string "MyExtension" from a VM.
text: >
az vm extension delete --ids \\
$(az resource list --query "[?contains(name, 'MyExtension')].id" -o tsv)
"""
helps['vm extension image'] = """
type: group
short-summary: Find the available VM extensions for a subscription and region.
"""
helps['vm extension image list'] = """
type: command
short-summary: List the information on available extensions.
examples:
- name: List the unique publishers for extensions.
text: az vm extension image list --query "[].publisher" -o tsv | sort -u
- name: Find extensions with "Docker" in the name.
text: az vm extension image list --query "[].name" -o tsv | sort -u | grep Docker
- name: List extension names where the publisher name starts with "Microsoft.Azure.App".
text: |
az vm extension image list --query \\
"[?starts_with(publisher, 'Microsoft.Azure.App')].publisher" \\
-o tsv | sort -u | xargs -I{} az vm extension image list-names --publisher {} -l westus
"""
helps['vm extension image list-names'] = """
type: command
short-summary: List the names of available extensions.
examples:
- name: Find Docker extensions by publisher and location.
text: >
az vm extension image list-names --publisher Microsoft.Azure.Extensions \\
-l westus --query "[?starts_with(name, 'Docker')]"
- name: Find CustomScript extensions by publisher and location.
text: >
az vm extension image list-names --publisher Microsoft.Azure.Extensions \\
-l westus --query "[?starts_with(name, 'Custom')]"
"""
helps['vm extension image list-versions'] = """
type: command
short-summary: List the versions for available extensions.
examples:
- name: Find the available versions for the Docker extension.
text: >
az vm extension image list-versions --publisher Microsoft.Azure.Extensions \\
-l westus -n DockerExtension -otable
"""
helps['vm extension image show'] = """
type: command
short-summary: Display information for an extension.
examples:
- name: Show the CustomScript extension version 2.0.2.
text: >
az vm extension image show -l westus -n CustomScript \\
--publisher Microsoft.Azure.Extensions --version 2.0.2
- name: Show the latest version of the Docker extension.
text: >
publisher=Microsoft.Azure.Extensions
extension=DockerExtension
location=westus
latest=$(az vm extension image list-versions \\
--publisher {publisher} -l {location} -n {extension} \\
--query "[].name" -o tsv | sort | tail -n 1)
az vm extension image show -l {location} \\
--publisher {publisher} -n {extension} --version {latest}
"""
helps['vm extension list'] = """
type: command
short-summary: List the extensions attached to a VM.
examples:
- name: List attached extensions to a named VM.
text: az vm extension list -g MyResourceGroup --vm-name MyVm
"""
helps['vm extension set'] = """
type: command
short-summary: Set extensions for a VM.
long-summary: Get extension details from `az vm extension image list`.
examples:
- name: Add a user account to a Linux VM.
text: |
az vm extension set -n VMAccessForLinux --publisher Microsoft.OSTCExtensions --version 1.4 \\
--vm-name MyVm --resource-group MyResourceGroup \\
--protected-settings '{"username":"user1", "ssh_key":"ssh_rsa ..."}'
parameters:
- name: --name -n
populator-commands:
- az vm extension image list
"""
helps['vm extension show'] = """
type: command
short-summary: Display information about extensions attached to a VM.
examples:
- name: Use VM name and extension name to show the extensions attached to a VM.
text: az vm extension show -g MyResourceGroup --vm-name MyVm -n extension_name
"""
helps['vm extension wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of a virtual machine extension is met.
"""
helps['vm generalize'] = """
type: command
short-summary: Mark a VM as generalized, allowing it to be imaged for multiple deployments.
long-summary: 'For an end-to-end tutorial, see https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-capture-image'
examples:
- name: Deallocate, generalize, and capture a stopped virtual machine.
text: |
az vm deallocate -g MyResourceGroup -n MyVm
az vm generalize -g MyResourceGroup -n MyVm
az vm capture -g MyResourceGroup -n MyVm --vhd-name-prefix MyPrefix
- name: Deallocate, generalize, and capture multiple stopped virtual machines.
text: |
vms_ids=$(az vm list -g MyResourceGroup --query "[].id" -o tsv)
az vm deallocate --ids {vms_ids}
az vm generalize --ids {vms_ids}
az vm capture --ids {vms_ids} --vhd-name-prefix MyPrefix
"""
helps['vm get-instance-view'] = """
type: command
short-summary: Get instance information about a VM.
examples:
- name: Use a resource group and name to get instance view information of a VM.
text: az vm get-instance-view -g MyResourceGroup -n MyVm
- name: Get instance views for all VMs in a resource group.
text: >
az vm get-instance-view --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm identity'] = """
type: group
short-summary: manage service identities of a VM
"""
helps['vm identity assign'] = """
type: command
short-summary: Enable managed service identity on a VM.
long-summary: This is required to authenticate and interact with other Azure services using bearer tokens.
examples:
- name: Enable the system assigned identity on a VM with the 'Reader' role.
text: az vm identity assign -g MyResourceGroup -n MyVm --role Reader --scope /subscriptions/db5eb68e-73e2-4fa8-b18a-0123456789999/resourceGroups/MyResourceGroup
- name: Enable the system assigned identity and a user assigned identity on a VM.
text: az vm identity assign -g MyResourceGroup -n MyVm --role Reader --identities [system] myAssignedId
"""
helps['vm identity remove'] = """
type: command
short-summary: Remove managed service identities from a VM.
examples:
- name: Remove the system assigned identity
text: az vm identity remove -g MyResourceGroup -n MyVm
- name: Remove a user assigned identity
text: az vm identity remove -g MyResourceGroup -n MyVm --identities readerId
- name: Remove 2 identities which are in the same resource group with the VM
text: az vm identity remove -g MyResourceGroup -n MyVm --identities readerId writerId
- name: Remove the system assigned identity and a user identity
text: az vm identity remove -g MyResourceGroup -n MyVm --identities [system] readerId
"""
helps['vm identity show'] = """
type: command
short-summary: display VM's managed identity info.
examples:
- name: display VM's managed identity info. (autogenerated)
text: az vm identity show --name MyVirtualMachine --resource-group MyResourceGroup
crafted: true
"""
helps['vm image'] = """
type: group
short-summary: Information on available virtual machine images.
"""
helps['vm image accept-terms'] = """
type: command
short-summary: Accept Azure Marketplace terms so that the image can be used to create VMs
examples:
  - name: Accept Azure Marketplace terms so that the image can be used to create VMs. (autogenerated)
    text: az vm image accept-terms --urn publisher:offer:sku:version
crafted: true
"""
helps['vm image list'] = """
type: command
short-summary: List the VM/VMSS images available in the Azure Marketplace.
parameters:
- name: --all
short-summary: Retrieve image list from live Azure service rather using an offline image list
- name: --offer -f
short-summary: Image offer name, partial name is accepted
- name: --publisher -p
short-summary: Image publisher name, partial name is accepted
- name: --sku -s
short-summary: Image sku name, partial name is accepted
examples:
- name: List all available images.
text: az vm image list --all
- name: List all offline cached CentOS images.
text: az vm image list -f CentOS
- name: List all CentOS images.
text: az vm image list -f CentOS --all
"""
# Help entry for `az vm image list-offers` (knack YAML help schema).
# Fix: the second example's name misspelled the publisher "OpenLogic" as
# "OpenLocic"; the example command itself already used the correct spelling.
helps['vm image list-offers'] = """
type: command
short-summary: List the VM image offers available in the Azure Marketplace.
parameters:
  - name: --publisher -p
    populator-commands:
    - az vm list-publishers
examples:
  - name: List all offers from Microsoft in the West US region.
    text: az vm image list-offers -l westus -p MicrosoftWindowsServer
  - name: List all offers from OpenLogic in the West US region.
    text: az vm image list-offers -l westus -p OpenLogic
"""
helps['vm image list-publishers'] = """
type: command
short-summary: List the VM image publishers available in the Azure Marketplace.
examples:
- name: List all publishers in the West US region.
text: az vm image list-publishers -l westus
- name: List all publishers with names starting with "Open" in westus.
text: az vm image list-publishers -l westus --query "[?starts_with(name, 'Open')]"
"""
helps['vm image list-skus'] = """
type: command
short-summary: List the VM image SKUs available in the Azure Marketplace.
parameters:
- name: --publisher -p
populator-commands:
- az vm image list-publishers
examples:
- name: List all skus available for CentOS published by OpenLogic in the West US region.
text: az vm image list-skus -l westus -f CentOS -p OpenLogic
"""
helps['vm image show'] = """
type: command
short-summary: Get the details for a VM image available in the Azure Marketplace.
examples:
- name: Show information for the latest available CentOS image from OpenLogic.
text: >
latest=$(az vm image list -p OpenLogic -s 7.3 --all --query \\
"[?offer=='CentOS'].version" -o tsv | sort -u | tail -n 1)
az vm image show -l westus -f CentOS -p OpenLogic --sku 7.3 --version {latest}
"""
helps['vm list'] = """
type: command
short-summary: List details of Virtual Machines.
long-summary: 'For more information on querying information about Virtual Machines, see https://docs.microsoft.com/en-us/cli/azure/query-az-cli2'
examples:
- name: List all VMs.
text: az vm list
- name: List all VMs by resource group.
text: az vm list -g MyResourceGroup
- name: List all VMs by resource group with details.
text: az vm list -g MyResourceGroup -d
"""
helps['vm list-ip-addresses'] = """
type: command
short-summary: List IP addresses associated with a VM.
examples:
- name: Get the IP addresses for a VM.
text: az vm list-ip-addresses -g MyResourceGroup -n MyVm
- name: Get IP addresses for all VMs in a resource group.
text: >
az vm list-ip-addresses --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm list-sizes'] = """
type: command
short-summary: List available sizes for VMs.
examples:
- name: List the available VM sizes in the West US region.
text: az vm list-sizes -l westus
"""
helps['vm list-skus'] = """
type: command
short-summary: Get details for compute-related resource SKUs.
long-summary: This command incorporates subscription level restriction, offering the most accurate information.
examples:
- name: List all SKUs in the West US region.
text: az vm list-skus -l westus
- name: List all available vm sizes in the East US2 region which support availability zone.
text: az vm list-skus -l eastus2 --zone
- name: List all available vm sizes in the East US2 region which support availability zone with name like "standard_ds1...".
text: az vm list-skus -l eastus2 --zone --size standard_ds1
- name: List availability set related sku information in The West US region.
text: az vm list-skus -l westus --resource-type availabilitySets
"""
helps['vm list-usage'] = """
type: command
short-summary: List available usage resources for VMs.
examples:
- name: Get the compute resource usage for the West US region.
text: az vm list-usage -l westus
"""
helps['vm list-vm-resize-options'] = """
type: command
short-summary: List available resizing options for VMs.
examples:
- name: List all available VM sizes for resizing.
text: az vm list-vm-resize-options -g MyResourceGroup -n MyVm
- name: List available sizes for all VMs in a resource group.
text: >
az vm list-vm-resize-options --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm nic'] = """
type: group
short-summary: Manage network interfaces. See also `az network nic`.
long-summary: >
A network interface (NIC) is the interconnection between a VM and the underlying software
network. For more information, see https://docs.microsoft.com/azure/virtual-network/virtual-network-network-interface-overview.
"""
helps['vm nic add'] = """
type: command
short-summary: Add existing NICs to a VM.
examples:
- name: Add two NICs to a VM.
text: az vm nic add -g MyResourceGroup --vm-name MyVm --nics nic_name1 nic_name2
"""
helps['vm nic list'] = """
type: command
short-summary: List the NICs available on a VM.
examples:
- name: List all of the NICs on a VM.
text: az vm nic list -g MyResourceGroup --vm-name MyVm
"""
helps['vm nic remove'] = """
type: command
short-summary: Remove NICs from a VM.
examples:
- name: Remove two NICs from a VM.
text: az vm nic remove -g MyResourceGroup --vm-name MyVm --nics nic_name1 nic_name2
"""
helps['vm nic set'] = """
type: command
short-summary: Configure settings of a NIC attached to a VM.
examples:
- name: Set a NIC on a VM to be the primary interface.
text: az vm nic set -g MyResourceGroup --vm-name MyVm --nic nic_name1 nic_name2 --primary-nic nic_name2
"""
helps['vm nic show'] = """
type: command
short-summary: Display information for a NIC attached to a VM.
examples:
- name: Show details of a NIC on a VM.
text: az vm nic show -g MyResourceGroup --vm-name MyVm --nic nic_name1
"""
helps['vm open-port'] = """
type: command
short-summary: Opens a VM to inbound traffic on specified ports.
long-summary: >
Adds a security rule to the network security group (NSG) that is attached to the VM's
network interface (NIC) or subnet. The existing NSG will be used or a new one will be
created. The rule name is 'open-port-{port}' and will overwrite an existing rule with
this name. For multi-NIC VMs, or for more fine-grained control, use the appropriate
network commands directly (nsg rule create, etc).
examples:
- name: Open all ports on a VM to inbound traffic.
text: az vm open-port -g MyResourceGroup -n MyVm --port '*'
- name: Open a range of ports on a VM to inbound traffic with the highest priority.
text: az vm open-port -g MyResourceGroup -n MyVm --port 80-100 --priority 100
- name: Open all ports for all VMs in a resource group.
text: >
az vm open-port --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv) --port '*'
"""
helps['vm redeploy'] = """
type: command
short-summary: Redeploy an existing VM.
examples:
- name: Redeploy a VM.
text: az vm redeploy -g MyResourceGroup -n MyVm
- name: Redeploy all VMs in a resource group.
text: >
az vm redeploy --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm resize'] = """
type: command
short-summary: Update a VM's size.
parameters:
- name: --size
type: string
short-summary: The VM size.
populator-commands:
- az vm list-vm-resize-options
examples:
- name: Resize a VM.
text: az vm resize -g MyResourceGroup -n MyVm --size Standard_DS3_v2
- name: Resize all VMs in a resource group.
text: >
az vm resize --size Standard_DS3_v2 --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm restart'] = """
type: command
short-summary: Restart VMs.
examples:
- name: Restart a VM.
text: az vm restart -g MyResourceGroup -n MyVm
- name: Restart all VMs in a resource group.
text: >
az vm restart --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm run-command'] = """
type: group
short-summary: Manage run commands on a Virtual Machine.
long-summary: 'For more information, see https://docs.microsoft.com/en-us/azure/virtual-machines/windows/run-command or https://docs.microsoft.com/en-us/azure/virtual-machines/linux/run-command.'
"""
helps['vm run-command invoke'] = """
type: command
short-summary: Execute a specific run command on a vm.
parameters:
- name: --command-id
type: string
short-summary: The command id
populator-commands:
- az vm run-command list
examples:
- name: install nginx on a vm
text: az vm run-command invoke -g MyResourceGroup -n MyVm --command-id RunShellScript --scripts "sudo apt-get update && sudo apt-get install -y nginx"
- name: invoke command with parameters
text: az vm run-command invoke -g MyResourceGroup -n MyVm --command-id RunShellScript --scripts 'echo $1 $2' --parameters hello world
"""
helps['vm run-command show'] = """
type: command
parameters:
- name: --command-id
type: string
short-summary: The command id
populator-commands:
- az vm run-command list
"""
helps['vm secret'] = """
type: group
short-summary: Manage VM secrets.
"""
helps['vm secret add'] = """
type: command
short-summary: Add a secret to a VM.
"""
helps['vm secret format'] = """
type: command
short-summary: Transform secrets into a form that can be used by VMs and VMSSes.
parameters:
- name: --secrets -s
long-summary: >
The command will attempt to resolve the vault ID for each secret. If it is unable to do so,
specify the vault ID to use for *all* secrets using: --keyvault NAME --resource-group NAME | --keyvault ID.
examples:
- name: Create a self-signed certificate with the default policy, and add it to a virtual machine.
text: >
az keyvault certificate create --vault-name vaultname -n cert1 \\
-p "$(az keyvault certificate get-default-policy)"
secrets=$(az keyvault secret list-versions --vault-name vaultname \\
-n cert1 --query "[?attributes.enabled].id" -o tsv)
vm_secrets=$(az vm secret format -s "$secrets")
az vm create -g group-name -n vm-name --admin-username deploy \\
--image debian --secrets "$vm_secrets"
"""
helps['vm secret list'] = """
type: command
short-summary: List secrets on a VM.
"""
helps['vm secret remove'] = """
type: command
short-summary: Remove a secret from a VM.
"""
helps['vm show'] = """
type: command
short-summary: Get the details of a VM.
examples:
- name: Show information about a VM.
text: az vm show -g MyResourceGroup -n MyVm -d
- name: Get the details for all VMs in a resource group.
text: >
az vm show -d --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm start'] = """
type: command
short-summary: Start a stopped VM.
examples:
- name: Start a stopped VM.
text: az vm start -g MyResourceGroup -n MyVm
- name: Start all VMs in a resource group.
text: >
az vm start --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm stop'] = """
type: command
short-summary: Power off (stop) a running VM.
long-summary: The VM will continue to be billed. To avoid this, you can deallocate the VM through "az vm deallocate"
examples:
- name: Power off (stop) a running VM.
text: az vm stop -g MyResourceGroup -n MyVm
- name: Stop all VMs in a resource group.
text: >
az vm stop --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm unmanaged-disk'] = """
type: group
short-summary: Manage the unmanaged data disks attached to a VM.
long-summary: >4
Azure Virtual Machines use disks as a place to store an operating system, applications, and data.
All Azure virtual machines have at least two disks: An operating system disk, and a temporary disk.
The operating system disk is created from an image, and both the operating system disk and the image are actually virtual hard disks (VHDs)
stored in an Azure storage account. Virtual machines also can have one or more data disks, that are also stored as VHDs.
Azure Managed and Unmanaged Data Disks have a maximum size of 4095 GB (with the exception of larger disks in preview). Azure Unmanaged Disks also have a maximum capacity of 4095 GB.
For more information, see:
- Azure Disks - https://docs.microsoft.com/en-us/azure/virtual-machines/linux/about-disks-and-vhds and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/about-disks-and-vhds.
- Larger Managed Disks in Public Preview - https://azure.microsoft.com/en-us/blog/introducing-the-public-preview-of-larger-managed-disks-sizes/
- Ultra SSD Managed Disks in Public Preview - https://docs.microsoft.com/en-us/azure/virtual-machines/windows/disks-ultra-ssd
"""
helps['vm unmanaged-disk attach'] = """
type: command
short-summary: Attach an unmanaged persistent disk to a VM.
long-summary: This allows for the preservation of data, even if the VM is reprovisioned due to maintenance or resizing.
examples:
- name: Attach a new default sized (1023 GB) unmanaged data disk to a VM.
text: az vm unmanaged-disk attach -g MyResourceGroup --vm-name MyVm --new
- name: Attach an existing data disk to a VM as unmanaged.
text: >
az vm unmanaged-disk attach -g MyResourceGroup --vm-name MyVm \\
--vhd-uri https://mystorage.blob.core.windows.net/vhds/d1.vhd
"""
helps['vm unmanaged-disk detach'] = """
type: command
short-summary: Detach an unmanaged disk from a VM.
examples:
- name: Detach a data disk from a VM.
text: >
az vm unmanaged-disk detach -g MyResourceGroup --vm-name MyVm -n disk_name
"""
helps['vm unmanaged-disk list'] = """
type: command
short-summary: List unmanaged disks of a VM.
examples:
- name: List the unmanaged disks attached to a VM.
text: az vm unmanaged-disk list -g MyResourceGroup --vm-name MyVm
- name: List unmanaged disks with names containing the string "data_disk".
text: >
az vm unmanaged-disk list -g MyResourceGroup --vm-name MyVm \\
--query "[?contains(name, 'data_disk')]" --output table
"""
helps['vm update'] = """
type: command
short-summary: Update the properties of a VM.
long-summary: Update VM objects and properties using paths that correspond to 'az vm show'.
examples:
- name: Add or update a tag.
text: az vm update -n name -g group --set tags.tagName=tagValue
- name: Remove a tag.
text: az vm update -n name -g group --remove tags.tagName
- name: Set the primary NIC of a VM.
text: az vm update -n name -g group --set networkProfile.networkInterfaces[1].primary=false networkProfile.networkInterfaces[0].primary=true
- name: Add a new non-primary NIC to a VM.
text: az vm update -n name -g group --add networkProfile.networkInterfaces primary=false id={NIC_ID}
- name: Remove the fourth NIC from a VM.
text: az vm update -n name -g group --remove networkProfile.networkInterfaces 3
"""
helps['vm user'] = """
type: group
short-summary: Manage user accounts for a VM.
"""
helps['vm user delete'] = """
type: command
short-summary: Delete a user account from a VM.
examples:
- name: Delete a user account.
text: az vm user delete -u username -n MyVm -g MyResourceGroup
- name: Delete a user on all VMs in a resource group.
text: >
az vm user delete -u username --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm user reset-ssh'] = """
type: command
short-summary: Reset the SSH configuration on a VM.
long-summary: >
The extension will restart the SSH service, open the SSH port on your VM, and reset the SSH configuration to default values. The user account (name, password, and SSH keys) are not changed.
examples:
- name: Reset the SSH configuration.
text: az vm user reset-ssh -n MyVm -g MyResourceGroup
- name: Reset the SSH server on all VMs in a resource group.
text: >
az vm user reset-ssh --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm user update'] = """
type: command
short-summary: Update a user account.
parameters:
- name: --ssh-key-value
short-summary: SSH public key file value or public key file path
examples:
- name: Update a Windows user account.
text: az vm user update -u username -p password -n MyVm -g MyResourceGroup
- name: Update a Linux user account.
text: az vm user update -u username --ssh-key-value "$({ ~/.ssh/id_rsa.pub)" -n MyVm -g MyResourceGroup
- name: Update a user on all VMs in a resource group.
text: >
az vm user update -u username --ssh-key-value "$({ ~/.ssh/id_rsa.pub)" --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vm wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the VM is met.
examples:
- name: Wait until a VM is created.
text: az vm wait -g MyResourceGroup -n MyVm --created
- name: Wait until all VMs in a resource group are deleted.
text: >
az vm wait --deleted --ids $(az vm list -g MyResourceGroup --query "[].id" -o tsv)
"""
helps['vmss'] = """
type: group
short-summary: Manage groupings of virtual machines in an Azure Virtual Machine Scale Set (VMSS).
"""
helps['vmss create'] = """
type: command
short-summary: Create an Azure Virtual Machine Scale Set.
long-summary: 'For an end-to-end tutorial, see https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-linux-create-cli.'
parameters:
- name: --image
type: string
short-summary: >
The name of the operating system image as a URN alias, URN, custom image name or ID, or VHD blob URI.
Valid URN format: "Publisher:Offer:Sku:Version".
populator-commands:
- az vm image list
- az vm image show
examples:
- name: Create a Windows VM scale set with 5 instances, a load balancer, a public IP address, and a 2GB data disk.
text: >
az vmss create -n MyVmss -g MyResourceGroup --instance-count 5 --image Win2016Datacenter --data-disk-sizes-gb 2
- name: Create a Linux VM scale set with an auto-generated ssh key pair, a public IP address, a DNS entry, an existing load balancer, and an existing virtual network.
text: |
az vmss create -n MyVmss -g MyResourceGroup --public-ip-address-dns-name my-globally-dns-name \\
--load-balancer MyLoadBalancer --vnet-name MyVnet --subnet MySubnet --image UbuntuLTS \\
--generate-ssh-keys
- name: Create a Linux VM scale set from a custom image using the default existing public SSH key.
text: >
az vmss create -n MyVmss -g MyResourceGroup --image MyImage
- name: Create a Linux VM scale set with a load balancer and custom DNS servers. Each VM has a public-ip address and a custom domain name.
text: >
az vmss create -n MyVmss -g MyResourceGroup --image centos \\
--public-ip-per-vm --vm-domain-name myvmss --dns-servers 10.0.0.6 10.0.0.5
- name: 'Create a Linux VM scale set using a cloud-init script for configuration. See: https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init'
text: >
az vmss create -g MyResourceGroup -n MyVmss --image debian --custom-data MyCloudInitScript.yml
- name: Create a Debian VM scaleset using Key Vault secrets.
text: >
az keyvault certificate create --vault-name vaultname -n cert1 \\
-p "$(az keyvault certificate get-default-policy)"
secrets=$(az keyvault secret list-versions --vault-name vaultname \\
-n cert1 --query "[?attributes.enabled].id" -o tsv)
vm_secrets=$(az vm secret format -s "$secrets")
az vmss create -g group-name -n vm-name --admin-username deploy \\
--image debian --secrets "$vm_secrets"
- name: Create a VM scaleset with system assigned identity. The VM will have a 'Contributor' Role with access to a storage account.
text: >
az vmss create -n MyVmss -g MyResourceGroup --image centos --assign-identity --scope /subscriptions/99999999-1bf0-4dda-aec3-cb9272f09590/MyResourceGroup/myRG/providers/Microsoft.Storage/storageAccounts/storage1
- name: Create a debian VM scaleset with a user assigned identity.
text: >
az vmss create -n MyVmss -g rg1 --image debian --assign-identity /subscriptions/99999999-1bf0-4dda-aec3-cb9272f09590/resourcegroups/myRG/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myID
- name: Create a debian VM scaleset with both system and user assigned identity.
text: >
az vmss create -n MyVmss -g rg1 --image debian --assign-identity [system] /subscriptions/99999999-1bf0-4dda-aec3-cb9272f09590/resourcegroups/myRG/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myID
- name: Create a single zone VM scaleset in the current resource group's region
supported-profiles: latest
text: >
az vmss create -n MyVmss -g MyResourceGroup --image Centos --zones 1
"""
helps['vmss deallocate'] = """
type: command
short-summary: Deallocate VMs within a VMSS.
"""
helps['vmss delete-instances'] = """
type: command
short-summary: Delete VMs within a VMSS.
"""
helps['vmss diagnostics'] = """
type: group
short-summary: Configure the Azure Virtual Machine Scale Set diagnostics extension.
"""
helps['vmss diagnostics get-default-config'] = """
type: command
short-summary: Show the default config file which defines data to be collected.
"""
helps['vmss diagnostics set'] = """
type: command
short-summary: Enable diagnostics on a VMSS.
"""
helps['vmss disk'] = """
type: group
short-summary: Manage data disks of a VMSS.
"""
helps['vmss disk attach'] = """
type: command
short-summary: Attach managed data disks to a scale set or its instances.
"""
helps['vmss disk detach'] = """
type: command
short-summary: Detach managed data disks from a scale set or its instances.
"""
helps['vmss encryption'] = """
type: group
short-summary: "(PREVIEW) Manage encryption of VMSS."
long-summary: "For more information, see: https://docs.microsoft.com/en-us/azure/security/azure-security-disk-encryption-overview"
"""
helps['vmss encryption disable'] = """
type: command
short-summary: Disable the encryption on a VMSS with managed disks.
examples:
- name: disable encryption a VMSS
text: >
az vmss encryption disable -g MyResourceGroup -n MyVm
"""
helps['vmss encryption enable'] = """
type: command
short-summary: "Encrypt a VMSS with managed disks."
    long-summary: "For more information, see: https://docs.microsoft.com/en-us/azure/security/azure-security-disk-encryption-overview"
examples:
- name: encrypt a VM scale set using a key vault in the same resource group
text: >
az vmss encryption enable -g MyResourceGroup -n MyVmss --disk-encryption-keyvault MyVault
"""
helps['vmss encryption show'] = """
type: command
short-summary: Show encryption status.
"""
helps['vmss extension'] = """
type: group
short-summary: Manage extensions on a VM scale set.
"""
helps['vmss extension delete'] = """
type: command
short-summary: Delete an extension from a VMSS.
"""
helps['vmss extension image'] = """
type: group
short-summary: Find the available VM extensions for a subscription and region.
"""
helps['vmss extension image list'] = """
type: command
short-summary: List the information on available extensions.
examples:
- name: List the unique publishers for extensions.
text: az vmss extension image list --query "[].publisher" -o tsv | sort -u
- name: Find extensions with "Docker" in the name.
text: az vmss extension image list --query "[].name" -o tsv | sort -u | grep Docker
- name: List extension names where the publisher name starts with "Microsoft.Azure.App".
text: |
az vmss extension image list --query \\
"[?starts_with(publisher, 'Microsoft.Azure.App')].publisher" \\
-o tsv | sort -u | xargs -I{} az vmss extension image list-names --publisher {} -l westus
"""
helps['vmss extension list'] = """
type: command
short-summary: List extensions associated with a VMSS.
examples:
- name: List extensions associated with a VMSS. (autogenerated)
text: az vmss extension list --resource-group MyResourceGroup --vmss-name MyVmss
crafted: true
"""
helps['vmss extension set'] = """
type: command
short-summary: Add an extension to a VMSS or update an existing extension.
long-summary: Get extension details from `az vmss extension image list`.
parameters:
- name: --name -n
populator-commands:
- az vm extension image list
examples:
- name: >
Set an extension which depends on two previously set extensions. That is, When a VMSS instance is created or reimaged, the customScript extension will be provisioned only after all extensions that it depends on have been provisioned. The extension need not depend on the other extensions for pre-requisite configurations.
text: >
az vmss extension set --vmss-name my-vmss --name customScript --resource-group my-group \\
--version 2.0 --publisher Microsoft.Azure.Extensions \\
--provision-after-extensions NetworkWatcherAgentLinux VMAccessForLinux \\
--settings '{"commandToExecute": "echo testing"}'
"""
helps['vmss extension show'] = """
type: command
short-summary: Show details on a VMSS extension.
"""
helps['vmss get-instance-view'] = """
type: command
short-summary: View an instance of a VMSS.
parameters:
- name: --instance-id
short-summary: A VM instance ID or "*" to list instance view for all VMs in a scale set.
examples:
- name: View an instance of a VMSS. (autogenerated)
text: az vmss get-instance-view --name MyScaleSet --resource-group MyResourceGroup
crafted: true
"""
helps['vmss identity'] = """
type: group
    short-summary: Manage service identities of a VM scaleset.
"""
helps['vmss identity assign'] = """
type: command
short-summary: Enable managed service identity on a VMSS.
long-summary: This is required to authenticate and interact with other Azure services using bearer tokens.
examples:
- name: Enable system assigned identity on a VMSS with the 'Owner' role.
text: az vmss identity assign -g MyResourceGroup -n MyVmss --role Owner --scope /subscriptions/db5eb68e-73e2-4fa8-b18a-0123456789999/resourceGroups/MyResourceGroup
"""
helps['vmss identity remove'] = """
type: command
short-summary: (PREVIEW) Remove user assigned identities from a VM scaleset.
examples:
- name: Remove system assigned identity
text: az vmss identity remove -g MyResourceGroup -n MyVmss
- name: Remove 2 identities which are in the same resource group with the VM scaleset
text: az vmss identity remove -g MyResourceGroup -n MyVmss --identities readerId writerId
- name: Remove system assigned identity and a user identity
text: az vmss identity remove -g MyResourceGroup -n MyVmss --identities [system] readerId
"""
helps['vmss identity show'] = """
type: command
    short-summary: Display the VM scaleset's managed identity info.
"""
helps['vmss list'] = """
type: command
short-summary: List VMSS.
"""
helps['vmss list-instance-connection-info'] = """
type: command
short-summary: Get the IP address and port number used to connect to individual VM instances within a set.
examples:
- name: Get the IP address and port number used to connect to individual VM instances within a set. (autogenerated)
text: az vmss list-instance-connection-info --name MyScaleSet --resource-group MyResourceGroup
crafted: true
"""
helps['vmss list-instance-public-ips'] = """
type: command
short-summary: List public IP addresses of VM instances within a set.
examples:
- name: List public IP addresses of VM instances within a set. (autogenerated)
text: az vmss list-instance-public-ips --name MyScaleSet --resource-group MyResourceGroup
crafted: true
"""
helps['vmss nic'] = """
type: group
short-summary: Manage network interfaces of a VMSS.
"""
helps['vmss reimage'] = """
type: command
short-summary: Reimage VMs within a VMSS.
parameters:
- name: --instance-id
short-summary: VM instance ID. If missing, reimage all instances.
"""
helps['vmss restart'] = """
type: command
short-summary: Restart VMs within a VMSS.
"""
helps['vmss rolling-upgrade'] = """
type: group
short-summary: (PREVIEW) Manage rolling upgrades.
"""
helps['vmss run-command'] = """
type: group
short-summary: Manage run commands on a Virtual Machine Scale Set.
long-summary: 'For more information, see https://docs.microsoft.com/en-us/azure/virtual-machines/windows/run-command or https://docs.microsoft.com/en-us/azure/virtual-machines/linux/run-command.'
"""
helps['vmss run-command invoke'] = """
type: command
short-summary: Execute a specific run command on a Virtual Machine Scale Set instance.
parameters:
- name: --command-id
type: string
short-summary: The command id
populator-commands:
- az vmss run-command list
- name: --instance-id
short-summary: Scale set VM instance id.
examples:
- name: install nginx on a VMSS instance
text: az vmss run-command invoke -g MyResourceGroup -n MyVMSS --command-id RunShellScript \\ --instance-id 0 --scripts "sudo apt-get update && sudo apt-get install -y nginx"
- name: invoke a run-command with parameters on a VMSS instance
text: az vmss run-command invoke -g MyResourceGroup -n MyVMSS --command-id RunShellScript \\ --instance-id 4 --scripts 'echo $1 $2' --parameters hello world
- name: 'invoke command on all VMSS instances using the VMSS instance resource IDs. Note: "@-" expands to stdin.'
text: |
az vmss list-instances -n MyVMSS -g ova-test --query "[].id" --output tsv | \\
az vmss run-command invoke --scripts 'echo $1 $2' --parameters hello world \\
--command-id RunShellScript --ids @-
"""
helps['vmss run-command show'] = """
type: command
parameters:
- name: --command-id
type: string
short-summary: The command id
populator-commands:
- az vmss run-command list
"""
helps['vmss scale'] = """
type: command
short-summary: Change the number of VMs within a VMSS.
parameters:
- name: --new-capacity
short-summary: Number of VMs in the VMSS.
examples:
- name: Change the number of VMs within a VMSS. (autogenerated)
text: az vmss scale --name MyScaleSet --new-capacity 6 --resource-group MyResourceGroup
crafted: true
"""
helps['vmss show'] = """
type: command
short-summary: Get details on VMs within a VMSS.
parameters:
- name: --instance-id
short-summary: VM instance ID. If missing, show the VMSS.
examples:
- name: Get details on VMs within a VMSS. (autogenerated)
text: az vmss show --name MyScaleSet --resource-group MyResourceGroup
crafted: true
"""
helps['vmss start'] = """
type: command
short-summary: Start VMs within a VMSS.
"""
helps['vmss stop'] = """
type: command
short-summary: Power off (stop) VMs within a VMSS.
long-summary: The VMs will continue to be billed. To avoid this, you can deallocate VM instances within a VMSS through "az vmss deallocate"
"""
helps['vmss update'] = """
type: command
short-summary: Update a VMSS.
examples:
- name: Update a VMSS. (autogenerated)
text: az vmss update --name MyScaleSet --resource-group MyResourceGroup --set virtualMachineProfile.storageProfile.imageReference.version=16.04.201801090
crafted: true
"""
helps['vmss update-instances'] = """
type: command
short-summary: Upgrade VMs within a VMSS.
examples:
- name: Upgrade VMs within a VMSS. (autogenerated)
text: az vmss update-instances --instance-ids 1 --name MyScaleSet --resource-group MyResourceGroup
crafted: true
"""
helps['vmss wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of a scale set is met.
"""
| true | true |
1c31e33a225d329db4b0d6e1fde6090b3aa90df9 | 881 | py | Python | back/depends/user.py | Bash-Air/bashair | 76d07c0cca9323f5089d0c49450470bde887c4ea | [
"MIT"
] | null | null | null | back/depends/user.py | Bash-Air/bashair | 76d07c0cca9323f5089d0c49450470bde887c4ea | [
"MIT"
] | null | null | null | back/depends/user.py | Bash-Air/bashair | 76d07c0cca9323f5089d0c49450470bde887c4ea | [
"MIT"
] | 1 | 2022-02-14T11:27:50.000Z | 2022-02-14T11:27:50.000Z | from django.contrib.auth.models import User
from fastapi import Depends, HTTPException
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from fastapi_jwt_auth import AuthJWT
from jose import JWTError
from back.utils.exceptions import Credentials
def get_current_user(auth: AuthJWT = Depends(), credentials: HTTPAuthorizationCredentials = Depends(HTTPBearer())):
    """Resolve the Django ``User`` named by the JWT subject of the request.

    The ``credentials`` dependency makes FastAPI require a Bearer
    ``Authorization`` header; the JWT subject is then looked up as a Django
    username.  Raises ``Credentials`` when the token has no subject, cannot
    be decoded, or names an unknown user.
    """
    try:
        subject = auth.get_jwt_subject()
        if subject is None:
            raise Credentials
    except JWTError:
        raise Credentials
    try:
        return User.objects.get(username=subject)
    except User.DoesNotExist:
        raise Credentials
def get_current_active_user(current_user: User = Depends(get_current_user)):
    """Return the authenticated user, rejecting inactive accounts with 400."""
    if current_user.is_active:
        return current_user
    raise HTTPException(status_code=400, detail="Inactive user")
| 31.464286 | 115 | 0.750284 | from django.contrib.auth.models import User
from fastapi import Depends, HTTPException
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from fastapi_jwt_auth import AuthJWT
from jose import JWTError
from back.utils.exceptions import Credentials
def get_current_user(auth: AuthJWT = Depends(), credentials: HTTPAuthorizationCredentials = Depends(HTTPBearer())):
    """Resolve the Django ``User`` named by the JWT subject of the request.

    The ``credentials`` dependency makes FastAPI require a Bearer
    ``Authorization`` header; the JWT subject is then looked up as a Django
    username.  Raises ``Credentials`` when the token has no subject, cannot
    be decoded, or names an unknown user.
    """
    try:
        subject = auth.get_jwt_subject()
        if subject is None:
            raise Credentials
    except JWTError:
        raise Credentials
    try:
        return User.objects.get(username=subject)
    except User.DoesNotExist:
        raise Credentials
def get_current_active_user(current_user: User = Depends(get_current_user)):
    """Return the authenticated user, rejecting inactive accounts with 400."""
    if current_user.is_active:
        return current_user
    raise HTTPException(status_code=400, detail="Inactive user")
| true | true |
1c31e3562725a00bab24cf0a53bf3d7bbc8ab967 | 32,836 | py | Python | djangosaml2/tests/__init__.py | yvess/djangosaml2 | 5561d114c8721abde6d7b9967e9cc36732bcfb9d | [
"Apache-2.0"
] | 2 | 2021-04-23T09:16:15.000Z | 2021-06-14T14:35:49.000Z | djangosaml2/tests/__init__.py | yvess/djangosaml2 | 5561d114c8721abde6d7b9967e9cc36732bcfb9d | [
"Apache-2.0"
] | 1 | 2018-11-26T17:02:54.000Z | 2018-11-26T17:02:54.000Z | djangosaml2/tests/__init__.py | yvess/djangosaml2 | 5561d114c8721abde6d7b9967e9cc36732bcfb9d | [
"Apache-2.0"
] | 1 | 2018-04-23T15:17:32.000Z | 2018-04-23T15:17:32.000Z | # Copyright (C) 2012 Sam Bull (lsb@pocketuniverse.ca)
# Copyright (C) 2011-2012 Yaco Sistemas (http://www.yaco.es)
# Copyright (C) 2010 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import base64
import re
from unittest import skip
import sys
from django.conf import settings
from django.contrib.auth import SESSION_KEY, get_user_model
from django.contrib.auth.models import AnonymousUser
from django.contrib.sessions.middleware import SessionMiddleware
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django.template import Template, Context
from django.test import TestCase
from django.test.client import RequestFactory
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.text import force_text
from django.utils.six.moves.urllib.parse import urlparse, parse_qs
from saml2.config import SPConfig
from saml2.s_utils import decode_base64_and_inflate, deflate_and_base64_encode
from djangosaml2 import views
from djangosaml2.cache import OutstandingQueriesCache
from djangosaml2.conf import get_config
from djangosaml2.tests import conf
from djangosaml2.tests.auth_response import auth_response
from djangosaml2.signals import post_authenticated
from djangosaml2.views import finish_logout
User = get_user_model()
PY_VERSION = sys.version_info[:2]
class SAML2Tests(TestCase):
urls = 'djangosaml2.tests.urls'
def setUp(self):
if hasattr(settings, 'SAML_ATTRIBUTE_MAPPING'):
self.actual_attribute_mapping = settings.SAML_ATTRIBUTE_MAPPING
del settings.SAML_ATTRIBUTE_MAPPING
if hasattr(settings, 'SAML_CONFIG_LOADER'):
self.actual_conf_loader = settings.SAML_CONFIG_LOADER
del settings.SAML_CONFIG_LOADER
def tearDown(self):
if hasattr(self, 'actual_attribute_mapping'):
settings.SAML_ATTRIBUTE_MAPPING = self.actual_attribute_mapping
if hasattr(self, 'actual_conf_loader'):
settings.SAML_CONFIG_LOADER = self.actual_conf_loader
def assertSAMLRequestsEquals(self, real_xml, expected_xmls):
def remove_variable_attributes(xml_string):
xml_string = re.sub(r' ID=".*?" ', ' ', xml_string)
xml_string = re.sub(r' IssueInstant=".*?" ', ' ', xml_string)
xml_string = re.sub(
r'<saml:NameID(.*)>.*</saml:NameID>',
r'<saml:NameID\1></saml:NameID>',
xml_string)
return xml_string
self.assertEqual(remove_variable_attributes(real_xml),
remove_variable_attributes(expected_xmls))
def init_cookies(self):
self.client.cookies[settings.SESSION_COOKIE_NAME] = 'testing'
def add_outstanding_query(self, session_id, came_from):
session = self.client.session
oq_cache = OutstandingQueriesCache(session)
oq_cache.set(session_id, came_from)
session.save()
self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
def render_template(self, text):
return Template(text).render(Context())
def b64_for_post(self, xml_text, encoding='utf-8'):
return base64.b64encode(xml_text.encode(encoding)).decode('ascii')
def test_login_evil_redirect(self):
"""
Make sure that if we give an URL other than our own host as the next
parameter, it is replaced with the default LOGIN_REDIRECT_URL.
"""
# monkey patch SAML configuration
settings.SAML_CONFIG = conf.create_conf(
sp_host='sp.example.com',
idp_hosts=['idp.example.com'],
metadata_file='remote_metadata_one_idp.xml',
)
response = self.client.get(reverse('saml2_login') + '?next=http://evil.com')
url = urlparse(response['Location'])
params = parse_qs(url.query)
self.assertEqual(params['RelayState'], [settings.LOGIN_REDIRECT_URL, ])
    def test_login_one_idp(self):
        """With a single IdP the login view must redirect straight to it.

        No WAYF step: the response is a 302 to the IdP's SSO service
        carrying a deflated, base64-encoded AuthnRequest and a RelayState;
        a ``next`` query argument must be propagated through RelayState.
        """
        # monkey patch SAML configuration
        settings.SAML_CONFIG = conf.create_conf(
            sp_host='sp.example.com',
            idp_hosts=['idp.example.com'],
            metadata_file='remote_metadata_one_idp.xml',
        )
        response = self.client.get(reverse('saml2_login'))
        self.assertEqual(response.status_code, 302)
        location = response['Location']
        url = urlparse(location)
        self.assertEqual(url.hostname, 'idp.example.com')
        self.assertEqual(url.path, '/simplesaml/saml2/idp/SSOService.php')
        params = parse_qs(url.query)
        self.assertIn('SAMLRequest', params)
        self.assertIn('RelayState', params)
        saml_request = params['SAMLRequest'][0]
        # The expected serialization differs between Python 2 and 3 only in
        # the leading XML declaration.
        if PY_VERSION < (3,):
            expected_request = """<?xml version='1.0' encoding='UTF-8'?>
<samlp:AuthnRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" AssertionConsumerServiceURL="http://sp.example.com/saml2/acs/" Destination="https://idp.example.com/simplesaml/saml2/idp/SSOService.php" ID="XXXXXXXXXXXXXXXXXXXXXX" IssueInstant="2010-01-01T00:00:00Z" ProtocolBinding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><samlp:NameIDPolicy AllowCreate="false" Format="urn:oasis:names:tc:SAML:2.0:nameid-format:persistent" /></samlp:AuthnRequest>"""
        else:
            expected_request = """<samlp:AuthnRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" AssertionConsumerServiceURL="http://sp.example.com/saml2/acs/" Destination="https://idp.example.com/simplesaml/saml2/idp/SSOService.php" ID="XXXXXXXXXXXXXXXXXXXXXX" IssueInstant="2010-01-01T00:00:00Z" ProtocolBinding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><samlp:NameIDPolicy AllowCreate="false" Format="urn:oasis:names:tc:SAML:2.0:nameid-format:persistent" /></samlp:AuthnRequest>"""
        self.assertSAMLRequestsEquals(
            decode_base64_and_inflate(saml_request).decode('utf-8'),
            expected_request)
        # if we set a next arg in the login view, it is preserved
        # in the RelayState argument
        next = '/another-view/'
        response = self.client.get(reverse('saml2_login'), {'next': next})
        self.assertEqual(response.status_code, 302)
        location = response['Location']
        url = urlparse(location)
        self.assertEqual(url.hostname, 'idp.example.com')
        self.assertEqual(url.path, '/simplesaml/saml2/idp/SSOService.php')
        params = parse_qs(url.query)
        self.assertIn('SAMLRequest', params)
        self.assertIn('RelayState', params)
        self.assertEqual(params['RelayState'][0], next)
    def test_login_several_idps(self):
        """With several IdPs the login view shows a WAYF page first.

        The WAYF page lists one link per configured IdP; following a link
        redirects to that IdP's SSO service with an AuthnRequest whose
        Destination matches the chosen IdP.
        """
        settings.SAML_CONFIG = conf.create_conf(
            sp_host='sp.example.com',
            idp_hosts=['idp1.example.com',
                       'idp2.example.com',
                       'idp3.example.com'],
            metadata_file='remote_metadata_three_idps.xml',
        )
        response = self.client.get(reverse('saml2_login'))
        # a WAYF page should be displayed
        self.assertContains(response, 'Where are you from?', status_code=200)
        for i in range(1, 4):
            link = '/login/?idp=https://idp%d.example.com/simplesaml/saml2/idp/metadata.php&next=/'
            self.assertContains(response, link % i)
        # click on the second idp
        response = self.client.get(reverse('saml2_login'), {
            'idp': 'https://idp2.example.com/simplesaml/saml2/idp/metadata.php',
            'next': '/',
        })
        self.assertEqual(response.status_code, 302)
        location = response['Location']
        url = urlparse(location)
        self.assertEqual(url.hostname, 'idp2.example.com')
        self.assertEqual(url.path, '/simplesaml/saml2/idp/SSOService.php')
        params = parse_qs(url.query)
        self.assertIn('SAMLRequest', params)
        self.assertIn('RelayState', params)
        saml_request = params['SAMLRequest'][0]
        # The expected serialization differs between Python 2 and 3 only in
        # the leading XML declaration.
        if PY_VERSION < (3,):
            expected_request = """<?xml version='1.0' encoding='UTF-8'?>
<samlp:AuthnRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" AssertionConsumerServiceURL="http://sp.example.com/saml2/acs/" Destination="https://idp2.example.com/simplesaml/saml2/idp/SSOService.php" ID="XXXXXXXXXXXXXXXXXXXXXX" IssueInstant="2010-01-01T00:00:00Z" ProtocolBinding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><samlp:NameIDPolicy AllowCreate="false" Format="urn:oasis:names:tc:SAML:2.0:nameid-format:persistent" /></samlp:AuthnRequest>"""
        else:
            expected_request = """<samlp:AuthnRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" AssertionConsumerServiceURL="http://sp.example.com/saml2/acs/" Destination="https://idp2.example.com/simplesaml/saml2/idp/SSOService.php" ID="XXXXXXXXXXXXXXXXXXXXXX" IssueInstant="2010-01-01T00:00:00Z" ProtocolBinding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><samlp:NameIDPolicy AllowCreate="false" Format="urn:oasis:names:tc:SAML:2.0:nameid-format:persistent" /></samlp:AuthnRequest>"""
        self.assertSAMLRequestsEquals(decode_base64_and_inflate(saml_request).decode('utf-8'),
                                      expected_request)
    def test_assertion_consumer_service(self):
        """A valid SAML response at the ACS logs the subject in.

        First login creates a new Django user ('student') and redirects to
        the RelayState URL; a second login with an existing user ('teacher')
        and an empty RelayState falls back to LOGIN_REDIRECT_URL.
        """
        # Get initial number of users
        initial_user_count = User.objects.count()
        settings.SAML_CONFIG = conf.create_conf(
            sp_host='sp.example.com',
            idp_hosts=['idp.example.com'],
            metadata_file='remote_metadata_one_idp.xml',
        )
        # session_id should start with a letter since it is a NCName
        session_id = "a0123456789abcdef0123456789abcdef"
        came_from = '/another-view/'
        self.add_outstanding_query(session_id, came_from)
        # this will create a user
        saml_response = auth_response(session_id, 'student')
        response = self.client.post(reverse('saml2_acs'), {
            'SAMLResponse': self.b64_for_post(saml_response),
            'RelayState': came_from,
        })
        self.assertEqual(response.status_code, 302)
        location = response['Location']
        url = urlparse(location)
        self.assertEqual(url.path, came_from)
        # Exactly one new user was created by the ACS view.
        self.assertEqual(User.objects.count(), initial_user_count + 1)
        user_id = self.client.session[SESSION_KEY]
        user = User.objects.get(id=user_id)
        self.assertEqual(user.username, 'student')
        # let's create another user and log in with that one
        new_user = User.objects.create(username='teacher', password='not-used')
        session_id = "a1111111111111111111111111111111"
        came_from = ''  # bad RelayState on purpose, let's see if we can deal with this
        saml_response = auth_response(session_id, 'teacher')
        self.add_outstanding_query(session_id, '/')
        response = self.client.post(reverse('saml2_acs'), {
            'SAMLResponse': self.b64_for_post(saml_response),
            'RelayState': came_from,
        })
        self.assertEqual(response.status_code, 302)
        location = response['Location']
        url = urlparse(location)
        # as the RelayState is empty we have redirect to LOGIN_REDIRECT_URL
        self.assertEqual(url.path, settings.LOGIN_REDIRECT_URL)
        self.assertEqual(force_text(new_user.id), self.client.session[SESSION_KEY])
    def test_assertion_consumer_service_no_session(self):
        """Replaying a SAML response whose session is gone must be rejected.

        The first POST consumes the outstanding query; a second POST with the
        same session id no longer matches any outstanding query, so the ACS
        answers 403 instead of logging in again.
        """
        settings.SAML_CONFIG = conf.create_conf(
            sp_host='sp.example.com',
            idp_hosts=['idp.example.com'],
            metadata_file='remote_metadata_one_idp.xml',
        )
        # session_id should start with a letter since it is a NCName
        session_id = "a0123456789abcdef0123456789abcdef"
        came_from = '/another-view/'
        self.add_outstanding_query(session_id, came_from)
        # Authentication is confirmed.
        saml_response = auth_response(session_id, 'student')
        response = self.client.post(reverse('saml2_acs'), {
            'SAMLResponse': self.b64_for_post(saml_response),
            'RelayState': came_from,
        })
        self.assertEqual(response.status_code, 302)
        location = response['Location']
        url = urlparse(location)
        self.assertEqual(url.path, came_from)
        # Session should no longer be in outstanding queries.
        saml_response = auth_response(session_id, 'student')
        response = self.client.post(reverse('saml2_acs'), {
            'SAMLResponse': self.b64_for_post(saml_response),
            'RelayState': came_from,
        })
        self.assertEqual(response.status_code, 403)
def test_missing_param_to_assertion_consumer_service_request(self):
# Send request without SAML2Response parameter
response = self.client.post(reverse('saml2_acs'))
# Assert that view responded with "Bad Request" error
self.assertEqual(response.status_code, 400)
def test_bad_request_method_to_assertion_consumer_service(self):
# Send request with non-POST method.
response = self.client.get(reverse('saml2_acs'))
# Assert that view responded with method not allowed status
self.assertEqual(response.status_code, 405)
def do_login(self):
"""Auxiliary method used in several tests (mainly logout tests)"""
self.init_cookies()
session_id = "a0123456789abcdef0123456789abcdef"
came_from = '/another-view/'
self.add_outstanding_query(session_id, came_from)
saml_response = auth_response(session_id, 'student')
# this will create a user
response = self.client.post(reverse('saml2_acs'), {
'SAMLResponse': self.b64_for_post(saml_response),
'RelayState': came_from,
})
self.assertEqual(response.status_code, 302)
    @skip("This is a known issue caused by pysaml2. Needs more investigation. Fixes are welcome.")
    def test_logout(self):
        """SP-initiated logout must redirect the browser to the IdP's
        single logout service carrying a well-formed LogoutRequest."""
        settings.SAML_CONFIG = conf.create_conf(
            sp_host='sp.example.com',
            idp_hosts=['idp.example.com'],
            metadata_file='remote_metadata_one_idp.xml',
        )
        self.do_login()
        response = self.client.get(reverse('saml2_logout'))
        self.assertEqual(response.status_code, 302)
        location = response['Location']
        url = urlparse(location)
        self.assertEqual(url.hostname, 'idp.example.com')
        self.assertEqual(url.path,
                         '/simplesaml/saml2/idp/SingleLogoutService.php')
        params = parse_qs(url.query)
        self.assertIn('SAMLRequest', params)
        saml_request = params['SAMLRequest'][0]
        # Python 2's ElementTree serializer emits an XML declaration;
        # Python 3's does not, hence two expected variants.
        if PY_VERSION < (3,):
            expected_request = """<?xml version='1.0' encoding='UTF-8'?>
<samlp:LogoutRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" Destination="https://idp.example.com/simplesaml/saml2/idp/SingleLogoutService.php" ID="XXXXXXXXXXXXXXXXXXXXXX" IssueInstant="2010-01-01T00:00:00Z" Reason="" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><saml:NameID Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient" SPNameQualifier="http://sp.example.com/saml2/metadata/">58bcc81ea14700f66aeb707a0eff1360</saml:NameID><samlp:SessionIndex>a0123456789abcdef0123456789abcdef</samlp:SessionIndex></samlp:LogoutRequest>"""
        else:
            expected_request = """<samlp:LogoutRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" Destination="https://idp.example.com/simplesaml/saml2/idp/SingleLogoutService.php" ID="XXXXXXXXXXXXXXXXXXXXXX" IssueInstant="2010-01-01T00:00:00Z" Reason="" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><saml:NameID Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient" SPNameQualifier="http://sp.example.com/saml2/metadata/">58bcc81ea14700f66aeb707a0eff1360</saml:NameID><samlp:SessionIndex>a0123456789abcdef0123456789abcdef</samlp:SessionIndex></samlp:LogoutRequest>"""
        # ID/IssueInstant/NameID differ per run and are normalized away.
        self.assertSAMLRequestsEquals(decode_base64_and_inflate(saml_request).decode('utf-8'),
                                      expected_request)
    def test_logout_service_local(self):
        """Full SP-initiated logout round trip: redirect to the IdP with a
        LogoutRequest, then a simulated IdP LogoutResponse that must clear
        the local session."""
        settings.SAML_CONFIG = conf.create_conf(
            sp_host='sp.example.com',
            idp_hosts=['idp.example.com'],
            metadata_file='remote_metadata_one_idp.xml',
        )
        self.do_login()
        response = self.client.get(reverse('saml2_logout'))
        self.assertEqual(response.status_code, 302)
        location = response['Location']
        url = urlparse(location)
        self.assertEqual(url.hostname, 'idp.example.com')
        self.assertEqual(url.path,
                         '/simplesaml/saml2/idp/SingleLogoutService.php')
        params = parse_qs(url.query)
        self.assertIn('SAMLRequest', params)
        saml_request = params['SAMLRequest'][0]
        # Python 2's ElementTree serializer emits an XML declaration;
        # Python 3's does not, hence two expected variants.
        if PY_VERSION < (3,):
            expected_request = """<?xml version='1.0' encoding='UTF-8'?>
<samlp:LogoutRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" Destination="https://idp.example.com/simplesaml/saml2/idp/SingleLogoutService.php" ID="XXXXXXXXXXXXXXXXXXXXXX" IssueInstant="2010-01-01T00:00:00Z" Reason="" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><saml:NameID Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient" SPNameQualifier="http://sp.example.com/saml2/metadata/">58bcc81ea14700f66aeb707a0eff1360</saml:NameID><samlp:SessionIndex>a0123456789abcdef0123456789abcdef</samlp:SessionIndex></samlp:LogoutRequest>"""
        else:
            expected_request = """<samlp:LogoutRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" Destination="https://idp.example.com/simplesaml/saml2/idp/SingleLogoutService.php" ID="XXXXXXXXXXXXXXXXXXXXXX" IssueInstant="2010-01-01T00:00:00Z" Reason="" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><saml:NameID Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient" SPNameQualifier="http://sp.example.com/saml2/metadata/">58bcc81ea14700f66aeb707a0eff1360</saml:NameID><samlp:SessionIndex>a0123456789abcdef0123456789abcdef</samlp:SessionIndex></samlp:LogoutRequest>"""
        self.assertSAMLRequestsEquals(decode_base64_and_inflate(saml_request).decode('utf-8'),
                                      expected_request)
        # now simulate a logout response sent by the idp, echoing the
        # request id back as InResponseTo.
        request_id = re.findall(r' ID="(.*?)" ', expected_request)[0]
        instant = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
        saml_response = """<?xml version='1.0' encoding='UTF-8'?>
<samlp:LogoutResponse xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" Destination="http://sp.example.com/saml2/ls/" ID="a140848e7ce2bce834d7264ecdde0151" InResponseTo="%s" IssueInstant="%s" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">https://idp.example.com/simplesaml/saml2/idp/metadata.php</saml:Issuer><samlp:Status><samlp:StatusCode Value="urn:oasis:names:tc:SAML:2.0:status:Success" /></samlp:Status></samlp:LogoutResponse>""" % (
            request_id, instant)
        response = self.client.get(reverse('saml2_ls'), {
            'SAMLResponse': deflate_and_base64_encode(saml_response),
        })
        self.assertContains(response, "Logged out", status_code=200)
        # The Django session must be completely emptied.
        self.assertListEqual(list(self.client.session.keys()), [])
    def test_logout_service_global(self):
        """IdP-initiated (global) logout: an incoming LogoutRequest for our
        subject must be answered with a Success LogoutResponse redirect."""
        settings.SAML_CONFIG = conf.create_conf(
            sp_host='sp.example.com',
            idp_hosts=['idp.example.com'],
            metadata_file='remote_metadata_one_idp.xml',
        )
        self.do_login()
        # now simulate a global logout process initiated by another SP:
        # the IdP sends us a LogoutRequest naming our transient subject id.
        subject_id = views._get_subject_id(self.client.session)
        instant = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
        saml_request = """<?xml version='1.0' encoding='UTF-8'?>
<samlp:LogoutRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" ID="_9961abbaae6d06d251226cb25e38bf8f468036e57e" Version="2.0" IssueInstant="%s" Destination="http://sp.example.com/saml2/ls/"><saml:Issuer>https://idp.example.com/simplesaml/saml2/idp/metadata.php</saml:Issuer><saml:NameID SPNameQualifier="http://sp.example.com/saml2/metadata/" Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient">%s</saml:NameID><samlp:SessionIndex>_1837687b7bc9faad85839dbeb319627889f3021757</samlp:SessionIndex></samlp:LogoutRequest>""" % (instant, subject_id.text)
        response = self.client.get(reverse('saml2_ls'), {
            'SAMLRequest': deflate_and_base64_encode(saml_request),
        })
        # The view must bounce the browser back to the IdP's SLO service
        # with a LogoutResponse.
        self.assertEqual(response.status_code, 302)
        location = response['Location']
        url = urlparse(location)
        self.assertEqual(url.hostname, 'idp.example.com')
        self.assertEqual(url.path,
                         '/simplesaml/saml2/idp/SingleLogoutService.php')
        params = parse_qs(url.query)
        self.assertIn('SAMLResponse', params)
        saml_response = params['SAMLResponse'][0]
        # Python 2's ElementTree serializer emits an XML declaration;
        # Python 3's does not, hence two expected variants.
        if PY_VERSION < (3,):
            expected_response = """<?xml version='1.0' encoding='UTF-8'?>
<samlp:LogoutResponse xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" Destination="https://idp.example.com/simplesaml/saml2/idp/SingleLogoutService.php" ID="a140848e7ce2bce834d7264ecdde0151" InResponseTo="_9961abbaae6d06d251226cb25e38bf8f468036e57e" IssueInstant="2010-09-05T09:10:12Z" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><samlp:Status><samlp:StatusCode Value="urn:oasis:names:tc:SAML:2.0:status:Success" /></samlp:Status></samlp:LogoutResponse>"""
        else:
            expected_response = """<samlp:LogoutResponse xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" Destination="https://idp.example.com/simplesaml/saml2/idp/SingleLogoutService.php" ID="a140848e7ce2bce834d7264ecdde0151" InResponseTo="_9961abbaae6d06d251226cb25e38bf8f468036e57e" IssueInstant="2010-09-05T09:10:12Z" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><samlp:Status><samlp:StatusCode Value="urn:oasis:names:tc:SAML:2.0:status:Success" /></samlp:Status></samlp:LogoutResponse>"""
        self.assertSAMLRequestsEquals(decode_base64_and_inflate(saml_response).decode('utf-8'),
                                      expected_response)
    def test_incomplete_logout(self):
        """A LogoutRequest for a subject we never logged in must be
        rejected with a 403 'Logout error' page."""
        settings.SAML_CONFIG = conf.create_conf(sp_host='sp.example.com',
                                                idp_hosts=['idp.example.com'])
        # don't do a login
        # now simulate a global logout process initiated by another SP
        instant = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
        saml_request = '<samlp:LogoutRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" ID="_9961abbaae6d06d251226cb25e38bf8f468036e57e" Version="2.0" IssueInstant="%s" Destination="http://sp.example.com/saml2/ls/"><saml:Issuer>https://idp.example.com/simplesaml/saml2/idp/metadata.php</saml:Issuer><saml:NameID SPNameQualifier="http://sp.example.com/saml2/metadata/" Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient">%s</saml:NameID><samlp:SessionIndex>_1837687b7bc9faad85839dbeb319627889f3021757</samlp:SessionIndex></samlp:LogoutRequest>' % (
            instant, 'invalid-subject-id')
        response = self.client.get(reverse('saml2_ls'), {
            'SAMLRequest': deflate_and_base64_encode(saml_request),
        })
        self.assertContains(response, 'Logout error', status_code=403)
def test_finish_logout_renders_error_template(self):
request = RequestFactory().get('/bar/foo')
response = finish_logout(request, None)
self.assertContains(response, "<h1>Logout error</h1>", status_code=200)
    def _test_metadata(self):
        """Disabled test (leading underscore keeps the runner from picking
        it up): the /metadata/ view should serve the SP's entity descriptor
        as XML, valid for 24 hours from now."""
        settings.SAML_CONFIG = conf.create_conf(
            sp_host='sp.example.com',
            idp_hosts=['idp.example.com'],
            metadata_file='remote_metadata_one_idp.xml',
        )
        # The generated metadata carries a validUntil 24h in the future.
        valid_until = datetime.datetime.utcnow() + datetime.timedelta(hours=24)
        valid_until = valid_until.strftime("%Y-%m-%dT%H:%M:%SZ")
        expected_metadata = """<?xml version='1.0' encoding='UTF-8'?>
<md:EntityDescriptor entityID="http://sp.example.com/saml2/metadata/" validUntil="%s" xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata"><md:SPSSODescriptor AuthnRequestsSigned="false" WantAssertionsSigned="true" protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol"><md:KeyDescriptor><ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#"><ds:X509Data><ds:X509Certificate>MIIDPjCCAiYCCQCkHjPQlll+mzANBgkqhkiG9w0BAQUFADBhMQswCQYDVQQGEwJF
UzEQMA4GA1UECBMHU2V2aWxsYTEbMBkGA1UEChMSWWFjbyBTaXN0ZW1hcyBTLkwu
MRAwDgYDVQQHEwdTZXZpbGxhMREwDwYDVQQDEwh0aWNvdGljbzAeFw0wOTEyMDQx
OTQzNTJaFw0xMDEyMDQxOTQzNTJaMGExCzAJBgNVBAYTAkVTMRAwDgYDVQQIEwdT
ZXZpbGxhMRswGQYDVQQKExJZYWNvIFNpc3RlbWFzIFMuTC4xEDAOBgNVBAcTB1Nl
dmlsbGExETAPBgNVBAMTCHRpY290aWNvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
MIIBCgKCAQEA7rMOMOaIZ/YYD5hYS6Hpjpovcu4k8gaIY+om9zCxLV5F8BLEfkxo
Pk9IA3cRQNRxf7AXCFxEOH3nKy56AIi1gU7X6fCT30JBT8NQlYdgOVMLlR+tjy1b
YV07tDa9U8gzjTyKQHgVwH0436+rmSPnacGj3fMwfySTMhtmrJmax0bIa8EB+gY1
77DBtvf8dIZIXLlGMQFloZeUspvHOrgNoEA9xU4E9AanGnV9HeV37zv3mLDUOQLx
4tk9sMQmylCpij7WZmcOV07DyJ/cEmnvHSalBTcyIgkcwlhmjtSgfCy6o5zuWxYd
T9ia80SZbWzn8N6B0q+nq23+Oee9H0lvcwIDAQABMA0GCSqGSIb3DQEBBQUAA4IB
AQCQBhKOqucJZAqGHx4ybDXNzpPethszonLNVg5deISSpWagy55KlGCi5laio/xq
hHRx18eTzeCeLHQYvTQxw0IjZOezJ1X30DD9lEqPr6C+IrmZc6bn/pF76xsvdaRS
gduNQPT1B25SV2HrEmbf8wafSlRARmBsyUHh860TqX7yFVjhYIAUF/El9rLca51j
ljCIqqvT+klPdjQoZwODWPFHgute2oNRmoIcMjSnoy1+mxOC2Q/j7kcD8/etulg2
XDxB3zD81gfdtT8VBFP+G4UrBa+5zFk6fT6U8a7ZqVsyH+rCXAdCyVlEC4Y5fZri
ID4zT0FcZASGuthM56rRJJSx
</ds:X509Certificate></ds:X509Data></ds:KeyInfo></md:KeyDescriptor><md:SingleLogoutService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="http://sp.example.com/saml2/ls/" /><md:AssertionConsumerService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" Location="http://sp.example.com/saml2/acs/" index="1" /><md:AttributeConsumingService index="1"><md:ServiceName xml:lang="en">Test SP</md:ServiceName><md:RequestedAttribute FriendlyName="uid" Name="urn:oid:0.9.2342.19200300.100.1.1" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri" isRequired="true" /><md:RequestedAttribute FriendlyName="eduPersonAffiliation" Name="urn:oid:1.3.6.1.4.1.5923.1.1.1.1" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri" isRequired="false" /></md:AttributeConsumingService></md:SPSSODescriptor><md:Organization><md:OrganizationName xml:lang="es">Ejemplo S.A.</md:OrganizationName><md:OrganizationName xml:lang="en">Example Inc.</md:OrganizationName><md:OrganizationDisplayName xml:lang="es">Ejemplo</md:OrganizationDisplayName><md:OrganizationDisplayName xml:lang="en">Example</md:OrganizationDisplayName><md:OrganizationURL xml:lang="es">http://www.example.es</md:OrganizationURL><md:OrganizationURL xml:lang="en">http://www.example.com</md:OrganizationURL></md:Organization><md:ContactPerson contactType="technical"><md:Company>Example Inc.</md:Company><md:GivenName>Technical givenname</md:GivenName><md:SurName>Technical surname</md:SurName><md:EmailAddress>technical@sp.example.com</md:EmailAddress></md:ContactPerson><md:ContactPerson contactType="administrative"><md:Company>Example Inc.</md:Company><md:GivenName>Administrative givenname</md:GivenName><md:SurName>Administrative surname</md:SurName><md:EmailAddress>administrative@sp.example.ccom</md:EmailAddress></md:ContactPerson></md:EntityDescriptor>"""
        expected_metadata = expected_metadata % valid_until
        response = self.client.get('/metadata/')
        # NOTE(review): asserts 'charset=utf8' (no dash) — presumably
        # matching what the view actually emits; confirm if re-enabling.
        self.assertEqual(response['Content-type'], 'text/xml; charset=utf8')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, expected_metadata)
    def test_post_authenticated_signal(self):
        """The post_authenticated signal must deliver a real User instance
        to connected receivers during login."""
        def signal_handler(signal, user, session_info):
            # NOTE(review): the receiver expects keyword args named
            # 'user' and 'session_info' — assumes djangosaml2 sends the
            # signal with exactly those names; confirm against its send().
            self.assertEqual(isinstance(user, User), True)
        post_authenticated.connect(signal_handler, dispatch_uid='test_signal')
        self.do_login()
        # Disconnect so the handler does not leak into other tests.
        post_authenticated.disconnect(dispatch_uid='test_signal')
    def test_idplist_templatetag(self):
        """The {% idplist %} template tag must expose every configured IdP
        as a url -> display-name mapping."""
        settings.SAML_CONFIG = conf.create_conf(
            sp_host='sp.example.com',
            idp_hosts=['idp1.example.com',
                       'idp2.example.com',
                       'idp3.example.com'],
            metadata_file='remote_metadata_three_idps.xml',
        )
        rendered = self.render_template(
            '{% load idplist %}'
            '{% idplist as idps %}'
            '{% for url, name in idps.items %}'
            '{{ url }} - {{ name }}; '
            '{% endfor %}'
        )
        # the idplist is unordered, so convert the result into a set.
        rendered = set(rendered.split('; '))
        expected = set([
            u'https://idp1.example.com/simplesaml/saml2/idp/metadata.php - idp1.example.com IdP',
            u'https://idp2.example.com/simplesaml/saml2/idp/metadata.php - idp2.example.com IdP',
            u'https://idp3.example.com/simplesaml/saml2/idp/metadata.php - idp3.example.com IdP',
            u'',  # trailing '; ' yields one empty element after the split
        ])
        self.assertEqual(rendered, expected)
def test_config_loader(request):
    """Minimal config loader referenced by dotted path in ConfTests.

    Ignores *request* and returns an SPConfig carrying only an entityid.
    """
    sp_config = SPConfig()
    sp_config.load({'entityid': 'testentity'})
    return sp_config
def test_config_loader_with_real_conf(request):
    """Config loader referenced by dotted path in ConfTests.

    Ignores *request* and returns an SPConfig built from a realistic
    one-IdP test configuration.
    """
    sp_config = SPConfig()
    test_settings = conf.create_conf(
        sp_host='sp.example.com',
        idp_hosts=['idp.example.com'],
        metadata_file='remote_metadata_one_idp.xml')
    sp_config.load(test_settings)
    return sp_config
class ConfTests(TestCase):
    """Tests for the pluggable configuration-loader machinery."""

    def test_custom_conf_loader(self):
        """get_config() imports the dotted path and calls it with the
        request, returning whatever SPConfig the loader builds."""
        config_loader_path = 'djangosaml2.tests.test_config_loader'
        request = RequestFactory().get('/bar/foo')
        conf = get_config(config_loader_path, request)
        self.assertEqual(conf.entityid, 'testentity')

    def test_custom_conf_loader_from_view(self):
        """The login view accepts an explicit config_loader_path and uses
        it to build the SP configuration for the redirect."""
        config_loader_path = 'djangosaml2.tests.test_config_loader_with_real_conf'
        request = RequestFactory().get('/login/')
        request.user = AnonymousUser()
        # Attach a real session: the view stores SAML state in it.
        middleware = SessionMiddleware()
        middleware.process_request(request)
        request.session.save()
        response = views.login(request, config_loader_path)
        self.assertEqual(response.status_code, 302)
        location = response['Location']
        url = urlparse(location)
        self.assertEqual(url.hostname, 'idp.example.com')
        self.assertEqual(url.path, '/simplesaml/saml2/idp/SSOService.php')
| 58.116814 | 1,852 | 0.701578 |
import datetime
import base64
import re
from unittest import skip
import sys
from django.conf import settings
from django.contrib.auth import SESSION_KEY, get_user_model
from django.contrib.auth.models import AnonymousUser
from django.contrib.sessions.middleware import SessionMiddleware
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django.template import Template, Context
from django.test import TestCase
from django.test.client import RequestFactory
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.text import force_text
from django.utils.six.moves.urllib.parse import urlparse, parse_qs
from saml2.config import SPConfig
from saml2.s_utils import decode_base64_and_inflate, deflate_and_base64_encode
from djangosaml2 import views
from djangosaml2.cache import OutstandingQueriesCache
from djangosaml2.conf import get_config
from djangosaml2.tests import conf
from djangosaml2.tests.auth_response import auth_response
from djangosaml2.signals import post_authenticated
from djangosaml2.views import finish_logout
# Resolve the active user model once at import time.
User = get_user_model()
# (major, minor) tuple; py2 and py3 serializers emit slightly different XML.
PY_VERSION = sys.version_info[:2]
class SAML2Tests(TestCase):
urls = 'djangosaml2.tests.urls'
    def setUp(self):
        """Stash and remove settings that individual tests override, so
        every test starts from a clean SAML configuration."""
        if hasattr(settings, 'SAML_ATTRIBUTE_MAPPING'):
            self.actual_attribute_mapping = settings.SAML_ATTRIBUTE_MAPPING
            del settings.SAML_ATTRIBUTE_MAPPING
        if hasattr(settings, 'SAML_CONFIG_LOADER'):
            self.actual_conf_loader = settings.SAML_CONFIG_LOADER
            del settings.SAML_CONFIG_LOADER
    def tearDown(self):
        """Restore the settings that setUp() stashed away."""
        if hasattr(self, 'actual_attribute_mapping'):
            settings.SAML_ATTRIBUTE_MAPPING = self.actual_attribute_mapping
        if hasattr(self, 'actual_conf_loader'):
            settings.SAML_CONFIG_LOADER = self.actual_conf_loader
    def assertSAMLRequestsEquals(self, real_xml, expected_xmls):
        """Compare two SAML documents while ignoring the attributes that
        change on every run (ID, IssueInstant) and the NameID text."""
        def remove_variable_attributes(xml_string):
            # Strip the per-request ID and timestamp attributes.
            xml_string = re.sub(r' ID=".*?" ', ' ', xml_string)
            xml_string = re.sub(r' IssueInstant=".*?" ', ' ', xml_string)
            # Blank out the transient NameID value, keeping its attributes.
            xml_string = re.sub(
                r'<saml:NameID(.*)>.*</saml:NameID>',
                r'<saml:NameID\1></saml:NameID>',
                xml_string)
            return xml_string
        self.assertEqual(remove_variable_attributes(real_xml),
                         remove_variable_attributes(expected_xmls))
def init_cookies(self):
self.client.cookies[settings.SESSION_COOKIE_NAME] = 'testing'
    def add_outstanding_query(self, session_id, came_from):
        """Register *session_id* in the outstanding-queries cache so a
        later SAML response referencing it is accepted, then persist the
        session and point the client's cookie at it."""
        session = self.client.session
        oq_cache = OutstandingQueriesCache(session)
        oq_cache.set(session_id, came_from)
        session.save()
        # Make subsequent client requests use the session we just saved.
        self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
def render_template(self, text):
return Template(text).render(Context())
def b64_for_post(self, xml_text, encoding='utf-8'):
return base64.b64encode(xml_text.encode(encoding)).decode('ascii')
    def test_login_evil_redirect(self):
        """A ``next`` parameter pointing at a foreign host must be ignored
        and replaced with LOGIN_REDIRECT_URL (open-redirect protection)."""
        settings.SAML_CONFIG = conf.create_conf(
            sp_host='sp.example.com',
            idp_hosts=['idp.example.com'],
            metadata_file='remote_metadata_one_idp.xml',
        )
        response = self.client.get(reverse('saml2_login') + '?next=http://evil.com')
        url = urlparse(response['Location'])
        params = parse_qs(url.query)
        # RelayState must fall back to the safe default, not evil.com.
        self.assertEqual(params['RelayState'], [settings.LOGIN_REDIRECT_URL, ])
    def test_login_one_idp(self):
        """With a single IdP configured, the login view must redirect
        straight to that IdP carrying a valid AuthnRequest, and must
        propagate a ``next`` parameter as RelayState."""
        settings.SAML_CONFIG = conf.create_conf(
            sp_host='sp.example.com',
            idp_hosts=['idp.example.com'],
            metadata_file='remote_metadata_one_idp.xml',
        )
        response = self.client.get(reverse('saml2_login'))
        self.assertEqual(response.status_code, 302)
        location = response['Location']
        url = urlparse(location)
        self.assertEqual(url.hostname, 'idp.example.com')
        self.assertEqual(url.path, '/simplesaml/saml2/idp/SSOService.php')
        params = parse_qs(url.query)
        self.assertIn('SAMLRequest', params)
        self.assertIn('RelayState', params)
        saml_request = params['SAMLRequest'][0]
        # Python 2's ElementTree serializer emits an XML declaration;
        # Python 3's does not, hence two expected variants.
        if PY_VERSION < (3,):
            expected_request = """<?xml version='1.0' encoding='UTF-8'?>
<samlp:AuthnRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" AssertionConsumerServiceURL="http://sp.example.com/saml2/acs/" Destination="https://idp.example.com/simplesaml/saml2/idp/SSOService.php" ID="XXXXXXXXXXXXXXXXXXXXXX" IssueInstant="2010-01-01T00:00:00Z" ProtocolBinding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><samlp:NameIDPolicy AllowCreate="false" Format="urn:oasis:names:tc:SAML:2.0:nameid-format:persistent" /></samlp:AuthnRequest>"""
        else:
            expected_request = """<samlp:AuthnRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" AssertionConsumerServiceURL="http://sp.example.com/saml2/acs/" Destination="https://idp.example.com/simplesaml/saml2/idp/SSOService.php" ID="XXXXXXXXXXXXXXXXXXXXXX" IssueInstant="2010-01-01T00:00:00Z" ProtocolBinding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><samlp:NameIDPolicy AllowCreate="false" Format="urn:oasis:names:tc:SAML:2.0:nameid-format:persistent" /></samlp:AuthnRequest>"""
        self.assertSAMLRequestsEquals(
            decode_base64_and_inflate(saml_request).decode('utf-8'),
            expected_request)
        # A 'next' query parameter must be carried through as RelayState.
        next = '/another-view/'
        response = self.client.get(reverse('saml2_login'), {'next': next})
        self.assertEqual(response.status_code, 302)
        location = response['Location']
        url = urlparse(location)
        self.assertEqual(url.hostname, 'idp.example.com')
        self.assertEqual(url.path, '/simplesaml/saml2/idp/SSOService.php')
        params = parse_qs(url.query)
        self.assertIn('SAMLRequest', params)
        self.assertIn('RelayState', params)
        self.assertEqual(params['RelayState'][0], next)
    def test_login_several_idps(self):
        """With several IdPs configured, the login view must first show a
        WAYF (where-are-you-from) page, then redirect to the chosen IdP."""
        settings.SAML_CONFIG = conf.create_conf(
            sp_host='sp.example.com',
            idp_hosts=['idp1.example.com',
                       'idp2.example.com',
                       'idp3.example.com'],
            metadata_file='remote_metadata_three_idps.xml',
        )
        response = self.client.get(reverse('saml2_login'))
        # A discovery page listing every IdP is rendered.
        self.assertContains(response, 'Where are you from?', status_code=200)
        for i in range(1, 4):
            link = '/login/?idp=https://idp%d.example.com/simplesaml/saml2/idp/metadata.php&next=/'
            self.assertContains(response, link % i)
        # Selecting an IdP explicitly triggers the redirect to it.
        response = self.client.get(reverse('saml2_login'), {
            'idp': 'https://idp2.example.com/simplesaml/saml2/idp/metadata.php',
            'next': '/',
        })
        self.assertEqual(response.status_code, 302)
        location = response['Location']
        url = urlparse(location)
        self.assertEqual(url.hostname, 'idp2.example.com')
        self.assertEqual(url.path, '/simplesaml/saml2/idp/SSOService.php')
        params = parse_qs(url.query)
        self.assertIn('SAMLRequest', params)
        self.assertIn('RelayState', params)
        saml_request = params['SAMLRequest'][0]
        # Python 2's ElementTree serializer emits an XML declaration;
        # Python 3's does not, hence two expected variants.
        if PY_VERSION < (3,):
            expected_request = """<?xml version='1.0' encoding='UTF-8'?>
<samlp:AuthnRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" AssertionConsumerServiceURL="http://sp.example.com/saml2/acs/" Destination="https://idp2.example.com/simplesaml/saml2/idp/SSOService.php" ID="XXXXXXXXXXXXXXXXXXXXXX" IssueInstant="2010-01-01T00:00:00Z" ProtocolBinding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><samlp:NameIDPolicy AllowCreate="false" Format="urn:oasis:names:tc:SAML:2.0:nameid-format:persistent" /></samlp:AuthnRequest>"""
        else:
            expected_request = """<samlp:AuthnRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" AssertionConsumerServiceURL="http://sp.example.com/saml2/acs/" Destination="https://idp2.example.com/simplesaml/saml2/idp/SSOService.php" ID="XXXXXXXXXXXXXXXXXXXXXX" IssueInstant="2010-01-01T00:00:00Z" ProtocolBinding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><samlp:NameIDPolicy AllowCreate="false" Format="urn:oasis:names:tc:SAML:2.0:nameid-format:persistent" /></samlp:AuthnRequest>"""
        self.assertSAMLRequestsEquals(decode_base64_and_inflate(saml_request).decode('utf-8'),
                                      expected_request)
    def test_assertion_consumer_service(self):
        """A valid SAML response posted to the ACS must create and log in
        a user, redirecting to RelayState (or LOGIN_REDIRECT_URL when the
        RelayState is empty)."""
        initial_user_count = User.objects.count()
        settings.SAML_CONFIG = conf.create_conf(
            sp_host='sp.example.com',
            idp_hosts=['idp.example.com'],
            metadata_file='remote_metadata_one_idp.xml',
        )
        # session_id must be a valid NCName (starts with a letter).
        session_id = "a0123456789abcdef0123456789abcdef"
        came_from = '/another-view/'
        self.add_outstanding_query(session_id, came_from)
        saml_response = auth_response(session_id, 'student')
        response = self.client.post(reverse('saml2_acs'), {
            'SAMLResponse': self.b64_for_post(saml_response),
            'RelayState': came_from,
        })
        self.assertEqual(response.status_code, 302)
        location = response['Location']
        url = urlparse(location)
        self.assertEqual(url.path, came_from)
        # A brand new user was created and is now logged in.
        self.assertEqual(User.objects.count(), initial_user_count + 1)
        user_id = self.client.session[SESSION_KEY]
        user = User.objects.get(id=user_id)
        self.assertEqual(user.username, 'student')
        # Second round: the user already exists, and the RelayState is
        # empty, so the view should fall back to LOGIN_REDIRECT_URL.
        new_user = User.objects.create(username='teacher', password='not-used')
        session_id = "a1111111111111111111111111111111"
        came_from = ''  # bad, let's see if we can deal with this
        saml_response = auth_response(session_id, 'teacher')
        self.add_outstanding_query(session_id, '/')
        response = self.client.post(reverse('saml2_acs'), {
            'SAMLResponse': self.b64_for_post(saml_response),
            'RelayState': came_from,
        })
        self.assertEqual(response.status_code, 302)
        location = response['Location']
        url = urlparse(location)
        self.assertEqual(url.path, settings.LOGIN_REDIRECT_URL)
        # The existing 'teacher' user is the one logged in now.
        self.assertEqual(force_text(new_user.id), self.client.session[SESSION_KEY])
    def test_assertion_consumer_service_no_session(self):
        """The ACS view must reject a SAMLResponse whose session id is no
        longer in the outstanding-queries cache (replay protection)."""
        settings.SAML_CONFIG = conf.create_conf(
            sp_host='sp.example.com',
            idp_hosts=['idp.example.com'],
            metadata_file='remote_metadata_one_idp.xml',
        )
        # session_id must be a valid NCName (starts with a letter).
        session_id = "a0123456789abcdef0123456789abcdef"
        came_from = '/another-view/'
        self.add_outstanding_query(session_id, came_from)
        # First post: authentication is accepted and redirects back.
        saml_response = auth_response(session_id, 'student')
        response = self.client.post(reverse('saml2_acs'), {
            'SAMLResponse': self.b64_for_post(saml_response),
            'RelayState': came_from,
        })
        self.assertEqual(response.status_code, 302)
        location = response['Location']
        url = urlparse(location)
        self.assertEqual(url.path, came_from)
        # The session id was consumed above; replaying must be forbidden.
        saml_response = auth_response(session_id, 'student')
        response = self.client.post(reverse('saml2_acs'), {
            'SAMLResponse': self.b64_for_post(saml_response),
            'RelayState': came_from,
        })
        self.assertEqual(response.status_code, 403)
def test_missing_param_to_assertion_consumer_service_request(self):
response = self.client.post(reverse('saml2_acs'))
self.assertEqual(response.status_code, 400)
def test_bad_request_method_to_assertion_consumer_service(self):
response = self.client.get(reverse('saml2_acs'))
self.assertEqual(response.status_code, 405)
    def do_login(self):
        """Auxiliary method used by several tests (mainly logout tests):
        performs a full simulated login against the ACS view, creating and
        logging in a user on ``self.client`` as a side effect."""
        self.init_cookies()
        # session_id must be a valid NCName (starts with a letter).
        session_id = "a0123456789abcdef0123456789abcdef"
        came_from = '/another-view/'
        self.add_outstanding_query(session_id, came_from)
        saml_response = auth_response(session_id, 'student')
        # Posting the canned response creates the 'student' user.
        response = self.client.post(reverse('saml2_acs'), {
            'SAMLResponse': self.b64_for_post(saml_response),
            'RelayState': came_from,
        })
        self.assertEqual(response.status_code, 302)
    @skip("This is a known issue caused by pysaml2. Needs more investigation. Fixes are welcome.")
    def test_logout(self):
        """SP-initiated logout must redirect the browser to the IdP's
        single logout service carrying a well-formed LogoutRequest."""
        settings.SAML_CONFIG = conf.create_conf(
            sp_host='sp.example.com',
            idp_hosts=['idp.example.com'],
            metadata_file='remote_metadata_one_idp.xml',
        )
        self.do_login()
        response = self.client.get(reverse('saml2_logout'))
        self.assertEqual(response.status_code, 302)
        location = response['Location']
        url = urlparse(location)
        self.assertEqual(url.hostname, 'idp.example.com')
        self.assertEqual(url.path,
                         '/simplesaml/saml2/idp/SingleLogoutService.php')
        params = parse_qs(url.query)
        self.assertIn('SAMLRequest', params)
        saml_request = params['SAMLRequest'][0]
        # Python 2's ElementTree serializer emits an XML declaration;
        # Python 3's does not, hence two expected variants.
        if PY_VERSION < (3,):
            expected_request = """<?xml version='1.0' encoding='UTF-8'?>
<samlp:LogoutRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" Destination="https://idp.example.com/simplesaml/saml2/idp/SingleLogoutService.php" ID="XXXXXXXXXXXXXXXXXXXXXX" IssueInstant="2010-01-01T00:00:00Z" Reason="" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><saml:NameID Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient" SPNameQualifier="http://sp.example.com/saml2/metadata/">58bcc81ea14700f66aeb707a0eff1360</saml:NameID><samlp:SessionIndex>a0123456789abcdef0123456789abcdef</samlp:SessionIndex></samlp:LogoutRequest>"""
        else:
            expected_request = """<samlp:LogoutRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" Destination="https://idp.example.com/simplesaml/saml2/idp/SingleLogoutService.php" ID="XXXXXXXXXXXXXXXXXXXXXX" IssueInstant="2010-01-01T00:00:00Z" Reason="" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><saml:NameID Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient" SPNameQualifier="http://sp.example.com/saml2/metadata/">58bcc81ea14700f66aeb707a0eff1360</saml:NameID><samlp:SessionIndex>a0123456789abcdef0123456789abcdef</samlp:SessionIndex></samlp:LogoutRequest>"""
        self.assertSAMLRequestsEquals(decode_base64_and_inflate(saml_request).decode('utf-8'),
                                      expected_request)
    def test_logout_service_local(self):
        """Full SP-initiated logout round trip: redirect to the IdP with a
        LogoutRequest, then a simulated IdP LogoutResponse that must clear
        the local session."""
        settings.SAML_CONFIG = conf.create_conf(
            sp_host='sp.example.com',
            idp_hosts=['idp.example.com'],
            metadata_file='remote_metadata_one_idp.xml',
        )
        self.do_login()
        response = self.client.get(reverse('saml2_logout'))
        self.assertEqual(response.status_code, 302)
        location = response['Location']
        url = urlparse(location)
        self.assertEqual(url.hostname, 'idp.example.com')
        self.assertEqual(url.path,
                         '/simplesaml/saml2/idp/SingleLogoutService.php')
        params = parse_qs(url.query)
        self.assertIn('SAMLRequest', params)
        saml_request = params['SAMLRequest'][0]
        # Python 2's ElementTree serializer emits an XML declaration;
        # Python 3's does not, hence two expected variants.
        if PY_VERSION < (3,):
            expected_request = """<?xml version='1.0' encoding='UTF-8'?>
<samlp:LogoutRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" Destination="https://idp.example.com/simplesaml/saml2/idp/SingleLogoutService.php" ID="XXXXXXXXXXXXXXXXXXXXXX" IssueInstant="2010-01-01T00:00:00Z" Reason="" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><saml:NameID Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient" SPNameQualifier="http://sp.example.com/saml2/metadata/">58bcc81ea14700f66aeb707a0eff1360</saml:NameID><samlp:SessionIndex>a0123456789abcdef0123456789abcdef</samlp:SessionIndex></samlp:LogoutRequest>"""
        else:
            expected_request = """<samlp:LogoutRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" Destination="https://idp.example.com/simplesaml/saml2/idp/SingleLogoutService.php" ID="XXXXXXXXXXXXXXXXXXXXXX" IssueInstant="2010-01-01T00:00:00Z" Reason="" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><saml:NameID Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient" SPNameQualifier="http://sp.example.com/saml2/metadata/">58bcc81ea14700f66aeb707a0eff1360</saml:NameID><samlp:SessionIndex>a0123456789abcdef0123456789abcdef</samlp:SessionIndex></samlp:LogoutRequest>"""
        self.assertSAMLRequestsEquals(decode_base64_and_inflate(saml_request).decode('utf-8'),
                                      expected_request)
        # Simulate the IdP's LogoutResponse, echoing the request id back
        # as InResponseTo.
        request_id = re.findall(r' ID="(.*?)" ', expected_request)[0]
        instant = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
        saml_response = """<?xml version='1.0' encoding='UTF-8'?>
<samlp:LogoutResponse xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" Destination="http://sp.example.com/saml2/ls/" ID="a140848e7ce2bce834d7264ecdde0151" InResponseTo="%s" IssueInstant="%s" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">https://idp.example.com/simplesaml/saml2/idp/metadata.php</saml:Issuer><samlp:Status><samlp:StatusCode Value="urn:oasis:names:tc:SAML:2.0:status:Success" /></samlp:Status></samlp:LogoutResponse>""" % (
            request_id, instant)
        response = self.client.get(reverse('saml2_ls'), {
            'SAMLResponse': deflate_and_base64_encode(saml_response),
        })
        self.assertContains(response, "Logged out", status_code=200)
        # The Django session must be completely emptied.
        self.assertListEqual(list(self.client.session.keys()), [])
def test_logout_service_global(self):
settings.SAML_CONFIG = conf.create_conf(
sp_host='sp.example.com',
idp_hosts=['idp.example.com'],
metadata_file='remote_metadata_one_idp.xml',
)
self.do_login()
subject_id = views._get_subject_id(self.client.session)
instant = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
saml_request = """<?xml version='1.0' encoding='UTF-8'?>
<samlp:LogoutRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" ID="_9961abbaae6d06d251226cb25e38bf8f468036e57e" Version="2.0" IssueInstant="%s" Destination="http://sp.example.com/saml2/ls/"><saml:Issuer>https://idp.example.com/simplesaml/saml2/idp/metadata.php</saml:Issuer><saml:NameID SPNameQualifier="http://sp.example.com/saml2/metadata/" Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient">%s</saml:NameID><samlp:SessionIndex>_1837687b7bc9faad85839dbeb319627889f3021757</samlp:SessionIndex></samlp:LogoutRequest>""" % (instant, subject_id.text)
response = self.client.get(reverse('saml2_ls'), {
'SAMLRequest': deflate_and_base64_encode(saml_request),
})
self.assertEqual(response.status_code, 302)
location = response['Location']
url = urlparse(location)
self.assertEqual(url.hostname, 'idp.example.com')
self.assertEqual(url.path,
'/simplesaml/saml2/idp/SingleLogoutService.php')
params = parse_qs(url.query)
self.assertIn('SAMLResponse', params)
saml_response = params['SAMLResponse'][0]
if PY_VERSION < (3,):
expected_response = """<?xml version='1.0' encoding='UTF-8'?>
<samlp:LogoutResponse xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" Destination="https://idp.example.com/simplesaml/saml2/idp/SingleLogoutService.php" ID="a140848e7ce2bce834d7264ecdde0151" InResponseTo="_9961abbaae6d06d251226cb25e38bf8f468036e57e" IssueInstant="2010-09-05T09:10:12Z" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><samlp:Status><samlp:StatusCode Value="urn:oasis:names:tc:SAML:2.0:status:Success" /></samlp:Status></samlp:LogoutResponse>"""
else:
expected_response = """<samlp:LogoutResponse xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" Destination="https://idp.example.com/simplesaml/saml2/idp/SingleLogoutService.php" ID="a140848e7ce2bce834d7264ecdde0151" InResponseTo="_9961abbaae6d06d251226cb25e38bf8f468036e57e" IssueInstant="2010-09-05T09:10:12Z" Version="2.0"><saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">http://sp.example.com/saml2/metadata/</saml:Issuer><samlp:Status><samlp:StatusCode Value="urn:oasis:names:tc:SAML:2.0:status:Success" /></samlp:Status></samlp:LogoutResponse>"""
self.assertSAMLRequestsEquals(decode_base64_and_inflate(saml_response).decode('utf-8'),
expected_response)
def test_incomplete_logout(self):
settings.SAML_CONFIG = conf.create_conf(sp_host='sp.example.com',
idp_hosts=['idp.example.com'])
# now simulate a global logout process initiated by another SP
instant = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
saml_request = '<samlp:LogoutRequest xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" ID="_9961abbaae6d06d251226cb25e38bf8f468036e57e" Version="2.0" IssueInstant="%s" Destination="http://sp.example.com/saml2/ls/"><saml:Issuer>https://idp.example.com/simplesaml/saml2/idp/metadata.php</saml:Issuer><saml:NameID SPNameQualifier="http://sp.example.com/saml2/metadata/" Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient">%s</saml:NameID><samlp:SessionIndex>_1837687b7bc9faad85839dbeb319627889f3021757</samlp:SessionIndex></samlp:LogoutRequest>' % (
instant, 'invalid-subject-id')
response = self.client.get(reverse('saml2_ls'), {
'SAMLRequest': deflate_and_base64_encode(saml_request),
})
self.assertContains(response, 'Logout error', status_code=403)
def test_finish_logout_renders_error_template(self):
request = RequestFactory().get('/bar/foo')
response = finish_logout(request, None)
self.assertContains(response, "<h1>Logout error</h1>", status_code=200)
def _test_metadata(self):
settings.SAML_CONFIG = conf.create_conf(
sp_host='sp.example.com',
idp_hosts=['idp.example.com'],
metadata_file='remote_metadata_one_idp.xml',
)
valid_until = datetime.datetime.utcnow() + datetime.timedelta(hours=24)
valid_until = valid_until.strftime("%Y-%m-%dT%H:%M:%SZ")
expected_metadata = """<?xml version='1.0' encoding='UTF-8'?>
<md:EntityDescriptor entityID="http://sp.example.com/saml2/metadata/" validUntil="%s" xmlns:md="urn:oasis:names:tc:SAML:2.0:metadata"><md:SPSSODescriptor AuthnRequestsSigned="false" WantAssertionsSigned="true" protocolSupportEnumeration="urn:oasis:names:tc:SAML:2.0:protocol"><md:KeyDescriptor><ds:KeyInfo xmlns:ds="http://www.w3.org/2000/09/xmldsig#"><ds:X509Data><ds:X509Certificate>MIIDPjCCAiYCCQCkHjPQlll+mzANBgkqhkiG9w0BAQUFADBhMQswCQYDVQQGEwJF
UzEQMA4GA1UECBMHU2V2aWxsYTEbMBkGA1UEChMSWWFjbyBTaXN0ZW1hcyBTLkwu
MRAwDgYDVQQHEwdTZXZpbGxhMREwDwYDVQQDEwh0aWNvdGljbzAeFw0wOTEyMDQx
OTQzNTJaFw0xMDEyMDQxOTQzNTJaMGExCzAJBgNVBAYTAkVTMRAwDgYDVQQIEwdT
ZXZpbGxhMRswGQYDVQQKExJZYWNvIFNpc3RlbWFzIFMuTC4xEDAOBgNVBAcTB1Nl
dmlsbGExETAPBgNVBAMTCHRpY290aWNvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
MIIBCgKCAQEA7rMOMOaIZ/YYD5hYS6Hpjpovcu4k8gaIY+om9zCxLV5F8BLEfkxo
Pk9IA3cRQNRxf7AXCFxEOH3nKy56AIi1gU7X6fCT30JBT8NQlYdgOVMLlR+tjy1b
YV07tDa9U8gzjTyKQHgVwH0436+rmSPnacGj3fMwfySTMhtmrJmax0bIa8EB+gY1
77DBtvf8dIZIXLlGMQFloZeUspvHOrgNoEA9xU4E9AanGnV9HeV37zv3mLDUOQLx
4tk9sMQmylCpij7WZmcOV07DyJ/cEmnvHSalBTcyIgkcwlhmjtSgfCy6o5zuWxYd
T9ia80SZbWzn8N6B0q+nq23+Oee9H0lvcwIDAQABMA0GCSqGSIb3DQEBBQUAA4IB
AQCQBhKOqucJZAqGHx4ybDXNzpPethszonLNVg5deISSpWagy55KlGCi5laio/xq
hHRx18eTzeCeLHQYvTQxw0IjZOezJ1X30DD9lEqPr6C+IrmZc6bn/pF76xsvdaRS
gduNQPT1B25SV2HrEmbf8wafSlRARmBsyUHh860TqX7yFVjhYIAUF/El9rLca51j
ljCIqqvT+klPdjQoZwODWPFHgute2oNRmoIcMjSnoy1+mxOC2Q/j7kcD8/etulg2
XDxB3zD81gfdtT8VBFP+G4UrBa+5zFk6fT6U8a7ZqVsyH+rCXAdCyVlEC4Y5fZri
ID4zT0FcZASGuthM56rRJJSx
</ds:X509Certificate></ds:X509Data></ds:KeyInfo></md:KeyDescriptor><md:SingleLogoutService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" Location="http://sp.example.com/saml2/ls/" /><md:AssertionConsumerService Binding="urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" Location="http://sp.example.com/saml2/acs/" index="1" /><md:AttributeConsumingService index="1"><md:ServiceName xml:lang="en">Test SP</md:ServiceName><md:RequestedAttribute FriendlyName="uid" Name="urn:oid:0.9.2342.19200300.100.1.1" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri" isRequired="true" /><md:RequestedAttribute FriendlyName="eduPersonAffiliation" Name="urn:oid:1.3.6.1.4.1.5923.1.1.1.1" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri" isRequired="false" /></md:AttributeConsumingService></md:SPSSODescriptor><md:Organization><md:OrganizationName xml:lang="es">Ejemplo S.A.</md:OrganizationName><md:OrganizationName xml:lang="en">Example Inc.</md:OrganizationName><md:OrganizationDisplayName xml:lang="es">Ejemplo</md:OrganizationDisplayName><md:OrganizationDisplayName xml:lang="en">Example</md:OrganizationDisplayName><md:OrganizationURL xml:lang="es">http://www.example.es</md:OrganizationURL><md:OrganizationURL xml:lang="en">http://www.example.com</md:OrganizationURL></md:Organization><md:ContactPerson contactType="technical"><md:Company>Example Inc.</md:Company><md:GivenName>Technical givenname</md:GivenName><md:SurName>Technical surname</md:SurName><md:EmailAddress>technical@sp.example.com</md:EmailAddress></md:ContactPerson><md:ContactPerson contactType="administrative"><md:Company>Example Inc.</md:Company><md:GivenName>Administrative givenname</md:GivenName><md:SurName>Administrative surname</md:SurName><md:EmailAddress>administrative@sp.example.ccom</md:EmailAddress></md:ContactPerson></md:EntityDescriptor>"""
expected_metadata = expected_metadata % valid_until
response = self.client.get('/metadata/')
self.assertEqual(response['Content-type'], 'text/xml; charset=utf8')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, expected_metadata)
def test_post_authenticated_signal(self):
def signal_handler(signal, user, session_info):
self.assertEqual(isinstance(user, User), True)
post_authenticated.connect(signal_handler, dispatch_uid='test_signal')
self.do_login()
post_authenticated.disconnect(dispatch_uid='test_signal')
def test_idplist_templatetag(self):
settings.SAML_CONFIG = conf.create_conf(
sp_host='sp.example.com',
idp_hosts=['idp1.example.com',
'idp2.example.com',
'idp3.example.com'],
metadata_file='remote_metadata_three_idps.xml',
)
rendered = self.render_template(
'{% load idplist %}'
'{% idplist as idps %}'
'{% for url, name in idps.items %}'
'{{ url }} - {{ name }}; '
'{% endfor %}'
)
# the idplist is unordered, so convert the result into a set.
rendered = set(rendered.split('; '))
expected = set([
u'https://idp1.example.com/simplesaml/saml2/idp/metadata.php - idp1.example.com IdP',
u'https://idp2.example.com/simplesaml/saml2/idp/metadata.php - idp2.example.com IdP',
u'https://idp3.example.com/simplesaml/saml2/idp/metadata.php - idp3.example.com IdP',
u'',
])
self.assertEqual(rendered, expected)
def test_config_loader(request):
config = SPConfig()
config.load({'entityid': 'testentity'})
return config
def test_config_loader_with_real_conf(request):
config = SPConfig()
config.load(conf.create_conf(sp_host='sp.example.com',
idp_hosts=['idp.example.com'],
metadata_file='remote_metadata_one_idp.xml'))
return config
class ConfTests(TestCase):
def test_custom_conf_loader(self):
config_loader_path = 'djangosaml2.tests.test_config_loader'
request = RequestFactory().get('/bar/foo')
conf = get_config(config_loader_path, request)
self.assertEqual(conf.entityid, 'testentity')
def test_custom_conf_loader_from_view(self):
config_loader_path = 'djangosaml2.tests.test_config_loader_with_real_conf'
request = RequestFactory().get('/login/')
request.user = AnonymousUser()
middleware = SessionMiddleware()
middleware.process_request(request)
request.session.save()
response = views.login(request, config_loader_path)
self.assertEqual(response.status_code, 302)
location = response['Location']
url = urlparse(location)
self.assertEqual(url.hostname, 'idp.example.com')
self.assertEqual(url.path, '/simplesaml/saml2/idp/SSOService.php')
| true | true |
1c31e595cdd83f4d43facb3e59e7e4944fd03f64 | 6,572 | py | Python | continuous_delivery_scripts/plugins/golang.py | ARMmbed/continuous-delivery-scripts | 3df724ae5705c675261349ecd3ac38b0781c1d65 | [
"Apache-2.0"
] | 2 | 2021-09-10T14:01:24.000Z | 2022-02-08T10:21:27.000Z | continuous_delivery_scripts/plugins/golang.py | acabarbaye/continuous-delivery-scripts-1 | 3df724ae5705c675261349ecd3ac38b0781c1d65 | [
"Apache-2.0"
] | 29 | 2020-11-30T10:02:57.000Z | 2022-03-29T06:14:54.000Z | continuous_delivery_scripts/plugins/golang.py | acabarbaye/continuous-delivery-scripts-1 | 3df724ae5705c675261349ecd3ac38b0781c1d65 | [
"Apache-2.0"
] | 1 | 2021-09-10T14:01:38.000Z | 2021-09-10T14:01:38.000Z | #
# Copyright (C) 2020-2021 Arm Limited or its affiliates and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Plugin for Golang projects."""
import logging
import os
from pathlib import Path
from typing import Optional, List
from subprocess import check_call
from continuous_delivery_scripts.utils.language_specifics_base import BaseLanguage, get_language_from_file_name
from continuous_delivery_scripts.spdx_report.spdx_project import SpdxProject
from continuous_delivery_scripts.utils.configuration import configuration, ConfigurationVariable
from continuous_delivery_scripts.utils.git_helpers import LocalProjectRepository, GitWrapper
logger = logging.getLogger(__name__)
SRC_DIR = Path(str(configuration.get_value(ConfigurationVariable.SOURCE_DIR)))
ROOT_DIR = Path(str(configuration.get_value(ConfigurationVariable.PROJECT_ROOT)))
ENVVAR_GORELEASER_GIT_TOKEN = "GITHUB_TOKEN"
ENVVAR_GORELEASER_CUSTOMISED_TAG = "GORELEASER_CURRENT_TAG"
ENVVAR_GO_MOD = "GO111MODULE"
GO_MOD_ON_VALUE = "on"
def _generate_golds_command_list(output_directory: Path, module: str) -> List[str]:
return ["golds", "-gen", "-wdpkgs-listing=promoted", f"-dir={str(output_directory)}", "-nouses", f"{module}"]
def _generate_goreleaser_release_command_list(changelog: Path) -> List[str]:
return [
"goreleaser",
"release",
"--rm-dist",
"--release-notes",
f"{str(changelog)}",
]
def _generate_goreleaser_check_command_list() -> List[str]:
return [
"goreleaser",
"check",
]
def _install_golds_command_list() -> List[str]:
return ["go", "install", "go101.org/golds@latest"]
def _install_goreleaser_command_list() -> List[str]:
return ["go", "install", "github.com/goreleaser/goreleaser@latest"]
def _call_golds(output_directory: Path, module: str) -> None:
"""Calls Golds for generating the docs."""
logger.info("Installing Golds if missing.")
env = os.environ
env[ENVVAR_GO_MOD] = GO_MOD_ON_VALUE
check_call(_install_golds_command_list(), env=env)
logger.info("Creating Golds documentation.")
check_call(_generate_golds_command_list(output_directory, module), cwd=SRC_DIR, env=env)
def _call_goreleaser_check(version: str) -> None:
"""Calls go releaser check to verify configuration."""
logger.info("Installing GoReleaser if missing.")
env = os.environ
env[ENVVAR_GO_MOD] = GO_MOD_ON_VALUE
check_call(_install_goreleaser_command_list(), env=env)
logger.info("Checking GoReleaser configuration.")
env[ENVVAR_GORELEASER_CUSTOMISED_TAG] = version
env[ENVVAR_GORELEASER_GIT_TOKEN] = configuration.get_value(ConfigurationVariable.GIT_TOKEN)
check_call(_generate_goreleaser_check_command_list(), cwd=ROOT_DIR, env=env)
def _determine_go_module_tag(version) -> Optional[str]:
"""Determines go module for tagging.
See https://golang.org/ref/mod#vcs-version.
and https://github.com/golang/go/wiki/Modules#should-i-have-multiple-modules-in-a-single-repository.
"""
module = ""
try:
module = str(SRC_DIR.relative_to(ROOT_DIR))
except ValueError:
try:
module = str(ROOT_DIR.relative_to(SRC_DIR))
except ValueError as exception:
logger.warning(exception)
if module == "." or len(module) == 0:
return None
module = module.rstrip("/")
return f"{module}/{version}"
class Go(BaseLanguage):
"""Specific actions for a Golang project."""
def get_related_language(self) -> str:
"""Gets the related language."""
return get_language_from_file_name(__file__)
def get_version_tag(self, version: str):
"""Gets tag based on version."""
cleansed_version = version.strip().lstrip("v")
return f"v{cleansed_version}"
def package_software(self, version: str) -> None:
"""No operation."""
super().package_software(version)
_call_goreleaser_check(version)
def release_package_to_repository(self, version: str) -> None:
"""No operation."""
super().release_package_to_repository(version)
self._call_goreleaser_release(version)
def check_credentials(self) -> None:
"""Checks any credentials."""
super().check_credentials()
configuration.get_value(ConfigurationVariable.GIT_TOKEN)
def generate_code_documentation(self, output_directory: Path, module_to_document: str) -> None:
"""Generates the code documentation."""
super().generate_code_documentation(output_directory, module_to_document)
_call_golds(output_directory, "./...")
def can_add_licence_headers(self) -> bool:
"""States that licence headers can be added."""
return True
def can_get_project_metadata(self) -> bool:
"""States whether project metadata can be retrieved."""
return False
def get_current_spdx_project(self) -> Optional[SpdxProject]:
"""Gets current SPDX description."""
# TODO
return None
def should_clean_before_packaging(self) -> bool:
"""States whether the repository must be cleaned before packaging happens."""
return True
def tag_release(self, git: GitWrapper, version: str) -> None:
"""Tags release commit."""
super().tag_release(git, version)
go_tag = _determine_go_module_tag(self.get_version_tag(version))
if go_tag:
git.create_tag(go_tag, message=f"Golang module release: {go_tag}")
def _call_goreleaser_release(self, version: str) -> None:
"""Calls go releaser release to upload packages."""
logger.info("Installing GoReleaser if missing.")
env = os.environ
env[ENVVAR_GO_MOD] = GO_MOD_ON_VALUE
check_call(_install_goreleaser_command_list(), env=env)
tag = self.get_version_tag(version)
# The tag of the release must be retrieved
# See https://github.com/goreleaser/goreleaser/discussions/1426
logger.info(f"Checking out tag: {tag}.")
with LocalProjectRepository() as git:
git.configure_for_github()
git.fetch()
git.checkout(f"tags/{tag}")
logger.info("Release package.")
changelogPath = configuration.get_value(ConfigurationVariable.CHANGELOG_FILE_PATH)
env[ENVVAR_GORELEASER_CUSTOMISED_TAG] = tag
env[ENVVAR_GORELEASER_GIT_TOKEN] = configuration.get_value(ConfigurationVariable.GIT_TOKEN)
check_call(_generate_goreleaser_release_command_list(changelogPath), cwd=ROOT_DIR, env=env)
| 38.209302 | 113 | 0.706939 |
import logging
import os
from pathlib import Path
from typing import Optional, List
from subprocess import check_call
from continuous_delivery_scripts.utils.language_specifics_base import BaseLanguage, get_language_from_file_name
from continuous_delivery_scripts.spdx_report.spdx_project import SpdxProject
from continuous_delivery_scripts.utils.configuration import configuration, ConfigurationVariable
from continuous_delivery_scripts.utils.git_helpers import LocalProjectRepository, GitWrapper
logger = logging.getLogger(__name__)
SRC_DIR = Path(str(configuration.get_value(ConfigurationVariable.SOURCE_DIR)))
ROOT_DIR = Path(str(configuration.get_value(ConfigurationVariable.PROJECT_ROOT)))
ENVVAR_GORELEASER_GIT_TOKEN = "GITHUB_TOKEN"
ENVVAR_GORELEASER_CUSTOMISED_TAG = "GORELEASER_CURRENT_TAG"
ENVVAR_GO_MOD = "GO111MODULE"
GO_MOD_ON_VALUE = "on"
def _generate_golds_command_list(output_directory: Path, module: str) -> List[str]:
return ["golds", "-gen", "-wdpkgs-listing=promoted", f"-dir={str(output_directory)}", "-nouses", f"{module}"]
def _generate_goreleaser_release_command_list(changelog: Path) -> List[str]:
return [
"goreleaser",
"release",
"--rm-dist",
"--release-notes",
f"{str(changelog)}",
]
def _generate_goreleaser_check_command_list() -> List[str]:
return [
"goreleaser",
"check",
]
def _install_golds_command_list() -> List[str]:
return ["go", "install", "go101.org/golds@latest"]
def _install_goreleaser_command_list() -> List[str]:
return ["go", "install", "github.com/goreleaser/goreleaser@latest"]
def _call_golds(output_directory: Path, module: str) -> None:
logger.info("Installing Golds if missing.")
env = os.environ
env[ENVVAR_GO_MOD] = GO_MOD_ON_VALUE
check_call(_install_golds_command_list(), env=env)
logger.info("Creating Golds documentation.")
check_call(_generate_golds_command_list(output_directory, module), cwd=SRC_DIR, env=env)
def _call_goreleaser_check(version: str) -> None:
logger.info("Installing GoReleaser if missing.")
env = os.environ
env[ENVVAR_GO_MOD] = GO_MOD_ON_VALUE
check_call(_install_goreleaser_command_list(), env=env)
logger.info("Checking GoReleaser configuration.")
env[ENVVAR_GORELEASER_CUSTOMISED_TAG] = version
env[ENVVAR_GORELEASER_GIT_TOKEN] = configuration.get_value(ConfigurationVariable.GIT_TOKEN)
check_call(_generate_goreleaser_check_command_list(), cwd=ROOT_DIR, env=env)
def _determine_go_module_tag(version) -> Optional[str]:
module = ""
try:
module = str(SRC_DIR.relative_to(ROOT_DIR))
except ValueError:
try:
module = str(ROOT_DIR.relative_to(SRC_DIR))
except ValueError as exception:
logger.warning(exception)
if module == "." or len(module) == 0:
return None
module = module.rstrip("/")
return f"{module}/{version}"
class Go(BaseLanguage):
def get_related_language(self) -> str:
return get_language_from_file_name(__file__)
def get_version_tag(self, version: str):
cleansed_version = version.strip().lstrip("v")
return f"v{cleansed_version}"
def package_software(self, version: str) -> None:
super().package_software(version)
_call_goreleaser_check(version)
def release_package_to_repository(self, version: str) -> None:
super().release_package_to_repository(version)
self._call_goreleaser_release(version)
def check_credentials(self) -> None:
super().check_credentials()
configuration.get_value(ConfigurationVariable.GIT_TOKEN)
def generate_code_documentation(self, output_directory: Path, module_to_document: str) -> None:
super().generate_code_documentation(output_directory, module_to_document)
_call_golds(output_directory, "./...")
def can_add_licence_headers(self) -> bool:
return True
def can_get_project_metadata(self) -> bool:
return False
def get_current_spdx_project(self) -> Optional[SpdxProject]:
return None
def should_clean_before_packaging(self) -> bool:
return True
def tag_release(self, git: GitWrapper, version: str) -> None:
super().tag_release(git, version)
go_tag = _determine_go_module_tag(self.get_version_tag(version))
if go_tag:
git.create_tag(go_tag, message=f"Golang module release: {go_tag}")
def _call_goreleaser_release(self, version: str) -> None:
logger.info("Installing GoReleaser if missing.")
env = os.environ
env[ENVVAR_GO_MOD] = GO_MOD_ON_VALUE
check_call(_install_goreleaser_command_list(), env=env)
tag = self.get_version_tag(version)
logger.info(f"Checking out tag: {tag}.")
with LocalProjectRepository() as git:
git.configure_for_github()
git.fetch()
git.checkout(f"tags/{tag}")
logger.info("Release package.")
changelogPath = configuration.get_value(ConfigurationVariable.CHANGELOG_FILE_PATH)
env[ENVVAR_GORELEASER_CUSTOMISED_TAG] = tag
env[ENVVAR_GORELEASER_GIT_TOKEN] = configuration.get_value(ConfigurationVariable.GIT_TOKEN)
check_call(_generate_goreleaser_release_command_list(changelogPath), cwd=ROOT_DIR, env=env)
| true | true |
1c31e59cfa13f341e8e9d09d9ddc476b52177ea9 | 7,442 | py | Python | ml-agents/mlagents/trainers/tests/test_simple_rl.py | robertnoneman/ml-agents | 797b0e880f4db61ab36783357bf555621affce2a | [
"Apache-2.0"
] | 1 | 2019-01-20T19:57:46.000Z | 2019-01-20T19:57:46.000Z | ml-agents/mlagents/trainers/tests/test_simple_rl.py | ruairidhcumming/ml-agents | d4205fed06b5ac5c2cac6c594bbd25dfe128103f | [
"Apache-2.0"
] | null | null | null | ml-agents/mlagents/trainers/tests/test_simple_rl.py | ruairidhcumming/ml-agents | d4205fed06b5ac5c2cac6c594bbd25dfe128103f | [
"Apache-2.0"
] | null | null | null | import math
import random
import tempfile
import pytest
import yaml
from typing import Any, Dict
from mlagents.trainers.trainer_controller import TrainerController
from mlagents.trainers.trainer_util import TrainerFactory
from mlagents.envs.base_unity_environment import BaseUnityEnvironment
from mlagents.envs.brain import BrainInfo, AllBrainInfo, BrainParameters
from mlagents.envs.communicator_objects.agent_info_pb2 import AgentInfoProto
from mlagents.envs.communicator_objects.observation_pb2 import (
ObservationProto,
NONE as COMPRESSION_TYPE_NONE,
)
from mlagents.envs.simple_env_manager import SimpleEnvManager
from mlagents.envs.sampler_class import SamplerManager
from mlagents.envs.side_channel.float_properties_channel import FloatPropertiesChannel
BRAIN_NAME = __name__
OBS_SIZE = 1
STEP_SIZE = 0.1
TIME_PENALTY = 0.001
MIN_STEPS = int(1.0 / STEP_SIZE) + 1
SUCCESS_REWARD = 1.0 + MIN_STEPS * TIME_PENALTY
def clamp(x, min_val, max_val):
return max(min_val, min(x, max_val))
class Simple1DEnvironment(BaseUnityEnvironment):
"""
Very simple "game" - the agent has a position on [-1, 1], gets a reward of 1 if it reaches 1, and a reward of -1 if
it reaches -1. The position is incremented by the action amount (clamped to [-step_size, step_size]).
"""
def __init__(self, use_discrete):
super().__init__()
self.discrete = use_discrete
self._brains: Dict[str, BrainParameters] = {}
brain_params = BrainParameters(
brain_name=BRAIN_NAME,
vector_observation_space_size=OBS_SIZE,
camera_resolutions=[],
vector_action_space_size=[2] if use_discrete else [1],
vector_action_descriptions=["moveDirection"],
vector_action_space_type=0 if use_discrete else 1,
)
self._brains[BRAIN_NAME] = brain_params
# state
self.position = 0.0
self.step_count = 0
self.random = random.Random(str(brain_params))
self.goal = self.random.choice([-1, 1])
def step(
self,
vector_action: Dict[str, Any] = None,
memory: Dict[str, Any] = None,
value: Dict[str, Any] = None,
) -> AllBrainInfo:
assert vector_action is not None
if self.discrete:
act = vector_action[BRAIN_NAME][0][0]
delta = 1 if act else -1
else:
delta = vector_action[BRAIN_NAME][0][0]
delta = clamp(delta, -STEP_SIZE, STEP_SIZE)
self.position += delta
self.position = clamp(self.position, -1, 1)
self.step_count += 1
done = self.position >= 1.0 or self.position <= -1.0
if done:
reward = SUCCESS_REWARD * self.position * self.goal
else:
reward = -TIME_PENALTY
vector_obs = [self.goal] * OBS_SIZE
vector_obs_proto = ObservationProto(
float_data=ObservationProto.FloatData(data=vector_obs),
shape=[len(vector_obs)],
compression_type=COMPRESSION_TYPE_NONE,
)
agent_info = AgentInfoProto(
reward=reward, done=bool(done), observations=[vector_obs_proto]
)
if done:
self._reset_agent()
return {
BRAIN_NAME: BrainInfo.from_agent_proto(
0, [agent_info], self._brains[BRAIN_NAME]
)
}
def _reset_agent(self):
self.position = 0.0
self.step_count = 0
self.goal = self.random.choice([-1, 1])
def reset(
self,
config: Dict[str, float] = None,
train_mode: bool = True,
custom_reset_parameters: Any = None,
) -> AllBrainInfo: # type: ignore
self._reset_agent()
vector_obs = [self.goal] * OBS_SIZE
vector_obs_proto = ObservationProto(
float_data=ObservationProto.FloatData(data=vector_obs),
shape=[len(vector_obs)],
compression_type=COMPRESSION_TYPE_NONE,
)
agent_info = AgentInfoProto(
done=False, max_step_reached=False, observations=[vector_obs_proto]
)
return {
BRAIN_NAME: BrainInfo.from_agent_proto(
0, [agent_info], self._brains[BRAIN_NAME]
)
}
@property
def external_brains(self) -> Dict[str, BrainParameters]:
return self._brains
@property
def reset_parameters(self) -> Dict[str, str]:
return {}
def close(self):
pass
PPO_CONFIG = """
default:
trainer: ppo
batch_size: 16
beta: 5.0e-3
buffer_size: 64
epsilon: 0.2
hidden_units: 128
lambd: 0.95
learning_rate: 5.0e-3
max_steps: 2500
memory_size: 256
normalize: false
num_epoch: 3
num_layers: 2
time_horizon: 64
sequence_length: 64
summary_freq: 500
use_recurrent: false
reward_signals:
extrinsic:
strength: 1.0
gamma: 0.99
"""
SAC_CONFIG = """
default:
trainer: sac
batch_size: 8
buffer_size: 500
buffer_init_steps: 100
hidden_units: 16
init_entcoef: 0.01
learning_rate: 5.0e-3
max_steps: 1000
memory_size: 256
normalize: false
num_update: 1
train_interval: 1
num_layers: 1
time_horizon: 64
sequence_length: 64
summary_freq: 500
tau: 0.005
use_recurrent: false
curiosity_enc_size: 128
demo_path: None
vis_encode_type: simple
reward_signals:
extrinsic:
strength: 1.0
gamma: 0.99
"""
def _check_environment_trains(env, config):
# Create controller and begin training.
with tempfile.TemporaryDirectory() as dir:
run_id = "id"
save_freq = 99999
seed = 1337
trainer_config = yaml.safe_load(config)
env_manager = SimpleEnvManager(env, FloatPropertiesChannel())
trainer_factory = TrainerFactory(
trainer_config=trainer_config,
summaries_dir=dir,
run_id=run_id,
model_path=dir,
keep_checkpoints=1,
train_model=True,
load_model=False,
seed=seed,
meta_curriculum=None,
multi_gpu=False,
)
tc = TrainerController(
trainer_factory=trainer_factory,
summaries_dir=dir,
model_path=dir,
run_id=run_id,
meta_curriculum=None,
train=True,
training_seed=seed,
sampler_manager=SamplerManager(None),
resampling_interval=None,
save_freq=save_freq,
)
# Begin training
tc.start_learning(env_manager)
print(tc._get_measure_vals())
for brain_name, mean_reward in tc._get_measure_vals().items():
assert not math.isnan(mean_reward)
assert mean_reward > 0.99
@pytest.mark.parametrize("use_discrete", [True, False])
def test_simple_ppo(use_discrete):
env = Simple1DEnvironment(use_discrete=use_discrete)
_check_environment_trains(env, PPO_CONFIG)
@pytest.mark.parametrize("use_discrete", [True, False])
def test_simple_sac(use_discrete):
env = Simple1DEnvironment(use_discrete=use_discrete)
_check_environment_trains(env, SAC_CONFIG)
| 29.41502 | 119 | 0.619323 | import math
import random
import tempfile
import pytest
import yaml
from typing import Any, Dict
from mlagents.trainers.trainer_controller import TrainerController
from mlagents.trainers.trainer_util import TrainerFactory
from mlagents.envs.base_unity_environment import BaseUnityEnvironment
from mlagents.envs.brain import BrainInfo, AllBrainInfo, BrainParameters
from mlagents.envs.communicator_objects.agent_info_pb2 import AgentInfoProto
from mlagents.envs.communicator_objects.observation_pb2 import (
ObservationProto,
NONE as COMPRESSION_TYPE_NONE,
)
from mlagents.envs.simple_env_manager import SimpleEnvManager
from mlagents.envs.sampler_class import SamplerManager
from mlagents.envs.side_channel.float_properties_channel import FloatPropertiesChannel
BRAIN_NAME = __name__
OBS_SIZE = 1
STEP_SIZE = 0.1
TIME_PENALTY = 0.001
MIN_STEPS = int(1.0 / STEP_SIZE) + 1
SUCCESS_REWARD = 1.0 + MIN_STEPS * TIME_PENALTY
def clamp(x, min_val, max_val):
return max(min_val, min(x, max_val))
class Simple1DEnvironment(BaseUnityEnvironment):
def __init__(self, use_discrete):
super().__init__()
self.discrete = use_discrete
self._brains: Dict[str, BrainParameters] = {}
brain_params = BrainParameters(
brain_name=BRAIN_NAME,
vector_observation_space_size=OBS_SIZE,
camera_resolutions=[],
vector_action_space_size=[2] if use_discrete else [1],
vector_action_descriptions=["moveDirection"],
vector_action_space_type=0 if use_discrete else 1,
)
self._brains[BRAIN_NAME] = brain_params
self.position = 0.0
self.step_count = 0
self.random = random.Random(str(brain_params))
self.goal = self.random.choice([-1, 1])
def step(
self,
vector_action: Dict[str, Any] = None,
memory: Dict[str, Any] = None,
value: Dict[str, Any] = None,
) -> AllBrainInfo:
assert vector_action is not None
if self.discrete:
act = vector_action[BRAIN_NAME][0][0]
delta = 1 if act else -1
else:
delta = vector_action[BRAIN_NAME][0][0]
delta = clamp(delta, -STEP_SIZE, STEP_SIZE)
self.position += delta
self.position = clamp(self.position, -1, 1)
self.step_count += 1
done = self.position >= 1.0 or self.position <= -1.0
if done:
reward = SUCCESS_REWARD * self.position * self.goal
else:
reward = -TIME_PENALTY
vector_obs = [self.goal] * OBS_SIZE
vector_obs_proto = ObservationProto(
float_data=ObservationProto.FloatData(data=vector_obs),
shape=[len(vector_obs)],
compression_type=COMPRESSION_TYPE_NONE,
)
agent_info = AgentInfoProto(
reward=reward, done=bool(done), observations=[vector_obs_proto]
)
if done:
self._reset_agent()
return {
BRAIN_NAME: BrainInfo.from_agent_proto(
0, [agent_info], self._brains[BRAIN_NAME]
)
}
def _reset_agent(self):
self.position = 0.0
self.step_count = 0
self.goal = self.random.choice([-1, 1])
def reset(
self,
config: Dict[str, float] = None,
train_mode: bool = True,
custom_reset_parameters: Any = None,
) -> AllBrainInfo:
self._reset_agent()
vector_obs = [self.goal] * OBS_SIZE
vector_obs_proto = ObservationProto(
float_data=ObservationProto.FloatData(data=vector_obs),
shape=[len(vector_obs)],
compression_type=COMPRESSION_TYPE_NONE,
)
agent_info = AgentInfoProto(
done=False, max_step_reached=False, observations=[vector_obs_proto]
)
return {
BRAIN_NAME: BrainInfo.from_agent_proto(
0, [agent_info], self._brains[BRAIN_NAME]
)
}
@property
def external_brains(self) -> Dict[str, BrainParameters]:
return self._brains
@property
def reset_parameters(self) -> Dict[str, str]:
return {}
def close(self):
pass
PPO_CONFIG = """
default:
trainer: ppo
batch_size: 16
beta: 5.0e-3
buffer_size: 64
epsilon: 0.2
hidden_units: 128
lambd: 0.95
learning_rate: 5.0e-3
max_steps: 2500
memory_size: 256
normalize: false
num_epoch: 3
num_layers: 2
time_horizon: 64
sequence_length: 64
summary_freq: 500
use_recurrent: false
reward_signals:
extrinsic:
strength: 1.0
gamma: 0.99
"""
SAC_CONFIG = """
default:
trainer: sac
batch_size: 8
buffer_size: 500
buffer_init_steps: 100
hidden_units: 16
init_entcoef: 0.01
learning_rate: 5.0e-3
max_steps: 1000
memory_size: 256
normalize: false
num_update: 1
train_interval: 1
num_layers: 1
time_horizon: 64
sequence_length: 64
summary_freq: 500
tau: 0.005
use_recurrent: false
curiosity_enc_size: 128
demo_path: None
vis_encode_type: simple
reward_signals:
extrinsic:
strength: 1.0
gamma: 0.99
"""
def _check_environment_trains(env, config):
    """Run a short training session on ``env`` and assert it converges.

    Builds a TrainerController from the given YAML ``config`` string, trains
    inside a temporary directory (so no model/summary artifacts leak), and
    requires every brain's measured mean reward to exceed 0.99.
    """
    # Named work_dir instead of "dir" to avoid shadowing the builtin.
    with tempfile.TemporaryDirectory() as work_dir:
        run_id = "id"
        save_freq = 99999
        seed = 1337
        trainer_config = yaml.safe_load(config)
        env_manager = SimpleEnvManager(env, FloatPropertiesChannel())
        trainer_factory = TrainerFactory(
            trainer_config=trainer_config,
            summaries_dir=work_dir,
            run_id=run_id,
            model_path=work_dir,
            keep_checkpoints=1,
            train_model=True,
            load_model=False,
            seed=seed,
            meta_curriculum=None,
            multi_gpu=False,
        )
        tc = TrainerController(
            trainer_factory=trainer_factory,
            summaries_dir=work_dir,
            model_path=work_dir,
            run_id=run_id,
            meta_curriculum=None,
            train=True,
            training_seed=seed,
            sampler_manager=SamplerManager(None),
            resampling_interval=None,
            save_freq=save_freq,
        )
        tc.start_learning(env_manager)
        print(tc._get_measure_vals())
        for brain_name, mean_reward in tc._get_measure_vals().items():
            assert not math.isnan(mean_reward)
            assert mean_reward > 0.99
@pytest.mark.parametrize("use_discrete", [True, False])
def test_simple_ppo(use_discrete):
env = Simple1DEnvironment(use_discrete=use_discrete)
_check_environment_trains(env, PPO_CONFIG)
@pytest.mark.parametrize("use_discrete", [True, False])
def test_simple_sac(use_discrete):
env = Simple1DEnvironment(use_discrete=use_discrete)
_check_environment_trains(env, SAC_CONFIG)
| true | true |
1c31e777172d3840499c8f68ec880d29b6f62b21 | 1,307 | py | Python | .github/scripts/build_assets/util.py | eldadfux/devicon | 6a98c2349c7f8f7eb2ac4547c1cb95b3120d0005 | [
"MIT"
] | null | null | null | .github/scripts/build_assets/util.py | eldadfux/devicon | 6a98c2349c7f8f7eb2ac4547c1cb95b3120d0005 | [
"MIT"
] | null | null | null | .github/scripts/build_assets/util.py | eldadfux/devicon | 6a98c2349c7f8f7eb2ac4547c1cb95b3120d0005 | [
"MIT"
] | null | null | null | from pathlib import Path
from argparse import ArgumentParser
from build_assets.PathResolverAction import PathResolverAction
def get_commandline_args():
    """Build and parse the command line arguments for the Icomoon upload script.

    :return: an argparse.Namespace with the ``headless`` flag and the path
        arguments, each resolved to an absolute path by PathResolverAction.
    """
    parser = ArgumentParser(description="Upload svgs to Icomoon to create icon files.")
    parser.add_argument("--headless",
                        help="Whether to run the browser in headless/no UI mode",
                        action="store_true")
    parser.add_argument("geckodriver_path",
                        help="The path to the firefox executable file",
                        action=PathResolverAction)
    parser.add_argument("icomoon_json_path",
                        help="The path to the icomoon.json aka the selection.json created by Icomoon",
                        action=PathResolverAction)
    parser.add_argument("devicon_json_path",
                        help="The path to the devicon.json",
                        action=PathResolverAction)
    parser.add_argument("icons_folder_path",
                        help="The path to the icons folder",
                        action=PathResolverAction)
    parser.add_argument("download_path",
                        help="The path where you'd like to download the Icomoon files to",
                        action=PathResolverAction)
    return parser.parse_args()
from argparse import ArgumentParser
from build_assets.PathResolverAction import PathResolverAction
def get_commandline_args():
parser = ArgumentParser(description="Upload svgs to Icomoon to create icon files.")
parser.add_argument("--headless",
help="Whether to run the browser in headless/no UI mode",
action="store_true")
parser.add_argument("geckodriver_path",
help="The path to the firefox executable file",
action=PathResolverAction)
parser.add_argument("icomoon_json_path",
help="The path to the icomoon.json aka the selection.json created by Icomoon",
action=PathResolverAction)
parser.add_argument("devicon_json_path",
help="The path to the devicon.json",
action=PathResolverAction)
parser.add_argument("icons_folder_path",
help="The path to the icons folder",
action=PathResolverAction)
parser.add_argument("download_path",
help="The path where you'd like to download the Icomoon files to",
action=PathResolverAction)
return parser.parse_args() | true | true |
1c31e8071a530c20a30e496539b63456f9d6a8c9 | 833 | py | Python | computation_migration/distComputing/distComputing/urls.py | mengyingzhou/ipv6_firewall_computation_migration | 3fbc1f910e1fffdf2d5bb25eed631dffc6d7d842 | [
"MIT"
] | null | null | null | computation_migration/distComputing/distComputing/urls.py | mengyingzhou/ipv6_firewall_computation_migration | 3fbc1f910e1fffdf2d5bb25eed631dffc6d7d842 | [
"MIT"
] | null | null | null | computation_migration/distComputing/distComputing/urls.py | mengyingzhou/ipv6_firewall_computation_migration | 3fbc1f910e1fffdf2d5bb25eed631dffc6d7d842 | [
"MIT"
] | null | null | null | """distComputing URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# URL routing: delegate /imgProc/ to the imgProc app and expose the admin site.
urlpatterns = [
    path('imgProc/', include('imgProc.urls')),
    path('admin/', admin.site.urls),
]
| 36.217391 | 78 | 0.687875 | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('imgProc/', include('imgProc.urls')),
path('admin/', admin.site.urls),
]
| true | true |
1c31e879297ecc47e67cac0d04f8704694e52b60 | 3,478 | py | Python | examples/vn_trader/run.py | JonnyORZ/vnpy | c3bb624d95625412a2dd593326abf3833321d2e2 | [
"MIT"
] | 11 | 2019-11-18T06:07:16.000Z | 2020-10-12T11:36:21.000Z | examples/vn_trader/run.py | dovnekai/vnpy | 222475fdf97f77f60cec4ecee231f1b85f44df21 | [
"MIT"
] | 2 | 2019-07-17T09:39:34.000Z | 2019-10-19T16:21:55.000Z | examples/vn_trader/run.py | dovnekai/vnpy | 222475fdf97f77f60cec4ecee231f1b85f44df21 | [
"MIT"
] | 6 | 2019-10-30T14:52:21.000Z | 2021-01-11T05:41:17.000Z | # flake8: noqa
from vnpy.event import EventEngine
from vnpy.trader.engine import MainEngine
from vnpy.trader.ui import MainWindow, create_qapp
# from vnpy.gateway.binance import BinanceGateway
from vnpy.gateway.bitmex import BitmexGateway
# from vnpy.gateway.futu import FutuGateway
# from vnpy.gateway.ib import IbGateway
# from vnpy.gateway.ctp import CtpGateway
# from vnpy.gateway.ctptest import CtptestGateway
# from vnpy.gateway.mini import MiniGateway
# from vnpy.gateway.sopt import SoptGateway
# from vnpy.gateway.minitest import MinitestGateway
# from vnpy.gateway.femas import FemasGateway
# from vnpy.gateway.tiger import TigerGateway
# from vnpy.gateway.oes import OesGateway
# from vnpy.gateway.okex import OkexGateway
# from vnpy.gateway.huobi import HuobiGateway
from vnpy.gateway.bitfinex import BitfinexGateway
# from vnpy.gateway.onetoken import OnetokenGateway
from vnpy.gateway.okexf import OkexfGateway
from vnpy.gateway.okexs import OkexsGateway
# from vnpy.gateway.xtp import XtpGateway
# from vnpy.gateway.hbdm import HbdmGateway
# from vnpy.gateway.tap import TapGateway
# from vnpy.gateway.tora import ToraGateway
# from vnpy.gateway.alpaca import AlpacaGateway
from vnpy.gateway.da import DaGateway
from vnpy.gateway.coinbase import CoinbaseGateway
from vnpy.app.cta_strategy import CtaStrategyApp
# from vnpy.app.csv_loader import CsvLoaderApp
# from vnpy.app.algo_trading import AlgoTradingApp
from vnpy.app.cta_backtester import CtaBacktesterApp
# from vnpy.app.data_recorder import DataRecorderApp
# from vnpy.app.risk_manager import RiskManagerApp
from vnpy.app.script_trader import ScriptTraderApp
from vnpy.app.rpc_service import RpcServiceApp
from vnpy.app.spread_trading import SpreadTradingApp
def main():
    """Start the VN Trader desktop application."""
    qapp = create_qapp()

    event_engine = EventEngine()
    main_engine = MainEngine(event_engine)

    # Gateways enabled for this deployment. Additional gateways from
    # vnpy.gateway can be registered the same way.
    for gateway_cls in (BitmexGateway, BitfinexGateway, CoinbaseGateway):
        main_engine.add_gateway(gateway_cls)

    # Trading applications shown in the UI.
    for app_cls in (CtaStrategyApp, CtaBacktesterApp, SpreadTradingApp):
        main_engine.add_app(app_cls)

    main_window = MainWindow(main_engine, event_engine)
    main_window.showMaximized()

    qapp.exec()
if __name__ == "__main__":
main()
| 36.229167 | 55 | 0.797297 |
from vnpy.event import EventEngine
from vnpy.trader.engine import MainEngine
from vnpy.trader.ui import MainWindow, create_qapp
from vnpy.gateway.bitmex import BitmexGateway
from vnpy.gateway.bitfinex import BitfinexGateway
from vnpy.gateway.okexf import OkexfGateway
from vnpy.gateway.okexs import OkexsGateway
from vnpy.gateway.da import DaGateway
from vnpy.gateway.coinbase import CoinbaseGateway
from vnpy.app.cta_strategy import CtaStrategyApp
from vnpy.app.cta_backtester import CtaBacktesterApp
from vnpy.app.script_trader import ScriptTraderApp
from vnpy.app.rpc_service import RpcServiceApp
from vnpy.app.spread_trading import SpreadTradingApp
def main():
qapp = create_qapp()
event_engine = EventEngine()
main_engine = MainEngine(event_engine)
main_engine.add_gateway(BitmexGateway)
main_engine.add_gateway(BitfinexGateway)
main_engine.add_gateway(CoinbaseGateway)
main_engine.add_app(CtaStrategyApp)
main_engine.add_app(CtaBacktesterApp)
main_engine.add_app(SpreadTradingApp)
main_window = MainWindow(main_engine, event_engine)
main_window.showMaximized()
qapp.exec()
if __name__ == "__main__":
main()
| true | true |
1c31e8eafc6d1dc8ae142df00cc0a77b3b1bfb28 | 2,284 | py | Python | tests/test_ext_graphviz.py | merwok-forks/sphinx | b7cada236f765003a73ab5dca48f975d54c0c298 | [
"BSD-2-Clause"
] | null | null | null | tests/test_ext_graphviz.py | merwok-forks/sphinx | b7cada236f765003a73ab5dca48f975d54c0c298 | [
"BSD-2-Clause"
] | null | null | null | tests/test_ext_graphviz.py | merwok-forks/sphinx | b7cada236f765003a73ab5dca48f975d54c0c298 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
test_ext_graphviz
~~~~~~~~~~~~~~~~~
Test sphinx.ext.graphviz extension.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import pytest
@pytest.mark.sphinx('html', testroot='ext-graphviz')
@pytest.mark.usefixtures('if_graphviz_found')
def test_graphviz_html(app, status, warning):
    """HTML builder renders graphviz figures, inline graphs, alt text and alignment."""
    app.builder.build_all()
    content = (app.outdir / 'index.html').text()
    # Figure with a caption below the rendered image.
    html = (r'<div class="figure" .*?>\s*<img .*?/>\s*<p class="caption">'
            r'<span class="caption-text">caption of graph</span>.*</p>\s*</div>')
    assert re.search(html, content, re.S)
    # Graph embedded inline in running text.
    html = 'Hello <img .*?/>\n graphviz world'
    assert re.search(html, content, re.S)
    # The dot source is preserved as the image's alt text.
    html = '<img src=".*?" alt="digraph {\n bar -> baz\n}" />'
    assert re.search(html, content, re.M)
    # Right-aligned figure variant.
    html = (r'<div class="figure align-right" .*?>\s*<img .*?/>\s*<p class="caption">'
            r'<span class="caption-text">on right</span>.*</p>\s*</div>')
    assert re.search(html, content, re.S)
@pytest.mark.sphinx('latex', testroot='ext-graphviz')
@pytest.mark.usefixtures('if_graphviz_found')
def test_graphviz_latex(app, status, warning):
    """LaTeX builder emits \\includegraphics wrapped in figure/wrapfigure as needed."""
    app.builder.build_all()
    content = (app.outdir / 'SphinxTests.tex').text()
    # Captioned graph becomes a centered figure environment.
    macro = ('\\\\begin{figure}\\[htbp\\]\n\\\\centering\n\\\\capstart\n\n'
             '\\\\includegraphics{graphviz-\\w+.pdf}\n'
             '\\\\caption{caption of graph}\\\\label{.*}\\\\end{figure}')
    assert re.search(macro, content, re.S)
    # Inline graph is included directly in the running text.
    macro = 'Hello \\\\includegraphics{graphviz-\\w+.pdf} graphviz world'
    assert re.search(macro, content, re.S)
    # Right-aligned graph uses a wrapfigure environment.
    macro = ('\\\\begin{wrapfigure}{r}{0pt}\n\\\\centering\n'
             '\\\\includegraphics{graphviz-\\w+.pdf}\n'
             '\\\\caption{on right}\\\\label{.*}\\\\end{wrapfigure}')
    assert re.search(macro, content, re.S)
@pytest.mark.sphinx('html', testroot='ext-graphviz', confoverrides={'language': 'xx'})
@pytest.mark.usefixtures('if_graphviz_found')
def test_graphviz_i18n(app, status, warning):
    """With a translation language set, the translated dot source appears as alt text."""
    app.builder.build_all()
    content = (app.outdir / 'index.html').text()
    html = '<img src=".*?" alt="digraph {\n BAR -> BAZ\n}" />'
    assert re.search(html, content, re.M)
| 34.606061 | 86 | 0.613398 |
import re
import pytest
@pytest.mark.sphinx('html', testroot='ext-graphviz')
@pytest.mark.usefixtures('if_graphviz_found')
def test_graphviz_html(app, status, warning):
app.builder.build_all()
content = (app.outdir / 'index.html').text()
html = (r'<div class="figure" .*?>\s*<img .*?/>\s*<p class="caption">'
r'<span class="caption-text">caption of graph</span>.*</p>\s*</div>')
assert re.search(html, content, re.S)
html = 'Hello <img .*?/>\n graphviz world'
assert re.search(html, content, re.S)
html = '<img src=".*?" alt="digraph {\n bar -> baz\n}" />'
assert re.search(html, content, re.M)
html = (r'<div class="figure align-right" .*?>\s*<img .*?/>\s*<p class="caption">'
r'<span class="caption-text">on right</span>.*</p>\s*</div>')
assert re.search(html, content, re.S)
@pytest.mark.sphinx('latex', testroot='ext-graphviz')
@pytest.mark.usefixtures('if_graphviz_found')
def test_graphviz_latex(app, status, warning):
app.builder.build_all()
content = (app.outdir / 'SphinxTests.tex').text()
macro = ('\\\\begin{figure}\\[htbp\\]\n\\\\centering\n\\\\capstart\n\n'
'\\\\includegraphics{graphviz-\\w+.pdf}\n'
'\\\\caption{caption of graph}\\\\label{.*}\\\\end{figure}')
assert re.search(macro, content, re.S)
macro = 'Hello \\\\includegraphics{graphviz-\\w+.pdf} graphviz world'
assert re.search(macro, content, re.S)
macro = ('\\\\begin{wrapfigure}{r}{0pt}\n\\\\centering\n'
'\\\\includegraphics{graphviz-\\w+.pdf}\n'
'\\\\caption{on right}\\\\label{.*}\\\\end{wrapfigure}')
assert re.search(macro, content, re.S)
@pytest.mark.sphinx('html', testroot='ext-graphviz', confoverrides={'language': 'xx'})
@pytest.mark.usefixtures('if_graphviz_found')
def test_graphviz_i18n(app, status, warning):
app.builder.build_all()
content = (app.outdir / 'index.html').text()
html = '<img src=".*?" alt="digraph {\n BAR -> BAZ\n}" />'
assert re.search(html, content, re.M)
| true | true |
1c31eaa2cf359f1c2f09be374a8e93c2bb06a001 | 41,569 | py | Python | turfpy/measurement.py | synapticarbors/turfpy | fc25954354f08c4d326aea6303314962ee97688c | [
"MIT"
] | null | null | null | turfpy/measurement.py | synapticarbors/turfpy | fc25954354f08c4d326aea6303314962ee97688c | [
"MIT"
] | null | null | null | turfpy/measurement.py | synapticarbors/turfpy | fc25954354f08c4d326aea6303314962ee97688c | [
"MIT"
] | null | null | null | """
This module implements some of the spatial analysis techniques and processes used to
understand the patterns and relationships of geographic features.
This is mainly inspired by turf.js.
link: http://turfjs.org/
"""
from math import asin, atan2, cos, degrees, log, pi, pow, radians, sin, sqrt, tan
from typing import Optional, Union
from geojson import (
Feature,
FeatureCollection,
LineString,
MultiLineString,
MultiPoint,
MultiPolygon,
Point,
Polygon,
)
from turfpy.helper import (
avg_earth_radius_km,
convert_length,
feature_of,
get_coord,
get_coords,
get_geom,
get_type,
length_to_radians,
radians_to_length,
)
from turfpy.meta import (
coord_each,
feature_each,
geom_each,
geom_reduce,
segment_each,
segment_reduce,
)
# ---------- Bearing -----------#
def bearing(start: Feature, end: Feature, final=False) -> float:
    """Find the geographic bearing from ``start`` to ``end``.

    :param start: Point feature where the bearing is measured.
    :param end: Point feature the bearing points towards.
    :param final: if True, return the final bearing (measured at ``end``).
    :return: bearing in degrees, clockwise from north.

    Example:

    >>> from geojson import Point, Feature
    >>> from turfpy import measurement
    >>> start = Feature(geometry=Point((-75.343, 39.984)))
    >>> end = Feature(geometry=Point((-75.534, 39.123)))
    >>> measurement.bearing(start,end)
    """
    if final:
        return _calculate_final_bearing(start, end)
    start_lon, start_lat = start["geometry"]["coordinates"][:2]
    end_lon, end_lat = end["geometry"]["coordinates"][:2]
    lon1, lat1 = radians(float(start_lon)), radians(float(start_lat))
    lon2, lat2 = radians(float(end_lon)), radians(float(end_lat))
    delta_lon = lon2 - lon1
    # Standard forward-azimuth formula on a sphere.
    y = sin(delta_lon) * cos(lat2)
    x = cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(delta_lon)
    return degrees(atan2(y, x))
def _calculate_final_bearing(start, end) -> float:
    """Return the final bearing at ``end``: the reversed end-to-start bearing, in [0, 360)."""
    bear = bearing(end, start)
    bear = (bear + 180) % 360
    return bear
# -------------------------------#
# ---------- Distance -----------#
def distance(point1: Feature, point2: Feature, units: str = "km"):
"""
Calculates distance between two Points. A point is containing latitude and
logitude in decimal degrees and ``unit`` is optional.
It calculates distance in units such as kilometers, meters, miles, feet and inches.
:param point1: first point; tuple of (latitude, longitude) in decimal degrees.
:param point2: second point; tuple of (latitude, longitude) in decimal degrees.
:param units: A string containing unit, E.g. kilometers = 'km', miles = 'mi',
meters = 'm', feet = 'ft', inches = 'in'.
:return: The distance between the two points in the requested unit, as a float.
Example:
>>> from turfpy import measurement
>>> from geojson import Point, Feature
>>> start = Feature(geometry=Point((-75.343, 39.984)))
>>> end = Feature(geometry=Point((-75.534, 39.123)))
>>> measurement.distance(start,end)
"""
coordinates1 = get_coord(point1)
coordinates2 = get_coord(point2)
dlat = radians((coordinates2[1] - coordinates1[1]))
dlon = radians((coordinates2[0] - coordinates1[0]))
lat1 = radians(coordinates1[1])
lat2 = radians(coordinates2[1])
a = pow(sin(dlat / 2), 2) + pow(sin(dlon / 2), 2) * cos(lat1) * cos(lat2)
b = 2 * atan2(sqrt(a), sqrt(1 - a))
return radians_to_length(b, units)
# -------------------------------#
# ----------- Area --------------#
def area(
    geojson: Union[
        Point,
        LineString,
        Polygon,
        MultiPoint,
        MultiLineString,
        MultiPolygon,
        Feature,
        FeatureCollection,
    ]
):
    """
    This function calculates the area of the Geojson object given as input.

    :param geojson: Geojson object for which area is to be found.
    :return: area for the given Geojson object in square meters.

    Example:

    >>> from turfpy.measurement import area
    >>> from geojson import Feature, FeatureCollection
    >>> geometry_1 = {"coordinates": [[[0, 0], [0, 10], [10, 10], [10, 0], [0, 0]]],"type": "Polygon"}  # noqa E501
    >>> geometry_2 = {"coordinates": [[[2.38, 57.322], [23.194, -20.28], [-120.43, 19.15],[2.38, 57.322]]], "type": "Polygon"}  # noqa E501
    >>> feature_1 = Feature(geometry=geometry_1)
    >>> feature_2 = Feature(geometry=geometry_2)
    >>> feature_collection = FeatureCollection([feature_1, feature_2])
    >>> area(feature_collection)
    """
    # geom_reduce accumulates per-geometry ring areas (square meters) over the input.
    return geom_reduce(geojson, 0)
# -------------------------------#
# ----------- BBox --------------#
def bbox(geojson):
    """Compute the [west, south, east, north] bounding box of a GeoJSON object.

    :param geojson: Geojson object for which the bounding box is to be found.
    :return: bounding box for the given Geojson object as a 4-element list.

    Example :

    >>> from turfpy.measurement import bbox
    >>> from geojson import Polygon
    >>> p = Polygon([(2.38, 57.322), (23.194, -20.28), (-120.43, 19.15),(2.38, 57.322)])
    >>> bb = bbox(p)
    """
    # [min_x, min_y, max_x, max_y], seeded so any real coordinate replaces it.
    box = [float("inf"), float("inf"), float("-inf"), float("-inf")]

    def _expand(coord, coord_index, feature_index, multi_feature_index, geometry_index):
        # Widen the running box so it contains this coordinate.
        box[0] = min(box[0], coord[0])
        box[1] = min(box[1], coord[1])
        box[2] = max(box[2], coord[0])
        box[3] = max(box[3], coord[1])

    coord_each(geojson, _expand)
    return box
# -------------------------------#
# ----------- BBoxPolygon --------------#
def bbox_polygon(bbox: list, properties: Optional[dict] = None) -> Feature:
    """Generate a rectangular Polygon Feature from a bounding box.

    :param bbox: bounding box as [west, south, east, north].
    :param properties: optional dict whose "properties", "id" and "bbox" keys
        are copied onto the returned feature. (Default was a mutable ``{}``,
        which is shared between calls; ``None`` is used instead.)
    :return: Polygon feature covering the given bounding box.
    :raises Exception: if a 3-D, 6-position bounding box is supplied.

    Example :

    >>> from turfpy.measurement import bbox_polygon, bbox
    >>> from geojson import Polygon
    >>> p = Polygon([((2.38, 57.322), (23.194, -20.28), (-120.43, 19.15),
    ...     (2.38, 57.322))])
    >>> bb = bbox(p)
    >>> feature = bbox_polygon(bb)
    """
    if properties is None:
        properties = {}
    # Reject 3-D bounding boxes before reading any coordinate values.
    if len(bbox) == 6:
        raise Exception("bbox-polygon does not support BBox with 6 positions")

    west = float(bbox[0])
    south = float(bbox[1])
    east = float(bbox[2])
    north = float(bbox[3])

    low_left = (west, south)
    top_left = (west, north)
    top_right = (east, north)
    low_right = (east, south)

    # Closed ring, counter-clockwise starting from the lower-left corner.
    polygon = Polygon([(low_left, low_right, top_right, top_left, low_left)])
    feature_bbox = Feature(geometry=polygon)
    feature_bbox.properties = properties.get("properties", {})
    if "id" in properties:
        feature_bbox.id = properties["id"]
    if "bbox" in properties:
        feature_bbox.bbox = properties["bbox"]
    return feature_bbox
# -------------------------------#
# ----------- Center --------------#
def center(geojson, properties: Optional[dict] = None) -> Feature:
    """Return the absolute center point of all input features.

    :param geojson: GeoJSON for which the center is to be calculated.
    :param properties: optional dict whose "properties", "id" and "bbox" keys
        are copied onto the returned feature.
    :return: Point feature at the center of the input's bounding box.

    Example :

    >>> from turfpy.measurement import center
    >>> from geojson import Feature, FeatureCollection, Point
    >>> f1 = Feature(geometry=Point((-97.522259, 35.4691)))
    >>> f2 = Feature(geometry=Point((-97.502754, 35.463455)))
    >>> f3 = Feature(geometry=Point((-97.508269, 35.463245)))
    >>> feature_collection = FeatureCollection([f1, f2, f3])
    >>> feature = center(feature_collection)
    """
    west, south, east, north = bbox(geojson)
    # The center is the midpoint of the bounding box, not the centroid.
    center_feature = Feature(geometry=Point(((west + east) / 2, (south + north) / 2)))
    opts = {} if properties is None else properties
    center_feature.properties = opts.get("properties", {})
    if "id" in opts:
        center_feature.id = opts["id"]
    if "bbox" in opts:
        center_feature.bbox = opts["bbox"]
    return center_feature
# -------------------------------#
# ----------- Envelope --------------#
def envelope(geojson) -> Feature:
    """
    Takes any number of features and returns a rectangular Polygon that encompasses all
    vertices.

    :param geojson: geojson input features for which envelope to be generated.
    :return: returns envelope i.e bounding box polygon.

    Example :

    >>> from turfpy.measurement import envelope
    >>> from geojson import Feature, FeatureCollection, Point
    >>> f1 = Feature(geometry=Point((-97.522259, 35.4691)))
    >>> f2 = Feature(geometry=Point((-97.502754, 35.463455)))
    >>> f3 = Feature(geometry=Point((-97.508269, 35.463245)))
    >>> feature_collection = FeatureCollection([f1, f2, f3])
    >>> feature = envelope(feature_collection)
    """
    # The envelope is simply the polygon of the input's bounding box.
    return bbox_polygon(bbox(geojson))
# -------------------------------#
# ----------- Length --------------#
def length(geojson, units: str = "km"):
"""
Takes a geojson and measures its length in the specified units.
:param geojson: geojson for which the length is to be determined.
:param units: units in which length is to be returned.
:return: length of the geojson in specified units.
Example:
>>> from turfpy.measurement import length
>>> from geojson import LineString
>>> ls = LineString([(115, -32), (131, -22), (143, -25), (150, -34)])
>>> length(ls)
"""
def _callback_segment_reduce(previous_value, segment):
coords = segment["geometry"]["coordinates"]
return previous_value + distance(
Feature(geometry=Point(coords[0])), Feature(geometry=Point(coords[1])), units
)
return segment_reduce(geojson, _callback_segment_reduce, 0)
# -------------------------------#
# ----------- Destination --------------#
def destination(origin: Feature, distance, bearing, options: Optional[dict] = None) -> Feature:
    """Locate the destination point from an origin, a distance and a bearing.

    :param origin: start Point feature.
    :param distance: distance from the origin to the destination.
    :param bearing: direction (degrees, clockwise from north) to travel.
        NOTE: the ``distance``/``bearing`` parameter names shadow the
        module-level functions of the same name inside this scope; they are
        kept for interface compatibility.
    :param options: optional dict with 'units' ('mi', 'km', 'deg' or 'rad')
        and 'properties' for the returned feature. (Default was a mutable
        ``{}``, which is shared between calls; ``None`` is used instead.)
    :return: destination Point feature at the given distance and direction.

    Example:

    >>> from turfpy.measurement import destination
    >>> from geojson import Point, Feature
    >>> origin = Feature(geometry=Point([-75.343, 39.984]))
    >>> distance = 50
    >>> bearing = 90
    >>> options = {'units': 'mi'}
    >>> destination(origin,distance,bearing,options)
    """
    if options is None:
        options = {}
    coordinates1 = origin["geometry"]["coordinates"]
    longitude1 = radians(float(coordinates1[0]))
    latitude1 = radians(float(coordinates1[1]))
    bearing_rad = radians(float(bearing))
    # Angular distance on the unit sphere, honoring the requested units.
    if "units" in options:
        radian = length_to_radians(distance, options["units"])
    else:
        radian = length_to_radians(distance)

    # Direct geodesic problem on a sphere.
    latitude2 = asin(
        (sin(latitude1) * cos(radian)) + (cos(latitude1) * sin(radian) * cos(bearing_rad))
    )
    longitude2 = longitude1 + atan2(
        sin(bearing_rad) * sin(radian) * cos(latitude1),
        cos(radian) - sin(latitude1) * sin(latitude2),
    )

    point = Point((degrees(longitude2), degrees(latitude2)))
    return Feature(geometry=point, properties=options.get("properties", {}))
# -------------------------------#
# ----------- Centroid --------------#
def centroid(geojson, properties: Optional[dict] = None) -> Feature:
    """
    Takes one or more features and calculates the centroid using the mean of all vertices.

    :param geojson: Input features
    :param properties: Properties to be set to the output Feature point
    :return: Feature: Point feature which is the centroid of the given features

    Example:

    >>> from turfpy.measurement import centroid
    >>> from geojson import Polygon
    >>> polygon = Polygon([((-81, 41), (-88, 36), (-84, 31), (-80, 33), (-77, 39),
    (-81, 41))])
    >>> centroid(polygon)
    """
    # Running coordinate sums and vertex count; the centroid is the mean vertex.
    x_sum = 0
    y_sum = 0
    # NOTE(review): this local shadows the module-level `length` function in this scope.
    length = 0

    def _callback_coord_each(
        coord, coord_index, feature_index, multi_feature_index, geometry_index
    ):
        nonlocal x_sum, y_sum, length
        x_sum += coord[0]
        y_sum += coord[1]
        length += 1

    coord_each(geojson, _callback_coord_each)
    point = Point((x_sum / length, y_sum / length))
    return Feature(geometry=point, properties=properties if properties else {})
# -------------------------------#
# ----------- Along --------------#
def along(line: Feature, dist, unit: str = "km") -> Feature:
"""
This function is used identify a Point at a specified distance along a LineString.
:param line: LineString on which the point to be identified
:param dist: Distance from the start of the LineString
:param unit: unit of distance
:return: Feature : Point at the distance on the LineString passed
Example :
>>> from turfpy.measurement import along
>>> from geojson import LineString, Feature
>>> ls = Feature(geometry=LineString([(-83, 30), (-84, 36), (-78, 41)]))
>>> along(ls,200,'mi')
"""
if line["type"] == "Feature":
geom = line["geometry"]
else:
geom = line
coords = geom["coordinates"]
travelled = 0
options = {"units": unit}
for i in range(0, len(coords)):
if dist >= travelled and i == (len(coords) - 1):
break
elif travelled >= dist:
overshot = dist - travelled
if not overshot:
return Feature(geometry=Point(coords[i]))
else:
direction = (
bearing(
Feature(geometry=Point(coords[i])),
Feature(geometry=Point(coords[i - 1])),
)
- 180
)
interpolated = destination(
Feature(geometry=Point(coords[i])), overshot, direction, options
)
return interpolated
else:
travelled += distance(
Feature(geometry=Point(coords[i])),
Feature(geometry=Point(coords[i + 1])),
unit,
)
point = Point(coords[len(coords) - 1])
return Feature(geometry=point)
# -------------------------------#
# ----------- Midpoint --------------#
def midpoint(point1: Feature, point2: Feature) -> Feature:
    """Return the great-circle midpoint between two point features.

    :param point1: first Point feature.
    :param point2: second Point feature.
    :return: Point feature halfway between the two inputs.

    Example:

    >>> from turfpy.measurement import midpoint
    >>> from geojson import Point, Feature
    >>> point1 = Feature(geometry=Point((144.834823, -37.771257)))
    >>> point2 = Feature(geometry=Point((145.14244, -37.830937)))
    >>> midpoint(point1, point2)
    """
    # Travel half the total distance along the initial bearing.
    half_distance = distance(point1, point2) / 2
    heading = bearing(point1, point2)
    return destination(point1, half_distance, heading)
# -------------------------------#
# ----------- nearest point --------------#
def nearest_point(target_point: Feature, points: FeatureCollection) -> Feature:
    """Find the feature in ``points`` closest to ``target_point``.

    The returned feature is the one from the input collection, with
    ``featureIndex`` and ``distanceToPoint`` written into its properties
    (the input collection is therefore mutated, mirroring turf.js).

    :param target_point: reference Point feature.
    :param points: FeatureCollection of candidate point features.
    :return: the closest Point feature from the collection.

    Example:

    >>> from turfpy.measurement import nearest_point
    >>> from geojson import Point, Feature, FeatureCollection
    >>> f1 = Feature(geometry=Point((28.96991729736328,41.01190001748873)))
    >>> f2 = Feature(geometry=Point((28.948459, 41.024204)))
    >>> f3 = Feature(geometry=Point((28.938674, 41.013324)))
    >>> fc = FeatureCollection([f1, f2 ,f3])
    >>> t = Feature(geometry=Point((28.973865, 41.011122)))
    >>> nearest_point(t ,fc)
    """
    if not target_point:
        raise Exception("target_point is required")
    if not points:
        raise Exception("points is required")

    # Track the closest candidate seen so far; mutated by the closure below.
    best = {"index": 0, "distance": float("inf")}

    def _consider(pt, feature_index):
        candidate_distance = distance(target_point, pt)
        if float(candidate_distance) < best["distance"]:
            best["index"] = feature_index
            best["distance"] = candidate_distance

    feature_each(points, _consider)

    nearest = points["features"][best["index"]]
    nearest["properties"]["featureIndex"] = best["index"]
    nearest["properties"]["distanceToPoint"] = best["distance"]
    return nearest
# -------------------------------#
# ----------- point on feature --------------#
def point_on_feature(geojson) -> Feature:
    """
    Takes a Feature or FeatureCollection and returns a Point guaranteed to be on the
    surface of the feature.
    :param geojson: Feature or FeatureCollection on which the Point is to be found.
    :return: Feature point which lies on the provided feature.
    Example:
    >>> from turfpy.measurement import point_on_feature
    >>> from geojson import Polygon, Feature
    >>> point = Polygon([((116, -36), (131, -32), (146, -43), (155, -25), (133, -9),
    (111, -22), (116, -36))])
    >>> feature = Feature(geometry=point)
    >>> point_on_feature(feature)
    """
    fc = _normalize(geojson)
    # Candidate: centroid of all coordinates; accept it if it already lies on
    # one of the input geometries.
    cent = centroid(fc)
    on_surface = False
    i = 0
    while not on_surface and i < len(fc["features"]):
        on_line = False
        geom = fc["features"][i]["geometry"]
        if geom["type"] == "Point":
            # must coincide exactly with the point
            if (
                cent["geometry"]["coordinates"][0] == geom["coordinates"][0]
                and cent["geometry"]["coordinates"][1] == geom["coordinates"][1]
            ):
                on_surface = True
        elif geom["type"] == "MultiPoint":
            on_multi_point = False
            k = 0
            while not on_multi_point and k < len(geom["coordinates"]):
                if (
                    cent["geometry"]["coordinates"][0] == geom["coordinates"][k][0]
                    and cent["geometry"]["coordinates"][1] == geom["coordinates"][k][1]
                ):
                    on_surface = True
                    on_multi_point = True
                k += 1
        elif geom["type"] == "LineString":
            # centroid must fall on one of the line's segments
            k = 0
            while not on_line and k < len(geom["coordinates"]) - 1:
                x = cent["geometry"]["coordinates"][0]
                y = cent["geometry"]["coordinates"][1]
                x1 = geom["coordinates"][k][0]
                y1 = geom["coordinates"][k][1]
                x2 = geom["coordinates"][k + 1][0]
                y2 = geom["coordinates"][k + 1][1]
                if _point_on_segment(x, y, x1, y1, x2, y2):
                    on_line = True
                    on_surface = True
                k += 1
        elif geom["type"] == "MultiLineString":
            j = 0
            while j < len(geom["coordinates"]):
                on_line = False
                k = 0
                line = geom["coordinates"][j]
                while not on_line and k < len(line) - 1:
                    x = cent["geometry"]["coordinates"][0]
                    y = cent["geometry"]["coordinates"][1]
                    x1 = line[k][0]
                    y1 = line[k][1]
                    x2 = line[k + 1][0]
                    y2 = line[k + 1][1]
                    if _point_on_segment(x, y, x1, y1, x2, y2):
                        on_line = True
                        on_surface = True
                    k += 1
                j += 1
        elif geom["type"] == "Polygon" or geom["type"] == "MultiPolygon":
            # inside the (multi)polygon counts as "on the surface"
            if boolean_point_in_polygon(cent, geom):
                on_surface = True
        i += 1
    if on_surface:
        return cent
    else:
        # Centroid is off-surface: fall back to the input vertex nearest to it.
        vertices_list = []
        for i in range(0, len(fc["features"])):
            vertices_list.extend(explode(fc["features"][i])["features"])
        vertices = FeatureCollection(vertices_list)
        point = Point(nearest_point(cent, vertices)["geometry"]["coordinates"])
        return Feature(geometry=point)
def _normalize(geojson):
    """Wrap a bare geometry or a single Feature into a FeatureCollection."""
    if geojson["type"] == "FeatureCollection":
        return geojson
    if geojson["type"] == "Feature":
        return FeatureCollection([geojson])
    return FeatureCollection([Feature(geometry=geojson)])
def _point_on_segment(x, y, x1, y1, x2, y2):
ab = sqrt((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1))
ap = sqrt((x - x1) * (x - x1) + (y - y1) * (y - y1))
pb = sqrt((x2 - x) * (x2 - x) + (y2 - y) * (y2 - y))
return ab == (ap + pb)
# -------------------------------#
# ------------ boolean point in polygon ----------------#
def boolean_point_in_polygon(point, polygon, ignore_boundary=False):
    """Return True when the given Point lies inside the given (Multi)Polygon.

    :param point: Point or Point Feature.
    :param polygon: Polygon or Polygon Feature.
    :param ignore_boundary: [Optional] default value is False, specify whether to exclude
        boundary of the given polygon or not.
    :return: True if the given Point is in Polygons else False
    Example:
    >>> from turfpy.measurement import boolean_point_in_polygon
    >>> from geojson import Point, MultiPolygon, Feature
    >>> point = Feature(geometry=Point((-77, 44)))
    >>> polygon = Feature(geometry=MultiPolygon([([(-81, 41), (-81, 47), (-72, 47),
    (-72, 41), (-81, 41)],),
    >>> ([(3.78, 9.28), (-130.91, 1.52), (35.12, 72.234), (3.78, 9.28)],)]))
    >>> boolean_point_in_polygon(point, polygon)
    """
    if not point:
        raise Exception("point is required")
    if not polygon:
        raise Exception("polygon is required")
    pt = get_coord(point)
    geom = get_geom(polygon)
    # cheap rejection when the polygon carries a bbox
    bounding_box = polygon.get("bbox", None)
    if bounding_box and not in_bbox(pt, bounding_box):
        return False
    rings_per_poly = geom["coordinates"]
    if geom["type"] == "Polygon":
        rings_per_poly = [rings_per_poly]
    for rings in rings_per_poly:
        # rings[0] is the outer ring, the rest are holes
        if not in_ring(pt, rings[0], ignore_boundary):
            continue
        in_any_hole = False
        for hole in rings[1:]:
            if in_ring(pt, hole, not ignore_boundary):
                in_any_hole = True
                break
        if not in_any_hole:
            return True
    return False
def in_ring(pt, ring, ignore_boundary):
    """Ray-casting point-in-ring test.

    ``ring`` is a list of [x, y] vertices (optionally closed).  Points that
    fall exactly on an edge return ``not ignore_boundary``.
    """
    x, y = pt[0], pt[1]
    # drop the duplicated closing vertex, if present
    if ring[0][0] == ring[-1][0] and ring[0][1] == ring[-1][1]:
        ring = ring[:-1]
    inside = False
    prev = len(ring) - 1
    for cur in range(len(ring)):
        xi, yi = ring[cur][0], ring[cur][1]
        xj, yj = ring[prev][0], ring[prev][1]
        # collinear with the edge AND inside its x/y extent -> boundary
        collinear = y * (xi - xj) + yi * (xj - x) + yj * (x - xi) == 0
        if collinear and (xi - x) * (xj - x) <= 0 and (yi - y) * (yj - y) <= 0:
            return not ignore_boundary
        # does a horizontal ray from pt cross this edge?
        if (yi > y) != (yj > y) and x < (xj - xi) * (y - yi) / (yj - yi) + xi:
            inside = not inside
        prev = cur
    return inside
def in_bbox(pt, bbox):
    """True when ``pt`` (x, y) falls within ``bbox`` [west, south, east, north]."""
    west, south, east, north = bbox[0], bbox[1], bbox[2], bbox[3]
    return west <= pt[0] <= east and south <= pt[1] <= north
# -------------------------------#
# ------------ Explode -----------#
def explode(geojson):
    # Flatten any GeoJSON input into a FeatureCollection containing one Point
    # Feature per coordinate, carrying over the source feature's properties.
    points = []
    if geojson["type"] == "FeatureCollection":
        def _callback_feature_each(feature, feature_index):
            # collect every coordinate of this feature as a Point Feature
            def _callback_coord_each(
                coord,
                coord_index,
                feature_index,
                multi_feature_index,
                geometry_index,
            ):
                nonlocal points
                point = Point(coord)
                points.append(Feature(geometry=point, properties=feature["properties"]))
            coord_each(feature, _callback_coord_each)
        feature_each(geojson, _callback_feature_each)
    else:
        # single Feature (or bare geometry with a "properties" key)
        def _callback_coord_each(
            coord,
            coord_index,
            feature_index,
            multi_feature_index,
            geometry_index,
        ):
            nonlocal points, geojson
            point = Point(coord)
            points.append(Feature(geometry=point, properties=geojson["properties"]))
        coord_each(geojson, _callback_coord_each)
    return FeatureCollection(points)
# -------------------------------#
# ------------ polygon tangents -----------#
def polygon_tangents(point, polygon):
    """
    Finds the tangents of a (Multi)Polygon from a Point.
    :param point: Point or Point Feature.
    :param polygon: (Multi)Polygon or (Multi)Polygon Feature.
    :return: FeatureCollection of two tangent Point Feature.
    Example:
    >>> from turfpy.measurement import polygon_tangents
    >>> from geojson import Polygon, Point, Feature
    >>> point = Feature(geometry=Point((61, 5)))
    >>> polygon = Feature(geometry=Polygon([(11, 0), (22, 4), (31, 0), (31, 11),
    ... (21, 15), (11, 11), (11, 0)]))
    >>> polygon_tangents(point, polygon)
    """
    point_coords = get_coords(point)
    poly_coords = get_coords(polygon)
    enext = 0
    bbox_points = bbox(polygon)
    nearest_pt_index = 0
    nearest = None
    # Only search for the nearest vertex when the point falls inside the
    # polygon's bounding box; otherwise vertex 0 is a fine starting guess.
    if (
        bbox_points[0] < point_coords[0] < bbox_points[2]
        and bbox_points[1] < point_coords[1] < bbox_points[3]
    ):
        nearest = nearest_point(point, explode(polygon))
        # BUGFIX: nearest["properties"] is a plain dict, so the previous
        # attribute access (nearest.properties.featureIndex) raised
        # AttributeError; nearest_point stores the index under this key.
        nearest_pt_index = nearest["properties"]["featureIndex"]
    geo_type = get_type(polygon)
    if geo_type == "Polygon":
        rtan = poly_coords[0][nearest_pt_index]
        ltan = poly_coords[0][0]
        if nearest:
            if nearest["geometry"]["coordinates"][1] < point_coords[1]:
                ltan = poly_coords[0][nearest_pt_index]
        eprev = _is_left(
            poly_coords[0][0],
            poly_coords[0][len(poly_coords[0]) - 1],
            point_coords,
        )
        out = process_polygon(poly_coords[0], point_coords, eprev, enext, rtan, ltan)
        rtan = out[0]
        ltan = out[1]
    elif geo_type == "MultiPolygon":
        # translate the flat vertex index back into (polygon, vertex) indices
        closest_feature = 0
        closest_vertex = 0
        vertices_counted = 0
        for i in range(0, len(poly_coords[0])):
            closest_feature = i
            vertice_found = False
            for i2 in range(0, len(poly_coords[0][i])):
                closest_vertex = i2
                if vertices_counted == nearest_pt_index:
                    vertice_found = True
                    break
                vertices_counted += 1
            if vertice_found:
                break
        rtan = poly_coords[0][closest_feature][closest_vertex]
        ltan = poly_coords[0][closest_feature][closest_vertex]
        eprev = _is_left(
            poly_coords[0][0][0],
            poly_coords[0][0][len(poly_coords[0][0]) - 1],
            point_coords,
        )
        for ring in poly_coords:
            out = process_polygon(ring[0], point_coords, eprev, enext, rtan, ltan)
            rtan = out[0]
            ltan = out[1]
    return FeatureCollection(
        [Feature(geometry=Point(rtan)), Feature(geometry=Point(ltan))]
    )
def process_polygon(polygon_coords, pt_coords, eprev, enext, rtan, ltan):
    """Sweep the ring once, updating the right/left tangent candidates."""
    n = len(polygon_coords)
    for idx, current in enumerate(polygon_coords):
        nxt = polygon_coords[(idx + 1) % n]
        enext = _is_left(current, nxt, pt_coords)
        if eprev <= 0 < enext:
            # sign flip negative -> positive: candidate right tangent
            if not _is_below(pt_coords, current, rtan):
                rtan = current
        elif enext <= 0 < eprev:
            # sign flip positive -> negative: candidate left tangent
            if not _is_above(pt_coords, current, ltan):
                ltan = current
        eprev = enext
    return [rtan, ltan]
def _is_above(point1, point2, point3):
    # Positive cross product: point3 lies to the left of the ray point1->point2.
    return _is_left(point1, point2, point3) > 0
def _is_below(point1, point2, point3):
    # Negative cross product: point3 lies to the right of the ray point1->point2.
    return _is_left(point1, point2, point3) < 0
def _is_left(point1, point2, point3):
return (point2[0] - point1[0]) * (point3[1] - point1[1]) - (point3[0] - point1[0]) * (
point2[1] - point1[1]
)
# -------------------------------#
# ------------ point to line distance -----------#
def point_to_line_distance(point: Feature, line: Feature, units="km", method="geodesic"):
    """
    Returns the minimum distance between a Point and any segment of the LineString.
    :param point: Point Feature from which distance to be measured.
    :param line: LineString Feature up to which distance to be measured.
    :param units: units for distance 'km', 'm', 'mi, 'ft', 'in', 'deg', 'cen', 'rad',
        'naut', 'yd'
    :param method: Method which is used to calculate, values can be 'geodesic' or 'planar'
    :return: Approximate distance between the LineString and Point
    Example:
    >>> from turfpy.measurement import point_to_line_distance
    >>> from geojson import LineString, Point, Feature
    >>> point = Feature(geometry=Point((0, 0)))
    >>> linestring = Feature(geometry=LineString([(1, 1),(-1, 1)]))
    >>> point_to_line_distance(point, linestring)
    """
    if method != "geodesic" and method != "planar":
        raise Exception("method name is incorrect, it should be either geodesic or planar")
    options = {"units": units, "method": method}
    # --- normalize `point` into a Point Feature ---
    if not point:
        raise Exception("pt is required")
    if isinstance(point, list):
        point = Feature(geometry=Point(point))
    elif point["type"] == "Point":
        # BUGFIX: was Feature(point), which passed the geometry as the `id`
        # argument and left the Feature without a geometry.
        point = Feature(geometry=point)
    else:
        feature_of(point, "Point", "point")
    # --- normalize `line` into a LineString Feature ---
    if not line:
        raise Exception("line is required")
    # BUGFIX: was isinstance(point, list) (copy-paste), so a bare coordinate
    # list passed as `line` was never wrapped.
    if isinstance(line, list):
        line = Feature(geometry=LineString(line))
    elif line["type"] == "LineString":
        line = Feature(geometry=line)
    else:
        feature_of(line, "LineString", "line")
    # minimum distance over all segments, computed in degrees then converted
    min_dist = float("inf")
    p = point["geometry"]["coordinates"]

    def _callback_segment_each(
        current_segment,
        feature_index,
        multi_feature_index,
        geometry_index,
        segment_index,
    ):
        nonlocal options, min_dist
        a = current_segment["geometry"]["coordinates"][0]
        b = current_segment["geometry"]["coordinates"][1]
        d = distance_to_segment(p, a, b, options)
        if d < min_dist:
            min_dist = d

    segment_each(line, _callback_segment_each)
    return convert_length(min_dist, "deg", options.get("units", ""))
def distance_to_segment(p, a, b, options):
    """Distance (in degrees) from point ``p`` to the segment ``a``-``b``."""
    seg = [b[0] - a[0], b[1] - a[1]]
    to_p = [p[0] - a[0], p[1] - a[1]]
    deg_opts = {"method": options.get("method", ""), "units": "deg"}
    proj = _dot(to_p, seg)
    if proj <= 0:
        # p projects before a: nearest point is a
        return _calc_distance(p, a, deg_opts)
    seg_sq = _dot(seg, seg)
    if seg_sq <= proj:
        # p projects past b: nearest point is b
        return _calc_distance(p, b, deg_opts)
    t = proj / seg_sq
    foot = [a[0] + (t * seg[0]), a[1] + (t * seg[1])]
    return _calc_distance(p, foot, deg_opts)
def _calc_distance(a, b, options):
    """Dispatch to rhumb ('planar') or great-circle ('geodesic') distance."""
    units = options.get("units", "")
    if options.get("method", "") == "planar":
        return rhumb_distance(a, b, units)
    return distance(Feature(geometry=Point(a)), Feature(geometry=Point(b)), units)
def _dot(u, v):
return u[0] * v[0] + u[1] * v[1]
# -------------------------------#
# ------------ rhumb bearing -----------#
def rhumb_bearing(start, end, final=False):
    """Bearing angle between two points along a rhumb line.

    :param start: Start Point or Point Feature.
    :param end: End Point or Point Feature.
    :param final: Calculates the final bearing if true
    :return: bearing from north in decimal degrees, between -180 and 180 degrees
        (positive clockwise)
    Example:
    >>> from turfpy.measurement import rhumb_bearing
    >>> from geojson import Feature, Point
    >>> start = Feature(geometry=Point((-75.343, 39.984)))
    >>> end = Feature(geometry=Point((-75.534, 39.123)))
    >>> rhumb_bearing(start, end, True)
    """
    if final:
        raw = calculate_rhumb_bearing(get_coord(end), get_coord(start))
    else:
        raw = calculate_rhumb_bearing(get_coord(start), get_coord(end))
    # fold [0, 360) down to (-180, 180]
    return raw - 360 if raw > 180 else raw
def calculate_rhumb_bearing(fro, to):
    """Rhumb-line bearing from ``fro`` to ``to`` in [0, 360) degrees from north."""
    phi1 = radians(fro[1])
    phi2 = radians(to[1])
    delta_lambda = radians(to[0] - fro[0])
    # keep the longitude delta within (-pi, pi]
    if delta_lambda > pi:
        delta_lambda -= 2 * pi
    if delta_lambda < -1 * pi:
        delta_lambda += 2 * pi
    # difference of the Mercator "stretched" latitudes
    delta_psi = log(tan(phi2 / 2 + pi / 4) / tan(phi1 / 2 + pi / 4))
    return (degrees(atan2(delta_lambda, delta_psi)) + 360) % 360
# -------------------------------#
# ------------ rhumb destination -----------#
def rhumb_destination(origin, distance, bearing, options: dict = {}) -> Feature:
    """
    Returns the destination Point having travelled the given distance along a Rhumb line
    from the origin Point with the given constant bearing.
    :param origin: Starting Point
    :param distance: Distance from the starting point
    :param bearing: Constant bearing angle ranging from -180 to 180 degrees from north
    :param options: A dict of two values 'units' for the units of distance provided and
        'properties' that are to be passed to the Destination Feature Point
        Example :- {'units':'mi', 'properties': {"marker-color": "F00"}}
    :return: Destination Feature Point
    Example:
    >>> from turfpy.measurement import rhumb_destination
    >>> from geojson import Point, Feature
    >>> start = Feature(geometry=Point((-75.343, 39.984)),
    ... properties={"marker-color": "F00"})
    >>> distance = 50
    >>> bearing = 90
    >>> rhumb_destination(start, distance, bearing, {'units':'mi',
    ... 'properties': {"marker-color": "F00"}})
    """
    # convert to meters, preserving the sign of the requested distance
    was_negative_distance = distance < 0
    distance_in_meters = convert_length(abs(distance), options.get("units", "km"), "m")
    if was_negative_distance:
        distance_in_meters = -1 * (abs(distance_in_meters))
    coords = get_coord(origin)
    destination_point = _calculate_rhumb_destination(coords, distance_in_meters, bearing)
    return Feature(
        geometry=Point(destination_point),
        # BUGFIX: the default was "" -- an empty string is not a valid
        # GeoJSON properties value; default to an empty dict instead.
        properties=options.get("properties", {}),
    )
def _calculate_rhumb_destination(origin, distance, bearing, radius=None):
if not radius:
radius = avg_earth_radius_km
delta = distance / radius
lambda1 = origin[0] * pi / 180
phi1 = radians(origin[1])
theta = radians(bearing)
delta_phi = delta * cos(theta)
phi2 = phi1 + delta_phi
if abs(phi2) > pi / 2:
if phi2 > 0:
phi2 = pi - phi2
else:
phi2 = -1 * pi - phi2
delta_psi = log(tan(phi2 / 2 + pi / 4) / tan(phi1 / 2 + pi / 4))
if abs(delta_psi) > 10e-12:
q = delta_phi / delta_psi
else:
q = cos(phi1)
delta_lambda = delta * sin(theta) / q
lambda2 = lambda1 + delta_lambda
return [((lambda2 * 180 / pi) + 540) % 360 - 180, phi2 * 180 / pi]
# -------------------------------#
# ------------ rhumb distance -----------#
def rhumb_distance(start, to, units="km"):
    """
    Calculates the distance along a rhumb line between two points in degrees, radians,
    miles, or kilometers.
    :param start: Start Point or Point Feature from which distance to be calculated.
    :param to: End Point or Point Feature upto which distance to be calculated.
    :param units: Units in which distance to be calculated, values can be 'deg', 'rad',
        'mi', 'km'
    :return: Distance calculated from provided start to end Point.
    Example:
    >>> from turfpy.measurement import rhumb_distance
    >>> from geojson import Point, Feature
    >>> start = Feature(geometry=Point((-75.343, 39.984)))
    >>> end = Feature(geometry=Point((-75.534, 39.123)))
    >>> rhumb_distance(start, end,'mi')
    """
    origin = get_coord(start)
    dest = get_coord(to)
    # cross the anti-meridian the short way round
    if dest[0] - origin[0] > 180:
        lon_shift = -360
    elif origin[0] - dest[0] > 180:
        lon_shift = 360
    else:
        lon_shift = 0
    # BUGFIX: build a shifted copy instead of mutating `dest` in place --
    # get_coord may return the geometry's own coordinate list, so the old
    # `dest[0] += temp` corrupted the caller's input feature.
    distance_in_meters = _calculate_rhumb_distance(
        origin, [dest[0] + lon_shift, dest[1]]
    )
    return convert_length(distance_in_meters, "m", units)
def _calculate_rhumb_distance(origin, destination_point, radius=None):
if not radius:
radius = avg_earth_radius_km
phi1 = origin[1] * pi / 180
phi2 = destination_point[1] * pi / 180
delta_phi = phi2 - phi1
delta_lambda = abs(destination_point[0] - origin[0]) * pi / 180
if delta_lambda > pi:
delta_lambda -= 2 * pi
delta_psi = log(tan(phi2 / 2 + pi / 4) / tan(phi1 / 2 + pi / 4))
if abs(delta_psi) > 10e-12:
q = delta_phi / delta_psi
else:
q = cos(phi1)
delta = sqrt(delta_phi * delta_phi + q * q * delta_lambda * delta_lambda)
dist = delta * radius
return dist
# -------------------------------#
# ------------ square -----------#
def square(bbox: list):
    """Calculate the minimum square bounding box containing the input bbox.

    :param bbox: Bounding box extent in west, south, east, north order
    :return: A square surrounding bbox
    Example:
    >>> from turfpy.measurement import square
    >>> bbox = [-20, -20, -15, 0]
    >>> square(bbox)
    """
    west, south, east, north = bbox[0], bbox[1], bbox[2], bbox[3]
    horizontal_distance = distance(
        Feature(geometry=Point(bbox[0:2])), Feature(geometry=Point((east, south)))
    )
    vertical_distance = distance(
        Feature(geometry=Point(bbox[0:2])), Feature(geometry=Point((west, north)))
    )
    if horizontal_distance >= vertical_distance:
        # widen vertically around the vertical midpoint
        half_span = (east - west) / 2
        mid = (south + north) / 2
        return [west, mid - half_span, east, mid + half_span]
    # widen horizontally around the horizontal midpoint
    half_span = (north - south) / 2
    mid = (west + east) / 2
    return [mid - half_span, south, mid + half_span, north]
# -------------------------------#
def points_within_polygon(
    points: Union[Feature, FeatureCollection], polygons: Union[Feature, FeatureCollection]
) -> FeatureCollection:
    """Find Point(s) that fall within (Multi)Polygon(s).
    This function takes two inputs GeoJSON Feature :class:`geojson.Point` or
    :class:`geojson.FeatureCollection` of Points and GeoJSON Feature
    :class:`geojson.Polygon` or Feature :class:`geojson.MultiPolygon` or
    FeatureCollection of :class:`geojson.Polygon` or Feature
    :class:`geojson.MultiPolygon` and returns all points within those
    Polygon(s) or (Multi)Polygon(s).
    :param points: A single GeoJSON ``Point`` feature or FeatureCollection of Points.
    :param polygons: A Single GeoJSON Polygon/MultiPolygon or FeatureCollection of
        Polygons/MultiPolygons.
    :return: A :class:`geojson.FeatureCollection` of Points.
    """
    results = []

    def __callback_feature_each(feature, feature_index):
        # a point is kept as soon as ANY geometry of `polygons` contains it
        contained = False

        def __callback_geom_each(
            current_geometry, feature_index, feature_properties, feature_bbox, feature_id
        ):
            nonlocal contained
            if boolean_point_in_polygon(feature, current_geometry):
                contained = True

        # BUGFIX: the containment scan must run BEFORE `contained` is
        # tested; previously the check preceded geom_each, so `contained`
        # was always False and no point was ever collected.
        geom_each(polygons, __callback_geom_each)
        if contained:
            results.append(feature)
        return True

    feature_each(points, __callback_feature_each)
    return FeatureCollection(results)
| 31.161169 | 139 | 0.587722 | from math import asin, atan2, cos, degrees, log, pi, pow, radians, sin, sqrt, tan
from typing import Optional, Union
from geojson import (
Feature,
FeatureCollection,
LineString,
MultiLineString,
MultiPoint,
MultiPolygon,
Point,
Polygon,
)
from turfpy.helper import (
avg_earth_radius_km,
convert_length,
feature_of,
get_coord,
get_coords,
get_geom,
get_type,
length_to_radians,
radians_to_length,
)
from turfpy.meta import (
coord_each,
feature_each,
geom_each,
geom_reduce,
segment_each,
segment_reduce,
)
def bearing(start: Feature, end: Feature, final=False) -> float:
    # Great-circle bearing from `start` to `end`, degrees clockwise from
    # north, in (-180, 180].
    if final:
        return _calculate_final_bearing(start, end)
    src = start["geometry"]["coordinates"]
    dst = end["geometry"]["coordinates"]
    lon1 = radians(float(src[0]))
    lat1 = radians(float(src[1]))
    lon2 = radians(float(dst[0]))
    lat2 = radians(float(dst[1]))
    y = sin(lon2 - lon1) * cos(lat2)
    x = (cos(lat1) * sin(lat2)) - (sin(lat1) * cos(lat2) * cos(lon2 - lon1))
    return degrees(atan2(y, x))
def _calculate_final_bearing(start, end) -> float:
    # the final bearing is the reverse bearing rotated into [0, 360)
    return (bearing(end, start) + 180) % 360
def distance(point1: Feature, point2: Feature, units: str = "km"):
    # haversine great-circle distance between two points
    c1 = get_coord(point1)
    c2 = get_coord(point2)
    lat1 = radians(c1[1])
    lat2 = radians(c2[1])
    dlat = radians((c2[1] - c1[1]))
    dlon = radians((c2[0] - c1[0]))
    haversine = pow(sin(dlat / 2), 2) + pow(sin(dlon / 2), 2) * cos(lat1) * cos(lat2)
    central_angle = 2 * atan2(sqrt(haversine), sqrt(1 - haversine))
    return radians_to_length(central_angle, units)
def area(
    geojson: Union[
        Point,
        LineString,
        Polygon,
        MultiPoint,
        MultiLineString,
        MultiPolygon,
        Feature,
        FeatureCollection,
    ]
):
    # Fold an area accumulator over every geometry via geom_reduce, starting
    # from 0.  NOTE(review): the resulting units depend on
    # turfpy.meta.geom_reduce -- presumably square meters; confirm there.
    return geom_reduce(geojson, 0)
def bbox(geojson):
    # Compute [west, south, east, north] over every coordinate in the input.
    extent = [float("inf"), float("inf"), float("-inf"), float("-inf")]

    def _expand(coord, coord_index, feature_index, multi_feature_index, geometry_index):
        extent[0] = min(extent[0], coord[0])
        extent[1] = min(extent[1], coord[1])
        extent[2] = max(extent[2], coord[0])
        extent[3] = max(extent[3], coord[1])

    coord_each(geojson, _expand)
    return extent
def bbox_polygon(bbox: list, properties: dict = {}) -> Feature:
    # Turn a [west, south, east, north] extent into a Polygon Feature.
    west = float(bbox[0])
    south = float(bbox[1])
    east = float(bbox[2])
    north = float(bbox[3])
    if len(bbox) == 6:
        raise Exception("bbox-polygon does not support BBox with 6 positions")
    # counter-clockwise ring, closed back to the first corner
    ring = ((west, south), (east, south), (east, north), (west, north), (west, south))
    result = Feature(geometry=Polygon([ring]))
    result.properties = properties.get("properties", {})
    if "id" in properties:
        result.id = properties["id"]
    if "bbox" in properties:
        result.bbox = properties["bbox"]
    return result
def center(geojson, properties: Optional[dict] = None) -> Feature:
    # The center is the midpoint of the input's bounding box.
    west, south, east, north = bbox(geojson)
    feature = Feature(geometry=Point(((west + east) / 2, (south + north) / 2)))
    opts = {} if properties is None else properties
    feature.properties = opts.get("properties", {})
    if "id" in opts:
        feature.id = opts["id"]
    if "bbox" in opts:
        feature.bbox = opts["bbox"]
    return feature
def envelope(geojson) -> Feature:
    # The envelope is the input's bounding box rendered as a Polygon Feature.
    return bbox_polygon(bbox(geojson))
def length(geojson, units: str = "km"):
    # Sum the great-circle length of every segment of the input geometry.
    def _add_segment(total, segment):
        start, end = segment["geometry"]["coordinates"]
        return total + distance(
            Feature(geometry=Point(start)), Feature(geometry=Point(end)), units
        )

    return segment_reduce(geojson, _add_segment, 0)
def destination(origin: Feature, distance, bearing, options: dict = {}) -> Feature:
    # Spherical "forward" problem: start point + bearing + angular distance.
    start = origin["geometry"]["coordinates"]
    lon1 = radians(float(start[0]))
    lat1 = radians(float(start[1]))
    heading = radians(float(bearing))
    if "units" in options:
        radian = length_to_radians(distance, options["units"])
    else:
        radian = length_to_radians(distance)
    lat2 = asin(
        (sin(lat1) * cos(radian)) + (cos(lat1) * sin(radian) * cos(heading))
    )
    lon2 = lon1 + atan2(
        sin(heading) * sin(radian) * cos(lat1),
        cos(radian) - sin(lat1) * sin(lat2),
    )
    return Feature(
        geometry=Point((degrees(lon2), degrees(lat2))),
        properties=options.get("properties", {}),
    )
def centroid(geojson, properties: dict = None) -> Feature:
    # Arithmetic mean of every coordinate in the input.
    sums = [0, 0]
    count = 0

    def _accumulate(coord, coord_index, feature_index, multi_feature_index, geometry_index):
        nonlocal count
        sums[0] += coord[0]
        sums[1] += coord[1]
        count += 1

    coord_each(geojson, _accumulate)
    point = Point((sums[0] / count, sums[1] / count))
    return Feature(geometry=point, properties=properties if properties else {})
def along(line: Feature, dist, unit: str = "km") -> Feature:
    # Walk along the line accumulating segment lengths until `dist` is
    # covered, then interpolate the exact point on the current segment.
    if line["type"] == "Feature":
        geom = line["geometry"]
    else:
        geom = line
    coords = geom["coordinates"]
    travelled = 0
    options = {"units": unit}
    for i in range(0, len(coords)):
        # `dist` reaches past the end of the line: return the final vertex
        if dist >= travelled and i == (len(coords) - 1):
            break
        elif travelled >= dist:
            overshot = dist - travelled
            if not overshot:
                return Feature(geometry=Point(coords[i]))
            else:
                # overshot is <= 0 here, so stepping a negative distance along
                # the reversed bearing lands back on the previous segment
                direction = (
                    bearing(
                        Feature(geometry=Point(coords[i])),
                        Feature(geometry=Point(coords[i - 1])),
                    )
                    - 180
                )
                interpolated = destination(
                    Feature(geometry=Point(coords[i])), overshot, direction, options
                )
                return interpolated
        else:
            travelled += distance(
                Feature(geometry=Point(coords[i])),
                Feature(geometry=Point(coords[i + 1])),
                unit,
            )
    point = Point(coords[len(coords) - 1])
    return Feature(geometry=point)
def midpoint(point1: Feature, point2: Feature) -> Feature:
    # Walk half the distance from point1 toward point2.
    half = distance(point1, point2) / 2
    heading = bearing(point1, point2)
    return destination(point1, half, heading)
def nearest_point(target_point: Feature, points: FeatureCollection) -> Feature:
    # Pick the feature of `points` closest to `target_point`, annotating it
    # with its index and distance.
    if not target_point:
        raise Exception("target_point is required")
    if not points:
        raise Exception("points is required")
    best_dist = float("inf")
    best_index = 0

    def _track_nearest(candidate, index):
        nonlocal best_dist, best_index
        dist_to_candidate = distance(target_point, candidate)
        if float(dist_to_candidate) < best_dist:
            best_index = index
            best_dist = dist_to_candidate

    feature_each(points, _track_nearest)
    winner = points["features"][best_index]
    winner["properties"]["featureIndex"] = best_index
    winner["properties"]["distanceToPoint"] = best_dist
    return winner
def point_on_feature(geojson) -> Feature:
    # Return a Point Feature guaranteed to lie on the surface of the input.
    # Strategy: try the centroid first; if it is not on any geometry, fall
    # back to the input vertex nearest to the centroid.
    fc = _normalize(geojson)
    cent = centroid(fc)
    on_surface = False
    i = 0
    while not on_surface and i < len(fc["features"]):
        on_line = False
        geom = fc["features"][i]["geometry"]
        if geom["type"] == "Point":
            # centroid must coincide exactly with the point
            if (
                cent["geometry"]["coordinates"][0] == geom["coordinates"][0]
                and cent["geometry"]["coordinates"][1] == geom["coordinates"][1]
            ):
                on_surface = True
        elif geom["type"] == "MultiPoint":
            on_multi_point = False
            k = 0
            while not on_multi_point and k < len(geom["coordinates"]):
                if (
                    cent["geometry"]["coordinates"][0] == geom["coordinates"][k][0]
                    and cent["geometry"]["coordinates"][1] == geom["coordinates"][k][1]
                ):
                    on_surface = True
                    on_multi_point = True
                k += 1
        elif geom["type"] == "LineString":
            # centroid must fall on one of the line's segments
            k = 0
            while not on_line and k < len(geom["coordinates"]) - 1:
                x = cent["geometry"]["coordinates"][0]
                y = cent["geometry"]["coordinates"][1]
                x1 = geom["coordinates"][k][0]
                y1 = geom["coordinates"][k][1]
                x2 = geom["coordinates"][k + 1][0]
                y2 = geom["coordinates"][k + 1][1]
                if _point_on_segment(x, y, x1, y1, x2, y2):
                    on_line = True
                    on_surface = True
                k += 1
        elif geom["type"] == "MultiLineString":
            j = 0
            while j < len(geom["coordinates"]):
                on_line = False
                k = 0
                line = geom["coordinates"][j]
                while not on_line and k < len(line) - 1:
                    x = cent["geometry"]["coordinates"][0]
                    y = cent["geometry"]["coordinates"][1]
                    x1 = line[k][0]
                    y1 = line[k][1]
                    x2 = line[k + 1][0]
                    y2 = line[k + 1][1]
                    if _point_on_segment(x, y, x1, y1, x2, y2):
                        on_line = True
                        on_surface = True
                    k += 1
                j += 1
        elif geom["type"] == "Polygon" or geom["type"] == "MultiPolygon":
            # inside the (multi)polygon counts as "on the surface"
            if boolean_point_in_polygon(cent, geom):
                on_surface = True
        i += 1
    if on_surface:
        return cent
    else:
        # fall back to the vertex nearest the centroid
        vertices_list = []
        for i in range(0, len(fc["features"])):
            vertices_list.extend(explode(fc["features"][i])["features"])
        vertices = FeatureCollection(vertices_list)
        point = Point(nearest_point(cent, vertices)["geometry"]["coordinates"])
        return Feature(geometry=point)
def _normalize(geojson):
    # Wrap a bare geometry or a single Feature into a FeatureCollection.
    if geojson["type"] == "FeatureCollection":
        return geojson
    if geojson["type"] == "Feature":
        return FeatureCollection([geojson])
    return FeatureCollection([Feature(geometry=geojson)])
def _point_on_segment(x, y, x1, y1, x2, y2):
ab = sqrt((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1))
ap = sqrt((x - x1) * (x - x1) + (y - y1) * (y - y1))
pb = sqrt((x2 - x) * (x2 - x) + (y2 - y) * (y2 - y))
return ab == (ap + pb)
def boolean_point_in_polygon(point, polygon, ignore_boundary=False):
    # True when `point` lies inside the (Multi)Polygon; boundary points count
    # as inside unless ignore_boundary is set.
    if not point:
        raise Exception("point is required")
    if not polygon:
        raise Exception("polygon is required")
    pt = get_coord(point)
    geom = get_geom(polygon)
    bounding_box = polygon.get("bbox", None)
    if bounding_box and not in_bbox(pt, bounding_box):
        return False
    rings_per_poly = geom["coordinates"]
    if geom["type"] == "Polygon":
        rings_per_poly = [rings_per_poly]
    for rings in rings_per_poly:
        # rings[0] is the outer ring, the rest are holes
        if not in_ring(pt, rings[0], ignore_boundary):
            continue
        in_any_hole = False
        for hole in rings[1:]:
            if in_ring(pt, hole, not ignore_boundary):
                in_any_hole = True
                break
        if not in_any_hole:
            return True
    return False
def in_ring(pt, ring, ignore_boundary):
    # Ray-casting point-in-ring test; boundary hits return not ignore_boundary.
    x, y = pt[0], pt[1]
    if ring[0][0] == ring[-1][0] and ring[0][1] == ring[-1][1]:
        ring = ring[:-1]
    inside = False
    prev = len(ring) - 1
    for cur in range(len(ring)):
        xi, yi = ring[cur][0], ring[cur][1]
        xj, yj = ring[prev][0], ring[prev][1]
        collinear = y * (xi - xj) + yi * (xj - x) + yj * (x - xi) == 0
        if collinear and (xi - x) * (xj - x) <= 0 and (yi - y) * (yj - y) <= 0:
            return not ignore_boundary
        if (yi > y) != (yj > y) and x < (xj - xi) * (y - yi) / (yj - yi) + xi:
            inside = not inside
        prev = cur
    return inside
def in_bbox(pt, bbox):
    # pt (x, y) within [west, south, east, north], boundary inclusive.
    west, south, east, north = bbox[0], bbox[1], bbox[2], bbox[3]
    return west <= pt[0] <= east and south <= pt[1] <= north
def explode(geojson):
    # Flatten any GeoJSON input into a FeatureCollection containing one Point
    # Feature per coordinate, carrying over the source feature's properties.
    points = []
    if geojson["type"] == "FeatureCollection":
        def _callback_feature_each(feature, feature_index):
            # collect every coordinate of this feature as a Point Feature
            def _callback_coord_each(
                coord,
                coord_index,
                feature_index,
                multi_feature_index,
                geometry_index,
            ):
                nonlocal points
                point = Point(coord)
                points.append(Feature(geometry=point, properties=feature["properties"]))
            coord_each(feature, _callback_coord_each)
        feature_each(geojson, _callback_feature_each)
    else:
        # single Feature (or bare geometry with a "properties" key)
        def _callback_coord_each(
            coord,
            coord_index,
            feature_index,
            multi_feature_index,
            geometry_index,
        ):
            nonlocal points, geojson
            point = Point(coord)
            points.append(Feature(geometry=point, properties=geojson["properties"]))
        coord_each(geojson, _callback_coord_each)
    return FeatureCollection(points)
def polygon_tangents(point, polygon):
    """Find the two tangent points of a (Multi)Polygon as seen from a Point.

    :param point: Point or Point Feature.
    :param polygon: (Multi)Polygon or (Multi)Polygon Feature.
    :return: FeatureCollection of the two tangent Point Features.
    """
    point_coords = get_coords(point)
    poly_coords = get_coords(polygon)
    enext = 0
    bbox_points = bbox(polygon)
    nearest_pt_index = 0
    nearest = None
    if (
        bbox_points[0] < point_coords[0] < bbox_points[2]
        and bbox_points[1] < point_coords[1] < bbox_points[3]
    ):
        nearest = nearest_point(point, explode(polygon))
        # BUGFIX: nearest["properties"] is a plain dict, so attribute access
        # (nearest.properties.featureIndex) raised AttributeError.
        nearest_pt_index = nearest["properties"]["featureIndex"]
    geo_type = get_type(polygon)
    if geo_type == "Polygon":
        rtan = poly_coords[0][nearest_pt_index]
        ltan = poly_coords[0][0]
        if nearest:
            if nearest["geometry"]["coordinates"][1] < point_coords[1]:
                ltan = poly_coords[0][nearest_pt_index]
        eprev = _is_left(
            poly_coords[0][0],
            poly_coords[0][len(poly_coords[0]) - 1],
            point_coords,
        )
        out = process_polygon(poly_coords[0], point_coords, eprev, enext, rtan, ltan)
        rtan = out[0]
        ltan = out[1]
    elif geo_type == "MultiPolygon":
        # translate the flat vertex index back into (polygon, vertex) indices
        closest_feature = 0
        closest_vertex = 0
        vertices_counted = 0
        for i in range(0, len(poly_coords[0])):
            closest_feature = i
            vertice_found = False
            for i2 in range(0, len(poly_coords[0][i])):
                closest_vertex = i2
                if vertices_counted == nearest_pt_index:
                    vertice_found = True
                    break
                vertices_counted += 1
            if vertice_found:
                break
        rtan = poly_coords[0][closest_feature][closest_vertex]
        ltan = poly_coords[0][closest_feature][closest_vertex]
        eprev = _is_left(
            poly_coords[0][0][0],
            poly_coords[0][0][len(poly_coords[0][0]) - 1],
            point_coords,
        )
        for ring in poly_coords:
            out = process_polygon(ring[0], point_coords, eprev, enext, rtan, ltan)
            rtan = out[0]
            ltan = out[1]
    return FeatureCollection(
        [Feature(geometry=Point(rtan)), Feature(geometry=Point(ltan))]
    )
def process_polygon(polygon_coords, pt_coords, eprev, enext, rtan, ltan):
    """Scan one polygon ring and update the tangent candidates.

    Walks every edge of ``polygon_coords``; wherever the orientation of the
    edge relative to ``pt_coords`` flips sign, the current vertex is promoted
    to the right (``rtan``) or left (``ltan``) tangent candidate.

    :return: ``[rtan, ltan]`` — the updated tangent vertices.
    """
    vertex_count = len(polygon_coords)
    for idx, vertex in enumerate(polygon_coords):
        # The ring wraps around: the last vertex pairs with the first one.
        successor = polygon_coords[(idx + 1) % vertex_count]
        enext = _is_left(vertex, successor, pt_coords)
        # Sign changes of the orientation mark potential tangent vertices.
        rightward_flip = eprev <= 0 < enext
        leftward_flip = enext <= 0 < eprev
        if rightward_flip and not _is_below(pt_coords, vertex, rtan):
            rtan = vertex
        elif leftward_flip and not _is_above(pt_coords, vertex, ltan):
            ltan = vertex
        eprev = enext
    return [rtan, ltan]
def _is_above(point1, point2, point3):
    """True when ``point3`` lies strictly to the left of line point1->point2."""
    return 0 < _is_left(point1, point2, point3)
def _is_below(point1, point2, point3):
    """True when ``point3`` lies strictly to the right of line point1->point2."""
    return 0 > _is_left(point1, point2, point3)
def _is_left(point1, point2, point3):
    """2-D cross product of (point2 - point1) and (point3 - point1).

    Positive when ``point3`` is left of the directed line point1->point2,
    negative when right, zero when the three points are collinear.
    """
    ax = point2[0] - point1[0]
    ay = point2[1] - point1[1]
    bx = point3[0] - point1[0]
    by = point3[1] - point1[1]
    return ax * by - bx * ay
def point_to_line_distance(point: Feature, line: Feature, units="km", method="geodesic"):
    """Returns the minimum distance between a Point and a LineString.

    :param point: Point feature, Point geometry, or ``[lon, lat]`` pair.
    :param line: LineString feature, LineString geometry, or coordinate list.
    :param units: Unit of the returned distance (default ``"km"``).
    :param method: ``"geodesic"`` or ``"planar"`` distance calculation.
    :raises Exception: If *method* is invalid or *point*/*line* is missing.
    :return: Distance from the point to the nearest segment of the line.
    """
    if method != "geodesic" and method != "planar":
        raise Exception("method name is incorrect, it should be either geodesic or planar")
    options = {"units": units, "method": method}
    if not point:
        raise Exception("pt is required")
    # Normalize `point` to a Point Feature.
    if isinstance(point, list):
        point = Feature(geometry=Point(point))
    elif point["type"] == "Point":
        # Wrap as keyword `geometry`; the original passed it positionally,
        # which set the Feature *id* instead of its geometry.
        point = Feature(geometry=point)
    else:
        feature_of(point, "Point", "point")
    if not line:
        raise Exception("line is required")
    # Normalize `line` to a LineString Feature. The original mistakenly
    # re-tested `isinstance(point, list)` here, so passing the line as a
    # raw coordinate list crashed on `line["type"]`.
    if isinstance(line, list):
        line = Feature(geometry=LineString(line))
    elif line["type"] == "LineString":
        line = Feature(geometry=line)
    else:
        feature_of(line, "LineString", "line")
    distance = float("inf")
    p = point["geometry"]["coordinates"]

    def _callback_segment_each(
        current_segment,
        feature_index,
        multi_feature_index,
        geometry_index,
        segment_index,
    ):
        # Keep the smallest point-to-segment distance (measured in degrees).
        nonlocal distance
        a = current_segment["geometry"]["coordinates"][0]
        b = current_segment["geometry"]["coordinates"][1]
        d = distance_to_segment(p, a, b, options)
        if d < distance:
            distance = d

    segment_each(line, _callback_segment_each)
    return convert_length(distance, "deg", options.get("units", ""))
def distance_to_segment(p, a, b, options):
    """Distance (in degrees) from point ``p`` to the segment ``a``-``b``.

    Projects ``p`` onto the segment and measures to the nearer endpoint or
    to the projection foot, using the method named in ``options``.
    """
    dist_opts = {"method": options.get("method", ""), "units": "deg"}
    segment = [b[0] - a[0], b[1] - a[1]]
    to_point = [p[0] - a[0], p[1] - a[1]]
    projection = _dot(to_point, segment)
    # Projection falls before `a`: the start vertex is closest.
    if projection <= 0:
        return _calc_distance(p, a, dist_opts)
    squared_length = _dot(segment, segment)
    # Projection falls past `b`: the end vertex is closest.
    if squared_length <= projection:
        return _calc_distance(p, b, dist_opts)
    fraction = projection / squared_length
    foot = [a[0] + (fraction * segment[0]), a[1] + (fraction * segment[1])]
    return _calc_distance(p, foot, dist_opts)
def _calc_distance(a, b, options):
    """Distance between coordinate pairs ``a`` and ``b``.

    Uses rhumb-line distance for the "planar" method and great-circle
    distance otherwise, in the units named by ``options``.
    """
    units = options.get("units", "")
    if options.get("method", "") != "planar":
        return distance(
            Feature(geometry=Point(a)),
            Feature(geometry=Point(b)),
            units,
        )
    return rhumb_distance(a, b, units)
def _dot(u, v):
    """Dot product of two 2-D vectors."""
    return v[0] * u[0] + v[1] * u[1]
def rhumb_bearing(start, end, final=False):
    """Rhumb-line bearing from ``start`` to ``end`` in degrees.

    :param start: Origin point (Feature/geometry/coordinate pair).
    :param end: Destination point.
    :param final: When True, the bearing is computed from ``end`` back to
        ``start`` instead of the forward direction.
    :return: Bearing folded into the signed (-180, 180] range.
    """
    origin, target = (end, start) if final else (start, end)
    bearing = calculate_rhumb_bearing(get_coord(origin), get_coord(target))
    # Fold the 0..360 compass result into the signed convention.
    return bearing if bearing <= 180 else -1 * (360 - bearing)
def calculate_rhumb_bearing(fro, to):
    """Bearing of the rhumb line from ``fro`` to ``to``.

    Coordinates are ``[lon, lat]`` pairs in degrees; the result is a compass
    bearing normalised to [0, 360).
    """
    lat1 = radians(fro[1])
    lat2 = radians(to[1])
    delta_lon = radians(to[0] - fro[0])
    # Take the shorter way around the antimeridian.
    if delta_lon > pi:
        delta_lon -= 2 * pi
    if delta_lon < -1 * pi:
        delta_lon += 2 * pi
    # Difference of the Mercator-projected latitudes.
    projected_delta = log(tan(lat2 / 2 + pi / 4) / tan(lat1 / 2 + pi / 4))
    angle = atan2(delta_lon, projected_delta)
    return (degrees(angle) + 360) % 360
def rhumb_destination(origin, distance, bearing, options: "Union[dict, None]" = None) -> Feature:
    """Returns the destination Point reached by travelling along a rhumb line
    from ``origin`` the given distance on the given bearing.

    :param origin: Starting point (Feature/geometry/coordinate pair).
    :param distance: Distance to travel; may be negative to go the
        opposite way.
    :param bearing: Constant bearing in degrees.
    :param options: Optional dict with ``units`` (default ``"km"``) for the
        input distance and ``properties`` for the returned Feature.
    :return: Destination as a Point Feature.
    """
    # Replace the original mutable default argument (`options={}`) with the
    # None-sentinel idiom; behavior is unchanged for all callers.
    if options is None:
        options = {}
    was_negative_distance = distance < 0
    distance_in_meters = convert_length(abs(distance), options.get("units", "km"), "m")
    if was_negative_distance:
        distance_in_meters = -1 * (abs(distance_in_meters))
    coords = get_coord(origin)
    # NOTE(review): the distance is in meters here, so the default radius
    # constant used downstream (`avg_earth_radius_km`) is presumably
    # expressed in meters despite its name — confirm.
    destination_point = _calculate_rhumb_destination(coords, distance_in_meters, bearing)
    return Feature(
        geometry=Point(destination_point),
        properties=options.get("properties", ""),
    )
def _calculate_rhumb_destination(origin, distance, bearing, radius=None):
    """Destination of a rhumb-line leg computed on a sphere.

    :param origin: ``[lon, lat]`` start coordinates in degrees.
    :param distance: Travel distance, in the same unit as ``radius``.
    :param bearing: Constant bearing in degrees.
    :param radius: Sphere radius; defaults to the module's mean Earth radius.
    :return: ``[lon, lat]`` of the destination, longitude normalised to +-180.
    """
    if not radius:
        radius = avg_earth_radius_km
    delta = distance / radius  # angular distance travelled
    lon1 = origin[0] * pi / 180
    lat1 = radians(origin[1])
    theta = radians(bearing)
    delta_lat = delta * cos(theta)
    lat2 = lat1 + delta_lat
    # Reflect back if the leg overshot a pole.
    if abs(lat2) > pi / 2:
        lat2 = pi - lat2 if lat2 > 0 else -1 * pi - lat2
    projected_delta = log(tan(lat2 / 2 + pi / 4) / tan(lat1 / 2 + pi / 4))
    # On an east-west course the Mercator stretch factor degenerates;
    # fall back to cos(latitude).
    if abs(projected_delta) > 10e-12:
        q = delta_lat / projected_delta
    else:
        q = cos(lat1)
    delta_lon = delta * sin(theta) / q
    lon2 = lon1 + delta_lon
    return [((lon2 * 180 / pi) + 540) % 360 - 180, lat2 * 180 / pi]
def rhumb_distance(start, to, units="km"):
    """Rhumb-line distance between two points.

    :param start: Origin point (Feature/geometry/coordinate pair).
    :param to: Destination point.
    :param units: Output units (default ``"km"``).
    :return: Distance in the requested units.
    """
    origin = get_coord(start)
    dest = get_coord(to)
    # Shift the destination longitude by a full turn when needed so the
    # path does not wrap the long way around the antimeridian.
    if dest[0] - origin[0] > 180:
        wrap = -360
    elif origin[0] - dest[0] > 180:
        wrap = 360
    else:
        wrap = 0
    dest[0] += wrap
    meters = _calculate_rhumb_distance(origin, dest)
    return convert_length(meters, "m", units)
def _calculate_rhumb_distance(origin, destination_point, radius=None):
    """Length of the rhumb line between two ``[lon, lat]`` pairs on a sphere
    of the given ``radius`` (defaults to the module's mean Earth radius)."""
    if not radius:
        radius = avg_earth_radius_km
    lat1 = origin[1] * pi / 180
    lat2 = destination_point[1] * pi / 180
    delta_lat = lat2 - lat1
    delta_lon = abs(destination_point[0] - origin[0]) * pi / 180
    # Cross the antimeridian the short way.
    if delta_lon > pi:
        delta_lon -= 2 * pi
    projected_delta = log(tan(lat2 / 2 + pi / 4) / tan(lat1 / 2 + pi / 4))
    # East-west course: the Mercator stretch degenerates, use cos(latitude).
    if abs(projected_delta) > 10e-12:
        q = delta_lat / projected_delta
    else:
        q = cos(lat1)
    angular = sqrt(delta_lat * delta_lat + q * q * delta_lon * delta_lon)
    return angular * radius
def square(bbox: list):
    """Expands the shorter side of a bounding box so it becomes square.

    :param bbox: ``[west, south, east, north]`` extent.
    :return: A new ``[west, south, east, north]`` list whose shorter axis
        has been grown symmetrically about its midpoint.
    """
    west = bbox[0]
    south = bbox[1]
    east = bbox[2]
    north = bbox[3]
    horizontal_distance = distance(
        Feature(geometry=Point(bbox[0:2])), Feature(geometry=Point((east, south)))
    )
    vertical_distance = distance(
        Feature(geometry=Point(bbox[0:2])), Feature(geometry=Point((west, north)))
    )
    if horizontal_distance < vertical_distance:
        # Taller than wide: widen east-west about the horizontal midpoint.
        mid = (west + east) / 2
        half_span = (north - south) / 2
        return [mid - half_span, south, mid + half_span, north]
    # Wider than tall (or square): grow north-south about the vertical midpoint.
    mid = (south + north) / 2
    half_span = (east - west) / 2
    return [west, mid - half_span, east, mid + half_span]
def points_within_polygon(
    points: Union[Feature, FeatureCollection], polygons: Union[Feature, FeatureCollection]
) -> FeatureCollection:
    """Find Point features that fall within (Multi)Polygon(s).

    :param points: Point feature or FeatureCollection of points to test.
    :param polygons: (Multi)Polygon feature or FeatureCollection to test against.
    :return: FeatureCollection of the points found inside.
    """
    results = []

    def __callback_feature_each(feature, feature_index):
        contained = False

        def __callback_geom_each(
            current_geometry, feature_index, feature_properties, feature_bbox, feature_id
        ):
            if boolean_point_in_polygon(feature, current_geometry):
                nonlocal contained
                contained = True
            # NOTE(review): this check runs once per geometry, so a point may
            # be appended once for each geometry visited after containment is
            # detected — confirm whether de-duplication is intended.
            if contained:
                nonlocal results
                results.append(feature)

        geom_each(polygons, __callback_geom_each)
        return True

    feature_each(points, __callback_feature_each)
    return FeatureCollection(results)
| true | true |
1c31eaa39d0ff84ca5d659ef2cfbd15c184a0b91 | 32,188 | py | Python | release/scripts/modules/bpy_types.py | arubertoson/blender | 9d3550d7819807064dd39365322295ebd8ea0a09 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2020-09-23T21:30:18.000Z | 2020-09-23T21:30:18.000Z | release/scripts/modules/bpy_types.py | arubertoson/blender | 9d3550d7819807064dd39365322295ebd8ea0a09 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | release/scripts/modules/bpy_types.py | arubertoson/blender | 9d3550d7819807064dd39365322295ebd8ea0a09 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
from _bpy import types as bpy_types
import _bpy
# Short aliases for the C-defined base classes used throughout this module.
StructRNA = bpy_types.bpy_struct
StructMetaPropGroup = bpy_types.bpy_struct_meta_idprop
# StructRNA = bpy_types.Struct

# Note that methods extended in C are defined in: 'bpy_rna_types_capi.c'
class Context(StructRNA):
    __slots__ = ()

    def copy(self):
        """Return the context members as a plain dictionary, skipping
        RNA plumbing attributes and built-in methods."""
        from types import BuiltinMethodType
        skipped = {
            *StructRNA.__dict__.keys(),
            "bl_rna", "rna_type", "copy",
        }
        snapshot = {}
        for name in dir(self):
            if name.startswith("_") or name in skipped:
                continue
            member = getattr(self, name)
            if type(member) != BuiltinMethodType:
                snapshot[name] = member
        return snapshot
class Library(bpy_types.ID):
    __slots__ = ()

    @property
    def users_id(self):
        """ID data blocks which use this library"""
        import bpy

        # Linkable ID collections; see: readblenentry.c,
        # IDTYPE_FLAGS_ISLINKABLE — we could make this an attribute in rna.
        attr_links = (
            "actions", "armatures", "brushes", "cameras",
            "curves", "grease_pencils", "collections", "images",
            "lights", "lattices", "materials", "metaballs",
            "meshes", "node_groups", "objects", "scenes",
            "sounds", "speakers", "textures", "texts",
            "fonts", "worlds",
        )

        found = []
        for attr in attr_links:
            for id_block in getattr(bpy.data, attr):
                if id_block.library == self:
                    found.append(id_block)
        return tuple(found)
class Texture(bpy_types.ID):
    __slots__ = ()

    @property
    def users_material(self):
        """Materials that use this texture"""
        import bpy
        users = []
        for mat in bpy.data.materials:
            slot_textures = [slot.texture for slot in mat.texture_slots if slot]
            if self in slot_textures:
                users.append(mat)
        return tuple(users)

    @property
    def users_object_modifier(self):
        """Object modifiers that use this texture"""
        import bpy
        users = []
        for obj in bpy.data.objects:
            displace_textures = [
                mod.texture for mod in obj.modifiers if mod.type == 'DISPLACE'
            ]
            if self in displace_textures:
                users.append(obj)
        return tuple(users)
class Collection(bpy_types.ID):
    __slots__ = ()

    @property
    def users_dupli_group(self):
        """The collection instance objects this collection is used in"""
        import bpy
        users = [obj for obj in bpy.data.objects
                 if self == obj.instance_collection]
        return tuple(users)
class Object(bpy_types.ID):
    __slots__ = ()

    @property
    def children(self):
        """All the children of this object.

        .. note:: Takes ``O(len(bpy.data.objects))`` time."""
        import bpy
        offspring = []
        for candidate in bpy.data.objects:
            if candidate.parent == self:
                offspring.append(candidate)
        return tuple(offspring)

    @property
    def users_collection(self):
        """
        The collections this object is in.

        .. note:: Takes ``O(len(bpy.data.collections) + len(bpy.data.scenes))`` time."""
        import bpy
        in_collections = tuple(
            collection for collection in bpy.data.collections
            if self in collection.objects[:]
        )
        in_scene_roots = tuple(
            scene.collection for scene in bpy.data.scenes
            if self in scene.collection.objects[:]
        )
        return in_collections + in_scene_roots

    @property
    def users_scene(self):
        """The scenes this object is in.

        .. note:: Takes ``O(len(bpy.data.scenes) * len(bpy.data.objects))`` time."""
        import bpy
        return tuple(
            scene for scene in bpy.data.scenes
            if self in scene.objects[:]
        )
class WindowManager(bpy_types.ID):
    __slots__ = ()

    def popup_menu(self, draw_func, title="", icon='NONE'):
        """Show a temporary popup menu drawn by ``draw_func(menu, context)``."""
        import bpy
        menu = self.popmenu_begin__internal(title, icon=icon)
        try:
            draw_func(menu, bpy.context)
        finally:
            # Always close the menu, even when the draw callback raises.
            self.popmenu_end__internal(menu)

    def popover(
            self, draw_func, *,
            ui_units_x=0,
            keymap=None,
            from_active_button=False,
    ):
        """Show a popover panel drawn by ``draw_func(panel, context)``."""
        import bpy
        panel = self.popover_begin__internal(
            ui_units_x=ui_units_x,
            from_active_button=from_active_button,
        )
        try:
            draw_func(panel, bpy.context)
        finally:
            self.popover_end__internal(panel, keymap=keymap)

    def popup_menu_pie(self, event, draw_func, title="", icon='NONE'):
        """Show a pie menu for ``event``, drawn by ``draw_func(pie, context)``."""
        import bpy
        pie = self.piemenu_begin__internal(title, icon=icon, event=event)
        if not pie:
            return
        try:
            draw_func(pie, bpy.context)
        finally:
            self.piemenu_end__internal(pie)
class WorkSpace(bpy_types.ID):
    __slots__ = ()

    def status_text_set(self, text):
        """
        Set the status text or None to clear,
        When text is a function, this will be called with the (header, context) arguments.
        """
        from bl_ui.space_statusbar import STATUSBAR_HT_header
        # Cache the original draw callback once on the header class so it
        # can be restored after a custom draw function was installed.
        draw_fn = getattr(STATUSBAR_HT_header, "_draw_orig", None)
        if draw_fn is None:
            draw_fn = STATUSBAR_HT_header._draw_orig = STATUSBAR_HT_header.draw

        if not (text is None or isinstance(text, str)):
            # `text` is a callable: install it as the header draw function
            # and clear the plain status string.
            draw_fn = text
            text = None

        self.status_text_set_internal(text)
        STATUSBAR_HT_header.draw = draw_fn
class _GenericBone:
    """
    functions for bones, common between Armature/Pose/Edit bones.
    internal subclassing use only.
    """
    __slots__ = ()

    def translate(self, vec):
        """Utility function to add *vec* to the head and tail of this bone"""
        self.head += vec
        self.tail += vec

    def parent_index(self, parent_test):
        """
        The same as 'bone in other_bone.parent_recursive'
        but saved generating a list.

        Returns the 1-based distance to *parent_test* up the parent chain,
        or 0 when it is not an ancestor.
        """
        # use the name so different types can be tested.
        name = parent_test.name

        parent = self.parent
        i = 1
        while parent:
            if parent.name == name:
                return i
            parent = parent.parent
            i += 1

        return 0

    @property
    def x_axis(self):
        """ Vector pointing down the x-axis of the bone.
        """
        from mathutils import Vector
        return self.matrix.to_3x3() @ Vector((1.0, 0.0, 0.0))

    @property
    def y_axis(self):
        """ Vector pointing down the y-axis of the bone.
        """
        from mathutils import Vector
        return self.matrix.to_3x3() @ Vector((0.0, 1.0, 0.0))

    @property
    def z_axis(self):
        """ Vector pointing down the z-axis of the bone.
        """
        from mathutils import Vector
        return self.matrix.to_3x3() @ Vector((0.0, 0.0, 1.0))

    @property
    def basename(self):
        """The name of this bone before any '.' character"""
        # return self.name.rsplit(".", 1)[0]
        return self.name.split(".")[0]

    @property
    def parent_recursive(self):
        """A list of parents, starting with the immediate parent"""
        parent_list = []
        parent = self.parent
        while parent:
            # (inner check is redundant: the loop condition already
            # guarantees `parent` is truthy)
            if parent:
                parent_list.append(parent)
            parent = parent.parent
        return parent_list

    @property
    def center(self):
        """The midpoint between the head and the tail."""
        return (self.head + self.tail) * 0.5

    @property
    def vector(self):
        """
        The direction this bone is pointing.
        Utility function for (tail - head)
        """
        return (self.tail - self.head)

    @property
    def children(self):
        """A list of all the bones children.

        .. note:: Takes ``O(len(bones))`` time."""
        return [child for child in self._other_bones if child.parent == self]

    @property
    def children_recursive(self):
        """A list of all children from this bone.

        .. note:: Takes ``O(len(bones)**2)`` time."""
        bones_children = []
        for bone in self._other_bones:
            index = bone.parent_index(self)
            if index:
                bones_children.append((index, bone))

        # sort by distance to parent
        bones_children.sort(key=lambda bone_pair: bone_pair[0])
        return [bone for index, bone in bones_children]

    @property
    def children_recursive_basename(self):
        """
        Returns a chain of children with the same base name as this bone.
        Only direct chains are supported, forks caused by multiple children
        with matching base names will terminate the function
        and not be returned.

        .. note:: Takes ``O(len(bones)**2)`` time.
        """
        basename = self.basename
        chain = []

        child = self
        while True:
            children = child.children
            children_basename = []

            for child in children:
                if basename == child.basename:
                    children_basename.append(child)

            # Follow the chain only while exactly one child matches;
            # a fork (or no match) terminates the walk.
            if len(children_basename) == 1:
                child = children_basename[0]
                chain.append(child)
            else:
                if children_basename:
                    print("multiple basenames found, "
                          "this is probably not what you want!",
                          self.name, children_basename)

                break

        return chain

    @property
    def _other_bones(self):
        # Resolve the bone collection this bone belongs to, per subclass.
        id_data = self.id_data

        # `id_data` is an 'Object' for `PosePone`, otherwise it's an `Armature`.
        if isinstance(self, PoseBone):
            return id_data.pose.bones
        if isinstance(self, EditBone):
            return id_data.edit_bones
        if isinstance(self, Bone):
            return id_data.bones

        raise RuntimeError("Invalid type %r" % self)
class PoseBone(StructRNA, _GenericBone, metaclass=StructMetaPropGroup):
    __slots__ = ()

    @property
    def children(self):
        """Pose bones whose armature bone is parented to this bone's
        armature bone."""
        obj = self.id_data
        pose_bones = obj.pose.bones
        own_bone = self.bone
        matches = [
            pose_bones[armature_bone.name]
            for armature_bone in obj.data.bones
            if armature_bone.parent == own_bone
        ]
        return tuple(matches)
class Bone(StructRNA, _GenericBone, metaclass=StructMetaPropGroup):
    # Armature bone; shared helpers come from _GenericBone (which maps this
    # type to `id_data.bones`, see _other_bones).
    __slots__ = ()
class EditBone(StructRNA, _GenericBone, metaclass=StructMetaPropGroup):
    __slots__ = ()

    def align_orientation(self, other):
        """Point this bone the same way as ``other`` (direction and roll),
        keeping this bone's own length."""
        direction = other.vector.normalized() * self.length
        self.tail = self.head + direction
        self.roll = other.roll

    def transform(self, matrix, scale=True, roll=True):
        """Transform the bone's head, tail, roll and envelope
        (when the matrix has a scale component).

        :arg matrix: 3x3 or 4x4 transformation matrix.
        :type matrix: :class:`mathutils.Matrix`
        :arg scale: Scale the bone envelope by the matrix.
        :type scale: bool
        :arg roll:
           Correct the roll to point in the same relative
           direction to the head and tail.
        :type roll: bool
        """
        from mathutils import Vector
        # Capture the bone's z-axis *before* the head/tail move below.
        z_axis_before = self.matrix.to_3x3() @ Vector((0.0, 0.0, 1.0))

        self.tail = matrix @ self.tail
        self.head = matrix @ self.head

        if scale:
            factor = matrix.median_scale
            self.head_radius *= factor
            self.tail_radius *= factor

        if roll:
            self.align_roll(matrix @ z_axis_before)
def ord_ind(i1, i2):
    """Return the two values as an ascending ``(low, high)`` pair."""
    return (i1, i2) if i1 < i2 else (i2, i1)
class Mesh(bpy_types.ID):
    __slots__ = ()

    def from_pydata(self, vertices, edges, faces):
        """
        Make a mesh from a list of vertices/edges/faces
        Until we have a nicer way to make geometry, use this.

        :arg vertices:

           float triplets each representing (X, Y, Z)
           eg: [(0.0, 1.0, 0.5), ...].

        :type vertices: iterable object
        :arg edges:

           int pairs, each pair contains two indices to the
           *vertices* argument. eg: [(1, 2), ...]

           When an empty iterable is passed in, the edges are inferred from the polygons.

        :type edges: iterable object
        :arg faces:

           iterator of faces, each faces contains three or more indices to
           the *vertices* argument. eg: [(5, 6, 8, 9), (1, 2, 3), ...]

        :type faces: iterable object

        .. warning::

           Invalid mesh data
           *(out of range indices, edges with matching indices,
           2 sided faces... etc)* are **not** prevented.

           If the data used for mesh creation isn't known to be valid,
           run :class:`Mesh.validate` after this function.
        """
        from itertools import chain, islice, accumulate

        face_lengths = tuple(map(len, faces))

        # Allocate all element arrays up front, then fill them in bulk
        # with `foreach_set` (much faster than per-element assignment).
        self.vertices.add(len(vertices))
        self.edges.add(len(edges))
        self.loops.add(sum(face_lengths))
        self.polygons.add(len(faces))

        self.vertices.foreach_set("co", tuple(chain.from_iterable(vertices)))
        self.edges.foreach_set("vertices", tuple(chain.from_iterable(edges)))

        vertex_indices = tuple(chain.from_iterable(faces))
        # Running sum of face sizes gives each polygon's first loop index.
        loop_starts = tuple(islice(chain([0], accumulate(face_lengths)), len(faces)))

        self.polygons.foreach_set("loop_total", face_lengths)
        self.polygons.foreach_set("loop_start", loop_starts)
        self.polygons.foreach_set("vertices", vertex_indices)

        if edges or faces:
            self.update(
                # Needed to either:
                # - Calculate edges that don't exist for polygons.
                # - Assign edges to polygon loops.
                calc_edges=bool(faces),
                # Flag loose edges.
                calc_edges_loose=bool(edges),
            )

    @property
    def edge_keys(self):
        # Ordered (low, high) vertex-index pairs, one per edge.
        return [ed.key for ed in self.edges]
class MeshEdge(StructRNA):
    __slots__ = ()

    @property
    def key(self):
        """The edge's two vertex indices as an ordered (low, high) pair."""
        v1, v2 = self.vertices
        return ord_ind(v1, v2)
class MeshLoopTriangle(StructRNA):
    __slots__ = ()

    @property
    def center(self):
        """The midpoint of the face."""
        i1, i2, i3 = self.vertices
        mesh_verts = self.id_data.vertices
        return (
            mesh_verts[i1].co +
            mesh_verts[i2].co +
            mesh_verts[i3].co
        ) / 3.0

    @property
    def edge_keys(self):
        """The triangle's three edges as ordered vertex-index pairs."""
        i1, i2, i3 = self.vertices
        return (
            ord_ind(i1, i2),
            ord_ind(i2, i3),
            ord_ind(i3, i1),
        )
class MeshPolygon(StructRNA):
    __slots__ = ()

    @property
    def edge_keys(self):
        """All edges of the polygon as ordered vertex-index pairs."""
        verts = tuple(self.vertices)
        count = len(verts)
        # Each vertex pairs with its successor; the last wraps to the first.
        return [ord_ind(verts[i], verts[(i + 1) % count]) for i in range(count)]

    @property
    def loop_indices(self):
        """Range of loop indices used by this polygon."""
        begin = self.loop_start
        return range(begin, begin + self.loop_total)
class Text(bpy_types.ID):
    __slots__ = ()

    def as_string(self):
        """Return the text as a string."""
        return "\n".join([line.body for line in self.lines])

    def from_string(self, string):
        """Replace text with this string."""
        self.clear()
        self.write(string)

    def as_module(self):
        """Execute the text as Python and return it as a module object."""
        from os.path import splitext
        from types import ModuleType
        module_name = splitext(self.name)[0]
        module = ModuleType(module_name)
        # Executing the source is simple but can be slow; Text.compiled
        # (C struct member) could be used if this is called often.
        exec(self.as_string(), module.__dict__)
        return module
class Sound(bpy_types.ID):
    __slots__ = ()

    @property
    def factory(self):
        """The aud.Factory object of the sound."""
        import aud
        pointer = self.as_pointer()
        return aud._sound_from_pointer(pointer)
class RNAMeta(type):
    # TODO(campbell): move to C-API
    @property
    def is_registered(cls):
        """True when the class itself carries a ``bl_rna`` entry in its own
        namespace (not one inherited from a base class)."""
        return "bl_rna" in vars(cls)
class RNAMetaPropGroup(StructMetaPropGroup, RNAMeta):
    # Combines the ID-property metaclass with RNAMeta's `is_registered`.
    pass
# Same as 'Operator'
# only without 'as_keywords'
class Gizmo(StructRNA):
    __slots__ = ()

    def __getattribute__(self, attr):
        # Expose the gizmo's RNA target properties as if they were regular
        # attributes; all other names resolve normally via super().
        properties = StructRNA.path_resolve(self, "properties")
        bl_rna = getattr(properties, "bl_rna", None)
        if (bl_rna is not None) and (attr in bl_rna.properties):
            return getattr(properties, attr)
        return super().__getattribute__(attr)

    def __setattr__(self, attr, value):
        # Mirror of __getattribute__: RNA properties are written through the
        # `properties` struct, everything else through normal attribute access.
        properties = StructRNA.path_resolve(self, "properties")
        bl_rna = getattr(properties, "bl_rna", None)
        if (bl_rna is not None) and (attr in bl_rna.properties):
            return setattr(properties, attr, value)
        return super().__setattr__(attr, value)

    def __delattr__(self, attr):
        properties = StructRNA.path_resolve(self, "properties")
        bl_rna = getattr(properties, "bl_rna", None)
        if (bl_rna is not None) and (attr in bl_rna.properties):
            return delattr(properties, attr)
        return super().__delattr__(attr)

    # C implementations of the gizmo target-property API, exposed as methods.
    from _bpy import (
        _rna_gizmo_target_set_handler as target_set_handler,
        _rna_gizmo_target_get_value as target_get_value,
        _rna_gizmo_target_set_value as target_set_value,
        _rna_gizmo_target_get_range as target_get_range,
    )

    # Convenience wrappers around private `_gpu` module.
    def draw_custom_shape(self, shape, *, matrix=None, select_id=None):
        """
        Draw a shape created from :class:`bpy.types.Gizmo.new_custom_shape`.

        :arg shape: The cached shape to draw.
        :type shape: Undefined.
        :arg matrix: 4x4 matrix, when not given
           :class:`bpy.types.Gizmo.matrix_world` is used.
        :type matrix: :class:`mathutils.Matrix`
        :arg select_id: The selection id.
           Only use when drawing within :class:`bpy.types.Gizmo.draw_select`.
        :type select_id: int
        """
        import gpu

        if matrix is None:
            matrix = self.matrix_world

        batch, shader = shape
        shader.bind()

        if select_id is not None:
            gpu.select.load_id(select_id)
            use_blend = False
        else:
            # Regular (non-selection) drawing: pick highlight or base color.
            if self.is_highlight:
                color = (*self.color_highlight, self.alpha_highlight)
            else:
                color = (*self.color, self.alpha)
            shader.uniform_float("color", color)
            use_blend = color[3] < 1.0

        if use_blend:
            # TODO: wrap GPU_blend from GPU state.
            from bgl import glEnable, glDisable, GL_BLEND
            glEnable(GL_BLEND)

        with gpu.matrix.push_pop():
            gpu.matrix.multiply_matrix(matrix)
            batch.draw()

        if use_blend:
            glDisable(GL_BLEND)

    @staticmethod
    def new_custom_shape(type, verts):
        """
        Create a new shape that can be passed to :class:`bpy.types.Gizmo.draw_custom_shape`.

        :arg type: The type of shape to create in (POINTS, LINES, TRIS, LINE_STRIP).
        :type type: string
        :arg verts: Coordinates.
        :type verts: sequence of of 2D or 3D coordinates.
        :return: The newly created shape.
        :rtype: Undefined (it may change).
        """
        import gpu
        from gpu.types import (
            GPUBatch,
            GPUVertBuf,
            GPUVertFormat,
        )
        dims = len(verts[0])
        if dims not in {2, 3}:
            raise ValueError("Expected 2D or 3D vertex")
        fmt = GPUVertFormat()
        pos_id = fmt.attr_add(id="pos", comp_type='F32', len=dims, fetch_mode='FLOAT')
        vbo = GPUVertBuf(len=len(verts), format=fmt)
        vbo.attr_fill(id=pos_id, data=verts)
        batch = GPUBatch(type=type, buf=vbo)
        # Bind the matching uniform-color built-in shader for the dimension.
        shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR' if dims == 3 else '2D_UNIFORM_COLOR')
        batch.program_set(shader)
        return (batch, shader)
# Dummy class to keep the reference in `bpy_types_dict` and avoid
# errors like: "TypeError: expected GizmoGroup subclass of class ..."
class GizmoGroup(StructRNA):
    # Intentionally empty; exists so the type can be referenced/subclassed.
    __slots__ = ()
# Only defined so operators members can be used by accessing self.order
# with doc generation 'self.properties.bl_rna.properties' can fail
class Operator(StructRNA, metaclass=RNAMeta):
    __slots__ = ()

    def __getattribute__(self, attr):
        # Forward access to the operator's RNA properties so `self.foo`
        # reads the registered property `foo`; other names resolve normally.
        properties = StructRNA.path_resolve(self, "properties")
        bl_rna = getattr(properties, "bl_rna", None)
        if (bl_rna is not None) and (attr in bl_rna.properties):
            return getattr(properties, attr)
        return super().__getattribute__(attr)

    def __setattr__(self, attr, value):
        # Mirror of __getattribute__ for assignment.
        properties = StructRNA.path_resolve(self, "properties")
        bl_rna = getattr(properties, "bl_rna", None)
        if (bl_rna is not None) and (attr in bl_rna.properties):
            return setattr(properties, attr, value)
        return super().__setattr__(attr, value)

    def __delattr__(self, attr):
        properties = StructRNA.path_resolve(self, "properties")
        bl_rna = getattr(properties, "bl_rna", None)
        if (bl_rna is not None) and (attr in bl_rna.properties):
            return delattr(properties, attr)
        return super().__delattr__(attr)

    def as_keywords(self, ignore=()):
        """Return a copy of the properties as a dictionary"""
        # `rna_type` is internal plumbing and never useful as a keyword.
        ignore = ignore + ("rna_type",)
        return {attr: getattr(self, attr)
                for attr in self.properties.rna_type.properties.keys()
                if attr not in ignore}
class Macro(StructRNA):
    __slots__ = ()

    @classmethod
    def define(cls, opname):
        """Append the operator ``opname`` as a step of this macro."""
        # bpy_types is imported before ops is defined,
        # so the import has to happen at call time.
        from _bpy import ops
        return ops.macro_define(cls, opname)
class PropertyGroup(StructRNA, metaclass=RNAMetaPropGroup):
    # Script-subclassable RNA type; no Python-side behavior of its own.
    __slots__ = ()
class RenderEngine(StructRNA, metaclass=RNAMeta):
    # Script-subclassable RNA type; no Python-side behavior of its own.
    __slots__ = ()
class KeyingSetInfo(StructRNA, metaclass=RNAMeta):
    # Script-subclassable RNA type; no Python-side behavior of its own.
    __slots__ = ()
class AddonPreferences(StructRNA, metaclass=RNAMeta):
    # Script-subclassable RNA type; no Python-side behavior of its own.
    __slots__ = ()
class _GenericUI:
    __slots__ = ()

    @classmethod
    def _dyn_ui_initialize(cls):
        # Lazily replace `cls.draw` with a dispatcher that runs a mutable
        # list of draw functions; returns that list for callers to extend.
        draw_funcs = getattr(cls.draw, "_draw_funcs", None)

        if draw_funcs is None:

            def draw_ls(self, context):
                # ensure menus always get default context
                operator_context_default = self.layout.operator_context

                # Support filtering out by owner
                workspace = context.workspace
                if workspace.use_filter_by_owner:
                    owner_names = {owner_id.name for owner_id in workspace.owner_ids}
                else:
                    owner_names = None

                for func in draw_ls._draw_funcs:

                    # Begin 'owner_id' filter.
                    # Exclude Import/Export menus from this filtering (io addons should always show there)
                    if not getattr(self, "bl_owner_use_filter", True):
                        pass
                    elif owner_names is not None:
                        owner_id = getattr(func, "_owner", None)
                        if owner_id is not None:
                            if func._owner not in owner_names:
                                continue
                    # End 'owner_id' filter.

                    # so bad menu functions don't stop
                    # the entire menu from drawing
                    try:
                        func(self, context)
                    except:
                        import traceback
                        traceback.print_exc()

                    # Restore the default context after every draw function.
                    self.layout.operator_context = operator_context_default

            # The original draw becomes the first entry of the dispatch list.
            draw_funcs = draw_ls._draw_funcs = [cls.draw]
            cls.draw = draw_ls

        return draw_funcs

    @staticmethod
    def _dyn_owner_apply(draw_func):
        # Tag the draw function with the current owner id (used by the
        # workspace owner filter in draw_ls above).
        from _bpy import _bl_owner_id_get
        owner_id = _bl_owner_id_get()
        if owner_id is not None:
            draw_func._owner = owner_id

    @classmethod
    def is_extended(cls):
        # True once _dyn_ui_initialize installed the dispatcher.
        return bool(getattr(cls.draw, "_draw_funcs", None))

    @classmethod
    def append(cls, draw_func):
        """
        Append a draw function to this menu,
        takes the same arguments as the menus draw function
        """
        draw_funcs = cls._dyn_ui_initialize()
        cls._dyn_owner_apply(draw_func)
        draw_funcs.append(draw_func)

    @classmethod
    def prepend(cls, draw_func):
        """
        Prepend a draw function to this menu, takes the same arguments as
        the menus draw function
        """
        draw_funcs = cls._dyn_ui_initialize()
        cls._dyn_owner_apply(draw_func)
        draw_funcs.insert(0, draw_func)

    @classmethod
    def remove(cls, draw_func):
        """Remove a draw function that has been added to this menu"""
        draw_funcs = cls._dyn_ui_initialize()
        try:
            draw_funcs.remove(draw_func)
        except ValueError:
            # Removing a function that was never added is a no-op.
            pass
class Panel(StructRNA, _GenericUI, metaclass=RNAMeta):
    # UI panel base class; dynamic draw extension (append/prepend/remove)
    # comes from _GenericUI.
    __slots__ = ()
class UIList(StructRNA, _GenericUI, metaclass=RNAMeta):
    # UI list base class; dynamic draw extension comes from _GenericUI.
    __slots__ = ()
class Header(StructRNA, _GenericUI, metaclass=RNAMeta):
    # UI header base class; dynamic draw extension comes from _GenericUI.
    __slots__ = ()
class Menu(StructRNA, _GenericUI, metaclass=RNAMeta):
    __slots__ = ()

    def path_menu(self, searchpaths, operator, *,
                  props_default=None, prop_filepath="filepath",
                  filter_ext=None, filter_path=None, display_name=None,
                  add_operator=None):
        """
        Populate a menu from a list of paths.

        :arg searchpaths: Paths to scan.
        :type searchpaths: sequence of strings.
        :arg operator: The operator id to use with each file.
        :type operator: string
        :arg prop_filepath: Optional operator filepath property (defaults to "filepath").
        :type prop_filepath: string
        :arg props_default: Properties to assign to each operator.
        :type props_default: dict
        :arg filter_ext: Optional callback that takes the file extensions.

           Returning false excludes the file from the list.

        :type filter_ext: Callable that takes a string and returns a bool.
        :arg filter_path: Optional callback that takes the file name;
           returning false excludes the file from the list.
        :type filter_path: Callable that takes a string and returns a bool.
        :arg display_name: Optional callback that takes the full path, returns the name to display.
        :type display_name: Callable that takes a string and returns a string.
        :arg add_operator: Optional operator id used to add/remove entries
           (used by preset menus).
        :type add_operator: string
        """
        layout = self.layout

        import os
        import bpy.utils

        # NOTE(review): duplicate of the assignment above; harmless.
        layout = self.layout

        if not searchpaths:
            layout.label(text="* Missing Paths *")
            # (no early return: the loop below is simply a no-op)

        # collect paths
        files = []
        for directory in searchpaths:
            files.extend([
                (f, os.path.join(directory, f))
                for f in os.listdir(directory)
                if (not f.startswith("."))
                if ((filter_ext is None) or
                    (filter_ext(os.path.splitext(f)[1])))
                if ((filter_path is None) or
                    (filter_path(f)))
            ])

        files.sort()

        col = layout.column(align=True)

        for f, filepath in files:
            # Intentionally pass the full path to 'display_name' callback,
            # since the callback may want to use part a directory in the name.
            row = col.row(align=True)
            name = display_name(filepath) if display_name else bpy.path.display_name(f)
            props = row.operator(
                operator,
                text=name,
                translate=False,
            )

            if props_default is not None:
                for attr, value in props_default.items():
                    setattr(props, attr, value)

            setattr(props, prop_filepath, filepath)
            if operator == "script.execute_preset":
                props.menu_idname = self.bl_idname

            if add_operator:
                # Per-entry remove button next to each preset.
                props = row.operator(add_operator, text="", icon='REMOVE')
                props.name = name
                props.remove_name = True

        if add_operator:
            # Trailing row: name field plus an add button for new presets.
            wm = bpy.data.window_managers[0]

            layout.separator()
            row = layout.row()

            sub = row.row()
            sub.emboss = 'NORMAL'
            sub.prop(wm, "preset_name", text="")

            props = row.operator(add_operator, text="", icon='ADD')
            props.name = wm.preset_name

    def draw_preset(self, _context):
        """
        Define these on the subclass:
        - preset_operator (string)
        - preset_subdir (string)

        Optionally:
        - preset_add_operator (string)
        - preset_extensions (set of strings)
        - preset_operator_defaults (dict of keyword args)
        """
        import bpy
        ext_valid = getattr(self, "preset_extensions", {".py", ".xml"})
        props_default = getattr(self, "preset_operator_defaults", None)
        add_operator = getattr(self, "preset_add_operator", None)
        self.path_menu(
            bpy.utils.preset_paths(self.preset_subdir),
            self.preset_operator,
            props_default=props_default,
            filter_ext=lambda ext: ext.lower() in ext_valid,
            add_operator=add_operator,
        )

    @classmethod
    def draw_collapsible(cls, context, layout):
        # helper function for (optionally) collapsed header menus
        # only usable within headers
        if context.area.show_menus:
            # Align menus to space them closely.
            layout.row(align=True).menu_contents(cls.__name__)
        else:
            layout.menu(cls.__name__, icon='COLLAPSEMENU')
class NodeTree(bpy_types.ID, metaclass=RNAMetaPropGroup):
    # Node tree base class: an ID datablock with RNA property support.
    __slots__ = ()
class Node(StructRNA, metaclass=RNAMetaPropGroup):
    __slots__ = ()

    @classmethod
    def poll(cls, _ntree):
        # Default poll: a node type is available in any node tree.
        return True
class NodeInternal(Node):
    # Intermediate base shared by the concrete node categories below.
    __slots__ = ()
class NodeSocket(StructRNA, metaclass=RNAMetaPropGroup):
    __slots__ = ()

    @property
    def links(self):
        """
        List of node links from or to this socket.

        .. note:: Takes ``O(len(nodetree.links))`` time."""
        return tuple(
            candidate for candidate in self.id_data.links
            if (candidate.from_socket == self) or (candidate.to_socket == self)
        )
class NodeSocketInterface(StructRNA, metaclass=RNAMetaPropGroup):
    # Base class for node socket interface types.
    __slots__ = ()
# These are intermediate subclasses, need a bpy type too
class CompositorNode(NodeInternal):
    """Base class for compositor nodes."""
    __slots__ = ()

    @classmethod
    def poll(cls, ntree):
        # Only available inside compositor node trees.
        return ntree.bl_idname == 'CompositorNodeTree'

    def update(self):
        # Default update handler: tag this node for (re-)execution.
        self.tag_need_exec()
class ShaderNode(NodeInternal):
    """Base class for shader nodes."""
    __slots__ = ()

    @classmethod
    def poll(cls, ntree):
        # Only available inside shader node trees.
        return ntree.bl_idname == 'ShaderNodeTree'
class TextureNode(NodeInternal):
    """Base class for texture nodes."""
    __slots__ = ()

    @classmethod
    def poll(cls, ntree):
        # Only available inside texture node trees.
        return ntree.bl_idname == 'TextureNodeTree'
| 30.42344 | 106 | 0.591929 | t__.keys(),
"bl_rna", "rna_type", "copy",
)
for attr in dir(self):
if not (attr.startswith("_") or attr in generic_attrs):
value = getattr(self, attr)
if type(value) != BuiltinMethodType:
new_context[attr] = value
return new_context
class Library(bpy_types.ID):
__slots__ = ()
@property
def users_id(self):
import bpy
attr_links = (
"actions", "armatures", "brushes", "cameras",
"curves", "grease_pencils", "collections", "images",
"lights", "lattices", "materials", "metaballs",
"meshes", "node_groups", "objects", "scenes",
"sounds", "speakers", "textures", "texts",
"fonts", "worlds",
)
return tuple(id_block
for attr in attr_links
for id_block in getattr(bpy.data, attr)
if id_block.library == self)
class Texture(bpy_types.ID):
__slots__ = ()
@property
def users_material(self):
import bpy
return tuple(mat for mat in bpy.data.materials
if self in [slot.texture
for slot in mat.texture_slots
if slot]
)
@property
def users_object_modifier(self):
import bpy
return tuple(
obj for obj in bpy.data.objects if
self in [
mod.texture
for mod in obj.modifiers
if mod.type == 'DISPLACE']
)
class Collection(bpy_types.ID):
__slots__ = ()
@property
def users_dupli_group(self):
import bpy
return tuple(obj for obj in bpy.data.objects
if self == obj.instance_collection)
class Object(bpy_types.ID):
__slots__ = ()
@property
def children(self):
import bpy
return tuple(child for child in bpy.data.objects
if child.parent == self)
@property
def users_collection(self):
import bpy
return (
tuple(
collection for collection in bpy.data.collections
if self in collection.objects[:]
) + tuple(
scene.collection for scene in bpy.data.scenes
if self in scene.collection.objects[:]
)
)
@property
def users_scene(self):
import bpy
return tuple(scene for scene in bpy.data.scenes
if self in scene.objects[:])
class WindowManager(bpy_types.ID):
__slots__ = ()
def popup_menu(self, draw_func, title="", icon='NONE'):
import bpy
popup = self.popmenu_begin__internal(title, icon=icon)
try:
draw_func(popup, bpy.context)
finally:
self.popmenu_end__internal(popup)
def popover(
self, draw_func, *,
ui_units_x=0,
keymap=None,
from_active_button=False,
):
import bpy
popup = self.popover_begin__internal(
ui_units_x=ui_units_x,
from_active_button=from_active_button,
)
try:
draw_func(popup, bpy.context)
finally:
self.popover_end__internal(popup, keymap=keymap)
def popup_menu_pie(self, event, draw_func, title="", icon='NONE'):
import bpy
pie = self.piemenu_begin__internal(title, icon=icon, event=event)
if pie:
try:
draw_func(pie, bpy.context)
finally:
self.piemenu_end__internal(pie)
class WorkSpace(bpy_types.ID):
__slots__ = ()
def status_text_set(self, text):
from bl_ui.space_statusbar import STATUSBAR_HT_header
draw_fn = getattr(STATUSBAR_HT_header, "_draw_orig", None)
if draw_fn is None:
draw_fn = STATUSBAR_HT_header._draw_orig = STATUSBAR_HT_header.draw
if not (text is None or isinstance(text, str)):
draw_fn = text
text = None
self.status_text_set_internal(text)
STATUSBAR_HT_header.draw = draw_fn
class _GenericBone:
__slots__ = ()
def translate(self, vec):
self.head += vec
self.tail += vec
def parent_index(self, parent_test):
name = parent_test.name
parent = self.parent
i = 1
while parent:
if parent.name == name:
return i
parent = parent.parent
i += 1
return 0
@property
def x_axis(self):
from mathutils import Vector
return self.matrix.to_3x3() @ Vector((1.0, 0.0, 0.0))
@property
def y_axis(self):
from mathutils import Vector
return self.matrix.to_3x3() @ Vector((0.0, 1.0, 0.0))
@property
def z_axis(self):
from mathutils import Vector
return self.matrix.to_3x3() @ Vector((0.0, 0.0, 1.0))
@property
def basename(self):
return self.name.split(".")[0]
@property
def parent_recursive(self):
parent_list = []
parent = self.parent
while parent:
if parent:
parent_list.append(parent)
parent = parent.parent
return parent_list
@property
def center(self):
return (self.head + self.tail) * 0.5
@property
def vector(self):
return (self.tail - self.head)
@property
def children(self):
return [child for child in self._other_bones if child.parent == self]
@property
def children_recursive(self):
bones_children = []
for bone in self._other_bones:
index = bone.parent_index(self)
if index:
bones_children.append((index, bone))
bones_children.sort(key=lambda bone_pair: bone_pair[0])
return [bone for index, bone in bones_children]
@property
def children_recursive_basename(self):
basename = self.basename
chain = []
child = self
while True:
children = child.children
children_basename = []
for child in children:
if basename == child.basename:
children_basename.append(child)
if len(children_basename) == 1:
child = children_basename[0]
chain.append(child)
else:
if children_basename:
print("multiple basenames found, "
"this is probably not what you want!",
self.name, children_basename)
break
return chain
@property
def _other_bones(self):
id_data = self.id_data
if isinstance(self, PoseBone):
return id_data.pose.bones
if isinstance(self, EditBone):
return id_data.edit_bones
if isinstance(self, Bone):
return id_data.bones
raise RuntimeError("Invalid type %r" % self)
class PoseBone(StructRNA, _GenericBone, metaclass=StructMetaPropGroup):
__slots__ = ()
@property
def children(self):
obj = self.id_data
pbones = obj.pose.bones
self_bone = self.bone
return tuple(pbones[bone.name] for bone in obj.data.bones
if bone.parent == self_bone)
class Bone(StructRNA, _GenericBone, metaclass=StructMetaPropGroup):
__slots__ = ()
class EditBone(StructRNA, _GenericBone, metaclass=StructMetaPropGroup):
__slots__ = ()
def align_orientation(self, other):
vec = other.vector.normalized() * self.length
self.tail = self.head + vec
self.roll = other.roll
def transform(self, matrix, scale=True, roll=True):
from mathutils import Vector
z_vec = self.matrix.to_3x3() @ Vector((0.0, 0.0, 1.0))
self.tail = matrix @ self.tail
self.head = matrix @ self.head
if scale:
scalar = matrix.median_scale
self.head_radius *= scalar
self.tail_radius *= scalar
if roll:
self.align_roll(matrix @ z_vec)
def ord_ind(i1, i2):
    """Return the two indices as a tuple ordered ascending."""
    return (i1, i2) if i1 < i2 else (i2, i1)
class Mesh(bpy_types.ID):
__slots__ = ()
def from_pydata(self, vertices, edges, faces):
from itertools import chain, islice, accumulate
face_lengths = tuple(map(len, faces))
self.vertices.add(len(vertices))
self.edges.add(len(edges))
self.loops.add(sum(face_lengths))
self.polygons.add(len(faces))
self.vertices.foreach_set("co", tuple(chain.from_iterable(vertices)))
self.edges.foreach_set("vertices", tuple(chain.from_iterable(edges)))
vertex_indices = tuple(chain.from_iterable(faces))
loop_starts = tuple(islice(chain([0], accumulate(face_lengths)), len(faces)))
self.polygons.foreach_set("loop_total", face_lengths)
self.polygons.foreach_set("loop_start", loop_starts)
self.polygons.foreach_set("vertices", vertex_indices)
if edges or faces:
self.update(
# Needed to either:
# - Calculate edges that don't exist for polygons.
calc_edges=bool(faces),
calc_edges_loose=bool(edges),
)
@property
def edge_keys(self):
return [ed.key for ed in self.edges]
class MeshEdge(StructRNA):
__slots__ = ()
@property
def key(self):
return ord_ind(*tuple(self.vertices))
class MeshLoopTriangle(StructRNA):
__slots__ = ()
@property
def center(self):
face_verts = self.vertices[:]
mesh_verts = self.id_data.vertices
return (
mesh_verts[face_verts[0]].co +
mesh_verts[face_verts[1]].co +
mesh_verts[face_verts[2]].co
) / 3.0
@property
def edge_keys(self):
verts = self.vertices[:]
return (
ord_ind(verts[0], verts[1]),
ord_ind(verts[1], verts[2]),
ord_ind(verts[2], verts[0]),
)
class MeshPolygon(StructRNA):
__slots__ = ()
@property
def edge_keys(self):
verts = self.vertices[:]
vlen = len(self.vertices)
return [ord_ind(verts[i], verts[(i + 1) % vlen]) for i in range(vlen)]
@property
def loop_indices(self):
start = self.loop_start
end = start + self.loop_total
return range(start, end)
class Text(bpy_types.ID):
__slots__ = ()
def as_string(self):
return "\n".join(line.body for line in self.lines)
def from_string(self, string):
self.clear()
self.write(string)
def as_module(self):
from os.path import splitext
from types import ModuleType
mod = ModuleType(splitext(self.name)[0])
exec(self.as_string(), mod.__dict__)
return mod
class Sound(bpy_types.ID):
__slots__ = ()
@property
def factory(self):
import aud
return aud._sound_from_pointer(self.as_pointer())
class RNAMeta(type):
@property
def is_registered(cls):
return "bl_rna" in cls.__dict__
class RNAMetaPropGroup(StructMetaPropGroup, RNAMeta):
pass
class Gizmo(StructRNA):
__slots__ = ()
def __getattribute__(self, attr):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return getattr(properties, attr)
return super().__getattribute__(attr)
def __setattr__(self, attr, value):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return setattr(properties, attr, value)
return super().__setattr__(attr, value)
def __delattr__(self, attr):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return delattr(properties, attr)
return super().__delattr__(attr)
from _bpy import (
_rna_gizmo_target_set_handler as target_set_handler,
_rna_gizmo_target_get_value as target_get_value,
_rna_gizmo_target_set_value as target_set_value,
_rna_gizmo_target_get_range as target_get_range,
)
def draw_custom_shape(self, shape, *, matrix=None, select_id=None):
import gpu
if matrix is None:
matrix = self.matrix_world
batch, shader = shape
shader.bind()
if select_id is not None:
gpu.select.load_id(select_id)
use_blend = False
else:
if self.is_highlight:
color = (*self.color_highlight, self.alpha_highlight)
else:
color = (*self.color, self.alpha)
shader.uniform_float("color", color)
use_blend = color[3] < 1.0
if use_blend:
from bgl import glEnable, glDisable, GL_BLEND
glEnable(GL_BLEND)
with gpu.matrix.push_pop():
gpu.matrix.multiply_matrix(matrix)
batch.draw()
if use_blend:
glDisable(GL_BLEND)
@staticmethod
def new_custom_shape(type, verts):
import gpu
from gpu.types import (
GPUBatch,
GPUVertBuf,
GPUVertFormat,
)
dims = len(verts[0])
if dims not in {2, 3}:
raise ValueError("Expected 2D or 3D vertex")
fmt = GPUVertFormat()
pos_id = fmt.attr_add(id="pos", comp_type='F32', len=dims, fetch_mode='FLOAT')
vbo = GPUVertBuf(len=len(verts), format=fmt)
vbo.attr_fill(id=pos_id, data=verts)
batch = GPUBatch(type=type, buf=vbo)
shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR' if dims == 3 else '2D_UNIFORM_COLOR')
batch.program_set(shader)
return (batch, shader)
class GizmoGroup(StructRNA):
__slots__ = ()
class Operator(StructRNA, metaclass=RNAMeta):
__slots__ = ()
def __getattribute__(self, attr):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return getattr(properties, attr)
return super().__getattribute__(attr)
def __setattr__(self, attr, value):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return setattr(properties, attr, value)
return super().__setattr__(attr, value)
def __delattr__(self, attr):
properties = StructRNA.path_resolve(self, "properties")
bl_rna = getattr(properties, "bl_rna", None)
if (bl_rna is not None) and (attr in bl_rna.properties):
return delattr(properties, attr)
return super().__delattr__(attr)
def as_keywords(self, ignore=()):
ignore = ignore + ("rna_type",)
return {attr: getattr(self, attr)
for attr in self.properties.rna_type.properties.keys()
if attr not in ignore}
class Macro(StructRNA):
__slots__ = ()
@classmethod
def define(self, opname):
from _bpy import ops
return ops.macro_define(self, opname)
class PropertyGroup(StructRNA, metaclass=RNAMetaPropGroup):
__slots__ = ()
class RenderEngine(StructRNA, metaclass=RNAMeta):
__slots__ = ()
class KeyingSetInfo(StructRNA, metaclass=RNAMeta):
__slots__ = ()
class AddonPreferences(StructRNA, metaclass=RNAMeta):
__slots__ = ()
class _GenericUI:
__slots__ = ()
@classmethod
def _dyn_ui_initialize(cls):
draw_funcs = getattr(cls.draw, "_draw_funcs", None)
if draw_funcs is None:
def draw_ls(self, context):
operator_context_default = self.layout.operator_context
workspace = context.workspace
if workspace.use_filter_by_owner:
owner_names = {owner_id.name for owner_id in workspace.owner_ids}
else:
owner_names = None
for func in draw_ls._draw_funcs:
if not getattr(self, "bl_owner_use_filter", True):
pass
elif owner_names is not None:
owner_id = getattr(func, "_owner", None)
if owner_id is not None:
if func._owner not in owner_names:
continue
# the entire menu from drawing
try:
func(self, context)
except:
import traceback
traceback.print_exc()
self.layout.operator_context = operator_context_default
draw_funcs = draw_ls._draw_funcs = [cls.draw]
cls.draw = draw_ls
return draw_funcs
@staticmethod
def _dyn_owner_apply(draw_func):
from _bpy import _bl_owner_id_get
owner_id = _bl_owner_id_get()
if owner_id is not None:
draw_func._owner = owner_id
@classmethod
def is_extended(cls):
return bool(getattr(cls.draw, "_draw_funcs", None))
@classmethod
def append(cls, draw_func):
draw_funcs = cls._dyn_ui_initialize()
cls._dyn_owner_apply(draw_func)
draw_funcs.append(draw_func)
@classmethod
def prepend(cls, draw_func):
draw_funcs = cls._dyn_ui_initialize()
cls._dyn_owner_apply(draw_func)
draw_funcs.insert(0, draw_func)
@classmethod
def remove(cls, draw_func):
draw_funcs = cls._dyn_ui_initialize()
try:
draw_funcs.remove(draw_func)
except ValueError:
pass
class Panel(StructRNA, _GenericUI, metaclass=RNAMeta):
__slots__ = ()
class UIList(StructRNA, _GenericUI, metaclass=RNAMeta):
__slots__ = ()
class Header(StructRNA, _GenericUI, metaclass=RNAMeta):
__slots__ = ()
class Menu(StructRNA, _GenericUI, metaclass=RNAMeta):
__slots__ = ()
def path_menu(self, searchpaths, operator, *,
props_default=None, prop_filepath="filepath",
filter_ext=None, filter_path=None, display_name=None,
add_operator=None):
layout = self.layout
import os
import bpy.utils
layout = self.layout
if not searchpaths:
layout.label(text="* Missing Paths *")
# collect paths
files = []
for directory in searchpaths:
files.extend([
(f, os.path.join(directory, f))
for f in os.listdir(directory)
if (not f.startswith("."))
if ((filter_ext is None) or
(filter_ext(os.path.splitext(f)[1])))
if ((filter_path is None) or
(filter_path(f)))
])
files.sort()
col = layout.column(align=True)
for f, filepath in files:
# Intentionally pass the full path to 'display_name' callback,
# since the callback may want to use part a directory in the name.
row = col.row(align=True)
name = display_name(filepath) if display_name else bpy.path.display_name(f)
props = row.operator(
operator,
text=name,
translate=False,
)
if props_default is not None:
for attr, value in props_default.items():
setattr(props, attr, value)
setattr(props, prop_filepath, filepath)
if operator == "script.execute_preset":
props.menu_idname = self.bl_idname
if add_operator:
props = row.operator(add_operator, text="", icon='REMOVE')
props.name = name
props.remove_name = True
if add_operator:
wm = bpy.data.window_managers[0]
layout.separator()
row = layout.row()
sub = row.row()
sub.emboss = 'NORMAL'
sub.prop(wm, "preset_name", text="")
props = row.operator(add_operator, text="", icon='ADD')
props.name = wm.preset_name
def draw_preset(self, _context):
import bpy
ext_valid = getattr(self, "preset_extensions", {".py", ".xml"})
props_default = getattr(self, "preset_operator_defaults", None)
add_operator = getattr(self, "preset_add_operator", None)
self.path_menu(
bpy.utils.preset_paths(self.preset_subdir),
self.preset_operator,
props_default=props_default,
filter_ext=lambda ext: ext.lower() in ext_valid,
add_operator=add_operator,
)
@classmethod
def draw_collapsible(cls, context, layout):
# helper function for (optionally) collapsed header menus
# only usable within headers
if context.area.show_menus:
# Align menus to space them closely.
layout.row(align=True).menu_contents(cls.__name__)
else:
layout.menu(cls.__name__, icon='COLLAPSEMENU')
class NodeTree(bpy_types.ID, metaclass=RNAMetaPropGroup):
__slots__ = ()
class Node(StructRNA, metaclass=RNAMetaPropGroup):
__slots__ = ()
@classmethod
def poll(cls, _ntree):
return True
class NodeInternal(Node):
__slots__ = ()
class NodeSocket(StructRNA, metaclass=RNAMetaPropGroup):
__slots__ = ()
@property
def links(self):
return tuple(
link for link in self.id_data.links
if (link.from_socket == self or
link.to_socket == self))
class NodeSocketInterface(StructRNA, metaclass=RNAMetaPropGroup):
__slots__ = ()
# These are intermediate subclasses, need a bpy type too
class CompositorNode(NodeInternal):
__slots__ = ()
@classmethod
def poll(cls, ntree):
return ntree.bl_idname == 'CompositorNodeTree'
def update(self):
self.tag_need_exec()
class ShaderNode(NodeInternal):
__slots__ = ()
@classmethod
def poll(cls, ntree):
return ntree.bl_idname == 'ShaderNodeTree'
class TextureNode(NodeInternal):
__slots__ = ()
@classmethod
def poll(cls, ntree):
return ntree.bl_idname == 'TextureNodeTree'
| true | true |
1c31eb7da047e246ff8503a95a8181e5e53cb3d3 | 102 | py | Python | utils/__init__.py | rizwan09/Tagger | 7622f10561a0f6074abde0c9c26a4f25405b204b | [
"BSD-3-Clause"
] | null | null | null | utils/__init__.py | rizwan09/Tagger | 7622f10561a0f6074abde0c9c26a4f25405b204b | [
"BSD-3-Clause"
] | null | null | null | utils/__init__.py | rizwan09/Tagger | 7622f10561a0f6074abde0c9c26a4f25405b204b | [
"BSD-3-Clause"
] | null | null | null | # __init__.py
# author: Playinf
# email: playinf@stu.xmu.edu.cn
from .parallel import parallel_model
| 17 | 36 | 0.764706 |
from .parallel import parallel_model
| true | true |
1c31ec3c6b93fee9e2dbf2f4584e4bd3c40726b0 | 1,525 | py | Python | cad_tickers/news/ceo/scrap.py | FriendlyUser/cad_tickers | 2f99a4494498419d8decf41fb0fbc77722dbc712 | [
"MIT"
] | 2 | 2022-03-16T02:19:25.000Z | 2022-03-16T02:22:39.000Z | cad_tickers/news/ceo/scrap.py | FriendlyUser/cad_tickers | 2f99a4494498419d8decf41fb0fbc77722dbc712 | [
"MIT"
] | 57 | 2020-07-30T15:43:43.000Z | 2022-03-28T02:04:13.000Z | cad_tickers/news/ceo/scrap.py | FriendlyUser/cad_tickers | 2f99a4494498419d8decf41fb0fbc77722dbc712 | [
"MIT"
] | null | null | null | import requests
import bs4
from bs4 import BeautifulSoup
from cad_tickers.news.ceo.utils import module_logger
def extract_article(article_url: str) -> bs4.element.Tag:
    """Extract the article body tag from the given CEO news url.

    Downloads the page, strips <script> tags, "Click Image To View Full
    Size" captions and <img> tags, then returns the tag holding the
    article text. Several known container classes/ids are tried in turn;
    None is returned when no container is found (after logging).
    """
    r = requests.get(article_url)
    # requests.get never returns None; a failed request shows up as a
    # non-2xx status code instead, so warn on that.
    if not r.ok:
        module_logger.warning('No data returned from the url')
    soup = BeautifulSoup(r.text, features="html.parser")
    # Remove all scripts from the document before extracting text.
    for script in soup.findAll('script'):
        script.extract()
    # Try the known article containers in order of preference.
    article = soup.find(attrs={'class': 'article-body article'})
    if article is None:
        article = soup.find(id='article')
    if article is None:
        article = soup.find(id='article-container')
    if article is None:
        module_logger.warning('No Click Image to View Full Size text')
    else:
        # Drop the image caption spans inside the article.
        for caption in article.findAll(
                lambda tag: tag.name == 'span'
                and 'Click Image To View Full Size' in tag.text):
            caption.extract()
    # Remove images everywhere in the document.
    for img in soup.findAll('img'):
        img.extract()
    return article
def save_bs4_tag(tag: bs4.element.Tag, file_name: str=''):
    """Write the text content of *tag* to *file_name* as UTF-8."""
    with open(file_name, "w", encoding='utf-8') as out_file:
        out_file.write(tag.text)
if __name__ == '__main__':
pass | 36.309524 | 117 | 0.656393 | import requests
import bs4
from bs4 import BeautifulSoup
from cad_tickers.news.ceo.utils import module_logger
def extract_article(article_url: str)-> bs4.element.Tag:
r = requests.get(article_url)
if r == None:
module_logger.warning('No data returned from the url')
data = r.text
soup = BeautifulSoup(data, features="html.parser")
try:
scripts = [x.extract() for x in soup.findAll('script')]
except AttributeError as e:
module_logger.warning('No Scripts to Extract from article')
article = soup.find(attrs={'class': 'article-body article'})
if article == None:
article = soup.find(id='article')
if article == None:
article = soup.find(id='article-container')
try:
image_text = article.findAll(lambda tag : tag.name == 'span' and 'Click Image To View Full Size' in tag.text)
[x.extract() for x in image_text]
except AttributeError as e:
module_logger.warning('No Click Image to View Full Size text')
try:
images = [x.extract() for x in soup.findAll('img')]
except AttributeError as e:
module_logger.warning('No Images in news report')
return article
def save_bs4_tag(tag: bs4.element.Tag, file_name: str=''):
with open(file_name, "w", encoding='utf-8') as f:
f.write(tag.text)
if __name__ == '__main__':
pass | true | true |
1c31ed135cafc524fbead487f5bdf0e5b5daaa3e | 18,480 | py | Python | src_convertors/simple_convertors/analyzer.py | mansi-team/mansi_corpus | 369c71b8bb9e4d19999a88edf31d0017069d0d40 | [
"MIT"
] | null | null | null | src_convertors/simple_convertors/analyzer.py | mansi-team/mansi_corpus | 369c71b8bb9e4d19999a88edf31d0017069d0d40 | [
"MIT"
] | 1 | 2021-02-07T17:52:22.000Z | 2021-02-07T17:52:22.000Z | src_convertors/simple_convertors/analyzer.py | mansi-team/mansi_corpus | 369c71b8bb9e4d19999a88edf31d0017069d0d40 | [
"MIT"
] | 1 | 2020-06-13T06:44:22.000Z | 2020-06-13T06:44:22.000Z | import re
import copy
import os
class DumbMorphParser:
    """
    Contains methods that add context-independent word-level
    morphological information from a parsed word list to a
    collection of JSON sentences. No actual parsing takes
    place here.
    """
    # One <w>...</w> element of an RNC-style parsed word list:
    # group 1 captures the <ana .../> analyses, group 2 the word form.
    rxWordsRNC = re.compile('<w>(<ana.*?/(?:ana)?>)([^<>]+)</w>', flags=re.DOTALL)
    # Every <ana ...> element (self-closing or with a closing tag) of one word.
    rxAnalysesRNC = re.compile('<ana *([^<>]+)(?:></ana>|/>)\\s*')
    # One attribute="value" pair inside an <ana> element.
    rxAnaFieldRNC = re.compile('([^ <>"=]+) *= *"([^<>"]+)')
    # Separators between individual tags in a gramtag string.
    rxSplitGramTags = re.compile('[,, /=]')
    # Alternating runs of non-hyphen characters and hyphens in a word form.
    rxHyphenParts = re.compile('[^\\-]+|-+')
    # One morpheme/gloss token (no spaces, hyphens, "=", "<" or ">").
    rxGlossParts = re.compile('[^ \\-=<>]+')
    # A "gloss{part}" unit of a gloss index: group 1 = gloss, group 2 = part.
    rxGlossIndexPart = re.compile('^(.*)\\{(.*?)\\}')
    # A covert (bracketed) gloss such as "[pl]", possibly preceded by "." or "-".
    rxBracketGloss = re.compile('[.-]?\\[.*?\\]')
def __init__(self, settings, categories, errorLog=''):
    """
    Initialize the parser.

    settings -- corpus conversion settings dictionary (deep-copied);
    categories -- {language: {gramtag: category_name}} mapping (deep-copied);
    errorLog -- path of a file error messages are appended to
                (empty string disables logging).
    """
    self.settings = copy.deepcopy(settings)
    self.categories = copy.deepcopy(categories)
    # Per-language regexes matching any known gloss.
    self.rxAllGlosses = self.prepare_gloss_regex()
    # {language: {wordform: analyses}}, filled from the parsed word list(s).
    self.analyses = {}
    self.errorLog = errorLog
    # (condition_code, tag_set) rules for deriving gramtags from glosses.
    self.grammRules = []
    # Normalize optional settings to sets for fast membership tests.
    if 'multivalued_ana_features' in self.settings:
        self.settings['multivalued_ana_features'] = set(self.settings['multivalued_ana_features'])
    else:
        self.settings['multivalued_ana_features'] = set()
    if 'gramtags_exclude' in self.settings:
        self.settings['gramtags_exclude'] = set(self.settings['gramtags_exclude'])
    else:
        self.settings['gramtags_exclude'] = set()
    # Load pre-parsed analyses: either a single file for the corpus, or a
    # {language: filename} dictionary (parallel corpora).
    if ('parsed_wordlist_filename' in self.settings
            and len(self.settings['parsed_wordlist_filename']) > 0):
        if type(self.settings['parsed_wordlist_filename']) == str:
            self.load_analyses(os.path.join(self.settings['corpus_dir'],
                                            self.settings['parsed_wordlist_filename']))
        else:
            for language in self.settings['parsed_wordlist_filename']:
                self.load_analyses(os.path.join(self.settings['corpus_dir'],
                                                self.settings['parsed_wordlist_filename'][language]),
                                   language)
    self.load_rules()
def load_rules(self):
    """
    Load the gloss-to-gramtags conversion rules from conf/gramRules.txt
    inside the corpus directory.
    """
    rulesFname = os.path.join(self.settings['corpus_dir'], 'conf/gramRules.txt')
    self.load_gramm_rules(rulesFname)
@staticmethod
def prepare_rule(rule):
    """
    Make a compiled regex out of a rule represented as a string.

    The return value is actually a string of Python source code that is
    later evaluated with eval() against the local names ``ana`` (an
    analysis dict with 'parts' and 'gloss' keys) and ``tagsAndGlosses``
    (a set of tags and stripped glosses); see gloss2gr().
    """
    def replReg(s):
        # Double-quoted chunks of the rule are regexes searched over both
        # the morpheme breaks and the glosses. A chunk containing a single
        # quote would break the generated code, so it is dropped.
        if "'" in s:
            return ''
        return ' re.search(\'' + s + \
               '\', ana[\'parts\']) is not None or ' + \
               're.search(\'' + s + \
               '\', ana[\'gloss\']) is not None '

    # Splitting on '"' puts regex chunks at odd indices. Even-indexed
    # chunks use the rule mini-language: | & ~ [ ] become or/and/not and
    # parentheses, and bare identifiers become membership tests in
    # tagsAndGlosses.
    ruleParts = rule.split('"')
    rule = ''
    for i in range(len(ruleParts)):
        if i % 2 == 0:
            rule += re.sub('([^\\[\\]~|& \t\']+)', ' \'\\1\' in tagsAndGlosses ',
                           ruleParts[i]).replace('|', ' or ').replace('&', ' and ') \
                .replace('~', ' not ').replace('[', '(').replace(']', ')')
        else:
            rule += replReg(ruleParts[i])
    return rule
def load_gramm_rules(self, fname):
    """
    Load the main set of rules for converting glosses into bags of
    grammatical tags from the file fname.

    Each non-comment line has the form "CONDITION -> tag1,tag2,...";
    the condition is compiled by prepare_rule() into a string of Python
    code that is later eval'ed. The result is stored in self.grammRules
    as a list of [condition_code, set_of_tags] pairs. Does nothing if
    fname is empty or does not exist.
    """
    if len(fname) <= 0 or not os.path.isfile(fname):
        return
    rules = []
    # "with" guarantees the handle is closed even if a malformed line
    # makes prepare_rule() raise (the previous explicit close() leaked
    # the file object in that case).
    with open(fname, 'r', encoding='utf-8-sig') as f:
        for line in f:
            # Strip "#..." comments and surrounding whitespace.
            line = re.sub('#.*', '', line).strip()
            if len(line) <= 0:
                continue
            rule = [i.strip() for i in line.split('->')]
            if len(rule) != 2:
                continue
            rule[1] = set(rule[1].split(','))
            rule[0] = self.prepare_rule(rule[0])
            rules.append(rule)
    self.grammRules = rules
def log_message(self, message):
    """
    Append message (plus a newline) to the error log file, if one was
    configured. Any failure to write is silently ignored.
    """
    if not self.errorLog:
        return
    try:
        with open(self.errorLog, 'a', encoding='utf-8') as fLog:
            fLog.write(message + '\n')
    except:
        return
def load_analyses(self, fname, lang=''):
    """
    Load a parsed word list from the file fname and store its analyses
    in self.analyses[lang].

    If lang is empty, the first language from the settings (or, failing
    that, the corpus name) is used. Currently only the 'xml_rnc' parsed
    wordlist format is supported; other formats leave the dictionary
    empty.
    """
    if lang == '':
        if 'languages' in self.settings and len(self.settings['languages']) > 0:
            lang = self.settings['languages'][0]
        else:
            lang = self.settings['corpus_name']
    self.analyses[lang] = {}
    try:
        with open(fname, 'r', encoding='utf-8-sig') as f:
            text = f.read()
    except FileNotFoundError:
        # Use log_message, which safely does nothing when no error log is
        # configured (the previous code opened self.errorLog directly and
        # crashed when it was an empty string).
        self.log_message('File not found: ' + fname)
        return
    if self.settings['parsed_wordlist_format'] == 'xml_rnc':
        self.load_analyses_xml_rnc(text, lang=lang)
def transform_gramm_str(self, grStr, lang=''):
    """
    Transform a string with gramtags into a JSON object whose keys are
    "gr.<category>" and whose values are a tag or a list of tags.
    """
    grJSON = {}
    for tag in self.rxSplitGramTags.split(grStr):
        if not tag.strip():
            continue
        if tag in self.settings['gramtags_exclude']:
            continue
        if tag not in self.categories[lang]:
            # Unknown tag: report it and move on.
            print('No category for a gramtag:', tag, ', language:', lang)
            continue
        cat = 'gr.' + self.categories[lang][tag]
        prev = grJSON.get(cat)
        if prev is None:
            grJSON[cat] = tag
        elif isinstance(prev, list):
            if tag not in prev:
                prev.append(tag)
        else:
            # Promote a single string value to a list, avoiding duplicates.
            grJSON[cat] = [prev] if prev == tag else [prev, tag]
    return grJSON
def prepare_gloss_regex(self):
    """
    Return a dictionary {language: compiled regex} where each regex
    matches any single known gloss of that language as a whole word.

    If an explicit gloss list is present in the settings, it is used
    and matching is case-sensitive; otherwise the keys of the
    tag-to-category dictionary serve as the gloss inventory and
    matching is case-insensitive.
    """
    regexes = {}
    for lang in self.settings['languages']:
        if lang not in self.categories:
            self.categories[lang] = {}
        if 'glosses' in self.settings and lang in self.settings['glosses']:
            # Explicit gloss inventory; alternatives sorted by length.
            sRegex = '|'.join(re.escape(g) for g in sorted(self.settings['glosses'][lang], key=len))
            sRegex = '\\b(' + sRegex + ')\\b'
            regexes[lang] = re.compile(sRegex)
        else:
            # Fall back to the gramtag inventory; note the re.I flag here.
            sRegex = '|'.join(re.escape(g) for g in sorted(self.categories[lang], key=len))
            sRegex = '\\b(' + sRegex + ')\\b'
            regexes[lang] = re.compile(sRegex, flags=re.I)
    return regexes
def gloss2gr(self, ana, lang, useGlossList=False):
    """
    For an analysis that has glosses, but no tags for inflectional
    categories, add these categories.

    If useGlossList is True, use the list of known glosses to
    distinguish between glosses and stem translations. Otherwise,
    everything other than "STEM" is considered a gloss.
    The analysis dictionary ana is modified in place.
    """
    # TODO: Add rules for translating the glosses into tags.
    if 'gloss_index' not in ana:
        return
    if useGlossList:
        # Only substrings recognized by the gloss regex count as glosses.
        glosses = self.rxAllGlosses[lang].findall(ana['gloss_index'])
    else:
        # Take the gloss part of every "gloss{part}" unit.
        glosses = [self.rxGlossIndexPart.search(g).group(1)
                   for g in self.rxGlossParts.findall(ana['gloss_index'])]
    if 'glosses_covert' in ana:
        # Bracketed (covert) glosses collected by process_gloss_in_ana
        # also take part in rule matching; the temporary key is removed.
        glosses += ana['glosses_covert']
        del ana['glosses_covert']
    addedGrammTags = set()
    tagsAndGlosses = set()
    # Collect all grammatical tags already present in the analysis...
    for field in ana:
        if field.startswith('gr.'):
            if type(ana[field]) == str:
                tagsAndGlosses.add(ana[field])
            elif type(ana[field]) == list:
                tagsAndGlosses |= set(ana[field])
    # ...together with the glosses, stripped of morpheme delimiters.
    tagsAndGlosses |= set(gl.strip('-=:.<>') for gl in glosses)
    if len(self.grammRules) > 0:
        # Rules compiled by prepare_rule() are strings of Python code
        # referencing the local names ana and tagsAndGlosses.
        for rule in self.grammRules:
            if eval(rule[0]):
                addedGrammTags |= rule[1]
    else:
        # No rules: treat each (lower-cased) gloss as a tag candidate.
        for gl in glosses:
            if gl.upper() == gl:
                gl = gl.lower()
            addedGrammTags.add(gl)
    for tag in addedGrammTags:
        if tag in self.categories[lang]:
            anaCatName = 'gr.' + self.categories[lang][tag]
            if anaCatName not in ana:
                ana[anaCatName] = tag
            else:
                if type(ana[anaCatName]) == str:
                    ana[anaCatName] = [ana[anaCatName], tag]
                elif tag not in ana[anaCatName]:
                    # Bug fix: the original tested "tag not in ana[field]",
                    # where "field" was a stale loop variable from the
                    # earlier loop over ana; the intended key is anaCatName.
                    ana[anaCatName].append(tag)
def find_stems(self, glossIndex, lang):
    """
    Return all glosses that are not in the categories list, and
    therefore are the glosses for the stem, together with a rewritten
    gloss index where those units are replaced by "STEM{...}".
    """
    stems = []
    rebuiltParts = []
    for piece in glossIndex.split('-'):
        if not piece:
            continue
        m = self.rxGlossIndexPart.search(piece)
        if m is None:
            # Not a "gloss{part}" unit; keep as is.
            rebuiltParts.append(piece)
            continue
        gloss, part = m.group(1), m.group(2)
        if self.rxAllGlosses[lang].match(gloss) is None:
            # Unknown gloss -> stem translation.
            stems.append((gloss, part))
            rebuiltParts.append('STEM{' + part + '}')
        else:
            rebuiltParts.append(piece)
    # Every unit of the rebuilt index keeps its trailing hyphen.
    newIndexGloss = ''.join(p + '-' for p in rebuiltParts)
    return stems, newIndexGloss
def process_gloss_in_ana(self, ana, gloss_lang=''):
    """
    If there are fields 'gloss' and 'parts' in the JSON
    analysis, add field 'gloss_index' that contains the
    glossed word in such a form that it could be queried
    with the gloss query language.

    If gloss_lang is not empty, look in fields "gloss_%gloss_lang%"
    etc. instead of just "gloss". This may be needed if
    there are glosses in more than one metalanguage.
    Modify the source analysis, do not return anything.
    """
    if len(gloss_lang) > 0:
        gloss_lang = '_' + gloss_lang
    if 'gloss' + gloss_lang not in ana or 'parts' not in ana:
        return
    # Replace literal braces with parentheses so they cannot clash with
    # the {...} markers of the gloss index, and spaces with dots.
    # Bug fix: the second replace used to be "replace('{', ')')" (a
    # no-op, since all '{' were already replaced), leaving '}' intact.
    wordParts = self.rxGlossParts.findall(
        ana['parts'].replace('{', '(').replace('}', ')').replace(' ', '.'))
    glosses = self.rxGlossParts.findall(ana['gloss' + gloss_lang])
    # Overt glosses go into the gloss index; bracketed ones are covert.
    glossesOvert = [g for g in glosses if self.rxBracketGloss.search(g) is None]
    glossesCovert = [g.strip('[]') for g in glosses if self.rxBracketGloss.search(g) is not None]
    if len(wordParts) <= 0 or len(glosses) == 0 or len(wordParts) != len(glossesOvert):
        self.log_message('Wrong gloss or partitioning: ' + ana['parts'] + ' != ' + ana['gloss' + gloss_lang])
        return
    glossIndex = '-'.join(p[1] + '{' + p[0] + '}'
                          for p in zip(wordParts, glossesOvert)) + '-'
    ana['gloss_index' + gloss_lang] = glossIndex
    if len(glossesCovert) > 0:
        ana['glosses_covert' + gloss_lang] = glossesCovert
def transform_ana_rnc(self, ana, lang=''):
    """
    Transform analyses for a single word, written in the XML
    format used in Russian National Corpus, into a list of
    JSON (dictionary) analyses.
    """
    # Deduplicate identical <ana> elements first (tabs removed).
    setAna = set(self.rxAnalysesRNC.findall(ana.replace('\t', '')))
    analyses = []
    for ana in setAna:
        fields = self.rxAnaFieldRNC.findall(ana)
        if len(fields) <= 0:
            continue
        anaJSON = {}
        for k, v in fields:
            if k == 'gr':
                # Grammatical tags are split into per-category fields.
                anaJSON.update(self.transform_gramm_str(v, lang=lang))
            elif k in self.settings['multivalued_ana_features']:
                # Whitespace-separated multi-values become lists.
                anaJSON[k] = [tag.strip() for tag in v.split()]
            else:
                anaJSON[k] = v
        # Derive the gloss index field, if glosses are present.
        self.process_gloss_in_ana(anaJSON)
        analyses.append(anaJSON)
    return analyses
def load_analyses_xml_rnc(self, text, lang=''):
    """
    Load analyses from a string in the XML format used
    in Russian National Corpus and store them in
    self.analyses[lang], keyed by word form.
    """
    if lang == '':
        if 'languages' in self.settings and len(self.settings['languages']) > 0:
            lang = self.settings['languages'][0]
        else:
            lang = self.settings['corpus_name']
        # there can be several languages if the corpus is parallel
    analyses = self.rxWordsRNC.findall(text)
    if lang not in self.analyses:
        self.analyses[lang] = {}
    iAna = 1
    print('Loading analyses...')
    for ana in analyses:
        # Periodic progress report.
        if iAna % 20000 == 0:
            print('Loading analysis #' + str(iAna))
        # Strip surrounding punctuation and quote characters from the form.
        word = ana[1].strip('$&^#%*·;·‒–—―•…‘’‚“‛”„‟"\'')
        if len(word) <= 0:
            continue
        if iAna <= 50000:  # We assume the analyses are ordered by word frequency
            ana = self.transform_ana_rnc(ana[0], lang=lang)
        else:
            # Avoid huge memory consumption at the expense of time: rare
            # words keep the raw XML string and are parsed on demand in
            # analyze_word().
            ana = ana[0]
        # First occurrence wins; duplicates are ignored.
        if word not in self.analyses[lang]:
            self.analyses[lang][word] = ana
        iAna += 1
    print('Analyses for', len(self.analyses[lang]), 'different words loaded.')
def normalize(self, word):
"""
Normalize a word before searching for it in the list of analyses.
"""
word = word.strip().lower()
if 'char_replacements' in self.settings:
wordClean = ''
for c in word:
if c in self.settings['char_replacements']:
wordClean += self.settings['char_replacements'][c]
else:
wordClean += c
word = wordClean
return word
def analyze_word(self, wf, lang=''):
if lang not in self.analyses:
return []
if wf not in self.analyses[lang] and (wf.startswith('-') or wf.endswith('-')):
wf = wf.strip('-')
if wf in self.analyses[lang]:
ana = self.analyses[lang][wf]
if type(ana) == str and self.settings['parsed_wordlist_format'] == 'xml_rnc':
analyses = self.transform_ana_rnc(ana, lang=lang)
else:
analyses = copy.deepcopy(self.analyses[lang][wf])
else:
analyses = []
return analyses
    def analyze_hyphened_word(self, words, iWord, lang=''):
        """
        Try to analyze a word that contains a hyphen but could
        not be analyzed as a whole. Split the word in several,
        if needed.
        Return the number of extra tokens inserted into words
        (0 if the word was left unchanged).
        """
        word = words[iWord]
        # Alternating runs of non-hyphen characters and hyphen runs.
        parts = self.rxHyphenParts.findall(word['wf'])
        partAnalyses = []
        for iPart in range(len(parts)):
            if parts[iPart].startswith('-'):
                # Hyphen runs themselves are never analyzed.
                partAnalyses.append(None)
                continue
            wfPart = self.normalize(parts[iPart])
            # Word-internal parts are marked with leading/trailing hyphens
            # so that the dictionary can distinguish them from free words.
            if iPart > 0:
                wfPart = '-' + wfPart
            if iPart < len(parts) - 1:
                wfPart += '-'
            partAna = self.analyze_word(wfPart, lang)
            partAnalyses.append(partAna)
        if any(pa is not None and len(pa) > 0 for pa in partAnalyses):
            # At least one part was recognized: replace the original token
            # with one token per part, keeping character offsets and
            # next_word links consistent.
            offStart = word['off_start']
            newWords = [copy.deepcopy(word) for i in range(len(partAnalyses))]
            for i in range(len(newWords)):
                newWords[i]['wf'] = parts[i]
                newWords[i]['off_start'] = offStart
                offStart += len(newWords[i]['wf'])
                newWords[i]['off_end'] = offStart
                if i < len(newWords) - 1:
                    newWords[i]['next_word'] = iWord + i + 1
                else:
                    # The last part inherits the link to whatever followed
                    # the original token, shifted by the inserted tokens.
                    newWords[i]['next_word'] += len(newWords) - 1
                if newWords[i]['wf'].startswith('-'):
                    newWords[i]['wtype'] = 'punct'
                else:
                    newWords[i]['ana'] = partAnalyses[i]
            words.pop(iWord)
            # Shift next_word links of all remaining tokens that pointed
            # past the insertion point.
            for i in range(len(words)):
                if words[i]['next_word'] > iWord:
                    words[i]['next_word'] += len(newWords) - 1
            for i in range(len(newWords)):
                words.insert(iWord + i, newWords[i])
            return len(newWords) - 1
        return 0
    def analyze_sentence(self, s, lang=''):
        """
        Analyze each word in one sentence using preloaded analyses.
        Return statistics: (nTokens, nWords, nAnalyzed).
        """
        nTokens, nWords, nAnalyzed = 0, 0, 0
        if lang == '':
            # Default to the first configured language, or the corpus name.
            if 'languages' in self.settings and len(self.settings['languages']) > 0:
                lang = self.settings['languages'][0]
            else:
                lang = self.settings['corpus_name']
        if 'words' not in s:
            return 0, 0, 0
        # A while loop (rather than "for") is required because
        # analyze_hyphened_word() may insert tokens into s['words']
        # while we are iterating over it.
        iWord = -1
        while iWord < len(s['words']) - 1:
            iWord += 1
            nTokens += 1
            word = s['words'][iWord]
            if word['wtype'] != 'word':
                continue
            nWords += 1
            wf = self.normalize(word['wf'])
            analyses = self.analyze_word(wf, lang)
            if len(analyses) > 0:
                word['ana'] = analyses
                nAnalyzed += 1
            elif '-' in word['wf']:
                # Retry unanalyzed hyphenated words part by part; the
                # return value is the number of extra tokens inserted.
                iWord += self.analyze_hyphened_word(s['words'], iWord, lang)
        return nTokens, nWords, nAnalyzed
def analyze(self, sentences, lang=''):
"""
Analyze each word in each sentence using preloaded analyses.
Return statistics.
"""
nTokens, nWords, nAnalyzed = 0, 0, 0
if lang == '':
if 'languages' in self.settings and len(self.settings['languages']) > 0:
lang = self.settings['languages'][0]
else:
lang = self.settings['corpus_name']
for s in sentences:
nTokensCur, nWordsCur, nAnalyzedCur = self.analyze_sentence(s, lang)
nTokens += nTokensCur
nWords += nWordsCur
nAnalyzed += nAnalyzedCur
return nTokens, nWords, nAnalyzed
| 40.526316 | 113 | 0.516126 | import re
import copy
import os
class DumbMorphParser:
    """
    Adds context-insensitive morphological analyses to corpus words,
    using a preloaded parsed word list and, optionally, gloss-to-tag
    conversion rules (conf/gramRules.txt).
    """
    # <w><ana .../>wordform</w> elements of an RNC-format word list
    rxWordsRNC = re.compile('<w>(<ana.*?/(?:ana)?>)([^<>]+)</w>', flags=re.DOTALL)
    # single <ana ...> element (possibly self-closing)
    rxAnalysesRNC = re.compile('<ana *([^<>]+)(?:></ana>|/>)\\s*')
    # attribute="value" pairs inside an <ana> element
    rxAnaFieldRNC = re.compile('([^ <>"=]+) *= *"([^<>"]+)')
    # separators between grammatical tags in a "gr" string
    rxSplitGramTags = re.compile('[,, /=]')
    # alternating runs of non-hyphen characters and hyphens
    rxHyphenParts = re.compile('[^\\-]+|-+')
    # one segment of a gloss / parts string
    rxGlossParts = re.compile('[^ \\-=<>]+')
    # "gloss{part}" segment of a gloss index
    rxGlossIndexPart = re.compile('^(.*)\\{(.*?)\\}')
    # covert glosses written in square brackets
    rxBracketGloss = re.compile('[.-]?\\[.*?\\]')
    def __init__(self, settings, categories, errorLog=''):
        """
        settings -- corpus conversion settings dictionary (deep-copied);
        categories -- {lang: {tag: category}} mapping (deep-copied);
        errorLog -- name of a file for error messages ('' disables logging).
        Loads the parsed word list(s) and the gloss conversion rules.
        """
        self.settings = copy.deepcopy(settings)
        self.categories = copy.deepcopy(categories)
        self.rxAllGlosses = self.prepare_gloss_regex()
        self.analyses = {}
        self.errorLog = errorLog
        self.grammRules = []
        # Normalize list-valued settings to sets for O(1) membership tests.
        if 'multivalued_ana_features' in self.settings:
            self.settings['multivalued_ana_features'] = set(self.settings['multivalued_ana_features'])
        else:
            self.settings['multivalued_ana_features'] = set()
        if 'gramtags_exclude' in self.settings:
            self.settings['gramtags_exclude'] = set(self.settings['gramtags_exclude'])
        else:
            self.settings['gramtags_exclude'] = set()
        # parsed_wordlist_filename is either one file name (string) or a
        # {language: file name} dictionary for parallel corpora.
        if ('parsed_wordlist_filename' in self.settings
                and len(self.settings['parsed_wordlist_filename']) > 0):
            if type(self.settings['parsed_wordlist_filename']) == str:
                self.load_analyses(os.path.join(self.settings['corpus_dir'],
                                                self.settings['parsed_wordlist_filename']))
            else:
                for language in self.settings['parsed_wordlist_filename']:
                    self.load_analyses(os.path.join(self.settings['corpus_dir'],
                                                    self.settings['parsed_wordlist_filename'][language]),
                                       language)
        self.load_rules()
    def load_rules(self):
        """
        Load gloss-to-grammar conversion rules from the corpus
        configuration directory.
        """
        self.load_gramm_rules(os.path.join(self.settings['corpus_dir'], 'conf/gramRules.txt'))
    @staticmethod
    def prepare_rule(rule):
        """
        Transform the left-hand side of a gramRules.txt line into a
        Python expression string that gloss2gr() later eval's. Bare
        identifiers become membership tests against the local set
        "tagsAndGlosses"; double-quoted fragments become regex searches
        over ana['parts'] and ana['gloss']; the operators |, &, ~ and
        [ ] become or, and, not and parentheses.
        """
        def replReg(s):
            # Reject fragments containing single quotes: they would break
            # out of the string literal being built for eval().
            if "'" in s:
                return ''
            return ' re.search(\'' + s + \
                   '\', ana[\'parts\']) is not None or ' + \
                   're.search(\'' + s + \
                   '\', ana[\'gloss\']) is not None '
        # Even-numbered chunks are plain rule syntax; odd-numbered chunks
        # were inside double quotes (regex fragments).
        ruleParts = rule.split('"')
        rule = ''
        for i in range(len(ruleParts)):
            if i % 2 == 0:
                rule += re.sub('([^\\[\\]~|& \t\']+)', ' \'\\1\' in tagsAndGlosses ',
                               ruleParts[i]).replace('|', ' or ').replace('&', ' and ') \
                    .replace('~', ' not ').replace('[', '(').replace(']', ')')
            else:
                rule += replReg(ruleParts[i])
        return rule
def load_gramm_rules(self, fname):
if len(fname) <= 0 or not os.path.isfile(fname):
return
rules = []
f = open(fname, 'r', encoding='utf-8-sig')
for line in f:
line = re.sub('#.*', '', line).strip()
if len(line) > 0:
rule = [i.strip() for i in line.split('->')]
if len(rule) != 2:
continue
rule[1] = set(rule[1].split(','))
rule[0] = self.prepare_rule(rule[0])
rules.append(rule)
f.close()
self.grammRules = rules
def log_message(self, message):
if self.errorLog is None or len(self.errorLog) <= 0:
return
try:
fLog = open(self.errorLog, 'a', encoding='utf-8')
fLog.write(message + '\n')
fLog.close()
except:
return
def load_analyses(self, fname, lang=''):
if lang == '':
if 'languages' in self.settings and len(self.settings['languages']) > 0:
lang = self.settings['languages'][0]
else:
lang = self.settings['corpus_name']
self.analyses[lang] = {}
try:
f = open(fname, 'r', encoding='utf-8-sig')
text = f.read()
f.close()
if self.settings['parsed_wordlist_format'] == 'xml_rnc':
self.load_analyses_xml_rnc(text, lang=lang)
except FileNotFoundError:
fLog = open(self.errorLog, 'a', encoding='utf-8')
fLog.write('File not found: ' + fname + '\n')
fLog.close()
    def transform_gramm_str(self, grStr, lang=''):
        """
        Transform a string of grammatical tags (separated by commas,
        spaces, slashes or equals signs) into a dictionary that maps
        "gr.<category>" field names to a tag or, if several tags share
        one category, to a list of tags. Unknown tags are reported and
        skipped.
        """
        grJSON = {}
        grTags = self.rxSplitGramTags.split(grStr)
        for tag in grTags:
            if len(tag.strip()) <= 0:
                continue
            if tag in self.settings['gramtags_exclude']:
                continue
            if tag not in self.categories[lang]:
                # Tags without a configured category cannot be indexed.
                print('No category for a gramtag:', tag, ', language:', lang)
                continue
            cat = 'gr.' + self.categories[lang][tag]
            if cat not in grJSON:
                grJSON[cat] = tag
            else:
                # Several tags of the same category are stored as a list.
                if type(grJSON[cat]) != list:
                    grJSON[cat] = [grJSON[cat]]
                if tag not in grJSON[cat]:
                    grJSON[cat].append(tag)
        return grJSON
    def prepare_gloss_regex(self):
        """
        Compile, for each language, a regex that recognizes all known
        glosses. Glosses are taken from the "glosses" setting when it is
        present for the language; otherwise every category tag is used,
        case-insensitively. Return a {lang: compiled regex} dictionary.
        """
        regexes = {}
        for lang in self.settings['languages']:
            if lang not in self.categories:
                self.categories[lang] = {}
            if 'glosses' in self.settings and lang in self.settings['glosses']:
                # Alternatives are sorted by length; the \b boundaries keep
                # a shorter alternative from pre-empting a longer gloss.
                sRegex = '|'.join(re.escape(g) for g in sorted(self.settings['glosses'][lang], key=len))
                sRegex = '\\b(' + sRegex + ')\\b'
                regexes[lang] = re.compile(sRegex)
            else:
                sRegex = '|'.join(re.escape(g) for g in sorted(self.categories[lang], key=len))
                sRegex = '\\b(' + sRegex + ')\\b'
                regexes[lang] = re.compile(sRegex, flags=re.I)
        return regexes
def gloss2gr(self, ana, lang, useGlossList=False):
if 'gloss_index' not in ana:
return
if useGlossList:
glosses = self.rxAllGlosses[lang].findall(ana['gloss_index'])
else:
glosses = [self.rxGlossIndexPart.search(g).group(1)
for g in self.rxGlossParts.findall(ana['gloss_index'])]
if 'glosses_covert' in ana:
glosses += ana['glosses_covert']
del ana['glosses_covert']
addedGrammTags = set()
tagsAndGlosses = set()
for field in ana:
if field.startswith('gr.'):
if type(ana[field]) == str:
tagsAndGlosses.add(ana[field])
elif type(ana[field]) == list:
tagsAndGlosses |= set(ana[field])
tagsAndGlosses |= set(gl.strip('-=:.<>') for gl in glosses)
if len(self.grammRules) > 0:
for rule in self.grammRules:
if eval(rule[0]):
addedGrammTags |= rule[1]
else:
for gl in glosses:
if gl.upper() == gl:
gl = gl.lower()
addedGrammTags.add(gl)
for tag in addedGrammTags:
if tag in self.categories[lang]:
anaCatName = 'gr.' + self.categories[lang][tag]
if anaCatName not in ana:
ana[anaCatName] = tag
else:
if type(ana[anaCatName]) == str:
ana[anaCatName] = [ana[anaCatName], tag]
elif tag not in ana[field]:
ana[anaCatName].append(tag)
def find_stems(self, glossIndex, lang):
stems = []
newIndexGloss = ''
for glossPart in glossIndex.split('-'):
if len(glossPart) <= 0:
continue
m = self.rxGlossIndexPart.search(glossPart)
if m is None:
newIndexGloss += glossPart + '-'
continue
gloss, part = m.group(1), m.group(2)
if self.rxAllGlosses[lang].match(gloss) is None:
stems.append((gloss, part))
newIndexGloss += 'STEM{' + part + '}-'
else:
newIndexGloss += glossPart + '-'
return stems, newIndexGloss
def process_gloss_in_ana(self, ana, gloss_lang=''):
if len(gloss_lang) > 0:
gloss_lang = '_' + gloss_lang
if 'gloss' + gloss_lang not in ana or 'parts' not in ana:
return
wordParts = self.rxGlossParts.findall(ana['parts'].replace('{', '(').replace('{', ')').replace(' ', '.'))
glosses = self.rxGlossParts.findall(ana['gloss' + gloss_lang])
glossesOvert = [g for g in glosses if self.rxBracketGloss.search(g) is None]
glossesCovert = [g.strip('[]') for g in glosses if self.rxBracketGloss.search(g) is not None]
if len(wordParts) <= 0 or len(glosses) == 0 or len(wordParts) != len(glossesOvert):
self.log_message('Wrong gloss or partitioning: ' + ana['parts'] + ' != ' + ana['gloss' + gloss_lang])
return
glossIndex = '-'.join(p[1] + '{' + p[0] + '}'
for p in zip(wordParts, glossesOvert)) + '-'
ana['gloss_index' + gloss_lang] = glossIndex
if len(glossesCovert) > 0:
ana['glosses_covert' + gloss_lang] = glossesCovert
    def transform_ana_rnc(self, ana, lang=''):
        """
        Transform analyses for a single word, written in the XML format
        used in Russian National Corpus, into a list of JSON objects.
        """
        # Deduplicate identical <ana .../> elements before processing.
        setAna = set(self.rxAnalysesRNC.findall(ana.replace('\t', '')))
        analyses = []
        for ana in setAna:
            fields = self.rxAnaFieldRNC.findall(ana)
            if len(fields) <= 0:
                continue
            anaJSON = {}
            for k, v in fields:
                if k == 'gr':
                    # Grammatical tags expand into per-category fields.
                    anaJSON.update(self.transform_gramm_str(v, lang=lang))
                elif k in self.settings['multivalued_ana_features']:
                    anaJSON[k] = [tag.strip() for tag in v.split()]
                else:
                    anaJSON[k] = v
            self.process_gloss_in_ana(anaJSON)
            analyses.append(anaJSON)
        return analyses
    def load_analyses_xml_rnc(self, text, lang=''):
        """
        Load analyses from a string in the XML format used in Russian
        National Corpus into self.analyses[lang], keyed by word form.
        """
        if lang == '':
            # Default to the first configured language, or the corpus name.
            if 'languages' in self.settings and len(self.settings['languages']) > 0:
                lang = self.settings['languages'][0]
            else:
                lang = self.settings['corpus_name']
        analyses = self.rxWordsRNC.findall(text)
        if lang not in self.analyses:
            self.analyses[lang] = {}
        iAna = 1
        print('Loading analyses...')
        for ana in analyses:
            if iAna % 20000 == 0:
                print('Loading analysis #' + str(iAna))
            # Strip punctuation and quote characters around the word form.
            word = ana[1].strip('$&^#%*·;·‒–—―•…‘’‚“‛”„‟"\'')
            if len(word) <= 0:
                continue
            if iAna <= 50000:  # We assume the analyses are ordered by word frequency
                ana = self.transform_ana_rnc(ana[0], lang=lang)
            else:
                ana = ana[0]  # Avoid huge memory consumption at the expense of time
            if word not in self.analyses[lang]:
                self.analyses[lang][word] = ana
            iAna += 1
        print('Analyses for', len(self.analyses[lang]), 'different words loaded.')
    def normalize(self, word):
        """
        Normalize a word before searching for it in the list of
        analyses: trim whitespace, lowercase, and apply the character
        replacement table from the settings, if one is configured.
        """
        word = word.strip().lower()
        if 'char_replacements' in self.settings:
            wordClean = ''
            for c in word:
                if c in self.settings['char_replacements']:
                    wordClean += self.settings['char_replacements'][c]
                else:
                    wordClean += c
            word = wordClean
        return word
    def analyze_word(self, wf, lang=''):
        """
        Look up analyses for one normalized word form in the preloaded
        word list; hyphen-initial/final forms fall back to the bare form.
        Return a (deep-copied) list of analyses, or [] if none are found.
        """
        if lang not in self.analyses:
            return []
        if wf not in self.analyses[lang] and (wf.startswith('-') or wf.endswith('-')):
            wf = wf.strip('-')
        if wf in self.analyses[lang]:
            ana = self.analyses[lang][wf]
            # Rare words may be stored as raw RNC XML and parsed lazily.
            if type(ana) == str and self.settings['parsed_wordlist_format'] == 'xml_rnc':
                analyses = self.transform_ana_rnc(ana, lang=lang)
            else:
                analyses = copy.deepcopy(self.analyses[lang][wf])
        else:
            analyses = []
        return analyses
    def analyze_hyphened_word(self, words, iWord, lang=''):
        """
        Try to analyze a word that contains a hyphen but could not be
        analyzed as a whole; split it into several tokens if at least one
        part is recognized. Return the number of extra tokens inserted
        into words (0 if nothing changed).
        """
        word = words[iWord]
        # Alternating runs of non-hyphen characters and hyphen runs.
        parts = self.rxHyphenParts.findall(word['wf'])
        partAnalyses = []
        for iPart in range(len(parts)):
            if parts[iPart].startswith('-'):
                # Hyphen runs themselves are never analyzed.
                partAnalyses.append(None)
                continue
            wfPart = self.normalize(parts[iPart])
            # Word-internal parts are marked with leading/trailing hyphens.
            if iPart > 0:
                wfPart = '-' + wfPart
            if iPart < len(parts) - 1:
                wfPart += '-'
            partAna = self.analyze_word(wfPart, lang)
            partAnalyses.append(partAna)
        if any(pa is not None and len(pa) > 0 for pa in partAnalyses):
            # Replace the original token with one token per part, keeping
            # character offsets and next_word links consistent.
            offStart = word['off_start']
            newWords = [copy.deepcopy(word) for i in range(len(partAnalyses))]
            for i in range(len(newWords)):
                newWords[i]['wf'] = parts[i]
                newWords[i]['off_start'] = offStart
                offStart += len(newWords[i]['wf'])
                newWords[i]['off_end'] = offStart
                if i < len(newWords) - 1:
                    newWords[i]['next_word'] = iWord + i + 1
                else:
                    # The last part inherits the original onward link,
                    # shifted by the number of inserted tokens.
                    newWords[i]['next_word'] += len(newWords) - 1
                if newWords[i]['wf'].startswith('-'):
                    newWords[i]['wtype'] = 'punct'
                else:
                    newWords[i]['ana'] = partAnalyses[i]
            words.pop(iWord)
            # Shift next_word links that pointed past the insertion point.
            for i in range(len(words)):
                if words[i]['next_word'] > iWord:
                    words[i]['next_word'] += len(newWords) - 1
            for i in range(len(newWords)):
                words.insert(iWord + i, newWords[i])
            return len(newWords) - 1
        return 0
    def analyze_sentence(self, s, lang=''):
        """
        Analyze each word in one sentence using preloaded analyses.
        Return statistics: (nTokens, nWords, nAnalyzed).
        """
        nTokens, nWords, nAnalyzed = 0, 0, 0
        if lang == '':
            # Default to the first configured language, or the corpus name.
            if 'languages' in self.settings and len(self.settings['languages']) > 0:
                lang = self.settings['languages'][0]
            else:
                lang = self.settings['corpus_name']
        if 'words' not in s:
            return 0, 0, 0
        # A while loop is needed: analyze_hyphened_word() may insert
        # tokens into s['words'] while we iterate.
        iWord = -1
        while iWord < len(s['words']) - 1:
            iWord += 1
            nTokens += 1
            word = s['words'][iWord]
            if word['wtype'] != 'word':
                continue
            nWords += 1
            wf = self.normalize(word['wf'])
            analyses = self.analyze_word(wf, lang)
            if len(analyses) > 0:
                word['ana'] = analyses
                nAnalyzed += 1
            elif '-' in word['wf']:
                # Retry unanalyzed hyphenated words part by part.
                iWord += self.analyze_hyphened_word(s['words'], iWord, lang)
        return nTokens, nWords, nAnalyzed
    def analyze(self, sentences, lang=''):
        """
        Analyze each word in each sentence using preloaded analyses.
        Return the total numbers of (tokens, words, analyzed words).
        """
        nTokens, nWords, nAnalyzed = 0, 0, 0
        if lang == '':
            # Default to the first configured language, or the corpus name.
            if 'languages' in self.settings and len(self.settings['languages']) > 0:
                lang = self.settings['languages'][0]
            else:
                lang = self.settings['corpus_name']
        for s in sentences:
            nTokensCur, nWordsCur, nAnalyzedCur = self.analyze_sentence(s, lang)
            nTokens += nTokensCur
            nWords += nWordsCur
            nAnalyzed += nAnalyzedCur
        return nTokens, nWords, nAnalyzed
| true | true |
1c31ef0bec97c351e5a257803f09682c31dad000 | 5,653 | py | Python | plotly/graph_objs/surface/contours/x/_project.py | omridanan/plotly.py | a8d26670cba49ce15ce9b7639ae0f55a6088a825 | [
"MIT"
] | null | null | null | plotly/graph_objs/surface/contours/x/_project.py | omridanan/plotly.py | a8d26670cba49ce15ce9b7639ae0f55a6088a825 | [
"MIT"
] | null | null | null | plotly/graph_objs/surface/contours/x/_project.py | omridanan/plotly.py | a8d26670cba49ce15ce9b7639ae0f55a6088a825 | [
"MIT"
] | 1 | 2019-02-18T04:12:56.000Z | 2019-02-18T04:12:56.000Z | from plotly.basedatatypes import BaseTraceHierarchyType
import copy
class Project(BaseTraceHierarchyType):
    """
    Generated wrapper for the `surface.contours.x.project` attribute:
    three boolean flags (x, y, z) that control onto which coordinate
    planes the x-axis contour lines are projected.
    """
    # x
    # -
    @property
    def x(self):
        """
        Determines whether or not these contour lines are projected on
        the x plane. If `highlight` is set to *true* (the default), the
        projected lines are shown on hover. If `show` is set to *true*,
        the projected lines are shown in permanence.
        The 'x' property must be specified as a bool
        (either True, or False)
        Returns
        -------
        bool
        """
        return self['x']
    @x.setter
    def x(self, val):
        self['x'] = val
    # y
    # -
    @property
    def y(self):
        """
        Determines whether or not these contour lines are projected on
        the y plane. If `highlight` is set to *true* (the default), the
        projected lines are shown on hover. If `show` is set to *true*,
        the projected lines are shown in permanence.
        The 'y' property must be specified as a bool
        (either True, or False)
        Returns
        -------
        bool
        """
        return self['y']
    @y.setter
    def y(self, val):
        self['y'] = val
    # z
    # -
    @property
    def z(self):
        """
        Determines whether or not these contour lines are projected on
        the z plane. If `highlight` is set to *true* (the default), the
        projected lines are shown on hover. If `show` is set to *true*,
        the projected lines are shown in permanence.
        The 'z' property must be specified as a bool
        (either True, or False)
        Returns
        -------
        bool
        """
        return self['z']
    @z.setter
    def z(self, val):
        self['z'] = val
    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        return 'surface.contours.x'
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        x
            Determines whether or not these contour lines are
            projected on the x plane. If `highlight` is set to
            *true* (the default), the projected lines are shown on
            hover. If `show` is set to *true*, the projected lines
            are shown in permanence.
        y
            Determines whether or not these contour lines are
            projected on the y plane. If `highlight` is set to
            *true* (the default), the projected lines are shown on
            hover. If `show` is set to *true*, the projected lines
            are shown in permanence.
        z
            Determines whether or not these contour lines are
            projected on the z plane. If `highlight` is set to
            *true* (the default), the projected lines are shown on
            hover. If `show` is set to *true*, the projected lines
            are shown in permanence.
        """
    def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
        """
        Construct a new Project object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            plotly.graph_objs.surface.contours.x.Project
        x
            Determines whether or not these contour lines are
            projected on the x plane. If `highlight` is set to
            *true* (the default), the projected lines are shown on
            hover. If `show` is set to *true*, the projected lines
            are shown in permanence.
        y
            Determines whether or not these contour lines are
            projected on the y plane. If `highlight` is set to
            *true* (the default), the projected lines are shown on
            hover. If `show` is set to *true*, the projected lines
            are shown in permanence.
        z
            Determines whether or not these contour lines are
            projected on the z plane. If `highlight` is set to
            *true* (the default), the projected lines are shown on
            hover. If `show` is set to *true*, the projected lines
            are shown in permanence.
        Returns
        -------
        Project
        """
        super(Project, self).__init__('project')
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.surface.contours.x.Project
constructor must be a dict or
an instance of plotly.graph_objs.surface.contours.x.Project"""
            )
        # Import validators
        # -----------------
        from plotly.validators.surface.contours.x import (project as v_project)
        # Initialize validators
        # ---------------------
        self._validators['x'] = v_project.XValidator()
        self._validators['y'] = v_project.YValidator()
        self._validators['z'] = v_project.ZValidator()
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop('x', None)
        self.x = x if x is not None else _v
        _v = arg.pop('y', None)
        self.y = y if y is not None else _v
        _v = arg.pop('z', None)
        self.z = z if z is not None else _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
| 31.405556 | 79 | 0.546082 | from plotly.basedatatypes import BaseTraceHierarchyType
import copy
class Project(BaseTraceHierarchyType):
    """
    Generated wrapper for the `surface.contours.x.project` attribute:
    three boolean flags (x, y, z) that control onto which coordinate
    planes the x-axis contour lines are projected.
    """
    @property
    def x(self):
        """
        Whether these contour lines are projected on the x plane.
        The 'x' property must be specified as a bool.
        """
        return self['x']
    @x.setter
    def x(self, val):
        self['x'] = val
    @property
    def y(self):
        """
        Whether these contour lines are projected on the y plane.
        The 'y' property must be specified as a bool.
        """
        return self['y']
    @y.setter
    def y(self, val):
        self['y'] = val
    @property
    def z(self):
        """
        Whether these contour lines are projected on the z plane.
        The 'z' property must be specified as a bool.
        """
        return self['z']
    @z.setter
    def z(self, val):
        self['z'] = val
    @property
    def _parent_path_str(self):
        # Dotted path of the parent node in the figure hierarchy.
        return 'surface.contours.x'
    @property
    def _prop_descriptions(self):
        return """\
        x
            Determines whether or not these contour lines are
            projected on the x plane. If `highlight` is set to
            *true* (the default), the projected lines are shown on
            hover. If `show` is set to *true*, the projected lines
            are shown in permanence.
        y
            Determines whether or not these contour lines are
            projected on the y plane. If `highlight` is set to
            *true* (the default), the projected lines are shown on
            hover. If `show` is set to *true*, the projected lines
            are shown in permanence.
        z
            Determines whether or not these contour lines are
            projected on the z plane. If `highlight` is set to
            *true* (the default), the projected lines are shown on
            hover. If `show` is set to *true*, the projected lines
            are shown in permanence.
        """
    def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
        """
        Construct a new Project object.
        arg may be a dict of compatible properties or another Project
        instance; the keyword arguments x, y and z (booleans) override
        values from arg.
        """
        super(Project, self).__init__('project')
        # Validate and normalize arg to a plain dict.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.surface.contours.x.Project
constructor must be a dict or
an instance of plotly.graph_objs.surface.contours.x.Project"""
            )
        # Install per-property validators.
        from plotly.validators.surface.contours.x import (project as v_project)
        self._validators['x'] = v_project.XValidator()
        self._validators['y'] = v_project.YValidator()
        self._validators['z'] = v_project.ZValidator()
        # Explicit keyword arguments take precedence over values in arg.
        _v = arg.pop('x', None)
        self.x = x if x is not None else _v
        _v = arg.pop('y', None)
        self.y = y if y is not None else _v
        _v = arg.pop('z', None)
        self.z = z if z is not None else _v
        # Remaining entries of arg plus extra kwargs are processed last.
        self._process_kwargs(**dict(arg, **kwargs))
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.