hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf325a9d54262873c1277957b37b029b410cdff | 15,867 | py | Python | tests/test_build_epub.py | hassoon1986/sphinx | 6bdac80b5fb75641f7cd6f4b8c47bf6241360e2c | [
"BSD-2-Clause"
] | 1 | 2019-02-11T21:16:22.000Z | 2019-02-11T21:16:22.000Z | tests/test_build_epub.py | hassoon1986/sphinx | 6bdac80b5fb75641f7cd6f4b8c47bf6241360e2c | [
"BSD-2-Clause"
] | null | null | null | tests/test_build_epub.py | hassoon1986/sphinx | 6bdac80b5fb75641f7cd6f4b8c47bf6241360e2c | [
"BSD-2-Clause"
] | null | null | null | """
    test_build_epub
    ~~~~~~~~~~~~~~~
    Test the EPUB builder and check output against XPath.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import subprocess
from subprocess import CalledProcessError, PIPE
from xml.etree import ElementTree
import pytest
from sphinx.util import docutils
# check given command is runnable
def runnable(command):
    """Return True if *command* can be executed without error."""
    try:
        subprocess.run(command, stdout=PIPE, stderr=PIPE, check=True)
    except (OSError, CalledProcessError):
        # command not found, not executable, or exited with non-zero status
        return False
    return True
class EPUBElementTree:
    """Test helper wrapping ElementTree elements for content.opf and toc.ncx.

    ``find``/``findall`` automatically pass the epub namespace map and wrap
    results; everything else is delegated to the underlying element.
    """
    namespaces = {
        'idpf': 'http://www.idpf.org/2007/opf',
        'dc': 'http://purl.org/dc/elements/1.1/',
        'ibooks': 'http://vocabulary.itunes.apple.com/rdf/ibooks/vocabulary-extensions-1.0/',
        'ncx': 'http://www.daisy.org/z3986/2005/ncx/',
        'xhtml': 'http://www.w3.org/1999/xhtml',
        'epub': 'http://www.idpf.org/2007/ops'
    }
    def __init__(self, tree):
        self.tree = tree
    @classmethod
    def fromstring(cls, string):
        """Parse *string* and return a wrapped root element."""
        return cls(ElementTree.fromstring(string))
    def find(self, match):
        """Like Element.find(), but namespace-aware and wrapping the result."""
        found = self.tree.find(match, namespaces=self.namespaces)
        return None if found is None else self.__class__(found)
    def findall(self, match):
        """Like Element.findall(), but namespace-aware and wrapping results."""
        return [self.__class__(elem)
                for elem in self.tree.findall(match, namespaces=self.namespaces)]
    def __getattr__(self, name):
        # Delegate everything else (attrib, text, get, ...) to the element.
        return getattr(self.tree, name)
    def __iter__(self):
        return (self.__class__(child) for child in self.tree)
@pytest.mark.sphinx('epub', testroot='basic')
def test_build_epub(app):
    """Build the 'basic' test root as epub and verify every generated file:
    mimetype, container.xml, toc.ncx, content.opf and nav.xhtml."""
    app.build()
    assert (app.outdir / 'mimetype').text() == 'application/epub+zip'
    assert (app.outdir / 'META-INF' / 'container.xml').exists()
    # toc.ncx
    toc = EPUBElementTree.fromstring((app.outdir / 'toc.ncx').text())
    assert toc.find("./ncx:docTitle/ncx:text").text == 'Python documentation'
    # toc.ncx / head
    meta = list(toc.find("./ncx:head"))
    assert meta[0].attrib == {'name': 'dtb:uid', 'content': 'unknown'}
    assert meta[1].attrib == {'name': 'dtb:depth', 'content': '1'}
    assert meta[2].attrib == {'name': 'dtb:totalPageCount', 'content': '0'}
    assert meta[3].attrib == {'name': 'dtb:maxPageNumber', 'content': '0'}
    # toc.ncx / navMap
    navpoints = toc.findall("./ncx:navMap/ncx:navPoint")
    assert len(navpoints) == 1
    assert navpoints[0].attrib == {'id': 'navPoint1', 'playOrder': '1'}
    assert navpoints[0].find("./ncx:content").attrib == {'src': 'index.xhtml'}
    navlabel = navpoints[0].find("./ncx:navLabel/ncx:text")
    assert navlabel.text == 'The basic Sphinx documentation for testing'
    # content.opf
    opf = EPUBElementTree.fromstring((app.outdir / 'content.opf').text())
    # content.opf / metadata
    metadata = opf.find("./idpf:metadata")
    assert metadata.find("./dc:language").text == 'en'
    assert metadata.find("./dc:title").text == 'Python documentation'
    assert metadata.find("./dc:description").text == 'unknown'
    assert metadata.find("./dc:creator").text == 'unknown'
    assert metadata.find("./dc:contributor").text == 'unknown'
    assert metadata.find("./dc:publisher").text == 'unknown'
    assert metadata.find("./dc:rights").text is None
    assert metadata.find("./idpf:meta[@property='ibooks:version']").text is None
    assert metadata.find("./idpf:meta[@property='ibooks:specified-fonts']").text == 'true'
    assert metadata.find("./idpf:meta[@property='ibooks:binding']").text == 'true'
    assert metadata.find("./idpf:meta[@property='ibooks:scroll-axis']").text == 'vertical'
    # content.opf / manifest
    manifest = opf.find("./idpf:manifest")
    items = list(manifest)
    assert items[0].attrib == {'id': 'ncx',
                               'href': 'toc.ncx',
                               'media-type': 'application/x-dtbncx+xml'}
    assert items[1].attrib == {'id': 'nav',
                               'href': 'nav.xhtml',
                               'media-type': 'application/xhtml+xml',
                               'properties': 'nav'}
    assert items[2].attrib == {'id': 'epub-0',
                               'href': 'genindex.xhtml',
                               'media-type': 'application/xhtml+xml'}
    assert items[3].attrib == {'id': 'epub-1',
                               'href': 'index.xhtml',
                               'media-type': 'application/xhtml+xml'}
    for i, item in enumerate(items[2:]):
        # items are named as epub-NN
        assert item.get('id') == 'epub-%d' % i
    # content.opf / spine
    spine = opf.find("./idpf:spine")
    itemrefs = list(spine)
    assert spine.get('toc') == 'ncx'
    assert spine.get('page-progression-direction') == 'ltr'
    assert itemrefs[0].get('idref') == 'epub-1'
    assert itemrefs[1].get('idref') == 'epub-0'
    # content.opf / guide
    reference = opf.find("./idpf:guide/idpf:reference")
    assert reference.get('type') == 'toc'
    assert reference.get('title') == 'Table of Contents'
    assert reference.get('href') == 'index.xhtml'
    # nav.xhtml
    nav = EPUBElementTree.fromstring((app.outdir / 'nav.xhtml').text())
    assert nav.attrib == {'lang': 'en',
                          '{http://www.w3.org/XML/1998/namespace}lang': 'en'}
    assert nav.find("./xhtml:head/xhtml:title").text == 'Table of Contents'
    # nav.xhtml / nav
    navlist = nav.find("./xhtml:body/xhtml:nav")
    toc = navlist.findall("./xhtml:ol/xhtml:li")
    assert navlist.find("./xhtml:h1").text == 'Table of Contents'
    assert len(toc) == 1
    assert toc[0].find("./xhtml:a").get("href") == 'index.xhtml'
    assert toc[0].find("./xhtml:a").text == 'The basic Sphinx documentation for testing'
@pytest.mark.sphinx('epub', testroot='footnotes',
                    confoverrides={'epub_cover': ('_images/rimg.png', None)})
def test_epub_cover(app):
    """The configured epub_cover image must appear in the manifest and be
    referenced by the <meta name="cover"> element in content.opf."""
    app.build()
    # content.opf / metadata
    opf = EPUBElementTree.fromstring((app.outdir / 'content.opf').text())
    cover_image = opf.find("./idpf:manifest/idpf:item[@href='%s']" % app.config.epub_cover[0])
    cover = opf.find("./idpf:metadata/idpf:meta[@name='cover']")
    assert cover
    assert cover.get('content') == cover_image.get('id')
@pytest.mark.sphinx('epub', testroot='toctree')
def test_nested_toc(app):
    """Nested toctrees must produce nested navPoints in toc.ncx and nested
    <ol> lists in nav.xhtml."""
    app.build()
    # toc.ncx
    toc = EPUBElementTree.fromstring((app.outdir / 'toc.ncx').bytes())
    assert toc.find("./ncx:docTitle/ncx:text").text == 'Python documentation'
    # toc.ncx / navPoint
    def navinfo(elem):
        # helper: (id, playOrder, src, label text) of a navPoint element
        label = elem.find("./ncx:navLabel/ncx:text")
        content = elem.find("./ncx:content")
        return (elem.get('id'), elem.get('playOrder'),
                content.get('src'), label.text)
    navpoints = toc.findall("./ncx:navMap/ncx:navPoint")
    assert len(navpoints) == 4
    assert navinfo(navpoints[0]) == ('navPoint1', '1', 'index.xhtml',
                                     "Welcome to Sphinx Tests’s documentation!")
    assert navpoints[0].findall("./ncx:navPoint") == []
    # toc.ncx / nested navPoints
    assert navinfo(navpoints[1]) == ('navPoint2', '2', 'foo.xhtml', 'foo')
    navchildren = navpoints[1].findall("./ncx:navPoint")
    assert len(navchildren) == 4
    assert navinfo(navchildren[0]) == ('navPoint3', '2', 'foo.xhtml', 'foo')
    assert navinfo(navchildren[1]) == ('navPoint4', '3', 'quux.xhtml', 'quux')
    assert navinfo(navchildren[2]) == ('navPoint5', '4', 'foo.xhtml#foo-1', 'foo.1')
    assert navinfo(navchildren[3]) == ('navPoint8', '6', 'foo.xhtml#foo-2', 'foo.2')
    # nav.xhtml / nav
    def navinfo(elem):
        # helper: (href, link text) of a toc entry in nav.xhtml
        anchor = elem.find("./xhtml:a")
        return (anchor.get('href'), anchor.text)
    nav = EPUBElementTree.fromstring((app.outdir / 'nav.xhtml').bytes())
    toc = nav.findall("./xhtml:body/xhtml:nav/xhtml:ol/xhtml:li")
    assert len(toc) == 4
    assert navinfo(toc[0]) == ('index.xhtml',
                               "Welcome to Sphinx Tests’s documentation!")
    assert toc[0].findall("./xhtml:ol") == []
    # nav.xhtml / nested toc
    assert navinfo(toc[1]) == ('foo.xhtml', 'foo')
    tocchildren = toc[1].findall("./xhtml:ol/xhtml:li")
    assert len(tocchildren) == 3
    assert navinfo(tocchildren[0]) == ('quux.xhtml', 'quux')
    assert navinfo(tocchildren[1]) == ('foo.xhtml#foo-1', 'foo.1')
    assert navinfo(tocchildren[2]) == ('foo.xhtml#foo-2', 'foo.2')
    grandchild = tocchildren[1].findall("./xhtml:ol/xhtml:li")
    assert len(grandchild) == 1
    assert navinfo(grandchild[0]) == ('foo.xhtml#foo-1-1', 'foo.1-1')
@pytest.mark.sphinx('epub', testroot='need-escaped')
def test_escaped_toc(app):
    """Titles containing markup and special characters must be escaped
    correctly in both toc.ncx and nav.xhtml."""
    app.build()
    # toc.ncx
    toc = EPUBElementTree.fromstring((app.outdir / 'toc.ncx').bytes())
    assert toc.find("./ncx:docTitle/ncx:text").text == ('need <b>"escaped"</b> '
                                                        'project documentation')
    # toc.ncx / navPoint
    def navinfo(elem):
        # helper: (id, playOrder, src, label text) of a navPoint element
        label = elem.find("./ncx:navLabel/ncx:text")
        content = elem.find("./ncx:content")
        return (elem.get('id'), elem.get('playOrder'),
                content.get('src'), label.text)
    navpoints = toc.findall("./ncx:navMap/ncx:navPoint")
    assert len(navpoints) == 4
    assert navinfo(navpoints[0]) == ('navPoint1', '1', 'index.xhtml',
                                     "Welcome to Sphinx Tests's documentation!")
    assert navpoints[0].findall("./ncx:navPoint") == []
    # toc.ncx / nested navPoints
    assert navinfo(navpoints[1]) == ('navPoint2', '2', 'foo.xhtml', '<foo>')
    navchildren = navpoints[1].findall("./ncx:navPoint")
    assert len(navchildren) == 4
    assert navinfo(navchildren[0]) == ('navPoint3', '2', 'foo.xhtml', '<foo>')
    assert navinfo(navchildren[1]) == ('navPoint4', '3', 'quux.xhtml', 'quux')
    assert navinfo(navchildren[2]) == ('navPoint5', '4', 'foo.xhtml#foo-1', 'foo “1”')
    assert navinfo(navchildren[3]) == ('navPoint8', '6', 'foo.xhtml#foo-2', 'foo.2')
    # nav.xhtml / nav
    def navinfo(elem):
        # helper: (href, link text) of a toc entry in nav.xhtml
        anchor = elem.find("./xhtml:a")
        return (anchor.get('href'), anchor.text)
    nav = EPUBElementTree.fromstring((app.outdir / 'nav.xhtml').bytes())
    toc = nav.findall("./xhtml:body/xhtml:nav/xhtml:ol/xhtml:li")
    assert len(toc) == 4
    assert navinfo(toc[0]) == ('index.xhtml',
                               "Welcome to Sphinx Tests's documentation!")
    assert toc[0].findall("./xhtml:ol") == []
    # nav.xhtml / nested toc
    assert navinfo(toc[1]) == ('foo.xhtml', '<foo>')
    tocchildren = toc[1].findall("./xhtml:ol/xhtml:li")
    assert len(tocchildren) == 3
    assert navinfo(tocchildren[0]) == ('quux.xhtml', 'quux')
    assert navinfo(tocchildren[1]) == ('foo.xhtml#foo-1', 'foo “1”')
    assert navinfo(tocchildren[2]) == ('foo.xhtml#foo-2', 'foo.2')
    grandchild = tocchildren[1].findall("./xhtml:ol/xhtml:li")
    assert len(grandchild) == 1
    assert navinfo(grandchild[0]) == ('foo.xhtml#foo-1-1', 'foo.1-1')
@pytest.mark.sphinx('epub', testroot='basic')
def test_epub_writing_mode(app):
    """epub_writing_mode must drive page-progression-direction,
    ibooks:scroll-axis and the writing-mode CSS property."""
    # horizontal (default)
    app.build()
    # horizontal / page-progression-direction
    opf = EPUBElementTree.fromstring((app.outdir / 'content.opf').text())
    assert opf.find("./idpf:spine").get('page-progression-direction') == 'ltr'
    # horizontal / ibooks:scroll-axis
    metadata = opf.find("./idpf:metadata")
    assert metadata.find("./idpf:meta[@property='ibooks:scroll-axis']").text == 'vertical'
    # horizontal / writing-mode (CSS)
    css = (app.outdir / '_static' / 'epub.css').text()
    assert 'writing-mode: horizontal-tb;' in css
    # vertical
    app.config.epub_writing_mode = 'vertical'
    (app.outdir / 'index.xhtml').unlink()  # force a rebuild
    app.build()
    # vertical / page-progression-direction
    opf = EPUBElementTree.fromstring((app.outdir / 'content.opf').text())
    assert opf.find("./idpf:spine").get('page-progression-direction') == 'rtl'
    # vertical / ibooks:scroll-axis
    metadata = opf.find("./idpf:metadata")
    assert metadata.find("./idpf:meta[@property='ibooks:scroll-axis']").text == 'horizontal'
    # vertical / writing-mode (CSS)
    css = (app.outdir / '_static' / 'epub.css').text()
    assert 'writing-mode: vertical-rl;' in css
@pytest.mark.sphinx('epub', testroot='epub-anchor-id')
def test_epub_anchor_id(app):
    """Anchor ids must be emitted on the element itself (epub requires valid
    XHTML fragments) and referenced by internal links."""
    app.build()
    html = (app.outdir / 'index.xhtml').text()
    assert '<p id="std-setting-STATICFILES_FINDERS">blah blah blah</p>' in html
    assert 'see <a class="reference internal" href="#std-setting-STATICFILES_FINDERS">' in html
@pytest.mark.sphinx('epub', testroot='html_assets')
def test_epub_assets(app):
    """CSS files registered via html_css_files must be linked from the
    generated xhtml pages."""
    app.builder.build_all()
    # epub stylesheets (same as html_css_files)
    content = (app.outdir / 'index.xhtml').text()
    assert ('<link rel="stylesheet" type="text/css" href="_static/css/style.css" />'
            in content)
    assert ('<link media="print" rel="stylesheet" title="title" type="text/css" '
            'href="https://example.com/custom.css" />' in content)
@pytest.mark.sphinx('epub', testroot='html_assets',
                    confoverrides={'epub_css_files': ['css/epub.css']})
def test_epub_css_files(app):
    """When epub_css_files is set it replaces html_css_files for epub output."""
    app.builder.build_all()
    # epub_css_files
    content = (app.outdir / 'index.xhtml').text()
    assert '<link rel="stylesheet" type="text/css" href="_static/css/epub.css" />' in content
    # files in html_css_files are not output
    assert ('<link rel="stylesheet" type="text/css" href="_static/css/style.css" />'
            not in content)
    assert ('<link media="print" rel="stylesheet" title="title" type="text/css" '
            'href="https://example.com/custom.css" />' not in content)
@pytest.mark.skipif(docutils.__version_info__ < (0, 13),
                    reason='docutils-0.13 or above is required')
@pytest.mark.sphinx('epub', testroot='roles-download')
def test_html_download_role(app, status, warning):
    """:download: roles must render as plain literals in epub (no copied
    files under _downloads) and keep the external link target visible."""
    app.build()
    assert not (app.outdir / '_downloads' / 'dummy.dat').exists()
    content = (app.outdir / 'index.xhtml').text()
    assert ('<li><p><code class="xref download docutils literal notranslate">'
            '<span class="pre">dummy.dat</span></code></p></li>' in content)
    assert ('<li><p><code class="xref download docutils literal notranslate">'
            '<span class="pre">not_found.dat</span></code></p></li>' in content)
    assert ('<li><p><code class="xref download docutils literal notranslate">'
            '<span class="pre">Sphinx</span> <span class="pre">logo</span></code>'
            '<span class="link-target"> [http://www.sphinx-doc.org/en/master'
            '/_static/sphinxheader.png]</span></p></li>' in content)
@pytest.mark.sphinx('epub', testroot='toctree-duplicated')
def test_duplicated_toctree_entry(app, status, warning):
    """A document listed twice in toctrees must trigger a warning."""
    app.build()
    assert 'WARNING: duplicated ToC entry found: foo.xhtml' in warning.getvalue()
@pytest.mark.skipif('DO_EPUBCHECK' not in os.environ,
                    reason='Skipped because DO_EPUBCHECK is not set')
@pytest.mark.sphinx('epub')
def test_run_epubcheck(app):
    """Optionally validate the generated epub with the external epubcheck
    tool (requires java and the jar; opt-in via DO_EPUBCHECK)."""
    app.build()
    epubcheck = os.environ.get('EPUBCHECK_PATH', '/usr/share/java/epubcheck.jar')
    if runnable(['java', '-version']) and os.path.exists(epubcheck):
        try:
            subprocess.run(['java', '-jar', epubcheck, app.outdir / 'SphinxTests.epub'],
                           stdout=PIPE, stderr=PIPE, check=True)
        except CalledProcessError as exc:
            # surface epubcheck's own report before failing the test
            print(exc.stdout)
            print(exc.stderr)
            assert False, 'epubcheck exited with return code %s' % exc.returncode
| 40.16962 | 95 | 0.612655 |
acf325f23390cbcc976d1a1c55324f34682711c4 | 559 | py | Python | printrider/__init__.py | namuan/print-rider-py | 73f0b1e45f7a53910903b06927787a3c157b0046 | [
"MIT"
] | 1 | 2021-06-30T21:41:35.000Z | 2021-06-30T21:41:35.000Z | printrider/__init__.py | namuan/print-rider-py | 73f0b1e45f7a53910903b06927787a3c157b0046 | [
"MIT"
] | null | null | null | printrider/__init__.py | namuan/print-rider-py | 73f0b1e45f7a53910903b06927787a3c157b0046 | [
"MIT"
] | null | null | null | from flask import Flask
from printrider.dynamo_db import DynamoDatabase
db_config = DynamoDatabase()
def create_app(config_object):
    """Flask application factory.

    :param config_object: configuration object/class holding the Flask
        settings; init_db() also reads DB_CLIENT and
        PRINT_DOCUMENTS_DB_TABLE from it.
    :return: the configured :class:`flask.Flask` instance.
    """
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_object(config_object)
    init_db(config_object)
    register_blueprints(app)
    return app
def init_db(config_object):
    """Bind the module-level DynamoDatabase to the configured client/table."""
    db_config.init_app(config_object.DB_CLIENT, config_object.PRINT_DOCUMENTS_DB_TABLE)
def register_blueprints(app: Flask):
    """Attach all application blueprints to *app*."""
    # imported here, presumably to avoid a circular import at module load
    # time — TODO confirm against printrider.prints
    from printrider.prints import prints_blueprint
    app.register_blueprint(prints_blueprint)
| 24.304348 | 87 | 0.801431 |
acf3266b399b9485f9a2338fecef0edec17e7c8c | 13,974 | py | Python | kivy/utils.py | clevermindgames/kivy | 7243529c76b4320b14ef25bb02d29f0ce8ccd6d6 | [
"MIT"
] | 1 | 2019-04-26T19:07:48.000Z | 2019-04-26T19:07:48.000Z | kivy/utils.py | basharbme/kivy | 88d456d56b872feeb1b3d7d98bccbfea4137c338 | [
"MIT"
] | null | null | null | kivy/utils.py | basharbme/kivy | 88d456d56b872feeb1b3d7d98bccbfea4137c338 | [
"MIT"
] | null | null | null | # pylint: disable=W0611
'''
Utils
=====
The Utils module provides a selection of general utility functions and classes
that may be useful for various applications. These include maths, color,
algebraic and platform functions.
.. versionchanged:: 1.6.0
The OrderedDict class has been removed. Use collections.OrderedDict
instead.
'''
__all__ = ('intersection', 'difference', 'strtotuple',
'get_color_from_hex', 'get_hex_from_color', 'get_random_color',
'is_color_transparent', 'hex_colormap', 'colormap', 'boundary',
'deprecated', 'SafeList',
'interpolate', 'QueryDict',
'platform', 'escape_markup', 'reify', 'rgba')
from os import environ
from sys import platform as _sys_platform
from re import match, split
from kivy.compat import string_types
def boundary(value, minvalue, maxvalue):
    '''Clamp *value* into the inclusive range [minvalue, maxvalue].'''
    clamped = max(value, minvalue)
    return min(clamped, maxvalue)
def intersection(set1, set2):
    '''Return the items of set1 that also appear in set2 (order preserved).'''
    common = []
    for item in set1:
        if item in set2:
            common.append(item)
    return common
def difference(set1, set2):
    '''Return the items of set1 that do not appear in set2 (order preserved).'''
    remaining = []
    for item in set1:
        if item not in set2:
            remaining.append(item)
    return remaining
def interpolate(value_from, value_to, step=10):
    '''Move *value_from* one `step`-th of the way towards *value_to*.
    Useful for smoothing transitions, e.g.::
        # instead of assigning the target position directly,
        self.pos = interpolate(self.pos, new_pos)
    Lists and tuples are interpolated element-wise (a list is returned).
    .. warning::
        Both sequences must have the same dimensions; no check is performed.
    '''
    if type(value_from) in (list, tuple):
        return [interpolate(src, dst, step)
                for src, dst in zip(value_from, value_to)]
    return value_from + (value_to - value_from) / float(step)
def strtotuple(s):
    '''Convert a tuple string into a tuple
    with some security checks. Designed to be used
    with the eval() function::
        a = (12, 54, 68)
        b = str(a) # return '(12, 54, 68)'
        c = strtotuple(b) # return (12, 54, 68)
    :raises Exception: if the string contains disallowed characters, has
        unbalanced brackets, or does not evaluate to a list or tuple.
    '''
    from ast import literal_eval
    # security: only digits, separators and (nested) brackets are allowed.
    # Raw string fixes the invalid '\[' escape of the original pattern.
    if not match(r'^[,.0-9 ()\[\]]*$', s):
        raise Exception('Invalid characters in string for tuple conversion')
    # fast syntax check
    if s.count('(') != s.count(')'):
        raise Exception('Invalid count of ( and )')
    if s.count('[') != s.count(']'):
        raise Exception('Invalid count of [ and ]')
    # literal_eval only accepts Python literals, so no arbitrary code can
    # run even if the character filter above were ever bypassed.
    r = literal_eval(s)
    if type(r) not in (list, tuple):
        raise Exception('Conversion failed')
    return r
def rgba(s, *args):
    '''Return a Kivy color (4 values in the 0-1 range) from either a hex
    string or 0-255 channel values.
    .. versionadded:: 1.10.0
    '''
    if isinstance(s, string_types):
        return get_color_from_hex(s)
    if isinstance(s, (list, tuple)):
        channels = [component / 255. for component in s]
    elif isinstance(s, (int, float)):
        channels = [s / 255.]
        channels.extend(component / 255. for component in args)
    else:
        raise Exception('Invalid value (not a string / list / tuple)')
    # alpha defaults to fully opaque when only r, g, b were given
    if len(channels) == 3:
        channels.append(1)
    return channels
def get_color_from_hex(s):
    '''Transform a hex string color to a kivy
    :class:`~kivy.graphics.Color` (list of 0-1 floats).
    '''
    # drop any leading '#' markers and normalize case
    hexdigits = s.lstrip('#').lower()
    channels = [int(group, 16) / 255.
                for group in split('([0-9a-f]{2})', hexdigits) if group != '']
    # alpha defaults to fully opaque when only rgb was given
    if len(channels) == 3:
        channels.append(1)
    return channels
def get_hex_from_color(color):
    '''Transform a kivy :class:`~kivy.graphics.Color` to a hex value::
        >>> get_hex_from_color((0, 1, 0))
        '#00ff00'
        >>> get_hex_from_color((.25, .77, .90, .5))
        '#3fc4e57f'
    .. versionadded:: 1.5.0
    '''
    digits = ('{0:02x}'.format(int(channel * 255)) for channel in color)
    return '#' + ''.join(digits)
def get_random_color(alpha=1.0):
    '''Returns a random color as a list of 4 values in the 0-1 range.
    :Parameters:
        `alpha`: float, defaults to 1.0
            If alpha == 'random', a random alpha value is generated.
    '''
    from random import random
    channels = [random(), random(), random()]
    channels.append(random() if alpha == 'random' else alpha)
    return channels
def is_color_transparent(c):
    '''Return True if the alpha channel is 0.'''
    # colors with fewer than 4 components have no alpha and are opaque
    return len(c) >= 4 and float(c[3]) == 0.
# Mapping of CSS/X11 color names to '#rrggbb' hex strings.
hex_colormap = {
    'aliceblue': '#f0f8ff',
    'antiquewhite': '#faebd7',
    'aqua': '#00ffff',
    'aquamarine': '#7fffd4',
    'azure': '#f0ffff',
    'beige': '#f5f5dc',
    'bisque': '#ffe4c4',
    'black': '#000000',
    'blanchedalmond': '#ffebcd',
    'blue': '#0000ff',
    'blueviolet': '#8a2be2',
    'brown': '#a52a2a',
    'burlywood': '#deb887',
    'cadetblue': '#5f9ea0',
    'chartreuse': '#7fff00',
    'chocolate': '#d2691e',
    'coral': '#ff7f50',
    'cornflowerblue': '#6495ed',
    'cornsilk': '#fff8dc',
    'crimson': '#dc143c',
    'cyan': '#00ffff',
    'darkblue': '#00008b',
    'darkcyan': '#008b8b',
    'darkgoldenrod': '#b8860b',
    'darkgray': '#a9a9a9',
    'darkgrey': '#a9a9a9',
    'darkgreen': '#006400',
    'darkkhaki': '#bdb76b',
    'darkmagenta': '#8b008b',
    'darkolivegreen': '#556b2f',
    'darkorange': '#ff8c00',
    'darkorchid': '#9932cc',
    'darkred': '#8b0000',
    'darksalmon': '#e9967a',
    'darkseagreen': '#8fbc8f',
    'darkslateblue': '#483d8b',
    'darkslategray': '#2f4f4f',
    'darkslategrey': '#2f4f4f',
    'darkturquoise': '#00ced1',
    'darkviolet': '#9400d3',
    'deeppink': '#ff1493',
    'deepskyblue': '#00bfff',
    'dimgray': '#696969',
    'dimgrey': '#696969',
    'dodgerblue': '#1e90ff',
    'firebrick': '#b22222',
    'floralwhite': '#fffaf0',
    'forestgreen': '#228b22',
    'fuchsia': '#ff00ff',
    'gainsboro': '#dcdcdc',
    'ghostwhite': '#f8f8ff',
    'gold': '#ffd700',
    'goldenrod': '#daa520',
    'gray': '#808080',
    'grey': '#808080',
    'green': '#008000',
    'greenyellow': '#adff2f',
    'honeydew': '#f0fff0',
    'hotpink': '#ff69b4',
    'indianred': '#cd5c5c',
    'indigo': '#4b0082',
    'ivory': '#fffff0',
    'khaki': '#f0e68c',
    'lavender': '#e6e6fa',
    'lavenderblush': '#fff0f5',
    'lawngreen': '#7cfc00',
    'lemonchiffon': '#fffacd',
    'lightblue': '#add8e6',
    'lightcoral': '#f08080',
    'lightcyan': '#e0ffff',
    'lightgoldenrodyellow': '#fafad2',
    'lightgreen': '#90ee90',
    'lightgray': '#d3d3d3',
    'lightgrey': '#d3d3d3',
    'lightpink': '#ffb6c1',
    'lightsalmon': '#ffa07a',
    'lightseagreen': '#20b2aa',
    'lightskyblue': '#87cefa',
    'lightslategray': '#778899',
    'lightslategrey': '#778899',
    'lightsteelblue': '#b0c4de',
    'lightyellow': '#ffffe0',
    'lime': '#00ff00',
    'limegreen': '#32cd32',
    'linen': '#faf0e6',
    'magenta': '#ff00ff',
    'maroon': '#800000',
    'mediumaquamarine': '#66cdaa',
    'mediumblue': '#0000cd',
    'mediumorchid': '#ba55d3',
    'mediumpurple': '#9370db',
    'mediumseagreen': '#3cb371',
    'mediumslateblue': '#7b68ee',
    'mediumspringgreen': '#00fa9a',
    'mediumturquoise': '#48d1cc',
    'mediumvioletred': '#c71585',
    'midnightblue': '#191970',
    'mintcream': '#f5fffa',
    'mistyrose': '#ffe4e1',
    'moccasin': '#ffe4b5',
    'navajowhite': '#ffdead',
    'navy': '#000080',
    'oldlace': '#fdf5e6',
    'olive': '#808000',
    'olivedrab': '#6b8e23',
    'orange': '#ffa500',
    'orangered': '#ff4500',
    'orchid': '#da70d6',
    'palegoldenrod': '#eee8aa',
    'palegreen': '#98fb98',
    'paleturquoise': '#afeeee',
    'palevioletred': '#db7093',
    'papayawhip': '#ffefd5',
    'peachpuff': '#ffdab9',
    'peru': '#cd853f',
    'pink': '#ffc0cb',
    'plum': '#dda0dd',
    'powderblue': '#b0e0e6',
    'purple': '#800080',
    'red': '#ff0000',
    'rosybrown': '#bc8f8f',
    'royalblue': '#4169e1',
    'saddlebrown': '#8b4513',
    'salmon': '#fa8072',
    'sandybrown': '#f4a460',
    'seagreen': '#2e8b57',
    'seashell': '#fff5ee',
    'sienna': '#a0522d',
    'silver': '#c0c0c0',
    'skyblue': '#87ceeb',
    'slateblue': '#6a5acd',
    'slategray': '#708090',
    'slategrey': '#708090',
    'snow': '#fffafa',
    'springgreen': '#00ff7f',
    'steelblue': '#4682b4',
    'tan': '#d2b48c',
    'teal': '#008080',
    'thistle': '#d8bfd8',
    'tomato': '#ff6347',
    'turquoise': '#40e0d0',
    'violet': '#ee82ee',
    'wheat': '#f5deb3',
    'white': '#ffffff',
    'whitesmoke': '#f5f5f5',
    'yellow': '#ffff00',
    'yellowgreen': '#9acd32',
}
# Same color names mapped to Kivy color lists ([r, g, b, a] in 0-1 range).
colormap = {k: get_color_from_hex(v) for k, v in hex_colormap.items()}
# Call-site ids that already emitted a deprecation warning; see deprecated().
DEPRECATED_CALLERS = []
def deprecated(func):
    '''This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted the first time
    the function is used.'''
    import inspect
    import functools
    @functools.wraps(func)
    def new_func(*args, **kwargs):
        # Identify the call site so each distinct caller only warns once.
        file, line, caller = inspect.stack()[1][1:4]
        caller_id = "%s:%s:%s" % (file, line, caller)
        # We want to print deprecated warnings only once:
        if caller_id not in DEPRECATED_CALLERS:
            DEPRECATED_CALLERS.append(caller_id)
            warning = (
                'Call to deprecated function %s in %s line %d.'
                'Called from %s line %d'
                ' by %s().' % (
                    func.__name__,
                    func.__code__.co_filename,
                    func.__code__.co_firstlineno + 1,
                    file, line, caller))
            # imported lazily to avoid a kivy import cycle at module load
            from kivy.logger import Logger
            Logger.warning(warning)
            if func.__doc__:
                Logger.warning(func.__doc__)
        return func(*args, **kwargs)
    return new_func
class SafeList(list):
    '''List with a clear() method.
    .. warning::
        Usage of the iterate() function will decrease your performance.
    '''
    def clear(self):
        # Remove all items in place (Python 2 lists have no list.clear()).
        del self[:]
    @deprecated
    def iterate(self, reverse=False):
        # Deprecated: return an iterator over the list, optionally reversed.
        if reverse:
            return iter(reversed(self))
        return iter(self)
class QueryDict(dict):
    '''QueryDict is a dict() that can be queried with dot.
    ::
        d = QueryDict()
        # create a key named toto, with the value 1
        d.toto = 1
        # it's the same as
        d['toto'] = 1
    Attribute access on a missing key raises :exc:`AttributeError`, so
    ``hasattr()`` and ``getattr(obj, name, default)`` behave as expected.
    .. versionadded:: 1.0.4
    '''
    def __getattr__(self, attr):
        try:
            return self.__getitem__(attr)
        except KeyError:
            # dict has no __getattr__, so the previous fallback
            # (super().__getattr__) itself raised a confusing AttributeError
            # about 'super'. Raise a proper AttributeError for the missing
            # name instead.
            raise AttributeError(attr)
    def __setattr__(self, attr, value):
        self.__setitem__(attr, value)
def format_bytes_to_human(size, precision=2):
    '''Format a byte value to a human readable representation (B, KB, MB...).
    .. versionadded:: 1.0.8
    :Parameters:
        `size`: int
            Number that represents the bytes value
        `precision`: int, defaults to 2
            Precision after the comma
    Examples::
        >>> format_bytes_to_human(6463)
        '6.31 KB'
        >>> format_bytes_to_human(646368746541)
        '601.98 GB'
    '''
    size = int(size)
    fmt = '%%1.%df %%s' % precision
    for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
        if size < 1024.0:
            return fmt % (size, unit)
        size /= 1024.0
    # size >= 1024 TB: the loop used to fall through and return None.
    # Express such values in TB (the largest supported unit); size has been
    # divided one step too far at this point, so scale it back up by 1024.
    return fmt % (size * 1024.0, 'TB')
def _get_platform():
    # python-for-android exports ANDROID_ARGUMENT/ANDROID_PRIVATE while
    # sys.platform still reports 'linux2' there, so consult the environment
    # before sys.platform.
    if 'ANDROID_ARGUMENT' in environ:
        return 'android'
    if environ.get('KIVY_BUILD', '') == 'ios':
        return 'ios'
    if _sys_platform in ('win32', 'cygwin'):
        return 'win'
    if _sys_platform == 'darwin':
        return 'macosx'
    if _sys_platform.startswith(('linux', 'freebsd')):
        return 'linux'
    return 'unknown'
platform = _get_platform()
'''
A string identifying the current operating system. It is one
of: `'win'`, `'linux'`, `'android'`, `'macosx'`, `'ios'` or `'unknown'`.
You can use it as follows::
from kivy.utils import platform
if platform == 'linux':
do_linux_things()
.. versionadded:: 1.3.0
.. versionchanged:: 1.8.0
platform is now a variable instead of a function.
'''
def escape_markup(text):
    '''
    Escape markup characters found in the text. Intended to be used when
    markup text is activated on the Label::
        untrusted_text = escape_markup('Look at the example [1]')
        text = '[color=ff0000]' + untrusted_text + '[/color]'
        w = Label(text=text, markup=True)
    .. versionadded:: 1.3.0
    '''
    # '&' must be escaped first so the '&bl;'/'&br;' sequences inserted
    # below are not themselves re-escaped.
    escaped = text.replace('&', '&amp;')
    escaped = escaped.replace('[', '&bl;')
    return escaped.replace(']', '&br;')
class reify(object):
    '''
    Non-data descriptor decorator: the wrapped method runs once on first
    access, and its result is stored in the instance dict under the same
    name, shadowing the descriptor for all later accesses.
    It acts like @property, except the function is only ever called once;
    the cached value then behaves as a plain attribute. This gives lazy
    attribute creation on objects meant to be immutable.
    Taken from the `Pyramid project <https://pypi.python.org/pypi/pyramid/>`_.
    Usage::
        @reify
        def lazy(self):
            ...
            return hard_to_compute_int
        first_time = self.lazy   # lazy is reify obj, reify.__get__() runs
        second_time = self.lazy  # lazy is hard_to_compute_int
    '''
    def __init__(self, func):
        self.func = func
        self.__doc__ = func.__doc__
    def __get__(self, inst, cls):
        # class-level access returns the descriptor itself
        if inst is None:
            return self
        value = self.func(inst)
        setattr(inst, self.func.__name__, value)
        return value
| 27.948 | 79 | 0.578789 |
acf3269b980c9fb7c3e465143c3ae83e8afc6266 | 280 | py | Python | 1. Warmup/10. Time Conversion.py | trentandraka/Hackerrank-Algorithm-Solutions | a352070b39589931d9ece35bac6b7680bdfee9eb | [
"MIT"
] | null | null | null | 1. Warmup/10. Time Conversion.py | trentandraka/Hackerrank-Algorithm-Solutions | a352070b39589931d9ece35bac6b7680bdfee9eb | [
"MIT"
] | 5 | 2018-10-19T05:43:42.000Z | 2018-10-24T09:05:55.000Z | 1. Warmup/10. Time Conversion.py | trentandraka/Hackerrank-Algorithm-Solutions | a352070b39589931d9ece35bac6b7680bdfee9eb | [
"MIT"
] | 22 | 2018-10-19T06:06:28.000Z | 2021-04-15T00:28:11.000Z | n = input()
if n[8]=='A':
if n[0:2] == '12':
print("00", end="")
print(n[2:8])
else:
print(n[0:8])
else:
if n[0:2] == '12':
print("12", end="")
print(n[2:8])
else:
print(12+int(n[0:2]), end="")
print(n[2:8])
| 18.666667 | 37 | 0.375 |
acf326ab6c3de76b29a8014ba6d29f9d616cd675 | 331 | py | Python | tools/cos/qcloud_cos/cos_err.py | jarodvip/oneinstack-backup | 8f339a62ac8eaacb234721ec725e72d5b0a18b6e | [
"Apache-2.0"
] | 15 | 2017-06-13T06:24:27.000Z | 2021-02-09T10:27:28.000Z | tools/cos/qcloud_cos/cos_err.py | jarodvip/oneinstack-backup | 8f339a62ac8eaacb234721ec725e72d5b0a18b6e | [
"Apache-2.0"
] | 2 | 2017-07-06T11:36:56.000Z | 2017-12-02T16:58:50.000Z | tools/cos/qcloud_cos/cos_err.py | jarodvip/oneinstack-backup | 8f339a62ac8eaacb234721ec725e72d5b0a18b6e | [
"Apache-2.0"
] | 2 | 2017-08-31T01:53:22.000Z | 2017-12-02T12:51:45.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class CosErr(object):
    """SDK error codes."""
    PARAMS_ERROR = -1 # invalid parameters
    NETWORK_ERROR = -2 # network error
    SERVER_ERROR = -3 # error returned by the server side
    UNKNOWN_ERROR = -4 # unknown error
    @staticmethod
    def get_err_msg(errcode, err_info):
        """Build the standard error payload: {'code': ..., 'message': ...}."""
        return {u'code': errcode, u'message': err_info}
| 22.066667 | 55 | 0.60423 |
acf32762ed582b00f2d5eea1b1d36886994dd3ab | 10,358 | py | Python | actionslog/models.py | cws1121/django-actions-logger | 61f850fea4ec3ef84c247cb21b3a870fa1cd768b | [
"MIT"
] | null | null | null | actionslog/models.py | cws1121/django-actions-logger | 61f850fea4ec3ef84c247cb21b3a870fa1cd768b | [
"MIT"
] | null | null | null | actionslog/models.py | cws1121/django-actions-logger | 61f850fea4ec3ef84c247cb21b3a870fa1cd768b | [
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.admin.utils import quote
from django.contrib.contenttypes.models import ContentType
try:
from django.urls import NoReverseMatch, reverse
except ImportError:
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.db.models import QuerySet, Q
from django.utils.encoding import smart_text
from six import iteritems, integer_types, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import lazy
from jsonfield import JSONField
from .signals import action_logged
from . import settings as app_conf
import json
class LogActionManager(models.Manager):
    """Manager providing helpers to create and query :class:`LogAction` rows."""

    def create_log_action(self, **kwargs):
        """
        Helper method to create a new log entry.
        This method automatically populates some fields when no explicit value is given.
        :param instance: The model instance to log a change for.
        :type instance: Model
        :param request: Optional HTTP request; used only to derive the remote IP.
        :param kwargs: Field overrides for the :py:class:`LogAction` object.
        :return: The new log entry.
        :rtype: LogAction
        """
        # 'instance' and 'request' are helper-only kwargs; strip them so the
        # remaining kwargs can be passed straight to ``self.create()``.
        instance = kwargs.get('instance', None)
        if instance is not None:
            del kwargs['instance']

        request = kwargs.get('request', None)
        if request is not None:
            del kwargs['request']
            # Let's grab the current IP of the user.
            # Prefer the first hop in X-Forwarded-For (the client) when the
            # request passed through a proxy.
            x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
            if x_forwarded_for:
                remote_ip = x_forwarded_for.split(',')[0]
            else:
                remote_ip = request.META.get('REMOTE_ADDR')
            kwargs.setdefault('remote_ip', remote_ip)

        if instance is not None:
            pk = self._get_pk_value(instance)
            kwargs.setdefault(
                'content_type',
                ContentType.objects.get_for_model(instance)
            )
            kwargs.setdefault('object_pk', pk)
            kwargs.setdefault('object_repr', smart_text(instance))

            # object_id is only usable for integer primary keys; non-integer
            # pks are tracked via the string object_pk field instead.
            if isinstance(pk, integer_types):
                kwargs.setdefault('object_id', pk)

            get_object_extra_info = getattr(
                instance,
                'get_object_extra_info',
                None
            )
            if callable(get_object_extra_info):
                kwargs.setdefault('object_extra_info', get_object_extra_info())

            # Delete log entries with the same pk as a newly created model.
            # This should only be necessary when a pk is used twice.
            # NOTE(review): ``is`` compares identity; this relies on
            # app_conf.CREATE being a small-int/singleton constant — confirm.
            if kwargs.get('action', None) is app_conf.CREATE:
                is_obj_exists = self.filter(
                    content_type=kwargs.get('content_type'),
                    object_id=kwargs.get('object_id')
                ).exists()
                if kwargs.get('object_id', None) is not None and is_obj_exists:
                    self.filter(
                        content_type=kwargs.get('content_type'),
                        object_id=kwargs.get('object_id')
                    ).delete()
                else:
                    self.filter(
                        content_type=kwargs.get('content_type'),
                        object_pk=kwargs.get('object_pk', '')
                    ).delete()

        action_log = self.create(**kwargs)
        # Notify listeners (see signals.action_logged) after the row exists.
        action_logged.send(sender=LogAction, action=action_log)
        return action_log

    def get_for_model(self, model):
        """
        Get log entries for all objects of a specified type.
        :param model: The model to get log entries for.
        :type model: class
        :return: QuerySet of log entries for the given model.
        :rtype: QuerySet
        """
        # Return empty queryset if the given object is not valid.
        if not issubclass(model, models.Model):
            return self.none()

        ct = ContentType.objects.get_for_model(model)

        return self.filter(content_type=ct)

    def get_for_objects(self, queryset):
        """
        Get log entries for the objects in the specified queryset.
        :param queryset: The queryset to get the log entries for.
        :type queryset: QuerySet
        :return: The LogAction objects for the objects in the given queryset.
        :rtype: QuerySet
        """
        if not isinstance(queryset, QuerySet) or queryset.count() == 0:
            return self.none()

        content_type = ContentType.objects.get_for_model(queryset.model)
        primary_keys = queryset.values_list(queryset.model._meta.pk.name, flat=True)

        # Integer pks were stored in object_id; everything else in object_pk.
        if isinstance(primary_keys[0], integer_types):
            return self.filter(content_type=content_type).filter(Q(object_id__in=primary_keys)).distinct()
        else:
            return self.filter(content_type=content_type).filter(Q(object_pk__in=primary_keys)).distinct()

    def _get_pk_value(self, instance):
        """
        Get the primary key field value for a model instance.
        :param instance: The model instance to get the primary key for.
        :type instance: Model
        :return: The primary key value of the given model instance.
        """
        pk_field = instance._meta.pk.name
        pk = getattr(instance, pk_field, None)

        # Check to make sure that we got a pk not a model object (happens
        # when the pk itself is a ForeignKey); recurse to the related pk.
        if isinstance(pk, models.Model):
            pk = self._get_pk_value(pk)
        return pk
def get_action_choices():
    """Return the configured ``(value, label)`` action choice pairs.

    Evaluated via ``lazy()`` in ``LogAction.__init__`` so the choices are
    resolved at call time rather than at import time.
    """
    return app_conf.LOG_ACTION_CHOICES
@python_2_unicode_compatible
class LogAction(models.Model):
    """A single logged action, optionally tied to a model instance via a
    generic (content_type, object_id/object_pk) reference."""

    # Generic reference to the object the action concerns (if any).
    content_type = models.ForeignKey(
        'contenttypes.ContentType', related_name='+',
        verbose_name=_("content type"),
        blank=True, null=True, on_delete=models.SET_NULL
    )
    # Integer pk of the target object; None when the pk is non-integer.
    object_id = models.BigIntegerField(
        verbose_name=_("object id"),
        blank=True, null=True, db_index=True
    )
    # String form of the target pk; used for non-integer primary keys.
    object_pk = models.CharField(
        verbose_name=_("object pk"), max_length=255,
        blank=True, null=True, db_index=True
    )
    object_repr = models.TextField(
        verbose_name=_("object representation"),
        blank=True, null=True
    )
    object_extra_info = JSONField(
        verbose_name=_("object information"),
        blank=True, null=True
    )
    session_key = models.CharField(_('session key'), max_length=40, blank=True, null=True)
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL, verbose_name=_("user"),
        blank=True, null=True,
        on_delete=models.SET_NULL, related_name='actionlogs'
    )
    # Numeric action code; choices are injected lazily in __init__ below.
    action = models.PositiveSmallIntegerField(verbose_name=_("action"), blank=True, null=True)
    action_info = JSONField(
        verbose_name=_("action information"),
        blank=True, null=True
    )
    # JSON-encoded {field: [old, new]} mapping; see changes_dict/changes_str.
    changes = models.TextField(blank=True, verbose_name=_("change message"))
    remote_ip = models.GenericIPAddressField(
        verbose_name=_("remote IP"), blank=True, null=True
    )
    created_at = models.DateTimeField(
        verbose_name=_("created at"), auto_now_add=True, db_index=True
    )

    objects = LogActionManager()

    class Meta:
        ordering = ['-created_at']
        verbose_name = _("log action")
        verbose_name_plural = _("log actions")

    def __str__(self):
        if self.object_repr:
            return _("Logged {repr:s}").format(repr=self.object_repr)
        elif self.action:
            return _("Logged action, type: {action}, id: {id}").format(
                action=self.get_action_display(),
                id=self.id
            )
        else:
            return _("Logged action, id: {id}").format(id=self.id)

    def __init__(self, *args, **kwargs):
        super(LogAction, self).__init__(*args, **kwargs)
        # Attach the action choices lazily so settings overrides applied
        # after import time are still honoured.
        try:
            self._meta.get_field('action').choices = \
                lazy(get_action_choices, list)()
        except Exception:
            # for Django < 1.11, which uses get_field_by_name instead.
            # Narrowed from a bare ``except:`` so that
            # KeyboardInterrupt/SystemExit are not swallowed here.
            self._meta.get_field_by_name('action')[0]._choices = \
                lazy(get_action_choices, list)()

    def get_action_display(self):
        """Return the human-readable label for this row's action code."""
        for action in app_conf.LOG_ACTION_CHOICES:
            if action[0] == self.action:
                return action[1]
        return _('Not provided')

    def get_edited_object(self):
        """Returns the edited object represented by this log entry"""
        return self.content_type.get_object_for_this_type(pk=self.object_id)

    def get_admin_url(self):
        """
        Returns the admin URL to edit the object represented by this log entry.
        Returns None when the object reference is incomplete or the admin
        URL cannot be reversed.
        """
        if self.content_type and self.object_id:
            url_name = 'admin:%s_%s_change' % (
                self.content_type.app_label,
                self.content_type.model
            )
            try:
                return reverse(url_name, args=(quote(self.object_id),))
            except NoReverseMatch:
                pass
        return None

    @property
    def changes_dict(self):
        """
        :return: The changes recorded in this log entry as a dictionary object.
        Returns an empty dict when ``changes`` is empty or not valid JSON.
        """
        try:
            return json.loads(self.changes)
        except ValueError:
            return {}

    @property
    def changes_str(self, colon=': ', arrow=smart_text(' \u2192 '), separator='; '):
        """
        Return the changes recorded in this log entry as a string.
        The formatting of the string can be customized by
        setting alternate values for colon, arrow and separator.
        If the formatting is still not satisfying, please use
        :py:func:`LogAction.changes_dict` and format the string yourself.
        :param colon: The string to place between the field name and the values.
        :param arrow: The string to place between each old and new value.
        :param separator: The string to place between each field.
        :return: A readable string of the changes in this log entry.
        """
        substrings = []

        for field, values in iteritems(self.changes_dict):
            substring = smart_text('{field_name:s}{colon:s}{old:s}{arrow:s}{new:s}').format(
                field_name=field,
                colon=colon,
                old=values[0],
                arrow=arrow,
                new=values[1],
            )
            substrings.append(substring)

        return separator.join(substrings)
| 35.594502 | 106 | 0.61537 |
acf32784dfcd2aa6384d51b48b9f110c0336b9a9 | 3,134 | py | Python | QUANTAXIS/QASU/save_to_db_fields_description.py | simplezhang57/QUANTAXIS | 0fab23ee3cc4048a30b5eed3c311a5c9cdce8110 | [
"MIT"
] | 2 | 2018-10-29T12:01:55.000Z | 2021-03-05T10:28:59.000Z | QUANTAXIS/QASU/save_to_db_fields_description.py | frosthaoz/QUANTAXIS | f5f482418e5f6e23ac3530089b8d17300d931b48 | [
"MIT"
] | null | null | null | QUANTAXIS/QASU/save_to_db_fields_description.py | frosthaoz/QUANTAXIS | f5f482418e5f6e23ac3530089b8d17300d931b48 | [
"MIT"
] | 3 | 2018-11-29T07:07:56.000Z | 2021-02-09T17:24:56.000Z |
'''
'''
# Field-description metadata for the collections saved to MongoDB.
# Each entry maps a collection name to a list of one-key dicts of
# {field_name: human-readable description}.
# NOTE: the 'stock_info_tushare' group previously used set literals
# ({'name','名称'}) instead of dicts; all entries are now proper dicts.
quantaxis__db_description= [
    {
        'stock_info_tushare':
            [
                {'code': '代码'},
                {'name': '名称'},
                {'industry': '所属行业'},
                {'area': '地区'},
                {'pe': '市盈率'},
                {'outstanding': '流通股本(亿)'},
                {'totals': '总股本(亿)'},
                {'totalAssets': '总资产(万)'},
                {'liquidAssets': '流动资产'},
                {'fixedAssets': '固定资产'},
                {'reserved': '公积金'},
                {'reservedPerShare': '每股公积金'},
                {'esp': '每股收益'},
                {'bvps': '每股净资'},
                {'pb': '市净率'},
                {'timeToMarket': '上市日期'},
                {'undp': '未分利润'},
                {'perundp': '每股未分配'},
                {'rev': '收入同比(%)'},
                {'profit': '利润同比(%)'},
                {'gpr': '毛利率(%)'},
                {'npr': '净利润率(%)'},
                {'holders': '股东人数'},
            ]
    },
    {
        'stock_info':
            [
                {"market": '1'},
                {"code": "代码"},
                {"liutongguben": '流通股本(单位)'},
                {"province": "省市代码区域"},
                {"industry": "行业分类代码"},
                {"updated_date": '最后更新时间'},
                {"ipo_date": "首次公开发行日期"},
                {"zongguben": "总股本"},
                {"guojiagu": "国家股"},
                {"faqirenfarengu": "发起人法人股"},
                {"farengu": "法人股"},
                {"bgu": "B股"},
                {"hgu": "H股"},
                {"zhigonggu": "职工股"},
                {"zongzichan": "总资产"},
                {"liudongzichan": "流动资产"},
                {"gudingzichan": "固定资产"},
                {"wuxingzichan": "无形资产"},
                {"gudongrenshu": "股东人数"},
                {"liudongfuzhai": "流动负责"},
                {"changqifuzhai": "长期负责"},
                {"zibengongjijin": "资本公积金"},
                {"jingzichan": "净资产"},
                {"zhuyingshouru": "主营收入"},
                {"zhuyinglirun": "主营利润"},
                {"yingshouzhangkuan": "营收账款"},
                {"yingyelirun": "营业利润"},
                {"touzishouyu": "投资收益"},
                {"jingyingxianjinliu": "经营现金流"},
                {"zongxianjinliu": "总现金流"},
                {"cunhuo": "存货"},
                {"lirunzonghe": "利润总和"},
                {"shuihoulirun": "税后利润"},
                {"jinglirun": "净利润"},
                {"weifenpeilirun": "未分配利润"},
                {"meigujingzichan": "每股净资产"},
                {"baoliu2": "保留"}
            ]
    },
    {
        'eastmoney_stock_zjlx':
            [
                {"stock_code" : "股票代码"},
                {"date" : "日期"},
                {"zljll_je_wy" : "主力资金流入(金额)单位万元"},
                {"zljll_jzb_bfb" : "主力资金流入(净占比)%"},
                {"cddjll_je_wy" : "超级大单流入(金额)单位万元"},
                {"cddjll_je_jzb" : "超级大单流入(净占比)"},
                {"ddjll_je_wy" : "大单资金流入(金额)单位万元"},
                {"ddjll_je_jzb" : "大单资金流入(净占比)%"},
                {"zdjll_je_wy" : "中单资金流入(金额)单位万元"},
                {"zdjll_je_jzb" : "中单资金流入(净占比)%"},
                {"xdjll_je_wy" : "小单资金流入(金额)单位万元"},
                {"xdjll_je_jzb" : "小单资金流入(净占比)"},
                {"close_price" : "收盘价"},
                {"change_price" : "涨跌幅"}
            ]
    }
]
| 31.029703 | 49 | 0.356414 |
acf327ace66d2decb159dc1db56f7e41806b88bc | 5,144 | py | Python | yt_dlp/extractor/callin.py | jeroenj/yt-dlp | 11078c6d571673a0f09e21933f4ad1e6fcc35456 | [
"Unlicense"
] | 80 | 2021-05-25T11:33:49.000Z | 2022-03-29T20:36:53.000Z | yt_dlp/extractor/callin.py | jeroenj/yt-dlp | 11078c6d571673a0f09e21933f4ad1e6fcc35456 | [
"Unlicense"
] | 22 | 2021-05-08T13:44:12.000Z | 2022-03-30T01:27:23.000Z | yt_dlp/extractor/callin.py | jeroenj/yt-dlp | 11078c6d571673a0f09e21933f4ad1e6fcc35456 | [
"Unlicense"
] | 22 | 2021-05-07T05:01:27.000Z | 2022-03-26T19:10:54.000Z | # coding: utf-8
from .common import InfoExtractor
from ..utils import (
traverse_obj,
float_or_none,
int_or_none
)
class CallinIE(InfoExtractor):
    """Extractor for callin.com episode pages (Next.js-backed podcast site)."""

    _VALID_URL = r'https?://(?:www\.)?callin\.com/(episode)/(?P<id>[-a-zA-Z]+)'
    _TESTS = [{
        'url': 'https://www.callin.com/episode/the-title-ix-regime-and-the-long-march-through-EBfXYSrsjc',
        'info_dict': {
            'id': '218b979630a35ead12c6fd096f2996c56c37e4d0dc1f6dc0feada32dcf7b31cd',
            'title': 'The Title IX Regime and the Long March Through and Beyond the Institutions',
            'ext': 'ts',
            'display_id': 'the-title-ix-regime-and-the-long-march-through-EBfXYSrsjc',
            'thumbnail': 're:https://.+\\.png',
            'description': 'First episode',
            'uploader': 'Wesley Yang',
            'timestamp': 1639404128.65,
            'upload_date': '20211213',
            'uploader_id': 'wesyang',
            'uploader_url': 'http://wesleyyang.substack.com',
            'channel': 'Conversations in Year Zero',
            'channel_id': '436d1f82ddeb30cd2306ea9156044d8d2cfdc3f1f1552d245117a42173e78553',
            'channel_url': 'https://callin.com/show/conversations-in-year-zero-oJNllRFSfx',
            'duration': 9951.936,
            'view_count': int,
            'categories': ['News & Politics', 'History', 'Technology'],
            'cast': ['Wesley Yang', 'KC Johnson', 'Gabi Abramovich'],
            'series': 'Conversations in Year Zero',
            'series_id': '436d1f82ddeb30cd2306ea9156044d8d2cfdc3f1f1552d245117a42173e78553',
            'episode': 'The Title IX Regime and the Long March Through and Beyond the Institutions',
            'episode_number': 1,
            'episode_id': '218b979630a35ead12c6fd096f2996c56c37e4d0dc1f6dc0feada32dcf7b31cd'
        }
    }]

    def try_get_user_name(self, d):
        """Return "First Last" built from d's 'first'/'last' keys, falling
        back to whichever single part is present, or None if neither is."""
        names = [d.get(n) for n in ('first', 'last')]
        if None in names:
            # BUGFIX: the builtin next() only accepts its default
            # positionally; ``next(..., default=None)`` raised TypeError.
            return next((n for n in names if n), None)
        return ' '.join(names)

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Episode metadata lives in the embedded Next.js data blob.
        next_data = self._search_nextjs_data(webpage, display_id)
        episode = next_data['props']['pageProps']['episode']

        id = episode['id']
        title = (episode.get('title')
                 or self._og_search_title(webpage, fatal=False)
                 or self._html_search_regex('<title>(.*?)</title>', webpage, 'title'))
        url = episode['m3u8']
        formats = self._extract_m3u8_formats(url, display_id, ext='ts')
        self._sort_formats(formats)

        show = traverse_obj(episode, ('show', 'title'))
        show_id = traverse_obj(episode, ('show', 'id'))

        # Fetch the show-level JSON (hosts, episode list) when the build
        # slug and show slug can both be derived from the page.
        show_json = None
        app_slug = (self._html_search_regex(
            '<script\\s+src=["\']/_next/static/([-_a-zA-Z0-9]+)/_',
            webpage, 'app slug', fatal=False) or next_data.get('buildId'))
        show_slug = traverse_obj(episode, ('show', 'linkObj', 'resourceUrl'))
        if app_slug and show_slug and '/' in show_slug:
            show_slug = show_slug.rsplit('/', 1)[1]
            show_json_url = f'https://www.callin.com/_next/data/{app_slug}/show/{show_slug}.json'
            show_json = self._download_json(show_json_url, display_id, fatal=False)

        host = (traverse_obj(show_json, ('pageProps', 'show', 'hosts', 0))
                or traverse_obj(episode, ('speakers', 0)))

        host_nick = traverse_obj(host, ('linkObj', 'resourceUrl'))
        host_nick = host_nick.rsplit('/', 1)[1] if (host_nick and '/' in host_nick) else None

        cast = list(filter(None, [
            self.try_get_user_name(u) for u in
            traverse_obj(episode, (('speakers', 'callerTags'), ...)) or []
        ]))

        # Episode number: episodes are listed newest-first, so the number is
        # the distance from the end of the list.
        episode_list = traverse_obj(show_json, ('pageProps', 'show', 'episodes')) or []
        episode_number = next(
            (len(episode_list) - i for (i, e) in enumerate(episode_list) if e.get('id') == id),
            None)

        return {
            'id': id,
            'display_id': display_id,
            'title': title,
            'formats': formats,
            'thumbnail': traverse_obj(episode, ('show', 'photo')),
            'description': episode.get('description'),
            'uploader': self.try_get_user_name(host) if host else None,
            'timestamp': episode.get('publishedAt'),
            'uploader_id': host_nick,
            'uploader_url': traverse_obj(show_json, ('pageProps', 'show', 'url')),
            'channel': show,
            'channel_id': show_id,
            'channel_url': traverse_obj(episode, ('show', 'linkObj', 'resourceUrl')),
            'duration': float_or_none(episode.get('runtime')),
            'view_count': int_or_none(episode.get('plays')),
            'categories': traverse_obj(episode, ('show', 'categorizations', ..., 'name')),
            'cast': cast if cast else None,
            'series': show,
            'series_id': show_id,
            'episode': title,
            'episode_number': episode_number,
            'episode_id': id
        }
| 44.730435 | 106 | 0.584953 |
acf329968966a66e64376b11e2626e9f96b1efa5 | 2,833 | py | Python | neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/test_br_cookie.py | knodir/neutron | ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8 | [
"Apache-2.0"
] | 1 | 2018-10-19T01:48:37.000Z | 2018-10-19T01:48:37.000Z | neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/test_br_cookie.py | knodir/neutron | ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/test_br_cookie.py | knodir/neutron | ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Copyright 2016 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.agent.common import ovs_lib
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
import ovs_bridge
from neutron.tests import base
class TestBRCookieOpenflow(base.BaseTestCase):
    # Exercises cookie bookkeeping on the ovs-ofctl agent bridge.

    def setUp(self):
        super(TestBRCookieOpenflow, self).setUp()
        patcher = mock.patch(
            'neutron.agent.ovsdb.impl_idl._connection')
        patcher.start()
        self.addCleanup(patcher.stop)
        self.br = ovs_bridge.OVSAgentBridge('br-int')

    def test_reserved_cookies(self):
        # The bridge's default cookie is always part of the reserved set.
        self.assertIn(self.br.default_cookie, self.br.reserved_cookies)

    def test_request_cookie(self):
        baseline = self.br.default_cookie
        issued = self.br.request_cookie()
        # Requesting a cookie must not disturb the default one; both end
        # up reserved.
        self.assertEqual(baseline, self.br.default_cookie)
        self.assertIn(baseline, self.br.reserved_cookies)
        self.assertIn(issued, self.br.reserved_cookies)

    def test_unset_cookie(self):
        issued = self.br.request_cookie()
        self.assertIn(issued, self.br.reserved_cookies)
        self.br.unset_cookie(issued)
        # Releasing the cookie removes it from the reserved set.
        self.assertNotIn(issued, self.br.reserved_cookies)

    def test_set_agent_uuid_stamp(self):
        self.br = ovs_bridge.OVSAgentBridge('br-int')
        previous_default = self.br.default_cookie
        stamp = ovs_lib.generate_random_cookie()

        self.br.set_agent_uuid_stamp(stamp)
        # The stamp replaces the default cookie in the reserved set.
        self.assertEqual(stamp, self.br.default_cookie)
        self.assertIn(stamp, self.br.reserved_cookies)
        self.assertNotIn(previous_default, self.br.reserved_cookies)

    def test_set_agent_uuid_stamp_with_reserved_cookie(self):
        self.br = ovs_bridge.OVSAgentBridge('br-int')
        previous_default = self.br.default_cookie
        stamp = self.br.request_cookie()

        self.br.set_agent_uuid_stamp(stamp)
        # An already-reserved cookie is promoted to default without being
        # double-counted.
        self.assertEqual(stamp, self.br.default_cookie)
        self.assertIn(stamp, self.br.reserved_cookies)
        self.assertNotIn(previous_default, self.br.reserved_cookies)
        self.assertEqual({stamp}, self.br.reserved_cookies)
| 38.808219 | 78 | 0.725732 |
acf32a56407a846535cca834c84ffb3b03920fef | 26,569 | py | Python | tackerclient/tacker/v1_0/__init__.py | SSU-DCN/python-tackerclient | 771973a3f66f08fa05b3d0702a81df9ec0b35219 | [
"Apache-2.0"
] | 20 | 2015-10-18T02:56:36.000Z | 2021-10-12T13:37:58.000Z | tackerclient/tacker/v1_0/__init__.py | openstack/python-tackerclient | b7f27c3dc6a8ec747d13698a3ced1dc5cc162389 | [
"Apache-2.0"
] | null | null | null | tackerclient/tacker/v1_0/__init__.py | openstack/python-tackerclient | b7f27c3dc6a8ec747d13698a3ced1dc5cc162389 | [
"Apache-2.0"
] | 16 | 2016-03-18T08:37:28.000Z | 2021-07-19T05:28:16.000Z | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
import argparse
import logging
import re
from cliff.formatters import table
from cliff import lister
from cliff import show
from oslo_serialization import jsonutils
from tackerclient.common._i18n import _
from tackerclient.common import command
from tackerclient.common import exceptions
from tackerclient.common import utils
# Regex fragments for recognising canonical 8-4-4-4-12 UUID strings.
HEX_ELEM = '[0-9A-Fa-f]'
UUID_PATTERN = '-'.join(HEX_ELEM + '{%d}' % width
                        for width in (8, 4, 4, 4, 12))
def _get_resource_plural(resource, client):
plurals = getattr(client, 'EXTED_PLURALS', [])
for k in plurals:
if plurals[k] == resource:
return k
return resource + 's'
def find_resourceid_by_id(client, resource, resource_id):
    """Resolve *resource_id* to the server-side id of *resource*.

    Only performs the lookup when resource_id looks like a valid id
    (numeric for events, UUID-shaped otherwise); raises a 404-style
    TackerClientException when nothing matches.
    """
    resource_plural = _get_resource_plural(resource, client)
    obj_lister = getattr(client, "list_%s" % resource_plural)
    if resource == 'event':
        # NOTE(review): resource_id is a string, so ``resource_id != 0`` is
        # always True; presumably ``!= '0'`` was intended — confirm before
        # changing, as the net effect today is just ``isdigit()``.
        match = resource_id.isdigit() and resource_id != 0
    else:
        match = re.match(UUID_PATTERN, resource_id)
    collection = resource_plural
    if match:
        data = obj_lister(id=resource_id, fields='id')
        if data and data[collection]:
            return data[collection][0]['id']
    not_found_message = (_("Unable to find %(resource)s with id "
                           "'%(id)s'") %
                         {'resource': resource, 'id': resource_id})
    # 404 is used to simulate server side behavior
    raise exceptions.TackerClientException(
        message=not_found_message, status_code=404)
def _find_resourceid_by_name(client, resource, name):
    """Resolve *name* to a resource id, raising on zero or many matches."""
    collection = _get_resource_plural(resource, client)
    lister = getattr(client, "list_%s" % collection)
    matches = lister(name=name, fields='id')[collection]
    if len(matches) == 1:
        return matches[0]['id']
    if matches:
        # More than one resource shares this name; refuse to guess.
        raise exceptions.TackerClientNoUniqueMatch(resource=resource,
                                                   name=name)
    not_found_message = (_("Unable to find %(resource)s with name "
                           "'%(name)s'") %
                         {'resource': resource, 'name': name})
    # 404 is used to simulate server side behavior
    raise exceptions.TackerClientException(
        message=not_found_message, status_code=404)
def find_resourceid_by_name_or_id(client, resource, name_or_id):
    """Resolve *name_or_id* to a resource id, trying id lookup first.

    Falls back to a by-name lookup when the id lookup raises, so either
    form is accepted on the command line.
    """
    try:
        return find_resourceid_by_id(client, resource, name_or_id)
    except exceptions.TackerClientException:
        return _find_resourceid_by_name(client, resource, name_or_id)
def add_show_list_common_argument(parser):
    """Attach the detail/field-selection options shared by show and list."""
    # Visible flag plus its underscore-style alias (hidden from help).
    parser.add_argument(
        '-D', '--show-details',
        action='store_true',
        default=False,
        help=_('Show detailed info'))
    parser.add_argument(
        '--show_details',
        action='store_true',
        help=argparse.SUPPRESS)
    # Hidden accumulator and the documented -F/--field spelling; both
    # append into the same 'fields' destination.
    parser.add_argument(
        '--fields',
        action='append',
        default=[],
        help=argparse.SUPPRESS)
    parser.add_argument(
        '-F', '--field',
        dest='fields', metavar='FIELD',
        action='append',
        default=[],
        help=_('Specify the field(s) to be returned by server. You can '
               'repeat this option.'))
def add_pagination_argument(parser):
    """Attach the page-size option used to split large list requests."""
    parser.add_argument(
        '-P', '--page-size',
        dest='page_size', metavar='SIZE',
        type=int,
        default=None,
        help=_("Specify retrieve unit of each request, then split one request "
               "to several requests"))
def add_sorting_argument(parser):
    """Attach --sort-key/--sort-dir options for server-side sorting."""
    # Both options are repeatable; keys and directions pair up by position.
    parser.add_argument(
        '--sort-key',
        dest='sort_key', metavar='FIELD',
        action='append',
        default=[],
        help=_("Sorts the list by the specified fields in the specified "
               "directions. You can repeat this option, but you must "
               "specify an equal number of sort_dir and sort_key values. "
               "Extra sort_dir options are ignored. Missing sort_dir options "
               "use the default asc value."))
    parser.add_argument(
        '--sort-dir',
        dest='sort_dir', metavar='{asc,desc}',
        action='append',
        default=[],
        choices=['asc', 'desc'],
        help=_("Sorts the list in the specified direction. You can repeat "
               "this option."))
def is_number(s):
    """Return True when *s* parses as a numeric literal (real or complex)."""
    # Try the real-number parser first, then the complex one, mirroring the
    # range of literals the CLI accepts as values.
    for convert in (float, complex):
        try:
            convert(s)
        except ValueError:
            continue
        return True
    return False
def _process_previous_argument(current_arg, _value_number, current_type_str,
                               _list_flag, _values_specs, _clear_flag,
                               values_specs):
    """Finalize the argparse spec dict for the option just parsed.

    Called by parse_args_to_dict each time a new ``--option`` starts (and
    once at the end). Mutates *current_arg* in place to set ``nargs`` /
    ``store_true``, and may pop the option back off *_values_specs* when
    ``action=clear`` was seen.

    :param current_arg: spec dict for the previous option, or None.
    :param _value_number: how many value tokens followed the option.
    :param current_type_str: text after 'type=', if given.
    :param _list_flag: True when 'list=true' was seen.
    :param _values_specs: cleaned token list being built (mutated).
    :param _clear_flag: True when 'action=clear' was seen.
    :param values_specs: original tokens, used only for error messages.
    :raises exceptions.CommandError: when a typed/list option has no value.
    """
    if current_arg is not None:
        if _value_number == 0 and (current_type_str or _list_flag):
            # This kind of argument should have value
            raise exceptions.CommandError(
                message=_("Invalid values_specs %s") % ' '.join(values_specs))
        if _value_number > 1 or _list_flag or current_type_str == 'list':
            current_arg.update({'nargs': '+'})
        elif _value_number == 0:
            if _clear_flag:
                # if we have action=clear, we use argument's default
                # value None for argument
                _values_specs.pop()
            else:
                # We assume non value argument as bool one
                current_arg.update({'action': 'store_true'})
def parse_args_to_dict(values_specs):
    '''It is used to analyze the extra command options to command.
    Besides known options and arguments, our commands also support user to
    put more options to the end of command line. For example,
    list_nets -- --tag x y --key1 value1, where '-- --tag x y --key1 value1'
    is extra options to our list_nets. This feature can support V1.0 API's
    fields selection and filters. For example, to list networks which has name
    'test4', we can have list_nets -- --name=test4.
    value spec is: --key type=int|bool|... value. Type is one of Python
    built-in types. By default, type is string. The key without value is
    a bool option. Key with two values will be a list option.
    '''
    # values_specs for example: '-- --tag x y --key1 type=int value1'
    # -- is a pseudo argument
    values_specs_copy = values_specs[:]
    if values_specs_copy and values_specs_copy[0] == '--':
        del values_specs_copy[0]
    # converted ArgumentParser arguments for each of the options
    _options = {}
    # the argument part for current option in _options
    current_arg = None
    # the string after remove meta info in values_specs
    # for example, '--tag x y --key1 value1'
    _values_specs = []
    # record the count of values for an option
    # for example: for '--tag x y', it is 2, while for '--key1 value1', it is 1
    _value_number = 0
    # list=true
    _list_flag = False
    # action=clear
    _clear_flag = False
    # the current item in values_specs
    current_item = None
    # the str after 'type='
    current_type_str = None
    for _item in values_specs_copy:
        if _item.startswith('--'):
            # Deal with previous argument if any
            _process_previous_argument(
                current_arg, _value_number, current_type_str,
                _list_flag, _values_specs, _clear_flag, values_specs)
            # Init variables for current argument
            current_item = _item
            _list_flag = False
            _clear_flag = False
            current_type_str = None
            if "=" in _item:
                # '--key=value' style: the value counts as one token
                _value_number = 1
                _item = _item.split('=')[0]
            else:
                _value_number = 0
            if _item in _options:
                raise exceptions.CommandError(
                    message=_("Duplicated "
                              "options %s") % ' '.join(values_specs))
            else:
                _options.update({_item: {}})
            current_arg = _options[_item]
            _item = current_item
        elif _item.startswith('type='):
            if current_arg is None:
                raise exceptions.CommandError(
                    message=_("Invalid "
                              "values_specs %s") % ' '.join(values_specs))
            if 'type' not in current_arg:
                current_type_str = _item.split('=', 2)[1]
                # NOTE(review): eval() here executes user-supplied text from
                # the command line. bool/dict are special-cased below; a
                # whitelist of builtin type names would be safer — confirm.
                current_arg.update({'type': eval(current_type_str)})
                if current_type_str == 'bool':
                    current_arg.update({'type': utils.str2bool})
                elif current_type_str == 'dict':
                    current_arg.update({'type': utils.str2dict})
            continue
        elif _item == 'list=true':
            _list_flag = True
            continue
        elif _item == 'action=clear':
            _clear_flag = True
            continue
        if not _item.startswith('--'):
            # All others are value items
            # Make sure '--' occurs first and allow minus value
            if (not current_item or '=' in current_item or
                    _item.startswith('-') and not is_number(_item)):
                raise exceptions.CommandError(
                    message=_("Invalid "
                              "values_specs %s") % ' '.join(values_specs))
            _value_number += 1
            _values_specs.append(_item)
    # Deal with last one argument
    _process_previous_argument(
        current_arg, _value_number, current_type_str,
        _list_flag, _values_specs, _clear_flag, values_specs)
    # populate the parser with arguments
    _parser = argparse.ArgumentParser(add_help=False)
    for opt, optspec in _options.items():
        _parser.add_argument(opt, **optspec)
    _args = _parser.parse_args(_values_specs)
    # Map '--some-key' option names to 'some_key' attribute names.
    result_dict = {}
    for opt in _options.keys():
        _opt = opt.split('--', 2)[1]
        _opt = _opt.replace('-', '_')
        _value = getattr(_args, _opt)
        result_dict.update({_opt: _value})
    return result_dict
def _merge_args(qCmd, parsed_args, _extra_values, value_specs):
"""Merge arguments from _extra_values into parsed_args.
If an argument value are provided in both and it is a list,
the values in _extra_values will be merged into parsed_args.
@param parsed_args: the parsed args from known options
@param _extra_values: the other parsed arguments in unknown parts
@param values_specs: the unparsed unknown parts
"""
temp_values = _extra_values.copy()
for key, value in temp_values.items():
if hasattr(parsed_args, key):
arg_value = getattr(parsed_args, key)
if arg_value is not None and value is not None:
if isinstance(arg_value, list):
if value and isinstance(value, list):
if (not arg_value or
isinstance(arg_value[0], type(value[0]))):
arg_value.extend(value)
_extra_values.pop(key)
def update_dict(obj, dict, attributes):
    """Update dict with fields from obj.attributes

    Copies each named attribute that exists on *obj* and is not None.

    :param obj: the object updated into dict
    :param dict: the result dictionary (mutated in place)
    :param attributes: a list of attributes belonging to obj
    """
    # Single getattr with a default replaces the hasattr+getattr pair,
    # avoiding a redundant second attribute lookup per name.
    for attribute in attributes:
        value = getattr(obj, attribute, None)
        if value is not None:
            dict[attribute] = value
class TableFormater(table.TableFormatter):
    """This class is used to keep consistency with prettytable 0.6.
    https://bugs.launchpad.net/python-tackerclient/+bug/1165962
    """

    def emit_list(self, column_names, data, stdout, parsed_args):
        # With no columns there is nothing to tabulate; emit a bare newline
        # (matches prettytable 0.6 behavior) instead of calling cliff.
        if not column_names:
            stdout.write('\n')
            return
        super(TableFormater, self).emit_list(column_names, data, stdout,
                                             parsed_args)
# command.OpenStackCommand is an abstract class, so the metaclass of any
# subclass must derive from the metaclasses of all its bases; otherwise
# Python raises a metaclass-conflict error.
class TackerCommandMeta(abc.ABCMeta):
    """Metaclass that injects a per-class 'log' logger named after the class."""

    def __new__(cls, name, bases, cls_dict):
        # Respect an explicitly supplied 'log'; otherwise derive one from
        # the defining module and class name.
        cls_dict.setdefault(
            'log',
            logging.getLogger('%s.%s' % (cls_dict['__module__'], name)))
        return super(TackerCommandMeta, cls).__new__(cls, name, bases,
                                                     cls_dict)
class TackerCommand(command.OpenStackCommand, metaclass=TackerCommandMeta):
    # Base class for tacker CLI commands; provides the shared client
    # accessor, request-format options, and display formatting.

    api = 'nfv-orchestration'
    values_specs = []
    json_indent = None

    def __init__(self, app, app_args):
        super(TackerCommand, self).__init__(app, app_args)
        # NOTE(markmcclain): This is no longer supported in cliff version 1.5.2
        # see https://bugs.launchpad.net/python-tackerclient/+bug/1265926
        # if hasattr(self, 'formatters'):
        #     self.formatters['table'] = TableFormater()

    def get_client(self):
        # The tacker client instance is held by the application's client
        # manager.
        return self.app.client_manager.tacker

    def get_parser(self, prog_name):
        parser = super(TackerCommand, self).get_parser(prog_name)
        # Documented option plus its hidden underscore alias.
        parser.add_argument(
            '--request-format',
            help=_('The xml or json request format'),
            default='json',
            choices=['json', 'xml', ], )
        parser.add_argument(
            '--request_format',
            choices=['json', 'xml', ],
            help=argparse.SUPPRESS)
        return parser

    def format_output_data(self, data):
        # Modify data to make it more readable: flatten nested lists/dicts
        # to JSON text and replace None with an empty string.
        if self.resource not in data:
            return
        record = data[self.resource]
        for key, value in record.items():
            if isinstance(value, list):
                record[key] = '\n'.join(
                    jsonutils.dumps(item, indent=self.json_indent)
                    if isinstance(item, dict) else str(item)
                    for item in value)
            elif isinstance(value, dict):
                record[key] = jsonutils.dumps(value, indent=self.json_indent)
            elif value is None:
                record[key] = ''

    def add_known_arguments(self, parser):
        # Hook for subclasses to register their own options.
        pass

    def args2body(self, parsed_args):
        # Hook for subclasses to build the request body.
        return {}
class CreateCommand(TackerCommand, show.ShowOne):
    """Create a resource for a given tenant
    """

    api = 'nfv-orchestration'
    # Subclasses set 'resource' to the singular resource name.
    resource = None
    log = None
    # Field names stripped from the displayed result.
    remove_output_fields = []

    def get_parser(self, prog_name):
        """Extend the base parser with tenant options and subclass args."""
        parser = super(CreateCommand, self).get_parser(prog_name)
        parser.add_argument(
            '--tenant-id', metavar='TENANT_ID',
            help=_('The owner tenant ID'), )
        parser.add_argument(
            '--tenant_id',
            help=argparse.SUPPRESS)
        self.add_known_arguments(parser)
        return parser

    def get_data(self, parsed_args):
        """Create the resource and return (columns, values) for display."""
        self.log.debug('get_data(%s)', parsed_args)
        tacker_client = self.get_client()
        tacker_client.format = parsed_args.request_format
        # Fold any trailing '-- --key value' extras into the request body.
        _extra_values = parse_args_to_dict(self.values_specs)
        _merge_args(self, parsed_args, _extra_values,
                    self.values_specs)
        body = self.args2body(parsed_args)
        body[self.resource].update(_extra_values)
        obj_creator = getattr(tacker_client,
                              "create_%s" % self.resource)
        data = obj_creator(body)
        self.format_output_data(data)
        # {u'network': {u'id': u'e9424a76-6db4-4c93-97b6-ec311cd51f19'}}
        info = self.resource in data and data[self.resource] or None
        if info:
            print(_('Created a new %s:') % self.resource,
                  file=self.app.stdout)
            for f in self.remove_output_fields:
                if f in info:
                    info.pop(f)
        else:
            # No resource payload returned; emit an empty row.
            info = {'': ''}
        return zip(*sorted(info.items()))
class UpdateCommand(TackerCommand):
    """Update resource's information."""

    api = 'nfv-orchestration'
    # Subclasses set 'resource' to the singular resource name.
    resource = None
    log = None
    # When True the positional id may also be a resource name.
    allow_names = True

    def get_parser(self, prog_name):
        """Extend the base parser with the target id and subclass args."""
        parser = super(UpdateCommand, self).get_parser(prog_name)
        parser.add_argument(
            'id', metavar=self.resource.upper(),
            help=_('ID or name of %s to update') % self.resource)
        self.add_known_arguments(parser)
        return parser

    def run(self, parsed_args):
        """Send the update request built from known args plus extras."""
        self.log.debug('run(%s)', parsed_args)
        tacker_client = self.get_client()
        tacker_client.format = parsed_args.request_format
        # Fold any trailing '-- --key value' extras into the request body.
        _extra_values = parse_args_to_dict(self.values_specs)
        _merge_args(self, parsed_args, _extra_values,
                    self.values_specs)
        body = self.args2body(parsed_args)
        if self.resource in body:
            body[self.resource].update(_extra_values)
        else:
            body[self.resource] = _extra_values
        if not body[self.resource]:
            # Nothing to change: refuse rather than send an empty update.
            raise exceptions.CommandError(
                message=_("Must specify new"
                          " values to update %s") % self.resource)
        if self.allow_names:
            _id = find_resourceid_by_name_or_id(
                tacker_client, self.resource, parsed_args.id)
        else:
            _id = find_resourceid_by_id(
                tacker_client, self.resource, parsed_args.id)
        obj_updator = getattr(tacker_client,
                              "update_%s" % self.resource)
        obj_updator(_id, body)
        print((_('Updated %(resource)s: %(id)s') %
               {'id': parsed_args.id, 'resource': self.resource}),
              file=self.app.stdout)
        return
class DeleteCommand(TackerCommand):
    """Delete given resource(s)
    """
    api = 'nfv-orchestration'
    resource = None
    log = None
    allow_names = True
    # Optional per-resource past-tense verb for the success message.
    deleted_msg = {}
    def get_parser(self, prog_name):
        parser = super(DeleteCommand, self).get_parser(prog_name)
        if self.allow_names:
            help_str = _('IDs or names of %s to delete')
        else:
            help_str = _('IDs of %s to delete')
        parser.add_argument(
            'ids', nargs='+',
            metavar=self.resource.upper(),
            help=help_str % self.resource)
        self.add_known_arguments(parser)
        return parser
    def run(self, parsed_args):
        """Delete each requested resource, collecting per-item failures so
        one bad id does not abort the rest; raise at the end if any failed."""
        failure = False
        deleted_ids = []
        failed_items = {}
        tacker_client = self.get_client()
        tacker_client.format = parsed_args.request_format
        obj_deleter = getattr(tacker_client,
                              "delete_%s" % self.resource)
        body = self.args2body(parsed_args)
        for resource_id in parsed_args.ids:
            try:
                if self.allow_names:
                    _id = find_resourceid_by_name_or_id(
                        tacker_client, self.resource, resource_id)
                else:
                    _id = resource_id
                if body:
                    obj_deleter(_id, body)
                else:
                    obj_deleter(_id)
                deleted_ids.append(resource_id)
            except Exception as e:
                # Best-effort: remember the error and keep deleting.
                failure = True
                failed_items[resource_id] = e
        if failure:
            msg = ''
            if deleted_ids:
                status_msg = self.deleted_msg.get(self.resource, 'deleted')
                msg = (_('Successfully %(status_msg)s %(resource)s(s):'
                         ' %(deleted_list)s') % {'status_msg': status_msg,
                                                 'deleted_list':
                                                 ', '.join(deleted_ids),
                                                 'resource': self.resource})
            err_msg = _("\n\nUnable to delete the below"
                        " %s(s):") % self.resource
            for failed_id, error in failed_items.items():
                err_msg += (_('\n Cannot delete %(failed_id)s: %(error)s')
                            % {'failed_id': failed_id,
                               'error': error})
            msg += err_msg
            raise exceptions.CommandError(message=msg)
        else:
            print((_('All specified %(resource)s(s) %(msg)s successfully')
                   % {'msg': self.deleted_msg.get(self.resource, 'deleted'),
                      'resource': self.resource}))
        return
class ListCommand(TackerCommand, lister.Lister):
    """List resources that belong to a given tenant
    """
    api = 'nfv-orchestration'
    resource = None
    log = None
    # Per-column value formatters passed to utils.get_item_properties.
    _formatters = {}
    # Preferred column order when the user gives no -c option.
    list_columns = []
    unknown_parts_flag = True
    pagination_support = False
    sorting_support = False
    def get_parser(self, prog_name):
        parser = super(ListCommand, self).get_parser(prog_name)
        add_show_list_common_argument(parser)
        if self.pagination_support:
            add_pagination_argument(parser)
        if self.sorting_support:
            add_sorting_argument(parser)
        return parser
    def args2search_opts(self, parsed_args):
        """Translate common CLI flags into server-side search options."""
        search_opts = {}
        fields = parsed_args.fields
        if parsed_args.fields:
            search_opts.update({'fields': fields})
        if parsed_args.show_details:
            search_opts.update({'verbose': 'True'})
        return search_opts
    def call_server(self, tacker_client, search_opts, parsed_args):
        resource_plural = _get_resource_plural(self.resource, tacker_client)
        obj_lister = getattr(tacker_client, "list_%s" % resource_plural)
        data = obj_lister(**search_opts)
        return data
    def retrieve_list(self, parsed_args):
        """Retrieve a list of resources from Tacker server"""
        tacker_client = self.get_client()
        tacker_client.format = parsed_args.request_format
        _extra_values = parse_args_to_dict(self.values_specs)
        _merge_args(self, parsed_args, _extra_values,
                    self.values_specs)
        search_opts = self.args2search_opts(parsed_args)
        search_opts.update(_extra_values)
        if self.pagination_support:
            page_size = parsed_args.page_size
            if page_size:
                search_opts.update({'limit': page_size})
        if self.sorting_support:
            keys = parsed_args.sort_key
            if keys:
                search_opts.update({'sort_key': keys})
            dirs = parsed_args.sort_dir
            # Pad missing directions with 'asc'; drop surplus ones so the
            # two lists stay the same length.
            len_diff = len(keys) - len(dirs)
            if len_diff > 0:
                dirs += ['asc'] * len_diff
            elif len_diff < 0:
                dirs = dirs[:len(keys)]
            if dirs:
                search_opts.update({'sort_dir': dirs})
        data = self.call_server(tacker_client, search_opts, parsed_args)
        collection = _get_resource_plural(self.resource, tacker_client)
        return data.get(collection, [])
    def extend_list(self, data, parsed_args):
        """Update a retrieved list.
        This method provides a way to modify a original list returned from
        the tacker server. For example, you can add subnet cidr information
        to a list network.
        """
        pass
    def setup_columns(self, info, parsed_args):
        """Derive the column set and a generator of row tuples for cliff."""
        _columns = len(info) > 0 and sorted(info[0].keys()) or []
        if not _columns:
            # clean the parsed_args.columns so that cliff will not break
            parsed_args.columns = []
        elif parsed_args.columns:
            _columns = [x for x in parsed_args.columns if x in _columns]
        elif self.list_columns:
            # if no -c(s) by user and list_columns, we use columns in
            # both list_columns and returned resource.
            # Also Keep their order the same as in list_columns
            _columns = [x for x in self.list_columns if x in _columns]
        return (_columns, (utils.get_item_properties(
            s, _columns, formatters=self._formatters, )
            for s in info), )
    def get_data(self, parsed_args):
        self.log.debug('get_data(%s)', parsed_args)
        data = self.retrieve_list(parsed_args)
        self.extend_list(data, parsed_args)
        return self.setup_columns(data, parsed_args)
class ShowCommand(TackerCommand, show.ShowOne):
    """Show information of a given resource
    """
    api = 'nfv-orchestration'
    resource = None
    log = None
    # When True the positional argument may be a name, resolved to an id
    # before the show call; otherwise it must already be an id.
    allow_names = True

    def get_id(self):
        """Return the upper-cased resource name used as the arg metavar."""
        if self.resource:
            return self.resource.upper()

    def get_parser(self, prog_name):
        parser = super(ShowCommand, self).get_parser(prog_name)
        add_show_list_common_argument(parser)
        if self.allow_names:
            help_str = _('ID or name of %s to look up')
        else:
            help_str = _('ID of %s to look up')
        parser.add_argument(
            'id', metavar=self.get_id(),
            help=help_str % self.resource)
        return parser

    def get_data(self, parsed_args):
        """Fetch one resource and return (columns, values) for cliff."""
        self.log.debug('get_data(%s)', parsed_args)
        tacker_client = self.get_client()
        tacker_client.format = parsed_args.request_format
        params = {}
        if parsed_args.show_details:
            params = {'verbose': 'True'}
        if parsed_args.fields:
            # NOTE: replaces (does not merge with) the verbose flag, as in
            # the original behaviour.
            params = {'fields': parsed_args.fields}
        if self.allow_names:
            _id = find_resourceid_by_name_or_id(tacker_client, self.resource,
                                                parsed_args.id)
        else:
            _id = parsed_args.id
        obj_shower = getattr(tacker_client, "show_%s" % self.resource)
        data = obj_shower(_id, **params)
        self.format_output_data(data)
        # BUG FIX: previously ``data[self.resource]`` was indexed *before*
        # the membership test, so a response without the resource key raised
        # KeyError and the ``return None`` branch was unreachable.
        if self.resource in data:
            return zip(*sorted(data[self.resource].items()))
        return None
| 36.646897 | 79 | 0.592269 |
acf32b302d1f005cbf44930eb88a192f96838076 | 5,396 | py | Python | pysstv/color.py | Rachiesqueek/pySSTV | 2df8acefb764b1ed05fa0c6c5a8785c9e6b0e89d | [
"MIT"
] | 68 | 2015-07-14T14:03:04.000Z | 2022-03-25T05:26:24.000Z | pysstv/color.py | Rachiesqueek/pySSTV | 2df8acefb764b1ed05fa0c6c5a8785c9e6b0e89d | [
"MIT"
] | 17 | 2016-08-30T19:49:24.000Z | 2021-05-19T07:02:47.000Z | pysstv/color.py | Rachiesqueek/pySSTV | 2df8acefb764b1ed05fa0c6c5a8785c9e6b0e89d | [
"MIT"
] | 30 | 2015-01-07T06:36:26.000Z | 2022-02-05T21:03:24.000Z | #!/usr/bin/env python
from __future__ import division
from pysstv.sstv import byte_to_freq, FREQ_BLACK, FREQ_WHITE, FREQ_VIS_START
from pysstv.grayscale import GrayscaleSSTV
from itertools import chain
from enum import Enum
class Color(Enum):
    # The value doubles as the component's index in an RGB pixel tuple,
    # used as ``pixel[color.value]`` in ColorSSTV.encode_line.
    red = 0
    green = 1
    blue = 2
class ColorSSTV(GrayscaleSSTV):
    """Base class for RGB colour modes: each scan line is emitted once per
    colour channel, in the order given by the subclass's COLOR_SEQ."""

    def on_init(self):
        self.pixels = self.image.convert('RGB').load()

    def encode_line(self, line):
        """Yield (frequency, duration-ms) tuples for one scan line."""
        pixel_ms = self.SCAN / self.WIDTH
        for component in self.COLOR_SEQ:
            for tone in self.before_channel(component):
                yield tone
            for col in range(self.WIDTH):
                level = self.pixels[col, line][component.value]
                yield byte_to_freq(level), pixel_ms
            for tone in self.after_channel(component):
                yield tone

    def before_channel(self, color):
        """Tones emitted ahead of a channel; none by default."""
        return []

    after_channel = before_channel
class MartinM1(ColorSSTV):
    """Martin M1: green/blue/red channel order with a black separator tone
    before the first channel and after every channel."""
    COLOR_SEQ = (Color.green, Color.blue, Color.red)
    VIS_CODE = 0x2c
    WIDTH = 320
    HEIGHT = 256
    SYNC = 4.862
    SCAN = 146.432
    INTER_CH_GAP = 0.572

    def before_channel(self, color):
        # Separator only ahead of the first (green) channel.
        if color is not Color.green:
            return []
        return [(FREQ_BLACK, self.INTER_CH_GAP)]

    def after_channel(self, color):
        # Every channel is followed by the separator tone.
        return [(FREQ_BLACK, self.INTER_CH_GAP)]
class MartinM2(MartinM1):
    # Half-resolution Martin variant: same structure, narrower/faster lines.
    VIS_CODE = 0x28
    WIDTH = 160
    SCAN = 73.216
class ScottieS1(MartinM1):
    """Scottie S1: Martin-like GBR scan, except the horizontal sync pulse
    is moved from the start of the line to just before the red channel."""
    VIS_CODE = 0x3c
    SYNC = 9
    INTER_CH_GAP = 1.5
    SCAN = 138.24 - INTER_CH_GAP

    def horizontal_sync(self):
        # Suppressed at line start; emitted inside before_channel() instead.
        return []

    def before_channel(self, color):
        tones = []
        if color is Color.red:
            # Reuse the plain Martin line sync ahead of the red channel.
            tones.extend(MartinM1.horizontal_sync(self))
        tones.append((FREQ_BLACK, self.INTER_CH_GAP))
        return tones
class ScottieS2(ScottieS1):
    # Faster Scottie variant: half-width lines and a shorter scan period.
    VIS_CODE = 0x38
    SCAN = 88.064 - ScottieS1.INTER_CH_GAP
    WIDTH = 160
class Robot36(ColorSSTV):
    """Robot 36: luma every line, with Cr/Cb chroma alternating per line
    (even lines carry index 2, odd lines index 1 of the YCbCr pixel)."""
    VIS_CODE = 0x08
    WIDTH = 320
    HEIGHT = 240
    SYNC = 9
    INTER_CH_GAP = 4.5
    Y_SCAN = 88
    C_SCAN = 44
    PORCH = 1.5
    SYNC_PORCH = 3
    INTER_CH_FREQS = [None, FREQ_WHITE, FREQ_BLACK]

    def on_init(self):
        # Work in luma/chroma space instead of RGB.
        self.yuv = self.image.convert('YCbCr').load()

    def encode_line(self, line):
        channel = 2 - (line % 2)
        row = [self.yuv[col, line] for col in range(self.WIDTH)]
        luma_dt = self.Y_SCAN / self.WIDTH
        chroma_dt = self.C_SCAN / self.WIDTH
        yield FREQ_BLACK, self.SYNC_PORCH
        for px in row:
            yield byte_to_freq(px[0]), luma_dt
        yield self.INTER_CH_FREQS[channel], self.INTER_CH_GAP
        yield FREQ_VIS_START, self.PORCH
        for px in row:
            yield byte_to_freq(px[channel]), chroma_dt
class PasokonP3(ColorSSTV):
    """Pasokon P3 colour mode.

    Line layout, in units of TIMEUNIT (1/4800 s):
    back porch (5 units black) - red (640 px, 1 unit each) - gap (5) -
    green (640 px) - gap (5) - blue (640 px) - front porch (5) -
    horizontal sync (25 units of 1200 Hz).
    """
    TIMEUNIT = 1000/4800. # ms
    COLOR_SEQ = (Color.red, Color.green, Color.blue)
    VIS_CODE = 0x71
    WIDTH = 640
    HEIGHT = 480+16
    SYNC = 25 * TIMEUNIT
    SCAN = WIDTH * TIMEUNIT
    INTER_CH_GAP = 5 * TIMEUNIT

    def before_channel(self, color):
        # Back porch only ahead of the first (red) channel.
        return [(FREQ_BLACK, self.INTER_CH_GAP)] if color is Color.red else []

    def after_channel(self, color):
        # Gap after every channel; the last one acts as the front porch.
        return [(FREQ_BLACK, self.INTER_CH_GAP)]
class PasokonP5(PasokonP3):
    # Same layout as P3 at a 3200 Hz time base (slower, higher quality).
    TIMEUNIT = 1000/3200. # ms
    VIS_CODE = 0x72
    SYNC = 25 * TIMEUNIT
    SCAN = PasokonP3.WIDTH * TIMEUNIT
    INTER_CH_GAP = 5 * TIMEUNIT
class PasokonP7(PasokonP3):
    # Same layout as P3 at a 2400 Hz time base.
    TIMEUNIT = 1000/2400. # ms
    VIS_CODE = 0xF3
    SYNC = 25 * TIMEUNIT
    SCAN = PasokonP3.WIDTH * TIMEUNIT
    INTER_CH_GAP = 5 * TIMEUNIT
class PD90(ColorSSTV):
    """PD90: PD-family mode that encodes two raster lines per transmitted
    scan — Y of the even line, averaged Cr, averaged Cb, Y of the odd line."""
    VIS_CODE = 0x63
    WIDTH = 320
    HEIGHT = 256
    SYNC = 20
    PORCH = 2.08
    PIXEL = 0.532

    def gen_image_tuples(self):
        yuv = self.image.convert('YCbCr').load()
        for row in range(0, self.HEIGHT, 2):
            for tone in self.horizontal_sync():
                yield tone
            yield FREQ_BLACK, self.PORCH
            even = [yuv[col, row] for col in range(self.WIDTH)]
            odd = [yuv[col, row + 1] for col in range(self.WIDTH)]
            # Luma of the even line.
            for px in even:
                yield byte_to_freq(px[0]), self.PIXEL
            # Cr (index 2) averaged over the line pair.
            for top, bottom in zip(even, odd):
                yield byte_to_freq((top[2] + bottom[2]) / 2), self.PIXEL
            # Cb (index 1) averaged over the line pair.
            for top, bottom in zip(even, odd):
                yield byte_to_freq((top[1] + bottom[1]) / 2), self.PIXEL
            # Luma of the odd line.
            for px in odd:
                yield byte_to_freq(px[0]), self.PIXEL
class PD120(PD90):
    # Higher-resolution PD variant; only geometry and pixel time differ.
    VIS_CODE = 0x5f
    WIDTH = 640
    HEIGHT = 496
    PIXEL = 0.19
class PD160(PD90):
    VIS_CODE = 0x62
    WIDTH = 512
    HEIGHT = 400
    PIXEL = 0.382
class PD180(PD120):
    VIS_CODE = 0x60
    PIXEL = 0.286
class PD240(PD120):
    VIS_CODE = 0x61
    PIXEL = 0.382
class PD290(PD240):
    VIS_CODE = 0x5e
    WIDTH = 800
    HEIGHT = 616
    PIXEL = 0.286
# Registry of every SSTV mode implemented in this module.
MODES = (MartinM1, MartinM2, ScottieS1, ScottieS2, Robot36,
         PasokonP3, PasokonP5, PasokonP7, PD90, PD120, PD160, PD180, PD240, PD290)
| 26.194175 | 81 | 0.610638 |
acf32b442d342b71c474c76d57124cf2dca7c603 | 793 | py | Python | hood/migrations/0003_auto_20190324_1122.py | Ronyonka/hood | 4043dee9b992b189efe879ce331c2a7d25a2c5bd | [
"Unlicense"
] | null | null | null | hood/migrations/0003_auto_20190324_1122.py | Ronyonka/hood | 4043dee9b992b189efe879ce331c2a7d25a2c5bd | [
"Unlicense"
] | 4 | 2020-06-05T20:11:24.000Z | 2021-06-10T21:17:02.000Z | hood/migrations/0003_auto_20190324_1122.py | Ronyonka/hood | 4043dee9b992b189efe879ce331c2a7d25a2c5bd | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-03-24 08:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: removes the Contact model (dropping its 'location'
    # field first) and adds two nullable CharFields to Hood.
    dependencies = [
        ('hood', '0002_auto_20190323_1306'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='contact',
            name='location',
        ),
        migrations.AddField(
            model_name='hood',
            name='health',
            field=models.CharField(max_length=30, null=True),
        ),
        migrations.AddField(
            model_name='hood',
            name='police',
            field=models.CharField(max_length=30, null=True),
        ),
        migrations.DeleteModel(
            name='Contact',
        ),
    ]
| 24.030303 | 61 | 0.559899 |
acf32bf06b96f3c7d4ddd0d3fee22e4d958e1ec0 | 8,648 | py | Python | explore/angles.py | pkienzle/sasmodels | de3d42cb3621294b8706e55928035477790cd0ac | [
"BSD-3-Clause"
] | 11 | 2016-07-24T01:29:01.000Z | 2021-12-12T13:41:00.000Z | explore/angles.py | pkienzle/sasmodels | de3d42cb3621294b8706e55928035477790cd0ac | [
"BSD-3-Clause"
] | 426 | 2016-03-16T21:37:11.000Z | 2022-03-31T13:48:28.000Z | explore/angles.py | pkienzle/sasmodels | de3d42cb3621294b8706e55928035477790cd0ac | [
"BSD-3-Clause"
] | 28 | 2016-03-16T10:26:50.000Z | 2021-03-17T10:29:48.000Z | #!/usr/bin/env python
"""
Generate code for orientation transforms using symbolic algebra.
To make it easier to generate correct transforms for oriented shapes, we
use the sympy symbolic alegbra package to do the matrix multiplication.
The transforms are displayed both using an ascii math notation, and as
C or python code which can be pasted directly into the kernel driver.
If ever we decide to change conventions, we simply need to adjust the
order and parameters to the rotation matrices. For display we want to
use forward transforms for the mesh describing the shape, first applying
jitter, then adjusting the view. For calculation we know the effective q
so we instead need to first unwind the view, using the inverse rotation,
then undo the jitter to get the q to calculate for the shape in its
canonical orientation.
Set *OUTPUT* to the type of code you want to see: ccode, python, math
or any combination.
"""
from __future__ import print_function
import codecs
import sys
import re
import sympy as sp
from sympy import pi, sqrt, sin, cos, Matrix, Eq
# Select output
# OUTPUT is a concatenated flag string tested with ``'ccode' in OUTPUT``
# etc. throughout this script; toggle the lines below to pick emitters.
OUTPUT = ""
OUTPUT = OUTPUT + "ccode"
#OUTPUT = OUTPUT + "python "
OUTPUT = OUTPUT + "math "
# When True, jittered angle symbols collapse onto the plain angle names in
# the emitted code (see subs()).
REUSE_SINCOS = True
QC_ONLY = True # show only what is needed for dqc in the symmetric case
# include unicode symbols in output, even if piping to a pager
if sys.version_info[0] < 3:
    sys.stdout = codecs.getwriter('utf8')(sys.stdout)
sp.init_printing(use_unicode=True)
def subs(s):
    """
    Transform sympy generated code to follow sasmodels naming conventions.
    """
    rules = []
    if REUSE_SINCOS:
        # jitter rep: V^+ => V for the jitter angles themselves
        rules.append((r'(phi|psi|theta)\^\+', r'\1'))
    # jitter rep: V^+ => dV for any remaining jittered symbol
    rules.append((r'([a-z]*)\^\+', r'd\1'))
    # cos(V) => cos_V, sin(V) => sin_V
    rules.append((r'(cos|sin)\(([a-z]*)\)', r'\1_\2'))
    # pow(V, 2) => V*V
    rules.append((r'pow\(([a-z]*), 2\)', r'\1*\1'))
    # Order matters: the angle-specific rule must fire before the generic
    # d-prefix rule.
    for pattern, replacement in rules:
        s = re.sub(pattern, replacement, s)
    return s
def comment(s):
    r"""
    Add a comment to the generated code. Use '\n' to separate lines.
    """
    lines = s.split("\n")
    if 'ccode' in OUTPUT:
        for line in lines:
            if line:
                print("// " + line)
            else:
                print("")
    if 'python' in OUTPUT:
        for line in lines:
            if line:
                print("    ## " + line)
            else:
                print("")
def vprint(var, vec, comment=None, post=None):
    """
    Generate assignment statements.
    *var* could be a single sympy symbol or a 1xN vector of symbols.
    *vec* could be a single sympy expression or a 1xN vector of expressions
    such as results from a matrix-vector multiplication.
    *comment* if present is added to the start of the block as documentation.
    """
    #for v, row in zip(var, vec): sp.pprint(Eq(v, row))
    desc = sp.pretty(Eq(var, vec), wrap_line=False)
    # Normalize scalars to one-element sequences so the loops below work.
    if not isinstance(var, Matrix):
        var, vec = [var], [vec]
    if 'ccode' in OUTPUT:
        # With math output interleaved, wrap the pretty form in a C comment.
        if 'math' in OUTPUT:
            print("\n// " + comment if comment else "")
            print("/*")
            for line in desc.split("\n"):
                print(" * "+line)
            print(" *\n */")
        else:
            print("\n    // " + comment if comment else "")
        if post:
            print("    // " + post)
        for v, row in zip(var, vec):
            print(subs("    const double " + sp.ccode(row, assign_to=v)))
    if 'python' in OUTPUT:
        if comment:
            print("\n    ## " + comment)
        if 'math' in OUTPUT:
            for line in desc.split("\n"):
                print("    # " + line)
        if post:
            print("    ## " + post)
        for v, row in zip(var, vec):
            # [:-1] strips the trailing ';' that ccode appends.
            print(subs("    " + sp.ccode(row, assign_to=v)[:-1]))
    if OUTPUT == 'math ':
        print("\n// " + comment if comment else "")
        if post: print("// " + post)
        print(desc)
def mprint(var, mat, comment=None, post=None):
    """
    Generate assignment statements for matrix elements.

    Flattens both matrices to column vectors and delegates to vprint.
    """
    rows, cols = var.shape
    total = rows * cols
    vprint(var.reshape(total, 1), mat.reshape(total, 1),
           comment=comment, post=post)
# From wikipedia:
# https://en.wikipedia.org/wiki/Rotation_matrix#Basic_rotations
def Rx(a):
    """Rotate y and z about x"""
    c, s = cos(a), sin(a)
    return Matrix([
        [1, 0, 0],
        [0, c, -s],
        [0, s, c],
    ])
def Ry(a):
    """Rotate x and z about y"""
    c, s = cos(a), sin(a)
    return Matrix([
        [c, 0, s],
        [0, 1, 0],
        [-s, 0, c],
    ])
def Rz(a):
    """Rotate x and y about z"""
    c, s = cos(a), sin(a)
    return Matrix([
        [c, -s, 0],
        [s, c, 0],
        [0, 0, 1],
    ])
## =============== Describe the transforms ====================
# Define symbols used. Note that if you change the symbols for the jitter
# angles, you will need to update the subs() function accordingly.
# Jittered (V^+) and view orientation angles.
dphi, dpsi, dtheta = sp.var("phi^+ psi^+ theta^+")
phi, psi, theta = sp.var("phi psi theta")
#dphi, dpsi, dtheta = sp.var("beta^+ gamma^+ alpha^+")
#phi, psi, theta = sp.var("beta gamma alpha")
# Cartesian coordinates and q-space symbols (lab frame qx/qy/qz, shape
# frame qa/qb/qc, d-prefixed = after undoing jitter).
x, y, z = sp.var("x y z")
q = sp.var("q")
qx, qy, qz = sp.var("qx qy qz")
dqx, dqy, dqz = sp.var("qx^+ qy^+ qz^+")
qa, qb, qc = sp.var("qa qb qc")
dqa, dqb, dqc = sp.var("qa^+ qb^+ qc^+")
qab = sp.var("qab")
# 3x3 matrix M
J = Matrix([sp.var("J(1:4)(1:4)")]).reshape(3,3)
V = Matrix([sp.var("V(1:4)(1:4)")]).reshape(3,3)
R = Matrix([sp.var("R(1:4)(1:4)")]).reshape(3,3)
# various vectors
xyz = Matrix([[x], [y], [z]])
x_hat = Matrix([[x], [0], [0]])
y_hat = Matrix([[0], [y], [0]])
z_hat = Matrix([[0], [0], [z]])
q_xy = Matrix([[qx], [qy], [0]])
q_abc = Matrix([[qa], [qb], [qc]])
q_xyz = Matrix([[qx], [qy], [qz]])
dq_abc = Matrix([[dqa], [dqb], [dqc]])
dq_xyz = Matrix([[dqx], [dqy], [dqz]])
def print_steps(jitter, jitter_inv, view, view_inv, qc_only):
    """
    Show the forward/reverse transform code for view and jitter.

    *jitter*/*view* are the forward rotation matrices, *_inv* their
    inverses; *qc_only* restricts the jitter output to the qc row.
    """
    if 0: # forward calculations
        vprint(q_xyz, jitter*q_abc, "apply jitter")
        #vprint(xyz, jitter*z_hat, "r")
        #mprint(J, jitter, "forward jitter")
        vprint(dq_xyz, view*q_xyz, "apply view after jitter")
        #mprint(V, view, "forward view")
        #vprint(dq_xyz, view*jitter*q_abc, "combine view and jitter")
        mprint(R, view*jitter, "forward matrix")
    if 1: # reverse calculations
        pre_view = "set angles from view" if REUSE_SINCOS else None
        pre_jitter = "set angles from jitter" if REUSE_SINCOS else None
        # slice(2,3) keeps only the qc row when qc_only is requested.
        index = slice(2,3) if qc_only else slice(None,None)
        comment("\n**** direct ****")
        vprint(q_abc, view_inv*q_xy, "reverse view", post=pre_view)
        vprint(dq_abc[index,:], (jitter_inv*q_abc)[index,:],
               "reverse jitter after view", post=pre_jitter)
        comment("\n\n**** precalc ****")
        #vprint(q_abc, jitter_inv*view_inv*q_xy, "combine jitter and view reverse")
        mprint(V[:,:2], view_inv[:,:2], "reverse view matrix", post=pre_view)
        mprint(J[index,:], jitter_inv[index,:], "reverse jitter matrix", post=pre_jitter)
        mprint(R[index,:2], (J*V)[index,:2], "reverse matrix")
        comment("\n**** per point ****")
        mprint(q_abc[index,:], (R*q_xy)[index,:], "applied reverse matrix")
        #mprint(q_abc, J*V*q_xy, "applied reverse matrix")
        #mprint(R[index,:2], jitter_inv*view_inv, "reverse matrix direct")
        #vprint(q_abc, M*q_xy, "matrix application")
# Script body: emit the transform code for each symmetry case.
if 1:
    comment("==== asymmetric ====")
    print_steps(
        jitter=Rx(dphi)*Ry(dtheta)*Rz(dpsi),
        jitter_inv=Rz(-dpsi)*Ry(-dtheta)*Rx(-dphi),
        view=Rz(phi)*Ry(theta)*Rz(psi),
        view_inv=Rz(-psi)*Ry(-theta)*Rz(-phi),
        qc_only=False,
    )
if 1:
    comment("\n\n==== symmetric ====")
    # Symmetric shapes drop the psi rotation entirely.
    print_steps(
        jitter=Rx(dphi)*Ry(dtheta),
        jitter_inv=Ry(-dtheta)*Rx(-dphi),
        view=Rz(phi)*Ry(theta),
        view_inv=Ry(-theta)*Rz(-phi),
        qc_only=QC_ONLY,
    )
    comment("\n**** qab from qc ****")
    # The indirect calculation of qab is better than directly
    # calculating qab^2 = qa^2 + qb^2 since qc can be computed
    # as qc = M31*qx + M32*qy, thus requiring only two elements
    # of the rotation matrix.
    #vprint(qab, sqrt(qa**2 + qb**2), "Direct calculation of qab")
    vprint(dqa, sqrt((qx**2+qy**2) - dqc**2),
           "Indirect calculation of qab, from qab^2 = |q|^2 - qc^2")
if 0:
    comment("==== asymmetric (3.x) ====")
    view_inv = Rz(-psi)*Rx(theta)*Ry(-(pi/2 - phi))
    vprint(q_abc, view_inv*q_xy, "reverse view")
    print(""" existing code
    cos_alpha = cos_theta*cos_phi*qxhat + sin_theta*qyhat;
    cos_mu = (-sin_theta*cos_psi*cos_phi - sin_psi*sin_phi)*qxhat + cos_theta*cos_psi*qyhat;
    cos_nu = (-cos_phi*sin_psi*sin_theta + sin_phi*cos_psi)*qxhat + sin_psi*cos_theta*qyhat;
""")
| 35.012146 | 92 | 0.589269 |
acf32d7b35dcc3208060522d15213de73be0ffea | 387 | py | Python | Guide/asgi.py | TalonClone/Guide | af60a51c5f105bb9c406a52ba97207c41b112611 | [
"MIT"
] | null | null | null | Guide/asgi.py | TalonClone/Guide | af60a51c5f105bb9c406a52ba97207c41b112611 | [
"MIT"
] | null | null | null | Guide/asgi.py | TalonClone/Guide | af60a51c5f105bb9c406a52ba97207c41b112611 | [
"MIT"
] | null | null | null | """
ASGI config for Guide project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Register the settings module (if not already set) before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Guide.settings')
# Module-level ASGI callable that ASGI servers look up.
application = get_asgi_application()
| 22.764706 | 78 | 0.782946 |
acf32d9a7c2498a2d6ff3118b034cb74b18f6ae3 | 613 | py | Python | src/boip/preset/maya-qt-mvc/template/__init__.py | InTack2/boip | 99a2c1cf7116dc4a28453d44ac9768446241174d | [
"MIT"
] | null | null | null | src/boip/preset/maya-qt-mvc/template/__init__.py | InTack2/boip | 99a2c1cf7116dc4a28453d44ac9768446241174d | [
"MIT"
] | 1 | 2020-09-28T15:26:02.000Z | 2020-09-28T15:26:02.000Z | src/boip/preset/maya-qt-mvc/template/__init__.py | InTack2/boip | 99a2c1cf7116dc4a28453d44ac9768446241174d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import generators
from __future__ import division
from . import controller
from . import model
from . import view
from .gui import sample_gui
# NOTE(review): ``reload`` as a builtin exists only on Python 2; Python 3
# would need ``importlib.reload``.  Reloading on import picks up edits to
# the submodules while iterating inside the host application.
reload(controller)
reload(model)
reload(view)
reload(sample_gui)
def main():
global {tool_name}_window_controller
try:
{tool_name}_window_controller.close_gui()
except:
pass
{tool_name}_window_controller = controller.Controller()
{tool_name}_window_controller.show_gui()
| 20.433333 | 59 | 0.76509 |
acf32dc76e8ec3748bf89483c65fc94d2bcd1874 | 161,294 | py | Python | sdk/python/pulumi_azure_native/network/v20150501preview/_inputs.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20150501preview/_inputs.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20150501preview/_inputs.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'AddressSpaceArgs',
'ApplicationGatewayBackendAddressPoolArgs',
'ApplicationGatewayBackendAddressArgs',
'ApplicationGatewayBackendHttpSettingsArgs',
'ApplicationGatewayFrontendIPConfigurationArgs',
'ApplicationGatewayFrontendPortArgs',
'ApplicationGatewayHttpListenerArgs',
'ApplicationGatewayIPConfigurationArgs',
'ApplicationGatewayRequestRoutingRuleArgs',
'ApplicationGatewaySkuArgs',
'ApplicationGatewaySslCertificateArgs',
'BackendAddressPoolArgs',
'DhcpOptionsArgs',
'ExpressRouteCircuitAuthorizationArgs',
'ExpressRouteCircuitPeeringConfigArgs',
'ExpressRouteCircuitPeeringArgs',
'ExpressRouteCircuitServiceProviderPropertiesArgs',
'ExpressRouteCircuitSkuArgs',
'ExpressRouteCircuitStatsArgs',
'FrontendIpConfigurationArgs',
'InboundNatPoolArgs',
'InboundNatRuleArgs',
'LoadBalancingRuleArgs',
'NetworkInterfaceDnsSettingsArgs',
'NetworkInterfaceIpConfigurationArgs',
'OutboundNatRuleArgs',
'ProbeArgs',
'PublicIpAddressDnsSettingsArgs',
'RouteArgs',
'SecurityRuleArgs',
'SubResourceArgs',
'SubnetArgs',
]
@pulumi.input_type
class AddressSpaceArgs:
    """Auto-generated Pulumi input type; do not hand-edit the accessor
    pattern -- regeneration would discard such changes."""
    def __init__(__self__, *,
                 address_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        AddressSpace contains an array of IP address ranges that can be used by subnets
        :param pulumi.Input[Sequence[pulumi.Input[str]]] address_prefixes: Gets or sets List of address blocks reserved for this virtual network in CIDR notation
        """
        if address_prefixes is not None:
            pulumi.set(__self__, "address_prefixes", address_prefixes)
    @property
    @pulumi.getter(name="addressPrefixes")
    def address_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Gets or sets List of address blocks reserved for this virtual network in CIDR notation
        """
        return pulumi.get(self, "address_prefixes")
    @address_prefixes.setter
    def address_prefixes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "address_prefixes", value)
@pulumi.input_type
class ApplicationGatewayBackendAddressPoolArgs:
    """Auto-generated Pulumi input type for a gateway backend address pool;
    do not hand-edit -- regeneration would discard changes."""
    def __init__(__self__, *,
                 backend_addresses: Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationGatewayBackendAddressArgs']]]] = None,
                 backend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None):
        """
        Backend Address Pool of application gateway
        :param pulumi.Input[Sequence[pulumi.Input['ApplicationGatewayBackendAddressArgs']]] backend_addresses: Gets or sets the backend addresses
        :param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] backend_ip_configurations: Gets or sets backendIPConfiguration of application gateway
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated
        :param pulumi.Input[str] id: Resource Id
        :param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        :param pulumi.Input[str] provisioning_state: Gets or sets Provisioning state of the backend address pool resource Updating/Deleting/Failed
        """
        if backend_addresses is not None:
            pulumi.set(__self__, "backend_addresses", backend_addresses)
        if backend_ip_configurations is not None:
            pulumi.set(__self__, "backend_ip_configurations", backend_ip_configurations)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
    @property
    @pulumi.getter(name="backendAddresses")
    def backend_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationGatewayBackendAddressArgs']]]]:
        """
        Gets or sets the backend addresses
        """
        return pulumi.get(self, "backend_addresses")
    @backend_addresses.setter
    def backend_addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ApplicationGatewayBackendAddressArgs']]]]):
        pulumi.set(self, "backend_addresses", value)
    @property
    @pulumi.getter(name="backendIPConfigurations")
    def backend_ip_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
        """
        Gets or sets backendIPConfiguration of application gateway
        """
        return pulumi.get(self, "backend_ip_configurations")
    @backend_ip_configurations.setter
    def backend_ip_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
        pulumi.set(self, "backend_ip_configurations", value)
    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """
        A unique read-only string that changes whenever the resource is updated
        """
        return pulumi.get(self, "etag")
    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Id
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """
        Gets or sets Provisioning state of the backend address pool resource Updating/Deleting/Failed
        """
        return pulumi.get(self, "provisioning_state")
    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)
@pulumi.input_type
class ApplicationGatewayBackendAddressArgs:
    """Auto-generated Pulumi input type for a single backend address
    (FQDN or IP); do not hand-edit -- regeneration would discard changes."""
    def __init__(__self__, *,
                 fqdn: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None):
        """
        Backend Address of application gateway
        :param pulumi.Input[str] fqdn: Gets or sets the dns name
        :param pulumi.Input[str] ip_address: Gets or sets the ip address
        """
        if fqdn is not None:
            pulumi.set(__self__, "fqdn", fqdn)
        if ip_address is not None:
            pulumi.set(__self__, "ip_address", ip_address)
    @property
    @pulumi.getter
    def fqdn(self) -> Optional[pulumi.Input[str]]:
        """
        Gets or sets the dns name
        """
        return pulumi.get(self, "fqdn")
    @fqdn.setter
    def fqdn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "fqdn", value)
    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        Gets or sets the ip address
        """
        return pulumi.get(self, "ip_address")
    @ip_address.setter
    def ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_address", value)
@pulumi.input_type
class ApplicationGatewayBackendHttpSettingsArgs:
    # NOTE(review): generated Pulumi input type -- the @pulumi.input_type
    # decorator presumably maps the keyword-only __init__ args and the
    # pulumi.getter-decorated properties below onto the resource's wire
    # fields; keep names and signatures consistent when editing.
    def __init__(__self__, *,
                 cookie_based_affinity: Optional[pulumi.Input[Union[str, 'ApplicationGatewayCookieBasedAffinity']]] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 protocol: Optional[pulumi.Input[Union[str, 'ApplicationGatewayProtocol']]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None):
        """
        Backend address pool settings of application gateway
        :param pulumi.Input[Union[str, 'ApplicationGatewayCookieBasedAffinity']] cookie_based_affinity: Gets or sets the cookie affinity
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated
        :param pulumi.Input[str] id: Resource Id
        :param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        :param pulumi.Input[int] port: Gets or sets the port
        :param pulumi.Input[Union[str, 'ApplicationGatewayProtocol']] protocol: Gets or sets the protocol
        :param pulumi.Input[str] provisioning_state: Gets or sets Provisioning state of the backend http settings resource Updating/Deleting/Failed
        """
        # Only store values that were explicitly provided; None means "unset"
        # and is omitted from the underlying Pulumi input map.
        if cookie_based_affinity is not None:
            pulumi.set(__self__, "cookie_based_affinity", cookie_based_affinity)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if protocol is not None:
            pulumi.set(__self__, "protocol", protocol)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
    @property
    @pulumi.getter(name="cookieBasedAffinity")
    def cookie_based_affinity(self) -> Optional[pulumi.Input[Union[str, 'ApplicationGatewayCookieBasedAffinity']]]:
        """
        Gets or sets the cookie affinity
        """
        return pulumi.get(self, "cookie_based_affinity")
    @cookie_based_affinity.setter
    def cookie_based_affinity(self, value: Optional[pulumi.Input[Union[str, 'ApplicationGatewayCookieBasedAffinity']]]):
        pulumi.set(self, "cookie_based_affinity", value)
    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """
        A unique read-only string that changes whenever the resource is updated
        """
        return pulumi.get(self, "etag")
    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Id
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        Gets or sets the port
        """
        return pulumi.get(self, "port")
    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)
    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[Union[str, 'ApplicationGatewayProtocol']]]:
        """
        Gets or sets the protocol
        """
        return pulumi.get(self, "protocol")
    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[Union[str, 'ApplicationGatewayProtocol']]]):
        pulumi.set(self, "protocol", value)
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """
        Gets or sets Provisioning state of the backend http settings resource Updating/Deleting/Failed
        """
        return pulumi.get(self, "provisioning_state")
    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)
@pulumi.input_type
class ApplicationGatewayFrontendIPConfigurationArgs:
    # NOTE(review): generated Pulumi input type -- the @pulumi.input_type
    # decorator presumably maps the keyword-only __init__ args and the
    # pulumi.getter-decorated properties below onto the resource's wire
    # fields; keep names and signatures consistent when editing.
    def __init__(__self__, *,
                 etag: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 private_ip_address: Optional[pulumi.Input[str]] = None,
                 private_ip_allocation_method: Optional[pulumi.Input[Union[str, 'IpAllocationMethod']]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None,
                 public_ip_address: Optional[pulumi.Input['SubResourceArgs']] = None,
                 subnet: Optional[pulumi.Input['SubResourceArgs']] = None):
        """
        Frontend IP configuration of application gateway
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated
        :param pulumi.Input[str] id: Resource Id
        :param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        :param pulumi.Input[str] private_ip_address: Gets or sets the privateIPAddress of the Network Interface IP Configuration
        :param pulumi.Input[Union[str, 'IpAllocationMethod']] private_ip_allocation_method: Gets or sets PrivateIP allocation method (Static/Dynamic)
        :param pulumi.Input[str] provisioning_state: Gets or sets Provisioning state of the PublicIP resource Updating/Deleting/Failed
        :param pulumi.Input['SubResourceArgs'] public_ip_address: Gets or sets the reference of the PublicIP resource
        :param pulumi.Input['SubResourceArgs'] subnet: Gets or sets the reference of the subnet resource
        """
        # Only store values that were explicitly provided; None means "unset"
        # and is omitted from the underlying Pulumi input map.
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if private_ip_address is not None:
            pulumi.set(__self__, "private_ip_address", private_ip_address)
        if private_ip_allocation_method is not None:
            pulumi.set(__self__, "private_ip_allocation_method", private_ip_allocation_method)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
        if public_ip_address is not None:
            pulumi.set(__self__, "public_ip_address", public_ip_address)
        if subnet is not None:
            pulumi.set(__self__, "subnet", subnet)
    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """
        A unique read-only string that changes whenever the resource is updated
        """
        return pulumi.get(self, "etag")
    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Id
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="privateIPAddress")
    def private_ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        Gets or sets the privateIPAddress of the Network Interface IP Configuration
        """
        return pulumi.get(self, "private_ip_address")
    @private_ip_address.setter
    def private_ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_ip_address", value)
    @property
    @pulumi.getter(name="privateIPAllocationMethod")
    def private_ip_allocation_method(self) -> Optional[pulumi.Input[Union[str, 'IpAllocationMethod']]]:
        """
        Gets or sets PrivateIP allocation method (Static/Dynamic)
        """
        return pulumi.get(self, "private_ip_allocation_method")
    @private_ip_allocation_method.setter
    def private_ip_allocation_method(self, value: Optional[pulumi.Input[Union[str, 'IpAllocationMethod']]]):
        pulumi.set(self, "private_ip_allocation_method", value)
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """
        Gets or sets Provisioning state of the PublicIP resource Updating/Deleting/Failed
        """
        return pulumi.get(self, "provisioning_state")
    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)
    @property
    @pulumi.getter(name="publicIPAddress")
    def public_ip_address(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Gets or sets the reference of the PublicIP resource
        """
        return pulumi.get(self, "public_ip_address")
    @public_ip_address.setter
    def public_ip_address(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "public_ip_address", value)
    @property
    @pulumi.getter
    def subnet(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Gets or sets the reference of the subnet resource
        """
        return pulumi.get(self, "subnet")
    @subnet.setter
    def subnet(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "subnet", value)
@pulumi.input_type
class ApplicationGatewayFrontendPortArgs:
    # NOTE(review): generated Pulumi input type -- the @pulumi.input_type
    # decorator presumably maps the keyword-only __init__ args and the
    # pulumi.getter-decorated properties below onto the resource's wire
    # fields; keep names and signatures consistent when editing.
    def __init__(__self__, *,
                 etag: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None):
        """
        Frontend Port of application gateway
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated
        :param pulumi.Input[str] id: Resource Id
        :param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        :param pulumi.Input[int] port: Gets or sets the frontend port
        :param pulumi.Input[str] provisioning_state: Gets or sets Provisioning state of the frontend port resource Updating/Deleting/Failed
        """
        # Only store values that were explicitly provided; None means "unset"
        # and is omitted from the underlying Pulumi input map.
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """
        A unique read-only string that changes whenever the resource is updated
        """
        return pulumi.get(self, "etag")
    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Id
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        Gets or sets the frontend port
        """
        return pulumi.get(self, "port")
    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """
        Gets or sets Provisioning state of the frontend port resource Updating/Deleting/Failed
        """
        return pulumi.get(self, "provisioning_state")
    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)
@pulumi.input_type
class ApplicationGatewayHttpListenerArgs:
    # NOTE(review): generated Pulumi input type -- the @pulumi.input_type
    # decorator presumably maps the keyword-only __init__ args and the
    # pulumi.getter-decorated properties below onto the resource's wire
    # fields; keep names and signatures consistent when editing.
    def __init__(__self__, *,
                 etag: Optional[pulumi.Input[str]] = None,
                 frontend_ip_configuration: Optional[pulumi.Input['SubResourceArgs']] = None,
                 frontend_port: Optional[pulumi.Input['SubResourceArgs']] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 protocol: Optional[pulumi.Input[Union[str, 'ApplicationGatewayProtocol']]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None,
                 ssl_certificate: Optional[pulumi.Input['SubResourceArgs']] = None):
        """
        Http listener of application gateway
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated
        :param pulumi.Input['SubResourceArgs'] frontend_ip_configuration: Gets or sets frontend IP configuration resource of application gateway
        :param pulumi.Input['SubResourceArgs'] frontend_port: Gets or sets frontend port resource of application gateway
        :param pulumi.Input[str] id: Resource Id
        :param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        :param pulumi.Input[Union[str, 'ApplicationGatewayProtocol']] protocol: Gets or sets the protocol
        :param pulumi.Input[str] provisioning_state: Gets or sets Provisioning state of the http listener resource Updating/Deleting/Failed
        :param pulumi.Input['SubResourceArgs'] ssl_certificate: Gets or sets ssl certificate resource of application gateway
        """
        # Only store values that were explicitly provided; None means "unset"
        # and is omitted from the underlying Pulumi input map.
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if frontend_ip_configuration is not None:
            pulumi.set(__self__, "frontend_ip_configuration", frontend_ip_configuration)
        if frontend_port is not None:
            pulumi.set(__self__, "frontend_port", frontend_port)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if protocol is not None:
            pulumi.set(__self__, "protocol", protocol)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
        if ssl_certificate is not None:
            pulumi.set(__self__, "ssl_certificate", ssl_certificate)
    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """
        A unique read-only string that changes whenever the resource is updated
        """
        return pulumi.get(self, "etag")
    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)
    @property
    @pulumi.getter(name="frontendIPConfiguration")
    def frontend_ip_configuration(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Gets or sets frontend IP configuration resource of application gateway
        """
        return pulumi.get(self, "frontend_ip_configuration")
    @frontend_ip_configuration.setter
    def frontend_ip_configuration(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "frontend_ip_configuration", value)
    @property
    @pulumi.getter(name="frontendPort")
    def frontend_port(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Gets or sets frontend port resource of application gateway
        """
        return pulumi.get(self, "frontend_port")
    @frontend_port.setter
    def frontend_port(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "frontend_port", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Id
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[Union[str, 'ApplicationGatewayProtocol']]]:
        """
        Gets or sets the protocol
        """
        return pulumi.get(self, "protocol")
    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[Union[str, 'ApplicationGatewayProtocol']]]):
        pulumi.set(self, "protocol", value)
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """
        Gets or sets Provisioning state of the http listener resource Updating/Deleting/Failed
        """
        return pulumi.get(self, "provisioning_state")
    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)
    @property
    @pulumi.getter(name="sslCertificate")
    def ssl_certificate(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Gets or sets ssl certificate resource of application gateway
        """
        return pulumi.get(self, "ssl_certificate")
    @ssl_certificate.setter
    def ssl_certificate(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "ssl_certificate", value)
@pulumi.input_type
class ApplicationGatewayIPConfigurationArgs:
    # NOTE(review): generated Pulumi input type -- the @pulumi.input_type
    # decorator presumably maps the keyword-only __init__ args and the
    # pulumi.getter-decorated properties below onto the resource's wire
    # fields; keep names and signatures consistent when editing.
    def __init__(__self__, *,
                 etag: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None,
                 subnet: Optional[pulumi.Input['SubResourceArgs']] = None):
        """
        IP configuration of application gateway
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated
        :param pulumi.Input[str] id: Resource Id
        :param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        :param pulumi.Input[str] provisioning_state: Gets or sets Provisioning state of the application gateway subnet resource Updating/Deleting/Failed
        :param pulumi.Input['SubResourceArgs'] subnet: Gets or sets the reference of the subnet resource.A subnet from where application gateway gets its private address
        """
        # Only store values that were explicitly provided; None means "unset"
        # and is omitted from the underlying Pulumi input map.
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
        if subnet is not None:
            pulumi.set(__self__, "subnet", subnet)
    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """
        A unique read-only string that changes whenever the resource is updated
        """
        return pulumi.get(self, "etag")
    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Id
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """
        Gets or sets Provisioning state of the application gateway subnet resource Updating/Deleting/Failed
        """
        return pulumi.get(self, "provisioning_state")
    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)
    @property
    @pulumi.getter
    def subnet(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Gets or sets the reference of the subnet resource.A subnet from where application gateway gets its private address
        """
        return pulumi.get(self, "subnet")
    @subnet.setter
    def subnet(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "subnet", value)
@pulumi.input_type
class ApplicationGatewayRequestRoutingRuleArgs:
    # NOTE(review): generated Pulumi input type -- the @pulumi.input_type
    # decorator presumably maps the keyword-only __init__ args and the
    # pulumi.getter-decorated properties below onto the resource's wire
    # fields; keep names and signatures consistent when editing.
    def __init__(__self__, *,
                 backend_address_pool: Optional[pulumi.Input['SubResourceArgs']] = None,
                 backend_http_settings: Optional[pulumi.Input['SubResourceArgs']] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 http_listener: Optional[pulumi.Input['SubResourceArgs']] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None,
                 rule_type: Optional[pulumi.Input[Union[str, 'ApplicationGatewayRequestRoutingRuleType']]] = None):
        """
        Request routing rule of application gateway
        :param pulumi.Input['SubResourceArgs'] backend_address_pool: Gets or sets backend address pool resource of application gateway
        :param pulumi.Input['SubResourceArgs'] backend_http_settings: Gets or sets frontend port resource of application gateway
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated
        :param pulumi.Input['SubResourceArgs'] http_listener: Gets or sets http listener resource of application gateway
        :param pulumi.Input[str] id: Resource Id
        :param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        :param pulumi.Input[str] provisioning_state: Gets or sets Provisioning state of the request routing rule resource Updating/Deleting/Failed
        :param pulumi.Input[Union[str, 'ApplicationGatewayRequestRoutingRuleType']] rule_type: Gets or sets the rule type
        """
        # Only store values that were explicitly provided; None means "unset"
        # and is omitted from the underlying Pulumi input map.
        if backend_address_pool is not None:
            pulumi.set(__self__, "backend_address_pool", backend_address_pool)
        if backend_http_settings is not None:
            pulumi.set(__self__, "backend_http_settings", backend_http_settings)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if http_listener is not None:
            pulumi.set(__self__, "http_listener", http_listener)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
        if rule_type is not None:
            pulumi.set(__self__, "rule_type", rule_type)
    @property
    @pulumi.getter(name="backendAddressPool")
    def backend_address_pool(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Gets or sets backend address pool resource of application gateway
        """
        return pulumi.get(self, "backend_address_pool")
    @backend_address_pool.setter
    def backend_address_pool(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "backend_address_pool", value)
    @property
    @pulumi.getter(name="backendHttpSettings")
    def backend_http_settings(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Gets or sets frontend port resource of application gateway
        """
        return pulumi.get(self, "backend_http_settings")
    @backend_http_settings.setter
    def backend_http_settings(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "backend_http_settings", value)
    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """
        A unique read-only string that changes whenever the resource is updated
        """
        return pulumi.get(self, "etag")
    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)
    @property
    @pulumi.getter(name="httpListener")
    def http_listener(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Gets or sets http listener resource of application gateway
        """
        return pulumi.get(self, "http_listener")
    @http_listener.setter
    def http_listener(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "http_listener", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Id
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """
        Gets or sets Provisioning state of the request routing rule resource Updating/Deleting/Failed
        """
        return pulumi.get(self, "provisioning_state")
    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)
    @property
    @pulumi.getter(name="ruleType")
    def rule_type(self) -> Optional[pulumi.Input[Union[str, 'ApplicationGatewayRequestRoutingRuleType']]]:
        """
        Gets or sets the rule type
        """
        return pulumi.get(self, "rule_type")
    @rule_type.setter
    def rule_type(self, value: Optional[pulumi.Input[Union[str, 'ApplicationGatewayRequestRoutingRuleType']]]):
        pulumi.set(self, "rule_type", value)
@pulumi.input_type
class ApplicationGatewaySkuArgs:
    # NOTE(review): generated Pulumi input type -- the @pulumi.input_type
    # decorator presumably maps the keyword-only __init__ args and the
    # pulumi.getter-decorated properties below onto the resource's wire
    # fields; keep names and signatures consistent when editing.
    def __init__(__self__, *,
                 capacity: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[Union[str, 'ApplicationGatewaySkuName']]] = None,
                 tier: Optional[pulumi.Input[Union[str, 'ApplicationGatewayTier']]] = None):
        """
        SKU of application gateway
        :param pulumi.Input[int] capacity: Gets or sets capacity (instance count) of application gateway
        :param pulumi.Input[Union[str, 'ApplicationGatewaySkuName']] name: Gets or sets name of application gateway SKU
        :param pulumi.Input[Union[str, 'ApplicationGatewayTier']] tier: Gets or sets tier of application gateway
        """
        # Only store values that were explicitly provided; None means "unset"
        # and is omitted from the underlying Pulumi input map.
        if capacity is not None:
            pulumi.set(__self__, "capacity", capacity)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if tier is not None:
            pulumi.set(__self__, "tier", tier)
    @property
    @pulumi.getter
    def capacity(self) -> Optional[pulumi.Input[int]]:
        """
        Gets or sets capacity (instance count) of application gateway
        """
        return pulumi.get(self, "capacity")
    @capacity.setter
    def capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "capacity", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[Union[str, 'ApplicationGatewaySkuName']]]:
        """
        Gets or sets name of application gateway SKU
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[Union[str, 'ApplicationGatewaySkuName']]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def tier(self) -> Optional[pulumi.Input[Union[str, 'ApplicationGatewayTier']]]:
        """
        Gets or sets tier of application gateway
        """
        return pulumi.get(self, "tier")
    @tier.setter
    def tier(self, value: Optional[pulumi.Input[Union[str, 'ApplicationGatewayTier']]]):
        pulumi.set(self, "tier", value)
@pulumi.input_type
class ApplicationGatewaySslCertificateArgs:
    # NOTE(review): generated Pulumi input type -- the @pulumi.input_type
    # decorator presumably maps the keyword-only __init__ args and the
    # pulumi.getter-decorated properties below onto the resource's wire
    # fields; keep names and signatures consistent when editing.
    # NOTE(review): `data` and `password` look like sensitive material --
    # callers likely should wrap them in pulumi secrets; confirm against the
    # provider's guidance.
    def __init__(__self__, *,
                 data: Optional[pulumi.Input[str]] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None,
                 public_cert_data: Optional[pulumi.Input[str]] = None):
        """
        SSL certificates of application gateway
        :param pulumi.Input[str] data: Gets or sets the certificate data
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated
        :param pulumi.Input[str] id: Resource Id
        :param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        :param pulumi.Input[str] password: Gets or sets the certificate password
        :param pulumi.Input[str] provisioning_state: Gets or sets Provisioning state of the ssl certificate resource Updating/Deleting/Failed
        :param pulumi.Input[str] public_cert_data: Gets or sets the certificate public data
        """
        # Only store values that were explicitly provided; None means "unset"
        # and is omitted from the underlying Pulumi input map.
        if data is not None:
            pulumi.set(__self__, "data", data)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
        if public_cert_data is not None:
            pulumi.set(__self__, "public_cert_data", public_cert_data)
    @property
    @pulumi.getter
    def data(self) -> Optional[pulumi.Input[str]]:
        """
        Gets or sets the certificate data
        """
        return pulumi.get(self, "data")
    @data.setter
    def data(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "data", value)
    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """
        A unique read-only string that changes whenever the resource is updated
        """
        return pulumi.get(self, "etag")
    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Id
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        Gets or sets the certificate password
        """
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """
        Gets or sets Provisioning state of the ssl certificate resource Updating/Deleting/Failed
        """
        return pulumi.get(self, "provisioning_state")
    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)
    @property
    @pulumi.getter(name="publicCertData")
    def public_cert_data(self) -> Optional[pulumi.Input[str]]:
        """
        Gets or sets the certificate public data
        """
        return pulumi.get(self, "public_cert_data")
    @public_cert_data.setter
    def public_cert_data(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "public_cert_data", value)
@pulumi.input_type
class BackendAddressPoolArgs:
def __init__(__self__, *,
backend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
load_balancing_rules: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
name: Optional[pulumi.Input[str]] = None,
outbound_nat_rule: Optional[pulumi.Input['SubResourceArgs']] = None,
provisioning_state: Optional[pulumi.Input[str]] = None):
"""
Pool of backend IP addresses
:param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] backend_ip_configurations: Gets collection of references to IPs defined in NICs
:param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated
:param pulumi.Input[str] id: Resource Id
:param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] load_balancing_rules: Gets Load Balancing rules that use this Backend Address Pool
:param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource
:param pulumi.Input['SubResourceArgs'] outbound_nat_rule: Gets outbound rules that use this Backend Address Pool
:param pulumi.Input[str] provisioning_state: Provisioning state of the PublicIP resource Updating/Deleting/Failed
"""
if backend_ip_configurations is not None:
pulumi.set(__self__, "backend_ip_configurations", backend_ip_configurations)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if id is not None:
pulumi.set(__self__, "id", id)
if load_balancing_rules is not None:
pulumi.set(__self__, "load_balancing_rules", load_balancing_rules)
if name is not None:
pulumi.set(__self__, "name", name)
if outbound_nat_rule is not None:
pulumi.set(__self__, "outbound_nat_rule", outbound_nat_rule)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="backendIPConfigurations")
def backend_ip_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
"""
Gets collection of references to IPs defined in NICs
"""
return pulumi.get(self, "backend_ip_configurations")
@backend_ip_configurations.setter
def backend_ip_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
pulumi.set(self, "backend_ip_configurations", value)
    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """
        A unique read-only string that changes whenever the resource is updated
        """
        return pulumi.get(self, "etag")
    # Setter counterpart: stores the value under the same key the getter reads.
    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Id
        """
        return pulumi.get(self, "id")
    # Setter counterpart: stores the value under the same key the getter reads.
    # (The name intentionally shadows the `id` builtin to mirror the API field.)
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter(name="loadBalancingRules")
    def load_balancing_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
        """
        Gets Load Balancing rules that use this Backend Address Pool
        """
        return pulumi.get(self, "load_balancing_rules")
    # Setter counterpart: stores the value under the same key the getter reads.
    @load_balancing_rules.setter
    def load_balancing_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
        pulumi.set(self, "load_balancing_rules", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        """
        return pulumi.get(self, "name")
    # Setter counterpart: stores the value under the same key the getter reads.
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="outboundNatRule")
    def outbound_nat_rule(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Gets outbound rules that use this Backend Address Pool
        """
        return pulumi.get(self, "outbound_nat_rule")
    # Setter counterpart: stores the value under the same key the getter reads.
    @outbound_nat_rule.setter
    def outbound_nat_rule(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "outbound_nat_rule", value)
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """
        Provisioning state of the PublicIP resource Updating/Deleting/Failed
        """
        return pulumi.get(self, "provisioning_state")
    # Setter counterpart: stores the value under the same key the getter reads.
    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)
@pulumi.input_type
class DhcpOptionsArgs:
    def __init__(__self__, *,
                 dns_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        DHCPOptions contains an array of DNS servers available to VMs deployed in the virtual networkStandard DHCP option for a subnet overrides VNET DHCP options.

        :param pulumi.Input[Sequence[pulumi.Input[str]]] dns_servers: Gets or sets list of DNS servers IP addresses
        """
        # Only persist the argument when the caller actually supplied it;
        # an absent input is left unset rather than stored as None.
        if dns_servers is None:
            return
        pulumi.set(__self__, "dns_servers", dns_servers)

    @property
    @pulumi.getter(name="dnsServers")
    def dns_servers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """Gets or sets list of DNS servers IP addresses"""
        return pulumi.get(self, "dns_servers")

    @dns_servers.setter
    def dns_servers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "dns_servers", value)
@pulumi.input_type
class ExpressRouteCircuitAuthorizationArgs:
    def __init__(__self__, *,
                 authorization_key: Optional[pulumi.Input[str]] = None,
                 authorization_use_status: Optional[pulumi.Input[Union[str, 'AuthorizationUseStatus']]] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None):
        """
        Authorization in a ExpressRouteCircuit resource

        :param pulumi.Input[str] authorization_key: Gets or sets the authorization key
        :param pulumi.Input[Union[str, 'AuthorizationUseStatus']] authorization_use_status: Gets or sets AuthorizationUseStatus
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated
        :param pulumi.Input[str] id: Resource Id
        :param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        :param pulumi.Input[str] provisioning_state: Gets or sets Provisioning state of the PublicIP resource Updating/Deleting/Failed
        """
        # Store only the arguments that were explicitly supplied; unset
        # inputs are simply never written to pulumi's attribute store.
        supplied = {
            "authorization_key": authorization_key,
            "authorization_use_status": authorization_use_status,
            "etag": etag,
            "id": id,
            "name": name,
            "provisioning_state": provisioning_state,
        }
        for attr_name, attr_value in supplied.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="authorizationKey")
    def authorization_key(self) -> Optional[pulumi.Input[str]]:
        """Gets or sets the authorization key"""
        return pulumi.get(self, "authorization_key")

    @authorization_key.setter
    def authorization_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "authorization_key", value)

    @property
    @pulumi.getter(name="authorizationUseStatus")
    def authorization_use_status(self) -> Optional[pulumi.Input[Union[str, 'AuthorizationUseStatus']]]:
        """Gets or sets AuthorizationUseStatus"""
        return pulumi.get(self, "authorization_use_status")

    @authorization_use_status.setter
    def authorization_use_status(self, value: Optional[pulumi.Input[Union[str, 'AuthorizationUseStatus']]]):
        pulumi.set(self, "authorization_use_status", value)

    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """A unique read-only string that changes whenever the resource is updated"""
        return pulumi.get(self, "etag")

    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """Resource Id"""
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """Gets name of the resource that is unique within a resource group. This name can be used to access the resource"""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """Gets or sets Provisioning state of the PublicIP resource Updating/Deleting/Failed"""
        return pulumi.get(self, "provisioning_state")

    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)
@pulumi.input_type
class ExpressRouteCircuitPeeringConfigArgs:
    def __init__(__self__, *,
                 advertised_public_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 advertised_public_prefixes_state: Optional[pulumi.Input[Union[str, 'ExpressRouteCircuitPeeringAdvertisedPublicPrefixState']]] = None,
                 customer_asn: Optional[pulumi.Input[int]] = None,
                 routing_registry_name: Optional[pulumi.Input[str]] = None):
        """
        Specifies the peering config

        :param pulumi.Input[Sequence[pulumi.Input[str]]] advertised_public_prefixes: Gets or sets the reference of AdvertisedPublicPrefixes
        :param pulumi.Input[Union[str, 'ExpressRouteCircuitPeeringAdvertisedPublicPrefixState']] advertised_public_prefixes_state: Gets or sets AdvertisedPublicPrefixState of the Peering resource
        :param pulumi.Input[int] customer_asn: Gets or Sets CustomerAsn of the peering.
        :param pulumi.Input[str] routing_registry_name: Gets or Sets RoutingRegistryName of the config.
        """
        # Store only the arguments that were explicitly supplied.
        supplied = {
            "advertised_public_prefixes": advertised_public_prefixes,
            "advertised_public_prefixes_state": advertised_public_prefixes_state,
            "customer_asn": customer_asn,
            "routing_registry_name": routing_registry_name,
        }
        for attr_name, attr_value in supplied.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="advertisedPublicPrefixes")
    def advertised_public_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """Gets or sets the reference of AdvertisedPublicPrefixes"""
        return pulumi.get(self, "advertised_public_prefixes")

    @advertised_public_prefixes.setter
    def advertised_public_prefixes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "advertised_public_prefixes", value)

    @property
    @pulumi.getter(name="advertisedPublicPrefixesState")
    def advertised_public_prefixes_state(self) -> Optional[pulumi.Input[Union[str, 'ExpressRouteCircuitPeeringAdvertisedPublicPrefixState']]]:
        """Gets or sets AdvertisedPublicPrefixState of the Peering resource"""
        return pulumi.get(self, "advertised_public_prefixes_state")

    @advertised_public_prefixes_state.setter
    def advertised_public_prefixes_state(self, value: Optional[pulumi.Input[Union[str, 'ExpressRouteCircuitPeeringAdvertisedPublicPrefixState']]]):
        pulumi.set(self, "advertised_public_prefixes_state", value)

    @property
    @pulumi.getter(name="customerASN")
    def customer_asn(self) -> Optional[pulumi.Input[int]]:
        """Gets or Sets CustomerAsn of the peering."""
        return pulumi.get(self, "customer_asn")

    @customer_asn.setter
    def customer_asn(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "customer_asn", value)

    @property
    @pulumi.getter(name="routingRegistryName")
    def routing_registry_name(self) -> Optional[pulumi.Input[str]]:
        """Gets or Sets RoutingRegistryName of the config."""
        return pulumi.get(self, "routing_registry_name")

    @routing_registry_name.setter
    def routing_registry_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "routing_registry_name", value)
@pulumi.input_type
class ExpressRouteCircuitPeeringArgs:
    def __init__(__self__, *,
                 azure_asn: Optional[pulumi.Input[int]] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 microsoft_peering_config: Optional[pulumi.Input['ExpressRouteCircuitPeeringConfigArgs']] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 peer_asn: Optional[pulumi.Input[int]] = None,
                 peering_type: Optional[pulumi.Input[Union[str, 'ExpressRouteCircuitPeeringType']]] = None,
                 primary_azure_port: Optional[pulumi.Input[str]] = None,
                 primary_peer_address_prefix: Optional[pulumi.Input[str]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None,
                 secondary_azure_port: Optional[pulumi.Input[str]] = None,
                 secondary_peer_address_prefix: Optional[pulumi.Input[str]] = None,
                 shared_key: Optional[pulumi.Input[str]] = None,
                 state: Optional[pulumi.Input[Union[str, 'ExpressRouteCircuitPeeringState']]] = None,
                 stats: Optional[pulumi.Input['ExpressRouteCircuitStatsArgs']] = None,
                 vlan_id: Optional[pulumi.Input[int]] = None):
        """
        Peering in a ExpressRouteCircuit resource

        :param pulumi.Input[int] azure_asn: Gets or sets the azure ASN
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated
        :param pulumi.Input[str] id: Resource Id
        :param pulumi.Input['ExpressRouteCircuitPeeringConfigArgs'] microsoft_peering_config: Gets or sets the Microsoft peering config
        :param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        :param pulumi.Input[int] peer_asn: Gets or sets the peer ASN
        :param pulumi.Input[Union[str, 'ExpressRouteCircuitPeeringType']] peering_type: Gets or sets PeeringType
        :param pulumi.Input[str] primary_azure_port: Gets or sets the primary port
        :param pulumi.Input[str] primary_peer_address_prefix: Gets or sets the primary address prefix
        :param pulumi.Input[str] provisioning_state: Gets or sets Provisioning state of the PublicIP resource Updating/Deleting/Failed
        :param pulumi.Input[str] secondary_azure_port: Gets or sets the secondary port
        :param pulumi.Input[str] secondary_peer_address_prefix: Gets or sets the secondary address prefix
        :param pulumi.Input[str] shared_key: Gets or sets the shared key
        :param pulumi.Input[Union[str, 'ExpressRouteCircuitPeeringState']] state: Gets or sets state of Peering
        :param pulumi.Input['ExpressRouteCircuitStatsArgs'] stats: Gets or sets peering stats
        :param pulumi.Input[int] vlan_id: Gets or sets the vlan id
        """
        # Store only the arguments that were explicitly supplied; absent
        # inputs are never written to pulumi's attribute store.
        supplied = {
            "azure_asn": azure_asn,
            "etag": etag,
            "id": id,
            "microsoft_peering_config": microsoft_peering_config,
            "name": name,
            "peer_asn": peer_asn,
            "peering_type": peering_type,
            "primary_azure_port": primary_azure_port,
            "primary_peer_address_prefix": primary_peer_address_prefix,
            "provisioning_state": provisioning_state,
            "secondary_azure_port": secondary_azure_port,
            "secondary_peer_address_prefix": secondary_peer_address_prefix,
            "shared_key": shared_key,
            "state": state,
            "stats": stats,
            "vlan_id": vlan_id,
        }
        for attr_name, attr_value in supplied.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="azureASN")
    def azure_asn(self) -> Optional[pulumi.Input[int]]:
        """Gets or sets the azure ASN"""
        return pulumi.get(self, "azure_asn")

    @azure_asn.setter
    def azure_asn(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "azure_asn", value)

    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """A unique read-only string that changes whenever the resource is updated"""
        return pulumi.get(self, "etag")

    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """Resource Id"""
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter(name="microsoftPeeringConfig")
    def microsoft_peering_config(self) -> Optional[pulumi.Input['ExpressRouteCircuitPeeringConfigArgs']]:
        """Gets or sets the Microsoft peering config"""
        return pulumi.get(self, "microsoft_peering_config")

    @microsoft_peering_config.setter
    def microsoft_peering_config(self, value: Optional[pulumi.Input['ExpressRouteCircuitPeeringConfigArgs']]):
        pulumi.set(self, "microsoft_peering_config", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """Gets name of the resource that is unique within a resource group. This name can be used to access the resource"""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="peerASN")
    def peer_asn(self) -> Optional[pulumi.Input[int]]:
        """Gets or sets the peer ASN"""
        return pulumi.get(self, "peer_asn")

    @peer_asn.setter
    def peer_asn(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "peer_asn", value)

    @property
    @pulumi.getter(name="peeringType")
    def peering_type(self) -> Optional[pulumi.Input[Union[str, 'ExpressRouteCircuitPeeringType']]]:
        """Gets or sets PeeringType"""
        return pulumi.get(self, "peering_type")

    @peering_type.setter
    def peering_type(self, value: Optional[pulumi.Input[Union[str, 'ExpressRouteCircuitPeeringType']]]):
        pulumi.set(self, "peering_type", value)

    @property
    @pulumi.getter(name="primaryAzurePort")
    def primary_azure_port(self) -> Optional[pulumi.Input[str]]:
        """Gets or sets the primary port"""
        return pulumi.get(self, "primary_azure_port")

    @primary_azure_port.setter
    def primary_azure_port(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "primary_azure_port", value)

    @property
    @pulumi.getter(name="primaryPeerAddressPrefix")
    def primary_peer_address_prefix(self) -> Optional[pulumi.Input[str]]:
        """Gets or sets the primary address prefix"""
        return pulumi.get(self, "primary_peer_address_prefix")

    @primary_peer_address_prefix.setter
    def primary_peer_address_prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "primary_peer_address_prefix", value)

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """Gets or sets Provisioning state of the PublicIP resource Updating/Deleting/Failed"""
        return pulumi.get(self, "provisioning_state")

    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)

    @property
    @pulumi.getter(name="secondaryAzurePort")
    def secondary_azure_port(self) -> Optional[pulumi.Input[str]]:
        """Gets or sets the secondary port"""
        return pulumi.get(self, "secondary_azure_port")

    @secondary_azure_port.setter
    def secondary_azure_port(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secondary_azure_port", value)

    @property
    @pulumi.getter(name="secondaryPeerAddressPrefix")
    def secondary_peer_address_prefix(self) -> Optional[pulumi.Input[str]]:
        """Gets or sets the secondary address prefix"""
        return pulumi.get(self, "secondary_peer_address_prefix")

    @secondary_peer_address_prefix.setter
    def secondary_peer_address_prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secondary_peer_address_prefix", value)

    @property
    @pulumi.getter(name="sharedKey")
    def shared_key(self) -> Optional[pulumi.Input[str]]:
        """Gets or sets the shared key"""
        return pulumi.get(self, "shared_key")

    @shared_key.setter
    def shared_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "shared_key", value)

    @property
    @pulumi.getter
    def state(self) -> Optional[pulumi.Input[Union[str, 'ExpressRouteCircuitPeeringState']]]:
        """Gets or sets state of Peering"""
        return pulumi.get(self, "state")

    @state.setter
    def state(self, value: Optional[pulumi.Input[Union[str, 'ExpressRouteCircuitPeeringState']]]):
        pulumi.set(self, "state", value)

    @property
    @pulumi.getter
    def stats(self) -> Optional[pulumi.Input['ExpressRouteCircuitStatsArgs']]:
        """Gets or sets peering stats"""
        return pulumi.get(self, "stats")

    @stats.setter
    def stats(self, value: Optional[pulumi.Input['ExpressRouteCircuitStatsArgs']]):
        pulumi.set(self, "stats", value)

    @property
    @pulumi.getter(name="vlanId")
    def vlan_id(self) -> Optional[pulumi.Input[int]]:
        """Gets or sets the vlan id"""
        return pulumi.get(self, "vlan_id")

    @vlan_id.setter
    def vlan_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "vlan_id", value)
@pulumi.input_type
class ExpressRouteCircuitServiceProviderPropertiesArgs:
    def __init__(__self__, *,
                 bandwidth_in_mbps: Optional[pulumi.Input[int]] = None,
                 peering_location: Optional[pulumi.Input[str]] = None,
                 service_provider_name: Optional[pulumi.Input[str]] = None):
        """
        Contains ServiceProviderProperties in an ExpressRouteCircuit

        :param pulumi.Input[int] bandwidth_in_mbps: Gets or sets BandwidthInMbps.
        :param pulumi.Input[str] peering_location: Gets or sets peering location.
        :param pulumi.Input[str] service_provider_name: Gets or sets serviceProviderName.
        """
        # Store only the arguments that were explicitly supplied.
        supplied = {
            "bandwidth_in_mbps": bandwidth_in_mbps,
            "peering_location": peering_location,
            "service_provider_name": service_provider_name,
        }
        for attr_name, attr_value in supplied.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="bandwidthInMbps")
    def bandwidth_in_mbps(self) -> Optional[pulumi.Input[int]]:
        """Gets or sets BandwidthInMbps."""
        return pulumi.get(self, "bandwidth_in_mbps")

    @bandwidth_in_mbps.setter
    def bandwidth_in_mbps(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "bandwidth_in_mbps", value)

    @property
    @pulumi.getter(name="peeringLocation")
    def peering_location(self) -> Optional[pulumi.Input[str]]:
        """Gets or sets peering location."""
        return pulumi.get(self, "peering_location")

    @peering_location.setter
    def peering_location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "peering_location", value)

    @property
    @pulumi.getter(name="serviceProviderName")
    def service_provider_name(self) -> Optional[pulumi.Input[str]]:
        """Gets or sets serviceProviderName."""
        return pulumi.get(self, "service_provider_name")

    @service_provider_name.setter
    def service_provider_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_provider_name", value)
@pulumi.input_type
class ExpressRouteCircuitSkuArgs:
    def __init__(__self__, *,
                 family: Optional[pulumi.Input[Union[str, 'ExpressRouteCircuitSkuFamily']]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tier: Optional[pulumi.Input[Union[str, 'ExpressRouteCircuitSkuTier']]] = None):
        """
        Contains sku in an ExpressRouteCircuit

        :param pulumi.Input[Union[str, 'ExpressRouteCircuitSkuFamily']] family: Gets or sets family of the sku.
        :param pulumi.Input[str] name: Gets or sets name of the sku.
        :param pulumi.Input[Union[str, 'ExpressRouteCircuitSkuTier']] tier: Gets or sets tier of the sku.
        """
        # Store only the arguments that were explicitly supplied.
        supplied = {"family": family, "name": name, "tier": tier}
        for attr_name, attr_value in supplied.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def family(self) -> Optional[pulumi.Input[Union[str, 'ExpressRouteCircuitSkuFamily']]]:
        """Gets or sets family of the sku."""
        return pulumi.get(self, "family")

    @family.setter
    def family(self, value: Optional[pulumi.Input[Union[str, 'ExpressRouteCircuitSkuFamily']]]):
        pulumi.set(self, "family", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """Gets or sets name of the sku."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def tier(self) -> Optional[pulumi.Input[Union[str, 'ExpressRouteCircuitSkuTier']]]:
        """Gets or sets tier of the sku."""
        return pulumi.get(self, "tier")

    @tier.setter
    def tier(self, value: Optional[pulumi.Input[Union[str, 'ExpressRouteCircuitSkuTier']]]):
        pulumi.set(self, "tier", value)
@pulumi.input_type
class ExpressRouteCircuitStatsArgs:
    def __init__(__self__, *,
                 bytes_in: Optional[pulumi.Input[int]] = None,
                 bytes_out: Optional[pulumi.Input[int]] = None):
        """
        Contains Stats associated with the peering

        :param pulumi.Input[int] bytes_in: Gets BytesIn of the peering.
        :param pulumi.Input[int] bytes_out: Gets BytesOut of the peering.
        """
        # Store only the arguments that were explicitly supplied.
        supplied = {"bytes_in": bytes_in, "bytes_out": bytes_out}
        for attr_name, attr_value in supplied.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="bytesIn")
    def bytes_in(self) -> Optional[pulumi.Input[int]]:
        """Gets BytesIn of the peering."""
        return pulumi.get(self, "bytes_in")

    @bytes_in.setter
    def bytes_in(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "bytes_in", value)

    @property
    @pulumi.getter(name="bytesOut")
    def bytes_out(self) -> Optional[pulumi.Input[int]]:
        """Gets BytesOut of the peering."""
        return pulumi.get(self, "bytes_out")

    @bytes_out.setter
    def bytes_out(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "bytes_out", value)
@pulumi.input_type
class FrontendIpConfigurationArgs:
    def __init__(__self__, *,
                 etag: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 inbound_nat_pools: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
                 inbound_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
                 load_balancing_rules: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 outbound_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
                 private_ip_address: Optional[pulumi.Input[str]] = None,
                 private_ip_allocation_method: Optional[pulumi.Input[Union[str, 'IpAllocationMethod']]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None,
                 public_ip_address: Optional[pulumi.Input['SubResourceArgs']] = None,
                 subnet: Optional[pulumi.Input['SubResourceArgs']] = None):
        """
        Frontend IP address of the load balancer

        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated
        :param pulumi.Input[str] id: Resource Id
        :param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] inbound_nat_pools: Read only.Inbound pools URIs that use this frontend IP
        :param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] inbound_nat_rules: Read only.Inbound rules URIs that use this frontend IP
        :param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] load_balancing_rules: Gets Load Balancing rules URIs that use this frontend IP
        :param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        :param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] outbound_nat_rules: Read only.Outbound rules URIs that use this frontend IP
        :param pulumi.Input[str] private_ip_address: Gets or sets the IP address of the Load Balancer.This is only specified if a specific private IP address shall be allocated from the subnet specified in subnetRef
        :param pulumi.Input[Union[str, 'IpAllocationMethod']] private_ip_allocation_method: Gets or sets PrivateIP allocation method (Static/Dynamic)
        :param pulumi.Input[str] provisioning_state: Gets or sets Provisioning state of the PublicIP resource Updating/Deleting/Failed
        :param pulumi.Input['SubResourceArgs'] public_ip_address: Gets or sets the reference of the PublicIP resource
        :param pulumi.Input['SubResourceArgs'] subnet: Gets or sets the reference of the subnet resource.A subnet from where the load balancer gets its private frontend address
        """
        # Store only the arguments that were explicitly supplied; absent
        # inputs are never written to pulumi's attribute store.
        supplied = {
            "etag": etag,
            "id": id,
            "inbound_nat_pools": inbound_nat_pools,
            "inbound_nat_rules": inbound_nat_rules,
            "load_balancing_rules": load_balancing_rules,
            "name": name,
            "outbound_nat_rules": outbound_nat_rules,
            "private_ip_address": private_ip_address,
            "private_ip_allocation_method": private_ip_allocation_method,
            "provisioning_state": provisioning_state,
            "public_ip_address": public_ip_address,
            "subnet": subnet,
        }
        for attr_name, attr_value in supplied.items():
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """A unique read-only string that changes whenever the resource is updated"""
        return pulumi.get(self, "etag")

    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """Resource Id"""
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter(name="inboundNatPools")
    def inbound_nat_pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
        """Read only.Inbound pools URIs that use this frontend IP"""
        return pulumi.get(self, "inbound_nat_pools")

    @inbound_nat_pools.setter
    def inbound_nat_pools(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
        pulumi.set(self, "inbound_nat_pools", value)

    @property
    @pulumi.getter(name="inboundNatRules")
    def inbound_nat_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
        """Read only.Inbound rules URIs that use this frontend IP"""
        return pulumi.get(self, "inbound_nat_rules")

    @inbound_nat_rules.setter
    def inbound_nat_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
        pulumi.set(self, "inbound_nat_rules", value)

    @property
    @pulumi.getter(name="loadBalancingRules")
    def load_balancing_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
        """Gets Load Balancing rules URIs that use this frontend IP"""
        return pulumi.get(self, "load_balancing_rules")

    @load_balancing_rules.setter
    def load_balancing_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
        pulumi.set(self, "load_balancing_rules", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """Gets name of the resource that is unique within a resource group. This name can be used to access the resource"""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="outboundNatRules")
    def outbound_nat_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
        """Read only.Outbound rules URIs that use this frontend IP"""
        return pulumi.get(self, "outbound_nat_rules")

    @outbound_nat_rules.setter
    def outbound_nat_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
        pulumi.set(self, "outbound_nat_rules", value)

    @property
    @pulumi.getter(name="privateIPAddress")
    def private_ip_address(self) -> Optional[pulumi.Input[str]]:
        """Gets or sets the IP address of the Load Balancer.This is only specified if a specific private IP address shall be allocated from the subnet specified in subnetRef"""
        return pulumi.get(self, "private_ip_address")

    @private_ip_address.setter
    def private_ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_ip_address", value)

    @property
    @pulumi.getter(name="privateIPAllocationMethod")
    def private_ip_allocation_method(self) -> Optional[pulumi.Input[Union[str, 'IpAllocationMethod']]]:
        """Gets or sets PrivateIP allocation method (Static/Dynamic)"""
        return pulumi.get(self, "private_ip_allocation_method")

    @private_ip_allocation_method.setter
    def private_ip_allocation_method(self, value: Optional[pulumi.Input[Union[str, 'IpAllocationMethod']]]):
        pulumi.set(self, "private_ip_allocation_method", value)

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """Gets or sets Provisioning state of the PublicIP resource Updating/Deleting/Failed"""
        return pulumi.get(self, "provisioning_state")

    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)

    @property
    @pulumi.getter(name="publicIPAddress")
    def public_ip_address(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """Gets or sets the reference of the PublicIP resource"""
        return pulumi.get(self, "public_ip_address")

    @public_ip_address.setter
    def public_ip_address(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "public_ip_address", value)

    @property
    @pulumi.getter
    def subnet(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """Gets or sets the reference of the subnet resource.A subnet from where the load balancer gets its private frontend address"""
        return pulumi.get(self, "subnet")

    @subnet.setter
    def subnet(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "subnet", value)
@pulumi.input_type
class InboundNatPoolArgs:
    """
    Inbound NAT pool of the loadbalancer: a frontend port range
    (frontend_port_range_start..frontend_port_range_end) mapped to a single
    backend port over the given transport protocol.
    """
    def __init__(__self__, *,
                 backend_port: pulumi.Input[int],
                 frontend_port_range_end: pulumi.Input[int],
                 frontend_port_range_start: pulumi.Input[int],
                 protocol: pulumi.Input[Union[str, 'TransportProtocol']],
                 etag: Optional[pulumi.Input[str]] = None,
                 frontend_ip_configuration: Optional[pulumi.Input['SubResourceArgs']] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None):
        """
        Inbound NAT pool of the loadbalancer
        :param pulumi.Input[int] backend_port: Gets or sets a port used for internal connections on the endpoint. The localPort attribute maps the external port of the endpoint to an internal port on a role. This is useful in scenarios where a role must communicate to an internal component on a port that is different from the one that is exposed externally. If not specified, the value of localPort is the same as the port attribute. Set the value of localPort to '*' to automatically assign an unallocated port that is discoverable using the runtime API
        :param pulumi.Input[int] frontend_port_range_end: Gets or sets the ending port range for the NAT pool. You can specify any port number you choose, but the port numbers specified for each role in the service must be unique. Possible values range between 1 and 65535, inclusive
        :param pulumi.Input[int] frontend_port_range_start: Gets or sets the starting port range for the NAT pool. You can specify any port number you choose, but the port numbers specified for each role in the service must be unique. Possible values range between 1 and 65535, inclusive
        :param pulumi.Input[Union[str, 'TransportProtocol']] protocol: Gets or sets the transport protocol for the external endpoint. Possible values are Udp or Tcp
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated
        :param pulumi.Input['SubResourceArgs'] frontend_ip_configuration: Gets or sets a reference to frontend IP Addresses
        :param pulumi.Input[str] id: Resource Id
        :param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        :param pulumi.Input[str] provisioning_state: Gets or sets Provisioning state of the PublicIP resource Updating/Deleting/Failed
        """
        pulumi.set(__self__, "backend_port", backend_port)
        pulumi.set(__self__, "frontend_port_range_end", frontend_port_range_end)
        pulumi.set(__self__, "frontend_port_range_start", frontend_port_range_start)
        pulumi.set(__self__, "protocol", protocol)
        # Optional inputs are only recorded in the input map when explicitly supplied.
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if frontend_ip_configuration is not None:
            pulumi.set(__self__, "frontend_ip_configuration", frontend_ip_configuration)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
    @property
    @pulumi.getter(name="backendPort")
    def backend_port(self) -> pulumi.Input[int]:
        """
        Gets or sets a port used for internal connections on the endpoint. The localPort attribute maps the external port of the endpoint to an internal port on a role. This is useful in scenarios where a role must communicate to an internal component on a port that is different from the one that is exposed externally. If not specified, the value of localPort is the same as the port attribute. Set the value of localPort to '*' to automatically assign an unallocated port that is discoverable using the runtime API
        """
        return pulumi.get(self, "backend_port")
    @backend_port.setter
    def backend_port(self, value: pulumi.Input[int]):
        pulumi.set(self, "backend_port", value)
    @property
    @pulumi.getter(name="frontendPortRangeEnd")
    def frontend_port_range_end(self) -> pulumi.Input[int]:
        """
        Gets or sets the ending port range for the NAT pool. You can specify any port number you choose, but the port numbers specified for each role in the service must be unique. Possible values range between 1 and 65535, inclusive
        """
        return pulumi.get(self, "frontend_port_range_end")
    @frontend_port_range_end.setter
    def frontend_port_range_end(self, value: pulumi.Input[int]):
        pulumi.set(self, "frontend_port_range_end", value)
    @property
    @pulumi.getter(name="frontendPortRangeStart")
    def frontend_port_range_start(self) -> pulumi.Input[int]:
        """
        Gets or sets the starting port range for the NAT pool. You can specify any port number you choose, but the port numbers specified for each role in the service must be unique. Possible values range between 1 and 65535, inclusive
        """
        return pulumi.get(self, "frontend_port_range_start")
    @frontend_port_range_start.setter
    def frontend_port_range_start(self, value: pulumi.Input[int]):
        pulumi.set(self, "frontend_port_range_start", value)
    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Input[Union[str, 'TransportProtocol']]:
        """
        Gets or sets the transport protocol for the external endpoint. Possible values are Udp or Tcp
        """
        return pulumi.get(self, "protocol")
    @protocol.setter
    def protocol(self, value: pulumi.Input[Union[str, 'TransportProtocol']]):
        pulumi.set(self, "protocol", value)
    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """
        A unique read-only string that changes whenever the resource is updated
        """
        return pulumi.get(self, "etag")
    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)
    @property
    @pulumi.getter(name="frontendIPConfiguration")
    def frontend_ip_configuration(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Gets or sets a reference to frontend IP Addresses
        """
        return pulumi.get(self, "frontend_ip_configuration")
    @frontend_ip_configuration.setter
    def frontend_ip_configuration(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "frontend_ip_configuration", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Id
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """
        Gets or sets Provisioning state of the PublicIP resource Updating/Deleting/Failed
        """
        return pulumi.get(self, "provisioning_state")
    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)
@pulumi.input_type
class InboundNatRuleArgs:
    """
    Inbound NAT rule of the loadbalancer: forwards a single frontend port to a
    backend port on one NIC IP configuration, over the given transport protocol.
    """
    def __init__(__self__, *,
                 enable_floating_ip: pulumi.Input[bool],
                 frontend_port: pulumi.Input[int],
                 protocol: pulumi.Input[Union[str, 'TransportProtocol']],
                 backend_ip_configuration: Optional[pulumi.Input['SubResourceArgs']] = None,
                 backend_port: Optional[pulumi.Input[int]] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 frontend_ip_configuration: Optional[pulumi.Input['SubResourceArgs']] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 idle_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None):
        """
        Inbound NAT rule of the loadbalancer
        :param pulumi.Input[bool] enable_floating_ip: Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn availability Group. This setting is required when using the SQL Always ON availability Groups in SQL server. This setting can't be changed after you create the endpoint
        :param pulumi.Input[int] frontend_port: Gets or sets the port for the external endpoint. You can specify any port number you choose, but the port numbers specified for each role in the service must be unique. Possible values range between 1 and 65535, inclusive
        :param pulumi.Input[Union[str, 'TransportProtocol']] protocol: Gets or sets the transport protocol for the external endpoint. Possible values are Udp or Tcp
        :param pulumi.Input['SubResourceArgs'] backend_ip_configuration: Gets or sets a reference to a private ip address defined on a NetworkInterface of a VM. Traffic sent to frontendPort of each of the frontendIPConfigurations is forwarded to the backend IP
        :param pulumi.Input[int] backend_port: Gets or sets a port used for internal connections on the endpoint. The localPort attribute maps the external port of the endpoint to an internal port on a role. This is useful in scenarios where a role must communicate to an internal component on a port that is different from the one that is exposed externally. If not specified, the value of localPort is the same as the port attribute. Set the value of localPort to '*' to automatically assign an unallocated port that is discoverable using the runtime API
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated
        :param pulumi.Input['SubResourceArgs'] frontend_ip_configuration: Gets or sets a reference to frontend IP Addresses
        :param pulumi.Input[str] id: Resource Id
        :param pulumi.Input[int] idle_timeout_in_minutes: Gets or sets the timeout for the Tcp idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to Tcp
        :param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        :param pulumi.Input[str] provisioning_state: Gets or sets Provisioning state of the PublicIP resource Updating/Deleting/Failed
        """
        pulumi.set(__self__, "enable_floating_ip", enable_floating_ip)
        pulumi.set(__self__, "frontend_port", frontend_port)
        pulumi.set(__self__, "protocol", protocol)
        # Optional inputs are only recorded in the input map when explicitly supplied.
        if backend_ip_configuration is not None:
            pulumi.set(__self__, "backend_ip_configuration", backend_ip_configuration)
        if backend_port is not None:
            pulumi.set(__self__, "backend_port", backend_port)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if frontend_ip_configuration is not None:
            pulumi.set(__self__, "frontend_ip_configuration", frontend_ip_configuration)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if idle_timeout_in_minutes is not None:
            pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
    @property
    @pulumi.getter(name="enableFloatingIP")
    def enable_floating_ip(self) -> pulumi.Input[bool]:
        """
        Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn availability Group. This setting is required when using the SQL Always ON availability Groups in SQL server. This setting can't be changed after you create the endpoint
        """
        return pulumi.get(self, "enable_floating_ip")
    @enable_floating_ip.setter
    def enable_floating_ip(self, value: pulumi.Input[bool]):
        pulumi.set(self, "enable_floating_ip", value)
    @property
    @pulumi.getter(name="frontendPort")
    def frontend_port(self) -> pulumi.Input[int]:
        """
        Gets or sets the port for the external endpoint. You can specify any port number you choose, but the port numbers specified for each role in the service must be unique. Possible values range between 1 and 65535, inclusive
        """
        return pulumi.get(self, "frontend_port")
    @frontend_port.setter
    def frontend_port(self, value: pulumi.Input[int]):
        pulumi.set(self, "frontend_port", value)
    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Input[Union[str, 'TransportProtocol']]:
        """
        Gets or sets the transport protocol for the external endpoint. Possible values are Udp or Tcp
        """
        return pulumi.get(self, "protocol")
    @protocol.setter
    def protocol(self, value: pulumi.Input[Union[str, 'TransportProtocol']]):
        pulumi.set(self, "protocol", value)
    @property
    @pulumi.getter(name="backendIPConfiguration")
    def backend_ip_configuration(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Gets or sets a reference to a private ip address defined on a NetworkInterface of a VM. Traffic sent to frontendPort of each of the frontendIPConfigurations is forwarded to the backend IP
        """
        return pulumi.get(self, "backend_ip_configuration")
    @backend_ip_configuration.setter
    def backend_ip_configuration(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "backend_ip_configuration", value)
    @property
    @pulumi.getter(name="backendPort")
    def backend_port(self) -> Optional[pulumi.Input[int]]:
        """
        Gets or sets a port used for internal connections on the endpoint. The localPort attribute maps the external port of the endpoint to an internal port on a role. This is useful in scenarios where a role must communicate to an internal component on a port that is different from the one that is exposed externally. If not specified, the value of localPort is the same as the port attribute. Set the value of localPort to '*' to automatically assign an unallocated port that is discoverable using the runtime API
        """
        return pulumi.get(self, "backend_port")
    @backend_port.setter
    def backend_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "backend_port", value)
    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """
        A unique read-only string that changes whenever the resource is updated
        """
        return pulumi.get(self, "etag")
    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)
    @property
    @pulumi.getter(name="frontendIPConfiguration")
    def frontend_ip_configuration(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Gets or sets a reference to frontend IP Addresses
        """
        return pulumi.get(self, "frontend_ip_configuration")
    @frontend_ip_configuration.setter
    def frontend_ip_configuration(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "frontend_ip_configuration", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Id
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter(name="idleTimeoutInMinutes")
    def idle_timeout_in_minutes(self) -> Optional[pulumi.Input[int]]:
        """
        Gets or sets the timeout for the Tcp idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to Tcp
        """
        return pulumi.get(self, "idle_timeout_in_minutes")
    @idle_timeout_in_minutes.setter
    def idle_timeout_in_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "idle_timeout_in_minutes", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """
        Gets or sets Provisioning state of the PublicIP resource Updating/Deleting/Failed
        """
        return pulumi.get(self, "provisioning_state")
    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)
@pulumi.input_type
class LoadBalancingRuleArgs:
    """
    Load balancing rule of the load balancer: distributes traffic arriving at a
    frontend port across the referenced backend address pool, optionally gated
    by a health probe.
    """
    def __init__(__self__, *,
                 backend_address_pool: pulumi.Input['SubResourceArgs'],
                 enable_floating_ip: pulumi.Input[bool],
                 frontend_port: pulumi.Input[int],
                 protocol: pulumi.Input[Union[str, 'TransportProtocol']],
                 backend_port: Optional[pulumi.Input[int]] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 frontend_ip_configuration: Optional[pulumi.Input['SubResourceArgs']] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 idle_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
                 load_distribution: Optional[pulumi.Input[Union[str, 'LoadDistribution']]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 probe: Optional[pulumi.Input['SubResourceArgs']] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None):
        """
        Rules of the load balancer
        :param pulumi.Input['SubResourceArgs'] backend_address_pool: Gets or sets a reference to a pool of DIPs. Inbound traffic is randomly load balanced across IPs in the backend IPs
        :param pulumi.Input[bool] enable_floating_ip: Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn availability Group. This setting is required when using the SQL Always ON availability Groups in SQL server. This setting can't be changed after you create the endpoint
        :param pulumi.Input[int] frontend_port: Gets or sets the port for the external endpoint. You can specify any port number you choose, but the port numbers specified for each role in the service must be unique. Possible values range between 1 and 65535, inclusive
        :param pulumi.Input[Union[str, 'TransportProtocol']] protocol: Gets or sets the transport protocol for the external endpoint. Possible values are Udp or Tcp
        :param pulumi.Input[int] backend_port: Gets or sets a port used for internal connections on the endpoint. The localPort attribute maps the external port of the endpoint to an internal port on a role. This is useful in scenarios where a role must communicate to an internal component on a port that is different from the one that is exposed externally. If not specified, the value of localPort is the same as the port attribute. Set the value of localPort to '*' to automatically assign an unallocated port that is discoverable using the runtime API
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated
        :param pulumi.Input['SubResourceArgs'] frontend_ip_configuration: Gets or sets a reference to frontend IP Addresses
        :param pulumi.Input[str] id: Resource Id
        :param pulumi.Input[int] idle_timeout_in_minutes: Gets or sets the timeout for the Tcp idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to Tcp
        :param pulumi.Input[Union[str, 'LoadDistribution']] load_distribution: Gets or sets the load distribution policy for this rule
        :param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        :param pulumi.Input['SubResourceArgs'] probe: Gets or sets the reference of the load balancer probe used by the Load Balancing rule.
        :param pulumi.Input[str] provisioning_state: Gets or sets Provisioning state of the PublicIP resource Updating/Deleting/Failed
        """
        pulumi.set(__self__, "backend_address_pool", backend_address_pool)
        pulumi.set(__self__, "enable_floating_ip", enable_floating_ip)
        pulumi.set(__self__, "frontend_port", frontend_port)
        pulumi.set(__self__, "protocol", protocol)
        # Optional inputs are only recorded in the input map when explicitly supplied.
        if backend_port is not None:
            pulumi.set(__self__, "backend_port", backend_port)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if frontend_ip_configuration is not None:
            pulumi.set(__self__, "frontend_ip_configuration", frontend_ip_configuration)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if idle_timeout_in_minutes is not None:
            pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes)
        if load_distribution is not None:
            pulumi.set(__self__, "load_distribution", load_distribution)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if probe is not None:
            pulumi.set(__self__, "probe", probe)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
    @property
    @pulumi.getter(name="backendAddressPool")
    def backend_address_pool(self) -> pulumi.Input['SubResourceArgs']:
        """
        Gets or sets a reference to a pool of DIPs. Inbound traffic is randomly load balanced across IPs in the backend IPs
        """
        return pulumi.get(self, "backend_address_pool")
    @backend_address_pool.setter
    def backend_address_pool(self, value: pulumi.Input['SubResourceArgs']):
        pulumi.set(self, "backend_address_pool", value)
    @property
    @pulumi.getter(name="enableFloatingIP")
    def enable_floating_ip(self) -> pulumi.Input[bool]:
        """
        Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn availability Group. This setting is required when using the SQL Always ON availability Groups in SQL server. This setting can't be changed after you create the endpoint
        """
        return pulumi.get(self, "enable_floating_ip")
    @enable_floating_ip.setter
    def enable_floating_ip(self, value: pulumi.Input[bool]):
        pulumi.set(self, "enable_floating_ip", value)
    @property
    @pulumi.getter(name="frontendPort")
    def frontend_port(self) -> pulumi.Input[int]:
        """
        Gets or sets the port for the external endpoint. You can specify any port number you choose, but the port numbers specified for each role in the service must be unique. Possible values range between 1 and 65535, inclusive
        """
        return pulumi.get(self, "frontend_port")
    @frontend_port.setter
    def frontend_port(self, value: pulumi.Input[int]):
        pulumi.set(self, "frontend_port", value)
    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Input[Union[str, 'TransportProtocol']]:
        """
        Gets or sets the transport protocol for the external endpoint. Possible values are Udp or Tcp
        """
        return pulumi.get(self, "protocol")
    @protocol.setter
    def protocol(self, value: pulumi.Input[Union[str, 'TransportProtocol']]):
        pulumi.set(self, "protocol", value)
    @property
    @pulumi.getter(name="backendPort")
    def backend_port(self) -> Optional[pulumi.Input[int]]:
        """
        Gets or sets a port used for internal connections on the endpoint. The localPort attribute maps the external port of the endpoint to an internal port on a role. This is useful in scenarios where a role must communicate to an internal component on a port that is different from the one that is exposed externally. If not specified, the value of localPort is the same as the port attribute. Set the value of localPort to '*' to automatically assign an unallocated port that is discoverable using the runtime API
        """
        return pulumi.get(self, "backend_port")
    @backend_port.setter
    def backend_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "backend_port", value)
    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """
        A unique read-only string that changes whenever the resource is updated
        """
        return pulumi.get(self, "etag")
    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)
    @property
    @pulumi.getter(name="frontendIPConfiguration")
    def frontend_ip_configuration(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Gets or sets a reference to frontend IP Addresses
        """
        return pulumi.get(self, "frontend_ip_configuration")
    @frontend_ip_configuration.setter
    def frontend_ip_configuration(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "frontend_ip_configuration", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Id
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter(name="idleTimeoutInMinutes")
    def idle_timeout_in_minutes(self) -> Optional[pulumi.Input[int]]:
        """
        Gets or sets the timeout for the Tcp idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to Tcp
        """
        return pulumi.get(self, "idle_timeout_in_minutes")
    @idle_timeout_in_minutes.setter
    def idle_timeout_in_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "idle_timeout_in_minutes", value)
    @property
    @pulumi.getter(name="loadDistribution")
    def load_distribution(self) -> Optional[pulumi.Input[Union[str, 'LoadDistribution']]]:
        """
        Gets or sets the load distribution policy for this rule
        """
        return pulumi.get(self, "load_distribution")
    @load_distribution.setter
    def load_distribution(self, value: Optional[pulumi.Input[Union[str, 'LoadDistribution']]]):
        pulumi.set(self, "load_distribution", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def probe(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Gets or sets the reference of the load balancer probe used by the Load Balancing rule.
        """
        return pulumi.get(self, "probe")
    @probe.setter
    def probe(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "probe", value)
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """
        Gets or sets Provisioning state of the PublicIP resource Updating/Deleting/Failed
        """
        return pulumi.get(self, "provisioning_state")
    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)
@pulumi.input_type
class NetworkInterfaceDnsSettingsArgs:
    """
    Dns Settings of a network interface.
    """
    def __init__(__self__, *,
                 applied_dns_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 dns_servers: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 internal_dns_name_label: Optional[pulumi.Input[str]] = None,
                 internal_fqdn: Optional[pulumi.Input[str]] = None):
        """
        Dns Settings of a network interface
        :param pulumi.Input[Sequence[pulumi.Input[str]]] applied_dns_servers: Gets or sets list of Applied DNS servers IP addresses
        :param pulumi.Input[Sequence[pulumi.Input[str]]] dns_servers: Gets or sets list of DNS servers IP addresses
        :param pulumi.Input[str] internal_dns_name_label: Gets or sets the Internal DNS name
        :param pulumi.Input[str] internal_fqdn: Gets or sets full IDNS name in the form, DnsName.VnetId.ZoneId.TopLevelSuffix. This is set when the NIC is associated to a VM
        """
        # All inputs are optional; only record those explicitly supplied.
        if applied_dns_servers is not None:
            pulumi.set(__self__, "applied_dns_servers", applied_dns_servers)
        if dns_servers is not None:
            pulumi.set(__self__, "dns_servers", dns_servers)
        if internal_dns_name_label is not None:
            pulumi.set(__self__, "internal_dns_name_label", internal_dns_name_label)
        if internal_fqdn is not None:
            pulumi.set(__self__, "internal_fqdn", internal_fqdn)
    @property
    @pulumi.getter(name="appliedDnsServers")
    def applied_dns_servers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Gets or sets list of Applied DNS servers IP addresses
        """
        return pulumi.get(self, "applied_dns_servers")
    @applied_dns_servers.setter
    def applied_dns_servers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "applied_dns_servers", value)
    @property
    @pulumi.getter(name="dnsServers")
    def dns_servers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Gets or sets list of DNS servers IP addresses
        """
        return pulumi.get(self, "dns_servers")
    @dns_servers.setter
    def dns_servers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "dns_servers", value)
    @property
    @pulumi.getter(name="internalDnsNameLabel")
    def internal_dns_name_label(self) -> Optional[pulumi.Input[str]]:
        """
        Gets or sets the Internal DNS name
        """
        return pulumi.get(self, "internal_dns_name_label")
    @internal_dns_name_label.setter
    def internal_dns_name_label(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "internal_dns_name_label", value)
    @property
    @pulumi.getter(name="internalFqdn")
    def internal_fqdn(self) -> Optional[pulumi.Input[str]]:
        """
        Gets or sets full IDNS name in the form, DnsName.VnetId.ZoneId.TopLevelSuffix. This is set when the NIC is associated to a VM
        """
        return pulumi.get(self, "internal_fqdn")
    @internal_fqdn.setter
    def internal_fqdn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "internal_fqdn", value)
@pulumi.input_type
class NetworkInterfaceIpConfigurationArgs:
    def __init__(__self__, *,
                 etag: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 load_balancer_backend_address_pools: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
                 load_balancer_inbound_nat_rules: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 private_ip_address: Optional[pulumi.Input[str]] = None,
                 private_ip_allocation_method: Optional[pulumi.Input[Union[str, 'IpAllocationMethod']]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None,
                 public_ip_address: Optional[pulumi.Input['SubResourceArgs']] = None,
                 subnet: Optional[pulumi.Input['SubResourceArgs']] = None):
        """
        IPConfiguration in a NetworkInterface
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated
        :param pulumi.Input[str] id: Resource Id
        :param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] load_balancer_backend_address_pools: Gets or sets the reference of LoadBalancerBackendAddressPool resource
        :param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] load_balancer_inbound_nat_rules: Gets or sets list of references of LoadBalancerInboundNatRules
        :param pulumi.Input[str] name: Gets name of the resource that is unique within a resource group. This name can be used to access the resource
        :param pulumi.Input[str] private_ip_address: Gets or sets the privateIPAddress of the Network Interface IP Configuration
        :param pulumi.Input[Union[str, 'IpAllocationMethod']] private_ip_allocation_method: Gets or sets PrivateIP allocation method (Static/Dynamic)
        :param pulumi.Input[str] provisioning_state: Gets or sets Provisioning state of the PublicIP resource Updating/Deleting/Failed
        :param pulumi.Input['SubResourceArgs'] public_ip_address: Gets or sets the reference of the PublicIP resource
        :param pulumi.Input['SubResourceArgs'] subnet: Gets or sets the reference of the subnet resource
        """
        # All inputs are optional; only record those explicitly supplied.
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if load_balancer_backend_address_pools is not None:
            pulumi.set(__self__, "load_balancer_backend_address_pools", load_balancer_backend_address_pools)
        if load_balancer_inbound_nat_rules is not None:
            pulumi.set(__self__, "load_balancer_inbound_nat_rules", load_balancer_inbound_nat_rules)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if private_ip_address is not None:
            pulumi.set(__self__, "private_ip_address", private_ip_address)
        if private_ip_allocation_method is not None:
            pulumi.set(__self__, "private_ip_allocation_method", private_ip_allocation_method)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
        if public_ip_address is not None:
            pulumi.set(__self__, "public_ip_address", public_ip_address)
        if subnet is not None:
            pulumi.set(__self__, "subnet", subnet)
    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """
        A unique read-only string that changes whenever the resource is updated
        """
        return pulumi.get(self, "etag")
    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        """Set the etag value."""
        pulumi.set(self, "etag", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Id
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        """Set the resource Id."""
        pulumi.set(self, "id", value)
    @property
    @pulumi.getter(name="loadBalancerBackendAddressPools")
    def load_balancer_backend_address_pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
        """
        Gets or sets the reference of LoadBalancerBackendAddressPool resource
        """
        return pulumi.get(self, "load_balancer_backend_address_pools")
    @load_balancer_backend_address_pools.setter
    def load_balancer_backend_address_pools(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
        """Set the references to LoadBalancerBackendAddressPool resources."""
        pulumi.set(self, "load_balancer_backend_address_pools", value)
    @property
    @pulumi.getter(name="loadBalancerInboundNatRules")
    def load_balancer_inbound_nat_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
        """
        List of references to LoadBalancerInboundNatRules.
        """
        return pulumi.get(self, "load_balancer_inbound_nat_rules")

    @load_balancer_inbound_nat_rules.setter
    def load_balancer_inbound_nat_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
        pulumi.set(self, "load_balancer_inbound_nat_rules", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the resource, unique within its resource group; usable to access the resource.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="privateIPAddress")
    def private_ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        The privateIPAddress of the Network Interface IP Configuration.
        """
        return pulumi.get(self, "private_ip_address")

    @private_ip_address.setter
    def private_ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_ip_address", value)
    @property
    @pulumi.getter(name="privateIPAllocationMethod")
    def private_ip_allocation_method(self) -> Optional[pulumi.Input[Union[str, 'IpAllocationMethod']]]:
        """
        PrivateIP allocation method (Static/Dynamic).
        """
        return pulumi.get(self, "private_ip_allocation_method")

    @private_ip_allocation_method.setter
    def private_ip_allocation_method(self, value: Optional[pulumi.Input[Union[str, 'IpAllocationMethod']]]):
        pulumi.set(self, "private_ip_allocation_method", value)
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """
        Provisioning state of the resource (Updating/Deleting/Failed).
        """
        return pulumi.get(self, "provisioning_state")

    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)
    @property
    @pulumi.getter(name="publicIPAddress")
    def public_ip_address(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Reference to the PublicIP resource.
        """
        return pulumi.get(self, "public_ip_address")

    @public_ip_address.setter
    def public_ip_address(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "public_ip_address", value)
    @property
    @pulumi.getter
    def subnet(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        Reference to the subnet resource.
        """
        return pulumi.get(self, "subnet")

    @subnet.setter
    def subnet(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "subnet", value)
@pulumi.input_type
class OutboundNatRuleArgs:
    def __init__(__self__, *,
                 allocated_outbound_ports: pulumi.Input[int],
                 backend_address_pool: pulumi.Input['SubResourceArgs'],
                 etag: Optional[pulumi.Input[str]] = None,
                 frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None):
        """
        Outbound NAT pool of the loadbalancer.

        :param pulumi.Input[int] allocated_outbound_ports: Number of outbound ports to be used for SNAT.
        :param pulumi.Input['SubResourceArgs'] backend_address_pool: Reference to a pool of DIPs; outbound traffic is randomly load balanced across IPs in the backend IPs.
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated.
        :param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] frontend_ip_configurations: Frontend IP addresses of the load balancer.
        :param pulumi.Input[str] id: Resource Id.
        :param pulumi.Input[str] name: Name of the resource, unique within its resource group; usable to access the resource.
        :param pulumi.Input[str] provisioning_state: Provisioning state of the resource (Updating/Deleting/Failed).
        """
        # Required inputs are always recorded.
        pulumi.set(__self__, "allocated_outbound_ports", allocated_outbound_ports)
        pulumi.set(__self__, "backend_address_pool", backend_address_pool)
        # Optional inputs are recorded only when supplied, so unset values do
        # not appear among the resource's inputs.
        for key, arg in (
                ("etag", etag),
                ("frontend_ip_configurations", frontend_ip_configurations),
                ("id", id),
                ("name", name),
                ("provisioning_state", provisioning_state)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="allocatedOutboundPorts")
    def allocated_outbound_ports(self) -> pulumi.Input[int]:
        """
        Number of outbound ports to be used for SNAT.
        """
        return pulumi.get(self, "allocated_outbound_ports")

    @allocated_outbound_ports.setter
    def allocated_outbound_ports(self, value: pulumi.Input[int]):
        pulumi.set(self, "allocated_outbound_ports", value)

    @property
    @pulumi.getter(name="backendAddressPool")
    def backend_address_pool(self) -> pulumi.Input['SubResourceArgs']:
        """
        Reference to a pool of DIPs; outbound traffic is randomly load balanced across IPs in the backend IPs.
        """
        return pulumi.get(self, "backend_address_pool")

    @backend_address_pool.setter
    def backend_address_pool(self, value: pulumi.Input['SubResourceArgs']):
        pulumi.set(self, "backend_address_pool", value)

    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)

    @property
    @pulumi.getter(name="frontendIPConfigurations")
    def frontend_ip_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
        """
        Frontend IP addresses of the load balancer.
        """
        return pulumi.get(self, "frontend_ip_configurations")

    @frontend_ip_configurations.setter
    def frontend_ip_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
        pulumi.set(self, "frontend_ip_configurations", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Id.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the resource, unique within its resource group; usable to access the resource.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """
        Provisioning state of the resource (Updating/Deleting/Failed).
        """
        return pulumi.get(self, "provisioning_state")

    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)
@pulumi.input_type
class ProbeArgs:
    def __init__(__self__, *,
                 port: pulumi.Input[int],
                 protocol: pulumi.Input[Union[str, 'ProbeProtocol']],
                 etag: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 interval_in_seconds: Optional[pulumi.Input[int]] = None,
                 load_balancing_rules: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 number_of_probes: Optional[pulumi.Input[int]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None,
                 request_path: Optional[pulumi.Input[str]] = None):
        """
        Load balancer probe.

        :param pulumi.Input[int] port: Port for communicating the probe. Possible values range from 1 to 65535, inclusive.
        :param pulumi.Input[Union[str, 'ProbeProtocol']] protocol: Protocol of the end point. Possible values are http or Tcp. If Tcp is specified, a received ACK is required for the probe to be successful. If http is specified, a 200 OK response from the specified URI is required for the probe to be successful.
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated.
        :param pulumi.Input[str] id: Resource Id.
        :param pulumi.Input[int] interval_in_seconds: Interval, in seconds, for how frequently to probe the endpoint for health status. Typically slightly less than half the allocated timeout period, allowing two full probes before the instance is taken out of rotation. Default 15, minimum 5.
        :param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] load_balancing_rules: Load balancer rules that use this probe.
        :param pulumi.Input[str] name: Name of the resource, unique within its resource group; usable to access the resource.
        :param pulumi.Input[int] number_of_probes: Number of probes with no response that results in stopping further traffic from being delivered to the endpoint. Allows endpoints to be taken out of rotation faster or slower than the typical times used in Azure.
        :param pulumi.Input[str] provisioning_state: Provisioning state of the resource (Updating/Deleting/Failed).
        :param pulumi.Input[str] request_path: URI used for requesting health status from the VM. Required if the protocol is set to http; otherwise not allowed. No default value.
        """
        # Required inputs are always recorded.
        pulumi.set(__self__, "port", port)
        pulumi.set(__self__, "protocol", protocol)
        # Optional inputs are recorded only when supplied, so unset values do
        # not appear among the resource's inputs.
        for key, arg in (
                ("etag", etag),
                ("id", id),
                ("interval_in_seconds", interval_in_seconds),
                ("load_balancing_rules", load_balancing_rules),
                ("name", name),
                ("number_of_probes", number_of_probes),
                ("provisioning_state", provisioning_state),
                ("request_path", request_path)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def port(self) -> pulumi.Input[int]:
        """
        Port for communicating the probe. Possible values range from 1 to 65535, inclusive.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: pulumi.Input[int]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Input[Union[str, 'ProbeProtocol']]:
        """
        Protocol of the end point, http or Tcp. Tcp requires a received ACK for success; http requires a 200 OK response from the specified URI.
        """
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: pulumi.Input[Union[str, 'ProbeProtocol']]):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Id.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter(name="intervalInSeconds")
    def interval_in_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        Interval, in seconds, for how frequently to probe the endpoint for health status. Default 15, minimum 5.
        """
        return pulumi.get(self, "interval_in_seconds")

    @interval_in_seconds.setter
    def interval_in_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "interval_in_seconds", value)

    @property
    @pulumi.getter(name="loadBalancingRules")
    def load_balancing_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
        """
        Load balancer rules that use this probe.
        """
        return pulumi.get(self, "load_balancing_rules")

    @load_balancing_rules.setter
    def load_balancing_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
        pulumi.set(self, "load_balancing_rules", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the resource, unique within its resource group; usable to access the resource.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="numberOfProbes")
    def number_of_probes(self) -> Optional[pulumi.Input[int]]:
        """
        Number of probes with no response that results in stopping further traffic delivery to the endpoint.
        """
        return pulumi.get(self, "number_of_probes")

    @number_of_probes.setter
    def number_of_probes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "number_of_probes", value)

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """
        Provisioning state of the resource (Updating/Deleting/Failed).
        """
        return pulumi.get(self, "provisioning_state")

    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)

    @property
    @pulumi.getter(name="requestPath")
    def request_path(self) -> Optional[pulumi.Input[str]]:
        """
        URI used for requesting health status from the VM. Required if protocol is http; otherwise not allowed.
        """
        return pulumi.get(self, "request_path")

    @request_path.setter
    def request_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "request_path", value)
@pulumi.input_type
class PublicIpAddressDnsSettingsArgs:
    def __init__(__self__, *,
                 domain_name_label: Optional[pulumi.Input[str]] = None,
                 fqdn: Optional[pulumi.Input[str]] = None,
                 reverse_fqdn: Optional[pulumi.Input[str]] = None):
        """
        Contains the FQDN of the DNS record associated with the public IP address.

        :param pulumi.Input[str] domain_name_label: Domain name label. The concatenation of the domain name label and the regionalized DNS zone makes up the fully qualified domain name associated with the public IP address. If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS system.
        :param pulumi.Input[str] fqdn: FQDN of the A DNS record associated with the public IP; the concatenation of the domainNameLabel and the regionalized DNS zone.
        :param pulumi.Input[str] reverse_fqdn: Reverse FQDN. A user-visible, fully qualified domain name that resolves to this public IP address. If specified, a PTR DNS record is created pointing from the IP address in the in-addr.arpa domain to the reverse FQDN.
        """
        # Every input is optional; record only those that were supplied.
        for key, arg in (
                ("domain_name_label", domain_name_label),
                ("fqdn", fqdn),
                ("reverse_fqdn", reverse_fqdn)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="domainNameLabel")
    def domain_name_label(self) -> Optional[pulumi.Input[str]]:
        """
        Domain name label; combined with the regionalized DNS zone it forms the public IP's fully qualified domain name.
        """
        return pulumi.get(self, "domain_name_label")

    @domain_name_label.setter
    def domain_name_label(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "domain_name_label", value)

    @property
    @pulumi.getter
    def fqdn(self) -> Optional[pulumi.Input[str]]:
        """
        FQDN of the A DNS record associated with the public IP.
        """
        return pulumi.get(self, "fqdn")

    @fqdn.setter
    def fqdn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "fqdn", value)

    @property
    @pulumi.getter(name="reverseFqdn")
    def reverse_fqdn(self) -> Optional[pulumi.Input[str]]:
        """
        Reverse FQDN: a user-visible, fully qualified domain name that resolves to this public IP address.
        """
        return pulumi.get(self, "reverse_fqdn")

    @reverse_fqdn.setter
    def reverse_fqdn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "reverse_fqdn", value)
@pulumi.input_type
class RouteArgs:
    def __init__(__self__, *,
                 next_hop_type: pulumi.Input[Union[str, 'RouteNextHopType']],
                 address_prefix: Optional[pulumi.Input[str]] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 next_hop_ip_address: Optional[pulumi.Input[str]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None):
        """
        Route resource.

        :param pulumi.Input[Union[str, 'RouteNextHopType']] next_hop_type: Type of Azure hop the packet should be sent to.
        :param pulumi.Input[str] address_prefix: Destination CIDR to which the route applies.
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated.
        :param pulumi.Input[str] id: Resource Id.
        :param pulumi.Input[str] name: Name of the resource, unique within its resource group; usable to access the resource.
        :param pulumi.Input[str] next_hop_ip_address: IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop type is VirtualAppliance.
        :param pulumi.Input[str] provisioning_state: Provisioning state of the resource (Updating/Deleting/Failed).
        """
        # The only required input is always recorded.
        pulumi.set(__self__, "next_hop_type", next_hop_type)
        # Optional inputs are recorded only when supplied, so unset values do
        # not appear among the resource's inputs.
        for key, arg in (
                ("address_prefix", address_prefix),
                ("etag", etag),
                ("id", id),
                ("name", name),
                ("next_hop_ip_address", next_hop_ip_address),
                ("provisioning_state", provisioning_state)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter(name="nextHopType")
    def next_hop_type(self) -> pulumi.Input[Union[str, 'RouteNextHopType']]:
        """
        Type of Azure hop the packet should be sent to.
        """
        return pulumi.get(self, "next_hop_type")

    @next_hop_type.setter
    def next_hop_type(self, value: pulumi.Input[Union[str, 'RouteNextHopType']]):
        pulumi.set(self, "next_hop_type", value)

    @property
    @pulumi.getter(name="addressPrefix")
    def address_prefix(self) -> Optional[pulumi.Input[str]]:
        """
        Destination CIDR to which the route applies.
        """
        return pulumi.get(self, "address_prefix")

    @address_prefix.setter
    def address_prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "address_prefix", value)

    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Id.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the resource, unique within its resource group; usable to access the resource.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="nextHopIpAddress")
    def next_hop_ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        IP address packets should be forwarded to; only allowed when the next hop type is VirtualAppliance.
        """
        return pulumi.get(self, "next_hop_ip_address")

    @next_hop_ip_address.setter
    def next_hop_ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "next_hop_ip_address", value)

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """
        Provisioning state of the resource (Updating/Deleting/Failed).
        """
        return pulumi.get(self, "provisioning_state")

    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)
@pulumi.input_type
class SecurityRuleArgs:
    def __init__(__self__, *,
                 access: pulumi.Input[Union[str, 'SecurityRuleAccess']],
                 destination_address_prefix: pulumi.Input[str],
                 direction: pulumi.Input[Union[str, 'SecurityRuleDirection']],
                 protocol: pulumi.Input[Union[str, 'SecurityRuleProtocol']],
                 source_address_prefix: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 destination_port_range: Optional[pulumi.Input[str]] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 priority: Optional[pulumi.Input[int]] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None,
                 source_port_range: Optional[pulumi.Input[str]] = None):
        """
        Network security rule.

        :param pulumi.Input[Union[str, 'SecurityRuleAccess']] access: Whether network traffic is allowed or denied. Possible values are 'Allow' and 'Deny'.
        :param pulumi.Input[str] destination_address_prefix: Destination address prefix: CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
        :param pulumi.Input[Union[str, 'SecurityRuleDirection']] direction: Direction of the rule, InBound or Outbound; specifies whether the rule is evaluated on incoming or outgoing traffic.
        :param pulumi.Input[Union[str, 'SecurityRuleProtocol']] protocol: Network protocol this rule applies to. Can be Tcp, Udp or All(*).
        :param pulumi.Input[str] source_address_prefix: Source address prefix: CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
        :param pulumi.Input[str] description: Description for this rule. Restricted to 140 chars.
        :param pulumi.Input[str] destination_port_range: Destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated.
        :param pulumi.Input[str] id: Resource Id.
        :param pulumi.Input[str] name: Name of the resource, unique within its resource group; usable to access the resource.
        :param pulumi.Input[int] priority: Priority of the rule. The value can be between 100 and 4096 and must be unique for each rule in the collection. The lower the number, the higher the priority.
        :param pulumi.Input[str] provisioning_state: Provisioning state of the resource (Updating/Deleting/Failed).
        :param pulumi.Input[str] source_port_range: Source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
        """
        # Required inputs are always recorded.
        pulumi.set(__self__, "access", access)
        pulumi.set(__self__, "destination_address_prefix", destination_address_prefix)
        pulumi.set(__self__, "direction", direction)
        pulumi.set(__self__, "protocol", protocol)
        pulumi.set(__self__, "source_address_prefix", source_address_prefix)
        # Optional inputs are recorded only when supplied, so unset values do
        # not appear among the resource's inputs.
        for key, arg in (
                ("description", description),
                ("destination_port_range", destination_port_range),
                ("etag", etag),
                ("id", id),
                ("name", name),
                ("priority", priority),
                ("provisioning_state", provisioning_state),
                ("source_port_range", source_port_range)):
            if arg is not None:
                pulumi.set(__self__, key, arg)

    @property
    @pulumi.getter
    def access(self) -> pulumi.Input[Union[str, 'SecurityRuleAccess']]:
        """
        Whether network traffic is allowed or denied: 'Allow' or 'Deny'.
        """
        return pulumi.get(self, "access")

    @access.setter
    def access(self, value: pulumi.Input[Union[str, 'SecurityRuleAccess']]):
        pulumi.set(self, "access", value)

    @property
    @pulumi.getter(name="destinationAddressPrefix")
    def destination_address_prefix(self) -> pulumi.Input[str]:
        """
        Destination address prefix: CIDR or IP range; '*' matches all IPs; default tags like 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' may also be used.
        """
        return pulumi.get(self, "destination_address_prefix")

    @destination_address_prefix.setter
    def destination_address_prefix(self, value: pulumi.Input[str]):
        pulumi.set(self, "destination_address_prefix", value)

    @property
    @pulumi.getter
    def direction(self) -> pulumi.Input[Union[str, 'SecurityRuleDirection']]:
        """
        Direction of the rule, InBound or Outbound; determines whether it is evaluated on incoming or outgoing traffic.
        """
        return pulumi.get(self, "direction")

    @direction.setter
    def direction(self, value: pulumi.Input[Union[str, 'SecurityRuleDirection']]):
        pulumi.set(self, "direction", value)

    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Input[Union[str, 'SecurityRuleProtocol']]:
        """
        Network protocol this rule applies to: Tcp, Udp or All(*).
        """
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: pulumi.Input[Union[str, 'SecurityRuleProtocol']]):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter(name="sourceAddressPrefix")
    def source_address_prefix(self) -> pulumi.Input[str]:
        """
        Source address prefix: CIDR or IP range; '*' matches all IPs; default tags may also be used. For an ingress rule, specifies where network traffic originates from.
        """
        return pulumi.get(self, "source_address_prefix")

    @source_address_prefix.setter
    def source_address_prefix(self, value: pulumi.Input[str]):
        pulumi.set(self, "source_address_prefix", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description for this rule; restricted to 140 characters.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="destinationPortRange")
    def destination_port_range(self) -> Optional[pulumi.Input[str]]:
        """
        Destination port or range: integer or range between 0 and 65535; '*' matches all ports.
        """
        return pulumi.get(self, "destination_port_range")

    @destination_port_range.setter
    def destination_port_range(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "destination_port_range", value)

    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Id.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the resource, unique within its resource group; usable to access the resource.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def priority(self) -> Optional[pulumi.Input[int]]:
        """
        Priority of the rule, between 100 and 4096; must be unique per rule in the collection. Lower numbers have higher priority.
        """
        return pulumi.get(self, "priority")

    @priority.setter
    def priority(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "priority", value)

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> Optional[pulumi.Input[str]]:
        """
        Provisioning state of the resource (Updating/Deleting/Failed).
        """
        return pulumi.get(self, "provisioning_state")

    @provisioning_state.setter
    def provisioning_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "provisioning_state", value)

    @property
    @pulumi.getter(name="sourcePortRange")
    def source_port_range(self) -> Optional[pulumi.Input[str]]:
        """
        Source port or range: integer or range between 0 and 65535; '*' matches all ports.
        """
        return pulumi.get(self, "source_port_range")

    @source_port_range.setter
    def source_port_range(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_port_range", value)
@pulumi.input_type
class SubResourceArgs:
    def __init__(__self__, *,
                 id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] id: Resource Id.
        """
        # Record the id only when one was supplied, so an unset id is
        # omitted from the resource's inputs.
        if id is not None:
            pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource Id.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
@pulumi.input_type
class SubnetArgs:
    def __init__(__self__, *,
                 address_prefix: pulumi.Input[str],
                 etag: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 network_security_group: Optional[pulumi.Input['SubResourceArgs']] = None,
                 provisioning_state: Optional[pulumi.Input[str]] = None,
                 route_table: Optional[pulumi.Input['SubResourceArgs']] = None):
        """
        Subnet in a VirtualNetwork resource.

        Optional arguments left as ``None`` are not recorded, so unset values
        are omitted from the resource's inputs.

        :param pulumi.Input[str] address_prefix: Address prefix for the subnet.
        :param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated.
        :param pulumi.Input[str] id: Resource Id.
        :param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] ip_configurations: References to the network interface IP configurations using this subnet.
        :param pulumi.Input[str] name: Name of the resource, unique within its resource group; usable to access the resource.
        :param pulumi.Input['SubResourceArgs'] network_security_group: Reference to the NetworkSecurityGroup resource.
        :param pulumi.Input[str] provisioning_state: Provisioning state of the resource (Updating/Deleting/Failed).
        :param pulumi.Input['SubResourceArgs'] route_table: Reference to the RouteTable resource.
        """
        pulumi.set(__self__, "address_prefix", address_prefix)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if ip_configurations is not None:
            pulumi.set(__self__, "ip_configurations", ip_configurations)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if network_security_group is not None:
            pulumi.set(__self__, "network_security_group", network_security_group)
        if provisioning_state is not None:
            pulumi.set(__self__, "provisioning_state", provisioning_state)
        if route_table is not None:
            pulumi.set(__self__, "route_table", route_table)
@property
@pulumi.getter(name="addressPrefix")
def address_prefix(self) -> pulumi.Input[str]:
    """
    Gets or sets Address prefix for the subnet.
    """
    # Required field; stored under the snake_case key, exposed on the wire
    # as "addressPrefix" (see the @pulumi.getter name above).
    return pulumi.get(self, "address_prefix")
@address_prefix.setter
def address_prefix(self, value: pulumi.Input[str]):
    # Setter counterpart: writes the same "address_prefix" key.
    pulumi.set(self, "address_prefix", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
    """
    A unique read-only string that changes whenever the resource is updated
    """
    # Optional field; backed by the "etag" key via pulumi.get/pulumi.set.
    return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
    # Setter counterpart: writes the same "etag" key.
    pulumi.set(self, "etag", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
    """
    Resource Id
    """
    # Optional field; backed by the "id" key via pulumi.get/pulumi.set.
    return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
    # Setter counterpart: writes the same "id" key.
    pulumi.set(self, "id", value)
@property
@pulumi.getter(name="ipConfigurations")
def ip_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
    """
    Gets array of references to the network interface IP configurations using subnet
    """
    # Optional field; wire-format (camelCase) name is "ipConfigurations".
    return pulumi.get(self, "ip_configurations")
@ip_configurations.setter
def ip_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
    # Setter counterpart: writes the same "ip_configurations" key.
    pulumi.set(self, "ip_configurations", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """
    Gets name of the resource that is unique within a resource group. This name can be used to access the resource
    """
    # Optional field; backed by the "name" key via pulumi.get/pulumi.set.
    return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
    # Setter counterpart: writes the same "name" key.
    pulumi.set(self, "name", value)
@property
@pulumi.getter(name="networkSecurityGroup")
def network_security_group(self) -> Optional[pulumi.Input['SubResourceArgs']]:
    """
    Gets or sets the reference of the NetworkSecurityGroup resource
    """
    # Optional field; wire-format (camelCase) name is "networkSecurityGroup".
    return pulumi.get(self, "network_security_group")
@network_security_group.setter
def network_security_group(self, value: Optional[pulumi.Input['SubResourceArgs']]):
    # Setter counterpart: writes the same "network_security_group" key.
    pulumi.set(self, "network_security_group", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[str]]:
    """
    Gets or sets Provisioning state of the PublicIP resource Updating/Deleting/Failed
    """
    # Optional field; wire-format (camelCase) name is "provisioningState".
    return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[str]]):
    # Setter counterpart: writes the same "provisioning_state" key.
    pulumi.set(self, "provisioning_state", value)
@property
@pulumi.getter(name="routeTable")
def route_table(self) -> Optional[pulumi.Input['SubResourceArgs']]:
    """
    Gets or sets the reference of the RouteTable resource
    """
    # Optional field; wire-format (camelCase) name is "routeTable".
    return pulumi.get(self, "route_table")
@route_table.setter
def route_table(self, value: Optional[pulumi.Input['SubResourceArgs']]):
    # Setter counterpart: writes the same "route_table" key.
    pulumi.set(self, "route_table", value)
| 44.311538 | 555 | 0.669727 |
acf32e0fb209b81fc2456ed0326f693802bbdea1 | 11,477 | py | Python | config/settings/base.py | pygabo/Serempre | 6b29e337abd8d1b3f71ee889d318a2d473d6c744 | [
"MIT"
] | null | null | null | config/settings/base.py | pygabo/Serempre | 6b29e337abd8d1b3f71ee889d318a2d473d6c744 | [
"MIT"
] | null | null | null | config/settings/base.py | pygabo/Serempre | 6b29e337abd8d1b3f71ee889d318a2d473d6c744 | [
"MIT"
] | null | null | null | """
Base settings to build other settings files upon.
"""
from pathlib import Path
import environ
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
# serempre_todo/
APPS_DIR = ROOT_DIR / "serempre_todo"
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR / ".env"))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "UTC"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "en-us"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [str(ROOT_DIR / "locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {"default": env.db("DATABASE_URL")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# "django.contrib.humanize", # Handy template tags
"django.contrib.admin",
"django.forms",
]
THIRD_PARTY_APPS = [
"crispy_forms",
"allauth",
"allauth.account",
"allauth.socialaccount",
"rest_framework",
"rest_framework.authtoken",
]
LOCAL_APPS = [
"serempre_todo.users.apps.UsersConfig",
"serempre_todo.task.apps.TaskConfig",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "serempre_todo.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = "task-list"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = "account_login"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR / "static")]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR / "media")
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(APPS_DIR / "templates")],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"serempre_todo.utils.context_processors.settings_context",
],
},
}
]
# https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR / "fixtures"),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout
EMAIL_TIMEOUT = 5
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""Jose Gabriel Guzman Lopez""", "jose-gabriel-guzman-lopez@example.com")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = "username"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = "none"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = "serempre_todo.users.adapters.AccountAdapter"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = "serempre_todo.users.adapters.SocialAccountAdapter"
# django-rest-framework
# -------------------------------------------------------------------------------
# django-rest-framework - https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.TokenAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
}
# Your stuff...
# ------------------------------------------------------------------------------
| 41.136201 | 93 | 0.632134 |
acf32e9662aa17e179bb91f1bf76ecb7681ec7a0 | 3,693 | py | Python | hypothesisEngine/utilities/fitness/get_data.py | pavandonthireddy/Project_V4 | 6dea85b6b2ac9b05056b1f8a859361427d642fed | [
"Apache-2.0"
] | null | null | null | hypothesisEngine/utilities/fitness/get_data.py | pavandonthireddy/Project_V4 | 6dea85b6b2ac9b05056b1f8a859361427d642fed | [
"Apache-2.0"
] | null | null | null | hypothesisEngine/utilities/fitness/get_data.py | pavandonthireddy/Project_V4 | 6dea85b6b2ac9b05056b1f8a859361427d642fed | [
"Apache-2.0"
] | null | null | null | from os import path
import numpy as np
from hypothesisEngine.algorithm.parameters import params
def get_Xy_train_test_separate(train_filename, test_filename, skip_header=0):
"""
Read in training and testing data files, and split each into X
(all columns up to last) and y (last column).
:param train_filename: The file name of the training dataset.
:param test_filename: The file name of the testing dataset.
:param skip_header: The number of header lines to skip.
:return: Parsed numpy arrays of training and testing input (x) and
output (y) data.
"""
if params['DATASET_DELIMITER']:
# Dataset delimiter has been explicitly specified.
delimiter = params['DATASET_DELIMITER']
else:
# Try to auto-detect the field separator (i.e. delimiter).
f = open(train_filename)
for line in f:
if line.startswith("#") or len(line) < 2:
# Skip excessively short lines or commented out lines.
continue
else:
# Set the delimiter.
if "\t" in line:
delimiter = "\t"
break
elif "," in line:
delimiter = ","
break
elif ";" in line:
delimiter = ";"
break
elif ":" in line:
delimiter = ":"
break
else:
print("utilities.fitness.get_data.get_Xy_train_test_separate\n"
"Warning: Dataset delimiter not found. "
"Defaulting to whitespace delimiter.")
delimiter = " "
break
f.close()
# Read in all training data.
train_Xy = np.genfromtxt(train_filename, skip_header=skip_header,
delimiter=delimiter)
try:
# Separate out input (X) and output (y) data.
train_X = train_Xy[:, :-1].transpose() # all columns but last
train_y = train_Xy[:, -1].transpose() # last column
except IndexError:
s = "utilities.fitness.get_data.get_Xy_train_test_separate\n" \
"Error: specified delimiter '%s' incorrectly parses training " \
"data." % delimiter
raise Exception(s)
if test_filename:
# Read in all testing data.
test_Xy = np.genfromtxt(test_filename, skip_header=skip_header,
delimiter=delimiter)
# Separate out input (X) and output (y) data.
test_X = test_Xy[:, :-1].transpose() # all columns but last
test_y = test_Xy[:, -1].transpose() # last column
else:
test_X, test_y = None, None
return train_X, train_y, test_X, test_y
def get_data(train, test):
    """
    Return the training and test data for the current experiment.

    :param train: The desired training dataset.
    :param test: The desired testing dataset (falsy when no test set is used).
    :return: The parsed data contained in the dataset files.
    """
    # Dataset files live under the top-level "datasets" directory.
    train_set = path.join("..", "datasets", train)
    test_set = path.join("..", "datasets", test) if test else None
    # Parse both files (first line of each is a header).
    return get_Xy_train_test_separate(train_set, test_set, skip_header=1)
| 33.880734 | 83 | 0.57081 |
acf32faf7381c3d0f19e9d42633007726243aeed | 5,289 | py | Python | tests/run_gui_tests.py | dowellr85/qt_monkey | f034fefc6519dcdf659fa7e4d35dfbb358ed881e | [
"BSD-3-Clause"
] | 38 | 2016-05-25T14:55:38.000Z | 2022-02-06T20:00:14.000Z | tests/run_gui_tests.py | dowellr85/qt_monkey | f034fefc6519dcdf659fa7e4d35dfbb358ed881e | [
"BSD-3-Clause"
] | 10 | 2016-05-06T08:37:24.000Z | 2020-02-12T08:02:39.000Z | tests/run_gui_tests.py | dowellr85/qt_monkey | f034fefc6519dcdf659fa7e4d35dfbb358ed881e | [
"BSD-3-Clause"
] | 17 | 2016-05-25T14:55:34.000Z | 2021-12-22T02:05:40.000Z | #!/usr/bin/env python
import subprocess, sys, io, json, re, codecs, tempfile, atexit, os
ANOTHER_VARIANT = "//another variant:"
EXPECT_LINE = "//expect: "
def args_str_to_list(args_str):
def append_to_args(args, arg):
if arg.isdigit():
args.append(int(arg))
else:
args.append(arg)
whitespaces = re.compile(r"\s+")
quote_param = re.compile(r'"(\\.|[^"])+"')
single_param = re.compile(r"'(\\.|[^'])+'")
res = []
pos = 0
while pos < len(args_str):
m = re.match(whitespaces, args_str[pos:])
if m:
pos += m.end(0)
continue
if args_str[pos] == ',':
pos += 1
continue
m = re.match(quote_param, args_str[pos:])
if m:
res.append(args_str[pos + m.start(0):pos + m.end(0)])
pos += m.end(0)
continue
m = re.match(single_param, args_str[pos:])
if m:
res.append(args_str[pos + m.start(0):pos + m.end(0)])
pos += m.end(0)
continue
new_pos = args_str.find(",", pos)
if new_pos == -1:
append_to_args(res, args_str[pos:])
break
else:
append_to_args(res, args_str[pos:new_pos])
pos = new_pos
return res
def extract_func_name_and_params(line_with_func_call):
    """Split ``Prefix.func(a, b, ...);`` into ``(prefix, parsed_args)``.

    The prefix is everything before the opening parenthesis; the arguments
    are tokenised by args_str_to_list().  Lines that do not match the
    expected shape are logged to stderr and the AttributeError re-raised;
    any other failure is re-raised with the offending line appended.
    """
    try:
        # Raw string avoids the deprecated '\(' escape inside a plain literal.
        func_m = re.match(r'(?P<func_prefix>[^\(]+)\((?P<args>.*)\);$',
                          line_with_func_call)
        args_str = func_m.group("args")
        args = args_str_to_list(args_str)
        return (func_m.group("func_prefix"), args)
    except AttributeError:
        # re.match returned None: the line is not of the expected form.
        sys.stderr.write("error happens with |%s|\n" % line_with_func_call)
        raise
    except Exception as e:
        # BUG FIX: `e.message` is a Python 2 artefact that no longer exists
        # on Python 3, so the old code raised a secondary AttributeError
        # here instead of the intended annotated exception. str(e) works on
        # both versions.
        raise type(e)(str(e) + " happens with '%s'" % line_with_func_call)
# Import-time smoke test: verify the parser on one representative recorded
# monkey call before the comparison loop below relies on it.
(prefix, params) = extract_func_name_and_params("Test.mouseClick('MainWindow.centralwidget.pushButton', 'Qt.LeftButton', 67, 13);")
print("params %s" % params)
assert prefix == "Test.mouseClick"
assert params == ["'MainWindow.centralwidget.pushButton'", "'Qt.LeftButton'", 67, 13]
def compare_two_func_calls(f1_call, f2_call):
    """Return True when two recorded calls are equivalent.

    Calls match when the function prefixes and argument counts agree and
    every non-numeric argument compares equal; integer arguments (e.g.
    click coordinates) are allowed to differ.
    """
    (pref1, params1) = extract_func_name_and_params(f1_call)
    (pref2, params2) = extract_func_name_and_params(f2_call)
    if pref1 != pref2 or len(params1) != len(params2):
        return False
    # FIX: removed a dead `i` counter that was incremented but never read.
    for p1, p2 in zip(params1, params2):
        if type(p1) is int and type(p2) is int:
            # Numeric arguments are intentionally not compared.
            continue
        if p1 != p2:
            sys.stderr.write("params not equal %s vs %s\n" % (p1, p2))
            return False
    return True
def prepare_script_for_os(script_path):
    """Return a script path adjusted for the current platform.

    On macOS, menu-bar click lines are filtered out of the script: a
    filtered copy is written to a temp file (unlinked at interpreter
    exit) and that file's path is returned.  On other platforms the
    input path is returned unchanged.
    """
    if sys.platform == "darwin":
        # BUG FIX: NamedTemporaryFile defaults to binary mode ("w+b");
        # writing `str` lines read from a text file raises TypeError on
        # Python 3, so request text mode explicitly.
        tf = tempfile.NamedTemporaryFile(mode="w", delete=False)
        # `with` ensures the input file is closed even on error.
        with open(script_path, "r") as f:
            for line in f:
                if not line.startswith("Test.mouseClick('MainWindow.menubar"):
                    tf.write(line)

        def delete_tmp_file():
            print("delete tmp file")
            os.unlink(tf.name)
        atexit.register(delete_tmp_file)
        tf.close()
        return tf.name
    else:
        return script_path
# --- Command line: <qt_monkey_app> <app under test> <recorded script> ---
qt_monkey_app_path = sys.argv[1]
test_app_path = sys.argv[2]
script_path = sys.argv[3]
script_path = prepare_script_for_os(script_path)
print("we run script from %s" % script_path)
# Replay the recorded script against the app under test via qt_monkey;
# the monkey exits on the first script error.
monkey_cmd = [qt_monkey_app_path, "--script", script_path,
              "--exit-on-script-error",
              "--user-app", test_app_path]
monkey = subprocess.Popen(monkey_cmd, stdout=subprocess.PIPE,
                          stdin=subprocess.PIPE, stderr=sys.stderr)
# Collect every generated script line that qt_monkey reports in a JSON
# "event" message on its stdout.
code_listing = []
input_stream = codecs.getreader("utf-8")(monkey.stdout)
for line in input_stream:
    print("MONKEY: %s" % line)
    # print("Parsed json: %s" % json.loads(line))
    msg = json.loads(line)
    if type(msg) is dict:
        event = msg.get("event")
        if event:
            code = event["script"]
            for line in code.split("\n"):
                code_listing.append(line)
# Compare the replayed script (expect_lines) against what the monkey
# actually generated (code_listing).  `i` indexes generated lines, `j`
# indexes expected lines.
with open(script_path, "r") as fin:
    i = 0
    j = 0
    expect_lines = fin.readlines()
    while j < len(expect_lines):
        if i >= len(code_listing):
            sys.stderr.write("Unexpected end of actual result\n")
            sys.exit(1)
        line = expect_lines[j].strip()
        expect_seq = False
        if line.startswith(EXPECT_LINE):
            # "//expect: " lines list acceptable alternatives for one event.
            line = line[len(EXPECT_LINE):]
            expect_seq = True
        if not compare_two_func_calls(line, code_listing[i]):
            # Mismatch: tolerate an "//another variant:" alternative in the
            # generated output, or the next "//expect:" alternative in the
            # script; otherwise fail and dump the full generated log.
            if (i + 1) < len(code_listing) and code_listing[i+1].startswith(ANOTHER_VARIANT) and compare_two_func_calls(line, code_listing[i + 1][len(ANOTHER_VARIANT):]):
                i += 1
            elif (j + 1) < len(expect_lines) and expect_lines[j + 1].startswith(EXPECT_LINE) and compare_two_func_calls(expect_lines[j + 1][len(EXPECT_LINE):], code_listing[i]):
                j += 1
            else:
                sys.stderr.write(("Line %d, expected\n`%s'\n, actual\n`%s'\n"
                                  "Full log:\n%s\n") % (i + 1, line, code_listing[i], "\n".join(code_listing)))
                sys.exit(1)
        i += 1
        j += 1
        # Skip any remaining alternatives of a satisfied "//expect:" group.
        while expect_seq and j < len(expect_lines) and expect_lines[j].startswith(EXPECT_LINE):
            j += 1
| 35.496644 | 177 | 0.580639 |
acf32fc3ba3df56a1659119df3dd09d23fb5147c | 4,797 | py | Python | third_party/tvcm/tvcm/generate.py | bpsinc-native/src_third_party_trace-viewer | 1968c6eb82a51e388415f86434e6c6d2f95e1c35 | [
"BSD-3-Clause"
] | null | null | null | third_party/tvcm/tvcm/generate.py | bpsinc-native/src_third_party_trace-viewer | 1968c6eb82a51e388415f86434e6c6d2f95e1c35 | [
"BSD-3-Clause"
] | null | null | null | third_party/tvcm/tvcm/generate.py | bpsinc-native/src_third_party_trace-viewer | 1968c6eb82a51e388415f86434e6c6d2f95e1c35 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import optparse
import sys
import os
import re
import StringIO
from tvcm import js_utils
from tvcm import module as module_module
from tvcm import html_generation_controller
srcdir = os.path.abspath(os.path.join(os.path.dirname(__file__),
"..", "..", "..", "src"))
html_warning_message = """
<!--
WARNING: This file is auto generated.
Do not edit directly.
-->
"""
js_warning_message = """
// Copyright (c) 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/* WARNING: This file is auto generated.
*
* Do not edit directly.
*/
"""
css_warning_message = """
/* Copyright (c) 2014 The Chromium Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file. */
/* WARNING: This file is auto-generated.
*
* Do not edit directly.
*/
"""
def GenerateJS(load_sequence,
use_include_tags_for_scripts=False,
dir_for_include_tag_root=None):
f = StringIO.StringIO()
GenerateJSToFile(f,
load_sequence,
use_include_tags_for_scripts,
dir_for_include_tag_root)
return f.getvalue()
def GenerateJSToFile(f,
load_sequence,
use_include_tags_for_scripts=False,
dir_for_include_tag_root=None):
if use_include_tags_for_scripts and dir_for_include_tag_root == None:
raise Exception('Must provide dir_for_include_tag_root')
f.write(js_warning_message)
f.write('\n')
loader = load_sequence[0].loader
platform_script = loader.LoadRawScript('platform.min.js')
f.write(platform_script.contents)
polymer_script = loader.LoadRawScript('polymer.min.js')
f.write(polymer_script.contents)
f.write('\n')
f.write("window._TVCM_IS_COMPILED = true;\n")
for module in load_sequence:
module.AppendJSContentsToFile(f,
use_include_tags_for_scripts,
dir_for_include_tag_root)
class ExtraScript(object):
def __init__(self, script_id=None, text_content=None, content_type=None):
if script_id != None:
assert script_id[0] != '#'
self.script_id = script_id
self.text_content = text_content
self.content_type = content_type
def WriteToFile(self, output_file):
attrs = []
if self.script_id:
attrs.append('id="%s"' % self.script_id)
if self.content_type:
attrs.append('content-type="%s"' % self.content_type)
if len(attrs) > 0:
output_file.write('<script %s>\n' % ' '.join(attrs))
else:
output_file.write('<script>\n')
if self.text_content:
output_file.write(self.text_content)
output_file.write('</script>\n')
def GenerateStandaloneHTMLAsString(*args, **kwargs):
f = StringIO.StringIO()
GenerateStandaloneHTMLToFile(f, *args, **kwargs)
return f.getvalue()
def GenerateStandaloneHTMLToFile(output_file,
load_sequence,
title,
flattened_js_url=None,
extra_scripts=None):
extra_scripts = extra_scripts or []
output_file.write("""<!DOCTYPE HTML>
<html>
<head i18n-values="dir:textdirection;">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>%s</title>
""" % title)
loader = load_sequence[0].loader
written_style_sheets = set()
class HTMLGenerationController(html_generation_controller.HTMLGenerationController):
def __init__(self, module):
self.module = module
def GetHTMLForStylesheetHRef(self, href):
resource = self.module.HRefToResource(
href, '<link rel="stylesheet" href="%s">' % href)
style_sheet = loader.LoadStyleSheet(resource.name)
if style_sheet in written_style_sheets:
return None
written_style_sheets.add(style_sheet)
return "<style>\n%s\n</style>" % style_sheet.contents_with_inlined_images
for module in load_sequence:
ctl = HTMLGenerationController(module)
module.AppendHTMLContentsToFile(output_file, ctl)
if flattened_js_url:
output_file.write('<script src="%s"></script>\n' % flattened_js_url)
else:
output_file.write('<script>\n')
output_file.write(GenerateJS(load_sequence))
output_file.write('</script>\n')
for extra_script in extra_scripts:
extra_script.WriteToFile(output_file)
output_file.write("""</head>
<body>
""")
output_file.write("""</body>
</html>
""")
| 28.217647 | 86 | 0.665624 |
acf32ff5f22d3fc611c17e39eaa1a8cf00502acd | 20,191 | py | Python | python/cuml/naive_bayes/naive_bayes.py | zbjornson/cuml | 39e1bb2290dcb82b7c935f79164f098400158467 | [
"Apache-2.0"
] | null | null | null | python/cuml/naive_bayes/naive_bayes.py | zbjornson/cuml | 39e1bb2290dcb82b7c935f79164f098400158467 | [
"Apache-2.0"
] | null | null | null | python/cuml/naive_bayes/naive_bayes.py | zbjornson/cuml | 39e1bb2290dcb82b7c935f79164f098400158467 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cupy as cp
import cupy.prof
import math
import warnings
from cuml.common import with_cupy_rmm
from cuml.common import CumlArray
from cuml.common.base import Base
from cuml.common.input_utils import input_to_cuml_array
from cuml.common.kernel_utils import cuda_kernel_factory
from cuml.common.import_utils import has_scipy
from cuml.prims.label import make_monotonic
from cuml.prims.label import check_labels
from cuml.prims.label import invert_labels
from cuml.metrics import accuracy_score
def count_features_coo_kernel(float_dtype, int_dtype):
"""
A simple reduction kernel that takes in a sparse (COO) array
of features and computes the sum (or sum squared) for each class
label
"""
kernel_str = r'''({0} *out,
int *rows, int *cols,
{0} *vals, int nnz,
int n_rows, int n_cols,
{1} *labels,
int n_classes,
bool square) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= nnz) return;
int row = rows[i];
int col = cols[i];
{0} val = vals[i];
if(square) val *= val;
{1} label = labels[row];
atomicAdd(out + ((col * n_classes) + label), val);
}'''
return cuda_kernel_factory(kernel_str,
(float_dtype, int_dtype),
"count_features_coo")
def count_classes_kernel(float_dtype, int_dtype):
kernel_str = r'''
({0} *out, int n_rows, {1} *labels) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
if(row >= n_rows) return;
{1} label = labels[row];
atomicAdd(out + label, 1);
}'''
return cuda_kernel_factory(kernel_str,
(float_dtype, int_dtype),
"count_classes")
def count_features_dense_kernel(float_dtype, int_dtype):
kernel_str = r'''
({0} *out,
{0} *in,
int n_rows,
int n_cols,
{1} *labels,
int n_classes,
bool square,
bool rowMajor) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if(row >= n_rows || col >= n_cols) return;
{0} val = !rowMajor ?
in[col * n_rows + row] : in[row * n_cols + col];
if(val == 0.0) return;
if(square) val *= val;
{1} label = labels[row];
atomicAdd(out + ((col * n_classes) + label), val);
}'''
return cuda_kernel_factory(kernel_str,
(float_dtype, int_dtype,),
"count_features_dense")
class MultinomialNB(Base):
# TODO: Make this extend cuml.Base:
# https://github.com/rapidsai/cuml/issues/1834
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification
with discrete features (e.g., word counts for text classification).
The multinomial distribution normally requires integer feature counts.
However, in practice, fractional counts such as tf-idf may also work.
NOTE: While cuML only provides the multinomial version currently, the
other variants are planned to be included soon. Refer to the
corresponding Github issue for updates:
https://github.com/rapidsai/cuml/issues/1666
Examples
--------
Load the 20 newsgroups dataset from Scikit-learn and train a
Naive Bayes classifier.
.. code-block:: python
import cupy as cp
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from cuml.naive_bayes import MultinomialNB
# Load corpus
twenty_train = fetch_20newsgroups(subset='train',
shuffle=True, random_state=42)
# Turn documents into term frequency vectors
count_vect = CountVectorizer()
features = count_vect.fit_transform(twenty_train.data)
# Put feature vectors and labels on the GPU
X = cp.sparse.csr_matrix(features.tocsr(), dtype=cp.float32)
y = cp.asarray(twenty_train.target, dtype=cp.int32)
# Train model
model = MultinomialNB()
model.fit(X, y)
# Compute accuracy on training set
model.score(X, y)
Output:
.. code-block:: python
0.9244298934936523
"""
    @with_cupy_rmm
    def __init__(self,
                 alpha=1.0,
                 fit_prior=True,
                 class_prior=None,
                 output_type=None,
                 handle=None):
        """
        Create new multinomial Naive Bayes instance

        Parameters
        ----------
        alpha : float
            Additive (Laplace/Lidstone) smoothing parameter (0 for no
            smoothing).
        fit_prior : boolean
            Whether to learn class prior probabilities or not. If false, a
            uniform prior will be used.
        class_prior : array-like, size (n_classes)
            Prior probabilities of the classes. If specified, the priors
            are not adjusted according to the data.
        """
        super(MultinomialNB, self).__init__(handle=handle,
                                            output_type=output_type)
        self.alpha = alpha
        self.fit_prior = fit_prior
        if class_prior is not None:
            self._class_prior, *_ = input_to_cuml_array(class_prior)
        else:
            # NOTE(review): attribute-name mismatch — the branch above sets
            # `_class_prior` while this one sets `_class_prior_` (trailing
            # underscore), so only one of the two names ever exists on an
            # instance. Confirm which spelling downstream code reads and
            # unify.
            self._class_prior_ = None
        # Model state populated during fitting.
        self.fit_called_ = False
        self._classes_ = None
        self._n_classes_ = 0
        self._n_features_ = None
        # Needed until Base no longer assumed cumlHandle
        self.handle = None
@cp.prof.TimeRangeDecorator(message="fit()", color_id=0)
@with_cupy_rmm
def fit(self, X, y, sample_weight=None):
"""
Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, cupy sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like shape (n_samples) Target values.
sample_weight : array-like of shape (n_samples)
Weights applied to individial samples (1. for unweighted).
"""
self._set_n_features_in(X)
return self.partial_fit(X, y, sample_weight)
@cp.prof.TimeRangeDecorator(message="fit()", color_id=0)
@with_cupy_rmm
def _partial_fit(self, X, y, sample_weight=None, _classes=None):
self._set_output_type(X)
if has_scipy():
from scipy.sparse import isspmatrix as scipy_sparse_isspmatrix
else:
from cuml.common.import_utils import dummy_function_always_false \
as scipy_sparse_isspmatrix
# todo: use a sparse CumlArray style approach when ready
# https://github.com/rapidsai/cuml/issues/2216
if scipy_sparse_isspmatrix(X) or cp.sparse.isspmatrix(X):
X = X.tocoo()
rows = cp.asarray(X.row, dtype=X.row.dtype)
cols = cp.asarray(X.col, dtype=X.col.dtype)
data = cp.asarray(X.data, dtype=X.data.dtype)
X = cp.sparse.coo_matrix((data, (rows, cols)), shape=X.shape)
else:
X = input_to_cuml_array(X, order='K').array.to_output('cupy')
y = input_to_cuml_array(y).array.to_output('cupy')
Y, label_classes = make_monotonic(y, copy=True)
if not self.fit_called_:
self.fit_called_ = True
if _classes is not None:
_classes, *_ = input_to_cuml_array(_classes, order='K')
check_labels(Y, _classes.to_output('cupy'))
self._classes_ = _classes
else:
self._classes_ = CumlArray(data=label_classes)
self._n_classes_ = self._classes_.shape[0]
self._n_features_ = X.shape[1]
self._init_counters(self._n_classes_, self._n_features_,
X.dtype)
else:
check_labels(Y, self._classes_)
self._count(X, Y)
self._update_feature_log_prob(self.alpha)
self._update_class_log_prior(class_prior=self._class_prior_)
return self
@with_cupy_rmm
def update_log_probs(self):
"""
Updates the log probabilities. This enables lazy update for
applications like distributed Naive Bayes, so that the model
can be updated incrementally without incurring this cost each
time.
"""
self._update_feature_log_prob(self.alpha)
self._update_class_log_prior(class_prior=self._class_prior_)
@with_cupy_rmm
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""
Incremental fit on a batch of samples.
This method is expected to be called several times consecutively on
different chunks of a dataset so as to implement out-of-core or online
learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible (as long
as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, cupy sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features
y : array-like of shape (n_samples) Target values.
classes : array-like of shape (n_classes)
List of all the classes that can possibly appear in the y
vector. Must be provided at the first call to partial_fit,
can be omitted in subsequent calls.
sample_weight : array-like of shape (n_samples)
Weights applied to individual samples (1. for
unweighted). Currently sample weight is ignored
Returns
-------
self : object
"""
return self._partial_fit(X, y, sample_weight=sample_weight,
_classes=classes)
@cp.prof.TimeRangeDecorator(message="predict()", color_id=1)
@with_cupy_rmm
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : cupy.ndarray of shape (n_samples)
"""
out_type = self._get_output_type(X)
if has_scipy():
from scipy.sparse import isspmatrix as scipy_sparse_isspmatrix
else:
from cuml.common.import_utils import dummy_function_always_false \
as scipy_sparse_isspmatrix
# todo: use a sparse CumlArray style approach when ready
# https://github.com/rapidsai/cuml/issues/2216
if scipy_sparse_isspmatrix(X) or cp.sparse.isspmatrix(X):
X = X.tocoo()
rows = cp.asarray(X.row, dtype=X.row.dtype)
cols = cp.asarray(X.col, dtype=X.col.dtype)
data = cp.asarray(X.data, dtype=X.data.dtype)
X = cp.sparse.coo_matrix((data, (rows, cols)), shape=X.shape)
else:
X = input_to_cuml_array(X, order='K').array.to_output('cupy')
jll = self._joint_log_likelihood(X)
indices = cp.argmax(jll, axis=1).astype(self._classes_.dtype)
y_hat = invert_labels(indices, classes=self._classes_)
return CumlArray(data=y_hat).to_output(out_type)
@with_cupy_rmm
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the log-probability of the samples for each class in the
model. The columns correspond to the classes in sorted order, as
they appear in the attribute classes_.
"""
out_type = self._get_output_type(X)
if has_scipy():
from scipy.sparse import isspmatrix as scipy_sparse_isspmatrix
else:
from cuml.common.import_utils import dummy_function_always_false \
as scipy_sparse_isspmatrix
# todo: use a sparse CumlArray style approach when ready
# https://github.com/rapidsai/cuml/issues/2216
if scipy_sparse_isspmatrix(X) or cp.sparse.isspmatrix(X):
X = X.tocoo()
rows = cp.asarray(X.row, dtype=X.row.dtype)
cols = cp.asarray(X.col, dtype=X.col.dtype)
data = cp.asarray(X.data, dtype=X.data.dtype)
X = cp.sparse.coo_matrix((data, (rows, cols)), shape=X.shape)
else:
X = input_to_cuml_array(X, order='K').array.to_output('cupy')
jll = self._joint_log_likelihood(X)
# normalize by P(X) = P(f_1, ..., f_n)
# Compute log(sum(exp()))
# Subtract max in exp to prevent inf
a_max = cp.amax(jll, axis=1, keepdims=True)
exp = cp.exp(jll - a_max)
logsumexp = cp.log(cp.sum(exp, axis=1))
a_max = cp.squeeze(a_max, axis=1)
log_prob_x = a_max + logsumexp
if log_prob_x.ndim < 2:
log_prob_x = log_prob_x.reshape((1, log_prob_x.shape[0]))
result = jll - log_prob_x.T
return CumlArray(result).to_output(out_type)
@with_cupy_rmm
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in the model.
The columns correspond to the classes in sorted order, as they
appear in the attribute classes_.
"""
out_type = self._get_output_type(X)
result = cp.exp(self.predict_log_proba(X))
return CumlArray(result).to_output(out_type)
@with_cupy_rmm
def score(self, X, y, sample_weight=None):
"""
Return the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy which is a
harsh metric since you require for each sample that each label set be
correctly predicted.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. Currently, sample weight is ignored
Returns
-------
score : float Mean accuracy of self.predict(X) with respect to y.
"""
y_hat = self.predict(X)
return accuracy_score(y_hat, cp.asarray(y, dtype=y.dtype))
def _init_counters(self, n_effective_classes, n_features, dtype):
self._class_count_ = CumlArray.zeros(n_effective_classes,
order="F", dtype=dtype)
self._feature_count_ = CumlArray.zeros((n_effective_classes,
n_features),
order="F", dtype=dtype)
def _count(self, X, Y):
"""
Sum feature counts & class prior counts and add to current model.
Parameters
----------
X : cupy.ndarray or cupy.sparse matrix of size
(n_rows, n_features)
Y : cupy.array of monotonic class labels
"""
if X.ndim != 2:
raise ValueError("Input samples should be a 2D array")
if Y.dtype != self._classes_.dtype:
warnings.warn("Y dtype does not match classes_ dtype. Y will be "
"converted, which will increase memory consumption")
counts = cp.zeros((self._n_classes_, self._n_features_), order="F",
dtype=X.dtype)
class_c = cp.zeros(self._n_classes_, order="F", dtype=X.dtype)
n_rows = X.shape[0]
n_cols = X.shape[1]
labels_dtype = self._classes_.dtype
if cp.sparse.isspmatrix(X):
X = X.tocoo()
count_features_coo = count_features_coo_kernel(X.dtype,
labels_dtype)
count_features_coo((math.ceil(X.nnz / 32),), (32,),
(counts,
X.row,
X.col,
X.data,
X.nnz,
n_rows,
n_cols,
Y,
self._n_classes_, False))
else:
count_features_dense = count_features_dense_kernel(X.dtype,
labels_dtype)
count_features_dense((math.ceil(n_rows / 32),
math.ceil(n_cols / 32), 1),
(32, 32, 1),
(counts,
X,
n_rows,
n_cols,
Y,
self._n_classes_,
False,
X.flags["C_CONTIGUOUS"]))
count_classes = count_classes_kernel(X.dtype, labels_dtype)
count_classes((math.ceil(n_rows / 32),), (32,),
(class_c, n_rows, Y))
self._feature_count_ += counts
self._class_count_ += class_c
def _update_class_log_prior(self, class_prior=None):
if class_prior is not None:
if class_prior.shape[0] != self._n_classes_:
raise ValueError("Number of classes must match "
"number of priors")
self._class_log_prior_ = cp.log(class_prior)
elif self.fit_prior:
log_class_count = cp.log(self._class_count_)
self._class_log_prior_ = log_class_count - \
cp.log(self._class_count_.sum())
else:
self._class_log_prior_ = cp.full(self._n_classes_,
-1*math.log(self._n_classes_))
def _update_feature_log_prob(self, alpha):
"""
Apply add-lambda smoothing to raw counts and recompute
log probabilities
Parameters
----------
alpha : float amount of smoothing to apply (0. means no smoothing)
"""
smoothed_fc = self._feature_count_ + alpha
smoothed_cc = smoothed_fc.sum(axis=1).reshape(-1, 1)
self._feature_log_prob_ = (cp.log(smoothed_fc) -
cp.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""
Calculate the posterior log probability of the samples X
Parameters
----------
X : array-like of size (n_samples, n_features)
"""
ret = X.dot(self._feature_log_prob_.T)
ret += self._class_log_prior_
return ret
| 32.93801 | 79 | 0.581546 |
acf33156acedd4bd2b769f154e765306887037e5 | 4,617 | py | Python | scanpy/tools/draw_graph.py | gioelelm/scanpy | 97391a0e7908b9644b2d6640c8e26d37bdc7811e | [
"BSD-3-Clause"
] | null | null | null | scanpy/tools/draw_graph.py | gioelelm/scanpy | 97391a0e7908b9644b2d6640c8e26d37bdc7811e | [
"BSD-3-Clause"
] | null | null | null | scanpy/tools/draw_graph.py | gioelelm/scanpy | 97391a0e7908b9644b2d6640c8e26d37bdc7811e | [
"BSD-3-Clause"
] | 1 | 2019-02-18T07:39:59.000Z | 2019-02-18T07:39:59.000Z | # Author: F. Alex Wolf (http://falexwolf.de)
"""Graph drawing for the single-cell graph.
References
----------
- General: https://en.wikipedia.org/wiki/Force-directed_graph_drawing
- Suggested for drawing knn-graphs in the context of single-cell
transcriptomics: Weinreb et al., bioRxiv doi:10.1101/090332 (2016)
"""
import numpy as np
from .. import utils
from ..data_structs.data_graph import add_or_update_graph_in_adata
def draw_graph(adata,
               layout='fr',
               root=None,
               n_neighbors=30,
               n_pcs=50,
               random_state=0,
               recompute_pca=False,
               recompute_distances=False,
               recompute_graph=False,
               adjacency=None,
               n_jobs=None,
               copy=False,
               **kwargs):
    """Force-directed graph drawing [Fruchterman91]_ [Weinreb17]_ [Csardi06]_.
    Often a good alternative to tSNE, but runs considerably slower.
    `[source] <tl.draw_graph_>`__ `Force-directed graph drawing`_ describes a
    class of long-established algorithms for visualizing graphs. It has been
    suggested for visualizing single-cell data by [Weinreb17]_. Here, by
    default, the Fruchterman & Reingold [Fruchterman91]_ algorithm is used; many
    other layouts are available. Uses the igraph implementation [Csardi06]_.
    .. _Force-directed graph drawing: https://en.wikipedia.org/wiki/Force-directed_graph_drawing
    .. _tl.draw_graph: https://github.com/theislab/scanpy/tree/master/scanpy/tools/draw_graph.py
    Parameters
    ----------
    adata : AnnData
        Annotated data matrix.
    layout : str, optional (default: 'fr')
        Any valid igraph layout: http://igraph.org/c/doc/igraph-Layout.html. Of
        particular interest are 'fr' (Fruchterman Reingold), 'grid_fr' (Grid
        Fruchterman Reingold, faster than 'fr'), 'kk' (Kamadi Kawai', slower
        than 'fr'), 'lgl' (Large Graph, very fast), 'drl' (Distributed Recursive
        Layout, pretty fast) and 'rt' (Reingold Tilford tree layout).
    n_neighbors : int
        Number of nearest neighbors in graph.
    n_pcs : int
        Number of PCs used to compute distances.
    **kwargs : further parameters
        Parameters of chosen igraph algorithm. See, e.g.,
        http://igraph.org/python/doc/igraph.Graph-class.html#layout_fruchterman_reingold.
    Returns
    -------
    Returns or updates adata depending on `copy` with
    `"X_draw_graph_" + layout`, the graph-drawing coordinates (adata.smp)
    References
    ----------
    - The package "igraph", which provides the drawing implementations used
      here: Csardi & Nepusz, InterJournal Complex Systems, 1695 (2006)
    - Suggestion to use the "spring" graph-drawing algorithm of the package D3js
      for single-cell data: Weinreb et al., bioRxiv doi:10.1101/090332 (2016)
    """
    # Imported lazily to avoid a circular import at module load time.
    from .. import logging as logg
    logg.info('drawing single-cell graph using layout "{}"'.format(layout),
              r=True)
    avail_layouts = {'fr', 'drl', 'kk', 'grid_fr', 'lgl', 'rt', 'rt_circular'}
    if layout not in avail_layouts:
        raise ValueError('Provide a valid layout, one of {}.'.format(avail_layouts))
    adata = adata.copy() if copy else adata
    # Ensure PCA/distances/knn-graph are present in adata (recomputing only
    # the pieces the caller asked for).
    add_or_update_graph_in_adata(
        adata,
        n_neighbors=n_neighbors,
        n_pcs=n_pcs,
        recompute_pca=recompute_pca,
        recompute_distances=recompute_distances,
        recompute_graph=recompute_graph,
        n_jobs=n_jobs)
    # NOTE(review): the `adjacency` parameter is ignored -- it is
    # unconditionally overwritten here. Confirm whether callers rely on it.
    adjacency = adata.add['data_graph_norm_weights']
    g = utils.get_igraph_from_adjacency(adjacency)
    if layout in {'fr', 'drl', 'kk', 'grid_fr'}:
        # Seed the RNG so the random initial coordinates (and hence the
        # layout) are reproducible for a given random_state.
        np.random.seed(random_state)
        init_coords = np.random.random((adjacency.shape[0], 2)).tolist()
        ig_layout = g.layout(layout,  # weights='weight',
                             seed=init_coords, **kwargs)
    elif 'rt' in layout:
        # Tree layouts take an optional root vertex (igraph expects a list).
        if root is not None: root = [root]
        ig_layout = g.layout(layout, root=root, **kwargs)
    else:
        ig_layout = g.layout(layout, **kwargs)
    # Record which layouts have been computed so far on this AnnData.
    if 'draw_graph_layout' in adata.add:
        adata.add['draw_graph_layout'] = list(adata.add['draw_graph_layout']) + [layout]
    else:
        adata.add['draw_graph_layout'] = [layout]
    smp_key = 'X_draw_graph_' + layout
    adata.smp[smp_key] = np.array(ig_layout.coords)
    logg.m('    finished', t=True, end=' ')
    logg.m('and added\n'
           '    "{}", graph_drawing coordinates (adata.smp)\n'
           '    "draw_graph_layout", the chosen layout (adata.add)'
           .format(smp_key))
    return adata if copy else None
acf3329e4747dbaa4a571f7d8d3ca4d89e27bdd6 | 3,852 | py | Python | tests/svm_test.py | nbanmp/mythril | 3d7350ca2e87eb995b5ced0a4ceb87f53f3c1da0 | [
"MIT"
] | null | null | null | tests/svm_test.py | nbanmp/mythril | 3d7350ca2e87eb995b5ced0a4ceb87f53f3c1da0 | [
"MIT"
] | null | null | null | tests/svm_test.py | nbanmp/mythril | 3d7350ca2e87eb995b5ced0a4ceb87f53f3c1da0 | [
"MIT"
] | null | null | null | import json
from mythril.analysis.symbolic import SymExecWrapper
from mythril.analysis.callgraph import generate_graph
from mythril.ether.ethcontract import ETHContract
from mythril.ether.soliditycontract import SolidityContract
from mythril.laser.ethereum.state import GlobalState, MachineState, Account
from mythril.laser.ethereum import svm
from tests import *
class LaserEncoder(json.JSONEncoder):
    """JSON encoder that stringifies any object the default encoder cannot
    serialize (z3 expressions, laser-internal objects, ...)."""

    def default(self, o):
        # Both branches of the original returned str(o) -- the special-case
        # check for z3 objects (o.__module__ == "z3.z3") was dead code, so
        # every non-serializable object is simply stringified.
        return str(o)
def _all_info(laser):
    """Serialize a LaserEVM instance into a plain dict of accounts, nodes,
    edges and search statistics, suitable for JSON dumping (with
    LaserEncoder) in the regression tests."""
    accounts = {}
    for address, _account in laser.world_state.accounts.items():
        account = _account.as_dict
        # Replace nested objects with their serializable representations.
        account["code"] = account["code"].instruction_list
        account["balance"] = str(account["balance"])
        accounts[address] = account
    nodes = {}
    for uid, node in laser.nodes.items():
        states = []
        for state in node.states:
            if isinstance(state, MachineState):
                states.append(state.as_dict)
            elif isinstance(state, GlobalState):
                environment = state.environment.as_dict
                # Keep only the account address, not the whole account object.
                environment["active_account"] = environment["active_account"].address
                states.append(
                    {
                        "accounts": state.accounts.keys(),
                        "environment": environment,
                        "mstate": state.mstate.as_dict,
                    }
                )
        nodes[uid] = {
            "uid": node.uid,
            "contract_name": node.contract_name,
            "start_addr": node.start_addr,
            "states": states,
            "constraints": node.constraints,
            "function_name": node.function_name,
            "flags": str(node.flags),
        }
    edges = [edge.as_dict for edge in laser.edges]
    return {
        "accounts": accounts,
        "nodes": nodes,
        "edges": edges,
        "total_states": laser.total_states,
        "max_depth": laser.max_depth,
    }
class SVMTestCase(BaseTestCase):
    """Regression tests for the laser symbolic virtual machine."""

    def setUp(self):
        super(SVMTestCase, self).setUp()
        # Reset the global node uid counter so node ids (and therefore the
        # serialized output) are deterministic across test runs.
        svm.gbl_next_uid = 0

    def test_laser_result(self):
        """Symbolically execute every test contract and compare the laser
        dump against the expected JSON fixture."""
        for input_file in TESTDATA_INPUTS_CONTRACTS.iterdir():
            if input_file.name in ["weak_random.sol", "environments.sol"]:
                continue
            output_expected = TESTDATA_OUTPUTS_EXPECTED_LASER_RESULT / (
                input_file.name + ".json"
            )
            output_current = TESTDATA_OUTPUTS_CURRENT_LASER_RESULT / (
                input_file.name + ".json"
            )
            disassembly = SolidityContract(str(input_file)).disassembly
            account = Account("0x0000000000000000000000000000000000000000", disassembly)
            accounts = {account.address: account}
            laser = svm.LaserEVM(accounts, max_depth=22)
            laser.sym_exec(account.address)
            laser_info = _all_info(laser)
            output_current.write_text(
                json.dumps(laser_info, cls=LaserEncoder, indent=4)
            )
            # Bug fix: the original compared ``output_expected.read_text()``
            # against itself, so the check could never fail. Compare the
            # freshly generated output against the expected fixture instead.
            if not (output_expected.read_text() == output_current.read_text()):
                self.found_changed_files(input_file, output_expected, output_current)
        self.assert_and_show_changed_files()

    def runTest(self):
        """Smoke test: build the call graph HTML for a known contract and
        check that its disassembly appears in the output."""
        code = "0x60606040525b603c5b60006010603e565b9050593681016040523660008237602060003683856040603f5a0204f41560545760206000f35bfe5b50565b005b73c3b2ae46792547a96b9f84405e36d0e07edcd05c5b905600a165627a7a7230582062a884f947232ada573f95940cce9c8bfb7e4e14e21df5af4e884941afb55e590029"
        contract = ETHContract(code)
        sym = SymExecWrapper(contract, "0xd0a6E6C543bC68Db5db3A191B171A77407Ff7ccf")
        html = generate_graph(sym)
        self.assertTrue("0 PUSH1 0x60\\n2 PUSH1 0x40\\n4 MSTORE\\n5 JUMPDEST" in html)
acf33329fc3633abd5e8abf4ef73251c5e830c67 | 5,833 | py | Python | packages/dcos-integration-test/extra/test_units.py | nkhanal0/dcos | fe0571b6519c86b6c33db4af42c63ab3e9087dcf | [
"Apache-2.0"
] | 1 | 2020-03-24T12:04:41.000Z | 2020-03-24T12:04:41.000Z | packages/dcos-integration-test/extra/test_units.py | nkhanal0/dcos | fe0571b6519c86b6c33db4af42c63ab3e9087dcf | [
"Apache-2.0"
] | 3 | 2018-02-15T16:56:48.000Z | 2018-08-22T19:51:05.000Z | packages/dcos-integration-test/extra/test_units.py | nkhanal0/dcos | fe0571b6519c86b6c33db4af42c63ab3e9087dcf | [
"Apache-2.0"
] | 1 | 2018-02-15T15:14:27.000Z | 2018-02-15T15:14:27.000Z | import glob
import logging
import os
import pathlib
import stat
import subprocess
import pytest
__maintainer__ = 'gpaul'
__contact__ = 'dcos-security@mesosphere.io'
@pytest.mark.supportedwindows
def test_verify_units():
    """Test that all systemd units are valid."""
    # Directives that exist in newer systemd versions but which an older
    # systemd (baseline: systemd 219, shipped with CentOS 7.2.1511) reports
    # as unknown. systemd ignores directives it does not understand, so such
    # complaints are harmless and must not fail the test.
    ignore_new_directives = ["TasksMax"]

    def _line_is_acceptable(line):
        """Return True if this systemd-analyze output line is not a problem."""
        # `systemd-analyze verify` also checks transitively loaded units.
        # Third-party unit files are not our concern, so only inspect lines
        # that mention a DC/OS unit.
        if "dcos-" not in line:
            return True
        # When systemd does not understand a directive it prints a line like:
        #
        #   [/etc/systemd/system/foo.service:5] Unknown lvalue 'TasksMax' in section 'Service'
        #
        # Accept the line only if it is such an 'unknown lvalue' complaint
        # about one of the whitelisted new directives.
        #
        # Bug fix: the original loop returned False as soon as the *first*
        # directive in the list failed to match the line, so any additional
        # entry in ignore_new_directives could never take effect.
        return any("Unknown lvalue '%s'" % directive in line
                   for directive in ignore_new_directives)

    def _check_units(path):
        """Verify all the units given by `path'"""
        for file in glob.glob(path):
            cmd = subprocess.run(
                ["/usr/bin/systemd-analyze", "verify", "--no-pager", file],
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                universal_newlines=True)
            # systemd-analyze returns 0 even if there were warnings, so we
            # inspect the command output instead of the exit code.
            for line in cmd.stdout.split("\n"):
                if not _line_is_acceptable(line):
                    pytest.fail("Invalid systemd unit: " + line)

    _check_units("/etc/systemd/system/dcos-*.service")
    _check_units("/etc/systemd/system/dcos-*.socket")
@pytest.mark.supportedwindows
def test_socket_units():
    """Test that socket units configure socket files in /run/dcos
    that are owned by 'dcos_adminrouter'.
    """
    def _check_unit(file):
        """Assert the ownership/mode invariants for one dcos-*.socket unit."""
        logging.info("Checking socket unit {}".format(file))
        out = subprocess.check_output(
            ["/usr/bin/systemctl", "show", "--no-pager", os.path.basename(file)],
            stderr=subprocess.STDOUT,
            universal_newlines=True)
        user = ""
        group = ""
        mode = ""
        had_unix_socket = False
        for line in out.split("\n"):
            # Bug fix: split on the *first* '=' only. `systemctl show` prints
            # KEY=VALUE lines whose VALUE may itself contain '='; a plain
            # split("=") produced more than two parts for such lines and
            # silently skipped them.
            parts = line.split("=", 1)
            if len(parts) != 2:
                continue
            k, v = parts
            if k == "SocketUser":
                user = v
            if k == "SocketGroup":
                group = v
            if k == "ListenStream":
                # Unix sockets are distinguished from IP sockets by having a '/' as the first
                # character in the value of the ListenStream directive.
                if v.startswith("/"):
                    had_unix_socket = True
                    assert v.startswith("/run/dcos/"), "DC/OS unix sockets must go in the /run/dcos directory"
            if k == "SocketMode":
                mode = v
        if not had_unix_socket:
            # This socket file doesn't declare any unix sockets, ignore.
            return
        assert user == "root"
        assert group == "dcos_adminrouter"
        assert mode == "0660"

    for file in glob.glob("/etc/systemd/system/dcos-*.socket"):
        _check_unit(file)
@pytest.mark.supportedwindows
def test_socket_files():
    """Test that all socket files in /run/dcos are owned by 'dcos_adminrouter'."""
    for entry in glob.glob("/run/dcos/*"):
        candidate = pathlib.Path(entry)
        # Non-socket entries (regular files, directories, ...) are of no
        # interest here.
        if candidate.is_socket():
            logging.info("Checking socket file {}".format(entry))
            permission_bits = stat.S_IMODE(candidate.stat().st_mode)
            assert candidate.owner() == "root"
            assert candidate.group() == "dcos_adminrouter"
            assert permission_bits == 0o660
acf333d206240394d20a58dafcf82df7c0eb76df | 26,558 | py | Python | pypy/interpreter/argument.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 12 | 2016-01-06T07:10:28.000Z | 2021-05-13T23:02:02.000Z | pypy/interpreter/argument.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | null | null | null | pypy/interpreter/argument.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 2 | 2016-07-29T07:09:50.000Z | 2016-10-16T08:50:26.000Z | """
Arguments objects.
"""
from pypy.interpreter.error import OperationError
class AbstractArguments:
def parse(self, fnname, signature, defaults_w=[]):
"""Parse args and kwargs to initialize a frame
according to the signature of code object.
"""
try:
return self.match_signature(signature, defaults_w)
except ArgErr, e:
raise OperationError(self.space.w_TypeError,
self.space.wrap(e.getmsg(fnname)))
def parse_into_scope(self, scope_w, fnname, signature, defaults_w=[]):
"""Parse args and kwargs to initialize a frame
according to the signature of code object.
Store the argumentvalues into scope_w.
scope_w must be big enough for signature.
"""
argnames, varargname, kwargname = signature
has_vararg = varargname is not None
has_kwarg = kwargname is not None
try:
return self._match_signature(scope_w, argnames, has_vararg,
has_kwarg, defaults_w, 0, None)
except ArgErr, e:
raise OperationError(self.space.w_TypeError,
self.space.wrap(e.getmsg(fnname)))
def frompacked(space, w_args=None, w_kwds=None):
"""Convenience static method to build an Arguments
from a wrapped sequence and a wrapped dictionary."""
return Arguments(space, [], w_stararg=w_args, w_starstararg=w_kwds)
frompacked = staticmethod(frompacked)
def topacked(self):
"""Express the Argument object as a pair of wrapped w_args, w_kwds."""
space = self.space
args_w, kwds_w = self.unpack()
w_args = space.newtuple(args_w)
w_kwds = space.newdict()
for key, w_value in kwds_w.items():
space.setitem(w_kwds, space.wrap(key), w_value)
return w_args, w_kwds
def fromshape(space, (shape_cnt,shape_keys,shape_star,shape_stst), data_w):
args_w = data_w[:shape_cnt]
p = shape_cnt
kwds_w = {}
for i in range(len(shape_keys)):
kwds_w[shape_keys[i]] = data_w[p]
p += 1
if shape_star:
w_star = data_w[p]
p += 1
else:
w_star = None
if shape_stst:
w_starstar = data_w[p]
p += 1
else:
w_starstar = None
return Arguments(space, args_w, kwds_w, w_star, w_starstar)
fromshape = staticmethod(fromshape)
def prepend(self, w_firstarg):
"Return a new Arguments with a new argument inserted first."
return ArgumentsPrepended(self, w_firstarg)
def popfirst(self):
"""For optimization only: might return (w_firstarg, args_with_rest),
or might just raise IndexError.
"""
raise IndexError
def match_signature(self, signature, defaults_w):
"""Parse args and kwargs according to the signature of a code object,
or raise an ArgErr in case of failure.
"""
argnames, varargname, kwargname = signature
scopelen = len(argnames)
has_vararg = varargname is not None
has_kwarg = kwargname is not None
if has_vararg:
scopelen += 1
if has_kwarg:
scopelen += 1
scope_w = [None] * scopelen
self._match_signature(scope_w, argnames, has_vararg, has_kwarg, defaults_w, 0, None)
return scope_w
def unmatch_signature(self, signature, data_w):
"""kind of inverse of match_signature"""
args_w, kwds_w = self.unpack()
need_cnt = len(args_w)
need_kwds = kwds_w.keys()
space = self.space
argnames, varargname, kwargname = signature
cnt = len(argnames)
data_args_w = data_w[:cnt]
if varargname:
data_w_stararg = data_w[cnt]
cnt += 1
else:
data_w_stararg = space.newtuple([])
unfiltered_kwds_w = {}
if kwargname:
data_w_starargarg = data_w[cnt]
for w_key in space.unpackiterable(data_w_starargarg):
key = space.str_w(w_key)
w_value = space.getitem(data_w_starargarg, w_key)
unfiltered_kwds_w[key] = w_value
cnt += 1
assert len(data_w) == cnt
ndata_args_w = len(data_args_w)
if ndata_args_w >= need_cnt:
args_w = data_args_w[:need_cnt]
for argname, w_arg in zip(argnames[need_cnt:], data_args_w[need_cnt:]):
unfiltered_kwds_w[argname] = w_arg
assert not space.is_true(data_w_stararg)
else:
args_w = data_args_w[:]
for w_stararg in space.unpackiterable(data_w_stararg):
args_w.append(w_stararg)
assert len(args_w) == need_cnt
kwds_w = {}
for key in need_kwds:
kwds_w[key] = unfiltered_kwds_w[key]
return Arguments(self.space, args_w, kwds_w)
def normalize(self):
"""Return an instance of the Arguments class. (Instances of other
classes may not be suitable for long-term storage or multiple
usage.) Also force the type and validity of the * and ** arguments
to be checked now.
"""
args_w, kwds_w = self.unpack()
return Arguments(self.space, args_w, kwds_w)
def unpack(self):
""" Purely abstract
"""
raise NotImplementedError()
def firstarg(self):
""" Purely abstract
"""
raise NotImplementedError()
def _match_signature(self, scope_w, argnames, has_vararg=False, has_kwarg=False, defaults_w=[], blindargs=0, extravarargs=None):
""" Purely abstract
"""
raise NotImplementedError()
def fixedunpack(self, argcount):
""" Purely abstract
"""
raise NotImplementedError()
class ArgumentsPrepended(AbstractArguments):
def __init__(self, args, w_firstarg):
self.space = args.space
self.args = args
self.w_firstarg = w_firstarg
def firstarg(self):
"Return the first argument for inspection."
return self.w_firstarg
def popfirst(self):
return self.w_firstarg, self.args
def __repr__(self):
return 'ArgumentsPrepended(%r, %r)' % (self.args, self.w_firstarg)
def has_keywords(self):
return self.args.has_keywords()
def unpack(self):
arguments_w, kwds_w = self.args.unpack()
return ([self.w_firstarg] + arguments_w), kwds_w
def fixedunpack(self, argcount):
if argcount <= 0:
raise ValueError, "too many arguments (%d expected)" % argcount # XXX: Incorrect
return [self.w_firstarg] + self.args.fixedunpack(argcount - 1)
def _rawshape(self, nextra=0):
return self.args._rawshape(nextra + 1)
def _match_signature(self, scope_w, argnames, has_vararg=False, has_kwarg=False, defaults_w=[], blindargs=0, extravarargs=None):
"""Parse args and kwargs according to the signature of a code object,
or raise an ArgErr in case of failure.
Return the number of arguments filled in.
"""
if blindargs < len(argnames):
scope_w[blindargs] = self.w_firstarg
else:
if extravarargs is None:
extravarargs = [ self.w_firstarg ]
else:
extravarargs.append(self.w_firstarg)
return self.args._match_signature(scope_w, argnames, has_vararg,
has_kwarg, defaults_w,
blindargs + 1, extravarargs)
def flatten(self):
(shape_cnt, shape_keys, shape_star, shape_stst), data_w = self.args.flatten()
data_w.insert(0, self.w_firstarg)
return (shape_cnt + 1, shape_keys, shape_star, shape_stst), data_w
def num_args(self):
return self.args.num_args() + 1
def num_kwds(self):
return self.args.num_kwds()
class ArgumentsFromValuestack(AbstractArguments):
"""
Collects the arguments of a function call as stored on a PyFrame
valuestack.
Only for the case of purely positional arguments, for now.
"""
def __init__(self, space, frame, nargs=0):
self.space = space
self.frame = frame
self.nargs = nargs
def firstarg(self):
if self.nargs <= 0:
return None
return self.frame.peekvalue(self.nargs - 1)
def popfirst(self):
if self.nargs <= 0:
raise IndexError
frame = self.frame
newnargs = self.nargs-1
return (frame.peekvalue(newnargs),
ArgumentsFromValuestack(self.space, frame, newnargs))
def __repr__(self):
return 'ArgumentsFromValuestack(%r, %r)' % (self.frame, self.nargs)
def has_keywords(self):
return False
def unpack(self):
args_w = [None] * self.nargs
for i in range(self.nargs):
args_w[i] = self.frame.peekvalue(self.nargs - 1 - i)
return args_w, {}
def fixedunpack(self, argcount):
if self.nargs > argcount:
raise ValueError, "too many arguments (%d expected)" % argcount
elif self.nargs < argcount:
raise ValueError, "not enough arguments (%d expected)" % argcount
data_w = [None] * self.nargs
nargs = self.nargs
for i in range(nargs):
data_w[i] = self.frame.peekvalue(nargs - 1 - i)
return data_w
def _rawshape(self, nextra=0):
return nextra + self.nargs, (), False, False
def _match_signature(self, scope_w, argnames, has_vararg=False, has_kwarg=False, defaults_w=[], blindargs=0, extravarargs=None):
"""Parse args and kwargs according to the signature of a code object,
or raise an ArgErr in case of failure.
Return the number of arguments filled in.
"""
co_argcount = len(argnames)
if blindargs + self.nargs + len(defaults_w) < co_argcount:
raise ArgErrCount(blindargs + self.nargs , 0,
(co_argcount, has_vararg, has_kwarg),
defaults_w, co_argcount - blindargs -
self.nargs - len(defaults_w))
if blindargs + self.nargs > co_argcount and not has_vararg:
raise ArgErrCount(blindargs + self.nargs, 0,
(co_argcount, has_vararg, has_kwarg),
defaults_w, 0)
if blindargs + self.nargs >= co_argcount:
for i in range(co_argcount - blindargs):
scope_w[i + blindargs] = self.frame.peekvalue(self.nargs - 1 - i)
if has_vararg:
if blindargs > co_argcount:
stararg_w = extravarargs
for i in range(self.nargs):
stararg_w.append(self.frame.peekvalue(self.nargs - 1 - i))
else:
stararg_w = [None] * (self.nargs + blindargs - co_argcount)
for i in range(co_argcount - blindargs, self.nargs):
stararg_w[i - co_argcount + blindargs] = self.frame.peekvalue(self.nargs - 1 - i)
scope_w[co_argcount] = self.space.newtuple(stararg_w)
co_argcount += 1
else:
for i in range(self.nargs):
scope_w[i + blindargs] = self.frame.peekvalue(self.nargs - 1 - i)
ndefaults = len(defaults_w)
missing = co_argcount - self.nargs - blindargs
first_default = ndefaults - missing
for i in range(missing):
scope_w[self.nargs + blindargs + i] = defaults_w[first_default + i]
if has_vararg:
scope_w[co_argcount] = self.space.newtuple([])
co_argcount += 1
if has_kwarg:
scope_w[co_argcount] = self.space.newdict()
co_argcount += 1
return co_argcount
def flatten(self):
data_w = [None] * self.nargs
for i in range(self.nargs):
data_w[i] = self.frame.peekvalue(self.nargs - 1 - i)
return nextra + self.nargs, (), False, False, data_w
def num_args(self):
return self.nargs
def num_kwds(self):
return 0
class Arguments(AbstractArguments):
    """
    Collects the arguments of a function call.
    Instances should be considered immutable.

    Positional arguments live in ``arguments_w`` and keyword arguments in the
    ``kwds_w`` dict.  A still-packed ``*args`` iterable and ``**kwargs``
    mapping are kept in ``w_stararg``/``w_starstararg`` and only expanded
    lazily by _unpack(), so fast paths can avoid touching them.
    """
    ### Construction ###
    def __init__(self, space, args_w, kwds_w=None,
                 w_stararg=None, w_starstararg=None):
        # space: the object space used to wrap/unwrap application-level values
        # args_w: list of wrapped positional arguments
        # kwds_w: dict {'keyword': wrapped value}, or None if none yet
        # w_stararg / w_starstararg: packed * and ** arguments (lazy)
        self.space = space
        self.arguments_w = args_w
        self.kwds_w = kwds_w
        self.w_stararg = w_stararg
        self.w_starstararg = w_starstararg
    def num_args(self):
        # Count of positional arguments; forces lazy unpacking first.
        self._unpack()
        return len(self.arguments_w)
    def num_kwds(self):
        # Count of keyword arguments; forces lazy unpacking first.
        self._unpack()
        return len(self.kwds_w)
    def __repr__(self):
        # Debug representation showing only the populated slots.
        if self.w_starstararg is not None:
            return 'Arguments(%s, %s, %s, %s)' % (self.arguments_w,
                                                  self.kwds_w,
                                                  self.w_stararg,
                                                  self.w_starstararg)
        if self.w_stararg is None:
            if not self.kwds_w:
                return 'Arguments(%s)' % (self.arguments_w,)
            else:
                return 'Arguments(%s, %s)' % (self.arguments_w, self.kwds_w)
        else:
            return 'Arguments(%s, %s, %s)' % (self.arguments_w,
                                              self.kwds_w,
                                              self.w_stararg)
    ### Manipulation ###
    def unpack(self):
        "Return a ([w1,w2...], {'kw':w3...}) pair."
        self._unpack()
        return self.arguments_w, self.kwds_w
    def popfirst(self):
        # Return (first positional argument, new Arguments without it).
        # NOTE(review): raises IndexError if there is no positional argument.
        self._unpack()
        return self.arguments_w[0], Arguments(self.space, self.arguments_w[1:],
                                              kwds_w = self.kwds_w)
    def _unpack(self):
        "unpack the *arg and **kwd into w_arguments and kwds_w"
        # --- unpack the * argument now ---
        if self.w_stararg is not None:
            self.arguments_w += self.space.unpackiterable(self.w_stararg)
            self.w_stararg = None
        # --- unpack the ** argument now ---
        if self.kwds_w is None:
            self.kwds_w = {}
        if self.w_starstararg is not None:
            space = self.space
            w_starstararg = self.w_starstararg
            # maybe we could allow general mappings?
            if not space.is_true(space.isinstance(w_starstararg, space.w_dict)):
                raise OperationError(space.w_TypeError,
                                     space.wrap("argument after ** must be "
                                                "a dictionary"))
            # don't change the original yet,
            # in case something goes wrong
            d = self.kwds_w.copy()
            for w_key in space.unpackiterable(w_starstararg):
                try:
                    key = space.str_w(w_key)
                except OperationError, e:
                    if not e.match(space, space.w_TypeError):
                        raise
                    # non-string key: report it with the CPython wording
                    raise OperationError(space.w_TypeError,
                                         space.wrap("keywords must be strings"))
                if key in d:
                    raise OperationError(self.space.w_TypeError,
                                         self.space.wrap("got multiple values "
                                                         "for keyword argument "
                                                         "'%s'" % key))
                d[key] = space.getitem(w_starstararg, w_key)
            self.kwds_w = d
            self.w_starstararg = None
    def has_keywords(self):
        # True if there are keyword arguments, packed or already unpacked.
        return bool(self.kwds_w) or (self.w_starstararg is not None and
                                     self.space.is_true(self.w_starstararg))
    def fixedunpack(self, argcount):
        """The simplest argument parsing: get the 'argcount' arguments,
        or raise a real ValueError if the length is wrong."""
        if self.has_keywords():
            raise ValueError, "no keyword arguments expected"
        if len(self.arguments_w) > argcount:
            raise ValueError, "too many arguments (%d expected)" % argcount
        if self.w_stararg is not None:
            # unpack at most the number of still-missing arguments
            self.arguments_w += self.space.unpackiterable(self.w_stararg,
                                                   argcount - len(self.arguments_w))
            self.w_stararg = None
        elif len(self.arguments_w) < argcount:
            raise ValueError, "not enough arguments (%d expected)" % argcount
        return self.arguments_w
    def firstarg(self):
        "Return the first argument for inspection."
        if self.arguments_w:
            return self.arguments_w[0]
        if self.w_stararg is None:
            return None
        # peek into the still-packed *args without unpacking it completely
        w_iter = self.space.iter(self.w_stararg)
        try:
            return self.space.next(w_iter)
        except OperationError, e:
            if not e.match(self.space, self.space.w_StopIteration):
                raise
            return None
    ### Parsing for function calls ###
    def _match_signature(self, scope_w, argnames, has_vararg=False,
                         has_kwarg=False, defaults_w=[], blindargs=0,
                         extravarargs=None):
        """Parse args and kwargs according to the signature of a code object,
        or raise an ArgErr in case of failure.
        Return the number of arguments filled in.
        """
        # NOTE(review): the mutable default defaults_w=[] is safe only
        # because it is never mutated below -- keep it that way.
        #
        # args_w = list of the normal actual parameters, wrapped
        # kwds_w = real dictionary {'keyword': wrapped parameter}
        # argnames = list of formal parameter names
        # scope_w = resulting list of wrapped values
        #
        co_argcount = len(argnames) # expected formal arguments, without */**
        if self.w_stararg is not None:
            # There is a case where we don't have to unpack() a w_stararg:
            # if it matches exactly a *arg in the signature.
            if (len(self.arguments_w) + blindargs == co_argcount and
                has_vararg and
                self.space.is_w(self.space.type(self.w_stararg),
                                self.space.w_tuple)):
                pass
            else:
                self._unpack()   # sets self.w_stararg to None
        # always unpack the ** arguments
        if self.w_starstararg is not None:
            self._unpack()
        args_w = self.arguments_w
        kwds_w = self.kwds_w
        num_kwds = 0
        if kwds_w is not None:
            num_kwds = len(kwds_w)
        # put as many positional input arguments into place as available
        if blindargs >= co_argcount:
            input_argcount = co_argcount
        elif len(args_w) + blindargs > co_argcount:
            for i in range(co_argcount - blindargs):
                scope_w[i + blindargs] = args_w[i]
            input_argcount = co_argcount
            next_arg = co_argcount - blindargs
        else:
            for i in range(len(args_w)):
                scope_w[i + blindargs] = args_w[i]
            input_argcount = len(args_w) + blindargs
        # check that no keyword argument conflicts with these
        # note that for this purpose we ignore the first blindargs,
        # which were put into place by prepend(). This way, keywords do
        # not conflict with the hidden extra argument bound by methods.
        if kwds_w and input_argcount > blindargs:
            for name in argnames[blindargs:input_argcount]:
                if name in kwds_w:
                    raise ArgErrMultipleValues(name)
        remainingkwds_w = self.kwds_w
        missing = 0
        if input_argcount < co_argcount:
            if remainingkwds_w is None:
                remainingkwds_w = {}
            else:
                # copy so that consuming keywords below does not mutate
                # self.kwds_w
                remainingkwds_w = remainingkwds_w.copy()
            # not enough args, fill in kwargs or defaults if exists
            def_first = co_argcount - len(defaults_w)
            for i in range(input_argcount, co_argcount):
                name = argnames[i]
                if name in remainingkwds_w:
                    scope_w[i] = remainingkwds_w[name]
                    del remainingkwds_w[name]
                elif i >= def_first:
                    scope_w[i] = defaults_w[i-def_first]
                else:
                    # error: not enough arguments. Don't signal it immediately
                    # because it might be related to a problem with */** or
                    # keyword arguments, which will be checked for below.
                    missing += 1
        # collect extra positional arguments into the *vararg
        if has_vararg:
            if self.w_stararg is None: # common case
                args_left = co_argcount - blindargs
                if args_left < 0: # check required by rpython
                    assert extravarargs is not None
                    starargs_w = extravarargs
                    if len(args_w):
                        starargs_w.extend(args_w)
                elif len(args_w) > args_left:
                    starargs_w = args_w[args_left:]
                else:
                    starargs_w = []
                scope_w[co_argcount] = self.space.newtuple(starargs_w)
            else: # shortcut for the non-unpack() case above
                scope_w[co_argcount] = self.w_stararg
        elif len(args_w) + blindargs > co_argcount:
            raise ArgErrCount(len(args_w) + blindargs, num_kwds,
                              (co_argcount, has_vararg, has_kwarg),
                              defaults_w, 0)
        # collect extra keyword arguments into the **kwarg
        if has_kwarg:
            w_kwds = self.space.newdict()
            if remainingkwds_w:
                for key, w_value in remainingkwds_w.items():
                    self.space.setitem(w_kwds, self.space.wrap(key), w_value)
            scope_w[co_argcount + has_vararg] = w_kwds
        elif remainingkwds_w:
            raise ArgErrUnknownKwds(remainingkwds_w)
        if missing:
            raise ArgErrCount(len(args_w) + blindargs, num_kwds,
                              (co_argcount, has_vararg, has_kwarg),
                              defaults_w, missing)
        # booleans add as 0/1 here: one extra slot per */** present
        return co_argcount + has_vararg + has_kwarg
    ### Argument <-> list of w_objects together with "shape" information
    def _rawshape(self, nextra=0):
        # Compute the call-site "shape": positional count, sorted keyword
        # names, and whether packed */** arguments are present.
        shape_cnt = len(self.arguments_w)+nextra # Number of positional args
        if self.kwds_w:
            shape_keys = self.kwds_w.keys() # List of keywords (strings)
        else:
            shape_keys = []
        shape_star = self.w_stararg is not None # Flag: presence of *arg
        shape_stst = self.w_starstararg is not None # Flag: presence of **kwds
        shape_keys.sort()
        return shape_cnt, tuple(shape_keys), shape_star, shape_stst # shape_keys are sorted
    def flatten(self):
        # Flatten into (shape, data): keyword values follow the positionals
        # in sorted-keyword order, then the packed */** objects if present.
        shape_cnt, shape_keys, shape_star, shape_stst = self._rawshape()
        data_w = self.arguments_w + [self.kwds_w[key] for key in shape_keys]
        if shape_star:
            data_w.append(self.w_stararg)
        if shape_stst:
            data_w.append(self.w_starstararg)
        return (shape_cnt, shape_keys, shape_star, shape_stst), data_w
def rawshape(args, nextra=0):
    """Return the raw call shape of *args*, counting *nextra* extra positionals."""
    shape = args._rawshape(nextra)
    return shape
#
# ArgErr family of exceptions raised in case of argument mismatch.
# We try to give error messages following CPython's, which are very informative.
#
class ArgErr(Exception):
    """Base class for argument-mismatch errors; subclasses build the message."""
    def getmsg(self, fnname):
        """Return a CPython-style error message mentioning function *fnname*."""
        raise NotImplementedError
class ArgErrCount(ArgErr):
def __init__(self, nargs, nkwds, signature, defaults_w, missing_args):
self.signature = signature
self.num_defaults = len(defaults_w)
self.missing_args = missing_args
self.num_args = nargs
self.num_kwds = nkwds
def getmsg(self, fnname):
args = None
num_args, has_vararg, has_kwarg = self.signature
#args_w, kwds_w = args.unpack()
if has_kwarg or (self.num_kwds and self.num_defaults):
msg2 = "non-keyword "
if self.missing_args:
required_args = num_args - self.num_defaults
nargs = required_args - self.missing_args
else:
nargs = self.num_args
else:
msg2 = ""
nargs = self.num_args + self.num_kwds
n = num_args
if n == 0:
msg = "%s() takes no %sargument (%d given)" % (
fnname,
msg2,
nargs)
else:
defcount = self.num_defaults
if defcount == 0 and not has_vararg:
msg1 = "exactly"
elif not self.missing_args:
msg1 = "at most"
else:
msg1 = "at least"
n -= defcount
if not self.num_kwds: # msg "f() takes at least X non-keyword args"
msg2 = "" # is confusing if no kwd arg actually provided
if n == 1:
plural = ""
else:
plural = "s"
msg = "%s() takes %s %d %sargument%s (%d given)" % (
fnname,
msg1,
n,
msg2,
plural,
nargs)
return msg
class ArgErrMultipleValues(ArgErr):
def __init__(self, argname):
self.argname = argname
def getmsg(self, fnname):
msg = "%s() got multiple values for keyword argument '%s'" % (
fnname,
self.argname)
return msg
class ArgErrUnknownKwds(ArgErr):
def __init__(self, kwds_w):
self.kwd_name = ''
self.num_kwds = len(kwds_w)
if self.num_kwds == 1:
self.kwd_name = kwds_w.keys()[0]
def getmsg(self, fnname):
if self.num_kwds == 1:
msg = "%s() got an unexpected keyword argument '%s'" % (
fnname,
self.kwd_name)
else:
msg = "%s() got %d unexpected keyword arguments" % (
fnname,
self.num_kwds)
return msg
| 38.048711 | 132 | 0.559568 |
acf333e91ea32349051dc836e7cf19249b26e86f | 3,379 | py | Python | dev/tools/leveleditor/python/Lib/test/test_multibytecodec.py | CrankySupertoon01/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 12 | 2016-01-06T07:10:28.000Z | 2021-05-13T23:02:02.000Z | dev/tools/leveleditor/python/Lib/test/test_multibytecodec.py | CrankySupertoonArchive/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 1 | 2019-09-03T04:11:46.000Z | 2019-09-03T04:11:46.000Z | ToolKit/cmds/python/Lib/test/crashed/test_multibytecodec.py | kontais/EFI-MIPS | c4de746e6a926fc9df71231a539e2c0a170bcc90 | [
"BSD-3-Clause"
] | 3 | 2019-09-04T02:59:01.000Z | 2021-08-23T06:07:28.000Z | #!/usr/bin/env python
#
# test_multibytecodec.py
# Unit test for multibytecodec itself
#
# $CJKCodecs: test_multibytecodec.py,v 1.8 2004/06/19 06:09:55 perky Exp $
from test import test_support
from test import test_multibytecodec_support
import unittest, StringIO, codecs
class Test_StreamWriter(unittest.TestCase):
    """Tests for incremental writing through multibyte-codec StreamWriters.

    The surrogate-pair tests only apply on narrow (UCS2) builds, where a
    non-BMP character is stored as two code units and a lone high surrogate
    must be buffered by the writer until its low surrogate arrives.
    """
    if len(u'\U00012345') == 2: # UCS2
        def test_gb18030(self):
            # A pending high surrogate is held back until completed; reset()
            # with a dangling surrogate raises, leaving output unchanged.
            s= StringIO.StringIO()
            c = codecs.lookup('gb18030')[3](s)
            c.write(u'123')
            self.assertEqual(s.getvalue(), '123')
            c.write(u'\U00012345')
            self.assertEqual(s.getvalue(), '123\x907\x959')
            c.write(u'\U00012345'[0])
            # the lone high surrogate is buffered, not emitted
            self.assertEqual(s.getvalue(), '123\x907\x959')
            c.write(u'\U00012345'[1] + u'\U00012345' + u'\uac00\u00ac')
            self.assertEqual(s.getvalue(),
                    '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
            c.write(u'\U00012345'[0])
            self.assertEqual(s.getvalue(),
                    '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
            self.assertRaises(UnicodeError, c.reset)
            self.assertEqual(s.getvalue(),
                    '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
        # standard utf-8 codecs has broken StreamReader
        if test_multibytecodec_support.__cjkcodecs__:
            def test_utf_8(self):
                # Same surrogate buffering as above, but here reset() flushes
                # the pending high surrogate as-is instead of raising.
                s= StringIO.StringIO()
                c = codecs.lookup('utf-8')[3](s)
                c.write(u'123')
                self.assertEqual(s.getvalue(), '123')
                c.write(u'\U00012345')
                self.assertEqual(s.getvalue(), '123\xf0\x92\x8d\x85')
                c.write(u'\U00012345'[0])
                self.assertEqual(s.getvalue(), '123\xf0\x92\x8d\x85')
                c.write(u'\U00012345'[1] + u'\U00012345' + u'\uac00\u00ac')
                self.assertEqual(s.getvalue(),
                        '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
                        '\xea\xb0\x80\xc2\xac')
                c.write(u'\U00012345'[0])
                self.assertEqual(s.getvalue(),
                        '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
                        '\xea\xb0\x80\xc2\xac')
                c.reset()
                self.assertEqual(s.getvalue(),
                        '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
                        '\xea\xb0\x80\xc2\xac\xed\xa0\x88')
                c.write(u'\U00012345'[1])
                self.assertEqual(s.getvalue(),
                        '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
                        '\xea\xb0\x80\xc2\xac\xed\xa0\x88\xed\xbd\x85')
    else: # UCS4
        pass
    def test_nullcoding(self):
        # empty input must round-trip to empty output in every direction
        self.assertEqual(''.decode('gb18030'), u'')
        self.assertEqual(unicode('', 'gb18030'), u'')
        self.assertEqual(u''.encode('gb18030'), '')
    def test_str_decode(self):
        # ASCII byte strings pass through gb18030 encoding unchanged
        self.assertEqual('abcd'.encode('gb18030'), 'abcd')
    def test_streamwriter_strwrite(self):
        # writing a plain (already-encoded) str must pass through verbatim
        s = StringIO.StringIO()
        wr = codecs.getwriter('gb18030')(s)
        wr.write('abcd')
        self.assertEqual(s.getvalue(), 'abcd')
def test_main():
    """Run the Test_StreamWriter suite through the shared test harness."""
    writer_tests = unittest.makeSuite(Test_StreamWriter)
    full_suite = unittest.TestSuite()
    full_suite.addTest(writer_tests)
    test_support.run_suite(full_suite)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    test_main()
acf334122b30894bf23eb4bf165208bc5716ccf5 | 1,874 | py | Python | numpoly/array_function/rint.py | jonathf/npoly | 9df4bd2a3b134e8a196e24389c0ad84c26da9662 | [
"BSD-2-Clause"
] | 8 | 2019-12-13T23:54:33.000Z | 2021-11-08T22:44:25.000Z | numpoly/array_function/rint.py | jonathf/npoly | 9df4bd2a3b134e8a196e24389c0ad84c26da9662 | [
"BSD-2-Clause"
] | 54 | 2019-08-25T20:03:10.000Z | 2021-08-09T08:59:27.000Z | numpoly/array_function/rint.py | jonathf/npoly | 9df4bd2a3b134e8a196e24389c0ad84c26da9662 | [
"BSD-2-Clause"
] | 2 | 2020-03-05T12:03:28.000Z | 2021-03-07T16:56:09.000Z | """Round elements of the array to the nearest integer."""
from __future__ import annotations
from typing import Any, Optional
import numpy
import numpy.typing
from ..baseclass import ndpoly, PolyLike
from ..dispatch import implements, simple_dispatch
@implements(numpy.rint)
def rint(
x: PolyLike,
out: Optional[ndpoly] = None,
where: numpy.typing.ArrayLike = True,
**kwargs: Any,
) -> ndpoly:
"""
Round elements of the array to the nearest integer.
Args:
x:
Input array.
out:
A location into which the result is stored. If provided, it must
have a shape that the inputs broadcast to. If not provided or
`None`, a freshly-allocated array is returned. A tuple (possible
only as a keyword argument) must have length equal to the number of
outputs.
where:
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value. Note
that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
kwargs:
Keyword args passed to numpy.ufunc.
Returns:
Output array is same shape and type as `x`. This is a scalar if `x`
is a scalar.
Examples:
>>> q0 = numpoly.variable()
>>> numpoly.rint([-1.7*q0, q0-1.5, -0.2,
... 3.2+1.5*q0, 1.7, 2.0])
polynomial([-2.0*q0, q0-2.0, 0.0, 2.0*q0+3.0, 2.0, 2.0])
"""
return simple_dispatch(
numpy_func=numpy.rint,
inputs=(x,),
out=None if out is None else (out,),
where=where,
**kwargs
)
| 31.762712 | 79 | 0.603522 |
acf334a4a67077bb500e0dd309071d13ea2cdce6 | 21,161 | py | Python | runway/model.py | notnotcamscott/portraiture | 3b4d1dad269ed544441627b54417f2735cb2396b | [
"MIT"
] | null | null | null | runway/model.py | notnotcamscott/portraiture | 3b4d1dad269ed544441627b54417f2735cb2396b | [
"MIT"
] | null | null | null | runway/model.py | notnotcamscott/portraiture | 3b4d1dad269ed544441627b54417f2735cb2396b | [
"MIT"
] | null | null | null | import os
import sys
import logging
import datetime
import traceback
import inspect
import json
from six import reraise
from flask import Flask, request, jsonify
from flask_cors import CORS
from gevent.pywsgi import WSGIServer
from .exceptions import RunwayError, MissingInputError, MissingOptionError, \
InferenceError, UnknownCommandError, SetupError
from .data_types import *
from .utils import gzipped, serialize_command, cast_to_obj, timestamp_millis, \
validate_post_request_body_is_json, get_json_or_none_if_invalid, argspec
from .__version__ import __version__ as model_sdk_version
class RunwayModel(object):
"""A Runway Model server. A singleton instance of this class is created automatically
when the runway module is imported.
"""
    def __init__(self):
        """Create the server state, the Flask app, and its routes/handlers."""
        self.millis_run_started_at = None  # set when run() starts serving
        self.millis_last_command = None    # updated on every command request
        self.options = []                  # setup options declared via @setup
        self.setup_fn = None               # user function wrapped by @setup
        self.commands = {}                 # command name -> command info dict
        self.command_fns = {}              # command name -> user function
        self.model = None                  # return value of the setup function
        self.running_status = 'STARTING'
        self.app = Flask(__name__)
        # Support utf-8 in application/json requests and responses.
        # We wrap this in a try/except block because, for whatever reason,
        # `make docs` throws a TypeError that keys are unassignable to
        # self.app.config. This DOES NOT occur when using the RunwayModel module
        # anywhere except in the docs build environment.
        try: self.app.config['JSON_AS_ASCII'] = False
        except TypeError: pass
        CORS(self.app)
        self.define_error_handlers()
        self.define_routes()
def define_error_handlers(self):
# not yet implemented, but if and when it is lets make sure its returned
# as JSON
@self.app.errorhandler(401)
def unauthorized(e):
msg = 'Unauthorized (well... '
msg += 'really unauthenticated but hey I didn\'t write the spec).'
return jsonify(dict(error=msg)), 401
# not yet implemented, but if and when it is lets make sure its returned
# as JSON
@self.app.errorhandler(403)
def forbidden(e):
return jsonify(dict(error='Forbidden.')), 403
@self.app.errorhandler(404)
def page_not_found(e):
return jsonify(dict(error='Not found.')), 404
@self.app.errorhandler(405)
def method_not_allowed(e):
return jsonify(dict(error='Method not allowed.')), 405
# we shouldn't have any of these as we are wrapping errors in
# RunwayError objects and returning stacktraces, but it can't hurt
# to be safe.
@self.app.errorhandler(500)
def internal_server_error(e):
return jsonify(dict(error='Internal server error.')), 500
    def define_routes(self):
        """Register all HTTP routes exposed by the model server."""
        @self.app.route('/', methods=['GET'])
        @self.app.route('/meta', methods=['GET'])
        def manifest():
            # Describe the model: SDK version, uptime, GPU flag, declared
            # setup options and the serialized signature of every command.
            return jsonify(dict(
                modelSDKVersion=model_sdk_version,
                millisRunning=self.millis_running(),
                millisSinceLastCommand=self.millis_since_last_command(),
                GPU=os.environ.get('GPU') == '1',
                options=[opt.to_dict() for opt in self.options],
                commands=[serialize_command(cmd) for cmd in self.commands.values()]
            ))
        @self.app.route('/healthcheck', methods=['GET'])
        def healthcheck_route():
            # Reports STARTING until setup_model() completes, then RUNNING.
            return jsonify(dict(status=self.running_status))
        @self.app.route('/setup', methods=['POST'])
        @validate_post_request_body_is_json
        def setup_route():
            # (Re-)run the user's setup function with the posted options.
            opts = get_json_or_none_if_invalid(request)
            try:
                self.setup_model(opts)
                return jsonify(dict(success=True))
            except RunwayError as err:
                err.print_exception()
                return jsonify(err.to_response()), err.code
        @self.app.route('/setup', methods=['GET'])
        def setup_options_route():
            return jsonify(self.options)
        @self.app.route('/<command_name>', methods=['POST'])
        @validate_post_request_body_is_json
        def command_route(command_name):
            # Deserialize inputs per the command's declared schema, invoke
            # the user's command function, then serialize its outputs.
            try:
                try:
                    command_fn = self.command_fns[command_name]
                except KeyError:
                    raise UnknownCommandError(command_name)
                inputs = self.commands[command_name]['inputs']
                outputs = self.commands[command_name]['outputs']
                input_dict = get_json_or_none_if_invalid(request)
                deserialized_inputs = {}
                for inp in inputs:
                    name = inp.name
                    if name in input_dict:
                        value = input_dict[name]
                    elif hasattr(inp, 'default'):
                        # fall back to the declared default when the client
                        # omitted this input
                        value = inp.default
                    else:
                        raise MissingInputError(name)
                    deserialized_inputs[name] = inp.deserialize(value)
                try:
                    self.millis_last_command = timestamp_millis()
                    results = command_fn(self.model, deserialized_inputs)
                    if type(results) != dict:
                        # a bare return value is treated as the single
                        # declared output
                        name = outputs[0].name
                        value = results
                        results = {}
                        results[name] = value
                except Exception as err:
                    # wrap as InferenceError while keeping the original
                    # traceback
                    raise reraise(InferenceError, InferenceError(repr(err)), sys.exc_info()[2])
                serialized_outputs = {}
                for out in outputs:
                    name = out.to_dict()['name']
                    serialized_outputs[name] = out.serialize(results[name])
                return jsonify(serialized_outputs)
            except RunwayError as err:
                err.print_exception()
                return jsonify(err.to_response()), err.code
        @self.app.route('/<command_name>', methods=['GET'])
        def usage_route(command_name):
            # Return the serialized signature of a single command.
            try:
                try:
                    command = self.commands[command_name]
                except KeyError:
                    raise UnknownCommandError(command_name)
                return jsonify(serialize_command(command))
            except RunwayError as err:
                err.print_exception()
                return jsonify(err.to_response()), err.code
def millis_running(self):
if self.millis_run_started_at is None: return None
return timestamp_millis() - self.millis_run_started_at
def millis_since_last_command(self):
if self.millis_last_command is None: return None
return timestamp_millis() - self.millis_last_command
def setup(self, decorated_fn=None, options=None):
"""This decorator is used to wrap your own ``setup()`` (or equivalent)
function to run whatever initialization code you'd like. Your wrapped
function `should` configure and return a model. Your function `should`
also be made resilient to being called multiple times, as this is
possible depending on the behavior of the client application.
This endpoint exposes a ``/setup`` HTTP route and calls the wrapped
(decorated) function once on ``runway.run()`` and whenever a new POST
request is made to the ``/setup`` route.
.. code-block:: python
import runway
from runway.data_types import category
from your_code import model
options = {"network_size": category(choices=[64, 128, 256, 512], default=256)}
@runway.setup(options=options)
def setup(opts):
print("Setup ran, and the network size is {}".format(opts["network_size"]))
return model(network_size=opts["network_size"])
.. note::
This is example code for demonstration purposes only. It will not
run, as the ``your_code`` import is not a real python module.
:param decorated_fn: A function to be decorated. This argument is automatically
assigned the value of the wrapped function if the decorator syntax is used
without a function call
(e.g. ``@runway.setup`` instead of ``@runway.setup()``).
:type decorated_fn: function, optional
:param options: A dictionary of setup options, mapping string names
to ``runway.data_types``. These options define the schema of the
object that is passed as the single argument to the wrapped
function.
:type options: dict, optional
:return: A decorated function
:rtype: function or `NoneType`
"""
if decorated_fn:
self.options = []
self.setup_fn = decorated_fn
else:
def decorator(fn):
self.options = []
for name, opt in options.items():
opt = cast_to_obj(opt)
opt.name = name
self.options.append(opt)
self.setup_fn = fn
return fn
return decorator
def command(self, name, inputs={}, outputs={}, description=None):
"""This decorator function is used to define the interface for your
model. All functions that are wrapped by this decorator become exposed
via HTTP requests to ``/<command_name>``. Each command that you define
can be used to get data into or out of your runway model, or trigger an
action.
.. code-block:: python
import runway
from runway.data_types import category, vector, image
from your_code import model
@runway.setup
def setup():
return model()
sample_inputs= {
"z": vector(length=512),
"category": category(choices=["day", "night"])
}
sample_outputs = {
"image": image(width=1024, height=1024)
}
@runway.command("sample", inputs=sample_inputs, outputs=sample_outputs)
def sample(model, inputs):
# The parameters passed to a function decorated by @runway.command() are:
# 1. The return value of a function wrapped by @runway.setup(), usually a model.
# 2. The inputs sent with the HTTP request to the /<command_name> endpoint,
# as defined by the inputs keyword argument delivered to @runway.command().
img = model.sample(z=inputs["z"], category=inputs["category"])
# `img` can be a PIL or numpy image. It will be encoded as a base64 URI string
# automatically by @runway.command().
return { "image": img }
:param name: The name of the command. This name is used to create the
HTTP route associated with the command
(i.e. a name of "generate_text" will generate a ``/generate_text``
route).
:type name: string
:param inputs: A dictionary mapping input names to
``runway.data_types``. This dictionary defines the interface used
to send input data to this command. At least one key value pair is
required.
:type inputs: dict
:param outputs: A dictionary defining the output data returned from the
wrapped function as ``runway.data_types``. At least one key value
pair is required.
:type outputs: dict
:param description: A text description of what this command does.
If this parameter is present its value will be rendered as a tooltip
in Runway. Defaults to None.
:type description: string
:raises Exception: An exception if there isn't at least one key value
pair for both inputs and outputs dictionaries
:return: A decorated function
:rtype: function
"""
if len(inputs.values()) == 0 or len(outputs.values()) == 0:
raise Exception('You need to provide at least one input and output for the command')
inputs_as_list = []
for input_name, inp in inputs.items():
inp_obj = cast_to_obj(inp)
# It is the responsibility of the RunwayModel's setup() and command()
# functions to assign names to runway.data_types based on the dictionary
# keys
inp_obj.name = input_name
inputs_as_list.append(inp_obj)
outputs_as_list = []
for output_name, out in outputs.items():
out_obj = cast_to_obj(out)
out_obj.name = output_name
outputs_as_list.append(out_obj)
command_info = dict(
name=name,
description=description,
inputs=inputs_as_list,
outputs=outputs_as_list
)
self.commands[name] = command_info
def decorator(fn):
self.command_fns[name] = fn
return fn
return decorator
    def setup_model(self, opts):
        """Run the user's wrapped setup function with deserialized options.

        ``opts`` is the raw (JSON-decoded) options dict.  Each declared option
        is deserialized; missing ones fall back to their defaults, otherwise a
        MissingOptionError is raised.  Any exception from the user's setup
        function is re-raised as SetupError with the original traceback.
        Sets running_status to RUNNING on success.
        """
        self.running_status = 'STARTING'
        if self.setup_fn and self.options:
            deserialized_opts = {}
            for opt in self.options:
                name = opt.name
                opt = cast_to_obj(opt)
                opt.name = name
                if name in opts:
                    deserialized_opts[name] = opt.deserialize(opts[name])
                elif hasattr(opt, 'default'):
                    deserialized_opts[name] = opt.default
                else:
                    raise MissingOptionError(name)
            try:
                self.model = self.setup_fn(deserialized_opts)
            except Exception as err:
                # change the exception type but keep the original traceback
                raise reraise(SetupError, SetupError(repr(err)), sys.exc_info()[2])
        elif self.setup_fn:
            try:
                # a setup function may take zero arguments or a single
                # (possibly empty) options dict
                if len(argspec(self.setup_fn).args) == 0:
                    self.model = self.setup_fn()
                else:
                    self.model = self.setup_fn({})
            except Exception as err:
                raise reraise(SetupError, SetupError(repr(err)), sys.exc_info()[2])
        self.running_status = 'RUNNING'
def run(self, host='0.0.0.0', port=8000, model_options={}, debug=False, meta=False, no_serve=False):
"""Run the model and start listening for HTTP requests on the network.
By default, the server will run on port ``8000`` and listen on all
network interfaces (``0.0.0.0``).
.. code-block:: python
import runway
# ... setup an initialization function with @runway.setup()
# ... define a command or two with @runway.command()
# now it's time to run the model server which actually sets up the
# routes and handles the HTTP requests.
if __name__ == "__main__":
runway.run()
``runway.run()`` acts as the entrypoint to the runway module. You should
call it as the last thing in your ``runway_model.py``, once you've
defined a ``@runway.setup()`` function and one or more
``@runway.command()`` functions.
:param host: The IP address to bind the HTTP server to, defaults to
``"0.0.0.0"`` (all interfaces). This value will be overwritten by the
``RW_HOST`` environment variable if it is present.
:type host: string, optional
:param port: The port to bind the HTTP server to, defaults to ``8000``.
This value will be overwritten by the ``RW_PORT`` environment
variable if it is present.
:type port: int, optional
:param model_options: The model options that are passed to
``@runway.setup()`` during initialization, defaults to ``{}``. This
value will be overwritten by the ``RW_MODEL_OPTIONS`` environment
variable if it is present.
:type model_options: dict, optional
:param debug: Whether to run the Flask HTTP server in debug mode, which
enables live reloading, defaults to ``False``. This value will be
overwritten by the ``RW_DEBUG`` environment variable if it is
present.
:type debug: boolean, optional
:param meta: Print the model's options and commands as JSON and exit,
defaults to ``False``. This functionality is used in a production
environment to dynamically discover the interface presented by
the Runway model at runtime. This value will be overwritten by the
``RW_META`` environment variable if it is present.
:type meta: boolean, optional
:param no_serve: Don't start the Flask server, defaults to ``False``
(i.e. the Flask server is started by default when the
``runway.run()`` function is called without setting this argument
set to True). This functionality is used during automated testing to
mock HTTP requests using Flask's ``app.test_client()``
(see Flask's testing_ docs for more details).
:type meta: boolean, optional
.. _testing: http://flask.pocoo.org/docs/1.0/testing/
.. warning::
All keyword arguments to the ``runway.run()`` function will be
overwritten by environment variables when your model is run by the
Runway app. Using the default values for these arguments, or
supplying your own in python code, is fine so long as you are aware
of the fact that their values will be overwritten by the following
environment variables at runtime in a production environment:
- ``RW_HOST``: Defines the IP address to bind the HTTP server to.
This environment variable overwrites any value passed as the
``host`` keyword argument.
- ``RW_PORT``: Defines the port to bind the HTTP server to. This
environment variable overwrites any value passed as the ``port``
keyword argument.
- ``RW_MODEL_OPTIONS``: Defines the model options that are passed to
``@runway.setup()`` during initialization. This environment
variable overwrites any value passed as the ``model_options``
keyword argument.
- ``RW_DEBUG``: Defines whether to run the Flask HTTP server in
debug mode, which enables live reloading. This environment
variable overwrites any value passed as the ``debug`` keyword
argument. ``RW_DEBUG=1`` enables debug mode.
- ``RW_META``: Defines the behavior of the ``runway.run()``
function. If ``RW_META=1`` the function prints the model's options
and commands as JSON and then exits. This environment variable
overwrites any value passed as the ``meta`` keyword argument.
- ``RW_NO_SERVE``: Forces ``runway.run()`` to not start its Flask
server. This environment variable overwrites any value passed as
the ``no_serve`` keyword argument.
"""
env_host = os.getenv('RW_HOST')
env_port = os.getenv('RW_PORT')
env_meta = os.getenv('RW_META')
env_debug = os.getenv('RW_DEBUG')
env_no_serve = os.getenv('RW_NO_SERVE')
env_model_options = os.getenv('RW_MODEL_OPTIONS')
if env_host is not None:
host = env_host
if env_port is not None:
port = int(env_port)
if env_meta is not None:
meta = bool(int(env_meta))
if env_debug is not None:
debug = bool(int(env_debug))
if env_no_serve is not None:
no_serve = bool(int(env_no_serve))
if env_model_options is not None:
model_options = json.loads(env_model_options)
if meta:
print(json.dumps(dict(
options=[opt.to_dict() for opt in self.options],
commands=[serialize_command(cmd) for cmd in self.commands.values()]
)))
return
print('Setting up model...')
try:
self.setup_model(model_options)
except RunwayError as err:
err.print_exception()
sys.exit(1)
# start the run started at millis timer even if we don't actually serve
self.millis_run_started_at = timestamp_millis()
if no_serve:
print('Not starting model server because "no_serve" directive is present.')
else:
print('Starting model server at http://{0}:{1}...'.format(host, port))
if debug:
logging.basicConfig(level=logging.DEBUG)
self.app.debug = True
self.app.run(host=host, port=port, debug=True, threaded=True)
else:
http_server = WSGIServer((host, port), self.app)
try:
http_server.serve_forever()
except KeyboardInterrupt:
print('Stopping server...')
| 43.811594 | 104 | 0.592033 |
acf337761e1eeb17eb51ecee615757ea9c3b75d6 | 2,883 | py | Python | algorithm/vowel_consonant_heuristic.py | GMKenny/ipass_scrabble | 18a953e9bac6cc197266c7234f38296027c2e23d | [
"MIT"
] | null | null | null | algorithm/vowel_consonant_heuristic.py | GMKenny/ipass_scrabble | 18a953e9bac6cc197266c7234f38296027c2e23d | [
"MIT"
] | null | null | null | algorithm/vowel_consonant_heuristic.py | GMKenny/ipass_scrabble | 18a953e9bac6cc197266c7234f38296027c2e23d | [
"MIT"
] | 1 | 2022-03-14T14:53:01.000Z | 2022-03-14T14:53:01.000Z |
class VowelConsonant:
    def __init__(self):
        """Heuristic strategy: prefer words that are vowel-heavy or
        consonant-heavy, depending on which kind of letter dominates the
        tiles remaining in the rack."""
        # Any letter outside this collection is treated as a consonant.
        self.vowel = ["a", "o", "i", "u", "e"]

    def determine_vowel_consonant_move(self, rack_tiles, all_words):
        """
        Order the candidate words so that the group which rebalances the
        rack comes first.

        :param rack_tiles: list of letters currently on the rack.
        :param all_words: list of playable candidate words.
        :return: all candidate words; the vowel-heavy ordering first when the
                 rack holds at least as many vowels as consonants, otherwise
                 the consonant-heavy ordering first.
        """
        rack_vowels, rack_consonants = self._vowel_consonant_count(rack_tiles)
        vowel_scores, consonant_scores = self._get_count_dict_vowel_consonant(all_words)
        by_vowels = sorted(vowel_scores, key=vowel_scores.get, reverse=True)
        by_consonants = sorted(consonant_scores, key=consonant_scores.get, reverse=True)
        if rack_vowels + 1 > rack_consonants:
            return by_vowels + by_consonants
        return by_consonants + by_vowels

    def _get_count_dict_vowel_consonant(self, possible_word_list):
        """
        Score every candidate word by its vowel and consonant counts.

        :param possible_word_list: list of candidate words.
        :return: two dicts: word -> vowel count and word -> consonant count.
        """
        vowel_scores = {}
        consonant_scores = {}
        for word in possible_word_list:
            vowel_scores[word], consonant_scores[word] = \
                self._vowel_consonant_count(list(word))
        return vowel_scores, consonant_scores

    def _vowel_consonant_count(self, letter_list):
        """
        Count vowels and consonants in a list of letters.

        :param letter_list: list of single-character strings.
        :return: tuple of (vowel count, consonant count).
        """
        vowels = sum(1 for letter in letter_list if letter in self.vowel)
        return vowels, len(letter_list) - vowels
| 43.681818 | 130 | 0.658342 |
acf33786f2ad0ec04d7bfdd030f98299e0ebd74e | 369 | py | Python | templatetags/cartoview_cms_public_display.py | cartologic/cartoview_cms | 3f21a092c90db3d4560d69c1c2a6c7843b23ea0e | [
"BSD-2-Clause"
] | null | null | null | templatetags/cartoview_cms_public_display.py | cartologic/cartoview_cms | 3f21a092c90db3d4560d69c1c2a6c7843b23ea0e | [
"BSD-2-Clause"
] | null | null | null | templatetags/cartoview_cms_public_display.py | cartologic/cartoview_cms | 3f21a092c90db3d4560d69c1c2a6c7843b23ea0e | [
"BSD-2-Clause"
] | null | null | null | from django import template
from ..models.generic_module.GenericModule import GenericModule
register = template.Library()
@register.inclusion_tag('cartoview_cms/templatetags/cartoview_cms_public_display.html')
def cartoview_cms_display_modules():
    """Inclusion tag supplying the context for the public-display template.

    :return: context dict containing the GenericModule queryset whose
             ``public_display`` flag is set.
    """
    # BUGFIX: removed the dead ``modules = None`` assignment that was
    # immediately overwritten by the queryset below.
    modules = GenericModule.objects.filter(public_display=True).all()
    return {'modules': modules}
acf33860c460744b7834a60301eda8b9940efa39 | 8,222 | py | Python | tests/integration/states/test_archive.py | nielsk/salt | be5d400d903e68d99c216fd63a7146d86a64a55d | [
"Apache-2.0"
] | null | null | null | tests/integration/states/test_archive.py | nielsk/salt | be5d400d903e68d99c216fd63a7146d86a64a55d | [
"Apache-2.0"
] | null | null | null | tests/integration/states/test_archive.py | nielsk/salt | be5d400d903e68d99c216fd63a7146d86a64a55d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Tests for the archive state
'''
# Import python libs
from __future__ import absolute_import
import errno
import logging
import os
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import skip_if_not_root, Webserver
from tests.support.mixins import SaltReturnAssertsMixin
from tests.support.paths import FILES
# Import salt libs
import salt.utils
import salt.utils.files
# Setup logging
log = logging.getLogger(__name__)
# Extraction target for the archive.extracted state under test.
if salt.utils.is_windows():
    ARCHIVE_DIR = os.path.join('c:/', 'tmp')
else:
    ARCHIVE_DIR = '/tmp/archive'

# Fixture tarball and the artifacts its extraction should produce.
ARCHIVE_NAME = 'custom.tar.gz'
# NOTE(review): appears unused — the tests build the remote URL from the
# Webserver helper in setUpClass instead; confirm before removing.
ARCHIVE_TAR_SOURCE = 'http://localhost:{0}/{1}'.format(9999, ARCHIVE_NAME)
# file:// URL pointing at the tarball shipped with the test suite fixtures.
ARCHIVE_LOCAL_TAR_SOURCE = 'file://{0}'.format(os.path.join(FILES, 'file', 'base', ARCHIVE_NAME))
# File expected to exist after a successful (non-stripped) extraction.
UNTAR_FILE = os.path.join(ARCHIVE_DIR, 'custom/README')
# Known-good md5 of the fixture tarball, and a deliberately wrong one
# (md5 of the empty string) for the negative-hash tests.
ARCHIVE_TAR_HASH = 'md5=7643861ac07c30fe7d2310e9f25ca514'
ARCHIVE_TAR_BAD_HASH = 'md5=d41d8cd98f00b204e9800998ecf8427e'
class ArchiveTest(ModuleCase, SaltReturnAssertsMixin):
    '''
    Validate the archive state
    '''
    @classmethod
    def setUpClass(cls):
        # Serve the fixture tarball over HTTP so the remote-source tests
        # have a local endpoint to download from.
        cls.webserver = Webserver()
        cls.webserver.start()
        cls.archive_tar_source = cls.webserver.url('custom.tar.gz')

    @classmethod
    def tearDownClass(cls):
        cls.webserver.stop()

    def setUp(self):
        self._clear_archive_dir()

    def tearDown(self):
        self._clear_archive_dir()

    @staticmethod
    def _clear_archive_dir():
        # A missing extraction directory is fine; re-raise anything else.
        try:
            salt.utils.files.rm_rf(ARCHIVE_DIR)
        except OSError as exc:
            if exc.errno != errno.ENOENT:
                raise

    def _check_extracted(self, path):
        '''
        function to check if file was extracted
        '''
        log.debug('Checking for extracted file: %s', path)
        self.assertTrue(os.path.isfile(path))

    def _run_extracted(self, source, expect_success=True,
                       extracted_path=UNTAR_FILE, **kwargs):
        '''
        Run the ``archive.extracted`` state against ``source`` and verify
        the outcome.

        Skips the test when the state result indicates a timeout talking to
        the local tornado server (only reachable for remote sources). On an
        expected success, asserts a True state return and that
        ``extracted_path`` exists; otherwise asserts a False state return.

        Any extra keyword arguments are forwarded to the state verbatim.
        '''
        ret = self.run_state('archive.extracted', name=ARCHIVE_DIR,
                             source=source, **kwargs)
        log.debug('ret = %s', ret)
        if 'Timeout' in ret:
            self.skipTest('Timeout talking to local tornado server.')
        if expect_success:
            self.assertSaltTrueReturn(ret)
            self._check_extracted(extracted_path)
        else:
            self.assertSaltFalseReturn(ret)

    def test_archive_extracted_skip_verify(self):
        '''
        test archive.extracted with skip_verify
        '''
        self._run_extracted(self.archive_tar_source, archive_format='tar',
                            skip_verify=True)

    def test_archive_extracted_with_source_hash(self):
        '''
        test archive.extracted without skip_verify
        only external resources work to check to
        ensure source_hash is verified correctly
        '''
        self._run_extracted(self.archive_tar_source, archive_format='tar',
                            source_hash=ARCHIVE_TAR_HASH)

    @skip_if_not_root
    def test_archive_extracted_with_root_user_and_group(self):
        '''
        test archive.extracted with user and group set to "root"
        '''
        r_group = 'root'
        if salt.utils.is_darwin():
            # macOS has no "root" group; the superuser group is "wheel".
            r_group = 'wheel'
        self._run_extracted(self.archive_tar_source, archive_format='tar',
                            source_hash=ARCHIVE_TAR_HASH,
                            user='root', group=r_group)

    def test_archive_extracted_with_strip_in_options(self):
        '''
        test archive.extracted with --strip in options
        '''
        # --strip=1 drops the top-level directory, so README lands directly
        # in ARCHIVE_DIR instead of under custom/.
        self._run_extracted(self.archive_tar_source,
                            source_hash=ARCHIVE_TAR_HASH,
                            options='--strip=1',
                            enforce_toplevel=False,
                            extracted_path=os.path.join(ARCHIVE_DIR, 'README'))

    def test_archive_extracted_with_strip_components_in_options(self):
        '''
        test archive.extracted with --strip-components in options
        '''
        self._run_extracted(self.archive_tar_source,
                            source_hash=ARCHIVE_TAR_HASH,
                            options='--strip-components=1',
                            enforce_toplevel=False,
                            extracted_path=os.path.join(ARCHIVE_DIR, 'README'))

    def test_archive_extracted_without_archive_format(self):
        '''
        test archive.extracted with no archive_format option
        '''
        self._run_extracted(self.archive_tar_source,
                            source_hash=ARCHIVE_TAR_HASH)

    def test_archive_extracted_with_cmd_unzip_false(self):
        '''
        test archive.extracted using use_cmd_unzip argument as false
        '''
        self._run_extracted(self.archive_tar_source,
                            source_hash=ARCHIVE_TAR_HASH,
                            use_cmd_unzip=False,
                            archive_format='tar')

    def test_local_archive_extracted(self):
        '''
        test archive.extracted with local file
        '''
        self._run_extracted(ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar')

    def test_local_archive_extracted_skip_verify(self):
        '''
        test archive.extracted with local file, bad hash and skip_verify
        '''
        self._run_extracted(ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar',
                            source_hash=ARCHIVE_TAR_BAD_HASH, skip_verify=True)

    def test_local_archive_extracted_with_source_hash(self):
        '''
        test archive.extracted with local file and valid hash
        '''
        self._run_extracted(ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar',
                            source_hash=ARCHIVE_TAR_HASH)

    def test_local_archive_extracted_with_bad_source_hash(self):
        '''
        test archive.extracted with local file and bad hash
        '''
        self._run_extracted(ARCHIVE_LOCAL_TAR_SOURCE, archive_format='tar',
                            source_hash=ARCHIVE_TAR_BAD_HASH,
                            expect_success=False)
| 34.838983 | 97 | 0.612746 |
acf3394405970ce3f2c32b0ae64a7e2096f71512 | 14,913 | py | Python | terra_sdk/client/lcd/api/tx.py | cloutier/terra.py | 55e0f9c6a643f6f208eb686b397aafa3d1009629 | [
"MIT"
] | null | null | null | terra_sdk/client/lcd/api/tx.py | cloutier/terra.py | 55e0f9c6a643f6f208eb686b397aafa3d1009629 | [
"MIT"
] | null | null | null | terra_sdk/client/lcd/api/tx.py | cloutier/terra.py | 55e0f9c6a643f6f208eb686b397aafa3d1009629 | [
"MIT"
] | null | null | null | import base64
import copy
import urllib.parse
from typing import List, Optional, Union, Dict
from multidict import CIMultiDict
import attr
from ..params import APIParams
from terra_sdk.core import AccAddress, Coins, Dec, Numeric, PublicKey
from terra_sdk.core.broadcast import (
AsyncTxBroadcastResult,
BlockTxBroadcastResult,
SyncTxBroadcastResult,
)
from terra_sdk.core.msg import Msg
from terra_sdk.core.block import Block
from terra_sdk.core.tx import AuthInfo, Fee, SignerData, SignMode, Tx, TxBody, TxInfo
from terra_sdk.util.hash import hash_amino
from terra_sdk.util.json import JSONSerializable
from . import tendermint
from ._base import BaseAsyncAPI, sync_bind
__all__ = ["AsyncTxAPI", "TxAPI", "BroadcastOptions", "CreateTxOptions"]
@attr.s
class SignerOptions:
    """SignerOptions specifies information about signers

    Args:
        address (AccAddress): address of the signer
        sequence (int, optional): nonce of the messages from the signer
        public_key (PublicKey, optional): signer's PublicKey
    """

    # Account address of the signer.
    address: AccAddress = attr.ib()
    # When left as None, the sequence is looked up on-chain (see
    # AsyncTxAPI.create) before the transaction is built.
    sequence: Optional[int] = attr.ib(default=None)
    # When left as None, the public key is looked up on-chain (see
    # AsyncTxAPI.create) before the transaction is built.
    public_key: Optional[PublicKey] = attr.ib(default=None)
@attr.s
class CreateTxOptions:
    """Options controlling how an unsigned transaction is created.

    Args:
        msgs (List[Msg]): list of messages to include
        fee (Optional[Fee], optional): transaction fee. If ``None``, will be estimated.
            See more on `fee estimation`_.
        memo (str, optional): optional short string to include with transaction.
        gas (str, optional): gas limit; ``None``, ``"auto"`` or ``0`` triggers
            simulation-based gas estimation.
        gas_prices (Coins.Input, optional): gas prices for fee estimation.
        gas_adjustment (Numeric.Input, optional): gas adjustment for fee estimation.
        fee_denoms (List[str], optional): list of denoms to use for fee after estimation.
        account_number (int, optional): account number (overrides blockchain query if
            provided)
        sequence (int, optional): sequence (overrides blockchain query if provided)
        timeout_height (int, optional): specifies a block timeout height to prevent the
            tx from being committed past a certain height.
        sign_mode: (SignMode, optional): SignMode.SIGN_MODE_DIRECT by default.
            multisig needs SignMode.SIGN_MODE_LEGACY_AMINO_JSON.
    """

    msgs: List[Msg] = attr.ib()
    fee: Optional[Fee] = attr.ib(default=None)
    memo: Optional[str] = attr.ib(default=None)
    gas: Optional[str] = attr.ib(default=None)
    gas_prices: Optional[Coins.Input] = attr.ib(default=None)
    gas_adjustment: Optional[Numeric.Output] = attr.ib(
        default=1, converter=Numeric.parse
    )
    fee_denoms: Optional[List[str]] = attr.ib(default=None)
    account_number: Optional[int] = attr.ib(default=None)
    sequence: Optional[int] = attr.ib(default=None)
    timeout_height: Optional[int] = attr.ib(default=None)
    sign_mode: Optional[SignMode] = attr.ib(default=None)
@attr.s
class BroadcastOptions:
    """Optional parameters accepted by the broadcast methods.

    NOTE(review): the visible ``_broadcast`` implementation does not read
    these fields — confirm whether they are consumed elsewhere.
    """

    sequences: Optional[List[int]] = attr.ib()
    fee_granter: Optional[AccAddress] = attr.ib(default=None)
""" deprecated
@attr.s
class TxSearchOption:
key: str = attr.ib()
value: Union[str, int] = attr.ib()
"""
@attr.s
class GasInfo:
    """Gas figures reported by the tx simulate endpoint."""

    gas_wanted: int = attr.ib(converter=int)
    gas_used: int = attr.ib(converter=int)
@attr.s
class EventAttribute:
    """Single key/value attribute attached to a transaction event."""

    key: str = attr.ib()
    value: str = attr.ib()
@attr.s
class Event:
    """Typed transaction event carrying a list of attributes."""

    type: str = attr.ib()
    attributes: List[EventAttribute] = attr.ib(converter=list)
@attr.s
class SimulateResult:
    """Result section of a transaction simulation response."""

    data: str = attr.ib()
    log: str = attr.ib()
    events: List[Event] = attr.ib(converter=list)
@attr.s
class SimulateResponse(JSONSerializable):
    """Response payload of the ``/cosmos/tx/v1beta1/simulate`` endpoint."""

    gas_info: GasInfo = attr.ib()
    result: SimulateResult = attr.ib()

    @classmethod
    def from_data(cls, data: dict):
        """Build a SimulateResponse from a decoded JSON dict.

        Note: the raw ``gas_info``/``result`` sub-dicts are stored as-is —
        they are not converted into GasInfo/SimulateResult instances, and
        callers index ``gas_info`` as a plain dict.
        """
        return cls(gas_info=data["gas_info"], result=data["result"])
class AsyncTxAPI(BaseAsyncAPI):
    """Asynchronous API for transactions: lookup, creation, fee estimation,
    encoding and broadcasting.

    Methods that call sibling coroutines do so through
    ``BaseAsyncAPI._try_await`` so they keep working on the synchronous
    ``TxAPI`` subclass, where those siblings are plain (non-async) methods.
    """

    async def tx_info(self, tx_hash: str) -> TxInfo:
        """Fetches information for an included transaction given a tx hash.

        Args:
            tx_hash (str): hash of transaction to lookup

        Returns:
            TxInfo: transaction info
        """
        # NOTE: return annotation fixed from Tx to TxInfo to match the value
        # actually produced below.
        res = await self._c._get(f"/cosmos/tx/v1beta1/txs/{tx_hash}")
        return TxInfo.from_data(res)

    async def create(
        self, signers: List[SignerOptions], options: CreateTxOptions
    ) -> Tx:
        """Create a new unsigned transaction, with helpful utilities such as
        lookup of sequence number and public key from the blockchain, plus fee
        estimation when no explicit fee is supplied.

        Args:
            signers (List[SignerOptions]): signer information; missing
                sequence numbers / public keys are fetched from the chain.
            options (CreateTxOptions): transaction parameters (messages,
                memo, fee, gas settings, ...)

        Returns:
            Tx: unsigned tx
        """
        opt = copy.deepcopy(options)

        signerData: List[SignerData] = []
        for signer in signers:
            seq = signer.sequence
            pubkey = signer.public_key
            if seq is None or pubkey is None:
                # Fill in whatever the caller did not provide from the chain.
                acc = await BaseAsyncAPI._try_await(
                    self._c.auth.account_info(signer.address)
                )
                if seq is None:
                    seq = acc.get_sequence()
                if pubkey is None:
                    pubkey = acc.get_public_key()
            signerData.append(SignerData(seq, pubkey))

        # create the fake fee
        if opt.fee is None:
            opt.fee = await BaseAsyncAPI._try_await(self.estimate_fee(signerData, opt))

        return Tx(
            TxBody(opt.msgs, opt.memo or "", opt.timeout_height or 0),
            AuthInfo([], opt.fee),
            "",
        )

    async def estimate_fee(
        self, signers: List[SignerOptions], options: CreateTxOptions
    ) -> Fee:
        """Estimates the proper fee to apply by simulating it within the node.

        Args:
            signers ([SignerOptions]): signers
            options (CreateTxOptions): transaction info to estimate fee

        Returns:
            Fee: estimated fee
        """
        gas_prices = options.gas_prices or self._c.gas_prices
        gas_adjustment = options.gas_adjustment or self._c.gas_adjustment

        gas_prices_coins = None
        if gas_prices:
            gas_prices_coins = Coins(gas_prices)
            if options.fee_denoms:
                # Restrict fee payment to the requested denominations.
                gas_prices_coins = gas_prices_coins.filter(
                    lambda c: c.denom in options.fee_denoms
                )
        tx_body = TxBody(messages=options.msgs, memo=options.memo or "")
        emptyCoins = Coins()
        emptyFee = Fee(0, emptyCoins)
        auth_info = AuthInfo([], emptyFee)

        # Build a throwaway tx with empty signatures purely for simulation.
        tx = Tx(tx_body, auth_info, [])
        tx.append_empty_signatures(signers)

        gas = options.gas
        if gas is None or gas == "auto" or gas == 0:
            opt = copy.deepcopy(options)
            opt.gas_adjustment = gas_adjustment
            # BUGFIX: estimate_gas is a coroutine on the async API; the
            # previous code stringified the un-awaited coroutine object.
            gas = str(await BaseAsyncAPI._try_await(self.estimate_gas(tx, opt)))

        fee_amount = gas_prices_coins.mul(gas).to_int_coins() if gas_prices_coins else Coins.from_str('0uusd')

        return Fee(Numeric.parse(gas), fee_amount, "", "")

    async def estimate_gas(self, tx: Tx, options: Optional[CreateTxOptions]) -> int:
        """Simulate ``tx`` on the node and return its adjusted gas consumption.

        Args:
            tx (Tx): transaction to simulate
            options (CreateTxOptions, optional): source of ``gas_adjustment``;
                falls back to the client-wide setting when absent.

        Returns:
            int: simulated ``gas_used`` multiplied by the gas adjustment
        """
        gas_adjustment = options.gas_adjustment if options else self._c.gas_adjustment

        # BUGFIX: encode() is a coroutine on the async API (and a plain
        # method on the sync subclass); previously the un-awaited coroutine
        # object was sent as the tx_bytes payload.
        tx_bytes = await BaseAsyncAPI._try_await(self.encode(tx))
        res = await self._c._post(
            "/cosmos/tx/v1beta1/simulate", {"tx_bytes": tx_bytes}
        )
        simulated = SimulateResponse.from_data(res)

        return int(Dec(gas_adjustment).mul(simulated.gas_info["gas_used"]))

    async def encode(self, tx: Tx) -> str:
        """Encode a Tx to base64 encoded proto string"""
        return base64.b64encode(tx.to_proto().SerializeToString()).decode()

    async def decode(self, tx: str) -> Tx:
        """Decode base64 encoded proto string to a Tx"""
        return Tx.from_bytes(base64.b64decode(tx))

    async def hash(self, tx: Tx) -> str:
        """Compute hash for a transaction.

        Args:
            tx (Tx): transaction to hash

        Returns:
            str: transaction hash
        """
        # BUGFIX: use _try_await so this also works on the sync subclass,
        # where encode() returns a plain str (awaiting a str raises).
        amino = await BaseAsyncAPI._try_await(self.encode(tx))
        return hash_amino(amino)

    async def _broadcast(
        self, tx: Tx, mode: str, options: BroadcastOptions = None
    ) -> dict:
        # BUGFIX: await the encode() coroutine instead of embedding the
        # coroutine object in the request payload.
        tx_bytes = await BaseAsyncAPI._try_await(self.encode(tx))
        data = {"tx_bytes": tx_bytes, "mode": mode}
        return await self._c._post("/cosmos/tx/v1beta1/txs", data)  # , raw=True)

    async def broadcast_sync(
        self, tx: Tx, options: BroadcastOptions = None
    ) -> SyncTxBroadcastResult:
        """Broadcasts a transaction using the ``sync`` broadcast mode.

        Args:
            tx (Tx): transaction to broadcast
            options (BroadcastOptions): broadcast options, optional

        Returns:
            SyncTxBroadcastResult: result
        """
        res = await self._broadcast(tx, "BROADCAST_MODE_SYNC", options)
        return SyncTxBroadcastResult(
            txhash=res.get("txhash"),
            raw_log=res.get("raw_log"),
            code=res.get("code"),
            codespace=res.get("codespace"),
        )

    async def broadcast_async(
        self, tx: Tx, options: BroadcastOptions = None
    ) -> AsyncTxBroadcastResult:
        """Broadcasts a transaction using the ``async`` broadcast mode.

        Args:
            tx (Tx): transaction to broadcast
            options (BroadcastOptions): broadcast options, optional

        Returns:
            AsyncTxBroadcastResult: result
        """
        res = await self._broadcast(tx, "BROADCAST_MODE_ASYNC", options)
        return AsyncTxBroadcastResult(
            txhash=res.get("txhash"),
        )

    async def broadcast(
        self, tx: Tx, options: BroadcastOptions = None
    ) -> BlockTxBroadcastResult:
        """Broadcasts a transaction using the ``block`` broadcast mode.

        Args:
            tx (Tx): transaction to broadcast
            options (BroadcastOptions): broadcast options, optional

        Returns:
            BlockTxBroadcastResult: result
        """
        res = await self._broadcast(tx, "BROADCAST_MODE_BLOCK", options)
        res = res["tx_response"]
        return BlockTxBroadcastResult(
            height=res.get("height") or 0,
            txhash=res.get("txhash"),
            raw_log=res.get("raw_log"),
            gas_wanted=res.get("gas_wanted") or 0,
            gas_used=res.get("gas_used") or 0,
            logs=res.get("logs"),
            code=res.get("code"),
            codespace=res.get("codespace"),
        )

    async def search(self, events: List[list], params: Optional[APIParams] = None) -> dict:
        """Searches for transactions given criteria.

        Args:
            events (List[list]): list of ``[key, value]`` event filters
            params (APIParams): optional parameters

        Returns:
            dict: transaction search results
        """
        actual_params = CIMultiDict()

        for event in events:
            if event[0] == "tx.height":
                # tx.height is numeric and must not be quoted.
                actual_params.add("events", f"{event[0]}={event[1]}")
            else:
                actual_params.add("events", f"{event[0]}='{event[1]}'")
        if params:
            for p in params:
                actual_params.add(p, params[p])

        res = await self._c._get("/cosmos/tx/v1beta1/txs", actual_params)
        return {
            "txs": [TxInfo.from_data(tx) for tx in res.get("tx_responses")],
            "pagination": res.get("pagination")
        }

    async def tx_infos_by_height(self, height: Optional[int] = None) -> List[Tx]:
        """Fetches the decoded transactions of a block by height, or latest.

        Args:
            height (int, optional): height to lookup. latest if height is None.

        Returns:
            List[Tx]: decoded transactions of the block
        """
        if height is None:
            x = "latest"
        else:
            x = height

        res = await self._c._get(f"/cosmos/base/tendermint/v1beta1/blocks/{x}")
        txs = res.get("block").get("data").get("txs")
        if len(txs) <= 0:
            return []
        # BUGFIX: decode() is a coroutine on the async API; await each result
        # instead of returning a list of coroutine objects.
        return [await BaseAsyncAPI._try_await(self.decode(tx)) for tx in txs]
class TxAPI(AsyncTxAPI):
    """Synchronous counterpart of :class:`AsyncTxAPI`.

    Each method is a blocking wrapper (via ``sync_bind``) around the
    corresponding coroutine on :class:`AsyncTxAPI`; the docstrings are
    copied over from the async implementations after each definition.
    """

    @sync_bind(AsyncTxAPI.tx_info)
    def tx_info(self, tx_hash: str) -> TxInfo:
        pass

    tx_info.__doc__ = AsyncTxAPI.tx_info.__doc__

    @sync_bind(AsyncTxAPI.create)
    def create(self, signers: List[SignerOptions], options: CreateTxOptions) -> Tx:
        pass

    create.__doc__ = AsyncTxAPI.create.__doc__

    @sync_bind(AsyncTxAPI.estimate_fee)
    def estimate_fee(
        self, signers: List[SignerOptions], options: CreateTxOptions
    ) -> Fee:
        pass

    estimate_fee.__doc__ = AsyncTxAPI.estimate_fee.__doc__

    @sync_bind(AsyncTxAPI.estimate_gas)
    def estimate_gas(
        self, tx: Tx, options: Optional[CreateTxOptions]
    ) -> int:
        # Annotation fixed: the async implementation returns an int (the
        # adjusted gas amount), not a SimulateResponse.
        pass

    estimate_gas.__doc__ = AsyncTxAPI.estimate_gas.__doc__

    @sync_bind(AsyncTxAPI.encode)
    def encode(self, tx: Tx) -> str:
        pass

    encode.__doc__ = AsyncTxAPI.encode.__doc__

    @sync_bind(AsyncTxAPI.decode)
    def decode(self, tx: str) -> Tx:
        pass

    decode.__doc__ = AsyncTxAPI.decode.__doc__

    @sync_bind(AsyncTxAPI.hash)
    def hash(self, tx: Tx) -> str:
        pass

    hash.__doc__ = AsyncTxAPI.hash.__doc__

    @sync_bind(AsyncTxAPI.broadcast_sync)
    def broadcast_sync(
        self, tx: Tx, options: BroadcastOptions = None
    ) -> SyncTxBroadcastResult:
        pass

    broadcast_sync.__doc__ = AsyncTxAPI.broadcast_sync.__doc__

    @sync_bind(AsyncTxAPI.broadcast_async)
    def broadcast_async(
        self, tx: Tx, options: BroadcastOptions = None
    ) -> AsyncTxBroadcastResult:
        pass

    broadcast_async.__doc__ = AsyncTxAPI.broadcast_async.__doc__

    @sync_bind(AsyncTxAPI.broadcast)
    def broadcast(
        self, tx: Tx, options: BroadcastOptions = None
    ) -> BlockTxBroadcastResult:
        pass

    broadcast.__doc__ = AsyncTxAPI.broadcast.__doc__

    @sync_bind(AsyncTxAPI.search)
    def search(self, events: List[list], params: Optional[APIParams] = None) -> dict:
        pass

    search.__doc__ = AsyncTxAPI.search.__doc__

    @sync_bind(AsyncTxAPI.tx_infos_by_height)
    def tx_infos_by_height(self, height: Optional[int] = None) -> List[Tx]:
        # Annotation fixed: the async implementation returns decoded Tx
        # objects, not TxInfo.
        pass

    tx_infos_by_height.__doc__ = AsyncTxAPI.tx_infos_by_height.__doc__
| 32.703947 | 135 | 0.630591 |
acf339a22c277f201eed39bfe4f8da466ffe2f95 | 54,106 | py | Python | utils/build_swift/build_swift/driver_arguments.py | httpgit12/jb4evea-16 | 48652077d89b20fc8fa9678749992f93e4d1526e | [
"Apache-2.0"
] | 15 | 2019-09-05T14:56:40.000Z | 2021-06-06T14:09:23.000Z | utils/build_swift/build_swift/driver_arguments.py | snehaa1989/swift | c56ae1e33640efc15cc6ebfcb7bf67297f1b12a3 | [
"Apache-2.0"
] | 8 | 2020-08-29T22:47:59.000Z | 2020-09-20T03:24:15.000Z | utils/build_swift/build_swift/driver_arguments.py | snehaa1989/swift | c56ae1e33640efc15cc6ebfcb7bf67297f1b12a3 | [
"Apache-2.0"
] | 2 | 2019-10-15T03:02:04.000Z | 2019-12-28T05:04:29.000Z | # This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import absolute_import, unicode_literals
import multiprocessing
import os
import android.adb.commands
from swift_build_support.swift_build_support import targets
from swift_build_support.swift_build_support.targets import \
StdlibDeploymentTarget
from . import argparse
from . import defaults
__all__ = [
'create_argument_parser',
]
class _ApplyDefaultsArgumentParser(argparse.ArgumentParser):
"""Wrapper class around the default ArgumentParser that allows for
post-processing the parsed argument namespace to apply default argument
transformations.
"""
def __init__(self, apply_defaults=None, *args, **kwargs):
self._apply_defaults = apply_defaults
super(_ApplyDefaultsArgumentParser, self).__init__(*args, **kwargs)
def parse_known_args(self, args=None, namespace=None):
args, argv = super(_ApplyDefaultsArgumentParser, self)\
.parse_known_args(args, namespace)
self._apply_defaults(args)
return args, argv
def _apply_default_arguments(args):
    """Preprocess argument namespace to apply default behaviors.

    Mutates ``args`` in place after parsing.  In order it:
    derives per-project build variants and assertion settings from the
    top-level ones; expands shorthand ``--skip-*`` flags into their
    device/simulator components; and propagates test implications
    (e.g. ``--validation-test`` implies ``--test``).

    Raises ValueError for the ``--*-all`` options that are unavailable
    in open-source Swift.
    """
    # Build cmark if any cmark-related options were specified.
    if (args.cmark_build_variant is not None):
        args.build_cmark = True

    # Build LLDB if any LLDB-related options were specified.
    if args.lldb_build_variant is not None or \
       args.lldb_assertions is not None or \
       args.lldb_build_with_xcode is not None:
        args.build_lldb = True

    # Set the default build variant.
    if args.build_variant is None:
        args.build_variant = 'Debug'

    # Each per-project variant falls back to the top-level build variant
    # unless explicitly overridden (e.g. via --debug-llvm).  Note cmark
    # follows the *Swift* variant, not the top-level one.
    if args.llvm_build_variant is None:
        args.llvm_build_variant = args.build_variant

    if args.swift_build_variant is None:
        args.swift_build_variant = args.build_variant

    if args.swift_stdlib_build_variant is None:
        args.swift_stdlib_build_variant = args.build_variant

    if args.cmark_build_variant is None:
        args.cmark_build_variant = args.swift_build_variant

    if args.lldb_build_variant is None:
        args.lldb_build_variant = args.build_variant

    # '0' means CMake; '1' means xcodebuild (see --lldb-build-with-xcode).
    if args.lldb_build_with_xcode is None:
        args.lldb_build_with_xcode = '0'

    if args.foundation_build_variant is None:
        args.foundation_build_variant = args.build_variant

    if args.libdispatch_build_variant is None:
        args.libdispatch_build_variant = args.build_variant

    if args.libicu_build_variant is None:
        args.libicu_build_variant = args.build_variant

    # Assertions are enabled by default.
    if args.assertions is None:
        args.assertions = True

    # Propagate the default assertions setting.
    if args.cmark_assertions is None:
        args.cmark_assertions = args.assertions

    if args.llvm_assertions is None:
        args.llvm_assertions = args.assertions

    if args.swift_assertions is None:
        args.swift_assertions = args.assertions

    if args.swift_stdlib_assertions is None:
        args.swift_stdlib_assertions = args.assertions

    if args.llbuild_assertions is None:
        args.llbuild_assertions = args.assertions

    if args.lldb_assertions is None:
        args.lldb_assertions = args.assertions

    # Set the default CMake generator.
    if args.cmake_generator is None:
        args.cmake_generator = 'Ninja'

    # --ios-all etc are not supported by open-source Swift.
    if args.ios_all:
        raise ValueError('error: --ios-all is unavailable in open-source '
                         'Swift.\nUse --ios to skip iOS device tests.')

    if args.tvos_all:
        raise ValueError('error: --tvos-all is unavailable in open-source '
                         'Swift.\nUse --tvos to skip tvOS device tests.')

    if args.watchos_all:
        raise ValueError('error: --watchos-all is unavailable in open-source '
                         'Swift.\nUse --watchos to skip watchOS device tests.')

    # --skip-{ios,tvos,watchos} or --skip-build-{ios,tvos,watchos} are
    # merely shorthands for --skip-build-{**os}-{device,simulator}
    if not args.ios or not args.build_ios:
        args.build_ios_device = False
        args.build_ios_simulator = False

    if not args.tvos or not args.build_tvos:
        args.build_tvos_device = False
        args.build_tvos_simulator = False

    if not args.watchos or not args.build_watchos:
        args.build_watchos_device = False
        args.build_watchos_simulator = False

    if not args.android or not args.build_android:
        args.build_android = False

    # --test-paths implies --test and/or --validation-test
    # depending on what directories/files have been specified.
    if args.test_paths:
        for path in args.test_paths:
            if path.startswith('test'):
                args.test = True
            elif path.startswith('validation-test'):
                args.test = True
                args.validation_test = True

    # --validation-test implies --test.
    if args.validation_test:
        args.test = True

    # --test-optimized implies --test.
    if args.test_optimized:
        args.test = True

    # --test-optimize-size implies --test.
    if args.test_optimize_for_size:
        args.test = True

    # --test-optimize-none-with-implicit-dynamic implies --test.
    if args.test_optimize_none_with_implicit_dynamic:
        args.test = True

    # If none of tests specified skip swift stdlib test on all platforms
    if not args.test and not args.validation_test and not args.long_test:
        args.test_linux = False
        args.test_freebsd = False
        args.test_cygwin = False
        args.test_osx = False
        args.test_ios = False
        args.test_tvos = False
        args.test_watchos = False
        args.test_android = False
        args.test_swiftpm = False
        args.test_swift_driver = False
        args.test_swiftsyntax = False
        args.test_indexstoredb = False
        args.test_sourcekitlsp = False
        args.test_skstresstester = False
        args.test_swiftformat = False
        args.test_swiftevolve = False
        args.test_toolchainbenchmarks = False

    # --skip-test-ios is merely a shorthand for host and simulator tests.
    if not args.test_ios:
        args.test_ios_host = False
        args.test_ios_simulator = False
    # --skip-test-tvos is merely a shorthand for host and simulator tests.
    if not args.test_tvos:
        args.test_tvos_host = False
        args.test_tvos_simulator = False
    # --skip-test-watchos is merely a shorthand for host and simulator
    # --tests.
    if not args.test_watchos:
        args.test_watchos_host = False
        args.test_watchos_simulator = False

    # --skip-build-{ios,tvos,watchos}-{device,simulator} implies
    # --skip-test-{ios,tvos,watchos}-{host,simulator}
    if not args.build_ios_device:
        args.test_ios_host = False
    if not args.build_ios_simulator:
        args.test_ios_simulator = False

    if not args.build_tvos_device:
        args.test_tvos_host = False
    if not args.build_tvos_simulator:
        args.test_tvos_simulator = False

    if not args.build_watchos_device:
        args.test_watchos_host = False
    if not args.build_watchos_simulator:
        args.test_watchos_simulator = False

    if not args.build_android:
        # If building natively on an Android host, allow running the test suite
        # without the NDK config.
        if not StdlibDeploymentTarget.Android.contains(StdlibDeploymentTarget
                                                       .host_target().name):
            args.test_android = False
            args.test_android_host = False

    if not args.test_android:
        args.test_android_host = False

    # Without --host-test, no on-device test targets run at all.
    if not args.host_test:
        args.test_ios_host = False
        args.test_tvos_host = False
        args.test_watchos_host = False
        args.test_android_host = False
def create_argument_parser():
"""Return a configured argument parser."""
# NOTE: USAGE, DESCRIPTION and EPILOG are defined at the bottom of the file
parser = _ApplyDefaultsArgumentParser(
apply_defaults=_apply_default_arguments,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=USAGE,
description=DESCRIPTION,
epilog=EPILOG)
builder = parser.to_builder()
# Prepare DSL functions
option = builder.add_option
set_defaults = builder.set_defaults
in_group = builder.in_group
mutually_exclusive_group = builder.mutually_exclusive_group
# Prepare DSL actions
append = builder.actions.append
store = builder.actions.store
store_true = builder.actions.store_true
store_false = builder.actions.store_false
store_int = builder.actions.store_int
store_path = builder.actions.store_path
toggle_true = builder.actions.toggle_true
toggle_false = builder.actions.toggle_false
unsupported = builder.actions.unsupported
# -------------------------------------------------------------------------
# Top-level options
option(['-n', '--dry-run'], store_true,
help='print the commands that would be executed, but do not '
'execute them')
option('--dump-config', toggle_true,
help='instead of building, write JSON to stdout containing '
'various values used to build in this configuration')
option('--legacy-impl', store_true('legacy_impl'),
help='use legacy implementation')
option('--build-runtime-with-host-compiler', toggle_true,
help='Use the host compiler, not the self-built one to compile the '
'Swift runtime')
option(['-i', '--ios'], store_true,
help='also build for iOS, but disallow tests that require an iOS '
'device')
option(['-I', '--ios-all'], store_true('ios_all'),
help='also build for iOS, and allow all iOS tests')
option(['--skip-local-build'], toggle_true('skip_local_build'),
help='set to skip building for the local platform')
option('--skip-ios', store_false('ios'),
help='set to skip everything iOS-related')
option('--tvos', toggle_true,
help='also build for tvOS, but disallow tests that require a tvos '
'device')
option('--tvos-all', toggle_true('tvos_all'),
help='also build for tvOS, and allow all tvOS tests')
option('--skip-tvos', store_false('tvos'),
help='set to skip everything tvOS-related')
option('--watchos', toggle_true,
help='also build for watchOS, but disallow tests that require an '
'watchOS device')
option('--watchos-all', toggle_true('watchos_all'),
help='also build for Apple watchOS, and allow all Apple watchOS '
'tests')
option('--skip-watchos', store_false('watchos'),
help='set to skip everything watchOS-related')
option('--maccatalyst', toggle_true,
help='Enable building Swift with macCatalyst support')
option('--maccatalyst-ios-tests', toggle_true,
help='When building for macCatalyst run tests with iOS-like '
'target triple')
option('--android', toggle_true,
help='also build for Android')
option('--swift-analyze-code-coverage', store,
choices=['false', 'not-merged', 'merged'],
# so CMake can see the inert mode as a false value
default=defaults.SWIFT_ANALYZE_CODE_COVERAGE,
help='enable code coverage analysis in Swift (false, not-merged, '
'merged).')
option('--build-subdir', store,
metavar='PATH',
help='name of the directory under $SWIFT_BUILD_ROOT where the '
'build products will be placed')
option('--install-prefix', store_path,
default=targets.install_prefix(),
help='The installation prefix. This is where built Swift products '
'(like bin, lib, and include) will be installed.')
option('--install-symroot', store_path,
help='the path to install debug symbols into')
option('--install-destdir', store_path,
help='the path to use as the filesystem root for the installation')
option('--install-all', toggle_true,
help='Assume all built products should be installed')
option(['-j', '--jobs'], store_int('build_jobs'),
default=multiprocessing.cpu_count(),
help='the number of parallel build jobs to use')
option('--darwin-xcrun-toolchain', store,
help='the name of the toolchain to use on Darwin')
option('--cmake', store_path(executable=True),
help='the path to a CMake executable that will be used to build '
'Swift')
option('--show-sdks', toggle_true,
help='print installed Xcode and SDK versions')
option('--extra-swift-args', append,
help='Pass through extra flags to swift in the form of a CMake '
'list "module_regexp;flag". Can be called multiple times to '
'add multiple such module_regexp flag pairs. All semicolons '
'in flags must be escaped with a "\\"')
option('--host-cc', store_path(executable=True),
help='the absolute path to CC, the "clang" compiler for the host '
'platform. Default is auto detected.')
option('--host-cxx', store_path(executable=True),
help='the absolute path to CXX, the "clang++" compiler for the '
'host platform. Default is auto detected.')
option('--cmake-c-launcher', store_path(executable=True),
default=os.environ.get('C_COMPILER_LAUNCHER', None),
help='the absolute path to set CMAKE_C_COMPILER_LAUNCHER')
option('--cmake-cxx-launcher', store_path(executable=True),
default=os.environ.get('CXX_COMPILER_LAUNCHER', None),
help='the absolute path to set CMAKE_CXX_COMPILER_LAUNCHER')
option('--host-lipo', store_path(executable=True),
help='the absolute path to lipo. Default is auto detected.')
option('--host-libtool', store_path(executable=True),
help='the absolute path to libtool. Default is auto detected.')
option('--distcc', toggle_true,
default=os.environ.get('USE_DISTCC') == '1',
help='use distcc in pump mode')
option('--enable-asan', toggle_true,
help='enable Address Sanitizer')
option('--enable-ubsan', toggle_true,
help='enable Undefined Behavior Sanitizer')
option('--enable-tsan', toggle_true,
help='enable Thread Sanitizer for swift tools')
option('--enable-tsan-runtime', toggle_true,
help='enable Thread Sanitizer on the swift runtime')
option('--enable-lsan', toggle_true,
help='enable Leak Sanitizer for swift tools')
option('--enable-sanitize-coverage', toggle_true,
help='enable sanitizer coverage for swift tools. Necessary for '
'fuzzing swiftc')
option('--compiler-vendor', store,
choices=['none', 'apple'],
default=defaults.COMPILER_VENDOR,
help='Compiler vendor name')
option('--clang-compiler-version', store,
type=argparse.ClangVersionType(),
metavar='MAJOR.MINOR.PATCH',
help='string that indicates a compiler version for Clang')
option('--clang-user-visible-version', store,
type=argparse.ClangVersionType(),
default=defaults.CLANG_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR.PATCH',
help='User-visible version of the embedded Clang and LLVM '
'compilers')
option('--swift-compiler-version', store,
type=argparse.SwiftVersionType(),
metavar='MAJOR.MINOR',
help='string that indicates a compiler version for Swift')
option('--swift-user-visible-version', store,
type=argparse.SwiftVersionType(),
default=defaults.SWIFT_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR',
help='User-visible version of the embedded Swift compiler')
option('--darwin-deployment-version-osx', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_OSX,
metavar='MAJOR.MINOR',
help='minimum deployment target version for OS X')
option('--darwin-deployment-version-ios', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_IOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for iOS')
option('--darwin-deployment-version-tvos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_TVOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for tvOS')
option('--darwin-deployment-version-watchos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_WATCHOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for watchOS')
option('--extra-cmake-options', append,
type=argparse.ShellSplitType(),
help='Pass through extra options to CMake in the form of comma '
'separated options "-DCMAKE_VAR1=YES,-DCMAKE_VAR2=/tmp". Can '
'be called multiple times to add multiple such options.')
option('--build-args', store,
type=argparse.ShellSplitType(),
default=[],
help='arguments to the build tool. This would be prepended to the '
'default argument that is "-j8" when CMake generator is '
'"Ninja".')
option('--verbose-build', toggle_true,
help='print the commands executed during the build')
option('--lto', store('lto_type'),
choices=['thin', 'full'],
const='full',
default=None,
metavar='LTO_TYPE',
help='use lto optimization on llvm/swift tools. This does not '
'imply using lto on the swift standard library or runtime. '
'Options: thin, full. If no optional arg is provided, full is '
'chosen by default')
option('--clang-profile-instr-use', store_path,
help='profile file to use for clang PGO')
option('--llvm-max-parallel-lto-link-jobs', store_int,
default=defaults.LLVM_MAX_PARALLEL_LTO_LINK_JOBS,
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling llvm')
option('--swift-tools-max-parallel-lto-link-jobs', store_int,
default=defaults.SWIFT_MAX_PARALLEL_LTO_LINK_JOBS,
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling swift tools.')
option('--disable-guaranteed-normal-arguments', store_true,
help='Disable guaranteed normal arguments')
option('--enable-stdlibcore-exclusivity-checking', store_true,
help='Enable exclusivity checking in stdlibCore')
option('--force-optimized-typechecker', store_true,
help='Force the type checker to be built with '
'optimization')
option('--lit-args', store,
default='-sv',
metavar='LITARGS',
help='lit args to use when testing')
option('--coverage-db', store_path,
help='coverage database to use when prioritizing testing')
option('--llvm-install-components', store,
default=defaults.llvm_install_components(),
help='A semi-colon split list of llvm components to install')
# -------------------------------------------------------------------------
in_group('Host and cross-compilation targets')
option('--host-target', store,
default=StdlibDeploymentTarget.host_target().name,
help='The host target. LLVM, Clang, and Swift will be built for '
'this target. The built LLVM and Clang will be used to '
'compile Swift for the cross-compilation targets.')
option('--cross-compile-hosts', append,
type=argparse.ShellSplitType(),
default=[],
help='A space separated list of targets to cross-compile host '
'Swift tools for. Can be used multiple times.')
option('--stdlib-deployment-targets', store,
type=argparse.ShellSplitType(),
default=None,
help='The targets to compile or cross-compile the Swift standard '
'library for. %(default)s by default.'
' Comma separated list: {}'.format(
' '.join(StdlibDeploymentTarget.get_target_names())))
option('--build-stdlib-deployment-targets', store,
type=argparse.ShellSplitType(),
default=['all'],
help='A space-separated list that filters which of the configured '
'targets to build the Swift standard library for, or "all".')
option('--swift-darwin-supported-archs', store,
metavar='ARCHS',
help='Semicolon-separated list of architectures to configure on '
'Darwin platforms. If left empty all default architectures '
'are configured.')
option('--swift-darwin-module-archs', store,
metavar='ARCHS',
help='Semicolon-separated list of architectures to configure Swift '
'module-only targets on Darwin platforms. These targets are '
'in addition to the full library targets.')
# -------------------------------------------------------------------------
in_group('Options to select projects')
option('--infer', toggle_true('infer_dependencies'),
help='Infer any downstream dependencies from enabled projects')
option(['-l', '--lldb'], toggle_true('build_lldb'),
help='build LLDB')
option(['-b', '--llbuild'], toggle_true('build_llbuild'),
help='build llbuild')
option(['--libcxx'], toggle_true('build_libcxx'),
help='build libcxx')
option(['-p', '--swiftpm'], toggle_true('build_swiftpm'),
help='build swiftpm')
option(['--install-swiftpm'], toggle_true('install_swiftpm'),
help='install swiftpm')
option(['--swiftsyntax'], toggle_true('build_swiftsyntax'),
help='build swiftSyntax')
option(['--skstresstester'], toggle_true('build_skstresstester'),
help='build the SourceKit stress tester')
option(['--swiftformat'], toggle_true('build_swiftformat'),
help='build swift-format')
option(['--swiftevolve'], toggle_true('build_swiftevolve'),
help='build the swift-evolve tool')
option(['--swift-driver'], toggle_true('build_swift_driver'),
help='build swift-driver')
option(['--indexstore-db'], toggle_true('build_indexstoredb'),
help='build IndexStoreDB')
option('--test-indexstore-db-sanitize-all',
toggle_true('test_indexstoredb_sanitize_all'),
help='run indexstore-db tests under all sanitizers')
option(['--sourcekit-lsp'], toggle_true('build_sourcekitlsp'),
help='build SourceKitLSP')
option('--test-sourcekit-lsp-sanitize-all',
toggle_true('test_sourcekitlsp_sanitize_all'),
help='run sourcekit-lsp tests under all sanitizers')
option('--install-swiftsyntax', toggle_true('install_swiftsyntax'),
help='install SwiftSyntax')
option('--swiftsyntax-verify-generated-files',
toggle_true('swiftsyntax_verify_generated_files'),
help='set to verify that the generated files in the source tree '
'match the ones that would be generated from current master')
option(['--install-sourcekit-lsp'], toggle_true('install_sourcekitlsp'),
help='install SourceKitLSP')
option(['--install-skstresstester'], toggle_true('install_skstresstester'),
help='install the SourceKit stress tester')
option(['--install-swift-driver'], toggle_true('install_swift_driver'),
help='install new Swift driver')
option(['--install-swiftevolve'], toggle_true('install_swiftevolve'),
help='install SwiftEvolve')
option(['--toolchain-benchmarks'],
toggle_true('build_toolchainbenchmarks'),
help='build Swift Benchmarks using swiftpm against the just built '
'toolchain')
option(['--swift-inspect'],
toggle_true('build_swift_inspect'),
help='build SwiftInspect using swiftpm against the just built '
'toolchain')
option('--xctest', toggle_true('build_xctest'),
help='build xctest')
option('--foundation', toggle_true('build_foundation'),
help='build foundation')
option('--libdispatch', toggle_true('build_libdispatch'),
help='build libdispatch')
option('--libicu', toggle_true('build_libicu'),
help='build libicu')
option('--playgroundsupport', toggle_true('build_playgroundsupport'),
help='build PlaygroundSupport')
option('--install-playgroundsupport',
toggle_true('install_playgroundsupport'),
help='install playground support')
option('--build-ninja', toggle_true,
help='build the Ninja tool')
option(['--build-libparser-only'], toggle_true('build_libparser_only'),
help='build only libParser for SwiftSyntax')
option('--skip-build-clang-tools-extra',
toggle_false('build_clang_tools_extra'),
default=True,
help='skip building clang-tools-extra as part of llvm')
# -------------------------------------------------------------------------
in_group('Extra actions to perform before or in addition to building')
option(['-c', '--clean'], store_true,
help='do a clean build')
option('--export-compile-commands', toggle_true,
help='generate compilation databases in addition to building')
option('--symbols-package', store_path,
help='if provided, an archive of the symbols directory will be '
'generated at this path')
# -------------------------------------------------------------------------
in_group('Build variant')
with mutually_exclusive_group():
set_defaults(build_variant='Debug')
option(['-d', '--debug'], store('build_variant'),
const='Debug',
help='build the Debug variant of everything (LLVM, Clang, '
'Swift host tools, target Swift standard libraries, LLDB) '
'(default is %(default)s)')
option(['-r', '--release-debuginfo'], store('build_variant'),
const='RelWithDebInfo',
help='build the RelWithDebInfo variant of everything (default '
'is %(default)s)')
option(['-R', '--release'], store('build_variant'),
const='Release',
help='build the Release variant of everything (default is '
'%(default)s)')
option(['--min-size-release'], store('build_variant'),
const='MinSizeRel',
help='build the MinSizeRel variant of everything (default is '
'%(default)s)')
# -------------------------------------------------------------------------
in_group('Override build variant for a specific project')
option('--debug-llvm', store('llvm_build_variant'),
const='Debug',
help='build the Debug variant of LLVM')
option('--debug-swift', store('swift_build_variant'),
const='Debug',
help='build the Debug variant of Swift host tools')
option('--debug-swift-stdlib', store('swift_stdlib_build_variant'),
const='Debug',
help='build the Debug variant of the Swift standard library and '
' SDK overlay')
option('--debug-lldb', store('lldb_build_variant'),
const='Debug',
help='build the Debug variant of LLDB')
option('--lldb-build-with-xcode', store('lldb_build_with_xcode'),
const='1',
help='build LLDB using xcodebuild, if possible')
option('--lldb-build-with-cmake', store('lldb_build_with_xcode'),
const='0',
help='build LLDB using CMake')
option('--debug-cmark', store('cmark_build_variant'),
const='Debug',
help='build the Debug variant of CommonMark')
option('--debug-foundation', store('foundation_build_variant'),
const='Debug',
help='build the Debug variant of Foundation')
option('--debug-libdispatch', store('libdispatch_build_variant'),
const='Debug',
help='build the Debug variant of libdispatch')
option('--debug-libicu', store('libicu_build_variant'),
const='Debug',
help='build the Debug variant of libicu')
# -------------------------------------------------------------------------
# Assertions group
with mutually_exclusive_group():
set_defaults(assertions=True)
# TODO: Convert to store_true
option(['-a', '--assertions'], store,
const=True,
help='enable assertions in all projects')
# TODO: Convert to store_false
option(['-A', '--no-assertions'], store('assertions'),
const=False,
help='disable assertions in all projects')
# -------------------------------------------------------------------------
in_group('Control assertions in a specific project')
option('--cmark-assertions', store,
const=True,
help='enable assertions in CommonMark')
option('--llvm-assertions', store,
const=True,
help='enable assertions in LLVM')
option('--no-llvm-assertions', store('llvm_assertions'),
const=False,
help='disable assertions in LLVM')
option('--swift-assertions', store,
const=True,
help='enable assertions in Swift')
option('--no-swift-assertions', store('swift_assertions'),
const=False,
help='disable assertions in Swift')
option('--swift-stdlib-assertions', store,
const=True,
help='enable assertions in the Swift standard library')
option('--no-swift-stdlib-assertions', store('swift_stdlib_assertions'),
const=False,
help='disable assertions in the Swift standard library')
option('--lldb-assertions', store,
const=True,
help='enable assertions in LLDB')
option('--no-lldb-assertions', store('lldb_assertions'),
const=False,
help='disable assertions in LLDB')
option('--llbuild-assertions', store,
const=True,
help='enable assertions in llbuild')
option('--no-llbuild-assertions', store('llbuild_assertions'),
const=False,
help='disable assertions in llbuild')
# -------------------------------------------------------------------------
in_group('Select the CMake generator')
set_defaults(cmake_generator=defaults.CMAKE_GENERATOR)
option(['-e', '--eclipse'], store('cmake_generator'),
const='Eclipse CDT4 - Ninja',
help="use CMake's Eclipse generator (%(default)s by default)")
option(['-m', '--make'], store('cmake_generator'),
const='Unix Makefiles',
help="use CMake's Makefile generator (%(default)s by default)")
option(['-x', '--xcode'], store('cmake_generator'),
const='Xcode',
help="use CMake's Xcode generator (%(default)s by default)")
# -------------------------------------------------------------------------
in_group('Run tests')
# NOTE: We can't merge -t and --test, because nargs='?' makes
# `-ti` to be treated as `-t=i`.
# FIXME: Convert to store_true action
option('-t', store('test', const=True),
help='test Swift after building')
option('--test', toggle_true,
help='test Swift after building')
option('-T', store('validation_test', const=True),
help='run the validation test suite (implies --test)')
option('--validation-test', toggle_true,
help='run the validation test suite (implies --test)')
# FIXME: Convert to store_true action
option('-o', store('test_optimized', const=True),
help='run the test suite in optimized mode too (implies --test)')
option('--test-optimized', toggle_true,
help='run the test suite in optimized mode too (implies --test)')
# FIXME: Convert to store_true action
option('-s', store('test_optimize_for_size', const=True),
help='run the test suite in optimize for size mode too '
'(implies --test)')
option('--test-optimize-for-size', toggle_true,
help='run the test suite in optimize for size mode too '
'(implies --test)')
# FIXME: Convert to store_true action
option('-y', store('test_optimize_none_with_implicit_dynamic', const=True),
help='run the test suite in optimize none with implicit dynamic'
' mode too (implies --test)')
option('--test-optimize-none-with-implicit-dynamic', toggle_true,
help='run the test suite in optimize none with implicit dynamic'
'mode too (implies --test)')
option('--long-test', toggle_true,
help='run the long test suite')
option('--stress-test', toggle_true,
help='run the stress test suite')
option('--host-test', toggle_true,
help='run executable tests on host devices (such as iOS or tvOS)')
option('--only-executable-test', toggle_true,
help='Only run executable tests. Does nothing if host-test is not '
'allowed')
option('--only-non-executable-test', toggle_true,
help='Only run non-executable tests.')
option('--test-paths', append,
type=argparse.ShellSplitType(),
help='run tests located in specific directories and/or files '
'(implies --test and/or --validation-test)')
option(['-B', '--benchmark'], store_true,
help='run the Swift Benchmark Suite after building')
option('--benchmark-num-o-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -O')
option('--benchmark-num-onone-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -Onone')
# We want to run the TSan (compiler-rt) libdispatch tests on Linux, where
# libdispatch is just another library and not available by default. To do
# so we build Clang/LLVM/libdispatch and use it to compile/run the TSan
# libdispatch tests.
option('--tsan-libdispatch-test', toggle_true,
help='Builds a new toolchain including the libdispatch C library. '
'Then re-builds the TSan runtime (compiler-rt) using this '
'freshly-built Clang and runs the TSan libdispatch tests.')
option('--skip-test-osx', toggle_false('test_osx'),
help='skip testing Swift stdlibs for Mac OS X')
option('--skip-test-linux', toggle_false('test_linux'),
help='skip testing Swift stdlibs for Linux')
option('--skip-test-freebsd', toggle_false('test_freebsd'),
help='skip testing Swift stdlibs for FreeBSD')
option('--skip-test-cygwin', toggle_false('test_cygwin'),
help='skip testing Swift stdlibs for Cygwin')
# -------------------------------------------------------------------------
in_group('Run build')
option('--build-swift-dynamic-stdlib', toggle_true,
default=True,
help='build dynamic variants of the Swift standard library')
option('--build-swift-static-stdlib', toggle_true,
help='build static variants of the Swift standard library')
option('--build-swift-dynamic-sdk-overlay', toggle_true,
default=True,
help='build dynamic variants of the Swift SDK overlay')
option('--build-swift-static-sdk-overlay', toggle_true,
help='build static variants of the Swift SDK overlay')
option('--build-swift-stdlib-unittest-extra', toggle_true,
help='Build optional StdlibUnittest components')
option(['-S', '--skip-build'], store_true,
help='generate build directory only without building')
option('--skip-build-linux', toggle_false('build_linux'),
help='skip building Swift stdlibs for Linux')
option('--skip-build-freebsd', toggle_false('build_freebsd'),
help='skip building Swift stdlibs for FreeBSD')
option('--skip-build-cygwin', toggle_false('build_cygwin'),
help='skip building Swift stdlibs for Cygwin')
option('--skip-build-osx', toggle_false('build_osx'),
help='skip building Swift stdlibs for MacOSX')
option('--skip-build-ios', toggle_false('build_ios'),
help='skip building Swift stdlibs for iOS')
option('--skip-build-ios-device', toggle_false('build_ios_device'),
help='skip building Swift stdlibs for iOS devices '
'(i.e. build simulators only)')
option('--skip-build-ios-simulator', toggle_false('build_ios_simulator'),
help='skip building Swift stdlibs for iOS simulator '
'(i.e. build devices only)')
option('--skip-build-tvos', toggle_false('build_tvos'),
help='skip building Swift stdlibs for tvOS')
option('--skip-build-tvos-device', toggle_false('build_tvos_device'),
help='skip building Swift stdlibs for tvOS devices '
'(i.e. build simulators only)')
option('--skip-build-tvos-simulator', toggle_false('build_tvos_simulator'),
help='skip building Swift stdlibs for tvOS simulator '
'(i.e. build devices only)')
option('--skip-build-watchos', toggle_false('build_watchos'),
help='skip building Swift stdlibs for watchOS')
option('--skip-build-watchos-device', toggle_false('build_watchos_device'),
help='skip building Swift stdlibs for watchOS devices '
'(i.e. build simulators only)')
option('--skip-build-watchos-simulator',
toggle_false('build_watchos_simulator'),
help='skip building Swift stdlibs for watchOS simulator '
'(i.e. build devices only)')
option('--skip-build-android', toggle_false('build_android'),
help='skip building Swift stdlibs for Android')
option('--skip-build-benchmarks', toggle_false('build_benchmarks'),
help='skip building Swift Benchmark Suite')
option('--build-external-benchmarks', toggle_true,
help='skip building Swift Benchmark Suite')
# -------------------------------------------------------------------------
in_group('Skip testing specified targets')
option('--skip-test-ios',
toggle_false('test_ios'),
help='skip testing all iOS targets. Equivalent to specifying both '
'--skip-test-ios-simulator and --skip-test-ios-host')
option('--skip-test-ios-simulator',
toggle_false('test_ios_simulator'),
help='skip testing iOS simulator targets')
option('--skip-test-ios-32bit-simulator',
toggle_false('test_ios_32bit_simulator'),
help='skip testing iOS 32 bit simulator targets')
option('--skip-test-ios-host',
toggle_false('test_ios_host'),
help='skip testing iOS device targets on the host machine (the '
'phone itself)')
option('--skip-test-tvos',
toggle_false('test_tvos'),
help='skip testing all tvOS targets. Equivalent to specifying both '
'--skip-test-tvos-simulator and --skip-test-tvos-host')
option('--skip-test-tvos-simulator',
toggle_false('test_tvos_simulator'),
help='skip testing tvOS simulator targets')
option('--skip-test-tvos-host',
toggle_false('test_tvos_host'),
help='skip testing tvOS device targets on the host machine (the '
'TV itself)')
option('--skip-test-watchos',
toggle_false('test_watchos'),
help='skip testing all tvOS targets. Equivalent to specifying both '
'--skip-test-watchos-simulator and --skip-test-watchos-host')
option('--skip-test-watchos-simulator',
toggle_false('test_watchos_simulator'),
help='skip testing watchOS simulator targets')
option('--skip-test-watchos-host',
toggle_false('test_watchos_host'),
help='skip testing watchOS device targets on the host machine (the '
'watch itself)')
option('--skip-test-android',
toggle_false('test_android'),
help='skip testing all Android targets.')
option('--skip-test-android-host',
toggle_false('test_android_host'),
help='skip testing Android device targets on the host machine (the '
'phone itself)')
option('--skip-clean-swiftpm', toggle_false('clean_swiftpm'),
help='skip cleaning up swiftpm')
option('--skip-clean-swift-driver', toggle_false('clean_swift_driver'),
help='skip cleaning up Swift driver')
option('--skip-test-swiftpm', toggle_false('test_swiftpm'),
help='skip testing swiftpm')
option('--skip-test-swift-driver', toggle_false('test_swift_driver'),
help='skip testing Swift driver')
option('--skip-test-swiftsyntax', toggle_false('test_swiftsyntax'),
help='skip testing SwiftSyntax')
option('--skip-test-indexstore-db', toggle_false('test_indexstoredb'),
help='skip testing indexstore-db')
option('--skip-test-sourcekit-lsp', toggle_false('test_sourcekitlsp'),
help='skip testing sourcekit-lsp')
option('--skip-test-playgroundsupport',
toggle_false('test_playgroundsupport'),
help='skip testing PlaygroundSupport')
option('--skip-test-skstresstester', toggle_false('test_skstresstester'),
help='skip testing the SourceKit Stress tester')
option('--skip-test-swiftformat', toggle_false('test_swiftformat'),
help='skip testing swift-format')
option('--skip-test-swiftevolve', toggle_false('test_swiftevolve'),
help='skip testing SwiftEvolve')
option('--skip-test-toolchain-benchmarks',
toggle_false('test_toolchainbenchmarks'),
help='skip testing toolchain benchmarks')
option('--skip-test-swift-inspect',
toggle_false('test_swift_inspect'),
help='skip testing swift_inspect')
# -------------------------------------------------------------------------
in_group('Build settings specific for LLVM')
option('--llvm-targets-to-build', store,
default='X86;ARM;AArch64;PowerPC;SystemZ;Mips',
help='LLVM target generators to build')
# -------------------------------------------------------------------------
in_group('Build settings for Android')
option('--android-ndk', store_path,
help='An absolute path to the NDK that will be used as a libc '
'implementation for Android builds')
option('--android-api-level', store,
default='21',
help='The Android API level to target when building for Android. '
'Currently only 21 or above is supported')
option('--android-ndk-gcc-version', store,
choices=['4.8', '4.9'],
default='4.9',
help='The GCC version to use when building for Android. Currently '
'only 4.9 is supported. %(default)s is also the default '
'value. This option may be used when experimenting with '
'versions of the Android NDK not officially supported by '
'Swift')
option('--android-icu-uc', store_path,
help='Path to libicuuc.so')
option('--android-icu-uc-include', store_path,
help='Path to a directory containing headers for libicuuc')
option('--android-icu-i18n', store_path,
help='Path to libicui18n.so')
option('--android-icu-i18n-include', store_path,
help='Path to a directory containing headers libicui18n')
option('--android-icu-data', store_path,
help='Path to libicudata.so')
option('--android-deploy-device-path', store_path,
default=android.adb.commands.DEVICE_TEMP_DIR,
help='Path on an Android device to which built Swift stdlib '
'products will be deployed. If running host tests, specify '
'the "{}" directory.'.format(
android.adb.commands.DEVICE_TEMP_DIR))
option('--android-arch', store,
choices=['armv7', 'aarch64'],
default='armv7',
help='The Android target architecture when building for Android. '
'Currently only armv7 and aarch64 are supported. '
'%(default)s is the default.')
# -------------------------------------------------------------------------
in_group('Experimental language features')
option('--enable-experimental-differentiable-programming', toggle_true,
default=True,
help='Enable experimental Swift differentiable programming language'
' features.')
option('--enable-experimental-concurrency', toggle_true,
default=True,
help='Enable experimental Swift concurrency model.')
# -------------------------------------------------------------------------
in_group('Unsupported options')
option('--build-jobs', unsupported)
option('--common-cmake-options', unsupported)
option('--only-execute', unsupported)
option('--skip-test-optimize-for-size', unsupported)
option('--skip-test-optimize-none-with-implicit-dynamic', unsupported)
option('--skip-test-optimized', unsupported)
# -------------------------------------------------------------------------
in_group('Build-script-impl arguments (for disambiguation)')
# We need to represent these options so that we can skip installing them if
# the user is running in install-all mode.
option('--skip-build-cmark', toggle_false('build_cmark'),
help='skip building cmark')
option('--skip-build-llvm', toggle_false('build_llvm'),
help='skip building llvm')
option('--skip-build-swift', toggle_false('build_swift'),
help='skip building swift')
# We need to list --skip-test-swift explicitly because otherwise argparse
# will auto-expand arguments like --skip-test-swift to the only known
# argument --skip-test-swiftevolve.
# These arguments are forwarded to impl_args in migration.py
option('--install-swift', toggle_true('impl_install_swift'))
option('--skip-test-swift', toggle_true('impl_skip_test_swift'))
# -------------------------------------------------------------------------
return builder.build()
# ----------------------------------------------------------------------------
USAGE = """
%(prog)s [-h | --help] [OPTION ...]
%(prog)s --preset=NAME [SUBSTITUTION ...]
"""
# One-paragraph tool description used as the argparse 'description' text.
DESCRIPTION = """
Use this tool to build, test, and prepare binary distribution archives of Swift
and related tools.
Builds Swift (and, optionally, LLDB), incrementally, optionally
testing it thereafter. Different build configurations are maintained in
parallel.
"""
EPILOG = """
Using option presets:
--preset-file=PATH load presets from the specified file
--preset=NAME use the specified option preset
The preset mode is mutually exclusive with other options. It is not
possible to add ad-hoc customizations to a preset. This is a deliberate
design decision. (Rationale: a preset is a certain important set of
options that we want to keep in a centralized location. If you need to
customize it, you should create another preset in a centralized location,
rather than scattering the knowledge about the build across the system.)
Presets support substitutions for controlled customizations. Substitutions
are defined in the preset file. Values for substitutions are supplied
using the name=value syntax on the command line.
Any arguments not listed are forwarded directly to Swift's
'build-script-impl'. See that script's help for details. The listed
build-script-impl arguments are only for disambiguation in the argument parser.
Environment variables
---------------------
This script respects a few environment variables, should you
choose to set them:
SWIFT_SOURCE_ROOT: a directory containing the source for LLVM, Clang, Swift.
If this script is located in a Swift
source directory, the location of SWIFT_SOURCE_ROOT will be
inferred if the variable is not set.
'build-script' expects the sources to be laid out in the following way:
$SWIFT_SOURCE_ROOT/llvm
/clang
/swift
/lldb (optional)
/llbuild (optional)
/swiftpm (optional, requires llbuild)
/swift-syntax (optional, requires swiftpm)
/swift-stress-tester (optional,
requires swift-syntax)
/compiler-rt (optional)
/swift-corelibs-xctest (optional)
/swift-corelibs-foundation (optional)
/swift-corelibs-libdispatch (optional)
/icu (optional)
SWIFT_BUILD_ROOT: a directory in which to create out-of-tree builds.
Defaults to "$SWIFT_SOURCE_ROOT/build/".
Preparing to run this script
----------------------------
See README.md for instructions on cloning Swift subprojects.
If you intend to use the -l, -L, --lldb, or --debug-lldb options.
That's it; you're ready to go!
Examples
--------
Given the above layout of sources, the simplest invocation of 'build-script' is
just:
[~/src/s]$ ./swift/utils/build-script
This builds LLVM, Clang, Swift and Swift standard library in debug mode.
All builds are incremental. To incrementally build changed files, repeat the
same 'build-script' command.
Typical uses of 'build-script'
------------------------------
To build everything with optimization without debug information:
[~/src/s]$ ./swift/utils/build-script -R
To run tests, add '-t':
[~/src/s]$ ./swift/utils/build-script -R -t
To run normal tests and validation tests, add '-T':
[~/src/s]$ ./swift/utils/build-script -R -T
To build LLVM+Clang with optimization without debug information, and a
debuggable Swift compiler:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift
To build a debuggable Swift standard library:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift-stdlib
iOS build targets are always configured and present, but are not built by
default. To build the standard library for OS X, iOS simulator and iOS device:
[~/src/s]$ ./swift/utils/build-script -R -i
To run OS X and iOS tests that don't require a device:
[~/src/s]$ ./swift/utils/build-script -R -i -t
To use 'make' instead of 'ninja', use '-m':
[~/src/s]$ ./swift/utils/build-script -m -R
To create Xcode projects that can build Swift, use '-x':
[~/src/s]$ ./swift/utils/build-script -x -R
Preset mode in build-script
---------------------------
All buildbots and automated environments use 'build-script' in *preset mode*.
In preset mode, the command line only specifies the preset name and allows
limited customization (extra output paths). The actual options come from
the selected preset in 'utils/build-presets.ini'. For example, to build like
the incremental buildbot, run:
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_incremental
To build with AddressSanitizer:
[~/src/s]$ ./swift/utils/build-script --preset=asan
To build a root for Xcode XYZ, '/tmp/xcode-xyz-root.tar.gz':
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_BNI_internal_XYZ \\
install_destdir="/tmp/install"
install_symroot="/tmp/symroot"
installable_package="/tmp/xcode-xyz-root.tar.gz"
If you have your own favorite set of options, you can create your own, local,
preset. For example, let's create a preset called 'ds' (which stands for
Debug Swift):
$ cat > ~/.swift-build-presets
[preset: ds]
release
debug-swift
debug-swift-stdlib
test
build-subdir=ds
To use it, specify the '--preset=' argument:
[~/src/s]$ ./swift/utils/build-script --preset=ds
./swift/utils/build-script: using preset 'ds', which expands to
./swift/utils/build-script --release --debug-swift --debug-swift-stdlib \
--test
--build-subdir=ds --
...
Existing presets can be found in `utils/build-presets.ini`
Philosophy
----------
While you can invoke CMake directly to build Swift, this tool will save you
time by taking away the mechanical parts of the process, providing you controls
for the important options.
For all automated build environments, this tool is regarded as *the* *only* way
to build Swift. This is not a technical limitation of the Swift build system.
It is a policy decision aimed at making the builds uniform across all
environments and easily reproducible by engineers who are not familiar with the
details of the setups of other systems or automated environments.
"""
| 40.62012 | 79 | 0.625698 |
acf339eb4ae4775f8411ab216dbda15d97f3f7c8 | 829 | py | Python | engine/core/run.py | pianomanx/pwndora | d3f676a05d000035fd1e2552fcdb390c887b5edb | [
"Apache-2.0"
] | 112 | 2022-01-06T22:12:00.000Z | 2022-03-28T02:11:38.000Z | engine/core/run.py | pianomanx/pwndora | d3f676a05d000035fd1e2552fcdb390c887b5edb | [
"Apache-2.0"
] | 4 | 2022-01-23T09:47:24.000Z | 2022-03-28T01:15:26.000Z | engine/core/run.py | pianomanx/pwndora | d3f676a05d000035fd1e2552fcdb390c887b5edb | [
"Apache-2.0"
] | 27 | 2021-12-29T16:14:49.000Z | 2022-03-27T18:09:27.000Z | from core.threadscan import Threadscan
from core.api import submit_report
from datetime import datetime
from config import Config
from loguru import logger
def launch_scanner(targets, threads, timeout, top_ports, all_ports, custom):
Discover = Threadscan(targets)
if top_ports:
Discover.set_ports(Config.TOP_PORTS)
elif all_ports:
Discover.set_ports(Config.PORTS)
elif custom:
Discover.set_ports(custom)
else:
logger.info("Please set port scan mode")
exit()
start = datetime.now()
Discover.start_threads(threads, timeout)
end = datetime.now()
exec_time = end-start
logger.info("Execution time: {}".format(exec_time))
found = Discover.get_total_found()
total = Discover.get_total()
submit_report(total,found,exec_time,start,end)
| 24.382353 | 76 | 0.708082 |
acf33b9449ce8a72d21e9ccb7599843ce44b1d1c | 17,000 | py | Python | intersight/model/virtualization_base_virtual_disk.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 5 | 2021-12-16T15:13:32.000Z | 2022-03-29T16:09:54.000Z | intersight/model/virtualization_base_virtual_disk.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 4 | 2022-01-25T19:05:51.000Z | 2022-03-29T20:18:37.000Z | intersight/model/virtualization_base_virtual_disk.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 2 | 2020-07-07T15:01:08.000Z | 2022-01-31T04:27:35.000Z | """
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import the referenced model classes lazily.

    Deferring these imports until after module load avoids circular-import
    problems between the generated model modules. Each imported class is
    published into this module's global namespace so later name lookups
    (e.g. in type maps) resolve normally.
    """
    from intersight.model.asset_device_registration_relationship import AssetDeviceRegistrationRelationship
    from intersight.model.cloud_aws_volume import CloudAwsVolume
    from intersight.model.cloud_base_volume import CloudBaseVolume
    from intersight.model.display_names import DisplayNames
    from intersight.model.mo_base_mo_relationship import MoBaseMoRelationship
    from intersight.model.mo_tag import MoTag
    from intersight.model.mo_version_context import MoVersionContext
    from intersight.model.virtualization_base_source_device import VirtualizationBaseSourceDevice
    from intersight.model.virtualization_base_virtual_disk_all_of import VirtualizationBaseVirtualDiskAllOf
    from intersight.model.virtualization_iwe_virtual_disk import VirtualizationIweVirtualDisk
    from intersight.model.virtualization_vmware_virtual_disk import VirtualizationVmwareVirtualDisk
    # Equivalent to assigning each name into globals() one by one.
    globals().update(
        AssetDeviceRegistrationRelationship=AssetDeviceRegistrationRelationship,
        CloudAwsVolume=CloudAwsVolume,
        CloudBaseVolume=CloudBaseVolume,
        DisplayNames=DisplayNames,
        MoBaseMoRelationship=MoBaseMoRelationship,
        MoTag=MoTag,
        MoVersionContext=MoVersionContext,
        VirtualizationBaseSourceDevice=VirtualizationBaseSourceDevice,
        VirtualizationBaseVirtualDiskAllOf=VirtualizationBaseVirtualDiskAllOf,
        VirtualizationIweVirtualDisk=VirtualizationIweVirtualDisk,
        VirtualizationVmwareVirtualDisk=VirtualizationVmwareVirtualDisk,
    )
class VirtualizationBaseVirtualDisk(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # Allowed enum values for the 'ClassId'/'ObjectType' discriminator
    # properties (concrete types instantiable from this abstract type).
    allowed_values = {
        ('class_id',): {
            'CLOUD.AWSVOLUME': "cloud.AwsVolume",
            'VIRTUALIZATION.IWEVIRTUALDISK': "virtualization.IweVirtualDisk",
            'VIRTUALIZATION.VMWAREVIRTUALDISK': "virtualization.VmwareVirtualDisk",
        },
        ('object_type',): {
            'CLOUD.AWSVOLUME': "cloud.AwsVolume",
            'VIRTUALIZATION.IWEVIRTUALDISK': "virtualization.IweVirtualDisk",
            'VIRTUALIZATION.VMWAREVIRTUALDISK': "virtualization.VmwareVirtualDisk",
        },
    }
    # No extra value validations (length/range/regex) are defined for this
    # model.
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'class_id': (str,),  # noqa: E501
            'object_type': (str,),  # noqa: E501
            'name': (str,),  # noqa: E501
            'size': (int,),  # noqa: E501
            'account_moid': (str,),  # noqa: E501
            'create_time': (datetime,),  # noqa: E501
            'domain_group_moid': (str,),  # noqa: E501
            'mod_time': (datetime,),  # noqa: E501
            'moid': (str,),  # noqa: E501
            'owners': ([str], none_type,),  # noqa: E501
            'shared_scope': (str,),  # noqa: E501
            'tags': ([MoTag], none_type,),  # noqa: E501
            'version_context': (MoVersionContext,),  # noqa: E501
            'ancestors': ([MoBaseMoRelationship], none_type,),  # noqa: E501
            'parent': (MoBaseMoRelationship,),  # noqa: E501
            'permission_resources': ([MoBaseMoRelationship], none_type,),  # noqa: E501
            'display_names': (DisplayNames,),  # noqa: E501
            'registered_device': (AssetDeviceRegistrationRelationship,),  # noqa: E501
        }
    # Maps each 'ClassId' discriminator value to the concrete model class
    # used when deserializing a payload of this abstract type.
    @cached_property
    def discriminator():
        lazy_import()
        val = {
            'cloud.AwsVolume': CloudAwsVolume,
            'cloud.BaseVolume': CloudBaseVolume,
            'virtualization.IweVirtualDisk': VirtualizationIweVirtualDisk,
            'virtualization.VmwareVirtualDisk': VirtualizationVmwareVirtualDisk,
        }
        if not val:
            return None
        return {'class_id': val}
    attribute_map = {
        'class_id': 'ClassId',  # noqa: E501
        'object_type': 'ObjectType',  # noqa: E501
        'name': 'Name',  # noqa: E501
        'size': 'Size',  # noqa: E501
        'account_moid': 'AccountMoid',  # noqa: E501
        'create_time': 'CreateTime',  # noqa: E501
        'domain_group_moid': 'DomainGroupMoid',  # noqa: E501
        'mod_time': 'ModTime',  # noqa: E501
        'moid': 'Moid',  # noqa: E501
        'owners': 'Owners',  # noqa: E501
        'shared_scope': 'SharedScope',  # noqa: E501
        'tags': 'Tags',  # noqa: E501
        'version_context': 'VersionContext',  # noqa: E501
        'ancestors': 'Ancestors',  # noqa: E501
        'parent': 'Parent',  # noqa: E501
        'permission_resources': 'PermissionResources',  # noqa: E501
        'display_names': 'DisplayNames',  # noqa: E501
        'registered_device': 'RegisteredDevice',  # noqa: E501
    }
    # Internal bookkeeping attributes that are set directly on the instance
    # instead of being routed through the composed-model property machinery.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])
    @convert_js_args_to_python_args
    def __init__(self, class_id, object_type, *args, **kwargs):  # noqa: E501
        """VirtualizationBaseVirtualDisk - a model defined in OpenAPI
        Args:
            class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data. The enum values provides the list of concrete types that can be instantiated from this abstract type.
            object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property. The enum values provides the list of concrete types that can be instantiated from this abstract type.
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            name (str): Name of the storage disk. Name must be unique within a Datastore.. [optional]  # noqa: E501
            size (int): Disk size represented in bytes.. [optional]  # noqa: E501
            account_moid (str): The Account ID for this managed object.. [optional]  # noqa: E501
            create_time (datetime): The time when this managed object was created.. [optional]  # noqa: E501
            domain_group_moid (str): The DomainGroup ID for this managed object.. [optional]  # noqa: E501
            mod_time (datetime): The time when this managed object was last modified.. [optional]  # noqa: E501
            moid (str): The unique identifier of this Managed Object instance.. [optional]  # noqa: E501
            owners ([str], none_type): [optional]  # noqa: E501
            shared_scope (str): Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.. [optional]  # noqa: E501
            tags ([MoTag], none_type): [optional]  # noqa: E501
            version_context (MoVersionContext): [optional]  # noqa: E501
            ancestors ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional]  # noqa: E501
            parent (MoBaseMoRelationship): [optional]  # noqa: E501
            permission_resources ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional]  # noqa: E501
            display_names (DisplayNames): [optional]  # noqa: E501
            registered_device (AssetDeviceRegistrationRelationship): [optional]  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        required_args = {
            'class_id': class_id,
            'object_type': object_type,
        }
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        composed_info = validate_get_composed_info(
            constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]
        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in kwargs.items():
            if var_name in unused_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        not self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
    @cached_property
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error beause the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        lazy_import()
        return {
          'anyOf': [
          ],
          'allOf': [
              VirtualizationBaseSourceDevice,
              VirtualizationBaseVirtualDiskAllOf,
          ],
          'oneOf': [
          ],
        }
| 53.797468 | 1,678 | 0.651059 |
acf33bba526d80c296d6374cfc1a2e7978e3c133 | 1,943 | py | Python | main.py | mashais/python_lesson_2 | f3ea51270be7cf6dc618235e0472aa80243c8002 | [
"MIT"
] | null | null | null | main.py | mashais/python_lesson_2 | f3ea51270be7cf6dc618235e0472aa80243c8002 | [
"MIT"
] | null | null | null | main.py | mashais/python_lesson_2 | f3ea51270be7cf6dc618235e0472aa80243c8002 | [
"MIT"
] | null | null | null | # lesson_2_homework
# Задачи на циклы и операторы условия
#Задача 1
# Вывести на экран циклом 5 строк из нулей, причем каждая строка должна быть пронумерована
for i in range(1, 5 + 1):
print(i,'000000000')
# Задача 2
# Пользователь в цикле вводит 10 цифр. Найти количество введенных пользователем цифр 5.
print('Input Digits')
d = 5
count = 0
for i in range(1, 10 + 1):
m = int(input('Digit ' + str(i) + ':'))
while m > 0:
if m%10 == d:
count += 1
m = m // 10
print((count), ('Digits 5'))
# Задача 3
#Найти сумму чисел от 1 до 100. Полученный результат вывести на экран.
sum = 0
for i in range(1,101):
sum+=i
print(sum)
# Задача 4
# Найти произведение ряда чисел от 1 до 10. Полученный результат вывести на экран.
mult = 1
for i in range(1,11):
mult*=i
print(mult)
# Задача 5
# Вывести цифры числа на каждой строчке.
integer_number = 2129
while integer_number>0:
print(integer_number%10)
integer_number = integer_number//10
# Задача 6
# Найти сумму цифр числа.
print('Input Number:')
n = input()
sum = 0
for i in n:
sum += int(i)
print(sum)
# Задача 7
# Найти произведение цифр числа.
print('Input Number:')
n = input()
mult = 1
for i in n:
mult = mult * int(i)
print(mult)
# Задача 8
#Дать ответ на вопрос: есть ли среди цифр числа 5?
integer_number = 297363
while integer_number>0:
if integer_number%10 == 5:
print('Yes')
break
integer_number = integer_number//10
else:print('No')
# Задача 9
# Найти максимальную цифру в числе
a = int(input('Input number:'))
m = a%10
a = a // 10
while a> 0:
if a%10 > m:
m = a%10
a = a//10
print (m)
# Задача 10
# Найти количество цифр 5 в числе
integer_number = 55599
d = 5
count = 0
while integer_number > 0:
if integer_number%10 == d:
count += 1
integer_number = integer_number // 10
print(count, 'Digit 5') | 14.946154 | 90 | 0.629439 |
acf33c3d108f523f528cd118814abd58c6de5e3c | 17,403 | py | Python | src/cbuild/hooks/pre_pkg/099_scriptlets.py | chimera-linux/cports | 37888f8ded3e6afc1a42227a4019bc25e2456afc | [
"BSD-2-Clause"
] | 46 | 2021-06-10T02:27:32.000Z | 2022-03-27T11:33:24.000Z | src/cbuild/hooks/pre_pkg/099_scriptlets.py | chimera-linux/cports | 37888f8ded3e6afc1a42227a4019bc25e2456afc | [
"BSD-2-Clause"
] | 58 | 2021-07-03T13:58:20.000Z | 2022-03-13T16:45:35.000Z | src/cbuild/hooks/pre_pkg/099_scriptlets.py | chimera-linux/cports | 37888f8ded3e6afc1a42227a4019bc25e2456afc | [
"BSD-2-Clause"
] | 6 | 2021-07-04T10:46:40.000Z | 2022-01-09T00:03:59.000Z | from cbuild.core import paths, template
import io
import re
import shlex
import shutil
import pathlib
import subprocess
# fallback python version when we cannot determine it
def _get_pyver(pkg):
    """Return the "major.minor" version string of the "python" template.

    Used as a fallback when the interpreter version cannot be derived
    from the package contents themselves. Errors out through
    pkg.error() (which raises) when the template cannot be read or its
    version string is malformed.
    """
    rv = template.read_pkg(
        "python", pkg.rparent.profile().arch,
        True, False, 1, False, False, None,
        resolve = pkg.rparent, ignore_missing = True, ignore_errors = True
    )
    if not rv:
        pkg.error("failed getting python version")
    # the full version, e.g. "3.10.4"
    pv = rv.pkgver
    # reduce to just major/minor by dropping everything past the last dot
    ld = pv.rfind(".")
    if ld > 0:
        spv = pv[0:ld]
        if spv.find(".") < 0:
            # only one dot in total, so pv is already "major.minor";
            # the previous code returned the integer index here by mistake
            return pv
        else:
            return spv
    # should be impossible
    pkg.error(f"invalid python version ({pv})")
# hooks for xml/sgml registration
_xml_register_entries = r"""
local sgml_catalog=/etc/sgml/auto/catalog
local xml_catalog=/etc/xml/auto/catalog
[ -n "${sgml_entries}" -a ! -f "${sgml_catalog}" ] && return 0
[ -n "${xml_entries}" -a ! -f "${xml_catalog}" ] && return 0
if [ -n "${sgml_entries}" ]; then
echo -n "Registering SGML catalog entries... "
set -- ${sgml_entries}
while [ $# -gt 0 ]; do
/usr/bin/xmlcatmgr -sc ${sgml_catalog} add "$1" "$2" "$3"
shift; shift; shift;
done
echo "done."
fi
if [ -n "${xml_entries}" ]; then
echo -n "Registering XML catalog entries... "
set -- ${xml_entries}
while [ $# -gt 0 ]; do
/usr/bin/xmlcatmgr -c ${xml_catalog} add "$1" "$2" "$3"
shift; shift; shift;
done
echo "done."
fi
"""
_xml_unregister_entries = r"""
local sgml_catalog=/etc/sgml/auto/catalog
local xml_catalog=/etc/xml/auto/catalog
[ -n "${sgml_entries}" -a ! -f "${sgml_catalog}" ] && return 0
[ -n "${xml_entries}" -a ! -f "${xml_catalog}" ] && return 0
if [ -n "${sgml_entries}" ]; then
echo -n "Unregistering SGML catalog entries... "
set -- ${sgml_entries}
while [ $# -gt 0 ]; do
/usr/bin/xmlcatmgr -sc ${sgml_catalog} remove "$1" "$2" \
2>/dev/null
shift; shift; shift
done
echo "done."
fi
if [ -n "${xml_entries}" ]; then
echo -n "Unregistering XML catalog entries... "
set -- ${xml_entries}
while [ $# -gt 0 ]; do
/usr/bin/xmlcatmgr -c ${xml_catalog} remove "$1" "$2" \
2>/dev/null
shift; shift; shift
done
echo "done."
fi
"""
# hooks for account setup
_acct_setup = r"""
local USERADD USERMOD
[ -z "$system_users" -a -z "$system_groups" ] && return 0
if command -v useradd >/dev/null 2>&1; then
USERADD="useradd"
fi
if command -v usermod >/dev/null 2>&1; then
USERMOD="usermod"
fi
show_acct_details() {
echo " Account: $1"
echo " Description: '$2'"
echo " Homedir: '$3'"
echo " Shell: '$4'"
[ -n "$5" ] && echo " Additional groups: '$5'"
}
group_add() {
local _pretty_grname _grname _gid
if ! command -v groupadd >/dev/null 2>&1; then
echo "WARNING: cannot create $1 system group (missing groupadd)"
echo "The following group must be created manually: $1"
return 0
fi
_grname="${1%:*}"
_gid="${1##*:}"
[ "${_grname}" = "${_gid}" ] && _gid=
_pretty_grname="${_grname}${_gid:+ (gid: ${_gid})}"
groupadd -r ${_grname} ${_gid:+-g ${_gid}} >/dev/null 2>&1
case $? in
0) echo "Created ${_pretty_grname} system group." ;;
9) ;;
*) echo "ERROR: failed to create system group ${_pretty_grname}!"; return 1;;
esac
return 0
}
# System groups required by a package.
for grp in ${system_groups}; do
group_add $grp || return 1
done
# System user/group required by a package.
for acct in ${system_users}; do
_uname="${acct%:*}"
_uid="${acct##*:}"
[ "${_uname}" = "${_uid}" ] && _uid=
eval homedir="\$${_uname}_homedir"
eval shell="\$${_uname}_shell"
eval descr="\$${_uname}_descr"
eval groups="\$${_uname}_groups"
eval pgroup="\$${_uname}_pgroup"
[ -z "$homedir" ] && homedir="/var/empty"
[ -z "$shell" ] && shell="/usr/bin/nologin"
[ -z "$descr" ] && descr="${_uname} user"
[ -n "$groups" ] && user_groups="-G $groups"
if [ -n "${_uid}" ]; then
use_id="-u ${_uid} -g ${pgroup:-${_uid}}"
_pretty_uname="${_uname} (uid: ${_uid})"
else
use_id="-g ${pgroup:-${_uname}}"
_pretty_uname="${_uname}"
fi
if [ -z "$USERADD" -o -z "$USERMOD" ]; then
echo "WARNING: cannot create ${_uname} system account (missing useradd or usermod)"
echo "The following system account must be created:"
show_acct_details "${_pretty_uname}" "${descr}" "${homedir}" "${shell}" "${groups}"
continue
fi
group_add ${pgroup:-${acct}} || return 1
${USERADD} -c "${descr}" -d "${homedir}" \
${use_id} ${pgroup:+-N} -s "${shell}" \
${user_groups} -r ${_uname} >/dev/null 2>&1
case $? in
0)
echo "Created ${_pretty_uname} system user."
${USERMOD} -L ${_uname} >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "WARNING: unable to lock password for ${_uname} system account"
fi
;;
9)
${USERMOD} -c "${descr}" -d "${homedir}" \
-s "${shell}" -g "${pgroup:-${_uname}}" \
${user_groups} ${_uname} >/dev/null 2>&1
if [ $? -eq 0 ]; then
echo "Updated ${_uname} system user."
else
echo "WARNING: unable to modify ${_uname} system account"
echo "Please verify that account is compatible with these settings:"
show_acct_details "${_pretty_uname}" \
"${descr}" "${homedir}" "${shell}" "${groups}"
continue
fi
;;
*)
echo "ERROR: failed to create system user ${_pretty_uname}!"
return 1
;;
esac
done
"""
_acct_drop = r"""
local USERMOD
[ -z "$system_users" ] && return 0
if command -v usermod >/dev/null 2>&1; then
USERMOD="usermod"
fi
for acct in ${system_users}; do
_uname="${acct%:*}"
comment="$( (getent passwd "${_uname}" | cut -d: -f5 | head -n1) 2>/dev/null )"
comment="${comment:-user} - removed package ${1}"
if [ -z "$USERMOD" ]; then
echo "WARNING: cannot disable ${_uname} system user (missing usermod)"
continue
fi
${USERMOD} -L -d /var/empty -s /usr/bin/false \
-c "${comment}" ${_uname} >/dev/null 2>&1
if [ $? -eq 0 ]; then
echo "Disabled ${_uname} system user."
fi
done
"""
# python bytecode hooks
_py_compile = r"""
[ ! -x /usr/bin/python${pycompile_version} ] && return 0
[ -z "${pycompile_dirs}" -a -z "${pycompile_module}" ] && return 0
for f in ${pycompile_dirs}; do
echo "Byte-compiling python code in ${f}..."
python${pycompile_version} -m compileall -f -q ./${f} && \
python${pycompile_version} -O -m compileall -f -q ./${f}
done
for f in ${pycompile_module}; do
echo "Byte-compiling python${pycompile_version} code for module ${f}..."
if [ -d "usr/lib/python${pycompile_version}/site-packages/${f}" ]; then
python${pycompile_version} -m compileall -f -q \
usr/lib/python${pycompile_version}/site-packages/${f} && \
python${pycompile_version} -O -m compileall -f -q \
usr/lib/python${pycompile_version}/site-packages/${f}
else
python${pycompile_version} -m compileall -f -q \
usr/lib/python${pycompile_version}/site-packages/${f} && \
python${pycompile_version} -O -m compileall -f -q \
usr/lib/python${pycompile_version}/site-packages/${f}
fi
done
"""
_py_remove = r"""
[ ! -x /usr/bin/python${pycompile_version} ] && return 0
[ -z "${pycompile_dirs}" -a -z "${pycompile_module}" ] && return 0
for f in ${pycompile_dirs}; do
echo "Removing byte-compiled python${pycompile_version} files in ${f}..."
find ./${f} -type f -name \*.py[co] -delete 2>&1 >/dev/null
find ./${f} -type d -name __pycache__ -delete 2>&1 >/dev/null
done
for f in ${pycompile_module}; do
echo "Removing byte-compiled python${pycompile_version} code for module ${f}..."
if [ -d usr/lib/python${pycompile_version}/site-packages/${f} ]; then
find usr/lib/python${pycompile_version}/site-packages/${f} \
-type f -name \*.py[co] -delete 2>&1 >/dev/null
find usr/lib/python${pycompile_version}/site-packages/${f} \
-type d -name __pycache__ -delete 2>&1 >/dev/null
else
rm -f usr/lib/python${pycompile_version}/site-packages/${f%.py}.py[co]
fi
done
"""
# all known hook scriptlets

# Maps each hook name to the apk scriptlet targets its shell body is
# spliced into (register on install/upgrade, unregister/clean up on
# deinstall and before upgrade).
_hookscripts = {
    "xml_catalog": {
        "post-install": _xml_register_entries,
        "post-upgrade": _xml_register_entries,
        "pre-deinstall": _xml_unregister_entries,
        "pre-upgrade": _xml_unregister_entries,
    },
    "system_accounts": {
        "pre-install": _acct_setup,
        "pre-upgrade": _acct_setup,
        "post-deinstall": _acct_drop,
    },
    "pycompile": {
        "post-install": _py_compile,
        "post-upgrade": _py_compile,
        "pre-upgrade": _py_remove,
        "pre-deinstall": _py_remove,
    }
}
def _handle_catalogs(pkg, _add_hook):
sgml_entries = []
xml_entries = []
catvars = {}
for ent in pkg.sgml_entries:
if not isinstance(ent, tuple) or len(ent) != 3:
pkg.error("invalid SGML catalog entry")
sgml_entries.append(ent)
for ent in pkg.xml_entries:
if not isinstance(ent, tuple) or len(ent) != 3:
pkg.error("invalid XML catalog entry")
xml_entries.append(ent)
for catalog in pkg.sgml_catalogs:
sgml_entries.append(("CATALOG", catalog, "--"))
for catalog in pkg.xml_catalogs:
xml_entries.append(("nextCatalog", catalog, "--"))
if len(sgml_entries) > 0 or len(xml_entries) > 0:
if len(sgml_entries) > 0:
catvars["sgml_entries"] = " ".join(
map(lambda v: " ".join(v), sgml_entries)
)
if len(xml_entries) > 0:
catvars["xml_entries"] = " ".join(
map(lambda v: " ".join(v), xml_entries)
)
# fire
_add_hook("xml_catalog", catvars)
def _handle_accounts(pkg, _add_hook):
# handle system groups
if len(pkg.system_groups) > 0:
_add_hook("system_accounts", {
"system_groups": " ".join(pkg.system_groups)
})
# handle system users
if len(pkg.system_users) > 0:
evars = {}
usrs = []
for u in pkg.system_users:
uname = None
uid = None
uhome = "/var/empty"
ushell = "/usr/bin/nologin"
udesc = None
ugroups = []
# TODO: validation
if isinstance(u, dict):
uname = u["name"]
uid = u["id"]
# the form can be with or without id
if uid:
usrs.append(f"{uname}:{uid}")
else:
usrs.append(uname)
# optional fields
if "home" in u:
evars[f"{uname}_homedir"] = u["home"]
if "shell" in u:
evars[f"{uname}_shell"] = u["shell"]
if "desc" in u:
evars[f"{uname}_descr"] = u["desc"]
if "groups" in u:
evars[f"{uname}_groups"] = ",".join(u["groups"])
if "pgroup" in u:
evars[f"{uname}_pgroup"] = u["pgroup"]
else:
usrs.append(u)
# add the main var
evars["system_users"] = " ".join(usrs)
# add the hook
_add_hook("system_accounts", evars)
def _handle_python(pkg, _add_hook):
pyver = None
pymods = []
# python modules
for d in (pkg.destdir / "usr/lib").glob("python*"):
# weird?
if not d.is_dir():
continue
# dig up python version from the dir
vn = d.name[len("python"):]
# also weird, but skip
if not re.match(r"^[0-9]\.[0-9]+$", vn):
continue
# no site-packages, skip
d = d / "site-packages"
if not d.is_dir():
continue
# we know a version, make sure there are no multiples
if pyver:
pkg.error(f"multiple Python versions found ({pyver} and {vn})")
pyver = vn
if len(pkg.pycompile_modules) == 0:
# generate implicit
for f in d.iterdir():
# eliminate whatever we don't want
if f.match("*.egg-info"):
continue
elif f.match("*.dist-info"):
continue
elif f.match("*.so"):
continue
elif f.match("*.pth"):
continue
elif f.name == "README.txt":
continue
# should be ok now
pymods.append(f.name)
else:
pymods = pkg.pycompile_modules
if len(pymods) > 0 or len(pkg.pycompile_dirs) > 0:
# version may not be obvious, in those cases figure it out
if not pyver:
pyver = _get_pyver(pkg)
# export vars
pyvars = {
"pycompile_version": pyver
}
# dirs
if len(pkg.pycompile_dirs) > 0:
# validate first
for d in pkg.pycompile_dirs:
d = pathlib.Path(d)
# must not be absolute
if d.is_absolute():
pkg.error("absolute pycompile_dirs specified")
# must exist
if not (pkg.destdir / d).is_dir():
pkg.error("non-existent pycompile_dirs specified")
# put into vars
pyvars["pycompile_dirs"] = " ".join(pkg.pycompile_dirs)
# modules
if len(pymods) > 0:
pyvars["pycompile_module"] = " ".join(pymods)
# add the hook
_add_hook("pycompile", pyvars)
def invoke(pkg):
    """Assemble the package's apk scriptlets.

    Combines the shell bodies of all hooks registered for this package
    (accounts, catalogs, python byte-compilation) with any user-provided
    per-target scriptlets from the template directory, then writes one
    executable /bin/sh script per non-empty target into the package's
    statedir "scriptlets" directory.
    """
    # base set of scriptlet targets; each accumulates shell text
    _hooks = {
        "pre-install": "",
        "pre-upgrade": "",
        "pre-deinstall": "",
        "post-install": "",
        "post-upgrade": "",
        "post-deinstall": "",
        "trigger": ""
    }
    # executable hooks to invoke, name -> env var dict
    _reghooks = {}

    def _add_hook(hookn, evars):
        # merge env vars if the same hook is registered twice
        if hookn in _reghooks:
            _reghooks[hookn].update(evars)
        else:
            _reghooks[hookn] = evars

    # handle individual hooks
    _handle_accounts(pkg, _add_hook)
    _handle_catalogs(pkg, _add_hook)
    _handle_python(pkg, _add_hook)

    # NOTE: the previous version computed an unused
    # paths.distdir() / "main/apk-chimera-hooks/files" here; removed.

    # add executable scriptlets
    for h in _reghooks:
        envs = _reghooks[h]
        # go through every target
        for tgt in _hookscripts[h]:
            if tgt not in _hooks:
                # this should never happen unless we are buggy
                pkg.error(f"unknown hook: {tgt}")
            # export env vars for the hook
            for e in envs:
                _hooks[tgt] += f"{e}={shlex.quote(envs[e])}\n"
            # export the scriptlet as function
            _hooks[tgt] += f"\n_{h}_invoke() " + "{\n"
            for l in io.StringIO(_hookscripts[h][tgt]):
                # empty lines
                if len(l.strip()) == 0:
                    _hooks[tgt] += "\n"
                    continue
                # add the line, indent as needed
                _hooks[tgt] += f"    {l.rstrip()}\n"
            # end the function
            _hooks[tgt] += "    return 0\n}\n"
            # insert the hook
            pkg.log(f"added hook '{h}' for scriptlet '{tgt}'")
            _hooks[tgt] += f"_{h}_invoke '{pkg.pkgname}' '{pkg.pkgver}'" + \
                " || exit $?\n"

    # add user scriptlets
    for h in _hooks:
        up = pkg.rparent.template_path / f"{pkg.pkgname}.{h}"
        if up.is_file():
            # read entire thing into the buffer
            sr = up.read_text()
            # strip shebang
            if sr.startswith("#!"):
                nl = sr.find("\n")
                if nl < 0:
                    # no newline found so it was just a comment
                    sr = ""
                else:
                    sr = sr[nl + 1:].strip()
            # append cleared up scriptlet
            if len(sr) > 0:
                _hooks[h] += "# package script\n"
                _hooks[h] += "set -e\n\n"
                _hooks[h] += sr
                # log
                pkg.log(f"added package scriptlet '{h}'")

    # set up scriptlet dir
    scdir = pkg.statedir / "scriptlets"
    if scdir.is_dir():
        # remove potential leftovers for this package
        for sc in scdir.glob(f"{pkg.pkgname}.*"):
            sc.unlink()
    else:
        scdir.mkdir()

    # generate
    for h in _hooks:
        s = _hooks[h].strip()
        # got nothing, do not generate
        if len(s) == 0:
            continue
        # for triggers, ensure we trigger on something
        if h == "trigger" and len(pkg.triggers) == 0:
            pkg.error("trigger scriptlet provided but no triggers")
        # create file
        with open(scdir / f"{pkg.pkgname}.{h}", "w") as sf:
            sf.write("#!/bin/sh\n\n")
            sf.write(s)
            sf.write("\n")
| 31.076786 | 91 | 0.529334 |
acf33c4b09bb3b808bdd50780be6ec53f217b6a8 | 2,007 | py | Python | blog_project/urls.py | mireille1999/Instagram | d3b08b74f0d2613ed113b57f52f9e3d957a6a498 | [
"Unlicense"
] | 13 | 2021-01-04T20:51:42.000Z | 2022-03-19T11:49:45.000Z | blog_project/urls.py | mireille1999/Instagram | d3b08b74f0d2613ed113b57f52f9e3d957a6a498 | [
"Unlicense"
] | 7 | 2020-12-29T11:31:44.000Z | 2022-03-12T00:53:07.000Z | blog_project/urls.py | mireille1999/Instagram | d3b08b74f0d2613ed113b57f52f9e3d957a6a498 | [
"Unlicense"
] | 1 | 2022-01-12T04:46:29.000Z | 2022-01-12T04:46:29.000Z | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include
from users import views as user_views
# Title shown in the Django admin header; the value lives in settings.
admin.site.site_header = settings.ADMIN_SITE_HEADER

urlpatterns = [
    # NOTE(review): admin is mounted under the non-default
    # "admin/site/" prefix -- presumably intentional, confirm.
    path('admin/site/', admin.site.urls),
    # The blog app owns the site root.
    path('', include('blog.urls', namespace='blog')),
    # Account management: registration, profile editing, follow toggling.
    path('account/register/', user_views.register, name='register'),
    path('account/profile-update/',user_views.updateProfile,name='profile-update'),
    path('account/follow-unfollow/<int:pk>/',user_views.userFollowUnfollow,name="follow-unfollow"),
    path('account/password-change/', user_views.change_password, name='change-password'),
    # Stock auth views rendered with project templates.
    path('account/login/',auth_views.LoginView.as_view(template_name='users/login.html'),name='login'),
    path('account/logout/',auth_views.LogoutView.as_view(template_name='users/logout.html'),name='logout'),
    # Password reset flow: request -> done -> confirm -> complete.
    path('account/password-reset/',auth_views.PasswordResetView.as_view(template_name='users/password_reset.html'),name='password_reset'),
    path('account/password-reset/done/',auth_views.PasswordResetDoneView.as_view(template_name='users/password_reset_done.html'),name='password_reset_done'),
    path('account/password-reset-confirm/<uidb64>/<token>/',auth_views.PasswordResetConfirmView.as_view(template_name='users/password_reset_confirm.html'),name='password_reset_confirm'),
    path('account/password-reset-complete/',auth_views.PasswordResetCompleteView.as_view(template_name='users/password_reset_complete.html'),name='password_reset_complete'),
    path('account/community/', include('footer.urls', namespace='community')),
    # Catch-all single-segment profile URL; keep it late so it does not
    # shadow more specific single-segment routes declared above.
    path('<str:username>/', user_views.profile, name='profile'),
    # Username availability check (two segments, so it is not shadowed
    # by the single-segment catch-all above).
    path('validate/username/', user_views.validate_username,name='validate-username'),
]

# Serve user-uploaded media through Django only during development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
handler404 = 'footer.views.error_404_view' | 62.71875 | 188 | 0.772795 |
acf33e530328cdbb18eb1c20fec3f8ee3c18adbb | 956 | py | Python | src/u_xx_2_CD.py | mbarzegary/finite-element-intro | 47ef0a3592b823ae71a874ee35850114f16b6d8b | [
"MIT"
] | 8 | 2021-01-26T13:18:02.000Z | 2022-02-14T15:20:11.000Z | src/u_xx_2_CD.py | mbarzegary/finite-element-intro | 47ef0a3592b823ae71a874ee35850114f16b6d8b | [
"MIT"
] | null | null | null | src/u_xx_2_CD.py | mbarzegary/finite-element-intro | 47ef0a3592b823ae71a874ee35850114f16b6d8b | [
"MIT"
] | 2 | 2021-08-05T23:14:15.000Z | 2021-10-05T10:22:29.000Z | from sympy import *
# Symbolic Galerkin computation: basis psi_i(x) = (1 - x)**(i + 1) on
# (0, 1), right-hand side f = 2, with a lifting term D*x and a boundary
# constant C entering the load vector below (presumably the conditions
# are u'(0) = C and u(1) = D -- the BC prints at the end check these).
x, C, D = symbols('x C D')
i, j = symbols('i j', integer=True, positive=True)

# Basis functions; they vanish at x = 1, so the value there is carried
# by the D*x lifting term.
psi_i = (1-x)**(i+1)
psi_j = psi_i.subs(i, j)

# Coefficient matrix entry: integral of psi_i' * psi_j' over (0, 1).
integrand = diff(psi_i, x)*diff(psi_j, x)
integrand = simplify(integrand)
A_ij = integrate(integrand, (x, 0, 1))
A_ij = simplify(A_ij)
print(('A_ij:', A_ij))

f = 2
# Load vector: source term minus the lifting-function contribution
# minus the boundary term C*psi_i(0).
b_i = integrate(f*psi_i, (x, 0, 1)) - \
    integrate(diff(D*x, x)*diff(psi_i, x), (x, 0, 1)) - \
    C*psi_i.subs(x, 0)
b_i = simplify(b_i)
print(('b_i:', b_i))

# Assemble and solve the (N+1) x (N+1) linear system.
N = 1
A = zeros(N+1, N+1)
b = zeros(N+1)
print(('fresh b:', b))
for r in range(N+1):
    for s in range(N+1):
        A[r,s] = A_ij.subs(i, r).subs(j, s)
    b[r,0] = b_i.subs(i, r)
print(('A:', A))
print(('b:', b[:,0]))
c = A.LUsolve(b)
print(('c:', c[:,0]))

# Reconstruct u = sum(c_r * psi_r) + D*x and verify the second
# derivative and both boundary values symbolically.
u = sum(c[r,0]*psi_i.subs(i, r) for r in range(N+1)) + D*x
print(('u:', simplify(u)))
print(("u'':", simplify(diff(u, x, x))))
print(('BC x=0:', simplify(diff(u, x).subs(x, 0))))
print(('BC x=1:', simplify(u.subs(x, 1))))
| 28.117647 | 59 | 0.553347 |
acf33f5f84acaf13eb256561689e14a504fb29cc | 7,064 | py | Python | gdtoolkit/formatter/__main__.py | Aareon/godot-gdscript-toolkit | 92a729aa21202c0faec59b913b73414a4e5a6f38 | [
"MIT"
] | null | null | null | gdtoolkit/formatter/__main__.py | Aareon/godot-gdscript-toolkit | 92a729aa21202c0faec59b913b73414a4e5a6f38 | [
"MIT"
] | null | null | null | gdtoolkit/formatter/__main__.py | Aareon/godot-gdscript-toolkit | 92a729aa21202c0faec59b913b73414a4e5a6f38 | [
"MIT"
] | null | null | null | """GDScript formatter
Uncompromising GDScript code formatter. The only configurable thing is
max line length allowed. The rest will be taken care of by gdformat in a one,
consistent way.
Usage:
gdformat <path>... [options]
Options:
-c --check Don't write the files back,
just check if formatting is possible.
-l --line-length=<int> How many characters per line to allow.
[default: 100]
-h --help Show this screen.
--version Show version.
Examples:
echo 'tool' | gdformat - # reads from STDIN
"""
import sys
import pkg_resources
import os
from typing import List
from docopt import docopt
from gdtoolkit.formatter import format_code, check_formatting_safety
from gdtoolkit.parser import parser
def find_files_from(paths: List[str]) -> List[str]:
    """Expand the given paths into a flat list of file paths.

    Non-directory arguments (including the special "-" stdin marker)
    are passed through untouched; directories are walked recursively
    for ``*.gd`` files, skipping ``.git`` trees.
    """
    skip_dirs = {".git"}
    collected = []
    for entry in paths:
        if not os.path.isdir(entry):
            collected.append(entry)
            continue
        for dirpath, dirnames, filenames in os.walk(entry, topdown=True):
            # prune excluded directories in place so os.walk skips them
            dirnames[:] = [d for d in dirnames if d not in skip_dirs]
            for fname in filenames:
                if fname.endswith(".gd"):
                    collected.append(os.path.join(dirpath, fname))
    return collected
# TODO: tests
def _parse_and_format(code, line_length):
    """Parse ``code`` and format it.

    Returns a (code_parse_tree, comment_parse_tree, formatted_code)
    triple so callers can reuse the parse trees for the safety check.
    """
    code_parse_tree = parser.parse(code, gather_metadata=True)
    comment_parse_tree = parser.parse_comments(code)
    formatted_code = format_code(
        gdscript_code=code,
        max_line_length=line_length,
        parse_tree=code_parse_tree,
        comment_parse_tree=comment_parse_tree,
    )
    return code_parse_tree, comment_parse_tree, formatted_code


def _verify_safety(code, formatted_code, line_length, code_parse_tree, comment_parse_tree):
    """Raise if formatting would change the program's meaning."""
    check_formatting_safety(
        code,
        formatted_code,
        max_line_length=line_length,
        given_code_parse_tree=code_parse_tree,
        given_code_comment_parse_tree=comment_parse_tree,
    )


def _reporting_errors(file_path, func, *args, **kwargs):
    """Call ``func``, reporting ``file_path`` on stderr if it raises."""
    try:
        return func(*args, **kwargs)
    except Exception as e:
        print(
            "exception during formatting of {}".format(file_path),
            file=sys.stderr,
        )
        raise e


def _check_files(files, line_length):
    """--check mode: report files that would change; exit 0 or 1."""
    formattable_files = set()
    for file_path in files:
        with open(file_path, "r") as fh:
            code = fh.read()
        code_parse_tree, comment_parse_tree, formatted_code = _reporting_errors(
            file_path, _parse_and_format, code, line_length
        )
        if code != formatted_code:
            print("would reformat {}".format(file_path), file=sys.stderr)
            _reporting_errors(
                file_path,
                _verify_safety,
                code,
                formatted_code,
                line_length,
                code_parse_tree,
                comment_parse_tree,
            )
            formattable_files.add(file_path)
    if len(formattable_files) == 0:
        print(
            "{} file{} would be left unchanged".format(
                len(files), "s" if len(files) != 1 else ""
            )
        )
        sys.exit(0)
    formattable_num = len(formattable_files)
    left_unchanged_num = len(files) - formattable_num
    print(
        "{} file{} would be reformatted, {} file{} would be left unchanged.".format(
            formattable_num,
            "s" if formattable_num != 1 else "",
            left_unchanged_num,
            "s" if left_unchanged_num != 1 else "",
        ),
        file=sys.stderr,
    )
    sys.exit(1)


def _format_files(files, line_length):
    """Default mode: rewrite files in place and print a summary."""
    formatted_files = set()
    for file_path in files:
        with open(file_path, "r+") as fh:
            code = fh.read()
            code_parse_tree, comment_parse_tree, formatted_code = _reporting_errors(
                file_path, _parse_and_format, code, line_length
            )
            if code != formatted_code:
                # make sure the change is semantics-preserving before
                # touching the file on disk
                _reporting_errors(
                    file_path,
                    _verify_safety,
                    code,
                    formatted_code,
                    line_length,
                    code_parse_tree,
                    comment_parse_tree,
                )
                print("reformatted {}".format(file_path))
                formatted_files.add(file_path)
                fh.seek(0)
                fh.truncate(0)
                fh.write(formatted_code)
    reformatted_num = len(formatted_files)
    left_unchanged_num = len(files) - reformatted_num
    print(
        "{} file{} reformatted, {} file{} left unchanged.".format(
            reformatted_num,
            "s" if reformatted_num != 1 else "",
            left_unchanged_num,
            "s" if left_unchanged_num != 1 else "",
        )
    )


def main():
    """CLI entry point: dispatch between stdin, --check and in-place modes."""
    arguments = docopt(
        __doc__,
        version="gdformat {}".format(
            pkg_resources.get_distribution("gdtoolkit").version
        ),
    )
    files: List[str] = find_files_from(arguments["<path>"])
    line_length = int(arguments["--line-length"])
    if files == ["-"]:
        # stdin mode: format once and write the result to stdout
        code = sys.stdin.read()
        code_parse_tree, comment_parse_tree, formatted_code = _parse_and_format(
            code, line_length
        )
        _verify_safety(
            code, formatted_code, line_length, code_parse_tree, comment_parse_tree
        )
        print(formatted_code, end="")
    elif arguments["--check"]:
        _check_files(files, line_length)
    else:
        _format_files(files, line_length)
if __name__ == "__main__":
main()
| 36.984293 | 88 | 0.502831 |
acf34080b4922d66315e9e91a6798a3c1d9b3580 | 305 | py | Python | scrapeconfig_server/scrapeconfig_api/jsonapi/registry.py | MikeKMiller/ScraperEngine | 45b14c74a4edf304ad51b02feb51d2e3c03c6c1a | [
"BSD-3-Clause"
] | null | null | null | scrapeconfig_server/scrapeconfig_api/jsonapi/registry.py | MikeKMiller/ScraperEngine | 45b14c74a4edf304ad51b02feb51d2e3c03c6c1a | [
"BSD-3-Clause"
] | null | null | null | scrapeconfig_server/scrapeconfig_api/jsonapi/registry.py | MikeKMiller/ScraperEngine | 45b14c74a4edf304ad51b02feb51d2e3c03c6c1a | [
"BSD-3-Clause"
] | null | null | null | from scrapeconfig_orm.exceptions import ImproperlyConfigured
__all__ = [
    'schemas',
    'get_schema',
]

# Global registry mapping schema type name -> registered schema.
schemas = {}


def get_schema(schema_type):
    """Return the schema registered for ``schema_type``.

    Raises ImproperlyConfigured when no schema was registered under
    that name. (The previous ``__all__`` exported the non-existent
    name 'schema', which broke ``from ... import *``.)
    """
    try:
        return schemas[schema_type]
    except KeyError:
        raise ImproperlyConfigured(
            u"No schema for type '{}' exists".format(schema_type))
| 17.941176 | 66 | 0.672131 |
acf34093a52702a70632f48a3ac962158512bf93 | 35,196 | py | Python | Server/src/virtualenv/Lib/codecs.py | ppyordanov/HCI_4_Future_Cities | 4dc7dc59acccf30357bde66524c2d64c29908de8 | [
"MIT"
] | null | null | null | Server/src/virtualenv/Lib/codecs.py | ppyordanov/HCI_4_Future_Cities | 4dc7dc59acccf30357bde66524c2d64c29908de8 | [
"MIT"
] | null | null | null | Server/src/virtualenv/Lib/codecs.py | ppyordanov/HCI_4_Future_Cities | 4dc7dc59acccf30357bde66524c2d64c29908de8 | [
"MIT"
] | null | null | null | """ codecs -- Python Codec Registry, API and helpers.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
""" # "
import __builtin__, sys
# ## Registry and builtin stateless codec functions
try:
from _codecs import *
except ImportError, why:
raise SystemError('Failed to load the builtin codecs: %s' % why)
__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
"BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
"BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
"strict_errors", "ignore_errors", "replace_errors",
"xmlcharrefreplace_errors",
"register_error", "lookup_error"]
### Constants

#
# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
# and its possible byte string values
# for UTF8/UTF16/UTF32 output and little/big endian machines
#
# (Python 2 byte strings: each \xNN escape below is a single byte.)

# UTF-8
BOM_UTF8 = '\xef\xbb\xbf'

# UTF-16, little endian
BOM_LE = BOM_UTF16_LE = '\xff\xfe'

# UTF-16, big endian
BOM_BE = BOM_UTF16_BE = '\xfe\xff'

# UTF-32, little endian
BOM_UTF32_LE = '\xff\xfe\x00\x00'

# UTF-32, big endian
BOM_UTF32_BE = '\x00\x00\xfe\xff'

# Pick the native-endianness aliases once, at import time.
if sys.byteorder == 'little':

    # UTF-16, native endianness
    BOM = BOM_UTF16 = BOM_UTF16_LE

    # UTF-32, native endianness
    BOM_UTF32 = BOM_UTF32_LE

else:

    # UTF-16, native endianness
    BOM = BOM_UTF16 = BOM_UTF16_BE

    # UTF-32, native endianness
    BOM_UTF32 = BOM_UTF32_BE

# Old broken names (don't use in new code)
BOM32_LE = BOM_UTF16_LE
BOM32_BE = BOM_UTF16_BE
BOM64_LE = BOM_UTF32_LE
BOM64_BE = BOM_UTF32_BE
### Codec base classes (defining the API)
class CodecInfo(tuple):
    """Codec details as returned by the codec registry.

    Behaves like the classic 4-tuple (encode, decode, streamreader,
    streamwriter) while also exposing every component, including the
    incremental codec classes and the codec name, as attributes.
    """

    def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
                incrementalencoder=None, incrementaldecoder=None, name=None):
        obj = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
        obj.name = name
        obj.encode = encode
        obj.decode = decode
        obj.incrementalencoder = incrementalencoder
        obj.incrementaldecoder = incrementaldecoder
        obj.streamwriter = streamwriter
        obj.streamreader = streamreader
        return obj

    def __repr__(self):
        return "<%s.%s object for encoding %s at 0x%x>" % (
            self.__class__.__module__, self.__class__.__name__,
            self.name, id(self))
class Codec:
    """Stateless encoder/decoder interface.

    Subclasses implement encode() and decode(); both take an ``errors``
    argument selecting the error handling scheme. The predefined values
    are:

        'strict' - raise a ValueError error (or a subclass)
        'ignore' - skip the offending character and continue
        'replace' - substitute a replacement character ('?' when
            encoding, the official U+FFFD REPLACEMENT CHARACTER when
            decoding with the builtin Unicode codecs)
        'xmlcharrefreplace' - XML character reference (encoding only)
        'backslashreplace' - backslashed escapes (encoding only)

    Further values can be added via register_error. Implementations
    must not keep state on the Codec instance (use the stream codecs
    for stateful conversion) and must accept zero length input,
    returning an empty object of the output type in that case.
    """

    def encode(self, input, errors='strict'):
        """Encode ``input`` and return (output object, length consumed)."""
        raise NotImplementedError

    def decode(self, input, errors='strict'):
        """Decode ``input`` and return (output object, length consumed).

        ``input`` must provide the read buffer interface; strings,
        buffer objects and memory mapped files all qualify.
        """
        raise NotImplementedError
class IncrementalEncoder(object):
    """Base class for encoders that process their input in chunks.

    Callers feed pieces of the input to encode(); the encoder keeps
    whatever state it needs between calls.
    """

    def __init__(self, errors='strict'):
        """Create the encoder.

        ``errors`` selects the error handling scheme; see the module
        docstring for the predefined values.
        """
        self.errors = errors
        self.buffer = ""

    def encode(self, input, final=False):
        """Encode ``input`` and return the resulting object."""
        raise NotImplementedError

    def reset(self):
        """Restore the encoder to its initial state."""

    def getstate(self):
        """Return the current state of the encoder."""
        return 0

    def setstate(self, state):
        """Restore a state previously returned by getstate()."""
class BufferedIncrementalEncoder(IncrementalEncoder):
    """IncrementalEncoder base class for codecs that must carry
    unencoded input over between calls to encode()."""

    def __init__(self, errors='strict'):
        IncrementalEncoder.__init__(self, errors)
        # unencoded input that is kept between calls to encode()
        self.buffer = ""

    def _buffer_encode(self, input, errors, final):
        """Encode ``input`` and return an (output, consumed) tuple.

        Subclasses must override this method.
        """
        raise NotImplementedError

    def encode(self, input, final=False):
        """Encode ``input``, prepending any leftovers from the previous
        call and retaining whatever could not be consumed."""
        pending = self.buffer + input
        (output, consumed) = self._buffer_encode(pending, self.errors, final)
        # keep unencoded input until the next call
        self.buffer = pending[consumed:]
        return output

    def reset(self):
        IncrementalEncoder.reset(self)
        self.buffer = ""

    def getstate(self):
        # a false value (0) stands for an empty buffer
        return self.buffer or 0

    def setstate(self, state):
        self.buffer = state or ""
class IncrementalDecoder(object):
    """Base class for decoders that consume their input in chunks.

    Pieces of the encoded input are fed to decode(); implementations
    keep whatever state is required between calls.
    """

    def __init__(self, errors='strict'):
        """Create the decoder.

        ``errors`` selects the error handling scheme; see the module
        docstring for the predefined values.
        """
        self.errors = errors

    def decode(self, input, final=False):
        """Decode ``input`` and return the resulting object."""
        raise NotImplementedError

    def reset(self):
        """Restore the decoder to its initial state."""

    def getstate(self):
        """Return the decoder state as (buffered_input, extra_info).

        ``buffered_input`` is a bytes object holding input passed to
        decode() that has not been converted yet. ``extra_info`` is a
        non-negative integer describing any additional decoder state
        *before* ``buffered_input`` is processed. In the initial state,
        and after reset(), this must be (b"", 0).
        """
        return (b"", 0)

    def setstate(self, state):
        """Restore a state previously returned by getstate().

        setstate((b"", 0)) must be equivalent to reset().
        """
class BufferedIncrementalDecoder(IncrementalDecoder):
    """IncrementalDecoder base class for codecs that must carry
    incomplete byte sequences over between calls to decode()."""

    def __init__(self, errors='strict'):
        IncrementalDecoder.__init__(self, errors)
        # undecoded input that is kept between calls to decode()
        self.buffer = ""

    def _buffer_decode(self, input, errors, final):
        """Decode ``input`` and return an (output, consumed) tuple.

        Subclasses must override this method.
        """
        raise NotImplementedError

    def decode(self, input, final=False):
        """Decode ``input``, prepending any leftovers from the previous
        call and retaining whatever could not be consumed."""
        pending = self.buffer + input
        (output, consumed) = self._buffer_decode(pending, self.errors, final)
        # keep undecoded input until the next call
        self.buffer = pending[consumed:]
        return output

    def reset(self):
        IncrementalDecoder.reset(self)
        self.buffer = ""

    def getstate(self):
        # additional state info is always 0
        return (self.buffer, 0)

    def setstate(self, state):
        # ignore additional state info
        self.buffer = state[0]
#
# The StreamWriter and StreamReader class provide generic working
# interfaces which can be used to implement new encoding submodules
# very easily. See encodings/utf_8.py for an example on how this is
# done.
#
class StreamWriter(Codec):

    def __init__(self, stream, errors='strict'):
        """Create a StreamWriter around *stream*.

        *stream* must be a file-like object open for writing (binary)
        data.  *errors* selects the error handling scheme; predefined
        values are:

         'strict' - raise a ValueError (or a subclass)
         'ignore' - ignore the character and continue with the next
         'replace'- replace with a suitable replacement character
         'xmlcharrefreplace' - replace with the appropriate XML
                               character reference
         'backslashreplace'  - replace with backslashed escape
                               sequences (only for encoding)

        The set of allowed values can be extended via register_error.
        """
        self.stream = stream
        self.errors = errors

    def write(self, object):
        """Encode *object* and write the result to the wrapped stream."""
        encoded, consumed = self.encode(object, self.errors)
        self.stream.write(encoded)

    def writelines(self, list):
        """Write a sequence of strings using a single write() call."""
        self.write(''.join(list))

    def reset(self):
        """Flush and reset any codec buffers used for keeping state.

        After this call the output data is in a clean state so fresh
        data can be appended without rescanning the whole stream.  The
        default implementation keeps no state, so this is a no-op.
        """
        pass

    def seek(self, offset, whence=0):
        self.stream.seek(offset, whence)
        # rewinding to the very start also discards codec state
        if whence == 0 and offset == 0:
            self.reset()

    def __getattr__(self, name,
                    getattr=getattr):
        """Delegate every other attribute to the underlying stream."""
        return getattr(self.stream, name)

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        self.stream.close()
###
# Buffered, decoding reader.  Keeps two buffers: undecoded bytes
# (self.bytebuffer) and already-decoded characters (self.charbuffer),
# plus an optional cache of split lines (self.linebuffer).
class StreamReader(Codec):

    def __init__(self, stream, errors='strict'):
        """ Creates a StreamReader instance.

            stream must be a file-like object open for reading
            (binary) data.

            The StreamReader may use different error handling
            schemes by providing the errors keyword argument. These
            parameters are predefined:

             'strict' - raise a ValueError (or a subclass)
             'ignore' - ignore the character and continue with the next
             'replace'- replace with a suitable replacement character;

            The set of allowed parameter values can be extended via
            register_error.
        """
        self.stream = stream
        self.errors = errors
        self.bytebuffer = ""
        # For str->str decoding this will stay a str
        # For str->unicode decoding the first read will promote it to unicode
        self.charbuffer = ""
        self.linebuffer = None

    def decode(self, input, errors='strict'):
        # Subclasses must provide the actual (chars, bytes_consumed) decoder.
        raise NotImplementedError

    def read(self, size=-1, chars=-1, firstline=False):
        """ Decodes data from the stream self.stream and returns the
            resulting object.

            chars indicates the number of characters to read from the
            stream. read() will never return more than chars
            characters, but it might return less, if there are not enough
            characters available.

            size indicates the approximate maximum number of bytes to
            read from the stream for decoding purposes. The decoder
            can modify this setting as appropriate. The default value
            -1 indicates to read and decode as much as possible.  size
            is intended to prevent having to decode huge files in one
            step.

            If firstline is true, and a UnicodeDecodeError happens
            after the first line terminator in the input only the first line
            will be returned, the rest of the input will be kept until the
            next call to read().

            The method should use a greedy read strategy meaning that
            it should read as much data as is allowed within the
            definition of the encoding and the given size, e.g.  if
            optional encoding endings or state markers are available
            on the stream, these should be read too.
        """
        # If we have lines cached, first merge them back into characters
        if self.linebuffer:
            self.charbuffer = "".join(self.linebuffer)
            self.linebuffer = None

        # read until we get the required number of characters (if available)
        while True:
            # can the request be satisfied from the character buffer?
            if chars >= 0:
                if len(self.charbuffer) >= chars:
                    break
            elif size >= 0:
                if len(self.charbuffer) >= size:
                    break
            # we need more data
            if size < 0:
                newdata = self.stream.read()
            else:
                newdata = self.stream.read(size)
            # decode bytes (those remaining from the last call included)
            data = self.bytebuffer + newdata
            try:
                newchars, decodedbytes = self.decode(data, self.errors)
            except UnicodeDecodeError, exc:
                if firstline:
                    # decode only up to the error; if that still yields at
                    # most one line there is nothing complete to return
                    newchars, decodedbytes = self.decode(data[:exc.start], self.errors)
                    lines = newchars.splitlines(True)
                    if len(lines) <= 1:
                        raise
                else:
                    raise
            # keep undecoded bytes until the next call
            self.bytebuffer = data[decodedbytes:]
            # put new characters in the character buffer
            self.charbuffer += newchars
            # there was no data available
            if not newdata:
                break
        if chars < 0:
            # Return everything we've got
            result = self.charbuffer
            self.charbuffer = ""
        else:
            # Return the first chars characters
            result = self.charbuffer[:chars]
            self.charbuffer = self.charbuffer[chars:]
        return result

    def readline(self, size=None, keepends=True):
        """ Read one line from the input stream and return the
            decoded data.

            size, if given, is passed as size argument to the
            read() method.
        """
        # If we have lines cached from an earlier read, return
        # them unconditionally
        if self.linebuffer:
            line = self.linebuffer[0]
            del self.linebuffer[0]
            if len(self.linebuffer) == 1:
                # revert to charbuffer mode; we might need more data
                # next time
                self.charbuffer = self.linebuffer[0]
                self.linebuffer = None
            if not keepends:
                line = line.splitlines(False)[0]
            return line

        readsize = size or 72
        line = ""
        # If size is given, we call read() only once
        while True:
            data = self.read(readsize, firstline=True)
            if data:
                # If we're at a "\r" read one extra character (which might
                # be a "\n") to get a proper line ending. If the stream is
                # temporarily exhausted we return the wrong line ending.
                if data.endswith("\r"):
                    data += self.read(size=1, chars=1)

            line += data
            lines = line.splitlines(True)
            if lines:
                if len(lines) > 1:
                    # More than one line result; the first line is a full line
                    # to return
                    line = lines[0]
                    del lines[0]
                    if len(lines) > 1:
                        # cache the remaining lines
                        lines[-1] += self.charbuffer
                        self.linebuffer = lines
                        self.charbuffer = None
                    else:
                        # only one remaining line, put it back into charbuffer
                        self.charbuffer = lines[0] + self.charbuffer
                    if not keepends:
                        line = line.splitlines(False)[0]
                    break
                line0withend = lines[0]
                line0withoutend = lines[0].splitlines(False)[0]
                if line0withend != line0withoutend: # We really have a line end
                    # Put the rest back together and keep it until the next call
                    self.charbuffer = "".join(lines[1:]) + self.charbuffer
                    if keepends:
                        line = line0withend
                    else:
                        line = line0withoutend
                    break
            # we didn't get anything or this was our only try
            if not data or size is not None:
                if line and not keepends:
                    line = line.splitlines(False)[0]
                break
            # grow the read size geometrically until a cap, so short lines
            # don't force many tiny reads
            if readsize < 8000:
                readsize *= 2
        return line

    def readlines(self, sizehint=None, keepends=True):
        """ Read all lines available on the input stream
            and return them as list of lines.

            Line breaks are implemented using the codec's decoder
            method and are included in the list entries.

            sizehint, if given, is ignored since there is no efficient
            way to finding the true end-of-line.
        """
        data = self.read()
        return data.splitlines(keepends)

    def reset(self):
        """ Resets the codec buffers used for keeping state.

            Note that no stream repositioning should take place.
            This method is primarily intended to be able to recover
            from decoding errors.
        """
        self.bytebuffer = ""
        self.charbuffer = u""
        self.linebuffer = None

    def seek(self, offset, whence=0):
        """ Set the input stream's current position.

            Resets the codec buffers used for keeping state.
        """
        self.stream.seek(offset, whence)
        self.reset()

    def next(self):
        """ Return the next decoded line from the input stream."""
        line = self.readline()
        if line:
            return line
        raise StopIteration

    def __iter__(self):
        return self

    def __getattr__(self, name,
                    getattr=getattr):
        """ Inherit all other methods from the underlying stream.
        """
        return getattr(self.stream, name)

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        self.stream.close()
###
class StreamReaderWriter:
""" StreamReaderWriter instances allow wrapping streams which
work in both read and write modes.
The design is such that one can use the factory functions
returned by the codec.lookup() function to construct the
instance.
"""
# Optional attributes set by the file wrappers below
encoding = 'unknown'
def __init__(self, stream, Reader, Writer, errors='strict'):
""" Creates a StreamReaderWriter instance.
stream must be a Stream-like object.
Reader, Writer must be factory functions or classes
providing the StreamReader, StreamWriter interface resp.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
return self.reader.read(size)
def readline(self, size=None):
return self.reader.readline(size)
def readlines(self, sizehint=None):
return self.reader.readlines(sizehint)
def next(self):
""" Return the next decoded line from the input stream."""
return self.reader.next()
def __iter__(self):
return self
def write(self, data):
return self.writer.write(data)
def writelines(self, list):
return self.writer.writelines(list)
def reset(self):
self.reader.reset()
self.writer.reset()
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
self.reader.reset()
if whence == 0 and offset == 0:
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
# these are needed to make "with codecs.open(...)" work properly
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamRecoder:
""" StreamRecoder instances provide a frontend - backend
view of encoding data.
They use the complete set of APIs returned by the
codecs.lookup() function to implement their task.
Data written to the stream is first decoded into an
intermediate format (which is dependent on the given codec
combination) and then written to the stream using an instance
of the provided Writer class.
In the other direction, data is read from the stream using a
Reader instance and then return encoded data to the caller.
"""
# Optional attributes set by the file wrappers below
data_encoding = 'unknown'
file_encoding = 'unknown'
def __init__(self, stream, encode, decode, Reader, Writer,
errors='strict'):
""" Creates a StreamRecoder instance which implements a two-way
conversion: encode and decode work on the frontend (the
input to .read() and output of .write()) while
Reader and Writer work on the backend (reading and
writing to the stream).
You can use these objects to do transparent direct
recodings from e.g. latin-1 to utf-8 and back.
stream must be a file-like object.
encode, decode must adhere to the Codec interface, Reader,
Writer must be factory functions or classes providing the
StreamReader, StreamWriter interface resp.
encode and decode are needed for the frontend translation,
Reader and Writer for the backend translation. Unicode is
used as intermediate encoding.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.encode = encode
self.decode = decode
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
data = self.reader.read(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readline(self, size=None):
if size is None:
data = self.reader.readline()
else:
data = self.reader.readline(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readlines(self, sizehint=None):
data = self.reader.read()
data, bytesencoded = self.encode(data, self.errors)
return data.splitlines(1)
def next(self):
""" Return the next decoded line from the input stream."""
data = self.reader.next()
data, bytesencoded = self.encode(data, self.errors)
return data
def __iter__(self):
return self
def write(self, data):
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def writelines(self, list):
data = ''.join(list)
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def reset(self):
self.reader.reset()
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
### Shortcuts
def open(filename, mode='rb', encoding=None, errors='strict', buffering=1):
    """ Open an encoded file using the given mode and return
        a wrapped version providing transparent encoding/decoding.

        Note: The wrapped version will only accept the object format
        defined by the codecs, i.e. Unicode objects for most builtin
        codecs. Output is also codec dependent and will usually be
        Unicode as well.

        Files are always opened in binary mode, even if no binary mode
        was specified. This is done to avoid data loss due to encodings
        using 8-bit values. The default file mode is 'rb' meaning to
        open the file in binary read mode.

        encoding specifies the encoding which is to be used for the
        file.

        errors may be given to define the error handling. It defaults
        to 'strict' which causes ValueErrors to be raised in case an
        encoding error occurs.

        buffering has the same meaning as for the builtin open() API.
        It defaults to line buffered.

        The returned wrapped file object provides an extra attribute
        .encoding which allows querying the used encoding. This
        attribute is only available if an encoding was specified as
        parameter.
    """
    if encoding is not None:
        if 'U' in mode:
            # No automatic conversion of '\n' is done on reading and writing
            mode = mode.strip().replace('U', '')
            if mode[:1] not in set('rwa'):
                # 'U' implied read mode; make that explicit
                mode = 'r' + mode
        if 'b' not in mode:
            # Force opening of the file in binary mode
            mode = mode + 'b'
    file = __builtin__.open(filename, mode, buffering)
    if encoding is None:
        # no codec requested: hand back the plain file object untouched
        return file
    info = lookup(encoding)
    srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
    # Add attributes to simplify introspection
    srw.encoding = encoding
    return srw
def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
    """Return a wrapped version of *file* providing transparent
    encoding translation.

    Strings written to the wrapped file are interpreted according to
    *data_encoding* and then written to the original file as strings
    using *file_encoding*.  The intermediate encoding will usually be
    Unicode but depends on the specified codecs.  Strings read from the
    file are decoded using *file_encoding* and handed back using
    *data_encoding*.

    If *file_encoding* is not given, it defaults to *data_encoding*.

    *errors* may be given to define the error handling; it defaults to
    'strict', which causes ValueErrors to be raised on encoding errors.

    The returned wrapped file object provides two extra attributes,
    .data_encoding and .file_encoding, which reflect the given
    parameters of the same name and can be used for introspection by
    Python programs.
    """
    if file_encoding is None:
        file_encoding = data_encoding
    frontend = lookup(data_encoding)
    backend = lookup(file_encoding)
    recoder = StreamRecoder(file, frontend.encode, frontend.decode,
                            backend.streamreader, backend.streamwriter, errors)
    # Add attributes to simplify introspection
    recoder.data_encoding = data_encoding
    recoder.file_encoding = file_encoding
    return recoder
### Helpers for codec lookup
def getencoder(encoding):
    """Return the encoder function for *encoding*.

    Raises LookupError if the encoding cannot be found.
    """
    return lookup(encoding).encode
def getdecoder(encoding):
    """Return the decoder function for *encoding*.

    Raises LookupError if the encoding cannot be found.
    """
    return lookup(encoding).decode
def getincrementalencoder(encoding):
    """Return the IncrementalEncoder class/factory for *encoding*.

    Raises LookupError if the encoding cannot be found or the codec
    does not provide an incremental encoder.
    """
    factory = lookup(encoding).incrementalencoder
    if factory is None:
        raise LookupError(encoding)
    return factory
def getincrementaldecoder(encoding):
    """Return the IncrementalDecoder class/factory for *encoding*.

    Raises LookupError if the encoding cannot be found or the codec
    does not provide an incremental decoder.
    """
    factory = lookup(encoding).incrementaldecoder
    if factory is None:
        raise LookupError(encoding)
    return factory
def getreader(encoding):
    """Return the StreamReader class/factory for *encoding*.

    Raises LookupError if the encoding cannot be found.
    """
    return lookup(encoding).streamreader
def getwriter(encoding):
    """Return the StreamWriter class/factory for *encoding*.

    Raises LookupError if the encoding cannot be found.
    """
    return lookup(encoding).streamwriter
def iterencode(iterator, encoding, errors='strict', **kwargs):
    """Encoding iterator.

    Encodes the input strings from *iterator* using an
    IncrementalEncoder.  *errors* and *kwargs* are passed through to
    the IncrementalEncoder constructor.
    """
    encoder = getincrementalencoder(encoding)(errors, **kwargs)
    for chunk in iterator:
        encoded = encoder.encode(chunk)
        if encoded:
            yield encoded
    # flush any state the encoder is still holding
    tail = encoder.encode("", True)
    if tail:
        yield tail
def iterdecode(iterator, encoding, errors='strict', **kwargs):
    """Decoding iterator.

    Decodes the input strings from *iterator* using an
    IncrementalDecoder.  *errors* and *kwargs* are passed through to
    the IncrementalDecoder constructor.
    """
    decoder = getincrementaldecoder(encoding)(errors, **kwargs)
    for chunk in iterator:
        decoded = decoder.decode(chunk)
        if decoded:
            yield decoded
    # flush any state the decoder is still holding
    tail = decoder.decode("", True)
    if tail:
        yield tail
### Helpers for charmap-based codecs
def make_identity_dict(rng):
    """ make_identity_dict(rng) -> dict

        Return a dictionary mapping each element of the *rng* sequence
        to itself.
    """
    return dict((element, element) for element in rng)
def make_encoding_map(decoding_map):
    """ Creates an encoding map from a decoding map.

        If a target mapping in the decoding map occurs multiple
        times, then that target is mapped to None (undefined mapping),
        causing an exception when encountered by the charmap codec
        during translation.

        One example where this happens is cp875.py which decodes
        multiple character to \u001a.
    """
    encoding_map = {}
    for code, char in decoding_map.items():
        # a repeated target becomes an undefined (None) mapping
        encoding_map[char] = None if char in encoding_map else code
    return encoding_map
### error handlers

# Pre-resolve the standard error handlers so they are available as
# module attributes.
try:
    strict_errors = lookup_error("strict")
    ignore_errors = lookup_error("ignore")
    replace_errors = lookup_error("replace")
    xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
    backslashreplace_errors = lookup_error("backslashreplace")
except LookupError:
    # In --disable-unicode builds, these error handler are missing
    strict_errors = None
    ignore_errors = None
    replace_errors = None
    xmlcharrefreplace_errors = None
    backslashreplace_errors = None
# Tell modulefinder that using codecs probably needs the encodings
# package
# (the import is guarded by an always-false flag so it never actually
# runs, but static analysers still record the dependency)
_false = 0
if _false:
    import encodings
### Tests

# Quick manual smoke test: recode stdio between Latin-1 and UTF-8.
if __name__ == '__main__':

    # Make stdout translate Latin-1 output into UTF-8 output
    sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')

    # Have stdin translate Latin-1 input into UTF-8 input
    sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
| 32.289908 | 87 | 0.620213 |
acf3416deed416f3dbe10f782f2a29f5c1b02acf | 18,956 | py | Python | ui_tests/caseworker/conftest.py | django-doctor/lite-frontend | 330ff9575fd22d7c4c42698ac2d653244e6180d6 | [
"MIT"
] | null | null | null | ui_tests/caseworker/conftest.py | django-doctor/lite-frontend | 330ff9575fd22d7c4c42698ac2d653244e6180d6 | [
"MIT"
] | null | null | null | ui_tests/caseworker/conftest.py | django-doctor/lite-frontend | 330ff9575fd22d7c4c42698ac2d653244e6180d6 | [
"MIT"
] | null | null | null | from django.utils import timezone
from pytest_bdd import given, when, then, parsers
import time
from ui_tests.caseworker.pages.advice import FinalAdvicePage, TeamAdvicePage
from ui_tests.caseworker.pages.case_page import CasePage, CaseTabs
from ui_tests.caseworker.pages.goods_queries_pages import StandardGoodsReviewPages, OpenGoodsReviewPages
from caseworker.core.constants import DATE_FORMAT
from ui_tests.caseworker.fixtures.env import environment # noqa
from ui_tests.caseworker.fixtures.add_a_flag import ( # noqa
add_case_flag,
add_good_flag,
add_organisation_flag,
add_destination_flag,
get_flag_of_level,
)
from ui_tests.caseworker.fixtures.add_queue import add_queue # noqa
from ui_tests.caseworker.fixtures.add_a_document_template import ( # noqa
add_a_document_template,
get_template_id,
)
from ui_tests.caseworker.fixtures.add_a_picklist import ( # noqa
add_a_letter_paragraph_picklist,
add_an_ecju_query_picklist,
add_a_proviso_picklist,
add_a_standard_advice_picklist,
add_a_report_summary_picklist,
)
from ui_tests.caseworker.pages.advice import UserAdvicePage
from ui_tests.caseworker.pages.generate_decision_documents_page import GeneratedDecisionDocuments
from ui_tests.caseworker.pages.generate_document_page import GeneratedDocument
from ui_tests.caseworker.pages.give_advice_pages import GiveAdvicePages
from ui_tests.caseworker.pages.good_country_matrix_page import GoodCountryMatrixPage
from ui_tests.caseworker.pages.grant_licence_page import GrantLicencePage
from ui_tests.caseworker.pages.letter_templates import LetterTemplates
from ui_tests.caseworker.pages.shared import Shared
from tests_common import functions
from tests_common.fixtures.apply_for_application import * # noqa
from tests_common.fixtures.driver import driver # noqa
from tests_common.fixtures.sso_sign_in import sso_sign_in # noqa
from tests_common.fixtures.core import ( # noqa
context,
api_test_client,
exporter_info,
internal_info,
api_client,
)
from tests_common.fixtures.urls import internal_url, sso_sign_in_url, api_url # noqa
import tests_common.tools.helpers as utils
from ui_tests.caseworker.pages.case_list_page import CaseListPage
from ui_tests.caseworker.pages.application_page import ApplicationPage
@when("I go to the internal homepage")  # noqa
def when_go_to_internal_homepage(driver, internal_url):  # noqa
    """Navigate the browser to the caseworker home page."""
    driver.get(internal_url)
@given("I go to internal homepage")  # noqa
def go_to_internal_homepage(driver, internal_url):  # noqa
    """Navigate the browser to the caseworker home page (given form)."""
    driver.get(internal_url)
@given("I sign in to SSO or am signed into SSO")  # noqa
def sign_into_sso(driver, sso_sign_in):  # noqa
    """No-op step: the sso_sign_in fixture performs the actual sign-in."""
    pass
@when("I go to application previously created")  # noqa
def click_on_created_application(driver, context, internal_url):  # noqa
    """Open the previously created case directly by URL."""
    base = internal_url.rstrip("/")
    driver.get(base + "/queues/00000000-0000-0000-0000-000000000001/cases/" + context.case_id)
@given("I create standard application or standard application has been previously created")  # noqa
def create_app(driver, apply_for_standard_application):  # noqa
    """No-op step: the apply_for_standard_application fixture does the work."""
    pass
@given("I create open application or open application has been previously created")  # noqa
def create_open_app(driver, apply_for_open_application):  # noqa
    """No-op step: the apply_for_open_application fixture does the work."""
    pass
@when("I click continue")  # noqa
def i_click_continue(driver):  # noqa
    """Submit the current form, then pause.

    The pause works around a race condition when consecutive scenario
    steps both click submit: without it the same button can be clicked
    twice for the two steps.
    """
    Shared(driver).click_submit()
    time.sleep(5)
@when("I click change status")  # noqa
def click_post_note(driver):  # noqa
    """Switch to the details tab and open the change-status form."""
    details_page = CasePage(driver)
    details_page.change_tab(CaseTabs.DETAILS)
    details_page.click_change_status()
@when(parsers.parse('I select status "{status}" and save'))  # noqa
def select_status_save(driver, status, context):  # noqa
    """Pick the given status, remember it and the update time on the
    test context, then submit."""
    ApplicationPage(driver).select_status(status)
    context.status = status
    context.date_time_of_update = utils.get_formatted_date_time_h_m_pm_d_m_y()
    Shared(driver).click_submit()
@when("I click on new queue in dropdown")  # noqa
@when("I click on edited queue in dropdown")  # noqa
def queue_shown_in_dropdown(driver, context):  # noqa
    """Select the queue stored on the context from the queue dropdown."""
    CaseListPage(driver).click_on_queue_name(context.queue_name)
@when("I go to queues")  # noqa
def go_to_queues(driver, internal_url):  # noqa
    """Open the queue management page directly by URL."""
    driver.get(internal_url.rstrip("/") + "/queues/manage/")
@when("I add case to newly created queue")  # noqa
def move_case_to_new_queue(driver, context):  # noqa
    """Open the move-case dialog, tick the newly created queue if it is
    not already ticked, and submit."""
    ApplicationPage(driver).click_move_case_button()
    checkbox_id = context.queue_name.replace(" ", "-")
    if not driver.find_element_by_id(checkbox_id).is_selected():
        driver.find_element_by_id(checkbox_id).click()
    Shared(driver).click_submit()
@given("I create report summary picklist")  # noqa
def add_report_summary_picklist(add_a_report_summary_picklist):  # noqa
    """No-op step: the add_a_report_summary_picklist fixture does the work."""
    pass
@then("I see previously created application")  # noqa
def see_queue_in_queue_list(driver, context):  # noqa
    """Clear any active filters, filter by the case reference, then
    assert the case row is displayed."""
    clear_page = CaseListPage(driver)
    functions.try_open_filters(driver)
    clear_page.click_clear_filters_button()
    filter_page = CaseListPage(driver)
    functions.try_open_filters(driver)
    filter_page.filter_by_case_reference(context.reference_code)
    functions.click_apply_filters(driver)
    assert driver.find_element_by_id(context.case_id).is_displayed()
@when("I show filters")  # noqa
def i_show_filters(driver):  # noqa
    """Expand the filter panel if it is not already open."""
    Shared(driver).try_open_filters()
@when("I go to users")  # noqa
def go_to_users(driver, sso_sign_in, internal_url):  # noqa
    """Open the internal users page directly by URL (requires SSO sign-in)."""
    driver.get(internal_url.rstrip("/") + "/users/")
@given("I create a clc query")  # noqa
def create_clc_query(driver, apply_for_clc_query, context):  # noqa
    """No-op step: the apply_for_clc_query fixture does the work."""
    pass
@when("I go to the case list page")  # noqa
def case_list_page(driver, internal_url):  # noqa
    """Open the default queue's case list directly by URL."""
    driver.get(internal_url.rstrip("/") + "/queues/00000000-0000-0000-0000-000000000001/")
@then("I should see my case in the cases list")  # noqa
def case_in_cases_list(driver, context):  # noqa
    """Find the case row, stash it on the context for later steps, and
    check it carries the case reference."""
    context.case_row = CaseListPage(driver).get_case_row(context.case_id)
    assert context.reference_code in context.case_row.text
@then("I should see my case SLA")  # noqa
def case_sla(driver, context):  # noqa
    """Check the SLA counter on the previously stashed case row is 0."""
    assert CaseListPage(driver).get_case_row_sla(context.case_row) == "0"
@then("I see the case page")  # noqa
def i_see_the_case_page(driver, context):  # noqa
    """Check the case page heading contains the case reference."""
    heading = driver.find_element_by_id(ApplicationPage.HEADING_ID).text
    assert context.reference_code in heading
@when("I go to users")  # noqa
def go_to_users(driver, sso_sign_in, internal_url):  # noqa
    # NOTE(review): exact duplicate of the go_to_users step defined earlier
    # in this module (same step string, same body); this definition shadows
    # the earlier one at module level — consider removing one of them.
    driver.get(internal_url.rstrip("/") + "/users/")
@given("an Exhibition Clearance is created")  # noqa
def an_exhibition_clearance_is_created(driver, apply_for_exhibition_clearance):  # noqa
    """No-op step: the apply_for_exhibition_clearance fixture does the work."""
    pass
@when("I combine all team advice")  # noqa
def combine_all_advice(driver):  # noqa
    """Click the combine-advice button on the team advice page."""
    TeamAdvicePage(driver).click_combine_advice()
@when("I finalise the advice")  # noqa
def finalise(driver):  # noqa
    """Switch to the final advice tab and click finalise."""
    CasePage(driver).change_tab(CaseTabs.FINAL_ADVICE)
    FinalAdvicePage(driver).click_finalise()
@when("I select the template previously created")  # noqa
def selected_created_template(driver, context):  # noqa
    """Pick the letter template stored on the context and submit."""
    GeneratedDocument(driver).click_letter_template(context.document_template_id)
    Shared(driver).click_submit()
@when("I go to the documents tab")  # noqa
def click_documents(driver):  # noqa
    """Switch the case page to the documents tab."""
    CasePage(driver).change_tab(CaseTabs.DOCUMENTS)
@when("I click I'm done")  # noqa
def im_done_button(driver):  # noqa
    """Click the "I'm done" button on the application page."""
    ApplicationPage(driver).click_im_done_button()
@when("I go to my work queue")  # noqa
def work_queue(driver, context, internal_url):  # noqa
    """Open the queue stored on the context directly by URL."""
    driver.get(internal_url.rstrip("/") + "/queues/" + context.queue_id)
@then("my case is not in the queue")  # noqa
def my_case_not_in_queue(driver, context):  # noqa
    """Assert the current case id does not appear in the queue's case list."""
    assert context.case_id not in Shared(driver).get_text_of_cases_form()
@given("a queue has been created")  # noqa
def create_queue(context, api_test_client):  # noqa
    """Create a uniquely named queue via the API and stash its id/name
    on the test context."""
    api_test_client.queues.add_queue(f"queue {utils.get_formatted_date_time_y_m_d_h_s()}")
    context.queue_id = api_test_client.context["queue_id"]
    context.queue_name = api_test_client.context["queue_name"]
@given(parsers.parse('I "{decision}" all elements of the application at user and team level'))  # noqa
def approve_application_objects(context, api_test_client, decision):  # noqa
    """Record identical user-level and team-level advice of the given
    type against the end user, consignee and good on the current case."""
    context.advice_type = decision
    # fields shared by every advice entry
    shared = {
        "type": context.advice_type,
        "text": "abc",
        "note": "",
        "footnote_required": "False",
    }
    data = [
        dict(shared, end_user=context.end_user["id"]),
        dict(shared, consignee=context.consignee["id"]),
        dict(shared, good=context.good_id),
    ]
    api_test_client.cases.create_user_advice(context.case_id, data)
    api_test_client.cases.create_team_advice(context.case_id, data)
@when("I go to the final advice page by url")  # noqa
def final_advice_page(driver, context, internal_url):  # noqa
    """Navigate directly to the final advice page for the current case."""
    base = internal_url.rstrip("/")
    driver.get(
        base + "/queues/00000000-0000-0000-0000-000000000001/cases/" + context.case_id + "/final-advice/"
    )
@when("I click edit flags link")  # noqa
def click_edit_case_flags_link(driver):  # noqa
    """Open the change-case-flags form from the case page."""
    CasePage(driver).click_change_case_flags()
@given(parsers.parse('the status is set to "{status}"'))  # noqa
def set_status(api_test_client, context, status):  # noqa
    """Set the application status via the API rather than the UI."""
    api_test_client.applications.set_status(context.app_id, status)
@given("case has been moved to new Queue")  # noqa
def assign_case_to_queue(api_test_client):  # noqa
    """Move the current case onto a queue via the API."""
    api_test_client.cases.assign_case_to_queue()
@given("all flags are removed")  # noqa
def remove_all_flags(context, api_test_client):  # noqa
    """Clear every flag from the current case via the API."""
    api_test_client.flags.assign_case_flags(context.case_id, [])
@when("I add a new queue")  # noqa
def add_a_queue(driver, context, add_queue):  # noqa
    """No-op step: the add_queue fixture creates the queue."""
    pass
@then("I see my autogenerated application form")  # noqa
def generated_document(driver, context):  # noqa
    """Check the newest generated document is the application form and
    has a download link."""
    documents_page = GeneratedDocument(driver)
    latest_document = documents_page.get_latest_document()
    assert "Application Form" in latest_document.text
    assert documents_page.check_download_link_is_present(latest_document)
@when(
    parsers.parse('I respond "{controlled}", "{control_list_entry}", "{report}", "{comment}" and click submit')
)  # noqa
def click_continue(driver, controlled, control_list_entry, report, comment, context):  # noqa
    """Fill in and submit the goods review form, recording the entered
    values on the test context for later assertions."""
    is_standard = "SIEL" in context.reference_code
    page_cls = StandardGoodsReviewPages if is_standard else OpenGoodsReviewPages
    review_page = page_cls(driver)
    review_page.click_is_good_controlled(controlled == "yes")
    review_page.type_in_to_control_list_entry(control_list_entry)
    context.goods_control_list_entry = control_list_entry
    review_page.enter_ars(report)
    context.report = report
    review_page.enter_a_comment(comment)
    context.comment = comment
    review_page.click_submit()
@then("the status has been changed in the application")  # noqa
def audit_trail_updated(driver, context, internal_info, internal_url):  # noqa
    """Open the case activity tab and check the audit trail mentions the
    status stored on the context."""
    ApplicationPage(driver).go_to_cases_activity_tab(internal_url, context)
    audit_text = Shared(driver).get_audit_trail_text().lower()
    assert context.status.lower() in audit_text, "status has not been shown as approved in audit trail"
@given("I create a proviso picklist")  # noqa
def i_create_an_proviso_picklist(context, add_a_proviso_picklist):  # noqa
    """Stash the fixture-created proviso picklist's name and text."""
    context.proviso_picklist_name = add_a_proviso_picklist["name"]
    context.proviso_picklist_question_text = add_a_proviso_picklist["text"]
@given("I create a standard advice picklist") # noqa
def i_create_an_standard_advice_picklist(context, add_a_standard_advice_picklist): # noqa
context.standard_advice_query_picklist_name = add_a_standard_advice_picklist["name"]
context.standard_advice_query_picklist_question_text = add_a_standard_advice_picklist["text"]
@when("I click on the user advice tab") # noqa
def i_click_on_view_advice(driver, context): # noqa
CasePage(driver).change_tab(CaseTabs.USER_ADVICE)
@when("I select all items in the user advice view") # noqa
def click_items_in_advice_view(driver, context): # noqa
context.number_of_advice_items_clicked = UserAdvicePage(driver).click_on_all_checkboxes()
@when(parsers.parse("I choose to '{option}' the licence")) # noqa
def choose_advice_option(driver, option, context): # noqa
GiveAdvicePages(driver).click_on_advice_option(option)
context.advice_data = []
@when(parsers.parse("I import text from the '{option}' picklist")) # noqa
def import_text_advice(driver, option, context): # noqa
GiveAdvicePages(driver).click_on_import_link(option)
text = GiveAdvicePages(driver).get_text_of_picklist_item()
context.advice_data.append(text)
GiveAdvicePages(driver).click_on_picklist_item(option)
@when(parsers.parse("I write '{text}' in the note text field")) # noqa
def write_note_text_field(driver, text, context): # noqa
GiveAdvicePages(driver).type_in_additional_note_text_field(text)
context.advice_data.append(text)
@when(parsers.parse("I select that a footnote is not required")) # noqa
def select_footnote_not_required(driver): # noqa
    """Tick the 'footnote not required' option on the advice form.

    Previously this step reused the name ``write_note_text_field`` (shadowing
    the real note-typing step at module level) and declared a ``text``
    parameter with no matching ``{text}`` placeholder in the step pattern,
    so pytest-bdd would try to resolve ``text`` as a fixture and error out.
    Renamed and trimmed the signature to fix both problems; the step pattern
    itself is unchanged, so feature files are unaffected.
    """
    GiveAdvicePages(driver).select_footnote_not_required()
@when("I combine all user advice") # noqa
def combine_all_advice(driver): # noqa
    """Combine every selected piece of user advice into team advice."""
    UserAdvicePage(driver).click_combine_advice()
@given("I create a letter paragraph picklist") # noqa
def add_letter_paragraph_picklist(add_a_letter_paragraph_picklist): # noqa
    """Creation is handled entirely by the fixture; nothing to do here."""
    pass
@when("I go to letters") # noqa
def i_go_to_letters(driver, internal_url): # noqa
    """Navigate straight to the document templates page."""
    driver.get(internal_url.rstrip("/") + "/document-templates")
@when("I create a letter template for a document") # noqa
def create_letter_template(driver, context, get_template_id): # noqa
    """Walk the multi-step letter template wizard end to end.

    Stores the generated template name on the context so later steps
    can look it up.
    """
    template_page = LetterTemplates(driver)
    template_page.click_create_a_template()
    # Timestamped name keeps templates unique between test runs.
    context.template_name = f"Template {utils.get_formatted_date_time_y_m_d_h_s()}"
    template_page.enter_template_name(context.template_name)
    functions.click_submit(driver)
    template_page.select_which_type_of_cases_template_can_apply_to(
        ["Standard-Individual-Export-Licence", "Open-Individual-Export-Licence"]
    )
    functions.click_submit(driver)
    template_page.select_which_type_of_decisions_template_can_apply_to(["Approve", "Proviso"])
    functions.click_submit(driver)
    template_page.select_visible_to_exporter("True")
    functions.click_submit(driver)
    template_page.select_has_signature("False")
    functions.click_submit(driver)
    template_page.click_licence_layout(get_template_id)
    functions.click_submit(driver)
@when("I add a letter paragraph to template") # noqa
def add_two_letter_paragraphs(driver, context): # noqa
    """Add a letter paragraph to the template, remembering its name."""
    letter_template = LetterTemplates(driver)
    letter_template.click_add_letter_paragraph()
    context.letter_paragraph_name = letter_template.add_letter_paragraph()
    letter_template.click_add_letter_paragraphs()
@when("I preview template") # noqa
def preview_template(driver): # noqa
    """Open the letter template preview."""
    LetterTemplates(driver).click_create_preview_button()
@when("I apply filters") # noqa
def i_apply_filters(driver, context): # noqa
    """Apply the currently selected case list filters."""
    functions.click_apply_filters(driver)
@then("I dont see previously created application") # noqa
def dont_see_queue_in_queue_list(driver, context): # noqa
    """Filter by the stored reference code and assert no matching case is listed."""
    case_page = CaseListPage(driver)
    functions.try_open_filters(driver)
    case_page.filter_by_case_reference(context.reference_code)
    functions.click_apply_filters(driver)
    assert context.reference_code not in driver.find_element_by_id("main-content").text
@when("I click clear filters") # noqa
def i_click_clear_filters(driver, context): # noqa
    """Reset all case list filters."""
    CaseListPage(driver).click_clear_filters_button()
@given("A template exists for the appropriate decision") # noqa
def template_with_decision(context, api_test_client): # noqa
    """Create (via API) a document template matching the advice type under test."""
    document_template = api_test_client.document_templates.add_template(
        api_test_client.picklists, advice_type=[context.advice_type]
    )
    context.document_template_id = document_template["id"]
    context.document_template_name = document_template["name"]
@when("I generate a document for the decision") # noqa
def generate_decision_document(driver, context): # noqa
    """Kick off decision document generation for the stored advice type."""
    GeneratedDecisionDocuments(driver).click_generate_decision_document(context.advice_type)
@given(parsers.parse('I "{decision}" the open application good and country at all advice levels')) # noqa
def approve_open_application_objects(context, api_test_client, decision): # noqa
    """Record advice for the goods type and country at user, team and final level."""
    context.advice_type = decision
    text = "abc"
    note = ""
    footnote_required = "False"
    # One advice entry per object: the goods type and the destination country.
    data = [
        {
            "type": context.advice_type,
            "text": text,
            "note": note,
            "goods_type": context.goods_type["id"],
            "footnote_required": footnote_required,
        },
        {
            "type": context.advice_type,
            "text": text,
            "note": note,
            "country": context.country["code"],
            "footnote_required": footnote_required,
        },
    ]
    # Apply the same advice at every level so the case can be finalised.
    api_test_client.cases.create_user_advice(context.case_id, data)
    api_test_client.cases.create_team_advice(context.case_id, data)
    api_test_client.cases.create_final_advice(context.case_id, data)
@when("I approve the good country combination") # noqa
def approve_good_country_combination(driver, context): # noqa
    """Approve the goods/country pair on the good-country matrix and submit."""
    GoodCountryMatrixPage(driver).select_good_country_option(
        "approve", context.goods_type["id"], context.country["code"]
    )
    functions.click_submit(driver)
@when("I click continue on the approve open licence page") # noqa
def approve_licence_page(driver, context): # noqa
    """Capture the licence duration and start date shown, then submit the page."""
    page = GrantLicencePage(driver)
    context.licence_duration = page.get_duration_in_finalise_view()
    context.licence_start_date = timezone.localtime().strftime(DATE_FORMAT)
    functions.click_submit(driver)
@then("The licence information is in the second audit") # noqa
def licence_audit(driver, context, internal_url): # noqa
    """Assert the captured licence duration and start date appear in the audit trail."""
    ApplicationPage(driver).go_to_cases_activity_tab(internal_url, context)
    second_audit = ApplicationPage(driver).get_text_of_audit_trail_item(1)
    assert context.licence_duration in second_audit
    assert context.licence_start_date in second_audit
| 36.106667 | 115 | 0.748734 |
acf341bf4d4ece9b7ffeed222c2ebc53546361a5 | 3,186 | py | Python | Savitzjy_goulay.py | Samarargon/sem-proc | 2a5e53f14572d6057f2688d0d8bb48b34135cdef | [
"MIT"
] | null | null | null | Savitzjy_goulay.py | Samarargon/sem-proc | 2a5e53f14572d6057f2688d0d8bb48b34135cdef | [
"MIT"
] | null | null | null | Savitzjy_goulay.py | Samarargon/sem-proc | 2a5e53f14572d6057f2688d0d8bb48b34135cdef | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 02 17:52:04 2016
@author: P1506478
"""
#%%
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.

    The Savitzky-Golay filter removes high frequency noise from data.
    It has the advantage of preserving the original shape and
    features of the signal better than other types of filtering
    approaches, such as moving averages techniques.

    Parameters
    ----------
    y : array_like, shape (N,)
        the values of the time history of the signal.
    window_size : int
        the length of the window. Must be an odd integer number.
    order : int
        the order of the polynomial used in the filtering.
        Must be less than `window_size` - 1.
    deriv : int
        the order of the derivative to compute (default = 0 means only smoothing)
    rate : int or float
        sample rate used to scale the derivative output (default = 1)

    Returns
    -------
    ys : ndarray, shape (N)
        the smoothed signal (or its n-th derivative).

    Notes
    -----
    The main idea behind this approach is to make for each point a
    least-squares fit with a polynomial of high order over an odd-sized
    window centred at the point. The signal is padded at both ends with
    a mirrored copy of itself so the output has the same length as the
    input.

    Examples
    --------
    t = np.linspace(-4, 4, 500)
    y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
    ysg = savitzky_golay(y, window_size=31, order=4)

    References
    ----------
    .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
       Data by Simplified Least Squares Procedures. Analytical
       Chemistry, 1964, 36 (8), pp 1627-1639.
    .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
       W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
       Cambridge University Press ISBN-13: 9780521880688
    """
    import numpy as np
    from math import factorial
    try:
        # Fixes vs the original: `except ValueError, msg` is Python-2-only
        # syntax, and `np.int` has been removed from NumPy — use the plain
        # built-ins, which behave identically for valid inputs.
        window_size = abs(int(window_size))
        order = abs(int(order))
    except ValueError:
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # precompute the least-squares coefficients; `np.mat`/`.A` is deprecated,
    # so build a plain ndarray and index the pseudo-inverse rows directly
    b = np.array([[k**i for i in order_range]
                  for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b)[deriv] * rate**deriv * factorial(deriv)
    # pad the signal at the extremes with
    # values taken from the signal itself
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')
#%% | 39.825 | 89 | 0.644068 |
acf34434ccc23ab4882d5c64a63bbccfe00e6a01 | 7,357 | py | Python | test_cls_uae_rs.py | YonghaoXu/UAE-RS | 4ddc8371baec85d84ab4b25b217de8a9c8d82c38 | [
"MIT"
] | 9 | 2022-02-17T13:24:56.000Z | 2022-02-28T09:38:30.000Z | test_cls_uae_rs.py | YonghaoXu/UAE-RS | 4ddc8371baec85d84ab4b25b217de8a9c8d82c38 | [
"MIT"
] | null | null | null | test_cls_uae_rs.py | YonghaoXu/UAE-RS | 4ddc8371baec85d84ab4b25b217de8a9c8d82c38 | [
"MIT"
] | 2 | 2022-02-16T10:52:42.000Z | 2022-02-17T13:25:06.000Z | import os
import os.path as osp
import numpy as np
import argparse
from tools.utils import *
from torch import nn
from dataset.scene_dataset import *
from torch.utils import data
import tools.model as models
def _build_target_model(model_name, num_classes):
    """Instantiate the requested backbone from ``tools.model`` and replace
    its classification head with a ``num_classes``-way linear layer.

    Raises ValueError for an unrecognised ``model_name`` (the original
    if/elif chain silently fell through to a NameError).
    """
    # Backbones whose head is exposed as ``model.fc``.
    fc_head = ('resnet18', 'resnet50', 'resnet101', 'resnext50_32x4d',
               'resnext101_32x8d', 'regnet_x_400mf', 'regnet_x_8gf',
               'regnet_x_16gf')
    # VGG-family backbones: the head is slot '6' of ``model.classifier``.
    vgg_like = ('alexnet', 'vgg11', 'vgg16', 'vgg19')
    # DenseNets: ``model.classifier`` is one Linear with this input width.
    densenet_width = {'densenet121': 1024, 'densenet169': 1664, 'densenet201': 1920}
    if model_name in vgg_like:
        model = getattr(models, model_name)(pretrained=False)
        model.classifier._modules['6'] = nn.Linear(4096, num_classes)
    elif model_name in fc_head:
        model = getattr(models, model_name)(pretrained=False)
        model.fc = torch.nn.Linear(model.fc.in_features, num_classes)
    elif model_name == 'inception':
        # NOTE(review): inception is the only backbone built with
        # pretrained=True in the original code — preserved as-is.
        model = models.inception_v3(pretrained=True, aux_logits=False)
        model.fc = torch.nn.Linear(model.fc.in_features, num_classes)
    elif model_name in densenet_width:
        model = getattr(models, model_name)(pretrained=False)
        model.classifier = nn.Linear(densenet_width[model_name], num_classes)
    else:
        raise ValueError('unsupported target model: %s' % model_name)
    return model
def main(args):
    """Evaluate a pretrained scene classifier on the clean test split and on
    the UAE-RS adversarial test split, printing both overall accuracies.

    ``args`` comes from the argparse definition at the bottom of this file.
    Raises ValueError for an unknown ``dataID`` or ``target_model`` and
    FileNotFoundError when no ``.pth`` checkpoint exists for the model.
    """
    if args.dataID == 1:
        DataName = 'UCM'
        num_classes = 21
        classname = ('agricultural', 'airplane', 'baseballdiamond',
                     'beach', 'buildings', 'chaparral',
                     'denseresidential', 'forest', 'freeway',
                     'golfcourse', 'harbor', 'intersection',
                     'mediumresidential', 'mobilehomepark', 'overpass',
                     'parkinglot', 'river', 'runway',
                     'sparseresidential', 'storagetanks', 'tenniscourt')
    elif args.dataID == 2:
        DataName = 'AID'
        num_classes = 30
        classname = ('airport', 'bareland', 'baseballfield',
                     'beach', 'bridge', 'center',
                     'church', 'commercial', 'denseresidential',
                     'desert', 'farmland', 'forest',
                     'industrial', 'meadow', 'mediumresidential',
                     'mountain', 'parking', 'park',
                     'playground', 'pond', 'port',
                     'railwaystation', 'resort', 'river',
                     'school', 'sparseresidential', 'square',
                     'stadium', 'storagetanks', 'viaduct')
    else:
        raise ValueError('unknown dataID: %s' % args.dataID)
    # Adversarial images live under <root>/UAE-RS/<dataset>/ with the same
    # file names as the clean test set.
    adv_root_dir = osp.join(args.root_dir, 'UAE-RS', DataName) + '/'
    composed_transforms = transforms.Compose([
        transforms.Resize(size=(args.crop_size, args.crop_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])
    adv_loader = data.DataLoader(
        scene_dataset(root_dir=adv_root_dir, pathfile='./dataset/' + DataName + '_test.txt',
                      transform=composed_transforms, mode='adv'),
        batch_size=args.val_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True)
    clean_loader = data.DataLoader(
        scene_dataset(root_dir=args.root_dir, pathfile='./dataset/' + DataName + '_test.txt',
                      transform=composed_transforms),
        batch_size=args.val_batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True)
    target_model = _build_target_model(args.target_model, num_classes)
    # Find the pretrained checkpoint; if several .pth files exist, the last
    # one in directory order wins (same behaviour as the original loop).
    dirpath = args.save_path_prefix + DataName + '/Pretrain/' + args.target_model + '/'
    model_path_resume = None
    for filename in os.listdir(dirpath):
        filepath = os.path.join(dirpath, filename)
        if os.path.isfile(filepath) and filename.lower().endswith('.pth'):
            print(filepath)
            model_path_resume = filepath
    if model_path_resume is None:
        # Previously this fell through to a confusing NameError.
        raise FileNotFoundError('no .pth checkpoint found in ' + dirpath)
    saved_state_dict = torch.load(model_path_resume)
    new_params = target_model.state_dict().copy()
    # Copy weights positionally: the checkpoint keys may not match the
    # freshly built (non-DataParallel) model's keys, so pair them by order.
    for src_key, dst_key in zip(saved_state_dict, new_params):
        new_params[dst_key] = saved_state_dict[src_key]
    target_model.load_state_dict(new_params)
    target_model = torch.nn.DataParallel(target_model).cuda()
    target_model.eval()
    OA_clean, _ = test_acc(target_model, classname, clean_loader, 1, num_classes, print_per_batches=10)
    OA_adv, _ = test_acc(target_model, classname, adv_loader, 1, num_classes, print_per_batches=10)
    print('Clean Test Set OA:', OA_clean * 100)
    print('UAE-RS Test Set OA:', OA_adv * 100)
if __name__ == '__main__':
    # CLI entry point; defaults evaluate the UCM dataset with Inception-v3.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataID', type=int, default=1)
    parser.add_argument('--root_dir', type=str, default='/iarai/home/yonghao.xu/Data/',help='dataset path.')
    parser.add_argument('--target_model', type=str, default='inception',
                        help='alexnet,vgg11,vgg16,vgg19,inception,resnet18,resnet50,resnet101,resnext50_32x4d,resnext101_32x8d,densenet121,densenet169,densenet201,regnet_x_400mf,regnet_x_8gf,regnet_x_16gf')
    parser.add_argument('--save_path_prefix', type=str,default='./')
    parser.add_argument('--crop_size', type=int, default=256)
    parser.add_argument('--val_batch_size', type=int, default=32)
    parser.add_argument('--num_workers', type=int, default=1)
    main(parser.parse_args())
| 52.177305 | 207 | 0.647003 |
acf3446dbbebfc49009460a37af37b4f020c70be | 2,733 | py | Python | azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/packet_capture.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2022-03-30T22:39:15.000Z | 2022-03-30T22:39:15.000Z | azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/packet_capture.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/packet_capture.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2017-01-20T18:25:46.000Z | 2017-05-12T21:31:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PacketCapture(Model):
    """Request body for the "create packet capture" operation.

    Only virtual machines are currently supported as capture targets.

    All required parameters must be populated in order to send to Azure.

    :param target: Required. The ID of the targeted resource, only VM is
     currently supported.
    :type target: str
    :param bytes_to_capture_per_packet: Number of bytes captured per packet;
     the remaining bytes are truncated. Default value: 0 .
    :type bytes_to_capture_per_packet: int
    :param total_bytes_per_session: Maximum size of the capture output.
     Default value: 1073741824 .
    :type total_bytes_per_session: int
    :param time_limit_in_seconds: Maximum duration of the capture session in
     seconds. Default value: 18000 .
    :type time_limit_in_seconds: int
    :param storage_location: Required.
    :type storage_location:
     ~azure.mgmt.network.v2017_06_01.models.PacketCaptureStorageLocation
    :param filters:
    :type filters:
     list[~azure.mgmt.network.v2017_06_01.models.PacketCaptureFilter]
    """

    _validation = {
        'target': {'required': True},
        'storage_location': {'required': True},
    }

    _attribute_map = {
        'target': {'key': 'properties.target', 'type': 'str'},
        'bytes_to_capture_per_packet': {'key': 'properties.bytesToCapturePerPacket', 'type': 'int'},
        'total_bytes_per_session': {'key': 'properties.totalBytesPerSession', 'type': 'int'},
        'time_limit_in_seconds': {'key': 'properties.timeLimitInSeconds', 'type': 'int'},
        'storage_location': {'key': 'properties.storageLocation', 'type': 'PacketCaptureStorageLocation'},
        'filters': {'key': 'properties.filters', 'type': '[PacketCaptureFilter]'},
    }

    def __init__(self, **kwargs):
        super(PacketCapture, self).__init__(**kwargs)
        # Pull each capture parameter out of kwargs, falling back to the
        # service defaults documented above when a key is absent.
        get = kwargs.get
        self.target = get('target')
        self.bytes_to_capture_per_packet = get('bytes_to_capture_per_packet', 0)
        self.total_bytes_per_session = get('total_bytes_per_session', 1073741824)
        self.time_limit_in_seconds = get('time_limit_in_seconds', 18000)
        self.storage_location = get('storage_location')
        self.filters = get('filters')
| 44.080645 | 106 | 0.668862 |
acf3448e88da4f6ed55f797e9175ae690ffc91de | 5,584 | py | Python | frappe/website/statics.py | pawaranand/phr_frappe | d997ae7d6fbade4b2c4a2491603d988876dfd67e | [
"MIT"
] | null | null | null | frappe/website/statics.py | pawaranand/phr_frappe | d997ae7d6fbade4b2c4a2491603d988876dfd67e | [
"MIT"
] | 1 | 2015-07-11T20:52:38.000Z | 2019-12-06T15:00:58.000Z | frappe/website/statics.py | pawaranand/phr_frappe | d997ae7d6fbade4b2c4a2491603d988876dfd67e | [
"MIT"
] | 2 | 2015-09-05T05:30:23.000Z | 2018-03-21T19:45:10.000Z | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, os, time
def sync_statics(rebuild=False):
	"""Continuously sync static web pages from all installed apps.

	Runs forever: after the first pass (optionally rebuilding everything),
	it re-syncs every 2 seconds, committing after each pass.
	"""
	s = sync()
	s.verbose = True
	# s.start(rebuild)
	# frappe.db.commit()
	while True:
		s.start(rebuild)
		frappe.db.commit()
		time.sleep(2)
		# only the very first pass honours the rebuild flag
		rebuild = False
class sync(object):
	"""Syncs static template pages (app/templates/statics) into
	`Web Page` documents, mirroring the folder hierarchy."""
	def __init__(self, verbose=False):
		# verbose: print a line for every Web Page inserted/updated
		self.verbose = verbose
	def start(self, rebuild=False):
		"""Run one sync pass over all installed apps.

		If `rebuild`, delete every statics-backed Web Page first so all
		of them are recreated from the template files."""
		self.synced = []
		self.synced_paths = []
		self.updated = 0
		if rebuild:
			frappe.db.sql("delete from `tabWeb Page` where ifnull(template_path, '')!=''")
		for app in frappe.get_installed_apps():
			self.sync_for_app(app)
		self.cleanup()
	def sync_for_app(self, app):
		"""Walk the app's templates/statics tree (if present) and sync it."""
		self.statics_path = frappe.get_app_path(app, "templates", "statics")
		if os.path.exists(self.statics_path):
			for basepath, folders, files in os.walk(self.statics_path):
				self.sync_folder(basepath, folders, files)
	def sync_folder(self, basepath, folders, files):
		"""Sync one directory: the folder's index page first, then its
		children — in index.txt order if present, else alphabetically."""
		self.get_index_txt(basepath, files)
		index_found = self.sync_index_page(basepath, files)
		if not index_found and basepath!=self.statics_path:
			# not synced either by generator or by index.html
			return
		if self.index:
			self.sync_using_given_index(basepath, folders, files)
		else:
			self.sync_alphabetically(basepath, folders, [filename for filename in files if filename.endswith('html') or filename.endswith('md')])
	def get_index_txt(self, basepath, files):
		"""Load the optional index.txt (one page name per line) into self.index."""
		self.index = []
		if "index.txt" in files:
			with open(os.path.join(basepath, "index.txt"), "r") as indexfile:
				self.index = indexfile.read().splitlines()
	def sync_index_page(self, basepath, files):
		"""Sync this folder's index.md/index.html; return True if one existed."""
		for extn in ("md", "html"):
			fname = "index." + extn
			if fname in files:
				self.sync_file(fname, os.path.join(basepath, fname), None)
				return True
	def sync_using_given_index(self, basepath, folders, files):
		"""Sync children in index.txt order; the list position becomes idx."""
		for i, page_name in enumerate(self.index):
			if page_name in folders:
				# for folder, sync inner index first (so that idx is set)
				for extn in ("md", "html"):
					path = os.path.join(basepath, page_name, "index." + extn)
					if os.path.exists(path):
						self.sync_file("index." + extn, path, i)
						break
			# other files
			if page_name + ".md" in files:
				self.sync_file(page_name + ".md", os.path.join(basepath, page_name + ".md"), i)
			elif page_name + ".html" in files:
				self.sync_file(page_name + ".html", os.path.join(basepath, page_name + ".html"), i)
			else:
				if page_name not in folders:
					print page_name + " not found in " + basepath
	def sync_alphabetically(self, basepath, folders, files):
		"""Sync children in filename order (no index.txt, so idx stays None)."""
		files.sort()
		for fname in files:
			page_name = fname.rsplit(".", 1)[0]
			if not (page_name=="index" and basepath!=self.statics_path):
				self.sync_file(fname, os.path.join(basepath, fname), None)
	def sync_file(self, fname, template_path, priority):
		"""Insert or update the `Web Page` record for one template file.

		`priority` (position in index.txt, or None) becomes the page idx.
		Pages already handled in this pass are skipped via self.synced."""
		route = os.path.relpath(template_path, self.statics_path).rsplit(".", 1)[0]
		if fname.rsplit(".", 1)[0]=="index" and \
			os.path.dirname(template_path) != self.statics_path:
			route = os.path.dirname(route)
		# parent page = the Web Page of the containing folder, if any
		parent_web_page = frappe.db.sql("""select name from `tabWeb Page` where
			page_name=%s and ifnull(parent_website_route, '')=ifnull(%s, '')""",
			(os.path.basename(os.path.dirname(route)), os.path.dirname(os.path.dirname(route))))
		parent_web_page = parent_web_page and parent_web_page[0][0] or ""
		page_name = os.path.basename(route)
		published = 1
		idx = priority
		if (parent_web_page, page_name) in self.synced:
			return
		title = self.get_title(template_path)
		if not frappe.db.get_value("Web Page", {"template_path":template_path}):
			web_page = frappe.new_doc("Web Page")
			web_page.page_name = page_name
			web_page.parent_web_page = parent_web_page
			web_page.template_path = template_path
			web_page.title = title
			web_page.published = published
			web_page.idx = idx
			web_page.from_website_sync = True
			web_page.insert()
			if self.verbose: print "Inserted: " + web_page.name
		else:
			web_page = frappe.get_doc("Web Page", {"template_path":template_path})
			dirty = False
			# only save when one of the derived values actually changed
			for key in ("parent_web_page", "title", "template_path", "published", "idx"):
				if web_page.get(key) != locals().get(key):
					web_page.set(key, locals().get(key))
					dirty = True
			if dirty:
				web_page.from_website_sync = True
				web_page.save()
				if self.verbose: print "Updated: " + web_page.name
		self.synced.append((parent_web_page, page_name))
	def get_title(self, fpath):
		"""Derive the page title: a leading '# Heading' or an embedded
		'<!-- title: ... -->' wins; otherwise prettify the file/folder name."""
		title = os.path.basename(fpath).rsplit(".", 1)[0]
		if title =="index":
			title = os.path.basename(os.path.dirname(fpath))
		title = title.replace("-", " ").replace("_", " ").title()
		with open(fpath, "r") as f:
			content = unicode(f.read().strip(), "utf-8")
			if content.startswith("# "):
				title = content.splitlines()[0][2:]
			if "<!-- title:" in content:
				title = content.split("<!-- title:", 1)[1].split("-->", 1)[0].strip()
		return title
	def cleanup(self):
		"""Delete statics-backed Web Pages not seen in this pass (or all
		of them if nothing at all was synced)."""
		if self.synced:
			# delete static web pages that are not in immediate list
			for static_page in frappe.db.sql("""select name, page_name, parent_web_page
				from `tabWeb Page` where ifnull(template_path,'')!=''""", as_dict=1):
				if (static_page.parent_web_page, static_page.page_name) not in self.synced:
					frappe.delete_doc("Web Page", static_page.name, force=1)
		else:
			# delete all static web pages
			frappe.delete_doc("Web Page", frappe.db.sql_list("""select name
				from `tabWeb Page`
				where ifnull(template_path,'')!=''"""), force=1)
acf345979df3b6a81b5fd7150c1bce23c0413011 | 1,525 | py | Python | python/phonenumbers/data/region_ET.py | timgates42/python-phonenumbers | 98895826729a234acc1e27ce8e280fe7e54754ab | [
"Apache-2.0"
] | null | null | null | python/phonenumbers/data/region_ET.py | timgates42/python-phonenumbers | 98895826729a234acc1e27ce8e280fe7e54754ab | [
"Apache-2.0"
] | 10 | 2020-03-24T10:47:53.000Z | 2021-04-08T19:51:44.000Z | myvenv/lib/python3.6/site-packages/phonenumbers/data/region_ET.py | yog240597/saleor | b75a23827a4ec2ce91637f0afe6808c9d09da00a | [
"CC-BY-4.0"
] | 1 | 2020-12-14T11:39:53.000Z | 2020-12-14T11:39:53.000Z | """Auto-generated file, do not edit by hand. ET metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_ET = PhoneMetadata(id='ET', country_code=251, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='(?:11|[2-59]\\d)\\d{7}', possible_length=(9,), possible_length_local_only=(7,)),
fixed_line=PhoneNumberDesc(national_number_pattern='116671\\d{3}|(?:11(?:1(?:1[124]|2[2-57]|3[1-5]|5[5-8]|8[6-8])|2(?:13|3[6-8]|5[89]|7[05-9]|8[2-6])|3(?:2[01]|3[0-289]|4[1289]|7[1-4]|87)|4(?:1[69]|3[2-49]|4[0-3]|6[5-8])|5(?:1[578]|44|5[0-4])|6(?:1[78]|2[69]|39|4[5-7]|5[1-5]|6[0-59]|8[015-8]))|2(?:2(?:11[1-9]|22[0-7]|33\\d|44[1467]|66[1-68])|5(?:11[124-6]|33[2-8]|44[1467]|55[14]|66[1-3679]|77[124-79]|880))|3(?:3(?:11[0-46-8]|(?:22|55)[0-6]|33[0134689]|44[04]|66[01467])|4(?:44[0-8]|55[0-69]|66[0-3]|77[1-5]))|4(?:6(?:119|22[0-24-7]|33[1-5]|44[13-69]|55[14-689]|660|88[1-4])|7(?:(?:11|22)[1-9]|33[13-7]|44[13-6]|55[1-689]))|5(?:7(?:227|55[05]|(?:66|77)[14-8])|8(?:11[149]|22[013-79]|33[0-68]|44[013-8]|550|66[1-5]|77\\d)))\\d{4}', example_number='111112345', possible_length=(9,), possible_length_local_only=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='9\\d{8}', example_number='911234567', possible_length=(9,)),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['[1-59]'], national_prefix_formatting_rule='0\\1')])
| 138.636364 | 821 | 0.630164 |
acf34652e443f950f91bbf87d21c58291a1a6474 | 1,210 | py | Python | binding-python/runtime/src/test/python/tests/python/TestPython.py | apache/etch | 5a875755019a7f342a07c8c368a50e3efb6ae68c | [
"ECL-2.0",
"Apache-2.0"
] | 9 | 2015-02-14T15:09:54.000Z | 2021-11-10T15:09:45.000Z | binding-python/runtime/src/test/python/tests/python/TestPython.py | apache/etch | 5a875755019a7f342a07c8c368a50e3efb6ae68c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | binding-python/runtime/src/test/python/tests/python/TestPython.py | apache/etch | 5a875755019a7f342a07c8c368a50e3efb6ae68c | [
"ECL-2.0",
"Apache-2.0"
] | 14 | 2015-04-20T10:35:00.000Z | 2021-11-10T15:09:35.000Z | # Licensed to the Apache Software Foundation (ASF) under one *
# or more contributor license agreements. See the NOTICE file *
# distributed with this work for additional information *
# regarding copyright ownership. The ASF licenses this file *
# to you under the Apache License, Version 2.0 (the *
# "License"); you may not use this file except in compliance *
# with the License. You may obtain a copy of the License at *
# *
# http://www.apache.org/licenses/LICENSE-2.0 *
# *
# Unless required by applicable law or agreed to in writing, *
# software distributed under the License is distributed on an *
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY *
# KIND, either express or implied. See the License for the *
# specific language governing permissions and limitations *
# under the License.
import unittest
class Test_EtchPython(unittest.TestCase):
    """Skeleton test suite for the Etch Python binding (no real tests yet)."""

    def setUp(self):
        # No fixtures to prepare yet.
        pass

    def test1(self):
        # Placeholder so the suite runs; real tests to be added later.
        pass


if __name__ == "__main__":
    unittest.main()
| 40.333333 | 65 | 0.603306 |
acf3468f8af9a090aa190501813510af6f9bda4f | 4,417 | py | Python | Past_experiments/niiE6C2.py | abogdanova/FedMed | 72f238c31b6714c664e1b0e40204f9528f764182 | [
"MIT"
] | 5 | 2019-07-23T14:49:46.000Z | 2022-03-30T13:54:22.000Z | Past_experiments/niiE6C2.py | abogdanova/FedMed | 72f238c31b6714c664e1b0e40204f9528f764182 | [
"MIT"
] | null | null | null | Past_experiments/niiE6C2.py | abogdanova/FedMed | 72f238c31b6714c664e1b0e40204f9528f764182 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
import collections
import numpy as np
from six.moves import range
import tensorflow as tf
import datetime
from tensorflow_federated import python as tff
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.keras import layers
tf.compat.v1.enable_v2_behavior()
NUM_EXAMPLES_PER_USER = 2000  # samples held by each simulated client
BATCH_SIZE = 32  # local minibatch size
USERS = 5  # number of federated clients
NUM_EPOCHS = 6  # local epochs per round (the "E6" in the experiment name)
CLASSES = 10  # CIFAR-10 label count
WIDTH = 32  # image width in pixels
HEIGHT = 32  # image height in pixels
CHANNELS = 3  # RGB
def mane():
    """Run the federated-averaging experiment (non-IID CIFAR-10, E6C2).

    Each round samples 2 of the 5 simulated clients, runs federated
    averaging, evaluates on the federated test data, and finally writes
    the loss/accuracy curves to ``Log/Exp5/niiE6C2.txt``.
    """
    cifar_train, cifar_test = tf.keras.datasets.cifar10.load_data()
    federated_train_data = [get_distributed(cifar_train, u, 'non-iid') for u in range(USERS)]
    federated_test_data = [get_distributed(cifar_test, u, 'non-iid') for u in range(USERS)]
    # Any representative batch works here; TFF only needs its structure.
    sample_batch = federated_train_data[1][-2]
    def model_fn():
        # Fresh compiled Keras model wrapped for TFF on every call.
        keras_model = create_compiled_keras_model()
        return tff.learning.from_compiled_keras_model(keras_model, sample_batch)
    iterative_process = tff.learning.build_federated_averaging_process(model_fn)
    evaluation = tff.learning.build_federated_evaluation(model_fn)
    state = iterative_process.initialize()
    fd_test_accuracy = []
    fd_train_loss = []
    for round_num in range(4):
        # Client sampling: 2 of 5 clients per round (the "C2"), no repeats.
        selected = np.random.choice(5, 2, replace=False)
        state, metrics = iterative_process.next(state, list(np.array(federated_train_data)[selected]))
        test_metrics = evaluation(state.model, federated_test_data)
        fd_train_loss.append(metrics[1])
        fd_test_accuracy.append(test_metrics.sparse_categorical_accuracy)
    try:
        with open('Log/Exp5/niiE6C2.txt', 'w') as log:
            print("Cifar10, Federated E6C2, non-IID, minibatch_size: 32", file=log)
            print("Train Loss: {}".format(fd_train_loss), file=log)
            print("Test Accuracy: {}".format(fd_test_accuracy), file=log)
    except IOError:
        # e.g. the Log/Exp5 directory does not exist
        print('File Error')
def get_indices_unbalanced(y, classes=10, users=5, examples_per_user=2000):
    """Split sample indices among users in a non-IID (label-skewed) way.

    Each user receives whole classes only: the label groups are dealt out
    ``classes // min(classes, users)`` at a time, and from each assigned
    class the user takes the first ``examples_per_user // class_shares``
    indices.

    Args:
        y: iterable of integer class labels, one per sample.
        classes, users, examples_per_user: split parameters. Defaults
            mirror the module-level CLASSES/USERS/NUM_EXAMPLES_PER_USER
            constants (keep in sync); passing them explicitly generalizes
            the split to other configurations.

    Returns:
        List of ``users`` flat numpy index arrays, one per user.
    """
    # Bucket sample positions by class label.
    per_class = [[i for i, label in enumerate(y) if label == c] for c in range(classes)]
    class_shares = classes // min(classes, users)
    take = examples_per_user // class_shares
    user_indices = []
    for _ in range(users):
        # Each user consumes the next `class_shares` whole classes.
        chunk = [per_class.pop(0)[:take] for _ in range(class_shares)]
        user_indices.append(np.array(chunk).flatten())
    return user_indices
def get_indices_even(y, classes=10, users=5, examples_per_user=2000):
    """Split sample indices among users in an IID (class-balanced) way.

    Every user receives an equal share (``examples_per_user // classes``)
    of each class; successive users take successive non-overlapping bands.
    Requires every class to occur equally often in ``y``.

    Args:
        y: iterable of integer class labels, one per sample.
        classes, users, examples_per_user: split parameters. Defaults
            mirror the module-level CLASSES/USERS/NUM_EXAMPLES_PER_USER
            constants (keep in sync); passing them explicitly generalizes
            the split to other configurations.

    Returns:
        List of ``users`` flat numpy index arrays, one per user.
    """
    # Bucket sample positions by class label.
    per_class = [[i for i, label in enumerate(y) if label == c] for c in range(classes)]
    share = examples_per_user // classes
    # Transpose to rows of "one index per class"; hoisted out of the loop
    # (the original rebuilt this array for every user).
    stacked = np.array(per_class).T
    user_indices = []
    for u in range(users):
        start = u * share
        user_indices.append(stacked[start:start + share].flatten())
    return user_indices
def get_distributed(source, u, distribution):
    """Build user ``u``'s batched dataset from ``source`` = (images, labels).

    Returns a list of ``{'x': float32 array, 'y': int32 array}`` batches,
    repeated for NUM_EPOCHS epochs. Pixel values are scaled to [0, 1].
    """
    selector = get_indices_even if distribution == 'iid' else get_indices_unbalanced
    indices = selector(source[1])[u]
    batches = []
    for _ in range(NUM_EPOCHS):
        for start in range(0, len(indices), BATCH_SIZE):
            sample_ids = indices[start:start + BATCH_SIZE]
            batches.append({
                'x': np.array([source[0][s] / 255.0 for s in sample_ids], dtype=np.float32),
                'y': np.array([source[1][s] for s in sample_ids], dtype=np.int32),
            })
    return batches
def create_compiled_keras_model():
    """Return a small compiled CNN for (WIDTH, HEIGHT, CHANNELS) images."""
    layers = tf.keras.layers
    model = tf.keras.models.Sequential([
        layers.Conv2D(32, (3, 3), activation="tanh", padding="same",
                      input_shape=(WIDTH, HEIGHT, CHANNELS)),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Conv2D(64, (3, 3), activation="tanh", padding="same"),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Flatten(),
        layers.Dense(128, activation="tanh"),
        layers.Dense(10, activation=tf.nn.softmax),
    ])

    def loss_fn(y_true, y_pred):
        # Reduce the per-example sparse cross entropy to a scalar.
        ce = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
        return tf.reduce_mean(ce)

    model.compile(loss=loss_fn, optimizer="adam",
                  metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
    return model
if __name__ == "__main__":
    # NOTE(review): `mane()` is not defined in this portion of the file;
    # presumably the entry point (possibly a misspelling of `main`) is
    # defined earlier -- confirm before renaming.
    mane()
| 33.976923 | 113 | 0.685307 |
acf346c2ce85e805bfb4912c172b4a3b68832fd6 | 2,898 | py | Python | setmode.py | claytonn73/myenergi_api | eb6578954247f4df06b0e4e0779f94ea9aca0ee0 | [
"MIT"
] | null | null | null | setmode.py | claytonn73/myenergi_api | eb6578954247f4df06b0e4e0779f94ea9aca0ee0 | [
"MIT"
] | null | null | null | setmode.py | claytonn73/myenergi_api | eb6578954247f4df06b0e4e0779f94ea9aca0ee0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Python script to set the Zappi mode.
This script will check the current mode and then set to the desired mode if different
It will then wait until the mode has been successfully set before exiting
"""
import argparse
import logging
import logging.handlers
import time
import os
import dotenv
import myenergi
def get_logger():
    """Return the root logger configured to emit INFO messages to syslog."""
    syslog = logging.handlers.SysLogHandler(
        facility=logging.handlers.SysLogHandler.LOG_DAEMON, address='/dev/log')
    syslog.setFormatter(logging.Formatter(
        fmt='python[%(process)d]: [%(levelname)s] %(filename)s:%(funcName)s:%(lineno)d \"%(message)s\"'))
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addHandler(syslog)
    return root
def get_options():
    """Collect serial, password and mode from argv and/or ``~/.env``."""
    env = dotenv.dotenv_values(os.path.expanduser("~/.env"))
    parser = argparse.ArgumentParser(
        description='Sets the Zappi mode using the myenergi API')
    # Only demand credentials on the command line when the dotenv file
    # does not already provide them.
    have_serial = "myenergi_serial" in env
    have_password = "myenergi_password" in env
    if not have_serial:
        parser.add_argument('-s', '--serial', required=True,
                            help='myenergi hub serial number')
    if not have_password:
        parser.add_argument('-p', '--password', required=True,
                            help='myenergi password')
    parser.add_argument('-m', '--mode', required=True,
                        choices=list(myenergi.ZappiMode.values()),
                        help='Desired operating mode for Zappi')
    args = parser.parse_args()
    # Values from the dotenv file take precedence over the CLI.
    if have_serial:
        args.serial = env['myenergi_serial']
    if have_password:
        args.password = env['myenergi_password']
    return args
def main():
    """Set the mode for all Zappis as requested if not already set."""
    # Set the logging level for the myenergi api client
    logging.getLogger('myenergi.api').setLevel(logging.INFO)
    # Set up the local logger
    logger = get_logger()
    args = get_options()
    with myenergi.API(args.serial, args.password) as mye:
        if mye.get_serials("ZAPPI") is None:
            logger.error("Unable to set mode as no Zappi Detected")
        else:
            for serial in mye.get_serials("ZAPPI"):
                # Look up the charger's current mode before changing it.
                current_mode = myenergi.ZappiMode[mye.get_zappi_info(serial, "MODE")]
                if args.mode == current_mode:
                    logger.info("Mode for Zappi SN: %s is already %s", serial, args.mode)
                else:
                    mye.set_zappi_mode(serial, args.mode)
                    # Poll every 3 seconds until the hub reports the new mode.
                    while args.mode != myenergi.ZappiMode[mye.get_zappi_info(serial, "MODE")]:
                        time.sleep(3)
                        mye.refresh_status("ZAPPI", serial)
                    logger.info("Mode for Zappi SN:%s has been switched to %s", serial, args.mode)


if __name__ == "__main__":
    main()
| 38.64 | 116 | 0.657695 |
acf346c594774cbd45afda6813294aa9aa34cd8e | 264 | py | Python | Step1-PythonBasic/Practices/yuxq/26-30/paixu.py | Jumpers/MysoftAutoTest | 50efc385a96532fc0777061d6c5e7201a4991f04 | [
"Apache-2.0"
] | null | null | null | Step1-PythonBasic/Practices/yuxq/26-30/paixu.py | Jumpers/MysoftAutoTest | 50efc385a96532fc0777061d6c5e7201a4991f04 | [
"Apache-2.0"
] | null | null | null | Step1-PythonBasic/Practices/yuxq/26-30/paixu.py | Jumpers/MysoftAutoTest | 50efc385a96532fc0777061d6c5e7201a4991f04 | [
"Apache-2.0"
] | null | null | null | #coding=utf-8
def break_words(stuff):
    """Split *stuff* into words on single spaces and return the list."""
    return stuff.split(' ')
# Demonstration: split a sentence, sort the words in place, then show that
# sorted() can also order a tuple of words without mutating it.
# NOTE: this file uses Python 2 print statements.
words=break_words('are you right')
words.sort()
print words
a=sorted(('are','you','right'))
print a
acf347d2af8cdd3fbdb0bcc35c8bb6acabb01211 | 7,863 | py | Python | source/tests/test_scheduler.py | turnoutnow/maintaining-personalized-experiences-with-machine-learning | b45588c094734cce70198811890a28e65b8e39e1 | [
"Apache-2.0"
] | 6 | 2021-09-23T16:33:24.000Z | 2022-03-31T11:45:13.000Z | source/tests/test_scheduler.py | turnoutnow/maintaining-personalized-experiences-with-machine-learning | b45588c094734cce70198811890a28e65b8e39e1 | [
"Apache-2.0"
] | 4 | 2021-09-24T21:34:14.000Z | 2022-01-27T22:11:08.000Z | source/tests/test_scheduler.py | turnoutnow/maintaining-personalized-experiences-with-machine-learning | b45588c094734cce70198811890a28e65b8e39e1 | [
"Apache-2.0"
] | 9 | 2021-09-23T23:24:46.000Z | 2022-02-12T04:53:16.000Z | # ######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for #
# the specific language governing permissions and limitations under the License. #
# ######################################################################################################################
import json
import os
import boto3
import pytest
from moto.core import ACCOUNT_ID
from moto.dynamodb2 import mock_dynamodb2
from moto.stepfunctions import mock_stepfunctions
from aws_solutions.scheduler.cdk.aws_lambda.scheduler.handler import (
create_schedule,
read_schedule,
update_schedule,
delete_schedule,
)
from aws_solutions.scheduler.common import (
Scheduler,
Schedule,
ScheduleError,
Task,
)
@pytest.fixture
def scheduler_stepfunctions_target_arn():
    """ARN of the mocked 'target' state machine."""
    name = "personalizestack-personalize-target"
    return f"arn:aws:states:us-east-1:{ACCOUNT_ID}:stateMachine:{name}"


@pytest.fixture
def scheduler_stepfunctions_scheduler_arn():
    """ARN of the mocked 'scheduler' state machine."""
    name = "personalizestack-personalize-scheduler"
    return f"arn:aws:states:us-east-1:{ACCOUNT_ID}:stateMachine:{name}"
@pytest.fixture
def scheduler_stepfunctions(
    scheduler_stepfunctions_target_arn, scheduler_stepfunctions_scheduler_arn
):
    """Yield a moto-mocked Step Functions client plus both machine ARNs."""
    with mock_stepfunctions():
        sfn = boto3.client("stepfunctions")
        # Minimal machine definition -- moto does not validate it.
        # NOTE(review): "States" maps directly to a state body without a
        # state-name key; real Step Functions would reject this definition.
        definition = json.dumps(
            {
                "StartAt": "FirstState",
                "States": {
                    "Type": "Task",
                    "Resource": f"arn:aws:lambda:us-east-1:{ACCOUNT_ID}:function:FUNCTION_NAME",
                    "End": True,
                },
            }
        )
        # Register both machines so their ARNs resolve inside the mock.
        sfn.create_state_machine(
            name=scheduler_stepfunctions_target_arn.split(":")[-1],
            definition=definition,
            roleArn=f"arn:aws:iam::{ACCOUNT_ID}:role/sf_role",
        )
        sfn.create_state_machine(
            name=scheduler_stepfunctions_scheduler_arn.split(":")[-1],
            definition=definition,
            roleArn=f"arn:aws:iam::{ACCOUNT_ID}:role/sf_role",
        )
        yield sfn, scheduler_stepfunctions_target_arn, scheduler_stepfunctions_scheduler_arn
@pytest.fixture
def scheduler_table():
    """Yield a moto-mocked DynamoDB resource and the schedules table name.

    Also exports DDB_SCHEDULES_TABLE so the handlers under test find it.
    """
    scheduler_table_name = "scheduler"
    os.environ["DDB_SCHEDULES_TABLE"] = scheduler_table_name
    with mock_dynamodb2():
        ddb = boto3.resource("dynamodb")
        # Composite key: task name (hash) + version string (range).
        ddb.create_table(
            TableName=scheduler_table_name,
            KeySchema=[
                {"AttributeName": "name", "KeyType": "HASH"},
                {
                    "AttributeName": "version",
                    "KeyType": "RANGE",
                },
            ],
            AttributeDefinitions=[
                {"AttributeName": "name", "AttributeType": "S"},
                {"AttributeName": "version", "AttributeType": "S"},
            ],
        )
        yield ddb, scheduler_table_name
@pytest.fixture
def task(scheduler_stepfunctions_target_arn):
    """A minute-by-minute Task pointing at the mocked target state machine."""
    state_machine = {"arn": scheduler_stepfunctions_target_arn, "input": {}}
    return Task(name="test", schedule="cron(* * * * ? *)", state_machine=state_machine)
@pytest.fixture
def scheduler(scheduler_table, scheduler_stepfunctions, mocker):
    """Yield a Scheduler wired to the mocked SFN client and patched into the handlers."""
    _, scheduler_table_name = scheduler_table
    sfn_cli, _, sfn_arn = scheduler_stepfunctions
    _scheduler = Scheduler()
    _scheduler.sfn_cli = sfn_cli
    _scheduler.stepfunction = sfn_arn
    # The Lambda handlers use a module-level scheduler; swap in this one.
    mocker.patch(
        "aws_solutions.scheduler.cdk.aws_lambda.scheduler.handler.scheduler", _scheduler
    )
    yield _scheduler
def test_create(scheduler, task):
    """A created task can be read back unchanged."""
    scheduler.create(task)
    assert scheduler.read(task.name) == task


def test_read(scheduler, task):
    """After one update the stored task reports latest 1, version v0."""
    scheduler.create(task)
    scheduler.update(task)
    stored = scheduler.read(task)
    assert (stored.latest, stored.version) == (1, "v0")


def test_delete(scheduler, task):
    """Deleting removes the task, including its updated revision."""
    scheduler.create(task)
    scheduler.update(task)
    scheduler.delete(task)
    assert not scheduler.read(task)


def test_list(scheduler, task):
    """list() yields the names of every stored task."""
    scheduler.create(task)
    scheduler.update(task)
    # Store a second task under a new name.
    task.name = "test1"
    task.next_task_id = task.get_next_task_id()
    scheduler.create(task)
    scheduler.update(task)
    names = list(scheduler.list())
    assert len(names) == 2
    assert {"test", "test1"} <= set(names)
def _schedule_event(arn, expression):
    """Build the Lambda event payload shared by the handler tests."""
    return {
        "name": "test",
        "schedule": expression,
        "state_machine": {"arn": arn, "input": {}},
    }


def test_scheduler_create_handler(scheduler, scheduler_stepfunctions_target_arn):
    """The create handler accepts a well-formed event."""
    create_schedule(
        _schedule_event(scheduler_stepfunctions_target_arn, "cron(* * * * ? *)"),
        None,
    )


def test_scheduler_update_handler(task, scheduler, scheduler_stepfunctions_target_arn):
    """The update handler replaces the schedule and bumps `latest`."""
    scheduler.create(task)
    assert scheduler.read(task).schedule == task.schedule

    replacement = Schedule("cron(10 * * * ? *)")
    update_schedule(
        _schedule_event(scheduler_stepfunctions_target_arn, replacement.expression),
        None,
    )

    stored = scheduler.read(task)
    assert stored.schedule == replacement
    assert stored.latest == 2
def test_read_schedule_handler(task, scheduler):
    """The read handler returns the stored name and cron expression."""
    scheduler.create(task)
    payload = read_schedule({"name": "test"}, None)
    assert payload.get("name") == task.name
    assert payload.get("schedule") == task.schedule.expression


def test_delete_schedule_handler(task, scheduler):
    """The delete handler removes an existing task."""
    scheduler.create(task)
    assert scheduler.read(task.name)
    delete_schedule({"name": "test"}, None)
    assert not scheduler.read(task.name)


def test_delete_as_create(scheduler):
    """Creating a task whose schedule is 'delete' stores nothing."""
    doomed = Task("testing", schedule="delete")
    scheduler.create(doomed)
    assert not scheduler.read(doomed.name)
_INVALID_EXPRESSIONS = [
    "cron(0 12 * * ? * *)",  # too many fields
    "cron(0 12 * * ?)",  # too few fields
    "cron(5,35 14 * * * *)",  # both day of week and day of month specified
    "cron(5,35 14 * * ? 1888)",  # year too early
    "not-cron",  # not a cron expression
]


@pytest.mark.parametrize("expression", _INVALID_EXPRESSIONS)
def test_configuration_cron_invalid(expression):
    """Malformed cron expressions are rejected with ScheduleError."""
    with pytest.raises(ScheduleError):
        Schedule(expression)
| 31.452 | 120 | 0.57052 |
acf3481a17a66f16906d907f14d433680bbfa4dd | 1,310 | py | Python | migrations/versions/0040.py | NewAcropolis/api | 61ffe14cb64407ffe1f58d0e970703bf07d60ea3 | [
"MIT"
] | 1 | 2018-10-12T15:04:31.000Z | 2018-10-12T15:04:31.000Z | migrations/versions/0040.py | NewAcropolis/api | 61ffe14cb64407ffe1f58d0e970703bf07d60ea3 | [
"MIT"
] | 169 | 2017-11-07T00:45:25.000Z | 2022-03-12T00:08:59.000Z | migrations/versions/0040.py | NewAcropolis/api | 61ffe14cb64407ffe1f58d0e970703bf07d60ea3 | [
"MIT"
] | 1 | 2019-08-15T14:51:31.000Z | 2019-08-15T14:51:31.000Z | """empty message
Revision ID: 0040 add email providers
Revises: 0039 add subject to emails
Create Date: 2020-06-18 01:38:30.911104
"""
# revision identifiers, used by Alembic.
revision = '0040 add email providers'
down_revision = '0039 add subject to emails'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('email_providers',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.Column('daily_limit', sa.Integer(), nullable=True),
sa.Column('api_key', sa.String(), nullable=True),
sa.Column('api_url', sa.String(), nullable=True),
sa.Column('data_struct', sa.String(), nullable=True),
sa.Column('pos', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name'),
sa.UniqueConstraint('pos')
)
op.add_column(u'email_to_member', sa.Column('emailed_by', sa.String(), nullable=True))
# ### end Alembic commands ###
def downgrade():
    """Revert migration 0040: drop the column and table added by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column(u'email_to_member', 'emailed_by')
    op.drop_table('email_providers')
    # ### end Alembic commands ###
| 32.75 | 90 | 0.691603 |
acf34887f267fcb0393b4314976270ea3835bec6 | 17,564 | py | Python | Functions/basic_DEA_data&code/DEA.py | PO-LAB/DEA | f17f261e013ad7a0d7ff48affe67174b572e17ba | [
"MIT"
] | 74 | 2018-01-29T06:40:57.000Z | 2022-02-17T20:41:11.000Z | Functions/basic_DEA_data&code/DEA.py | PO-LAB/DEA | f17f261e013ad7a0d7ff48affe67174b572e17ba | [
"MIT"
] | 2 | 2018-02-09T13:42:33.000Z | 2021-05-20T10:03:41.000Z | Functions/basic_DEA_data&code/DEA.py | PO-LAB/DEA | f17f261e013ad7a0d7ff48affe67174b572e17ba | [
"MIT"
] | 45 | 2018-01-30T07:32:13.000Z | 2022-01-20T09:49:59.000Z | # -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
from gurobipy import*
from itertools import islice
import csv
# TODO reading csv file including inputs and outputs for DMUs, transfered to 'dict' types for Linear Programming Modeling in Gurobi Software(package)
def csv2dict(dea_data, in_range, out_range, assign=False):
f=open(dea_data)
reader=csv.reader(f)
DMU = []
X,Y={},{}
# All values in in_range should be greater than 0; otherwise, stop the function
if all(value > 0 for value in in_range):
in_range[:]=[x-1 for x in in_range]
else:
print("Error: all values given in in_range should be greater than 0")
# Return nothing to stop the function
return
# Same as the in_range
if all(value > 0 for value in out_range):
out_range[:] = [y-1 for y in out_range]
else:
print("Error: all values given in out_range should be greater than 0")
# Return nothing to stop the function
return
for line in islice(reader,1,None):
obs = line
key=obs[0] # Remove line breaks '/n'
DMU.append(key) # Get DMU names
# Create dictionaries
try:
if (assign==False):
# Give a range to get input and ouput data
X[key]= [float(v) for v in obs[(in_range[0]):(in_range[1]+1)]] # List comprehension
Y[key]= [float(v) for v in obs[(out_range[0]):(out_range[1]+1)]]
elif (assign==True):
# Get specific lines as input and output data
# X and Y are input and output of DMU separately
X[key]= [float(v) for v in (list(obs[i] for i in in_range))] # List comprehension
Y[key]= [float(v) for v in (list(obs[i] for i in out_range))]
except ValueError :
print("which means your data include string not number")
return DMU, X, Y
# Reads a CSV holding ONLY inputs or ONLY outputs (two-file layout),
# producing the dict form used for LP modeling in Gurobi.
def csv2dict_sep(dea_data, vrange=None, assign=False):
    """Read a one-sided DEA CSV (inputs OR outputs) into a per-DMU dict.

    :param dea_data: path to a CSV with a header row; column 0 is the DMU name.
    :param vrange: 1-based column numbers to keep when ``assign`` is True.
        Defaults to ``[0, 0]`` when omitted (matching the old signature).
    :param assign: when False, keep every column after the name; when True,
        keep only the columns listed in ``vrange``.
    :return: ``(DMU, value)`` -- the ordered name list and a dict of float
        vectors per name.
    """
    # The old signature used a mutable default (`vrange=[0,0]`) that was
    # mutated in place, so every call with the default decremented the
    # shared list again. Use None as the sentinel instead.
    if vrange is None:
        vrange = [0, 0]
    cols = [v - 1 for v in vrange]  # 0-based copy; caller's list untouched
    DMU = []
    value = {}
    # `with` guarantees the handle is closed (it was previously leaked).
    with open(dea_data) as f:
        reader = csv.reader(f)
        for obs in islice(reader, 1, None):  # skip the header row
            key = obs[0]
            DMU.append(key)
            try:
                if not assign:
                    value[key] = [float(v) for v in obs[1:len(obs)]]
                else:
                    value[key] = [float(obs[i]) for i in cols]
            # float() raises ValueError on bad cells; the old `except
            # IOError` could never catch that and the call crashed instead.
            except ValueError:
                print("which means your data include string not number")
    return DMU, value
# TODO solve DEA_CRS models with LP technique by Gurobi Software (package)
def CRS(DMU, X, Y, orientation, dual):
    """Solve the CCR (constant returns to scale) DEA model for every DMU.

    :param DMU: ordered list of DMU identifiers
    :param X: dict mapping DMU name -> list of input values
    :param Y: dict mapping DMU name -> list of output values
    :param orientation: 'input' or 'output'
    :param dual: False for the multiplier (primal) form, True for the
        envelopment (dual) form
    Prints one efficiency line per DMU; nothing is returned.
    """
    I=len(X[DMU[0]])
    O=len(Y[DMU[0]])
    E={}# Efficiency
    # --- input-oriented, multiplier (primal) form ---
    if (orientation=='input' and dual==False):
        for r in DMU:
            try:
                # The decision variables
                v,u={},{}
                # Initialize LP model
                m=Model("CRS_model")
                m.setParam('OutputFlag', 0) # Muting the optimize function
                # Add decision variables
                for i in range(I):
                    v[r,i]=m.addVar(vtype=GRB.CONTINUOUS,name="v_%s%d"%(r,i))
                for j in range(O):
                    u[r,j]=m.addVar(vtype=GRB.CONTINUOUS,name="u_%s%d"%(r,j))
                m.update()
                # Add objective function: maximize weighted outputs of DMU r
                m.setObjective(quicksum(u[r,j]*Y[r][j] for j in range(O)),GRB.MAXIMIZE)
                # Add constraints: normalize weighted inputs of DMU r to 1
                m.addConstr(quicksum(v[r,i]*X[r][i] for i in range(I))==1)
                for k in DMU:
                    m.addConstr(quicksum(u[r,j]*Y[k][j] for j in range(O))-quicksum(v[r,i]*X[k][i] for i in range(I))<=0)
                # Start optimize the formulation
                m.optimize()
                # Store the result
                E[r]="The efficiency of DMU %s:%0.3f"%(r,m.objVal)
            except GurobiError:
                print ('GurobiError reported')
            # Print result
            print (E[r])
    # --- input-oriented, envelopment (dual) form ---
    elif (orientation=='input' and dual==True):
        for r in DMU:
            try:
                # The decision variables
                theta,λ={},{}
                # Initialize LP model
                m=Model("Dual_of_CRS_model")
                m.setParam('OutputFlag',False) # Muting the optimize function
                # Add decision variables
                for k in DMU:
                    λ[k]=m.addVar(vtype=GRB.CONTINUOUS,name="λ_%s"%k)
                theta[r]=m.addVar(vtype=GRB.CONTINUOUS,lb=-1000,name="theta_%s"%r)
                m.update()
                # Add objective function: shrink DMU r's inputs as much as possible
                m.setObjective(theta[r],GRB.MINIMIZE)
                # Add constraints
                for i in range(I):
                    m.addConstr(quicksum(λ[k]*X[k][i] for k in DMU)<= theta[r]*X[r][i])
                for j in range(O):
                    m.addConstr(quicksum(λ[k]*Y[k][j] for k in DMU)>= Y[r][j])
                # Start optimize the formulation
                m.optimize()
                # Store the result
                E[r]="The efficiency of DMU %s:%0.3f"%(r,m.objVal)
                # for c in m.getConstrs():
                #     print ("The slack value of %s : %g"%(c.constrName,c.Slack))
                # print(m.getAttr('slack', m.getConstrs()))
                # print(m.getAttr('x', m.getVars()))
            except GurobiError:
                print ('GurobiError reported')
            # Print efficiency
            print (E[r])
    # --- output-oriented, multiplier (primal) form ---
    elif(orientation=='output' and dual==False):
        for r in DMU:
            try:
                # The decision variables
                v,u={},{}
                # Initialize LP model
                m=Model("CRS_model")
                m.setParam('OutputFlag', 0) # Muting the optimize function
                # Add decision variables
                for i in range(I):
                    v[r,i]=m.addVar(vtype=GRB.CONTINUOUS,name="v_%s%d"%(r,i))
                for j in range(O):
                    u[r,j]=m.addVar(vtype=GRB.CONTINUOUS,name="u_%s%d"%(r,j))
                m.update()
                # Add objective function: minimize weighted inputs of DMU r
                m.setObjective(quicksum(v[r,i]*X[r][i] for i in range(I)),GRB.MINIMIZE)
                # Add constraints: normalize weighted outputs of DMU r to 1
                m.addConstr(quicksum(u[r,j]*Y[r][j] for j in range(O))==1)
                for k in DMU:
                    m.addConstr(quicksum(v[r,i]*X[k][i] for i in range(I))-quicksum(u[r,j]*Y[k][j] for j in range(O))>=0)
                # Start optimize the formulation
                m.optimize()
                # Store the result (efficiency is the reciprocal here)
                E[r]="The efficiency of DMU %s:%0.3f"%(r,1/m.objVal)
            except GurobiError:
                print ('GurobiError reported')
            # Print result
            print (E[r])
    # --- output-oriented, envelopment (dual) form ---
    elif(orientation=='output' and dual==True):
        for r in DMU:
            try:
                # The decision variables
                theta, λ={}, {}
                # Initialize LP model
                m=Model("Dual_of_CRS_model")
                m.setParam('OutputFlag',False) # Muting the optimize function
                # Add decision variables
                for k in DMU:
                    λ[k]=m.addVar(vtype=GRB.CONTINUOUS,name="λ_%s"%k)
                theta[r]=m.addVar(vtype=GRB.CONTINUOUS,lb=-1000,name="theta_%s"%r)
                m.update()
                # Add objective function: expand DMU r's outputs as much as possible
                m.setObjective(theta[r],GRB.MAXIMIZE)
                # Add constraints
                for j in range(O):
                    m.addConstr(quicksum(λ[k]*Y[k][j] for k in DMU)>= theta[r]*Y[r][j])
                for i in range(I):
                    m.addConstr(quicksum(λ[k]*X[k][i] for k in DMU)<= X[r][i])
                # Start optimize the formulation
                m.optimize()
                # Store the result (efficiency is the reciprocal here)
                E[r]="The efficiency of DMU %s:%0.3f"%(r,1/m.objVal)
                # for c in m.getConstrs():
                #     print ("The slack value of %s : %g"%(c.constrName,c.Slack))
                # print(m.getAttr('slack', m.getConstrs()))
                # print(m.getAttr('x', λ))
            except GurobiError:
                print ('GurobiError reported')
            # Print efficiency
            print (E[r])
# TODO solve DEA_VRS models with LP technique by Gurobi Software (package)
def VRS(DMU, X, Y, orientation, dual):
    """Solve the BCC (variable returns to scale) DEA model for every DMU.

    Identical structure to :func:`CRS` plus the VRS free variable (u0/v0 in
    the multiplier forms, the convexity constraint sum(λ)==1 in the duals).

    :param DMU: ordered list of DMU identifiers
    :param X: dict mapping DMU name -> list of input values
    :param Y: dict mapping DMU name -> list of output values
    :param orientation: 'input' or 'output'
    :param dual: False for the multiplier (primal) form, True for the
        envelopment (dual) form
    Prints one efficiency line per DMU; nothing is returned.
    """
    I=len(X[DMU[0]])
    O=len(Y[DMU[0]])
    E={}
    u0_v={}
    # --- input-oriented, multiplier (primal) form ---
    if(orientation=="input" and dual==False):
        for r in DMU:
            try:
                # Initialize LP model
                m=Model("VRS_model")
                m.setParam('OutputFlag',0) # Muting the optimize function
                # The decision variable
                v,u,u0={},{},{}
                # Add decision variables (u0 is the free VRS variable)
                for i in range(I):
                    v[r,i]=m.addVar(vtype=GRB.CONTINUOUS,name="v_%s%d"%(r,i))
                for j in range(O):
                    u[r,j]=m.addVar(vtype=GRB.CONTINUOUS,name="u_%s%d"%(r,j))
                u0[r]=m.addVar(lb=-1000,vtype=GRB.CONTINUOUS,name="u0_%s"%r)
                m.update()
                # Add objective function
                m.setObjective(quicksum(u[r,j]*Y[r][j] for j in range(O))-u0[r],GRB.MAXIMIZE)
                # Add constraints
                m.addConstr(quicksum(v[r,i]*X[r][i] for i in range(I))==1)
                for k in DMU:
                    m.addConstr(quicksum(u[r,j]*Y[k][j] for j in range(O))-quicksum(v[r,i]*X[k][i] for i in range(I))-u0[r] <=0)
                m.optimize()
                # Print efficiency
                E[r]="The efficiency of DMU %s:%0.3f"%(r,m.objVal)
                print (E[r])
                # if RTS_check==True:
                #     u0_v[r]='%s = %0.3f'%(u0[r].varName,u0[r].X)
                #     print(u0_v[r])
            except GurobiError:
                print ('GurobiError reported')
    # --- input-oriented, envelopment (dual) form ---
    elif(orientation=="input" and dual==True):
        for r in DMU:
            try:
                # The decision variables
                theta, λ={}, {}
                # Initialize LP model
                m=Model("Dual_of_CRS_model")
                m.setParam('OutputFlag',False) # Muting the optimize function
                # Add decision variables
                for k in DMU:
                    λ[k]=m.addVar(vtype=GRB.CONTINUOUS,name="λ_%s"%k)
                theta[r]=m.addVar(vtype=GRB.CONTINUOUS,lb=-1000,name="theta_%s"%r)
                m.update()
                # Add objective function
                m.setObjective(theta[r],GRB.MINIMIZE)
                # Add constraints (sum of λ == 1 enforces VRS)
                for i in range(I):
                    m.addConstr(quicksum(λ[k]*X[k][i] for k in DMU)<= theta[r]*X[r][i])
                for j in range(O):
                    m.addConstr(quicksum(λ[k]*Y[k][j] for k in DMU)>= Y[r][j])
                m.addConstr(quicksum(λ[k] for k in DMU)==1,name='sum of λ')
                # Start optimize the formulation
                m.optimize()
                # Store the result
                E[r]="The efficiency of DMU %s:%0.3f"%(r,m.objVal)
                val=m.getAttr('X',λ)
                # for c in m.getConstrs():
                #     print ("The slack value of %s : %g"%(c.constrName,c.Slack))
                # print(m.getAttr('slack', m.getConstrs()))
                # print(m.getAttr('x', m.getVars()))
            except GurobiError:
                print ('GurobiError reported')
            # Print efficiency
            print (E[r])
    # --- output-oriented, multiplier (primal) form ---
    elif(orientation=="output" and dual==False):
        v0_v={}
        for r in DMU:
            try:
                # Initialize LP model
                m=Model("VRS_output_model")
                m.setParam('OutputFlag',0) # Muting the optimize function
                # The decision variable
                v,u,v0={},{},{}
                # Add decision variables (v0 is the free VRS variable)
                for i in range(I):
                    v[r,i]=m.addVar(vtype=GRB.CONTINUOUS,name="v_%s%d"%(r,i))
                for j in range(O):
                    u[r,j]=m.addVar(vtype=GRB.CONTINUOUS,name="u_%s%d"%(r,j))
                v0[r]=m.addVar(lb=-1000,vtype=GRB.CONTINUOUS,name="v0_%s"%r)
                m.update()
                # Add objective function
                m.setObjective(quicksum(v[r,i]*X[r][i] for i in range(I))+v0[r],GRB.MINIMIZE)
                # Add constraints
                m.addConstr(quicksum(u[r,j]*Y[r][j] for j in range(O))==1)
                for k in DMU:
                    m.addConstr(quicksum(v[r,i]*X[k][i] for i in range(I))-quicksum(u[r,j]*Y[k][j] for j in range(O))+v0[r] >=0)
                m.optimize()
                # Print efficiency (reciprocal of the objective)
                E[r]="The efficiency of DMU %s:%0.3f"%(r,1/m.objVal)
                print (E[r])
                # if RTS_check==True:
                #     v0_v[r]='%s = %0.3f'%(v0[r].varName,v0[r].X)
                #     print(v0_v[r])
            except GurobiError:
                print ('GurobiError reported')
    # --- output-oriented, envelopment (dual) form ---
    elif(orientation=="output" and dual==True):
        for r in DMU:
            try:
                # The decision variables
                theta, λ={}, {}
                # Initialize LP model
                m=Model("Dual_of_output-oriented_VRS_model")
                m.setParam('OutputFlag',False) # Muting the optimize function
                # Add decision variables
                for k in DMU:
                    λ[k]=m.addVar(vtype=GRB.CONTINUOUS,name="λ_%s"%k)
                theta[r]=m.addVar(vtype=GRB.CONTINUOUS,lb=-1000,name="theta_%s"%r)
                m.update()
                # Add objective function
                m.setObjective(theta[r],GRB.MAXIMIZE)
                # Add constraints (sum of λ == 1 enforces VRS)
                for j in range(O):
                    m.addConstr(quicksum(λ[k]*Y[k][j] for k in DMU)>= theta[r]*Y[r][j])
                for i in range(I):
                    m.addConstr(quicksum(λ[k]*X[k][i] for k in DMU)<= X[r][i])
                m.addConstr(quicksum(λ[k] for k in DMU)==1,name='sum of λ')
                # Start optimize the formulation
                m.optimize()
                # Store the result (reciprocal of the objective)
                E[r]="The efficiency of DMU %s:%0.3f"%(r,1/m.objVal)
                # for c in m.getConstrs():
                #     print ("The slack value of %s : %g"%(c.constrName,c.Slack))
                # print(m.getAttr('slack', m.getConstrs()))
                # print(m.getAttr('x', λ))
            except GurobiError:
                print ('GurobiError reported')
            # Print efficiency
            print (E[r])
| 37.690987 | 150 | 0.449442 |
acf34948df3ff6c9e6dc5e203082869aca6e003b | 2,741 | py | Python | generateassign.py | aabbcco/ssn-3d-pytorch | 3b5a1bb807ce751b03501772ed9da48ac7f9f30b | [
"MIT"
] | null | null | null | generateassign.py | aabbcco/ssn-3d-pytorch | 3b5a1bb807ce751b03501772ed9da48ac7f9f30b | [
"MIT"
] | null | null | null | generateassign.py | aabbcco/ssn-3d-pytorch | 3b5a1bb807ce751b03501772ed9da48ac7f9f30b | [
"MIT"
] | null | null | null | import os
import math
import numpy as np
import torch
from torch.utils.data import DataLoader
from model import SSNModel
from lib.dataset import bsds
from lib.ssn.ssn import sparse_ssn_iter
from skimage.color import rgb2lab
@torch.no_grad()
def getQ(image, nspix, n_iter, fdim=None, color_scale=0.26, pos_scale=2.5, weight=None, enforce_connectivity=True):
if weight is not None:
from model import SSNModel
model = SSNModel(fdim, nspix, n_iter).to("cuda")
model.load_state_dict(torch.load(weight))
model.eval()
else:
def model(data): return sparse_ssn_iter(data, nspix, n_iter)
height, width = image.shape[:2]
nspix_per_axis = int(math.sqrt(nspix))
pos_scale = pos_scale * max(nspix_per_axis/height, nspix_per_axis/width)
coords = torch.stack(torch.meshgrid(torch.arange(
height, device="cuda"), torch.arange(width, device="cuda")), 0)
coords = coords[None].float()
image = rgb2lab(image)
image = torch.from_numpy(image).permute(2, 0, 1)[None].to("cuda").float()
inputs = torch.cat([color_scale*image, pos_scale*coords], 1)
Q, H, features = model(inputs)
labels = H.reshape(height, width).to("cpu").detach().numpy()
return Q, labels, features
if __name__ == "__main__":
    import time
    import argparse
    import matplotlib.pyplot as plt
    from skimage.segmentation import mark_boundaries
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--image", type=str, default='../BSR/BSDS500/data/images/val/3096.jpg', help="/path/to/image")
    parser.add_argument("--weight", default=None, type=str,
                        help="/path/to/pretrained_weight")
    parser.add_argument("--fdim", default=20, type=int,
                        help="embedding dimension")
    parser.add_argument("--niter", default=10, type=int,
                        help="number of iterations for differentiable SLIC")
    parser.add_argument("--nspix", default=100, type=int,
                        help="number of superpixels")
    parser.add_argument("--color_scale", default=0.26, type=float)
    parser.add_argument("--pos_scale", default=2.5, type=float)
    args = parser.parse_args()
    image = plt.imread(args.image)
    # Time the superpixel assignment computation.
    s = time.time()
    Q, label, features = getQ(
        image, args.nspix, args.niter, args.fdim, args.color_scale, args.pos_scale, args.weight)
    print(f"time {time.time() - s}sec")
    # Move results to host memory (Q is sparse, hence to_dense) for export.
    Q = Q.detach().to('cpu').to_dense().numpy().squeeze(0)
    features = features.detach().to('cpu').numpy().squeeze(0)
    print(Q.shape)
    print(label.shape)
    # Dump soft assignments, hard labels and pixel features as CSV.
    np.savetxt('Q.csv',Q,delimiter=',')
    np.savetxt('label.csv',label,delimiter=',')
    np.savetxt('features.csv',features,delimiter=',')
acf34b829a4ab32deec732ce47a60d863dc2e700 | 8,153 | py | Python | molecule/scenario.py | dj-wasabi/molecule | 97a7b60fc47b2b83f3f772aafbf044294be168a6 | [
"MIT"
] | null | null | null | molecule/scenario.py | dj-wasabi/molecule | 97a7b60fc47b2b83f3f772aafbf044294be168a6 | [
"MIT"
] | null | null | null | molecule/scenario.py | dj-wasabi/molecule | 97a7b60fc47b2b83f3f772aafbf044294be168a6 | [
"MIT"
] | null | null | null | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import shutil
import os
import fnmatch
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path
from molecule import logger
from molecule import scenarios
from molecule import util
LOG = logger.get_logger(__name__)
class Scenario(object):
"""
A scenario allows Molecule test a role in a particular way, this is a
fundamental change from Molecule v1.
A scenario is a self-contained directory containing everything necessary
for testing the role in a particular way. The default scenario is named
``default``, and every role should contain a default scenario.
Unless mentioned explicitly, the scenario name will be the directory name
hosting the files.
Any option set in this section will override the defaults.
.. code-block:: yaml
scenario:
name: default # optional
create_sequence:
- dependency
- create
- prepare
check_sequence:
- dependency
- cleanup
- destroy
- create
- prepare
- converge
- check
- destroy
converge_sequence:
- dependency
- create
- prepare
- converge
destroy_sequence:
- dependency
- cleanup
- destroy
test_sequence:
- dependency
- lint
- cleanup
- destroy
- syntax
- create
- prepare
- converge
- idempotence
- side_effect
- verify
- cleanup
- destroy
""" # noqa
    def __init__(self, config):
        """
        Initialize a new scenario class and returns None.

        :param config: An instance of a Molecule config.
        :return: None
        """
        self.config = config
        # _setup() is defined elsewhere in this class (outside this view);
        # presumably it prepares the scenario's on-disk state -- confirm.
        self._setup()
def _remove_scenario_state_directory(self):
"""Remove scenario cached disk stored state.
:return: None
"""
directory = str(Path(self.ephemeral_directory).parent)
LOG.info('Removing {}'.format(directory))
shutil.rmtree(directory)
def prune(self):
"""
Prune the scenario ephemeral directory files and returns None.
"safe files" will not be pruned, including the ansible configuration
and inventory used by this scenario, the scenario state file, and
files declared as "safe_files" in the ``driver`` configuration
declared in ``molecule.yml``.
:return: None
"""
LOG.info('Pruning extra files from scenario ephemeral directory')
safe_files = [
self.config.provisioner.config_file,
self.config.provisioner.inventory_file,
self.config.state.state_file,
] + self.config.driver.safe_files
files = util.os_walk(self.ephemeral_directory, '*')
for f in files:
if not any(sf for sf in safe_files if fnmatch.fnmatch(f, sf)):
os.remove(f)
# Remove empty directories.
for dirpath, dirs, files in os.walk(self.ephemeral_directory, topdown=False):
if not dirs and not files:
os.removedirs(dirpath)
    @property
    def name(self):
        # Scenario name from molecule.yml (defaults to the scenario dir name).
        return self.config.config['scenario']['name']
    @property
    def directory(self):
        # Directory holding molecule.yml; falls back to CWD when the config
        # was not loaded from a file.
        if self.config.molecule_file:
            return os.path.dirname(self.config.molecule_file)
        else:
            return os.getcwd()
    @property
    def ephemeral_directory(self):
        # An explicit environment override always wins.
        _ephemeral_directory = os.getenv('MOLECULE_EPHEMERAL_DIRECTORY')
        if _ephemeral_directory:
            return _ephemeral_directory
        project_directory = os.path.basename(self.config.project_directory)
        # Parallel runs get a unique per-run directory to avoid clashes.
        if self.config.is_parallel:
            project_directory = '{}-{}'.format(project_directory, self.config._run_uuid)
        project_scenario_directory = os.path.join(
            self.config.cache_directory, project_directory, self.name
        )
        # NOTE(review): ephemeral_directory() is invoked twice here; the
        # second call is a no-op on an already-existing directory, so this
        # looks redundant -- confirm before simplifying.
        path = ephemeral_directory(project_scenario_directory)
        return ephemeral_directory(path)
    @property
    def inventory_directory(self):
        # The Ansible inventory lives inside the ephemeral directory.
        return os.path.join(self.ephemeral_directory, "inventory")
    # The *_sequence properties below come in two flavors: values read from
    # the ``scenario`` section of molecule.yml (configurable), and fixed
    # single-action lists that are not configurable.
    @property
    def check_sequence(self):
        return self.config.config['scenario']['check_sequence']
    @property
    def cleanup_sequence(self):
        return self.config.config['scenario']['cleanup_sequence']
    @property
    def converge_sequence(self):
        return self.config.config['scenario']['converge_sequence']
    @property
    def create_sequence(self):
        return self.config.config['scenario']['create_sequence']
    @property
    def dependency_sequence(self):
        # Fixed: not configurable via molecule.yml.
        return ['dependency']
    @property
    def destroy_sequence(self):
        return self.config.config['scenario']['destroy_sequence']
    @property
    def idempotence_sequence(self):
        return ['idempotence']
    @property
    def lint_sequence(self):
        # see https://github.com/ansible/molecule/issues/2216
        return ['dependency', 'lint']
    @property
    def prepare_sequence(self):
        return ['prepare']
    @property
    def side_effect_sequence(self):
        return ['side_effect']
    @property
    def syntax_sequence(self):
        return ['syntax']
    @property
    def test_sequence(self):
        return self.config.config['scenario']['test_sequence']
    @property
    def verify_sequence(self):
        return ['verify']
    @property
    def sequence(self):
        """
        Select the sequence of actions for this scenario based on the
        subcommand currently being executed and return a list.

        :return: list of action names; empty when no sequence is defined
            for the (scenario, subcommand) pair.
        """
        s = scenarios.Scenarios([self.config])
        matrix = s._get_matrix()
        try:
            return matrix[self.name][self.config.subcommand]
        except KeyError:
            # TODO(retr0h): May change this handling in the future.
            return []
def _setup(self):
"""
Prepare the scenario for Molecule and returns None.
:return: None
"""
if not os.path.isdir(self.inventory_directory):
os.makedirs(self.inventory_directory)
def ephemeral_directory(path=None):
    """
    Returns temporary directory to be used by molecule. Molecule users should
    not make any assumptions about its location, permissions or its content as
    this may change in future release.

    :param path: optional sub-path below the cache root; defaults to
        ``molecule``.
    :return: absolute path to the directory (created with mode 0700 when
        missing).
    """
    d = os.getenv('MOLECULE_EPHEMERAL_DIRECTORY')
    if not d:
        d = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
        d = os.path.abspath(os.path.join(d, path if path else 'molecule'))
    if not os.path.isdir(d):
        # Temporarily tighten the umask so intermediate parents created by
        # mkdir(parents=True) are not group/world accessible, then restore
        # it. The previous code called os.umask(0o077) without restoring,
        # leaking the modified umask to the whole process.
        old_umask = os.umask(0o077)
        try:
            Path(d).mkdir(mode=0o700, parents=True, exist_ok=True)
        finally:
            os.umask(old_umask)
    return d
| 30.084871 | 88 | 0.635104 |
acf34c8e85641a2e1fc802a4ebb1d0237b83ec96 | 5,426 | py | Python | tools/n64cksum.py | zestydevy/dinosaur-planet-precomp | 783f03afd6c210755fc1b81cb2622cfd0d5a58c7 | [
"Unlicense"
] | 3 | 2021-05-27T01:32:39.000Z | 2021-12-15T14:35:05.000Z | tools/n64cksum.py | zestydevy/dinosaur-planet-precomp | 783f03afd6c210755fc1b81cb2622cfd0d5a58c7 | [
"Unlicense"
] | null | null | null | tools/n64cksum.py | zestydevy/dinosaur-planet-precomp | 783f03afd6c210755fc1b81cb2622cfd0d5a58c7 | [
"Unlicense"
] | 1 | 2022-01-07T21:18:11.000Z | 2022-01-07T21:18:11.000Z | #!/usr/bin/env python3
import sys
import struct
# Original code from: https://gist.github.com/dkosmari/ee7bb471ea12c21b008d0ecffebd6384
# Modified to not print
# tool to recalculate N64 rom checksums
# reference code:
# https://github.com/queueRAM/sm64tools/blob/master/n64cksum.c
# https://github.com/queueRAM/sm64tools/blob/master/libsm64.c
# 32-bit mask used to emulate MIPS register truncation.
mask32 = 0xffffffff
def read_u32_be(buffer : bytearray, offset):
    """Read a big-endian unsigned 32-bit integer from *buffer* at *offset*."""
    return struct.unpack_from(">I", buffer, offset)[0]
def write_u32_be(buffer : bytearray, offset, value):
    """Write *value* into *buffer* at *offset* as a big-endian unsigned 32-bit integer."""
    struct.pack_into(">I", buffer, offset, value)
def sm64_calc_checksums(buf : bytearray):
    """Compute the two N64 header CRC words for ROM image *buf*.

    Faithful Python translation of the MIPS boot code (see the annotated
    assembly in the comments); the statement order and 32-bit truncation
    mirror the original registers exactly, so do not reorder.

    :return: tuple (crc1, crc2)
    """
    #local t0, t1, t2, t3, t4, t5, t6, t7, t8, t9
    #local s0, s6
    #local a0, a1, a2, a3, at
    #local lo
    #local v0, v1
    #local ra
    # derived from the SM64 boot code
    s6 = 0x3f
    a0 = 0x1000                  # 59c: 8d640008 lw a0,8(t3)
    a1 = s6                      # 5a0: 02c02825 move a1,s6
    at = 0x5d588b65              # 5a4: 3c015d58 lui at,0x5d58
                                 # 5a8: 34218b65 ori at,at,0x8b65
    lo = (a1 * at) & mask32      # 5ac: 00a10019 multu a1,at 16 F8CA 4DDB
    ra = 0x100000                # 5bc: 3c1f0010 lui ra,0x10
    v1 = 0                       # 5c0: 00001825 move v1,zero
    t0 = 0                       # 5c4: 00004025 move t0,zero
    t1 = a0                      # 5c8: 00804825 move t1,a0
    t5 = 32                     # 5cc: 240d0020 li t5,32
    v0 = lo                      # 5d0: 00001012 mflo v0
    v0 = (v0 + 1) & mask32       # 5d4: 24420001 addiu v0,v0,1
    a3 = v0                      # 5d8: 00403825 move a3,v0
    t2 = v0                      # 5dc: 00405025 move t2,v0
    t3 = v0                      # 5e0: 00405825 move t3,v0
    s0 = v0                      # 5e4: 00408025 move s0,v0
    a2 = v0                      # 5e8: 00403025 move a2,v0
    t4 = v0                      # 5ec: 00406025 move t4,v0
    while t0 != ra:
        v0 = read_u32_be(buf, t1)    # 5f0: 8d220000 lw v0,0(t1)
        v1 = (a3 + v0) & mask32      # 5f4: 00e21821 addu v1,a3,v0
        at = v1 < a3                 # 5f8: 0067082b sltu at,v1,a3
        a1 = v1                      # 600: 00602825 move a1,v1 branch delay slot
        if at:                       # 5fc: 10200002 beqz at,0x608
            t2 = (t2 + 1) & mask32   # 604: 254a0001 addiu t2,t2,1
        v1 = v0 & 0x1F               # 608: 3043001f andi v1,v0,0x1f
        t7 = (t5 - v1) & mask32      # 60c: 01a37823 subu t7,t5,v1
        t8 = v0 >> t7                # 610: 01e2c006 srlv t8,v0,t7
        t6 = (v0 << v1) & mask32     # 614: 00627004 sllv t6,v0,v1
        a0 = t6 | t8                 # 618: 01d82025 or a0,t6,t8
        at = a2 < v0                 # 61c: 00c2082b sltu at,a2,v0
        a3 = a1                      # 620: 00a03825 move a3,a1
        t3 = (t3 ^ v0) & mask32      # 624: 01625826 xor t3,t3,v0
        s0 = (s0 + a0) & mask32      # 62c: 02048021 addu s0,s0,a0 branch delay slot
        if at:                       # 628: 10200004 beqz at,0x63c
            t9 = (a3 ^ v0) & mask32  # 630: 00e2c826 xor t9,a3,v0
                                     # 634: 10000002 b 0x640
            a2 = (a2 ^ t9) & mask32  # 638: 03263026 xor a2,t9,a2 branch delay
        else:
            a2 = (a2 ^ a0) & mask32  # 63c: 00c43026 xor a2,a2,a0
        t0 += 4                      # 640: 25080004 addiu t0,t0,4
        t7 = (v0 ^ s0) & mask32      # 644: 00507826 xor t7,v0,s0
        t1 += 4                      # 648: 25290004 addiu t1,t1,4
        t4 = (t4 + t7) & mask32      # 650: 01ec6021 addu t4,t7,t4 branch delay
                                     # 64c: 151fffe8 bne t0,ra,0x5f0
    t6 = (a3 ^ t2) & mask32          # 654: 00ea7026 xor t6,a3,t2
    a3 = (t6 ^ t3) & mask32          # 658: 01cb3826 xor a3,t6,t3
    t8 = (s0 ^ a2) & mask32          # 65c: 0206c026 xor t8,s0,a2
    s0 = (t8 ^ t4) & mask32          # 660: 030c8026 xor s0,t8,t4
    return a3, s0
def sm64_update_checksums(buf: bytearray):
    """Recompute the N64 header checksums of *buf* and write them in place.

    Assumes a CIC-NUS-6102 boot chip. The two CRC words live at header
    offsets 0x10 and 0x14.

    NOTE: the previous version also read back the existing checksums into a
    local list whose only consumers were commented-out print statements;
    that dead code has been removed.
    """
    cksum_offsets = [0x10, 0x14]
    # assume CIC-NUS-6102
    calc_cksum = sm64_calc_checksums(buf)
    # write checksums into header
    write_u32_be(buf, cksum_offsets[0], calc_cksum[0])
    write_u32_be(buf, cksum_offsets[1], calc_cksum[1])
def print_usage():
    """Print a short usage hint for the tool."""
    message = "TODO: show help"
    print(message)
def read_file(fname):
    """Read *fname* in binary mode and return its contents as a mutable bytearray."""
    with open(fname, "rb") as handle:
        contents = handle.read()
    return bytearray(contents)
def write_file(fname, data):
    """Write *data* to *fname* in binary mode, replacing any existing contents."""
    with open(fname, "wb") as handle:
        handle.write(data)
def main():
    """Command-line entry point: recalculate a ROM's header checksums.

    Usage: n64cksum.py INPUT [OUTPUT]. Without OUTPUT the input file is
    rewritten in place.
    """
    if len(sys.argv) < 2:
        print_usage()
        sys.exit(1)
    input_name = sys.argv[1]
    # Default to rewriting the input file in place.
    output_name = input_name
    if len(sys.argv) > 2:
        output_name = sys.argv[2]
    rom_data = read_file(input_name)
    sm64_update_checksums(rom_data)
    write_file(output_name, rom_data)
if __name__ == "__main__":
    # Guard so importing this module for its helpers has no side effects;
    # the original ran the script body unconditionally at import time.
    main()
acf34d034448f48c943438217465bbcd7746c39e | 2,606 | py | Python | jacket/objects/compute/volume_usage.py | bopopescu/jacket | d7ad3147fcb43131098c2a5210847634ff5fb325 | [
"Apache-2.0"
] | null | null | null | jacket/objects/compute/volume_usage.py | bopopescu/jacket | d7ad3147fcb43131098c2a5210847634ff5fb325 | [
"Apache-2.0"
] | null | null | null | jacket/objects/compute/volume_usage.py | bopopescu/jacket | d7ad3147fcb43131098c2a5210847634ff5fb325 | [
"Apache-2.0"
] | 2 | 2016-08-10T02:21:49.000Z | 2020-07-24T01:57:21.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from jacket.db import compute as db
from jacket.objects.compute import base
from jacket.objects.compute import fields
@base.NovaObjectRegistry.register
class VolumeUsage(base.NovaPersistentObject, base.NovaObject):
    """Persistent object carrying per-volume I/O usage counters.

    Holds running totals (``tot_*``) and counters since the last refresh
    (``curr_*``) for reads/writes against a single volume.
    """
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'id': fields.IntegerField(read_only=True),
        'volume_id': fields.UUIDField(),
        'instance_uuid': fields.UUIDField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'tot_last_refreshed': fields.DateTimeField(nullable=True,
                                                   read_only=True),
        'tot_reads': fields.IntegerField(read_only=True),
        'tot_read_bytes': fields.IntegerField(read_only=True),
        'tot_writes': fields.IntegerField(read_only=True),
        'tot_write_bytes': fields.IntegerField(read_only=True),
        'curr_last_refreshed': fields.DateTimeField(nullable=True,
                                                    read_only=True),
        'curr_reads': fields.IntegerField(),
        'curr_read_bytes': fields.IntegerField(),
        'curr_writes': fields.IntegerField(),
        'curr_write_bytes': fields.IntegerField()
    }
    @staticmethod
    def _from_db_object(context, vol_usage, db_vol_usage):
        # Copy every declared field straight from the DB row onto the
        # object, then mark it clean so later saves only send new changes.
        for field in vol_usage.fields:
            setattr(vol_usage, field, db_vol_usage[field])
        vol_usage._context = context
        vol_usage.obj_reset_changes()
        return vol_usage
    @base.remotable
    def save(self, update_totals=False):
        """Persist the current counters through the DB API and refresh
        this object from the row the database returns.

        :param update_totals: when True the database layer also folds the
            current counters into the ``tot_*`` totals.
        """
        db_vol_usage = db.vol_usage_update(
            self._context, self.volume_id, self.curr_reads,
            self.curr_read_bytes, self.curr_writes, self.curr_write_bytes,
            self.instance_uuid, self.project_id, self.user_id,
            self.availability_zone, update_totals=update_totals)
        self._from_db_object(self._context, self, db_vol_usage)
acf34d081634fc5f5112df5144ad9a408e2466ef | 12,970 | py | Python | scripts/openstack.py | shrek-github/teuthology | 5a89fb217aaf2cd7948f7419f431230ab25053c6 | [
"MIT"
] | null | null | null | scripts/openstack.py | shrek-github/teuthology | 5a89fb217aaf2cd7948f7419f431230ab25053c6 | [
"MIT"
] | 1 | 2020-03-05T03:00:08.000Z | 2020-03-05T03:00:08.000Z | scripts/openstack.py | shrek-github/teuthology | 5a89fb217aaf2cd7948f7419f431230ab25053c6 | [
"MIT"
] | 1 | 2020-03-04T03:04:06.000Z | 2020-03-04T03:04:06.000Z | import argparse
import sys
import os
import teuthology.openstack
def main(argv=None):
    """Command-line entry point for ``teuthology-openstack``.

    :param argv: list of command-line arguments; defaults to
        ``sys.argv[1:]`` evaluated at call time. The previous signature
        (``argv=sys.argv[1:]``) captured sys.argv once at import time,
        so later changes to sys.argv were silently ignored.
    """
    if argv is None:
        argv = sys.argv[1:]
    sys.exit(teuthology.openstack.main(parse_args(argv), argv))
def get_key_parser():
    """Build the argument parser holding the OpenStack ssh key options."""
    key_parser = argparse.ArgumentParser()
    key_parser.add_argument(
        '--key-name',
        help='OpenStack keypair name',
    )
    # Candidate private keys, probed in order.
    home = os.environ['HOME']
    default_keys = [
        home + '/.ssh/id_rsa',
        home + '/.ssh/id_dsa',
        home + '/.ssh/id_ecdsa',
    ]
    key_parser.add_argument(
        '--key-filename',
        help='path to the ssh private key. Default: %(default)s',
        default=default_keys,
    )
    return key_parser
def get_suite_parser():
    """Build the argument parser for suite-scheduling options.

    Mirrors ``teuthology-suite`` (scripts/suite.py) so the OpenStack
    wrapper accepts the same flags.

    Fixes applied to the help text only: several multi-line help strings
    relied on implicit string concatenation without joining spaces
    (producing words like "thissubdirectory"), and the --rerun help
    referred to a nonexistent ``--rerun-status`` option.
    """
    parser = argparse.ArgumentParser()
    # copy/pasted from scripts/suite.py
    parser.add_argument(
        'config_yaml',
        nargs='*',
        help='Optional extra job yaml to include',
    )
    parser.add_argument(
        '-v', '--verbose',
        action='store_true', default=None,
        help='be more verbose',
    )
    parser.add_argument(
        '--dry-run',
        action='store_true', default=None,
        help='Do a dry run; do not schedule anything',
    )
    parser.add_argument(
        '-s', '--suite',
        help='The suite to schedule',
    )
    parser.add_argument(
        '-c', '--ceph',
        help='The ceph branch to run against',
        default=os.getenv('TEUTH_CEPH_BRANCH', 'master'),
    )
    parser.add_argument(
        '-k', '--kernel',
        help=('The kernel branch to run against; if not '
              'supplied, the installed kernel is unchanged'),
    )
    parser.add_argument(
        '-f', '--flavor',
        help=("The kernel flavor to run against: ('basic', "
              "'gcov', 'notcmalloc')"),
        default='basic',
    )
    parser.add_argument(
        '-d', '--distro',
        help='Distribution to run against',
    )
    parser.add_argument(
        '--suite-branch',
        help='Use this suite branch instead of the ceph branch',
        default=os.getenv('TEUTH_SUITE_BRANCH', 'master'),
    )
    parser.add_argument(
        '-e', '--email',
        help='When tests finish or time out, send an email here',
    )
    parser.add_argument(
        '-N', '--num',
        help='Number of times to run/queue the job',
        type=int,
        default=1,
    )
    parser.add_argument(
        '-l', '--limit',
        metavar='JOBS',
        help='Queue at most this many jobs',
        type=int,
    )
    parser.add_argument(
        '--subset',
        help=('Instead of scheduling the entire suite, break the '
              'set of jobs into <outof> pieces (each of which will '
              'contain each facet at least once) and schedule '
              'piece <index>. Scheduling 0/<outof>, 1/<outof>, '
              '2/<outof> ... <outof>-1/<outof> will schedule all '
              'jobs in the suite (many more than once).')
    )
    parser.add_argument(
        '-p', '--priority',
        help='Job priority (lower is sooner)',
        type=int,
        default=1000,
    )
    parser.add_argument(
        '--timeout',
        help=('How long, in seconds, to wait for jobs to finish '
              'before sending email. This does not kill jobs.'),
        type=int,
        default=43200,
    )
    parser.add_argument(
        '--filter',
        help=('Only run jobs whose description contains at least one '
              'of the keywords in the comma separated keyword '
              'string specified. ')
    )
    parser.add_argument(
        '--filter-out',
        help=('Do not run jobs whose description contains any of '
              'the keywords in the comma separated keyword '
              'string specified. ')
    )
    parser.add_argument(
        '--throttle',
        help=('When scheduling, wait SLEEP seconds between jobs. '
              'Useful to avoid bursts that may be too hard on '
              'the underlying infrastructure or exceed OpenStack API '
              'limits (server creation per minute for instance).'),
        type=int,
        default=15,
    )
    parser.add_argument(
        '--suite-relpath',
        help=('Look for tasks and suite definitions in this '
              'subdirectory of the suite repo.'),
    )
    parser.add_argument(
        '-r', '--rerun',
        help=('Attempt to reschedule a run, selecting only those '
              'jobs whose status are mentioned by '
              '--rerun-statuses. '
              'Note that this is implemented by scheduling an '
              'entirely new suite and including only jobs whose '
              'descriptions match the selected ones. It does so '
              'using the same logic as --filter. '
              'Of all the flags that were passed when scheduling '
              'the original run, the resulting one will only '
              'inherit the suite value. Any others must be '
              'passed as normal while scheduling with this '
              'feature.'),
    )
    parser.add_argument(
        '-R', '--rerun-statuses',
        help=("A comma-separated list of statuses to be used "
              "with --rerun. Supported statuses are: 'dead', "
              "'fail', 'pass', 'queued', 'running', 'waiting'"),
        default='fail,dead',
    )
    parser.add_argument(
        '-D', '--distroversion', '--distro-version',
        help='Distro version to run against',
    )
    parser.add_argument(
        '-n', '--newest',
        help=('Search for the newest revision built on all '
              'required distro/versions, starting from '
              'either --ceph or --sha1, backtracking '
              'up to <newest> commits'),
        type=int,
        default=0,
    )
    parser.add_argument(
        '-S', '--sha1',
        help=('The ceph sha1 to run against (overrides -c). '
              'If both -S and -c are supplied, -S wins, and '
              'there is no validation that sha1 is contained '
              'in branch')
    )
    parser.add_argument(
        '--ceph-repo',
        help=("Query this repository for Ceph branch and SHA1"),
        default=os.getenv('TEUTH_CEPH_REPO', 'https://github.com/ceph/ceph'),
    )
    parser.add_argument(
        '--suite-repo',
        help=("Use tasks and suite definition in this repository"),
        default=os.getenv('TEUTH_SUITE_REPO', 'https://github.com/ceph/ceph'),
    )
    return parser
def get_openstack_parser():
    """Build the argument parser for OpenStack cluster deployment options."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--wait',
        action='store_true', default=None,
        help='block until the suite is finished',
    )
    parser.add_argument(
        '--name',
        help='OpenStack primary instance name',
        default='teuthology',
    )
    parser.add_argument(
        '--nameserver',
        help='nameserver ip address (optional)',
    )
    parser.add_argument(
        '--simultaneous-jobs',
        help='maximum number of jobs running in parallel',
        type=int,
        default=1,
    )
    # controller-* flags override the flavor auto-selection for the
    # teuthology controller VM.
    parser.add_argument(
        '--controller-cpus',
        help='override default minimum vCPUs when selecting flavor for teuthology VM',
        type=int,
        default=0,
    )
    parser.add_argument(
        '--controller-ram',
        help='override default minimum RAM (in megabytes) when selecting flavor for teuthology VM',
        type=int,
        default=0,
    )
    parser.add_argument(
        '--controller-disk',
        help='override default minimum disk size (in gigabytes) when selecting flavor for teuthology VM',
        type=int,
        default=0,
    )
    parser.add_argument(
        '--setup',
        action='store_true', default=False,
        help='deploy the cluster, if it does not exist',
    )
    parser.add_argument(
        '--teardown',
        action='store_true', default=None,
        help='destroy the cluster, if it exists',
    )
    parser.add_argument(
        '--teuthology-git-url',
        help="git clone url for teuthology",
        default=os.getenv('TEUTH_REPO', 'https://github.com/ceph/teuthology'),
    )
    parser.add_argument(
        '--teuthology-branch',
        help="use this teuthology branch instead of master",
        default=os.getenv('TEUTH_BRANCH', 'master'),
    )
    parser.add_argument(
        '--ceph-workbench-git-url',
        help="git clone url for ceph-workbench",
    )
    parser.add_argument(
        '--ceph-workbench-branch',
        help="use this ceph-workbench branch instead of master",
        default='master',
    )
    parser.add_argument(
        '--upload',
        action='store_true', default=False,
        help='upload archives to an rsync server',
    )
    parser.add_argument(
        '--archive-upload',
        help='rsync destination to upload archives',
        default='ubuntu@teuthology-logs.public.ceph.com:./',
    )
    parser.add_argument(
        '--archive-upload-url',
        help='Public facing URL where archives are uploaded',
        default='http://teuthology-logs.public.ceph.com',
    )
    # Repeatable option; accepted syntaxes are documented in the parser
    # epilog assembled by the caller.
    parser.add_argument(
        '--test-repo',
        action='append',
        help=('Package repository to be added on test nodes, which are specified '
              'as NAME:URL, NAME!PRIORITY:URL or @FILENAME, for details see below.'),
        default=None,
    )
    parser.add_argument(
        '--no-canonical-tags',
        action='store_true', default=False,
        help='configure remote teuthology to not fetch tags from http://github.com/ceph/ceph.git in buildpackages task',
    )
    return parser
def get_parser():
    """Assemble the full teuthology-openstack argument parser.

    Combines the suite, key and openstack sub-parsers (``resolve``
    conflict handling lets later parents override duplicate options) and
    attaches the long usage epilog/description.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        parents=[
            get_suite_parser(),
            get_key_parser(),
            get_openstack_parser(),
        ],
        conflict_handler='resolve',
        add_help=False,
        epilog="""test repos:
Test repository can be specified using --test-repo optional argument
with value in the following formats: NAME:URL, NAME!PRIORITY:URL
or @FILENAME. See examples:
1) Essential usage requires to provide repo name and url:
--test-repo foo:http://example.com/repo/foo
2) Repo can be prioritized by adding a number after '!' symbol
in the name:
--test-repo 'bar!10:http://example.com/repo/bar'
3) Repo data can be taken from a file by simply adding '@' symbol
at the beginning argument value, for example from yaml:
--test-repo @path/to/foo.yaml
where `foo.yaml` contains one or more records like:
- name: foo
  priority: 1
  url: http://example.com/repo/foo
4) Or from json file:
--test-repo @path/to/foo.json
where `foo.json` content is:
[{"name":"foo","priority":1,"url":"http://example.com/repo/foo"}]
Several repos can be provided with multiple usage of --test-repo and/or
you can provide several repos within one yaml or json file.
The repositories are added in the order they appear in the command line or
in the file. Example:
---
# The foo0 repo will be included first, after all that have any priority,
# in particular after foo1 because it has lowest priority
- name: foo0
  url: http://example.com/repo/foo0
# The foo1 will go after foo2 because it has lower priority then foo2
- name: foo1
  url: http://example.com/repo/foo1
  priority: 2
# The foo2 will go first because it has highest priority
- name: foo2
  url: http://example.com/repo/foo2
  priority: 1
# The foo3 will go after foo0 because it appears after it in this file
- name: foo3
  url: http://example.com/repo/foo3
Equivalent json file content below:
[
    {
        "name": "foo0",
        "url": "http://example.com/repo/foo0"
    },
    {
        "name": "foo1",
        "url": "http://example.com/repo/foo1",
        "priority": 2
    },
    {
        "name": "foo2",
        "url": "http://example.com/repo/foo2",
        "priority": 1
    },
    {
        "name": "foo3",
        "url": "http://example.com/repo/foo3"
    }
]
At the moment supported only files with extensions: .yaml, .yml, .json, .jsn.
teuthology-openstack %s
""" % teuthology.__version__,
        description="""
Run a suite of ceph integration tests. A suite is a directory containing
facets. A facet is a directory containing config snippets. Running a suite
means running teuthology for every configuration combination generated by
taking one config snippet from each facet. Any config files passed on the
command line will be used for every combination, and will override anything in
the suite. By specifying a subdirectory in the suite argument, it is possible
to limit the run to a specific facet. For instance -s upgrade/dumpling-x only
runs the dumpling-x facet of the upgrade suite.
Display the http and ssh access to follow the progress of the suite
and analyze results.
  firefox http://183.84.234.3:8081/
  ssh -i teuthology-admin.pem ubuntu@183.84.234.3
""")
    return parser
def parse_args(argv):
    """Parse *argv* with the combined teuthology-openstack parser."""
    parser = get_parser()
    return parser.parse_args(argv)
| 32.024691 | 120 | 0.599306 |
acf34d5e8c9f2901c2ea0684474a0527d5d3846c | 16,891 | py | Python | lib/rucio/client/replicaclient.py | sahandilshan/rucio | 253c2f12f6e99e5eca5f59c6538e5a2bcd5f7c48 | [
"Apache-2.0"
] | null | null | null | lib/rucio/client/replicaclient.py | sahandilshan/rucio | 253c2f12f6e99e5eca5f59c6538e5a2bcd5f7c48 | [
"Apache-2.0"
] | null | null | null | lib/rucio/client/replicaclient.py | sahandilshan/rucio | 253c2f12f6e99e5eca5f59c6538e5a2bcd5f7c48 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2013-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vincent.garonne@cern.ch>, 2013-2018
# - Mario Lassnig <mario.lassnig@cern.ch>, 2013-2021
# - Cedric Serfon <cedric.serfon@cern.ch>, 2014-2018
# - Ralph Vigne <ralph.vigne@cern.ch>, 2015
# - Brian Bockelman <bbockelm@cse.unl.edu>, 2018
# - Martin Barisits <martin.barisits@cern.ch>, 2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Jaroslav Guenther <jaroslav.guenther@cern.ch>, 2019
# - Ilija Vukotic <ivukotic@cern.ch>, 2020
# - Luc Goossens <luc.goossens@cern.ch>, 2020
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020
# - Eric Vaandering <ewv@fnal.gov>, 2020
try:
from urllib import quote_plus
except ImportError:
from urllib.parse import quote_plus
from datetime import datetime
from json import dumps, loads
from requests.status_codes import codes
from rucio.client.baseclient import BaseClient
from rucio.client.baseclient import choice
from rucio.common.utils import build_url, render_json
class ReplicaClient(BaseClient):
"""Replica client class for working with replicas"""
REPLICAS_BASEURL = 'replicas'
    def __init__(self, rucio_host=None, auth_host=None, account=None, ca_cert=None, auth_type=None, creds=None, timeout=600, user_agent='rucio-clients', vo=None):
        # Thin constructor: connection/auth setup is delegated to BaseClient.
        super(ReplicaClient, self).__init__(rucio_host, auth_host, account, ca_cert, auth_type, creds, timeout, user_agent, vo=vo)
def declare_bad_file_replicas(self, pfns, reason):
"""
Declare a list of bad replicas.
:param pfns: The list of PFNs.
:param reason: The reason of the loss.
"""
data = {'reason': reason, 'pfns': pfns}
url = build_url(self.host, path='/'.join([self.REPLICAS_BASEURL, 'bad']))
headers = {}
r = self._send_request(url, headers=headers, type='POST', data=dumps(data))
if r.status_code == codes.created:
return loads(r.text)
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
    def declare_bad_did_replicas(self, rse, dids, reason):
        """
        Declare a list of bad replicas.
        :param rse: The RSE where the bad replicas reside
        :param dids: The DIDs of the bad replicas
        :param reason: The reason of the loss.
        :returns: The decoded server response on success.
        """
        data = {'reason': reason, 'rse': rse, 'dids': dids}
        url = build_url(self.host, path='/'.join([self.REPLICAS_BASEURL, 'bad/dids']))
        headers = {}
        r = self._send_request(url, headers=headers, type='POST', data=dumps(data))
        if r.status_code == codes.created:
            return loads(r.text)
        # Any non-201 answer is mapped to a client-side exception.
        exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
        raise exc_cls(exc_msg)
def declare_suspicious_file_replicas(self, pfns, reason):
"""
Declare a list of bad replicas.
:param pfns: The list of PFNs.
:param reason: The reason of the loss.
"""
data = {'reason': reason, 'pfns': pfns}
url = build_url(self.host, path='/'.join([self.REPLICAS_BASEURL, 'suspicious']))
headers = {}
r = self._send_request(url, headers=headers, type='POST', data=dumps(data))
if r.status_code == codes.created:
return loads(r.text)
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
    def get_did_from_pfns(self, pfns, rse=None):
        """
        Get the DIDs associated to a PFN on one given RSE
        :param pfns: The list of PFNs.
        :param rse: The RSE name.
        :returns: A list of dictionaries {pfn: {'scope': scope, 'name': name}}
        """
        data = {'rse': rse, 'pfns': pfns}
        url = build_url(self.host, path='/'.join([self.REPLICAS_BASEURL, 'dids']))
        headers = {}
        r = self._send_request(url, headers=headers, type='POST', data=dumps(data))
        if r.status_code == codes.ok:
            # Server streams JSON records; decode lazily.
            return self._load_json_data(r)
        exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
        raise exc_cls(exc_msg)
    def list_replicas(self, dids, schemes=None, unavailable=False,
                      all_states=False, metalink=False, rse_expression=None,
                      client_location=None, sort=None, domain=None,
                      signature_lifetime=None,
                      resolve_archives=True, resolve_parents=False,
                      updated_after=None):
        """
        List file replicas for a list of data identifiers (DIDs).
        :param dids: The list of data identifiers (DIDs) like :
        [{'scope': <scope1>, 'name': <name1>}, {'scope': <scope2>, 'name': <name2>}, ...]
        :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...)
        :param unavailable: Also include unavailable replicas in the list.
        :param metalink: ``False`` (default) retrieves as JSON,
                         ``True`` retrieves as metalink4+xml.
        :param rse_expression: The RSE expression to restrict replicas on a set of RSEs.
        :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site'}
        :param sort: Sort the replicas: ``geoip`` - based on src/dst IP topographical distance
                                        ``closeness`` - based on src/dst closeness
                                        ``dynamic`` - Rucio Dynamic Smart Sort (tm)
        :param domain: Define the domain. None is fallback to 'wan', otherwise 'wan, 'lan', or 'all'
        :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN.
        :param resolve_archives: When set to True, find archives which contain the replicas.
        :param resolve_parents: When set to True, find all parent datasets which contain the replicas.
        :param updated_after: epoch timestamp or datetime object (UTC time), only return replicas updated after this time
        :returns: A list of dictionaries with replica information.
        """
        # Optional filters are only added to the payload when set, so the
        # server sees exactly what the caller asked for.
        data = {'dids': dids,
                'domain': domain}
        if schemes:
            data['schemes'] = schemes
        if unavailable:
            data['unavailable'] = True
        data['all_states'] = all_states
        if rse_expression:
            data['rse_expression'] = rse_expression
        if client_location:
            data['client_location'] = client_location
        if sort:
            data['sort'] = sort
        if updated_after:
            if isinstance(updated_after, datetime):
                # encode in UTC string with format '%Y-%m-%dT%H:%M:%S' e.g. '2020-03-02T12:01:38'
                data['updated_after'] = updated_after.strftime('%Y-%m-%dT%H:%M:%S')
            else:
                data['updated_after'] = updated_after
        if signature_lifetime:
            data['signature_lifetime'] = signature_lifetime
        data['resolve_archives'] = resolve_archives
        data['resolve_parents'] = resolve_parents
        url = build_url(choice(self.list_hosts),
                        path='/'.join([self.REPLICAS_BASEURL, 'list']))
        headers = {}
        if metalink:
            # Negotiate metalink4+xml instead of the default JSON stream.
            headers['Accept'] = 'application/metalink4+xml'
        # pass json dict in querystring
        r = self._send_request(url, headers=headers, type='POST', data=dumps(data), stream=True)
        if r.status_code == codes.ok:
            if not metalink:
                return self._load_json_data(r)
            # Metalink responses are returned as raw XML text, unparsed.
            return r.text
        exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
        raise exc_cls(exc_msg)
def add_replica(self, rse, scope, name, bytes, adler32, pfn=None, md5=None, meta={}):
"""
Add file replicas to a RSE.
:param rse: the RSE name.
:param scope: The scope of the file.
:param name: The name of the file.
:param bytes: The size in bytes.
:param adler32: adler32 checksum.
:param pfn: PFN of the file for non deterministic RSE.
:param md5: md5 checksum.
:param meta: Metadata attributes.
:return: True if files were created successfully.
"""
dict = {'scope': scope, 'name': name, 'bytes': bytes, 'meta': meta, 'adler32': adler32}
if md5:
dict['md5'] = md5
if pfn:
dict['pfn'] = pfn
return self.add_replicas(rse=rse, files=[dict])
def add_replicas(self, rse, files, ignore_availability=True):
"""
Bulk add file replicas to a RSE.
:param rse: the RSE name.
:param files: The list of files. This is a list of DIDs like :
[{'scope': <scope1>, 'name': <name1>}, {'scope': <scope2>, 'name': <name2>}, ...]
:param ignore_availability: Ignore the RSE blacklisting.
:return: True if files were created successfully.
"""
url = build_url(choice(self.list_hosts), path=self.REPLICAS_BASEURL)
data = {'rse': rse, 'files': files, 'ignore_availability': ignore_availability}
r = self._send_request(url, type='POST', data=render_json(**data))
if r.status_code == codes.created:
return True
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def delete_replicas(self, rse, files, ignore_availability=True):
"""
Bulk delete file replicas from a RSE.
:param rse: the RSE name.
:param files: The list of files. This is a list of DIDs like :
[{'scope': <scope1>, 'name': <name1>}, {'scope': <scope2>, 'name': <name2>}, ...]
:param ignore_availability: Ignore the RSE blacklisting.
:return: True if files have been deleted successfully.
"""
url = build_url(choice(self.list_hosts), path=self.REPLICAS_BASEURL)
data = {'rse': rse, 'files': files, 'ignore_availability': ignore_availability}
r = self._send_request(url, type='DEL', data=render_json(**data))
if r.status_code == codes.ok:
return True
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
    def update_replicas_states(self, rse, files):
        """
        Bulk update the file replicas states from a RSE.
        :param rse: the RSE name.
        :param files: The list of files. This is a list of DIDs like :
        [{'scope': <scope1>, 'name': <name1>, 'state': <state1>}, {'scope': <scope2>, 'name': <name2>, 'state': <state2>}, ...],
        where a state value can be either of:
        'A' (available)
        'S' (suspicious)
        'U' (unavailable)
        'R' (recovered)
        'B' (bad)
        'L' (lost)
        'D' (deleted)
        :return: True if replica states have been updated successfully, otherwise an exception is raised.
        """
        url = build_url(choice(self.list_hosts), path=self.REPLICAS_BASEURL)
        data = {'rse': rse, 'files': files}
        # PUT on the replicas endpoint performs the bulk state transition.
        r = self._send_request(url, type='PUT', data=render_json(**data))
        if r.status_code == codes.ok:
            return True
        exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
        raise exc_cls(exc_msg)
def list_dataset_replicas(self, scope, name, deep=False):
    """
    List dataset replicas for a did (scope:name).

    :param scope: The scope of the dataset.
    :param name: The name of the dataset.
    :param deep: Lookup at the file level.

    :returns: A list of dict dataset replicas.
    """
    # Only send the 'deep' flag when it is requested.
    params = {'deep': True} if deep else {}
    target = build_url(
        self.host,
        path='/'.join([self.REPLICAS_BASEURL, quote_plus(scope),
                       quote_plus(name), 'datasets']),
        params=params)
    response = self._send_request(target, type='GET')
    if response.status_code != codes.ok:
        exc_cls, exc_msg = self._get_exception(headers=response.headers,
                                               status_code=response.status_code,
                                               data=response.content)
        raise exc_cls(exc_msg)
    return self._load_json_data(response)
def list_dataset_replicas_bulk(self, dids):
    """
    List dataset replicas for a did (scope:name).

    :param dids: The list of DIDs of the datasets.

    :returns: A list of dict dataset replicas.
    """
    target = build_url(self.host,
                       path='/'.join([self.REPLICAS_BASEURL, 'datasets_bulk']))
    response = self._send_request(target, type='POST',
                                  data=dumps({'dids': list(dids)}))
    if response.status_code != codes.ok:
        exc_cls, exc_msg = self._get_exception(headers=response.headers,
                                               status_code=response.status_code,
                                               data=response.content)
        raise exc_cls(exc_msg)
    return self._load_json_data(response)
def list_dataset_replicas_vp(self, scope, name, deep=False):
    """
    List dataset replicas for a DID (scope:name) using the
    Virtual Placement service.

    NOTICE: This is an RnD function and might change or go away at any time.

    :param scope: The scope of the dataset.
    :param name: The name of the dataset.
    :param deep: Lookup at the file level.

    :returns: If VP exists a list of dicts of sites
    """
    params = {'deep': True} if deep else {}
    target = build_url(
        self.host,
        path='/'.join([self.REPLICAS_BASEURL, quote_plus(scope),
                       quote_plus(name), 'datasets_vp']),
        params=params)
    response = self._send_request(target, type='GET')
    if response.status_code != codes.ok:
        exc_cls, exc_msg = self._get_exception(headers=response.headers,
                                               status_code=response.status_code,
                                               data=response.content)
        raise exc_cls(exc_msg)
    return self._load_json_data(response)
def list_datasets_per_rse(self, rse, filters=None, limit=None):
    """
    List datasets at a RSE.

    :param rse: the rse name.
    :param filters: dictionary of attributes by which the results should be filtered.
    :param limit: limit number.

    :returns: A list of dict dataset replicas.
    """
    # Bug fix: `filters` and `limit` were documented but silently ignored.
    # Forward them to the server as query parameters so the documented
    # interface actually works; callers that omit them see no change.
    payload = {}
    if filters:
        payload.update(filters)
    if limit is not None:
        payload['limit'] = limit
    url = build_url(self.host,
                    path='/'.join([self.REPLICAS_BASEURL, 'rse', rse]),
                    params=payload)
    r = self._send_request(url, type='GET')
    if r.status_code == codes.ok:
        return self._load_json_data(r)
    exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
    raise exc_cls(exc_msg)
def add_bad_pfns(self, pfns, reason, state, expires_at):
    """
    Declare a list of bad replicas.

    :param pfns: The list of PFNs.
    :param reason: The reason of the loss.
    :param state: The state of the replica. Either BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE
    :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files.

    :return: True if PFNs were created successfully.
    """
    body = dumps({'reason': reason, 'pfns': pfns, 'state': state,
                  'expires_at': expires_at})
    target = build_url(self.host,
                       path='/'.join([self.REPLICAS_BASEURL, 'bad/pfns']))
    response = self._send_request(target, headers={}, type='POST', data=body)
    if response.status_code != codes.created:
        exc_cls, exc_msg = self._get_exception(headers=response.headers,
                                               status_code=response.status_code,
                                               data=response.content)
        raise exc_cls(exc_msg)
    return True
def set_tombstone(self, replicas):
    """
    Set a tombstone on a list of replicas.

    :param replicas: list of replicas.
    """
    target = build_url(self.host,
                       path='/'.join([self.REPLICAS_BASEURL, 'tombstone']))
    response = self._send_request(target, type='POST',
                                  data=render_json(replicas=replicas))
    if response.status_code != codes.created:
        exc_cls, exc_msg = self._get_exception(headers=response.headers,
                                               status_code=response.status_code,
                                               data=response.content)
        raise exc_cls(exc_msg)
    return True
| 41.809406 | 162 | 0.620982 |
acf34dbd4499ff95c660904165fd776f3dc17fad | 2,039 | py | Python | payment_system/urls.py | Iva-khar/Data_converter | 9e991f479e2bf1c2ab430a9ad1be2da936ea139b | [
"MIT"
] | null | null | null | payment_system/urls.py | Iva-khar/Data_converter | 9e991f479e2bf1c2ab430a9ad1be2da936ea139b | [
"MIT"
] | null | null | null | payment_system/urls.py | Iva-khar/Data_converter | 9e991f479e2bf1c2ab430a9ad1be2da936ea139b | [
"MIT"
] | null | null | null | from django.urls import path
from payment_system.views import (
ProjectCreateView,
ProjectUpdateView,
ProjectListForUserView,
ProjectRetrieveView,
ProjectRefreshTokenView,
ProjectDisableView,
ProjectActivateView,
ProjectSubscriptionCreateView,
ProjectSubscriptionDisableView,
SubscriptionsListView,
ProjectDeactivateUserView,
ProjectActivateUserView,
InvoiceListView,
InvoiceRetrieveView,
ProjectInviteUserView,
ProjectUserConfirmInviteView,
InvitationListView,
ProjectCancelInviteView,
ProjectUserRejectInviteView,
)
# URL routes for the payment system API.
urlpatterns = [
    # project urls
    # NOTE: Django resolves URLs in declaration order, so the specific
    # 'project/<int:pk>/<action>/' routes must stay above the plain
    # 'project/<int:pk>/' detail route.
    path('project/create/', ProjectCreateView.as_view()),
    path('project/<int:pk>/update/', ProjectUpdateView.as_view()),
    path('project/<int:pk>/refresh-token/', ProjectRefreshTokenView.as_view()),
    path('project/<int:pk>/disable/', ProjectDisableView.as_view()),
    path('project/<int:pk>/activate/', ProjectActivateView.as_view()),
    path('project/<int:pk>/deactivate-user/<int:user_id>/', ProjectDeactivateUserView.as_view()),
    path('project/<int:pk>/activate-user/<int:user_id>/', ProjectActivateUserView.as_view()),
    path('project/<int:pk>/invite/', ProjectInviteUserView.as_view()),
    path('project/<int:pk>/cancel-invite/<int:invite_id>/', ProjectCancelInviteView.as_view()),
    path('project/<int:pk>/confirm-invite/', ProjectUserConfirmInviteView.as_view()),
    path('project/<int:pk>/reject-invite/', ProjectUserRejectInviteView.as_view()),
    path('project/<int:pk>/', ProjectRetrieveView.as_view()),
    path('project/', ProjectListForUserView.as_view()),

    # invitations / subscriptions
    path('invitations/', InvitationListView.as_view()),
    path('subscriptions/', SubscriptionsListView.as_view()),

    # invoices
    path('invoice/<int:pk>/', InvoiceRetrieveView.as_view()),
    path('invoice/', InvoiceListView.as_view()),

    # project subscriptions
    path('project-subscription/create/', ProjectSubscriptionCreateView.as_view()),
    path('project-subscription/<int:pk>/disable/', ProjectSubscriptionDisableView.as_view()),
]
acf34f7262783ef4e05b1d090399fbf6d92624ac | 5,438 | py | Python | django/contrib/gis/geos/coordseq.py | kix/django | 5262a288df07daa050a0e17669c3f103f47a8640 | [
"BSD-3-Clause"
] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | AppServer/lib/django-1.5/django/contrib/gis/geos/coordseq.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | AppServer/lib/django-1.5/django/contrib/gis/geos/coordseq.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z | """
This module houses the GEOSCoordSeq object, which is used internally
by GEOSGeometry to house the actual coordinates of the Point,
LineString, and LinearRing geometries.
"""
from ctypes import c_double, c_uint, byref
from django.contrib.gis.geos.base import GEOSBase, numpy
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.libgeos import CS_PTR
from django.contrib.gis.geos import prototypes as capi
from django.utils.six.moves import xrange
class GEOSCoordSeq(GEOSBase):
    "The internal representation of a list of coordinates inside a Geometry."
    # ctypes pointer type expected/held by this wrapper.
    ptr_type = CS_PTR

    #### Python 'magic' routines ####
    def __init__(self, ptr, z=False):
        "Initializes from a GEOS pointer."
        if not isinstance(ptr, CS_PTR):
            raise TypeError('Coordinate sequence should initialize with a CS_PTR.')
        self._ptr = ptr
        # `z` mirrors the parent geometry's 3D flag (see `hasz`).
        self._z = z

    def __iter__(self):
        "Iterates over each point in the coordinate sequence."
        for i in xrange(self.size):
            yield self[i]

    def __len__(self):
        "Returns the number of points in the coordinate sequence."
        return int(self.size)

    def __str__(self):
        "Returns the string representation of the coordinate sequence."
        return str(self.tuple)

    def __getitem__(self, index):
        "Returns the coordinate sequence value at the given index."
        coords = [self.getX(index), self.getY(index)]
        # Include Z only when both the sequence and the parent geometry are 3D.
        if self.dims == 3 and self._z:
            coords.append(self.getZ(index))
        return tuple(coords)

    def __setitem__(self, index, value):
        "Sets the coordinate sequence value at the given index."
        # Checking the input value
        if isinstance(value, (list, tuple)):
            pass
        elif numpy and isinstance(value, numpy.ndarray):
            pass
        else:
            raise TypeError('Must set coordinate with a sequence (list, tuple, or numpy array).')
        # Checking the dims of the input
        if self.dims == 3 and self._z:
            n_args = 3
            set_3d = True
        else:
            n_args = 2
            set_3d = False
        if len(value) != n_args:
            raise TypeError('Dimension of value does not match.')
        # Setting the X, Y, Z
        self.setX(index, value[0])
        self.setY(index, value[1])
        if set_3d: self.setZ(index, value[2])

    #### Internal Routines ####
    def _checkindex(self, index):
        "Checks the given index."
        sz = self.size
        if (sz < 1) or (index < 0) or (index >= sz):
            raise GEOSIndexError('invalid GEOS Geometry index: %s' % str(index))

    def _checkdim(self, dim):
        "Checks the given dimension."
        # Valid ordinate dimensions are 0 (X), 1 (Y) and 2 (Z).
        if dim < 0 or dim > 2:
            raise GEOSException('invalid ordinate dimension "%d"' % dim)

    #### Ordinate getting and setting routines ####
    def getOrdinate(self, dimension, index):
        "Returns the value for the given dimension and index."
        self._checkindex(index)
        self._checkdim(dimension)
        return capi.cs_getordinate(self.ptr, index, dimension, byref(c_double()))

    def setOrdinate(self, dimension, index, value):
        "Sets the value for the given dimension and index."
        self._checkindex(index)
        self._checkdim(dimension)
        capi.cs_setordinate(self.ptr, index, dimension, value)

    def getX(self, index):
        "Get the X value at the index."
        return self.getOrdinate(0, index)

    def setX(self, index, value):
        "Set X with the value at the given index."
        self.setOrdinate(0, index, value)

    def getY(self, index):
        "Get the Y value at the given index."
        return self.getOrdinate(1, index)

    def setY(self, index, value):
        "Set Y with the value at the given index."
        self.setOrdinate(1, index, value)

    def getZ(self, index):
        "Get Z with the value at the given index."
        return self.getOrdinate(2, index)

    def setZ(self, index, value):
        "Set Z with the value at the given index."
        self.setOrdinate(2, index, value)

    ### Dimensions ###
    @property
    def size(self):
        "Returns the size of this coordinate sequence."
        return capi.cs_getsize(self.ptr, byref(c_uint()))

    @property
    def dims(self):
        "Returns the dimensions of this coordinate sequence."
        return capi.cs_getdims(self.ptr, byref(c_uint()))

    @property
    def hasz(self):
        """
        Returns whether this coordinate sequence is 3D. This property value is
        inherited from the parent Geometry.
        """
        return self._z

    ### Other Methods ###
    def clone(self):
        "Clones this coordinate sequence."
        return GEOSCoordSeq(capi.cs_clone(self.ptr), self.hasz)

    @property
    def kml(self):
        "Returns the KML representation for the coordinates."
        # Getting the substitution string depending on whether the coordinates have
        # a Z dimension.
        if self.hasz: substr = '%s,%s,%s '
        else: substr = '%s,%s,0 '
        return '<coordinates>%s</coordinates>' % \
               ''.join([substr % self[i] for i in xrange(len(self))]).strip()

    @property
    def tuple(self):
        "Returns a tuple version of this coordinate sequence."
        n = self.size
        # A single point is returned bare, not wrapped in an outer tuple.
        if n == 1: return self[0]
        else: return tuple([self[i] for i in xrange(n)])
acf34f7bb158b00e3edf0104a1f0ca8ce94fa02c | 4,269 | py | Python | lol9k1/auth/auth.py | hubwoop/lol9k1 | a33bd97fc0473d6700f608851c07c99c0bea15cb | [
"MIT"
] | 2 | 2018-11-12T20:45:49.000Z | 2018-12-13T18:01:39.000Z | lol9k1/auth/auth.py | hubwoop/lol9k1 | a33bd97fc0473d6700f608851c07c99c0bea15cb | [
"MIT"
] | null | null | null | lol9k1/auth/auth.py | hubwoop/lol9k1 | a33bd97fc0473d6700f608851c07c99c0bea15cb | [
"MIT"
] | 1 | 2018-12-13T18:02:36.000Z | 2018-12-13T18:02:36.000Z | import functools
import sqlite3
from typing import Optional
from flask import (Blueprint, flash, g, redirect, render_template, request, session, url_for, abort)
from werkzeug.security import check_password_hash, generate_password_hash
import lol9k1.database as database
from lol9k1 import utilities
from lol9k1.auth.forms import RegistrationForm
from lol9k1.auth.types import User, RegistrationError
from lol9k1.utilities import STYLE
bp = Blueprint('auth', __name__, url_prefix='/auth', template_folder='templates')
def login_required(view):
    """Decorator: redirect anonymous visitors to the login page, otherwise
    run the wrapped view unchanged."""
    @functools.wraps(view)
    def wrapper(**kwargs):
        if g.user is not None:
            return view(**kwargs)
        return redirect(url_for('auth.login'))
    return wrapper
def admin_required(view):
    """Decorator: abort with HTTP 403 unless the current user is an admin."""
    @functools.wraps(view)
    def wrapper(**kwargs):
        if current_user_is_admin():
            return view(**kwargs)
        return abort(403)
    return wrapper
def current_user_is_admin() -> bool:
    """Return True iff a user is loaded, the session is intact (unmodified),
    and it carries a truthy 'is_admin' flag.

    Bug fix: the bare ``and`` chain could return ``None`` / a user row /
    a session value instead of an actual bool, contradicting the declared
    ``-> bool`` return type; the result is now coerced with ``bool()``.
    """
    return bool(
        g.user
        and session
        and not session.modified
        and 'is_admin' in session
        and session['is_admin']
    )
@bp.route('/login', methods=('GET', 'POST'))
def login():
    """Render the login form (GET), or authenticate the submitted
    credentials (POST) and start a session on success.

    Note: the previous ``-> None`` annotation was removed — this view
    returns a rendered template or a redirect response.
    """
    if request.method == 'POST':
        user = get_user_by_name(request.form['username'])
        # check_password_hash compares the stored hash against the
        # submitted plain-text password.
        if user and check_password_hash(user.password, request.form['password']):
            initialize_session_for(user)
            flash("You've logged in successfully. Congratulations!", STYLE.message)
            return redirect(url_for('landing.landing'))
        else:
            flash('Invalid username and/or password.', STYLE.error)
    return render_template('auth/login.html')
def get_user_by_name(name) -> Optional[User]:
    """Look up a user row by name.

    :param name: the user name to search for.
    :returns: a ``User`` instance, or ``None`` on a database error or when
        no such user exists.
    """
    try:
        cursor = database.get_db().execute('select * from users where name = (?)', [name])
        row = cursor.fetchone()
    except sqlite3.Error:
        flash(utilities.NAVY_SEAL, STYLE.warning)
        return None
    # Bug fix: fetchone() returns None when no row matches; the old code
    # crashed with "TypeError: argument after * must be an iterable" on
    # User(*None) for any unknown username.
    if row is None:
        return None
    return User(*row)
def initialize_session_for(user):
    """Populate the Flask session for a freshly authenticated user."""
    session.update(
        user_id=int(user.id),
        username=user.name,
        logged_in=True,
    )
    # The admin flag is only present for admin accounts.
    if user.is_admin == 1:
        session['is_admin'] = True
@bp.route('/logout')
@login_required
def logout():
    """Clear the session and redirect to the landing page.

    Bug fix: the decorators were ordered ``@login_required`` *above*
    ``@bp.route``, which made Flask register the unwrapped view, so the
    login check never ran. ``@bp.route`` must be outermost so it registers
    the login-protected wrapper. The incorrect ``-> None`` annotation was
    also dropped (the view returns a redirect response).
    """
    session.pop('logged_in', None)
    session.pop('user_id', None)
    session.pop('is_admin', None)
    session.pop('username', None)
    flash("You've logged out.", STYLE.message)
    return redirect(url_for('landing.landing'))
@bp.before_app_request
def load_logged_in_user() -> None:
    """Before each request, attach the logged-in user's row to ``g.user``
    (or ``None`` when the session has no user id)."""
    user_id = session.get('user_id')
    if user_id is None:
        g.user = None
        return
    g.user = database.get_db().execute(
        'select * from users where id = ?', (user_id,)).fetchone()
@bp.route('/register/<string:token>')
def register_with_token(token):
    """Accept an invite link and forward its token to the register view.

    Bug fix: the previous ``-> None`` annotation was wrong — this view
    returns a redirect response.
    """
    return redirect(url_for('.register', token=token))
@bp.route('/register', methods=['GET', 'POST'])
def register():
    """Handle the registration form: on a valid submit create the user and
    redirect to the landing page; otherwise re-render the form.

    Note: the previous ``-> None`` annotation was removed — this view
    returns a redirect or a rendered template.
    """
    form = RegistrationForm()
    if form.validate_on_submit():
        try:
            add_user(form)
            flash("Your registration was successful, you may now login.", STYLE.message)
            return redirect(url_for('landing.landing'))
        except RegistrationError as registration_error:
            flash(str(registration_error), STYLE.error)
    # Pre-fill the invite token from the query string when present
    # (used by the /register/<token> convenience route).
    return render_template('auth/register.html',
                           form=form,
                           token=request.args['token'] if 'token' in request.args else None)
def add_user(form: RegistrationForm) -> None:
    """Insert a new user row and mark the invite token as used.

    :param form: the validated registration form.
    :raises RegistrationError: when the insert violates a DB constraint
        (e.g. a duplicate user name).

    NOTE(review): values are read from ``request.form`` rather than from the
    already-validated ``form`` object passed in — the two should agree, but
    using ``form.<field>.data`` would be safer; confirm before changing.
    """
    is_admin = is_admin_token(form.token.data)
    db = database.get_db()
    try:
        # add user
        db.execute('insert into users (name, password, email, gender, is_admin, token_used) '
                   'values (?, ?, ?, ?, ?, ?)',
                   [request.form['name'], generate_password_hash(request.form['password']),
                    request.form['email'], request.form['gender'], is_admin, request.form['token']])
    except sqlite3.IntegrityError:
        raise RegistrationError("Registration failed.")
    # Invalidate the invite so the token cannot be reused.
    db.execute('update invites set used = 1 where token = ?', [request.form['token']])
    db.commit()
def is_admin_token(token):
    # checks if the user was invited via the create_admin_command
    # NOTE(review): the only caller passes ``form.token.data`` (a string),
    # yet this reads ``token.added_by`` as if it were an invite row — this
    # looks like it would raise AttributeError. Presumably the invite row
    # should be looked up from the `invites` table first; confirm intent.
    is_admin = 1 if token.added_by == 0 else 0
    return is_admin
| 32.587786 | 104 | 0.661279 |
acf34f9e139cdc6473c4041874aa9758a147e3aa | 9,323 | py | Python | test/get_previous_releases.py | hiphopcoin24/hiphopcoin24 | 09b780546ba9e28b452a8641863aafa90def40d1 | [
"MIT"
] | null | null | null | test/get_previous_releases.py | hiphopcoin24/hiphopcoin24 | 09b780546ba9e28b452a8641863aafa90def40d1 | [
"MIT"
] | null | null | null | test/get_previous_releases.py | hiphopcoin24/hiphopcoin24 | 09b780546ba9e28b452a8641863aafa90def40d1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) 2018-2020 The Hiphopcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Download or build previous releases.
# Needs curl and tar to download a release, or the build dependencies when
# building a release.
import argparse
import contextlib
from fnmatch import fnmatch
import os
from pathlib import Path
import re
import shutil
import subprocess
import sys
import hashlib
SHA256_SUMS = {
"d40f18b4e43c6e6370ef7db9131f584fbb137276ec2e3dba67a4b267f81cb644": "hiphopcoin-0.15.2-aarch64-linux-gnu.tar.gz",
"54fb877a148a6ad189a1e1ab1ff8b11181e58ff2aaf430da55b3fd46ae549a6b": "hiphopcoin-0.15.2-arm-linux-gnueabihf.tar.gz",
"2b843506c3f1af0eeca5854a920264f9a829f02d0d50328005950ddcbe88874d": "hiphopcoin-0.15.2-i686-pc-linux-gnu.tar.gz",
"87e9340ff3d382d543b2b69112376077f0c8b4f7450d372e83b68f5a1e22b2df": "hiphopcoin-0.15.2-osx64.tar.gz",
"566be44190fd76daa01f13d428939dadfb8e3daacefc8fa17f433cad28f73bd5": "hiphopcoin-0.15.2-x86_64-linux-gnu.tar.gz",
"0768c6c15caffbaca6524824c9563b42c24f70633c681c2744649158aa3fd484": "hiphopcoin-0.16.3-aarch64-linux-gnu.tar.gz",
"fb2818069854a6ad20ea03b28b55dbd35d8b1f7d453e90b83eace5d0098a2a87": "hiphopcoin-0.16.3-arm-linux-gnueabihf.tar.gz",
"75a537844313b0a84bdb61ffcdc5c4ce19a738f7ddf71007cd2edf664efd7c37": "hiphopcoin-0.16.3-i686-pc-linux-gnu.tar.gz",
"78c3bff3b619a19aed575961ea43cc9e142959218835cf51aede7f0b764fc25d": "hiphopcoin-0.16.3-osx64.tar.gz",
"5d422a9d544742bc0df12427383f9c2517433ce7b58cf672b9a9b17c2ef51e4f": "hiphopcoin-0.16.3-x86_64-linux-gnu.tar.gz",
"5a6b35d1a348a402f2d2d6ab5aed653a1a1f13bc63aaaf51605e3501b0733b7a": "hiphopcoin-0.17.2-aarch64-linux-gnu.tar.gz",
"d1913a5d19c8e8da4a67d1bd5205d03c8614dfd2e02bba2fe3087476643a729e": "hiphopcoin-0.17.2-arm-linux-gnueabihf.tar.gz",
"d295fc93f39bbf0fd937b730a93184899a2eb6c3a6d53f3d857cbe77ef89b98c": "hiphopcoin-0.17.2-i686-pc-linux-gnu.tar.gz",
"a783ba20706dbfd5b47fbedf42165fce70fbbc7d78003305d964f6b3da14887f": "hiphopcoin-0.17.2-osx64.tar.gz",
"943f9362b9f11130177839116f48f809d83478b4c28591d486ee9a7e35179da6": "hiphopcoin-0.17.2-x86_64-linux-gnu.tar.gz",
"88f343af72803b851c7da13874cc5525026b0b55e63e1b5e1298390c4688adc6": "hiphopcoin-0.18.1-aarch64-linux-gnu.tar.gz",
"cc7d483e4b20c5dabd4dcaf304965214cf4934bcc029ca99cbc9af00d3771a1f": "hiphopcoin-0.18.1-arm-linux-gnueabihf.tar.gz",
"989e847b3e95fc9fedc0b109cae1b4fa43348f2f712e187a118461876af9bd16": "hiphopcoin-0.18.1-i686-pc-linux-gnu.tar.gz",
"b7bbcee7a7540f711b171d6981f939ca8482005fde22689bc016596d80548bb1": "hiphopcoin-0.18.1-osx64.tar.gz",
"425ee5ec631ae8da71ebc1c3f5c0269c627cf459379b9b030f047107a28e3ef8": "hiphopcoin-0.18.1-riscv64-linux-gnu.tar.gz",
"600d1db5e751fa85903e935a01a74f5cc57e1e7473c15fd3e17ed21e202cfe5a": "hiphopcoin-0.18.1-x86_64-linux-gnu.tar.gz",
"3a80431717842672df682bdb619e66523b59541483297772a7969413be3502ff": "hiphopcoin-0.19.1-aarch64-linux-gnu.tar.gz",
"657f28213823d240dd3324d14829702f9ad6f0710f8bdd1c379cb3c447197f48": "hiphopcoin-0.19.1-arm-linux-gnueabihf.tar.gz",
"10d1e53208aa7603022f4acc084a046299ab4ccf25fe01e81b3fb6f856772589": "hiphopcoin-0.19.1-i686-pc-linux-gnu.tar.gz",
"1ae1b87de26487075cd2fd22e0d4ead87d969bd55c44f2f1d873ecdc6147ebb3": "hiphopcoin-0.19.1-osx64.tar.gz",
"aa7a9563b48aa79252c8e7b6a41c07a5441bd9f14c5e4562cc72720ea6cb0ee5": "hiphopcoin-0.19.1-riscv64-linux-gnu.tar.gz",
"5fcac9416e486d4960e1a946145566350ca670f9aaba99de6542080851122e4c": "hiphopcoin-0.19.1-x86_64-linux-gnu.tar.gz"
}
@contextlib.contextmanager
def pushd(new_dir):
    """Context manager: chdir into *new_dir*, restoring the previous working
    directory on exit — even when the body raises.

    Bug fix: the old ``-> None`` annotation was wrong for a generator-based
    context manager and has been dropped; cleanup also now runs via
    ``finally`` so an exception in the body cannot leave the process in
    *new_dir* (the old version restored the cwd in ``finally`` too — this
    preserves that guarantee).
    """
    previous_dir = os.getcwd()
    os.chdir(new_dir)
    try:
        yield
    finally:
        os.chdir(previous_dir)
def download_binary(tag, args) -> int:
    """Download, checksum-verify and unpack the release tarball for *tag*
    into ./<tag>.

    Returns 0 on success, 1 for a missing tag or checksum mismatch, or the
    failing subprocess's return code.
    """
    if Path(tag).is_dir():
        if not args.remove_dir:
            # Reuse a previously downloaded release.
            print('Using cached {}'.format(tag))
            return 0
        shutil.rmtree(tag)
    Path(tag).mkdir()
    bin_path = 'bin/hiphopcoin-core-{}'.format(tag[1:])
    # Release candidates live under a "test.rcN" sub-directory on the server.
    match = re.compile('v(.*)(rc[0-9]+)$').search(tag)
    if match:
        bin_path = 'bin/hiphopcoin-core-{}/test.{}'.format(
            match.group(1), match.group(2))
    tarball = 'hiphopcoin-{tag}-{platform}.tar.gz'.format(
        tag=tag[1:], platform=args.platform)
    tarballUrl = 'https://hiphopcoincore.org/{bin_path}/{tarball}'.format(
        bin_path=bin_path, tarball=tarball)

    print('Fetching: {tarballUrl}'.format(tarballUrl=tarballUrl))

    # Probe with a HEAD request first so a missing tag fails fast.
    # (`status` is unused; communicate() returns (stdout, stderr).)
    header, status = subprocess.Popen(
        ['curl', '--head', tarballUrl], stdout=subprocess.PIPE).communicate()
    if re.search("404 Not Found", header.decode("utf-8")):
        print("Binary tag was not found")
        return 1

    curlCmds = [
        ['curl', '--remote-name', tarballUrl]
    ]

    for cmd in curlCmds:
        ret = subprocess.run(cmd).returncode
        if ret:
            return ret

    # Verify the download against the pinned SHA256 sums.
    hasher = hashlib.sha256()
    with open(tarball, "rb") as afile:
        hasher.update(afile.read())
    tarballHash = hasher.hexdigest()

    if tarballHash not in SHA256_SUMS or SHA256_SUMS[tarballHash] != tarball:
        print("Checksum did not match")
        return 1
    print("Checksum matched")

    # Extract tarball
    ret = subprocess.run(['tar', '-zxf', tarball, '-C', tag,
                          '--strip-components=1',
                          'hiphopcoin-{tag}'.format(tag=tag[1:])]).returncode
    if ret:
        return ret

    # Remove the tarball once its contents are extracted.
    Path(tarball).unlink()
    return 0
def build_release(tag, args) -> int:
    """Clone, check out and build *tag* from source; the resulting binaries
    are moved into <tag>/bin so the layout matches a downloaded release.

    Returns 0 on success, otherwise the first failing step's status.
    (Indentation reconstructed: the clone is assumed to happen only when
    the target directory does not already exist.)
    """
    githubUrl = "https://github.com/hiphopcoin/hiphopcoin"
    if args.remove_dir:
        if Path(tag).is_dir():
            shutil.rmtree(tag)
    if not Path(tag).is_dir():
        # fetch new tags
        subprocess.run(
            ["git", "fetch", githubUrl, "--tags"])
        output = subprocess.check_output(['git', 'tag', '-l', tag])
        if not output:
            print('Tag {} not found'.format(tag))
            return 1
        ret = subprocess.run([
            'git', 'clone', githubUrl, tag
        ]).returncode
        if ret:
            return ret
    with pushd(tag):
        ret = subprocess.run(['git', 'checkout', tag]).returncode
        if ret:
            return ret
        host = args.host
        if args.depends:
            # Build the bundled dependency tree first, then configure
            # against its install prefix.
            with pushd('depends'):
                ret = subprocess.run(['make', 'NO_QT=1']).returncode
                if ret:
                    return ret
                host = os.environ.get(
                    'HOST', subprocess.check_output(['./config.guess']))
        config_flags = '--prefix={pwd}/depends/{host} '.format(
            pwd=os.getcwd(),
            host=host) + args.config_flags
        cmds = [
            './autogen.sh',
            './configure {}'.format(config_flags),
            'make',
        ]
        for cmd in cmds:
            ret = subprocess.run(cmd.split()).returncode
            if ret:
                return ret
        # Move binaries, so they're in the same place as in the
        # release download
        Path('bin').mkdir(exist_ok=True)
        files = ['hiphopcoind', 'hiphopcoin-cli', 'hiphopcoin-tx']
        for f in files:
            Path('src/'+f).rename('bin/'+f)
    return 0
def check_host(args) -> int:
    """Determine the build host triplet (``args.host``) and, for binary
    downloads, the matching release platform string (``args.platform``).

    Returns 0 on success, 1 when no binary platform matches the host.

    Bug fix: the old code passed ``subprocess.check_output(...)`` as the
    *default* argument of ``os.environ.get()``, which Python evaluates
    eagerly — so ``./depends/config.guess`` was executed (and could fail)
    even when the HOST environment variable was already set. The subprocess
    is now only run when HOST is absent.
    """
    host = os.environ.get('HOST')
    if host is None:
        # Only shell out when the environment does not tell us the host.
        host = subprocess.check_output('./depends/config.guess').decode()
    args.host = host
    if args.download_binary:
        platforms = {
            'x86_64-*-linux*': 'x86_64-linux-gnu',
            'x86_64-apple-darwin*': 'osx64',
        }
        args.platform = ''
        for pattern, target in platforms.items():
            if fnmatch(args.host, pattern):
                args.platform = target
        if not args.platform:
            print('Not sure which binary to download for {}'.format(args.host))
            return 1
    return 0
def main(args) -> int:
    """Entry point: prepare the target directory, then download or build
    each requested tag. Returns the first non-zero step status, else 0."""
    Path(args.target_dir).mkdir(exist_ok=True, parents=True)
    print("Releases directory: {}".format(args.target_dir))
    ret = check_host(args)
    if ret:
        return ret
    if args.download_binary:
        with pushd(args.target_dir):
            for tag in args.tags:
                ret = download_binary(tag, args)
                if ret:
                    return ret
        return 0
    # Source-build path: assemble the configure flags once, shared by
    # every tag built below.
    args.config_flags = os.environ.get('CONFIG_FLAGS', '')
    args.config_flags += ' --without-gui --disable-tests --disable-bench'
    with pushd(args.target_dir):
        for tag in args.tags:
            ret = build_release(tag, args)
            if ret:
                return ret
    return 0
if __name__ == '__main__':
    # Command-line interface; exits with main()'s status code.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-r', '--remove-dir', action='store_true',
                        help='remove existing directory.')
    parser.add_argument('-d', '--depends', action='store_true',
                        help='use depends.')
    parser.add_argument('-b', '--download-binary', action='store_true',
                        help='download release binary.')
    parser.add_argument('-t', '--target-dir', action='store',
                        help='target directory.', default='releases')
    parser.add_argument('tags', nargs='+',
                        help="release tags. e.g.: v0.18.1 v0.20.0rc2")
    args = parser.parse_args()
    sys.exit(main(args))
acf34fea28fe3a8642af6049f4d29dbcc0c50116 | 5,791 | py | Python | examples/ExB_drift/exb_drift.py | tien-vo/tpsim | 8ea7153ff252774533a9567b5eb8f4c11edd5dde | [
"MIT"
] | 1 | 2021-12-30T05:49:09.000Z | 2021-12-30T05:49:09.000Z | examples/ExB_drift/exb_drift.py | tien-vo/tpsim | 8ea7153ff252774533a9567b5eb8f4c11edd5dde | [
"MIT"
] | null | null | null | examples/ExB_drift/exb_drift.py | tien-vo/tpsim | 8ea7153ff252774533a9567b5eb8f4c11edd5dde | [
"MIT"
] | null | null | null | from itertools import product
import matplotlib.pyplot as plt
import mpl_extras as me
import tpsim as tp
import numpy as np
import warnings
import os
# --------------------------------------------------------------------------- #
#                           Simulation parameters
# --------------------------------------------------------------------------- #
## ---------- Simulation time
# Start time [1/wce]
t_start = 0
# Stop time [1/wce]  (five gyro-periods)
t_stop = 5 * 2 * np.pi
# Time step [1/wce]
dt = np.pi * 1e-3
# Number of time steps
Nt = int(t_stop / dt)
# Interval to log
log_interval = Nt // 10
## ---------- Background parameters
# Background magnetic field [nT]
B0 = 10
# Background electric field [mV/m]; eps is the dimensionless E/(c*B) ratio
eps = 1e-3
E0 = eps * tp.c * B0 * 1e-3
# Number density [1/cc]
n = 5
## ---------- Particle parameters
# Initial kinetic energies [eV] (one entry per particle)
KE = np.array([10, 50])
# Initial gyrophases [deg] — presumably; passed through np.radians below
GP = np.array([0, 0])
# Initial pitch angles [deg]
PA = np.array([45, 90])
Np = len(KE)
# Normalized position
xn, yn, zn = np.zeros((3, Np))
# Normalized velocity
uxn, uyn, uzn = tp.ES2US(KE, np.radians(GP), np.radians(PA)) / tp.c
## ---------- Electromagnetic field model
## Define the electromagnetic field here (background + perturbations)
def EM_model(t, x, y, z, ux, uy, uz):
    """Uniform field model: B along z, a small E (magnitude `eps`) along x,
    in normalized units.

    Returns six `Np`-dimensional arrays: Ex, Ey, Ez, Bx, By, Bz.
    """
    Ex = np.full(Np, eps)
    Ey, Ez, Bx, By = (np.zeros(Np) for _ in range(4))
    Bz = np.ones(Np)
    return Ex, Ey, Ez, Bx, By, Bz
# --------------------------------------------------------------------------- #
# Post-processing
# --------------------------------------------------------------------------- #
def check_solution(X, Y, Z, UX, UY, UZ, s, tol=1e-3):
    r"""Compare the integrated trajectories against the analytical ExB-drift
    solution, assert agreement within `tol`, and plot both.

    The analytical solution is given by
        x(t)   = x_0 + v_\bot \sin(t+\delta)
        y(t)   = y_0 + qq v_\bot \cos(t+\delta) - |ExB| t
        v_x(t) = v_\bot\cos(t + \delta)
        v_y(t) = -qq v_\bot\sin(t + \delta) - |ExB|      (|B|=1)

    :param X, Y, Z, UX, UY, UZ: (Np, Nt) history arrays from the solver.
    :param s: species key ("e-" or "i") used to select the sign qq.
    :param tol: rtol/atol used in the numpy.isclose comparison.
    :raises AssertionError: if any component deviates beyond `tol`.

    Bug fix: the x-axis label was gated on ``n == 2``, but ``n`` is the
    *column* index of the 3x2 axes grid and only takes values 0-1, so no
    subplot ever got an x label; the check now uses the row index
    ``m == 2`` (bottom row).
    """
    qq = tp.qq[s]
    T = np.arange(Nt) * dt
    vperp = np.sqrt(uxn ** 2 + uyn ** 2)
    delta = np.arctan2(-qq * (uyn + eps), uxn)
    # Solve for IC
    x0 = xn - vperp * np.sin(delta)
    y0 = yn - qq * vperp * np.cos(delta)
    # Create solution arrays
    XS, YS, ZS, UXS, UYS, UZS = np.zeros((6, Np, Nt))
    # Loop through particles
    for i in range(X.shape[0]):
        XS[i, :] = x0[i] + vperp[i] * np.sin(T + delta[i])
        YS[i, :] = y0[i] + qq * vperp[i] * np.cos(T + delta[i]) - eps * T
        ZS[i, :] = zn[i] + uzn[i] * T
        UXS[i, :] = vperp[i] * np.cos(T + delta[i])
        UYS[i, :] = -qq * vperp[i] * np.sin(T + delta[i]) - eps
        UZS[i, :] = uzn[i]
    # Check
    assert np.isclose(X, XS, rtol=tol, atol=tol).all()
    assert np.isclose(Y, YS, rtol=tol, atol=tol).all()
    assert np.isclose(Z, ZS, rtol=tol, atol=tol).all()
    assert np.isclose(UX, UXS, rtol=tol, atol=tol).all()
    assert np.isclose(UY, UYS, rtol=tol, atol=tol).all()
    assert np.isclose(UZ, UZS, rtol=tol, atol=tol).all()

    me.setup_mpl(tex=True)
    # Loop through particles
    for i in range(X.shape[0]):
        # Create figure
        fig, axes = plt.subplots(3, 2, figsize=(12, 6), sharex=True)
        fig.subplots_adjust(wspace=0.3)
        fig.suptitle(
            f"Particle = {s}; KE0 = {KE[i]} eV; P0 = {PA[i]}$^\circ$"
        )
        # Plot solved solutions
        axes[0, 0].plot(T, X[i, :], "-k")
        axes[1, 0].plot(T, Y[i, :], "-k")
        axes[2, 0].plot(T, Z[i, :], "-k")
        axes[0, 1].plot(T, UX[i, :], "-k")
        axes[1, 1].plot(T, UY[i, :], "-k")
        axes[2, 1].plot(T, UZ[i, :], "-k")
        # Plot analytical solutions
        axes[0, 0].plot(T, XS[i, :], "--r")
        axes[1, 0].plot(T, YS[i, :], "--r")
        axes[2, 0].plot(T, ZS[i, :], "--r")
        axes[0, 1].plot(T, UXS[i, :], "--r")
        axes[1, 1].plot(T, UYS[i, :], "--r")
        axes[2, 1].plot(T, UZS[i, :], "--r")
        # Formats
        axes[0, 0].set_ylabel("$x\\Omega_{c}/c$")
        axes[1, 0].set_ylabel("$y\\Omega_{c}/c$")
        axes[2, 0].set_ylabel("$z\\Omega_{c}/c$")
        axes[0, 1].set_ylabel("$u_x/c$")
        axes[1, 1].set_ylabel("$u_y/c$")
        axes[2, 1].set_ylabel("$u_z/c$")
        for (m, n) in np.ndindex(axes.shape):
            ax = axes[m, n]
            ax.tick_params(**me.params)
            ax.set_xlim(T.min(), T.max())
            # m is the row index: only the bottom row gets an x label.
            if m == 2:
                ax.set_xlabel("$t\\Omega_{c}$")
        string = "electron" if s == "e-" else "ion"
        fig.savefig(f"{string}_trajectories_{i}.png")
        plt.close(fig)
# --------------------------------------------------------------------------- #
# Run simulation
# --------------------------------------------------------------------------- #
if __name__ == "__main__":
    # Run the simulation once per species and verify against the
    # analytical ExB-drift solution.
    for s in ["e-", "i"]:
        # Initial conditions
        t, x, y, z, ux, uy, uz = t_start, xn, yn, zn, uxn, uyn, uzn
        # History arrays, shape (Np, Nt); column 0 holds the ICs.
        X, Y, Z, UX, UY, UZ = np.zeros((6, Np, Nt))
        X[:, 0] = x
        Y[:, 0] = y
        Z[:, 0] = z
        UX[:, 0] = ux
        UY[:, 0] = uy
        UZ[:, 0] = uz
        # Main loop
        print(f"Starting main loop for {s}")
        # Hoist the attribute lookup out of the hot loop.
        advance = tp.advance
        for n in range(1, Nt):
            # Advance particles
            t, x, y, z, ux, uy, uz = advance(
                t, x, y, z, ux, uy, uz, EM_model, dt, s=s
            )
            # Save to history arrays
            X[:, n] = x
            Y[:, n] = y
            Z[:, n] = z
            UX[:, n] = ux
            UY[:, n] = uy
            UZ[:, n] = uz
            # Log
            if n % log_interval == 0: print(f"Pushed {n} steps")
        print(f"Done!")
        # Post-processing
        check_solution(X, Y, Z, UX, UY, UZ, s)
acf34ff57374530fa2df90162a3bb9966931c7ce | 23,062 | py | Python | flask_jwt_extended/jwt_manager.py | BnGx/flask-jwt-extended | 890d2c83e7456365a953c9f3c30947163c7e7183 | [
"MIT"
] | null | null | null | flask_jwt_extended/jwt_manager.py | BnGx/flask-jwt-extended | 890d2c83e7456365a953c9f3c30947163c7e7183 | [
"MIT"
] | null | null | null | flask_jwt_extended/jwt_manager.py | BnGx/flask-jwt-extended | 890d2c83e7456365a953c9f3c30947163c7e7183 | [
"MIT"
] | null | null | null | import datetime
from warnings import warn
from jwt import (
ExpiredSignatureError, InvalidTokenError, InvalidAudienceError,
InvalidIssuerError, DecodeError
)
try:
from flask import _app_ctx_stack as ctx_stack
except ImportError: # pragma: no cover
from flask import _request_ctx_stack as ctx_stack
from flask_jwt_extended.config import config
from flask_jwt_extended.exceptions import (
JWTDecodeError, NoAuthorizationError, InvalidHeaderError, WrongTokenError,
RevokedTokenError, FreshTokenRequired, CSRFError, UserLoadError,
UserClaimsVerificationError
)
from flask_jwt_extended.default_callbacks import (
default_expired_token_callback, default_user_claims_callback,
default_user_identity_callback, default_invalid_token_callback,
default_unauthorized_callback, default_needs_fresh_token_callback,
default_revoked_token_callback, default_user_loader_error_callback,
default_claims_verification_callback, default_verify_claims_failed_callback,
default_decode_key_callback, default_encode_key_callback,
default_jwt_headers_callback)
from flask_jwt_extended.tokens import (
encode_refresh_token, encode_access_token
)
from flask_jwt_extended.utils import get_jwt_identity
class JWTManager(object):
"""
An object used to hold JWT settings and callback functions for the
Flask-JWT-Extended extension.
Instances of :class:`JWTManager` are *not* bound to specific apps, so
you can create one in the main body of your code and then bind it
to your app in a factory function.
"""
def __init__(self, app=None):
"""
Create the JWTManager instance. You can either pass a flask application
in directly here to register this extension with the flask app, or
call init_app after creating this object (in a factory pattern).
:param app: A flask application
"""
# Register the default error handler callback methods. These can be
# overridden with the appropriate loader decorators
self._user_claims_callback = default_user_claims_callback
self._user_identity_callback = default_user_identity_callback
self._expired_token_callback = default_expired_token_callback
self._invalid_token_callback = default_invalid_token_callback
self._unauthorized_callback = default_unauthorized_callback
self._needs_fresh_token_callback = default_needs_fresh_token_callback
self._revoked_token_callback = default_revoked_token_callback
self._user_loader_callback = None
self._user_loader_error_callback = default_user_loader_error_callback
self._token_in_blacklist_callback = None
self._claims_verification_callback = default_claims_verification_callback
self._verify_claims_failed_callback = default_verify_claims_failed_callback
self._decode_key_callback = default_decode_key_callback
self._encode_key_callback = default_encode_key_callback
self._jwt_additional_header_callback = default_jwt_headers_callback
# Register this extension with the flask app now (if it is provided)
if app is not None:
self.init_app(app)
    def init_app(self, app):
        """
        Register this extension with the flask app.

        :param app: A flask application
        """
        # Save this extension instance on the app so other code (and the
        # config proxy) can locate it later via app.extensions.
        if not hasattr(app, 'extensions'):  # pragma: no cover
            app.extensions = {}
        app.extensions['flask-jwt-extended'] = self
        # Install every default config value, then hook the flask error
        # handlers that translate JWT failures into HTTP responses.
        self._set_default_configuration_options(app)
        self._set_error_handler_callbacks(app)
def _set_error_handler_callbacks(self, app):
"""
Sets the error handler callbacks used by this extension
"""
@app.errorhandler(NoAuthorizationError)
def handle_auth_error(e):
return self._unauthorized_callback(str(e))
@app.errorhandler(CSRFError)
def handle_csrf_error(e):
return self._unauthorized_callback(str(e))
@app.errorhandler(ExpiredSignatureError)
def handle_expired_error(e):
try:
token = ctx_stack.top.expired_jwt
return self._expired_token_callback(token)
except TypeError:
msg = (
"jwt.expired_token_loader callback now takes the expired token "
"as an additional parameter. Example: expired_callback(token)"
)
warn(msg, DeprecationWarning)
return self._expired_token_callback()
@app.errorhandler(InvalidHeaderError)
def handle_invalid_header_error(e):
return self._invalid_token_callback(str(e))
@app.errorhandler(DecodeError)
def handle_invalid_header_error(e):
return self._invalid_token_callback(str(e))
@app.errorhandler(InvalidTokenError)
def handle_invalid_token_error(e):
return self._invalid_token_callback(str(e))
@app.errorhandler(JWTDecodeError)
def handle_jwt_decode_error(e):
return self._invalid_token_callback(str(e))
@app.errorhandler(WrongTokenError)
def handle_wrong_token_error(e):
return self._invalid_token_callback(str(e))
@app.errorhandler(InvalidAudienceError)
def handle_invalid_audience_error(e):
return self._invalid_token_callback(str(e))
@app.errorhandler(InvalidIssuerError)
def handle_invalid_issuer_error(e):
return self._invalid_token_callback(str(e))
@app.errorhandler(RevokedTokenError)
def handle_revoked_token_error(e):
return self._revoked_token_callback()
@app.errorhandler(FreshTokenRequired)
def handle_fresh_token_required(e):
return self._needs_fresh_token_callback()
@app.errorhandler(UserLoadError)
def handler_user_load_error(e):
# The identity is already saved before this exception was raised,
# otherwise a different exception would be raised, which is why we
# can safely call get_jwt_identity() here
identity = get_jwt_identity()
return self._user_loader_error_callback(identity)
@app.errorhandler(UserClaimsVerificationError)
def handle_failed_user_claims_verification(e):
return self._verify_claims_failed_callback()
@staticmethod
def _set_default_configuration_options(app):
"""
Sets the default configuration options used by this extension
"""
# Where to look for the JWT. Available options are cookies or headers
app.config.setdefault('JWT_TOKEN_LOCATION', ('headers',))
# Options for JWTs when the TOKEN_LOCATION is headers
app.config.setdefault('JWT_HEADER_NAME', 'Authorization')
app.config.setdefault('JWT_HEADER_TYPE', 'Bearer')
# Options for JWTs then the TOKEN_LOCATION is query_string
app.config.setdefault('JWT_QUERY_STRING_NAME', 'jwt')
# Option for JWTs when the TOKEN_LOCATION is cookies
app.config.setdefault('JWT_ACCESS_COOKIE_NAME', 'access_token_cookie')
app.config.setdefault('JWT_REFRESH_COOKIE_NAME', 'refresh_token_cookie')
app.config.setdefault('JWT_ACCESS_COOKIE_PATH', '/')
app.config.setdefault('JWT_REFRESH_COOKIE_PATH', '/')
app.config.setdefault('JWT_COOKIE_SECURE', False)
app.config.setdefault('JWT_COOKIE_DOMAIN', None)
app.config.setdefault('JWT_SESSION_COOKIE', True)
app.config.setdefault('JWT_COOKIE_SAMESITE', None)
# Option for JWTs when the TOKEN_LOCATION is json
app.config.setdefault('JWT_JSON_KEY', 'access_token')
app.config.setdefault('JWT_REFRESH_JSON_KEY', 'refresh_token')
# Options for using double submit csrf protection
app.config.setdefault('JWT_COOKIE_CSRF_PROTECT', True)
app.config.setdefault('JWT_CSRF_METHODS', ['POST', 'PUT', 'PATCH', 'DELETE'])
app.config.setdefault('JWT_ACCESS_CSRF_HEADER_NAME', 'X-CSRF-TOKEN')
app.config.setdefault('JWT_REFRESH_CSRF_HEADER_NAME', 'X-CSRF-TOKEN')
app.config.setdefault('JWT_CSRF_IN_COOKIES', True)
app.config.setdefault('JWT_ACCESS_CSRF_COOKIE_NAME', 'csrf_access_token')
app.config.setdefault('JWT_REFRESH_CSRF_COOKIE_NAME', 'csrf_refresh_token')
app.config.setdefault('JWT_ACCESS_CSRF_COOKIE_PATH', '/')
app.config.setdefault('JWT_REFRESH_CSRF_COOKIE_PATH', '/')
app.config.setdefault('JWT_CSRF_CHECK_COOKIES', False)
app.config.setdefault('JWT_CSRF_CHECK_FORM', False)
app.config.setdefault('JWT_ACCESS_CSRF_FIELD_NAME', 'csrf_token')
app.config.setdefault('JWT_REFRESH_CSRF_FIELD_NAME', 'csrf_token')
# How long an a token will live before they expire.
app.config.setdefault('JWT_ACCESS_TOKEN_EXPIRES', datetime.timedelta(minutes=15))
app.config.setdefault('JWT_REFRESH_TOKEN_EXPIRES', datetime.timedelta(days=30))
# What algorithm to use to sign the token. See here for a list of options:
# https://github.com/jpadilla/pyjwt/blob/master/jwt/api_jwt.py
app.config.setdefault('JWT_ALGORITHM', 'HS256')
# What algorithms are allowed to decode a token
app.config.setdefault('JWT_DECODE_ALGORITHMS', None)
# Secret key to sign JWTs with. Only used if a symmetric algorithm is
# used (such as the HS* algorithms). We will use the app secret key
# if this is not set.
app.config.setdefault('JWT_SECRET_KEY', None)
# Keys to sign JWTs with when use when using an asymmetric
# (public/private key) algorithm, such as RS* or EC*
app.config.setdefault('JWT_PRIVATE_KEY', None)
app.config.setdefault('JWT_PUBLIC_KEY', None)
# Options for blacklisting/revoking tokens
app.config.setdefault('JWT_BLACKLIST_ENABLED', False)
app.config.setdefault('JWT_BLACKLIST_TOKEN_CHECKS', ('access', 'refresh'))
app.config.setdefault('JWT_IDENTITY_CLAIM', 'identity')
app.config.setdefault('JWT_USER_CLAIMS', 'user_claims')
app.config.setdefault('JWT_DECODE_AUDIENCE', None)
app.config.setdefault('JWT_ENCODE_ISSUER', None)
app.config.setdefault('JWT_DECODE_ISSUER', None)
app.config.setdefault('JWT_DECODE_LEEWAY', 0)
app.config.setdefault('JWT_CLAIMS_IN_REFRESH_TOKEN', False)
app.config.setdefault('JWT_ERROR_MESSAGE_KEY', 'msg')
def user_claims_loader(self, callback):
"""
This decorator sets the callback function for adding custom claims to an
access token when :func:`~flask_jwt_extended.create_access_token` is
called. By default, no extra user claims will be added to the JWT.
*HINT*: The callback function must be a function that takes only **one** argument,
which is the object passed into
:func:`~flask_jwt_extended.create_access_token`, and returns the custom
claims you want included in the access tokens. This returned claims
must be *JSON serializable*.
"""
self._user_claims_callback = callback
return callback
def user_identity_loader(self, callback):
"""
This decorator sets the callback function for getting the JSON
serializable identity out of whatever object is passed into
:func:`~flask_jwt_extended.create_access_token` and
:func:`~flask_jwt_extended.create_refresh_token`. By default, this will
return the unmodified object that is passed in as the `identity` kwarg
to the above functions.
*HINT*: The callback function must be a function that takes only **one** argument,
which is the object passed into
:func:`~flask_jwt_extended.create_access_token` or
:func:`~flask_jwt_extended.create_refresh_token`, and returns the
*JSON serializable* identity of this token.
"""
self._user_identity_callback = callback
return callback
    def expired_token_loader(self, callback):
        """
        This decorator sets the callback function that will be called if an
        expired JWT attempts to access a protected endpoint. The default
        implementation will return a 401 status code with the JSON:
        {"msg": "Token has expired"}

        *HINT*: The callback must be a function that takes **one** argument,
        which is a dictionary containing the data for the expired token, and
        returns a *Flask response*.
        """
        self._expired_token_callback = callback
        return callback
def invalid_token_loader(self, callback):
"""
This decorator sets the callback function that will be called if an
invalid JWT attempts to access a protected endpoint. The default
implementation will return a 422 status code with the JSON:
{"msg": "<error description>"}
*HINT*: The callback must be a function that takes only **one** argument, which is
a string which contains the reason why a token is invalid, and returns
a *Flask response*.
"""
self._invalid_token_callback = callback
return callback
def unauthorized_loader(self, callback):
"""
This decorator sets the callback function that will be called if an
no JWT can be found when attempting to access a protected endpoint.
The default implementation will return a 401 status code with the JSON:
{"msg": "<error description>"}
*HINT*: The callback must be a function that takes only **one** argument, which is
a string which contains the reason why a JWT could not be found, and
returns a *Flask response*.
"""
self._unauthorized_callback = callback
return callback
def needs_fresh_token_loader(self, callback):
"""
This decorator sets the callback function that will be called if a
valid and non-fresh token attempts to access an endpoint protected with
the :func:`~flask_jwt_extended.fresh_jwt_required` decorator. The
default implementation will return a 401 status code with the JSON:
{"msg": "Fresh token required"}
*HINT*: The callback must be a function that takes **no** arguments, and returns
a *Flask response*.
"""
self._needs_fresh_token_callback = callback
return callback
def revoked_token_loader(self, callback):
"""
This decorator sets the callback function that will be called if a
revoked token attempts to access a protected endpoint. The default
implementation will return a 401 status code with the JSON:
{"msg": "Token has been revoked"}
*HINT*: The callback must be a function that takes **no** arguments, and returns
a *Flask response*.
"""
self._revoked_token_callback = callback
return callback
def user_loader_callback_loader(self, callback):
"""
This decorator sets the callback function that will be called to
automatically load an object when a protected endpoint is accessed.
By default this is not used.
*HINT*: The callback must take **one** argument which is the identity JWT
accessing the protected endpoint, and it must return any object (which can
then be accessed via the :attr:`~flask_jwt_extended.current_user` LocalProxy
in the protected endpoint), or `None` in the case of a user not being
able to be loaded for any reason. If this callback function returns
`None`, the :meth:`~flask_jwt_extended.JWTManager.user_loader_error_loader`
will be called.
"""
self._user_loader_callback = callback
return callback
def user_loader_error_loader(self, callback):
"""
This decorator sets the callback function that will be called if `None`
is returned from the
:meth:`~flask_jwt_extended.JWTManager.user_loader_callback_loader`
callback function. The default implementation will return
a 401 status code with the JSON:
{"msg": "Error loading the user <identity>"}
*HINT*: The callback must be a function that takes **one** argument, which is the
identity of the user who failed to load, and must return a *Flask response*.
"""
self._user_loader_error_callback = callback
return callback
def token_in_blacklist_loader(self, callback):
"""
This decorator sets the callback function that will be called when
a protected endpoint is accessed and will check if the JWT has been
been revoked. By default, this callback is not used.
*HINT*: The callback must be a function that takes **one** argument, which is the
decoded JWT (python dictionary), and returns *`True`* if the token
has been blacklisted (or is otherwise considered revoked), or *`False`*
otherwise.
"""
self._token_in_blacklist_callback = callback
return callback
def claims_verification_loader(self, callback):
"""
This decorator sets the callback function that will be called when
a protected endpoint is accessed, and will check if the custom claims
in the JWT are valid. By default, this callback is not used. The
error returned if the claims are invalid can be controlled via the
:meth:`~flask_jwt_extended.JWTManager.claims_verification_failed_loader`
decorator.
*HINT*: This callback must be a function that takes **one** argument, which is the
custom claims (python dict) present in the JWT, and returns *`True`* if the
claims are valid, or *`False`* otherwise.
"""
self._claims_verification_callback = callback
return callback
def claims_verification_failed_loader(self, callback):
"""
This decorator sets the callback function that will be called if
the :meth:`~flask_jwt_extended.JWTManager.claims_verification_loader`
callback returns False, indicating that the user claims are not valid.
The default implementation will return a 400 status code with the JSON:
{"msg": "User claims verification failed"}
*HINT*: This callback must be a function that takes **no** arguments, and returns
a *Flask response*.
"""
self._verify_claims_failed_callback = callback
return callback
def decode_key_loader(self, callback):
"""
This decorator sets the callback function for getting the JWT decode key and
can be used to dynamically choose the appropriate decode key based on token
contents.
The default implementation returns the decode key specified by
`JWT_SECRET_KEY` or `JWT_PUBLIC_KEY`, depending on the signing algorithm.
*HINT*: The callback function should be a function that takes
**two** arguments, which are the unverified claims and headers of the jwt
(dictionaries). The function must return a *string* which is the decode key
in PEM format to verify the token.
"""
self._decode_key_callback = callback
return callback
def encode_key_loader(self, callback):
"""
This decorator sets the callback function for getting the JWT encode key and
can be used to dynamically choose the appropriate encode key based on the
token identity.
The default implementation returns the encode key specified by
`JWT_SECRET_KEY` or `JWT_PRIVATE_KEY`, depending on the signing algorithm.
*HINT*: The callback function must be a function that takes only **one**
argument, which is the identity as passed into the create_access_token
or create_refresh_token functions, and must return a *string* which is
the decode key to verify the token.
"""
self._encode_key_callback = callback
return callback
    def additional_headers_loader(self, callback):
        """
        This decorator sets the callback function for adding custom headers to
        a JWT when :func:`~flask_jwt_extended.create_access_token` or
        :func:`~flask_jwt_extended.create_refresh_token` is called. By
        default, two headers will be added: the type of the token, which is
        JWT, and the signing algorithm being used, such as HMAC SHA256 or RSA.

        *HINT*: The callback function must be a function that takes only
        **one** argument, which is the identity passed into
        :func:`~flask_jwt_extended.create_access_token` or
        :func:`~flask_jwt_extended.create_refresh_token`, and returns a
        dictionary of additional headers to include in the token. The
        returned headers must be *JSON serializable*.
        """
        self._jwt_additional_header_callback = callback
        return callback
def _create_refresh_token(self, identity, expires_delta=None, user_claims=None,
headers=None):
if expires_delta is None:
expires_delta = config.refresh_expires
if user_claims is None and config.user_claims_in_refresh_token:
user_claims = self._user_claims_callback(identity)
if headers is None:
headers = self._jwt_additional_header_callback(identity)
refresh_token = encode_refresh_token(
identity=self._user_identity_callback(identity),
secret=self._encode_key_callback(identity),
algorithm=config.algorithm,
expires_delta=expires_delta,
user_claims=user_claims,
csrf=config.csrf_protect,
identity_claim_key=config.identity_claim_key,
user_claims_key=config.user_claims_key,
json_encoder=config.json_encoder,
headers=headers
)
return refresh_token
def _create_access_token(self, identity, fresh=False, expires_delta=None,
user_claims=None, headers=None):
if expires_delta is None:
expires_delta = config.access_expires
if user_claims is None:
user_claims = self._user_claims_callback(identity)
if headers is None:
headers = self._jwt_additional_header_callback(identity)
access_token = encode_access_token(
identity=self._user_identity_callback(identity),
secret=self._encode_key_callback(identity),
algorithm=config.algorithm,
expires_delta=expires_delta,
fresh=fresh,
user_claims=user_claims,
csrf=config.csrf_protect,
identity_claim_key=config.identity_claim_key,
user_claims_key=config.user_claims_key,
json_encoder=config.json_encoder,
headers=headers,
issuer=config.encode_issuer,
)
return access_token
| 43.67803 | 90 | 0.689359 |
acf351685ee4c24cae61a1146b5aae17697bcbfb | 927 | py | Python | corehq/motech/migrations/0003_auto_20200102_1006.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/motech/migrations/0003_auto_20200102_1006.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/motech/migrations/0003_auto_20200102_1006.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration for the ``motech`` app: re-declares four
    ``RequestLog`` fields, each carrying ``db_index=True``."""
    dependencies = [
        ('motech', '0002_requestlog_payload_id'),
    ]
    operations = [
        # payload_id: indexed, optional CharField (max_length=126).
        migrations.AlterField(
            model_name='requestlog',
            name='payload_id',
            field=models.CharField(blank=True, db_index=True, max_length=126, null=True),
        ),
        # request_url: indexed CharField (max_length=255).
        migrations.AlterField(
            model_name='requestlog',
            name='request_url',
            field=models.CharField(db_index=True, max_length=255),
        ),
        # response_status: indexed, nullable integer.
        migrations.AlterField(
            model_name='requestlog',
            name='response_status',
            field=models.IntegerField(db_index=True, null=True),
        ),
        # timestamp: indexed, set automatically on row creation.
        migrations.AlterField(
            model_name='requestlog',
            name='timestamp',
            field=models.DateTimeField(auto_now_add=True, db_index=True),
        ),
    ]
| 28.96875 | 89 | 0.586839 |
acf352a5a6a0bb91d8ebbf8a417635bc6507d04c | 6,065 | py | Python | test/system/test_api_ipinterfaces.py | kirankumarcelestial/pyeapi | 5174ae6c98a8ddcef26320b0e6a8a6640fee9e77 | [
"BSD-3-Clause"
] | null | null | null | test/system/test_api_ipinterfaces.py | kirankumarcelestial/pyeapi | 5174ae6c98a8ddcef26320b0e6a8a6640fee9e77 | [
"BSD-3-Clause"
] | null | null | null | test/system/test_api_ipinterfaces.py | kirankumarcelestial/pyeapi | 5174ae6c98a8ddcef26320b0e6a8a6640fee9e77 | [
"BSD-3-Clause"
] | null | null | null | #
# Copyright (c) 2014, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import unittest
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../lib'))
from systestlib import DutSystemTest, random_interface
class TestResourceIpinterfaces(DutSystemTest):
    def test_get(self):
        # Configure a randomly chosen interface as a routed port with a known
        # address and MTU, then verify get() reports exactly those values.
        for dut in self.duts:
            intf = random_interface(dut)
            dut.config(['default interface %s' % intf, 'interface %s' % intf,
                        'no switchport', 'ip address 99.98.99.99/24',
                        'mtu 1800'])
            result = dut.api('ipinterfaces').get(intf)
            values = dict(name=intf, address='99.98.99.99/24',
                          mtu=1800)
            self.assertEqual(values, result, 'dut=%s' % dut)
def test_get_interface_wo_ip_adddress(self):
for dut in self.duts:
intf = random_interface(dut)
dut.config(['default interface %s' % intf, 'interface %s' % intf,
'no switchport'])
result = dut.api('ipinterfaces').get(intf)
self.assertIsNone(result['address'])
def test_getall(self):
for dut in self.duts:
result = dut.api('interfaces').getall()
self.assertIsInstance(result, dict)
for intf in ['Management1']:
self.assertIn(intf, result)
def test_create_and_return_true(self):
for dut in self.duts:
intf = random_interface(dut)
dut.config('default interface %s' % intf)
resource = dut.api('ipinterfaces')
result = resource.create(intf)
self.assertTrue(result, 'dut=%s' % dut)
config = dut.run_commands('show running-config interfaces %s' %
intf, 'text')
self.assertIn('no switchport', config[0]['output'])
dut.config('default interface %s' % intf)
def test_delete_and_return_true(self):
for dut in self.duts:
intf = random_interface(dut)
dut.config(['interface %s' % intf, 'ip address 199.1.1.1/24'])
resource = dut.api('ipinterfaces')
result = resource.delete(intf)
self.assertTrue(result, 'dut=%s' % dut)
config = dut.run_commands('show running-config interfaces %s' %
intf, 'text')
self.assertNotIn('ip address 199.1.1.1/24', config[0]['output'],
'dut=%s' % dut)
dut.config('default interface %s' % intf)
def test_set_address(self):
for dut in self.duts:
intf = random_interface(dut)
dut.config(['default interface %s' % intf, 'interface %s' % intf,
'no switchport'])
resource = dut.api('ipinterfaces')
result = resource.set_address(intf, '111.111.111.111/24')
self.assertTrue(result, 'dut=%s' % dut)
config = dut.run_commands('show running-config interfaces %s' %
intf, 'text')
self.assertIn('ip address 111.111.111.111/24',
config[0]['output'], 'dut=%s' % dut)
dut.config('default interface %s' % intf)
def test_set_mtu(self):
for dut in self.duts:
intf = random_interface(dut)
dut.config(['default interface %s' % intf, 'interface %s' % intf,
'ip address 111.111.111.111/24'])
resource = dut.api('ipinterfaces')
result = resource.set_mtu(intf, 2000)
self.assertTrue(result, 'dut=%s' % dut)
config = dut.run_commands('show running-config interfaces %s' %
intf, 'text')
self.assertIn('mtu 2000', config[0]['output'], 'dut=%s' % dut)
dut.config('default interface %s' % intf)
def test_set_mtu_value_as_string(self):
for dut in self.duts:
intf = random_interface(dut)
dut.config(['default interface %s' % intf, 'interface %s' % intf,
'ip address 111.111.111.111/24'])
resource = dut.api('ipinterfaces')
result = resource.set_mtu(intf, '2000')
self.assertTrue(result, 'dut=%s' % dut)
config = dut.run_commands('show running-config interfaces %s' %
intf, 'text')
self.assertIn('mtu 2000', config[0]['output'], 'dut=%s' % dut)
dut.config('default interface %s' % intf)
if __name__ == '__main__':
unittest.main()
| 44.270073 | 77 | 0.59934 |
acf352db794550cdd29c1d30c7312bbc1624ea69 | 1,399 | py | Python | isochrones/scripts.py | pablo-cardenas/isochrone | 1e0aad0b32cb57ea045089ed7f01e6075a9dcd16 | [
"MIT"
] | null | null | null | isochrones/scripts.py | pablo-cardenas/isochrone | 1e0aad0b32cb57ea045089ed7f01e6075a9dcd16 | [
"MIT"
] | null | null | null | isochrones/scripts.py | pablo-cardenas/isochrone | 1e0aad0b32cb57ea045089ed7f01e6075a9dcd16 | [
"MIT"
] | null | null | null | import click
from .utils import to_graph, compute_distances, add_field
import json
@click.group()
def cli():
    # Root click command group; subcommands attach via @cli.command(...) below.
    pass
@cli.command('to-graph')
@click.option('--link', required=True)
@click.option('--node', required=True)
@click.option('--output', type=click.File('w'), required=True)
def to_graph_command(link, node, output):
    """
    Create a json file with the adjacency list representation of
    the graph.
    """
    # Build the graph dict from the link/node inputs and persist it as JSON.
    graph = to_graph(link, node)
    json.dump(graph, output)
@cli.command('compute-distances')
@click.option('--graph', type=click.File(), required=True)
@click.option('--source', required=True)
@click.option('--output', type=click.File('w'), required=True)
def compute_distances_command(graph, source, output):
    """Compute distance from a single source to all nodes"""
    # Load the adjacency-list JSON (as written by `to-graph`), run the
    # single-source distance computation, and dump the result as JSON.
    graph_dict = json.load(graph)
    distances = compute_distances(graph_dict, source)
    json.dump(distances, output)
@cli.command('add-field')
@click.option('--shapefile', required=True)
@click.option('--field', type=click.File(), required=True)
@click.option('--output', required=True)
@click.option('--field-name', default='dist')
@click.option('--field-type', default='float')
def add_field_command(shapefile, field, output, field_name, field_type):
    """Add field to a shapefile"""
    # --field is a JSON file; its parsed contents are handed to add_field(),
    # which writes them into the shapefile under --field-name/--field-type.
    field_dict = json.load(field)
    add_field(shapefile, field_dict, output, field_name, field_type)
| 31.088889 | 72 | 0.707648 |
acf35405b1057c99883b5db3a4324bb313259a6a | 2,395 | py | Python | cv2_test.py | Fabriceli/MachingLearning | d983f87c26f2ced2921030562a82dcd19c02171b | [
"MIT"
] | null | null | null | cv2_test.py | Fabriceli/MachingLearning | d983f87c26f2ced2921030562a82dcd19c02171b | [
"MIT"
] | null | null | null | cv2_test.py | Fabriceli/MachingLearning | d983f87c26f2ced2921030562a82dcd19c02171b | [
"MIT"
] | null | null | null | # -*-coding:utf-8 -*-
# Reference:**********************************************
# @Time : 2019-08-22 21:30
# @Author : Fabrice LI
# @File : cv2_test.py
# @User : liyihao
# @Software: PyCharm
# @Description: line regression
# Reference:**********************************************
import numpy as np
import random
import torch
# hypothesis function
def inference(theta1, theta0, x):
    """Hypothesis h(x) = theta1 * x + theta0 of the linear model."""
    return theta1 * x + theta0
# cost function
def eval_loss(theta1, theta0, x_list, gt_y_list):
    """Average half-squared error of h(x)=theta1*x+theta0 over the batch.

    Assumes x_list and gt_y_list have the same (non-zero) length.
    """
    total = sum(0.5 * (theta1 * xi + theta0 - yi) ** 2
                for xi, yi in zip(x_list, gt_y_list))
    return total / len(gt_y_list)
def gradient(pred_h, gt_y, x):
    """Partial derivatives of the half-squared error w.r.t. (theta1, theta0)."""
    residual = pred_h - gt_y
    return residual * x, residual
def cal_step_gradient(batch_x_list, batch_gt_y_list, w, b, lr):
    """Perform one gradient-descent step over a mini-batch.

    Returns the updated (w, b) pair; assumes both batch lists have the
    same non-zero length.
    """
    batch_size = len(batch_x_list)
    avg_dw = avg_db = 0
    for x, gt_y in zip(batch_x_list, batch_gt_y_list):
        pred_y = inference(w, b, x)
        dw, db = gradient(pred_y, gt_y, x)
        avg_dw += dw
        avg_db += db
    # Average the accumulated gradients, then step against them.
    avg_db /= batch_size
    avg_dw /= batch_size
    return w - lr * avg_dw, b - lr * avg_db
def train(x_list, gt_y_list, batch_size, lr, max_iter):
    """Fit (w, b) with mini-batch SGD for max_iter steps, logging progress."""
    w, b = 0, 0
    num_samples = len(x_list)
    for _ in range(max_iter):
        # Sample a random mini-batch (with replacement) each step.
        chosen = np.random.choice(num_samples, batch_size)
        batch_x = [x_list[j] for j in chosen]
        batch_y = [gt_y_list[j] for j in chosen]
        w, b = cal_step_gradient(batch_x, batch_y, w, b, lr)
        print("w: {0}, b: {1}".format(w, b))
        print("loss is : {0}".format(eval_loss(w, b, x_list, gt_y_list)))
def gen_sample_data():
    """Generate 100 noisy samples from a random line y = w*x + b.

    Returns (x_list, y_list, w, b) where w and b are the ground-truth
    parameters the noise is scattered around.
    """
    w = random.randint(0, 10) + random.random()
    b = random.randint(0, 5) + random.random()
    xs, ys = [], []
    for _ in range(100):
        # Draw order kept as before: randint, random for x; then random,
        # randint for the +/-1 noise term.
        x = random.randint(0, 100) * random.random()
        xs.append(x)
        ys.append(w * x + b + random.random() * random.randint(-1, 1))
    return xs, ys, w, b
def run():
    """Generate a synthetic dataset and fit a line to it with SGD."""
    xs, ys, true_w, true_b = gen_sample_data()
    train(xs, ys, 50, 0.0009, 10000)


if __name__ == '__main__':
    run()
| 27.215909 | 91 | 0.586221 |
acf3551e77ce0f356b08a8621a131152c1e03f13 | 2,708 | py | Python | src/sentry/tasks/commits.py | seukjung/sentry-custom | c5f6bb2019aef3caff7f3e2b619f7a70f2b9b963 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/tasks/commits.py | seukjung/sentry-custom | c5f6bb2019aef3caff7f3e2b619f7a70f2b9b963 | [
"BSD-3-Clause"
] | 6 | 2019-12-29T00:50:11.000Z | 2022-02-10T13:27:24.000Z | src/sentry/tasks/commits.py | seukjung/sentry-custom | c5f6bb2019aef3caff7f3e2b619f7a70f2b9b963 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import logging
import six
from sentry.exceptions import InvalidIdentity, PluginError
from sentry.models import Deploy, Release, ReleaseHeadCommit, Repository, User
from sentry.plugins import bindings
from sentry.tasks.base import instrumented_task, retry
logger = logging.getLogger(__name__)
@instrumented_task(name='sentry.tasks.commits.fetch_commits', queue='commits',
                   default_retry_delay=60 * 5, max_retries=5)
@retry(exclude=(Release.DoesNotExist, User.DoesNotExist,))
def fetch_commits(release_id, user_id, refs, prev_release_id=None, **kwargs):
    """Resolve and attach the commits belonging to a release.

    For each ref (repository name + head commit), ask the repository's
    provider plugin for the commits between a starting sha (an explicit
    previousCommit, or the previous release's head for that repo) and the
    new head, attach them to the release, and nudge pending deploys.
    """
    commit_list = []
    release = Release.objects.get(id=release_id)
    user = User.objects.get(id=user_id)
    prev_release = None
    if prev_release_id is not None:
        # Best effort: a missing previous release just means no baseline.
        try:
            prev_release = Release.objects.get(id=prev_release_id)
        except Release.DoesNotExist:
            pass
    for ref in refs:
        try:
            repo = Repository.objects.get(
                organization_id=release.organization_id,
                name=ref['repository'],
            )
        except Repository.DoesNotExist:
            # Unknown repository for this organization; skip this ref.
            continue
        try:
            provider_cls = bindings.get('repository.provider').get(repo.provider)
        except KeyError:
            # No provider plugin registered for this repository type.
            continue
        # if previous commit isn't provided, try to get from
        # previous release otherwise, try to get
        # recent commits from provider api
        start_sha = None
        if ref.get('previousCommit'):
            start_sha = ref['previousCommit']
        elif prev_release:
            try:
                start_sha = ReleaseHeadCommit.objects.filter(
                    organization_id=release.organization_id,
                    release=prev_release,
                    repository_id=repo.id,
                ).values_list('commit__key', flat=True)[0]
            except IndexError:
                pass
        end_sha = ref['commit']
        provider = provider_cls(id=repo.provider)
        try:
            repo_commits = provider.compare_commits(
                repo, start_sha, end_sha, actor=user
            )
        except NotImplementedError:
            # Provider doesn't support commit comparison; nothing to add.
            pass
        except (PluginError, InvalidIdentity) as e:
            logger.exception(six.text_type(e))
        else:
            commit_list.extend(repo_commits)
    if commit_list:
        release.set_commits(commit_list)
        # Kick any deploys for this release that have not been notified yet.
        deploys = Deploy.objects.filter(
            organization_id=release.organization_id,
            release=release,
            notified=False,
        ).values_list('id', flat=True)
        for d_id in deploys:
            Deploy.notify_if_ready(d_id, fetch_complete=True)
| 33.02439 | 81 | 0.625185 |
acf355d47cc86f0f9c7cc1e48f92b42c3de67565 | 16,102 | py | Python | Python/python-scripts/skrype.py | fundor333/StuffWithScript | 43b0345f4b4319a084d018f9ebfc0f1f68377645 | [
"MIT"
] | null | null | null | Python/python-scripts/skrype.py | fundor333/StuffWithScript | 43b0345f4b4319a084d018f9ebfc0f1f68377645 | [
"MIT"
] | null | null | null | Python/python-scripts/skrype.py | fundor333/StuffWithScript | 43b0345f4b4319a084d018f9ebfc0f1f68377645 | [
"MIT"
] | 1 | 2020-10-06T09:10:50.000Z | 2020-10-06T09:10:50.000Z | #!/usr/bin/env python2.6
#
# Copyright (C) 2010 Richard Mortier <mort@cantab.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import os
import pprint
import re
import string
import struct
import sys
import traceback
# Matches .dbb log file names such as "chatmsg256.dbb": type prefix + record size.
_recordsz_re = re.compile("(?P<ty>[^0-9]+)(?P<sz>[0-9]+)\.dbb")

NUL = '\x00'
NULNUL = NUL + NUL

HDR_SZ = 8  # per-record header: marker (4 bytes) + length field (4 bytes)
SKR_MARKER = struct.pack("4B", 0x6c, 0x33, 0x33, 0x6c)  # "l33l" record marker
SKR_MARKER_LEN = 4
SKR_RECSZ_LEN = 4
SKR_HDR_LEN = SKR_MARKER_LEN + SKR_RECSZ_LEN
SKR_SEQNO_LEN = 4

Verbose = 0  # module-level debug verbosity; raised by the -v command line flag
class Logtype:
    """Known .dbb file-name prefixes, grouped by the kind of record they hold."""
    calls = ('call',) ## cdr for call initiator
    cdrs = ('callmember',) ## cdr for other call members, one incl. duration
    mucs = ('chat',) ## chat meta-data for mucs; incl. chat msgs for 1-1 chats
    messages = ('chatmsg',) ## chat messages;
    chatmembers = ('chatmember',) ## chat metadata: speakers
    profiles = ('user', 'profile',) ## user profiles: others, mine
    ## contactgroup: list of usernames mapping to contact groups
    ## transfer: file transfer metadata (dstpath, sourcename, size, sender)
    ## voicemail: voicemail metadata (filename of local file containing msg you left)
    unknown = ('call', 'callmember', 'chat', 'chatmsg', 'user', 'profile',
               'chatmember', 'contactgroup', 'transfer', 'voicemail',
               )
class SkrypeExc(Exception): pass
def fmtexc(e, with_tb=False):
    """Format an exception as 'ClassName: message'.

    When with_tb is true, append one '# file@line:function' line per frame of
    the currently handled traceback (must be called from an except block).
    """
    frames = traceback.extract_tb(sys.exc_info()[2])
    text = '%s: %s' % (e.__class__.__name__, str(e))
    if with_tb:
        frame_lines = ['# %s@%s:%s' % (fname, lineno, func)
                       for (fname, lineno, func, _) in frames]
        text += '\n%s' % ('\n'.join(frame_lines),)
    return text
def isprintable(b):
    """True when single character b is printable and not whitespace (space is allowed)."""
    if b not in string.printable:
        return False
    return b == " " or b not in string.whitespace
def btos(bs, ascii=False, sep='.'):
    """Render a byte string as sep-joined hex pairs.

    With ascii=True, printable non-whitespace characters pass through verbatim.
    None or the empty string render as "".
    """
    if bs == None or bs == "":
        return ""
    parts = []
    for ch in bs:
        if ascii and isprintable(ch):
            parts.append(ch)
        else:
            parts.append('%0.2x' % (ord(ch),))
    return sep.join(parts)
def fmtbs(bs, prefix=" : ", ascii=False):
    """Hex-dump bs, 16 bytes per line, each line led by a newline and prefix."""
    lines = []
    for off in range(0, len(bs), 16):
        lines.append('\n%s0x%s' % (prefix, btos(bs[off:off + 16], ascii)))
    return "".join(lines)
#
# item parsers
#
def parse_number(label, bs, i):
    """Decode a varint (7 data bits per byte, high bit = continuation) at i + 2.

    Returns (label, value, next_index); raises SkrypeExc on truncated input.
    The two-byte offsets skip the indicator before and a trailer after the
    value -- presumably part of the wire format; confirm against real logs.
    """
    try:
        j = i + 2
        shift = n = 0
        while ord(bs[j]) & 0x80:  # high bit set: another byte follows
            n |= ((ord(bs[j]) & 0x7f) << shift)
            shift += 7
            j += 1
        n |= ((ord(bs[j]) & 0x7f) << shift)
        return label, n, j + 2
    except IndexError, ie:
        raise SkrypeExc("bad %s exc:%s i:%s bs:%s" % (
            label, fmtexc(ie), i, fmtbs(bs[i + 2:j + 2], prefix="# :")))
def parse_string(label, bs, i):
    """Extract a NUL-terminated string starting at i + 2.

    Returns (label, value, next_index); raises SkrypeExc on truncated input.
    """
    try:
        j = i + 2
        while bs[j] != NUL: j += 1
        return label, ''.join(bs[i + 2:j]), j + 1
    except IndexError, ie:
        raise SkrypeExc("bad %s exc:%s i:%s bs:%s" % (
            label, fmtexc(ie), i, fmtbs(bs[i + 2:j + 2], prefix="# :")))
class MessageIndicator:
    """Two-byte field tags seen in chatmsg records (several aliases per field)."""
    chatid = b'\xe0\x03'
    chatid2 = b'\xb8\x03'
    timestamp = b'\xe5\x03'
    username = b'\xe8\x03'
    username2 = b'\xc0\x03'
    username3 = b'\xc8\x03'
    displayname = b'\xec\x03'
    message = b'\xfc\x03'
    message2 = b'\xf4\x03'
    message3 = b'\x03\x37'
    displaymsg = b'\xd8\x03'

# indicator -> parser; every parser returns (key, value, next_index).
MessageParsers = {
    MessageIndicator.chatid: lambda bs, i: parse_string('chatid', bs, i),
    MessageIndicator.chatid2: lambda bs, i: parse_string('chatid', bs, i),
    MessageIndicator.timestamp: lambda bs, i: parse_number("timestamp", bs, i),
    MessageIndicator.username: lambda bs, i: parse_string('username', bs, i),
    MessageIndicator.username2: lambda bs, i: parse_string('username', bs, i),
    MessageIndicator.username3: lambda bs, i: parse_string('username', bs, i),
    MessageIndicator.displayname: lambda bs, i: parse_string('displayname', bs, i),
    MessageIndicator.message: lambda bs, i: parse_string('message', bs, i),
    MessageIndicator.message2: lambda bs, i: parse_string('message', bs, i),
    MessageIndicator.message3: lambda bs, i: parse_string('message', bs, i),
    MessageIndicator.displaymsg: lambda bs, i: parse_string('displaymsg', bs, i),
}
class MucIndicator:
    """Two-byte field tags seen in chat (multi-user chat) records."""
    chatname = b'\xd8\x03'
    actives = b'\xcc\x03'
    members = b'\xc8\x03'
    members2 = b'\xd4\x03'
    speaker = b'\xbc\x06'
    member = b'\x03\x00'
    chatid = b'\xb8\x03'
    description = b'\xb8\x06'
    message = b'\x037'
    timestamp = b'\xb5\x04'

# indicator -> parser for muc records; two tags are deliberately disabled below.
MucParsers = {
    MucIndicator.chatid: lambda bs, i: parse_string('chatid', bs, i),
    MucIndicator.timestamp: lambda bs, i: parse_number('timestamp', bs, i),
    MucIndicator.chatname: lambda bs, i: parse_string('chatname', bs, i),
    MucIndicator.actives: lambda bs, i: parse_string('actives', bs, i),
    MucIndicator.members: lambda bs, i: parse_string('members', bs, i),
    ## MucIndicator.members2: lambda bs, i: parse_string('members', bs,i),
    MucIndicator.speaker: lambda bs, i: parse_string('speaker', bs, i),
    ## MucIndicator.member: lambda bs, i: parse_string('member', bs,i),
    MucIndicator.description: lambda bs, i: parse_string('description', bs, i),
    MucIndicator.message: lambda bs, i: parse_string('message', bs, i),
}
class ProfileIndicator:
    """Two-byte field tags seen in user/profile records."""
    username = b'\x03\x10'
    displayname = b'\x03\x14'
    country = b'\x03\x28'
    language = b'\x03\x24'
    city = b'\x03\x30'
    phone = b'\x03\x34'
    office = b'\x03\x38'
    mobile = b'\x03\x3c'
    pstn = b'\x03\x18'
    label = b'\x84\x01'

# indicator -> parser for profile records.
ProfileParsers = {
    ProfileIndicator.username: lambda bs, i: parse_string('username', bs, i),
    ProfileIndicator.displayname: lambda bs, i: parse_string('displayname', bs, i),
    ProfileIndicator.language: lambda bs, i: parse_string('language', bs, i),
    ProfileIndicator.country: lambda bs, i: parse_string('country', bs, i),
    ProfileIndicator.city: lambda bs, i: parse_string('city', bs, i),
    ProfileIndicator.phone: lambda bs, i: parse_string('phone', bs, i),
    ProfileIndicator.office: lambda bs, i: parse_string('office', bs, i),
    ProfileIndicator.mobile: lambda bs, i: parse_string('mobile', bs, i),
    ProfileIndicator.pstn: lambda bs, i: parse_string('pstn', bs, i),
    ProfileIndicator.label: lambda bs, i: parse_string('label', bs, i),
}
class CallIndicator:
    """Two-byte field tags seen in call records (initiator-side CDRs)."""
    timestamp = b'\xa1\x01'
    cdrid = b'\xe4\x06'
    username = b'\xa4\x01'
    usernamex = b'\xc8\x06'
    duration = b'\x85\x02'
    pstn_number = b'\x80\x02'
    pstn_status = b'\x8c\x02'
    chatname = b'\xfc\x01'

# indicator -> parser for call records; note: duration has no parser entry.
CallParsers = {
    CallIndicator.timestamp: lambda bs, i: parse_number("timestamp", bs, i),
    CallIndicator.username: lambda bs, i: parse_string('username', bs, i),
    CallIndicator.usernamex: lambda bs, i: parse_string('username', bs, i),
    CallIndicator.pstn_number: lambda bs, i: parse_string('pstn-number', bs, i),
    CallIndicator.pstn_status: lambda bs, i: parse_string('pstn-status', bs, i),
    CallIndicator.cdrid: lambda bs, i: parse_string('cdr-id', bs, i),
    CallIndicator.chatname: lambda bs, i: parse_string('chatname', bs, i),
}
class CdrIndicator:
    """Two-byte field tags seen in callmember (per-member CDR) records."""
    duration = b'\xa5\x07'
    username = b'\x98\x07'
    displayname = b'\x9c\x07'
    cdrid = b'\xb8\x01'
    forwarder = b'\x84\x07'
    pickup = b'\xe5\x19'

# indicator -> parser for callmember records.
CdrParsers = {
    CdrIndicator.duration: lambda bs, i: parse_number("duration", bs, i),
    CdrIndicator.username: lambda bs, i: parse_string("username", bs, i),
    CdrIndicator.displayname: lambda bs, i: parse_string("displayname", bs, i),
    CdrIndicator.cdrid: lambda bs, i: parse_string('cdr-id', bs, i),
    CdrIndicator.forwarder: lambda bs, i: parse_string('forwarder', bs, i),
    CdrIndicator.pickup: lambda bs, i: parse_number('pickup', bs, i),
}
class ChatmemberIndicator:
    """Two-byte field tags seen in chatmember records."""
    chatid = b'\xc8\x04'
    member = b'\xcc\x04'

# indicator -> parser for chatmember records.
ChatmemberParsers = {
    ChatmemberIndicator.chatid: lambda bs, i: parse_string("chatid", bs, i),
    ChatmemberIndicator.member: lambda bs, i: parse_string("member", bs, i),
}

# No parsers: records of unrecognised types are treated entirely as junk.
UnknownParsers = {
}
#
# parse harness
#
def resync(ps, bs, i):
    """Scan forward from i until a two-byte indicator known to ps starts.

    Returns (start, stop, skipped) where skipped = bs[start:stop] is the
    unrecognised stretch that was jumped over.
    """
    j = i
    while j < len(bs):
        if bs[j:j + 2] in ps:
            break
        j += 1
    return i, j, bs[i:j]
def parse_items(ps, bs, with_junk=False):
    """Parse all recognisable (indicator, value) items out of a record body.

    Unparseable stretches are skipped via resync (and recorded under 'junk'
    when with_junk is set); a key seen more than once collects into a list.
    """
    ## skip to recognised indicator
    oi, i, junk = resync(ps, bs, 0)
    d = {}
    if with_junk: d['junk'] = [(oi, btos(junk, ascii=True)), ]
    while i < len(bs):
        try:
            (indicator,) = struct.unpack("2s", bs[i:i + 2])
            key, value, i = ps[indicator](bs, i)
            if key not in d:
                d[key] = value
            else:
                # promote to a list on the second occurrence of the same key
                if not isinstance(d[key], list): d[key] = [d[key]]
                d[key].append(value)
        except struct.error, se:
            # fewer than two bytes left for the indicator
            print >> sys.stderr, "# struct.%s" % (fmtexc(se, with_tb=True),)
            oi, i, junk = resync(ps, bs, i + 1)
            if with_junk: d['junk'].append((oi, btos(junk, ascii=True)))
        except KeyError:
            # indicator not in this record type's parser table
            print >> sys.stderr, "# unknown indicator: i:%s ind:%s" % (
                i, btos(indicator),)
            oi, i, junk = resync(ps, bs, i + 1)
            if with_junk: d['junk'].append((oi, btos(junk, ascii=True)))
        except SkrypeExc, se:
            print >> sys.stderr, "# %s" % (fmtexc(se, with_tb=True),)
            oi, i, junk = resync(ps, bs, i + 1)
            if with_junk: d['junk'].append((oi, btos(junk, ascii=True)))
        except Exception, e:
            # last-resort catch: log and resynchronise rather than abort the record
            print >> sys.stderr, "%s\ni:%s%s" % (
                fmtexc(e, with_tb=True), i, fmtbs(bs[i:]))
            oi, i, junk = resync(ps, bs, i + 1)
            if with_junk: d['junk'].append((oi, btos(junk, ascii=True)))
    return d
def parse(ps, bs, with_junk=False):
    """Split a record payload into its little-endian sequence number and parsed items."""
    (seqno,) = struct.unpack("<L", bs[:SKR_SEQNO_LEN])
    items = parse_items(ps, bs[SKR_SEQNO_LEN:], with_junk)
    return {'seqno': seqno, 'items': items}
#
# entry points
#
def record(bs, ps, with_junk=False, with_raw=False):
    """Parse one framed record out of bs using parser table ps.

    Returns a dict with 'marker', 'length' and the parsed 'value' (plus 'raw'
    when with_raw is set).  Raises SkrypeExc on a bad frame marker.
    """
    (marker, skr_len,) = struct.unpack("<4s L", bs[:SKR_HDR_LEN])
    if marker != SKR_MARKER:
        # was `raise FormatExc(...)`: FormatExc is defined nowhere in this
        # module, so the failure path itself raised NameError.
        raise SkrypeExc("bad marker")
    rec = {'marker': marker,
           'length': skr_len,
           'value': parse(ps, bs[SKR_HDR_LEN:SKR_HDR_LEN + skr_len],
                          with_junk),
           }
    if with_raw: rec['raw'] = bs
    return rec
def records(m, ps, with_junk=False, with_raw=False):
    """Yield every framed record from the .dbb file named by regex match m.

    m carries the filename (m.string) and the fixed record size (group 'sz').
    Stops at EOF or at a zero-length record; raises SkrypeExc on a bad marker.
    """
    sz = int(m.group('sz'))
    with open(m.string, 'rb') as f:
        while True:
            bs = f.read(HDR_SZ + sz)
            if Verbose > 1:
                # parenthesised so the statement parses under Python 3 too
                print("sz:%d bs:\n%s" % (sz, fmtbs(bs, ascii=True)))
            if len(bs) == 0: break
            (marker, skr_len,) = struct.unpack("<4s L", bs[:SKR_HDR_LEN])
            if marker != SKR_MARKER:
                # was `raise FormatExc(...)`: FormatExc is never defined, so
                # this path raised NameError instead of the intended error.
                raise SkrypeExc("bad marker")
            if skr_len == 0: break
            rec = {'marker': marker,
                   'length': skr_len,
                   'value': parse(ps, bs[SKR_HDR_LEN:SKR_HDR_LEN + skr_len],
                                  with_junk),
                   }
            if with_raw: rec['raw'] = bs
            yield rec
def messages(fn, with_junk=False, with_raw=False):
    """Yield parsed chat-message records from a chatmsg*.dbb file."""
    match = _recordsz_re.match(fn)
    if match is None:
        raise SkrypeExc("bad log filename")
    logtype = os.path.basename(match.group("ty"))
    if logtype not in Logtype.messages:
        raise SkrypeExc("bad messages fn:%s" % (fn,))
    return records(match, MessageParsers, with_junk, with_raw)
def mucs(fn, with_junk=False, with_raw=False):
    """Yield parsed multi-user-chat metadata records from a chat*.dbb file."""
    match = _recordsz_re.match(fn)
    if match is None:
        raise SkrypeExc("bad log filename")
    logtype = os.path.basename(match.group("ty"))
    if logtype not in Logtype.mucs:
        raise SkrypeExc("bad mucs fn:%s" % (fn,))
    return records(match, MucParsers, with_junk, with_raw)
def profiles(fn, with_junk=False, with_raw=False):
    """Yield parsed user-profile records from a user*/profile*.dbb file."""
    match = _recordsz_re.match(fn)
    if match is None:
        raise SkrypeExc("bad log filename")
    logtype = os.path.basename(match.group("ty"))
    if logtype not in Logtype.profiles:
        raise SkrypeExc("bad profiles fn:%s" % (fn,))
    return records(match, ProfileParsers, with_junk, with_raw)
def calls(fn, with_junk=False, with_raw=False):
    """Yield parsed call records from a call*.dbb file."""
    match = _recordsz_re.match(fn)
    if match is None:
        raise SkrypeExc("bad log filename")
    logtype = os.path.basename(match.group("ty"))
    if logtype not in Logtype.calls:
        raise SkrypeExc("bad calls fn:%s" % (fn,))
    return records(match, CallParsers, with_junk, with_raw)
def cdrs(fn, with_junk=False, with_raw=False):
    """Yield parsed per-member CDR records from a callmember*.dbb file.

    Raises SkrypeExc if fn does not look like a .dbb log or is not a
    callmember log.
    """
    m = _recordsz_re.match(fn)
    if not m: raise SkrypeExc("bad log filename")
    ty = os.path.basename(m.group("ty"))
    if ty not in Logtype.cdrs:
        # was "bad calls fn" -- copy-paste from calls(); report the right type
        raise SkrypeExc("bad cdrs fn:%s" % (fn,))
    ps = CdrParsers
    return records(m, ps, with_junk, with_raw)
def chatmembers(fn, with_junk=False, with_raw=False):
    """Yield parsed chat-membership records from a chatmember*.dbb file."""
    match = _recordsz_re.match(fn)
    if match is None:
        raise SkrypeExc("bad log filename")
    logtype = os.path.basename(match.group("ty"))
    if logtype not in Logtype.chatmembers:
        raise SkrypeExc("bad chatmembers fn:%s" % (fn,))
    return records(match, ChatmemberParsers, with_junk, with_raw)
def unknown(fn, with_junk=False, with_raw=False):
    """Yield records from any recognised .dbb file with an empty parser table.

    With UnknownParsers empty every record body ends up as junk, which is
    useful for eyeballing log types that have no dedicated parser yet.
    """
    m = _recordsz_re.match(fn)
    if not m: raise SkrypeExc("bad log filename")
    ty = os.path.basename(m.group("ty"))
    if ty not in Logtype.unknown:
        # was "bad calls fn" -- copy-paste from calls(); report the right type
        raise SkrypeExc("bad unknown fn:%s" % (fn,))
    ps = UnknownParsers
    return records(m, ps, with_junk, with_raw)
def process(fn, with_junk=False, with_raw=False):
    """Dispatch fn to the matching record iterator.

    Returns a (kind, iterator) pair, or None when no group matches.
    """
    match = _recordsz_re.match(fn)
    if match is None:
        raise SkrypeExc("bad log filename")
    logtype = os.path.basename(match.group("ty"))
    # Checked in order: Logtype.unknown overlaps every other group, so it is
    # tested last and only catches types with no dedicated reader.
    dispatch = (
        ("calls", Logtype.calls, calls),
        ("cdrs", Logtype.cdrs, cdrs),
        ("messages", Logtype.messages, messages),
        ("mucs", Logtype.mucs, mucs),
        ("profiles", Logtype.profiles, profiles),
        ("chatmembers", Logtype.chatmembers, chatmembers),
        ("unknown", Logtype.unknown, unknown),
    )
    for kind, members, reader in dispatch:
        if logtype in members:
            return (kind, reader(fn, with_junk, with_raw))
#
# splice multiple streams together in seqno order
#
class R:
    """A parsed record tagged with its type and source stream.

    Instances order by their record sequence number so that streams from
    several files can be merge-sorted (Python 2 __cmp__ ordering hook).
    """

    def __init__(self, ty, rec, stream):
        self.ty = ty
        self.rec = rec
        self.stream = stream
        self.seqno = rec['value']['seqno']

    def __cmp__(self, other):
        # classic three-way compare: -1, 0 or 1
        return (self.seqno > other.seqno) - (self.seqno < other.seqno)

    def __repr__(self):
        return "%s<%s> [%s]" % (self.ty, self.seqno, pprint.pformat(self.rec))
def splice(fns, with_junk=False, with_raw=False):
    """Merge records from several .dbb files, yielding (type, record) pairs
    in globally increasing sequence-number order (n-way merge).
    """
    def nex((ty, recs)):  # Python 2 tuple-parameter unpacking
        # wrap the next record of a stream, or return None when it is exhausted
        try:
            return R(ty, recs.next(), recs)
        except StopIteration:
            pass
    records = filter(None, map(nex, map(
        lambda fn: process(fn, with_junk, with_raw), fns)))
    records.sort()
    while len(records) > 0:
        r = records[0]
        yield (r.ty, r.rec)
        try:
            # advance the stream we just consumed from, then re-establish order
            records[0] = R(r.ty, records[0].stream.next(), r.stream)
        except StopIteration:
            del records[0]
        records.sort()
#
# main
#
if __name__ == '__main__':
    # Usage: skrype.py [-v] FILE...  (-v turns on verbose record dumping)
    if sys.argv[1] == '-v':
        Verbose = 2
        fns = sys.argv[2:]
    else:
        fns = sys.argv[1:]
    for r in splice(fns): pprint.pprint(r)
| 31.326848 | 85 | 0.604707 |
acf357aa97aff2e400ce373128f6ba24338e95a9 | 859 | py | Python | openpype/tools/loader/__main__.py | Tilix4/OpenPype | 8909bd890170880aa7ec8b673abaa25a9bdf40f2 | [
"MIT"
] | 1 | 2022-02-08T15:40:41.000Z | 2022-02-08T15:40:41.000Z | openpype/tools/loader/__main__.py | zafrs/OpenPype | 4b8e7e1ed002fc55b31307efdea70b0feaed474f | [
"MIT"
] | 2 | 2022-03-18T01:46:03.000Z | 2022-03-18T01:46:16.000Z | openpype/tools/loader/__main__.py | zafrs/OpenPype | 4b8e7e1ed002fc55b31307efdea70b0feaed474f | [
"MIT"
] | null | null | null | """Main entrypoint for standalone debugging
Used for running 'avalon.tool.loader.__main__' as a module (-m), useful for
debugging without need to start host.
Modify AVALON_MONGO accordingly
"""
import os
import sys
from . import cli
def my_exception_hook(exctype, value, traceback):
    """Excepthook replacement: print, delegate to the default hook, exit(1).

    Parameters mirror sys.excepthook's (type, value, traceback) signature.
    """
    # Print the error and traceback
    print(exctype, value, traceback)
    # Delegate to the interpreter's default hook.  The original called
    # sys._excepthook, which is never defined anywhere (nothing in this
    # module assigns it) -- the saved default hook is sys.__excepthook__.
    sys.__excepthook__(exctype, value, traceback)
    sys.exit(1)
if __name__ == '__main__':
    # Hard-coded debugging environment; adjust the Mongo URL / asset to taste.
    os.environ["OPENPYPE_MONGO"] = "mongodb://localhost:27017"
    os.environ["AVALON_DB"] = "avalon"
    os.environ["AVALON_TIMEOUT"] = "1000"
    os.environ["OPENPYPE_DEBUG"] = "1"
    os.environ["AVALON_ASSET"] = "Jungle"
    # Set the exception hook to our wrapping function
    sys.excepthook = my_exception_hook
    sys.exit(cli(sys.argv[1:]))
| 26.84375 | 79 | 0.699651 |
acf358c347adece9e955671bb9d900a1c277a042 | 5,163 | py | Python | tests/models/programdb/control/control_unit_test.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
] | 4 | 2018-08-26T09:11:36.000Z | 2019-05-24T12:01:02.000Z | tests/models/programdb/control/control_unit_test.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
] | 52 | 2018-08-24T12:51:22.000Z | 2020-12-28T04:59:42.000Z | tests/models/programdb/control/control_unit_test.py | weibullguy/ramstk | 3ec41d7e2933045a7a8028aed6c6b04365495095 | [
"BSD-3-Clause"
] | 1 | 2018-10-11T07:57:55.000Z | 2018-10-11T07:57:55.000Z | # pylint: skip-file
# type: ignore
# -*- coding: utf-8 -*-
#
# tests.models.programdb.control.control_unit_test.py is part of The RAMSTK
# Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Class for testing FMEA Control algorithms and models."""
# Third Party Imports
import pytest
from pubsub import pub
from treelib import Tree
# RAMSTK Package Imports
from ramstk.models.dbrecords import RAMSTKControlRecord
from ramstk.models.dbtables import RAMSTKControlTable
from tests import (
MockDAO,
UnitTestDeleteMethods,
UnitTestGetterSetterMethods,
UnitTestInsertMethods,
UnitTestSelectMethods,
)
@pytest.mark.usefixtures("test_record_model", "unit_test_table_model")
class TestCreateControlModels:
    """Class for unit testing Control model __init__() methods.

    Because each table model contains unique attributes, these methods must be
    local to the module being tested.
    """

    @pytest.mark.unit
    def test_record_model_create(self, test_record_model):
        """Should create a Control record model with fixture attributes set."""
        assert isinstance(test_record_model, RAMSTKControlRecord)

        # Class attributes should reflect the fixture's values.
        assert test_record_model.__tablename__ == "ramstk_control"
        assert test_record_model.description == "Test FMEA Control #1 for Cause ID #3."
        assert test_record_model.type_id == "Detection"

    @pytest.mark.unit
    def test_table_model_create(self, unit_test_table_model):
        """Should create a Control table model with defaults and subscriptions."""
        assert isinstance(unit_test_table_model, RAMSTKControlTable)
        assert isinstance(unit_test_table_model.tree, Tree)
        assert isinstance(unit_test_table_model.dao, MockDAO)

        assert unit_test_table_model._db_id_colname == "fld_control_id"
        assert unit_test_table_model._db_tablename == "ramstk_control"
        assert unit_test_table_model._tag == "control"
        assert unit_test_table_model._root == 0
        assert unit_test_table_model._revision_id == 0
        assert unit_test_table_model._parent_id == 0
        assert unit_test_table_model.last_id == 0

        # Every (listener, topic) pair the table model must be subscribed to.
        for _listener, _topic in [
            (unit_test_table_model.do_select_all, "selected_revision"),
            (unit_test_table_model.do_get_attributes,
             "request_get_control_attributes"),
            (unit_test_table_model.do_set_attributes,
             "request_set_control_attributes"),
            (unit_test_table_model.do_set_attributes, "wvw_editing_control"),
            (unit_test_table_model.do_update, "request_update_control"),
            (unit_test_table_model.do_get_tree, "request_get_control_tree"),
            (unit_test_table_model.do_delete, "request_delete_control"),
            (unit_test_table_model.do_insert, "request_insert_control"),
        ]:
            assert pub.isSubscribed(_listener, _topic)
@pytest.mark.usefixtures("test_attributes", "unit_test_table_model")
class TestSelectControl(UnitTestSelectMethods):
    """Class for unit testing Control table do_select() and do_select_all()."""

    __test__ = True  # opt the inherited shared tests in for this subclass

    _record = RAMSTKControlRecord  # record class the shared tests work with
    _tag = "control"  # pubsub topic tag for the Control table
@pytest.mark.usefixtures("test_attributes", "unit_test_table_model")
class TestInsertControl(UnitTestInsertMethods):
    """Class for unit testing Control table do_insert() method."""

    __test__ = True  # opt the inherited shared tests in for this subclass

    _next_id = 0
    _record = RAMSTKControlRecord
    _tag = "control"

    @pytest.mark.skip(reason="Control records are non-hierarchical.")
    def test_do_insert_child(self, test_attributes, unit_test_table_model):
        """Should not run because Controls are not hierarchical."""
        pass
@pytest.mark.usefixtures("test_attributes", "unit_test_table_model")
class TestDeleteControl(UnitTestDeleteMethods):
    """Class for unit testing Control table do_delete() method."""

    __test__ = True  # opt the inherited shared tests in for this subclass

    _next_id = 0
    _record = RAMSTKControlRecord
    _tag = "control"
@pytest.mark.usefixtures("test_attributes", "test_record_model")
class TestGetterSetterControl(UnitTestGetterSetterMethods):
    """Class for unit testing Control table methods that get or set."""

    __test__ = True  # opt the inherited shared tests in for this subclass

    # ID key columns; presumably stripped from the attribute dict by the
    # inherited getter/setter tests -- see UnitTestGetterSetterMethods.
    _id_columns = [
        "revision_id",
        "hardware_id",
        "mode_id",
        "mechanism_id",
        "cause_id",
        "control_id",
    ]

    _test_attr = "type_id"
    _test_default_value = ""

    @pytest.mark.unit
    def test_get_record_model_attributes(self, test_record_model):
        """Should return a dict of attribute key:value pairs.

        This method must be local because the attributes are different for each
        database record model.
        """
        _attributes = test_record_model.get_attributes()

        assert isinstance(_attributes, dict)
        assert _attributes["description"] == "Test FMEA Control #1 for Cause ID #3."
        assert _attributes["type_id"] == "Detection"
| 33.967105 | 88 | 0.709278 |
acf35998245f9833e14d403b1694a3b10d081886 | 8,862 | py | Python | benchmarks/f3_wrong_hints_permutations/scaling_nonlinear_software/12-19_7.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/f3_wrong_hints_permutations/scaling_nonlinear_software/12-19_7.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/f3_wrong_hints_permutations/scaling_nonlinear_software/12-19_7.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z | from typing import FrozenSet, Tuple
import pysmt.typing as types
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
                                              FNode]:
    """Build the benchmark transition system over pc, x, y, z.

    Returns (symbols, init, trans, fairness): the symbol set, the initial
    condition, the transition relation and the fairness constraint (pc != -1,
    i.e. the program never reaches the terminated location).
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    pc = mgr.Symbol("pc", types.INT)
    x = mgr.Symbol("x", types.INT)
    y = mgr.Symbol("y", types.INT)
    z = mgr.Symbol("z", types.INT)
    x_pc = symb_to_next(mgr, pc)
    x_x = symb_to_next(mgr, x)
    x_y = symb_to_next(mgr, y)
    x_z = symb_to_next(mgr, z)
    symbols = frozenset([pc, x, y, z])

    n_locs = 5
    int_bound = n_locs
    pcs = []
    x_pcs = []
    ints = [mgr.Int(i) for i in range(int_bound)]
    # pcs[l] / x_pcs[l]: current / next program counter equals location l.
    for l in range(n_locs):
        n = ints[l]
        pcs.append(mgr.Equals(pc, n))
        x_pcs.append(mgr.Equals(x_pc, n))
    m_1 = mgr.Int(-1)
    # pc == -1 encodes the terminated ("end") location.
    pcend = mgr.Equals(pc, m_1)
    x_pcend = mgr.Equals(x_pc, m_1)

    # initial location.
    init = pcs[0]

    # control flow graph.
    cfg = mgr.And(
        # pc = -1 : -1,
        mgr.Implies(pcend, x_pcend),
        # pc = 0 & !(y >= 1) : -1,
        mgr.Implies(mgr.And(pcs[0], mgr.Not(mgr.GE(y, ints[1]))), x_pcend),
        # pc = 0 & y >= 1 : 1,
        mgr.Implies(mgr.And(pcs[0], mgr.GE(y, ints[1])), x_pcs[1]),
        # pc = 1 & !(z >= 1) : -1,
        mgr.Implies(mgr.And(pcs[1], mgr.Not(mgr.GE(z, ints[1]))), x_pcend),
        # pc = 1 & z >= 1 : 2,
        mgr.Implies(mgr.And(pcs[1], mgr.GE(z, ints[1])), x_pcs[2]),
        # pc = 2 & !(x >= 0) : -1,
        mgr.Implies(mgr.And(pcs[2], mgr.Not(mgr.GE(x, ints[0]))), x_pcend),
        # pc = 2 & x >= 0 : 3,
        mgr.Implies(mgr.And(pcs[2], mgr.GE(x, ints[0])), x_pcs[3]),
        # pc = 3 : 4,
        mgr.Implies(pcs[3], x_pcs[4]),
        # pc = 4 : 2,
        mgr.Implies(pcs[4], x_pcs[2]))

    # transition labels: data updates attached to each CFG edge.
    labels = mgr.And(
        # (pc = -1 & pc' = -1) -> (x' = x & y' = y & z' = z),
        mgr.Implies(
            mgr.And(pcend, x_pcend),
            mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
                    mgr.Equals(x_z, z))),
        # (pc = 0 & pc' = -1) -> (x' = x & y' = y & z' = z),
        mgr.Implies(
            mgr.And(pcs[0], x_pcend),
            mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
                    mgr.Equals(x_z, z))),
        # (pc = 0 & pc' = 1) -> (x' = x & y' = y & z' = z),
        mgr.Implies(
            mgr.And(pcs[0], x_pcs[1]),
            mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
                    mgr.Equals(x_z, z))),
        # (pc = 1 & pc' = -1) -> (x' = x & y' = y & z' = z),
        mgr.Implies(
            mgr.And(pcs[1], x_pcend),
            mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
                    mgr.Equals(x_z, z))),
        # (pc = 1 & pc' = 2) -> (x' = x & y' = y & z' = z),
        mgr.Implies(
            mgr.And(pcs[1], x_pcs[2]),
            mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
                    mgr.Equals(x_z, z))),
        # (pc = 2 & pc' = -1) -> (x' = x & y' = y & z' = z),
        mgr.Implies(
            mgr.And(pcs[2], x_pcend),
            mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
                    mgr.Equals(x_z, z))),
        # (pc = 2 & pc' = 3) -> (x' = x & y' = y & z' = z),
        mgr.Implies(
            mgr.And(pcs[2], x_pcs[3]),
            mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
                    mgr.Equals(x_z, z))),
        # (pc = 3 & pc' = 4) -> (x' = y*z - 1 & y' = y & z' = z),
        mgr.Implies(
            mgr.And(pcs[3], x_pcs[4]),
            mgr.And(mgr.Equals(x_x, mgr.Minus(mgr.Times(y, z), ints[1])),
                    mgr.Equals(x_y, y), mgr.Equals(x_z, z))),
        # (pc = 4 & pc' = 2) -> (x' = x & y' = y+1 & z' = z),
        mgr.Implies(
            mgr.And(pcs[4], x_pcs[2]),
            mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, mgr.Plus(y, ints[1])),
                    mgr.Equals(x_z, z))))

    # transition relation.
    trans = mgr.And(cfg, labels)

    # fairness.
    fairness = mgr.Not(pcend)

    return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    """Return the set of hints for this benchmark instance.

    NOTE(review): this benchmark family ("wrong hints") intentionally mixes in
    unhelpful hints -- do not assume every hint is consistent with the
    transition system built above.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    pc = mgr.Symbol("pc", types.INT)
    x = mgr.Symbol("x", types.INT)
    y = mgr.Symbol("y", types.INT)
    z = mgr.Symbol("z", types.INT)
    symbs = frozenset([pc, x, y, z])

    x_pc = symb_to_next(mgr, pc)
    x_x = symb_to_next(mgr, x)
    x_y = symb_to_next(mgr, y)
    x_z = symb_to_next(mgr, z)

    res = []
    i_0 = mgr.Int(0)
    i_1 = mgr.Int(1)
    i_2 = mgr.Int(2)
    i_3 = mgr.Int(3)

    # h_z1: z stays >= 3 and strictly increases.
    loc0 = Location(env, mgr.GE(z, i_3))
    loc0.set_progress(0, mgr.GT(x_z, z))
    h_z = Hint("h_z1", env, frozenset([z]), symbs)
    h_z.set_locs([loc0])
    res.append(h_z)

    # h_pc1: four-location pc cycle.
    loc0 = Location(env, mgr.Equals(pc, i_0))
    loc0.set_progress(1, mgr.Equals(x_pc, i_1))
    loc1 = Location(env, mgr.Equals(pc, i_1))
    loc1.set_progress(2, mgr.Equals(x_pc, i_2))
    loc2 = Location(env, mgr.Equals(pc, i_2))
    loc2.set_progress(0, mgr.Equals(x_pc, i_3))
    loc3 = Location(env, mgr.Equals(pc, i_3))
    loc3.set_progress(0, mgr.Equals(x_pc, i_0))
    h_pc = Hint("h_pc1", env, frozenset([pc]), symbs)
    h_pc.set_locs([loc0, loc1, loc2, loc3])
    res.append(h_pc)

    # h_pc4: pc alternates between location 2 and the tail (pc >= 3).
    loc0 = Location(env, mgr.Equals(pc, i_2))
    loc0.set_progress(1, mgr.GT(x_pc, i_2))
    loc1 = Location(env, mgr.GE(pc, i_3))
    loc1.set_progress(2, mgr.GE(x_pc, i_3))
    loc2 = Location(env, mgr.GE(pc, i_3))
    loc2.set_progress(0, mgr.Equals(x_pc, i_2))
    h_pc = Hint("h_pc4", env, frozenset([pc]), symbs)
    h_pc.set_locs([loc0, loc1, loc2])
    res.append(h_pc)

    # h_pc0: three-location pc cycle 1 -> 2 -> 3 -> 1.
    loc0 = Location(env, mgr.Equals(pc, i_1))
    loc0.set_progress(1, mgr.Equals(x_pc, i_2))
    loc1 = Location(env, mgr.Equals(pc, i_2))
    loc1.set_progress(2, mgr.Equals(x_pc, i_3))
    loc2 = Location(env, mgr.Equals(pc, i_3))
    loc2.set_progress(0, mgr.Equals(x_pc, i_1))
    h_pc = Hint("h_pc0", env, frozenset([pc]), symbs)
    h_pc.set_locs([loc0, loc1, loc2])
    res.append(h_pc)

    # h_x0: x may stutter, otherwise x' = x * y.
    stutter = mgr.Equals(x_x, x)
    loc = Location(env, mgr.GT(x, i_0), mgr.GT(y, i_0), stutterT=stutter)
    loc.set_progress(0, mgr.Equals(x_x, mgr.Times(x, y)))
    h_x = Hint("h_x0", env, frozenset([x]), symbs)
    h_x.set_locs([loc])
    res.append(h_x)

    # h_y4: y >= 3, incremented on two of three locations.
    loc0 = Location(env, mgr.GE(y, i_3))
    loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
    loc1 = Location(env, mgr.GE(y, i_3))
    loc1.set_progress(2, mgr.Equals(x_y, y))
    loc2 = Location(env, mgr.GE(y, i_3))
    loc2.set_progress(2, mgr.Equals(x_y, mgr.Plus(y, i_1)))
    h_y = Hint("h_y4", env, frozenset([y]), symbs)
    h_y.set_locs([loc0, loc1, loc2])
    res.append(h_y)

    # h_z2: z grows by at least y, then by at least 0.
    loc0 = Location(env, mgr.GE(z, i_3), mgr.GE(y, i_0))
    loc0.set_progress(1, mgr.GE(x_z, mgr.Plus(z, y)))
    loc1 = Location(env, mgr.GE(z, i_3), mgr.GE(x, i_0))
    loc1.set_progress(0, mgr.GE(x_z, mgr.Plus(z, i_0)))
    h_z = Hint("h_z2", env, frozenset([z]), symbs)
    h_z.set_locs([loc0, loc1])
    res.append(h_z)

    # h_x1: x may stutter, otherwise x' >= y*z - 2.
    stutter = mgr.Equals(x_x, x)
    loc = Location(env, mgr.GT(x, i_0), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)),
                   stutterT=stutter)
    loc.set_progress(0, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_2)))
    h_x = Hint("h_x1", env, frozenset([x]), symbs)
    h_x.set_locs([loc])
    res.append(h_x)

    # h_y3: y incremented by 1, then by x.
    loc0 = Location(env, mgr.GE(y, i_3))
    loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
    loc1 = Location(env, mgr.GE(y, i_3), mgr.GE(x, i_2))
    loc1.set_progress(0, mgr.Equals(x_y, mgr.Plus(y, x)))
    h_y = Hint("h_y3", env, frozenset([y]), symbs)
    h_y.set_locs([loc0, loc1])
    res.append(h_y)

    # h_y2: y incremented by 1, then by z.
    # NOTE(review): this stutter is assigned but never used below.
    stutter = mgr.Equals(x_y, y)
    loc0 = Location(env, mgr.GE(y, i_3))
    loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, i_1)))
    loc1 = Location(env, mgr.GE(y, i_3), mgr.GE(z, i_2))
    loc1.set_progress(0, mgr.Equals(x_y, mgr.Plus(y, z)))
    h_y = Hint("h_y2", env, frozenset([y]), symbs)
    h_y.set_locs([loc0, loc1])
    res.append(h_y)

    # h_x3: x >= y*z - 1, then grows by y.
    loc0 = Location(env, mgr.GT(x, i_3), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
    loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
    loc1 = Location(env, mgr.GT(x, i_0), mgr.GE(y, i_1))
    loc1.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
    h_x = Hint("h_x3", env, frozenset([x]), symbs)
    h_x.set_locs([loc0, loc1])
    res.append(h_x)

    # h_pc3: pc alternates between 2 and >= 3.
    loc0 = Location(env, mgr.Equals(pc, i_2))
    loc0.set_progress(1, mgr.GT(x_pc, i_2))
    loc1 = Location(env, mgr.GE(pc, i_3))
    loc1.set_progress(0, mgr.Equals(x_pc, i_2))
    h_pc = Hint("h_pc3", env, frozenset([pc]), symbs)
    h_pc.set_locs([loc0, loc1])
    res.append(h_pc)

    return frozenset(res)
| 34.617188 | 81 | 0.541751 |
acf359fe5638f6109cf9d6b25bc107c32e4ee5f4 | 6,175 | py | Python | models/networks/revunet_3D_dsv.py | Myyyr/segmentation | 6b9423e327cff1eb23599404031b7fb8e9ecf75d | [
"MIT"
] | null | null | null | models/networks/revunet_3D_dsv.py | Myyyr/segmentation | 6b9423e327cff1eb23599404031b7fb8e9ecf75d | [
"MIT"
] | null | null | null | models/networks/revunet_3D_dsv.py | Myyyr/segmentation | 6b9423e327cff1eb23599404031b7fb8e9ecf75d | [
"MIT"
] | null | null | null | import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import revtorch.revtorch as rv
import random
from .utils import UnetDsv3
# Random experiment identifier.  NOTE: shadows the builtin id().
id = random.getrandbits(64)

#restore experiment
#VALIDATE_ALL = False
#PREDICT = True
#RESTORE_ID = 7420189804603519207
#RESTORE_EPOCH = 6
#LOG_COMETML_EXISTING_EXPERIMENT = ""

#general settings
SAVE_CHECKPOINTS = False #set to true to create a checkpoint at every epoch
EXPERIMENT_TAGS = ["bugfreeFinalDrop"]
EXPERIMENT_NAME = "Reversible NO_NEW60, dropout"
EPOCHS = 1000
BATCH_SIZE = 1
VIRTUAL_BATCHSIZE = 1
VALIDATE_EVERY_K_EPOCHS = 1
INPLACE = True  # use in-place LeakyReLU in ResidualInner

#hyperparameters
#CHANNELS = [36, 72, 144, 288, 576] #normal doubling strategy
# CHANNELS = [60, 120, 240, 360, 480]
CHANNELS = [64, 128, 256, 512, 1024]
CHANNELS = [int(x / 4) for x in CHANNELS]  # per-level widths, quartered
INITIAL_LR = 1e-4
L2_REGULARIZER = 1e-5

#logging settings
LOG_EVERY_K_ITERATIONS = 5 #0 to disable logging
LOG_MEMORY_EVERY_K_ITERATIONS = False
LOG_MEMORY_EVERY_EPOCH = True
LOG_EPOCH_TIME = True
LOG_VALIDATION_TIME = True
LOG_HAUSDORFF_EVERY_K_EPOCHS = 0 #must be a multiple of VALIDATE_EVERY_K_EPOCHS
LOG_COMETML = False
LOG_PARAMCOUNT = True
LOG_LR_EVERY_EPOCH = True

#data and augmentation
TRAIN_ORIGINAL_CLASSES = False #train on original 5 classes
DATASET_WORKERS = 1
SOFT_AUGMENTATION = False #Soft augmetation directly works on the 3 classes. Hard augmentation augments on the 5 orignal labels, then takes the argmax
NN_AUGMENTATION = True #Has priority over soft/hard augmentation. Uses nearest-neighbor interpolation
DO_ROTATE = True
DO_SCALE = True
DO_FLIP = True
DO_ELASTIC_AUG = True
DO_INTENSITY_SHIFT = True
#RANDOM_CROP = [128, 128, 128]

ROT_DEGREES = 20
SCALE_FACTOR = 1.1
SIGMA = 10
MAX_INTENSITY_SHIFT = 0.1
class ResidualInner(nn.Module):
    """Two (Conv3d -> BatchNorm3d -> LeakyReLU) stages; one half of a reversible block.

    NOTE: despite the `gn` attribute names these are BatchNorm3d layers (not
    group norm), and the `groups` argument is accepted but currently unused.
    """
    def __init__(self, channels, groups):
        super(ResidualInner, self).__init__()
        self.gn1 = nn.BatchNorm3d(channels)
        self.conv1 = nn.Conv3d(channels, channels, 3, padding=1, bias=False)
        self.gn2 = nn.BatchNorm3d(channels)
        self.conv2 = nn.Conv3d(channels, channels, 3, padding=1, bias=False)

    def forward(self, x):
        x = F.leaky_relu(self.gn1(self.conv1(x)), inplace=INPLACE)
        x = F.leaky_relu(self.gn2(self.conv2(x)), inplace=INPLACE)
        return x
def makeReversibleSequence(channels):
    """Build one reversible block from two fresh ResidualInner halves.

    Each half operates on channels // 2 feature maps, as required by the
    additive-coupling scheme of rv.ReversibleBlock.
    """
    half = channels // 2
    groups = CHANNELS[0] // 2
    f_half = ResidualInner(half, groups)
    g_half = ResidualInner(half, groups)
    return rv.ReversibleBlock(f_half, g_half)
def makeReversibleComponent(channels, blockCount):
    """Stack blockCount reversible blocks into a single ReversibleSequence."""
    blocks = [makeReversibleSequence(channels) for _ in range(blockCount)]
    return rv.ReversibleSequence(nn.ModuleList(blocks))
def getChannelsAtIndex(index):
    """Channel count for encoder/decoder level `index`, clamped to the CHANNELS table."""
    clamped = min(max(index, 0), len(CHANNELS) - 1)
    return CHANNELS[clamped]
class EncoderModule(nn.Module):
    """One encoder level: optional 2x max-pool + 1x1 channel expansion, then reversible blocks."""
    def __init__(self, inChannels, outChannels, depth, downsample=True):
        super(EncoderModule, self).__init__()
        self.downsample = downsample
        if downsample:
            # 1x1 conv changes the channel count after pooling; the first
            # (non-downsampling) level keeps firstConv's channel count.
            self.conv = nn.Conv3d(inChannels, outChannels, 1)
        self.reversibleBlocks = makeReversibleComponent(outChannels, depth)

    def forward(self, x):
        if self.downsample:
            x = F.max_pool3d(x, 2)
            x = self.conv(x) #increase number of channels
        x = self.reversibleBlocks(x)
        return x
class DecoderModule(nn.Module):
    """One decoder level: reversible blocks, then optional 1x1 channel reduction + 2x upsample."""
    def __init__(self, inChannels, outChannels, depth, upsample=True):
        super(DecoderModule, self).__init__()
        self.reversibleBlocks = makeReversibleComponent(inChannels, depth)
        self.upsample = upsample
        if self.upsample:
            self.conv = nn.Conv3d(inChannels, outChannels, 1)

    def forward(self, x):
        x = self.reversibleBlocks(x)
        if self.upsample:
            x = self.conv(x)
            x = F.interpolate(x, scale_factor=2, mode="trilinear", align_corners=False)
        return x
class NoNewReversible_dsv(nn.Module):
    """Reversible 3D U-Net with deep supervision (dsv) heads.

    Five encoder levels (first level does not downsample) and five decoder
    levels (last level does not upsample). The outputs of the first four
    decoder levels are collected in ``up`` (before the skip addition) and fed
    through deep-supervision heads dsv4..dsv1, whose n_classes-channel outputs
    are concatenated and fused by ``lastConv``.

    NOTE(review): ``UnetDsv3`` is defined elsewhere in this file — presumably a
    conv + upsample head that brings each scale to full resolution (scale
    factors 8/4/2 match decoder depths); confirm.
    """
    def __init__(self):
        super(NoNewReversible_dsv, self).__init__()
        depth = 1
        self.levels = 5
        n_classes = 2
        self.firstConv = nn.Conv3d(1, CHANNELS[0], 3, padding=1, bias=False)
        #self.dropout = nn.Dropout3d(0.2, True)
        # Fuses the 4 concatenated deep-supervision maps (n_classes each).
        self.lastConv = nn.Conv3d(n_classes*4, n_classes, 1, bias=True)
        #create encoder levels
        encoderModules = []
        for i in range(self.levels):
            # i != 0: the first encoder level keeps full resolution.
            encoderModules.append(EncoderModule(getChannelsAtIndex(i - 1), getChannelsAtIndex(i), depth, i != 0))
        self.encoders = nn.ModuleList(encoderModules)
        #create decoder levels
        decoderModules = []
        for i in range(self.levels):
            # i != (levels-1): the last decoder level does not upsample.
            decoderModules.append(DecoderModule(getChannelsAtIndex(self.levels - i - 1), getChannelsAtIndex(self.levels - i - 2), depth, i != (self.levels -1)))
        self.decoders = nn.ModuleList(decoderModules)
        # Deep supervision heads; dsv4 sees the deepest decoder output.
        self.dsv4 = UnetDsv3(in_size=CHANNELS[3], out_size=n_classes, scale_factor=8)
        self.dsv3 = UnetDsv3(in_size=CHANNELS[2], out_size=n_classes, scale_factor=4)
        self.dsv2 = UnetDsv3(in_size=CHANNELS[1], out_size=n_classes, scale_factor=2)
        self.dsv1 = nn.Conv3d(in_channels=CHANNELS[0], out_channels=n_classes, kernel_size=1)

    def forward(self, x):
        x = self.firstConv(x)
        #x = self.dropout(x)
        inputStack = []
        for i in range(self.levels):
            x = self.encoders[i](x)
            if i < self.levels - 1:
                # Save skip connections for all but the deepest level.
                inputStack.append(x)
        up = []
        for i in range(self.levels):
            x = self.decoders[i](x)
            if i < self.levels - 1:
                # Collect the decoder output BEFORE the skip addition for
                # the deep-supervision heads, then add the skip connection.
                up.append(x)
                x = x + inputStack.pop()
        dsv4 = self.dsv4(up[0])
        dsv3 = self.dsv3(up[1])
        dsv2 = self.dsv2(up[2])
        dsv1 = self.dsv1(up[3])
        x = self.lastConv(torch.cat([dsv1,dsv2,dsv3,dsv4], dim=1))
        #x = torch.sigmoid(x)
        return x

    @staticmethod
    def apply_argmax_softmax(pred):
        # Softmax over the class dimension (name notwithstanding, no argmax).
        pred = F.softmax(pred, dim=1)
        return pred
acf35a59711f9770cd0bfce4994c56f93ea46340 | 1,429 | py | Python | creme/datasets/base.py | tweakyllama/creme | 6bb8e34789947a943e7e6a8a1af1341e4c1de144 | [
"BSD-3-Clause"
] | 1 | 2019-08-24T00:04:19.000Z | 2019-08-24T00:04:19.000Z | creme/datasets/base.py | koaning/creme | fea9594b0620b57edef0d986edc735a3a0977dc9 | [
"BSD-3-Clause"
] | null | null | null | creme/datasets/base.py | koaning/creme | fea9594b0620b57edef0d986edc735a3a0977dc9 | [
"BSD-3-Clause"
] | null | null | null | import os
import shutil
import urllib
import zipfile
from .. import stream
def get_data_home(data_home=None):
"""Return the path of the creme data directory."""
if data_home is None:
data_home = os.environ.get('CREME_DATA', os.path.join('~', 'creme_data'))
data_home = os.path.expanduser(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
return data_home
def fetch_csv_dataset(data_home, url, name, **iter_csv_params):
data_home = get_data_home(data_home=data_home)
# If the CSV file exists then iterate over it
csv_path = os.path.join(data_home, f'{name}.csv')
if os.path.exists(csv_path):
return stream.iter_csv(csv_path, **iter_csv_params)
# If the ZIP file exists then unzip it
zip_path = os.path.join(data_home, f'{name}.zip')
if os.path.exists(zip_path):
print('Unzipping data...')
# Unzip the ZIP file
with zipfile.ZipFile(zip_path, 'r') as zf:
zf.extractall(data_home)
# Delete the ZIP file now that the CSV file is available
os.remove(zip_path)
return fetch_csv_dataset(data_home, url, name, **iter_csv_params)
# Download the ZIP file
print('Downloading data...')
with urllib.request.urlopen(url) as r, open(zip_path, 'wb') as f:
shutil.copyfileobj(r, f)
return fetch_csv_dataset(data_home, url, name, **iter_csv_params)
| 29.770833 | 81 | 0.669699 |
acf35b748c49e06a868abf639d7173422157f4ae | 5,202 | py | Python | train_EEG_Sz_with_miniimagenet.py | MTynes/MAML-Pytorch | 43f3a45e3761b71542e484f363cd2f87b98a08c3 | [
"MIT"
] | null | null | null | train_EEG_Sz_with_miniimagenet.py | MTynes/MAML-Pytorch | 43f3a45e3761b71542e484f363cd2f87b98a08c3 | [
"MIT"
] | null | null | null | train_EEG_Sz_with_miniimagenet.py | MTynes/MAML-Pytorch | 43f3a45e3761b71542e484f363cd2f87b98a08c3 | [
"MIT"
] | null | null | null | import torch, os
import numpy as np
from MiniImagenet import MiniImagenet
import scipy.stats
from torch.utils.data import DataLoader
from torch.optim import lr_scheduler
import random, sys, pickle
import argparse
from meta import Meta
def mean_confidence_interval(accs, confidence=0.95):
n = accs.shape[0]
m, se = np.mean(accs), scipy.stats.sem(accs)
h = se * scipy.stats.t._ppf((1 + confidence) / 2, n - 1)
return m, h
def main():
torch.manual_seed(222)
torch.cuda.manual_seed_all(222)
np.random.seed(222)
print(args)
config = [
('conv2d', [32, 3, 3, 3, 1, 0]),
('relu', [True]),
('bn', [32]),
('max_pool2d', [2, 2, 0]),
('conv2d', [32, 32, 3, 3, 1, 0]),
('relu', [True]),
('bn', [32]),
('max_pool2d', [2, 2, 0]),
('conv2d', [32, 32, 3, 3, 1, 0]),
('relu', [True]),
('bn', [32]),
('max_pool2d', [2, 2, 0]),
('conv2d', [32, 32, 3, 3, 1, 0]),
('relu', [True]),
('bn', [32]),
('max_pool2d', [2, 1, 0]),
('flatten', []),
('linear', [args.n_way, 32 * 5 * 5])
]
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
maml = Meta(args, config).to(device)
tmp = filter(lambda x: x.requires_grad, maml.parameters())
num = sum(map(lambda x: np.prod(x.shape), tmp))
print(maml)
print('Total trainable tensors:', num)
# batchsz here means total episode number
train_image_directory = args.train_dir
test_image_directory = args.test_dir
mini = MiniImagenet(train_image_directory, mode='train', n_way=args.n_way, k_shot=args.k_spt,
k_query=args.k_qry,
batchsz=50, resize=args.imgsz)
mini_test = MiniImagenet(test_image_directory, mode='test', n_way=args.n_way, k_shot=args.k_spt,
k_query=args.k_qry,
batchsz=50, resize=args.imgsz)
mean_accs = []
for epoch in range(args.epoch//10000):
# fetch meta_batchsz num of episode each time
db = DataLoader(mini, args.task_num, shuffle=True, num_workers=1, pin_memory=True)
for step, (x_spt, y_spt, x_qry, y_qry) in enumerate(db):
x_spt, y_spt, x_qry, y_qry = x_spt.to(device), y_spt.to(device), x_qry.to(device), y_qry.to(device)
accs = maml(x_spt, y_spt, x_qry, y_qry)
if step % 30 == 0:
print('step:', step, '\ttraining acc:', accs)
if step % 500 == 0: # evaluation
db_test = DataLoader(mini_test, 1, shuffle=True, num_workers=1, pin_memory=True)
accs_all_test = []
for x_spt, y_spt, x_qry, y_qry in db_test:
x_spt, y_spt, x_qry, y_qry = x_spt.squeeze(0).to(device), y_spt.squeeze(0).to(device), \
x_qry.squeeze(0).to(device), y_qry.squeeze(0).to(device)
accs = maml.fine_tuning(x_spt, y_spt, x_qry, y_qry)
accs_all_test.append(accs)
# [b, update_step+1]
accs = np.array(accs_all_test).mean(axis=0).astype(np.float16)
print('Epoch ', epoch, '. Test acc:', accs)
print('Mean test acc: ', np.mean(accs))
mean_accs.append(np.mean(accs))
print('\nHighest test accuracy: ', max(mean_accs))
# log the mean test accuracy data for display later
with open(args.accuracy_log_file, 'w') as f:
f.write("\n".join([str(s) for s in mean_accs]))
if __name__ == '__main__':
    # CLI entry point: parse hyper-parameters into the module-global ``args``,
    # which main() reads directly (deliberately not passed as a parameter).
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--train_dir', type=str, help='train data directory', default='/content/miniimagenet/images')
    argparser.add_argument('--test_dir', type=str, help='test data directory', default='/content/all_test_images')
    argparser.add_argument('--epoch', type=int, help='epoch number', default=(300 * 10000))##6
    argparser.add_argument('--n_way', type=int, help='n way', default=2) #cannot be larger than the number of categories
    argparser.add_argument('--k_spt', type=int, help='k shot for support set', default=1)
    argparser.add_argument('--k_qry', type=int, help='k shot for query set', default=1)
    argparser.add_argument('--imgsz', type=int, help='imgsz', default=84) #
    argparser.add_argument('--imgc', type=int, help='imgc', default=3)
    argparser.add_argument('--task_num', type=int, help='meta batch size, namely task num', default=4)
    argparser.add_argument('--meta_lr', type=float, help='meta-level outer learning rate', default=1e-3)
    argparser.add_argument('--update_lr', type=float, help='task-level inner update learning rate', default=0.01)
    argparser.add_argument('--update_step', type=int, help='task-level inner update steps', default=5)
    argparser.add_argument('--update_step_test', type=int, help='update steps for fine_tuning', default=10)
    argparser.add_argument('--accuracy_log_file', type=str, help='Output file for mean test accuracy',
                           default='/content/mean_test_accuracy.txt')
    args = argparser.parse_args()
    main()
acf35dbe76f8acd53a8db628d36d672688d7f68a | 3,175 | py | Python | qa/rpc-tests/invalidateblock.py | utopiacoin/utopiacoin1 | d1cb408de3dd693f9dcd3ce8b92afa6eb9b8325e | [
"MIT"
] | null | null | null | qa/rpc-tests/invalidateblock.py | utopiacoin/utopiacoin1 | d1cb408de3dd693f9dcd3ce8b92afa6eb9b8325e | [
"MIT"
] | null | null | null | qa/rpc-tests/invalidateblock.py | utopiacoin/utopiacoin1 | d1cb408de3dd693f9dcd3ce8b92afa6eb9b8325e | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test InvalidateBlock code
#
from test_framework import NewcoinTestFramework
from newcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
class InvalidateTest(NewcoinTestFramework):
    """Functional test of the invalidateblock RPC (Python 2 test framework).

    Spins up three nodes, forces reorgs, and checks that invalidating blocks
    repopulates setBlockIndexCandidates and never reorgs to lower-work chains.
    """
    def setup_chain(self):
        # Three fresh, empty data directories (no cached chain).
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 3)

    def setup_network(self):
        # Start three disconnected nodes; connections are made per-scenario
        # inside run_test to force reorgs at specific points.
        self.nodes = []
        self.is_network_split = False
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))

    def run_test(self):
        # Scenario 1: node 0 mines 4 blocks, node 1 mines a longer 6-block
        # chain; connecting them reorgs node 0 onto node 1's chain. Then
        # invalidating node 1's block 2 must bring node 0 back to its own tip.
        print "Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:"
        print "Mine 4 blocks on Node 0"
        self.nodes[0].setgenerate(True, 4)
        assert(self.nodes[0].getblockcount() == 4)
        besthash = self.nodes[0].getbestblockhash()
        print "Mine competing 6 blocks on Node 1"
        self.nodes[1].setgenerate(True, 6)
        assert(self.nodes[1].getblockcount() == 6)
        print "Connect nodes to force a reorg"
        connect_nodes_bi(self.nodes,0,1)
        sync_blocks(self.nodes[0:2])
        assert(self.nodes[0].getblockcount() == 6)
        badhash = self.nodes[1].getblockhash(2)
        print "Invalidate block 2 on node 0 and verify we reorg to node 0's original chain"
        self.nodes[0].invalidateblock(badhash)
        newheight = self.nodes[0].getblockcount()
        newhash = self.nodes[0].getbestblockhash()
        if (newheight != 4 or newhash != besthash):
            raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
        # Scenario 2: after invalidations leave node 1 at height 4 and node 2
        # at height 2, node 2 mining one block (height 3) must not pull node 1
        # down to the lower-work chain.
        print "\nMake sure we won't reorg to a lower work chain:"
        connect_nodes_bi(self.nodes,1,2)
        print "Sync node 2 to node 1 so both have 6 blocks"
        sync_blocks(self.nodes[1:3])
        assert(self.nodes[2].getblockcount() == 6)
        print "Invalidate block 5 on node 1 so its tip is now at 4"
        self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
        assert(self.nodes[1].getblockcount() == 4)
        print "Invalidate block 3 on node 2, so its tip is now 2"
        self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
        assert(self.nodes[2].getblockcount() == 2)
        print "..and then mine a block"
        self.nodes[2].setgenerate(True, 1)
        print "Verify all nodes are at the right height"
        # Give block propagation a moment before asserting heights.
        time.sleep(5)
        for i in xrange(3):
            print i,self.nodes[i].getblockcount()
        assert(self.nodes[2].getblockcount() == 3)
        assert(self.nodes[0].getblockcount() == 4)
        node1height = self.nodes[1].getblockcount()
        if node1height < 4:
            raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)
if __name__ == '__main__':
    # Run the functional test via the framework's main() driver.
    InvalidateTest().main()
| 41.233766 | 95 | 0.650079 |
acf35e3871d0e83f9b7a1191c479dec37fc596d1 | 875 | py | Python | archive/p/python/bubble_sort.py | Ayush7-BIT/sample-programs | 827d8961d3a548daf8fe3b674642a1562daaa5c4 | [
"MIT"
] | 4 | 2019-10-18T13:04:23.000Z | 2020-10-03T16:07:14.000Z | archive/p/python/bubble_sort.py | Ayush7-BIT/sample-programs | 827d8961d3a548daf8fe3b674642a1562daaa5c4 | [
"MIT"
] | 2 | 2021-09-07T05:33:23.000Z | 2021-12-04T08:30:06.000Z | archive/p/python/bubble_sort.py | Ayush7-BIT/sample-programs | 827d8961d3a548daf8fe3b674642a1562daaa5c4 | [
"MIT"
] | 1 | 2020-07-09T03:26:23.000Z | 2020-07-09T03:26:23.000Z | #!/usr/bin/env python
import sys
from functools import reduce
def bubble_sort(xs):
def pass_list(xs):
if len(xs) <= 1:
return xs
x0 = xs[0]
x1 = xs[1]
if x1 < x0:
del xs[1]
return [x1] + pass_list(xs)
return [x0] + pass_list(xs[1:])
return reduce(lambda acc, _ : pass_list(acc), xs, xs[:])
def input_list(list_str):
return [int(x.strip(" "), 10) for x in list_str.split(',')]
def exit_with_error():
print('Usage: please provide a list of at least two integers to sort in the format "1, 2, 3, 4, 5"')
sys.exit(1)
def main(args):
try:
xs = input_list(args[0])
if len(xs) <= 1:
exit_with_error()
print(bubble_sort(xs))
except (IndexError,ValueError):
exit_with_error()
if __name__ == "__main__":
main(sys.argv[1:])
| 21.341463 | 104 | 0.56 |
acf35e77f44f3e9322b1c6bcb08831e22071446b | 1,978 | py | Python | Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/domain/test_domain_forms.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 1 | 2021-07-24T17:22:50.000Z | 2021-07-24T17:22:50.000Z | Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/domain/test_domain_forms.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 2 | 2022-02-28T03:40:31.000Z | 2022-02-28T03:40:52.000Z | Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/domain/test_domain_forms.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 2 | 2022-02-25T08:34:51.000Z | 2022-03-16T17:29:44.000Z | from django.test import TestCase
from dfirtrack_main.forms import DomainForm
class DomainFormTestCase(TestCase):
""" domain form tests """
def test_domain_name_form_label(self):
""" test form label """
# get object
form = DomainForm()
# compare
self.assertEqual(form.fields['domain_name'].label, 'Domain name (*)')
def test_domain_note_form_label(self):
""" test form label """
# get object
form = DomainForm()
# compare
self.assertEqual(form.fields['domain_note'].label, 'Domain note')
def test_domain_form_empty(self):
""" test minimum form requirements / INVALID """
# get object
form = DomainForm(data = {})
# compare
self.assertFalse(form.is_valid())
def test_domain_name_form_filled(self):
""" test minimum form requirements / VALID """
# get object
form = DomainForm(data = {'domain_name': 'domain_1'})
# compare
self.assertTrue(form.is_valid())
def test_domain_note_form_filled(self):
""" test additional form content """
# get object
form = DomainForm(data = {
'domain_name': 'domain_1',
'domain_note': 'lorem ipsum',
})
# compare
self.assertTrue(form.is_valid())
def test_domain_name_proper_chars(self):
""" test for max length """
# get object
form = DomainForm(data = {'domain_name': 'dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd'})
# compare
self.assertTrue(form.is_valid())
def test_domain_name_too_many_chars(self):
""" test for max length """
# get object
form = DomainForm(data = {'domain_name': 'ddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd'})
# compare
self.assertFalse(form.is_valid())
| 30.430769 | 154 | 0.631446 |
acf3600486019533227a094d01b59a4eeb1a5132 | 16,249 | py | Python | databricks/koalas/tests/test_typedef.py | varunsh-coder/koalas | 1cfc9ec76a1b023d89870688bf802cf58df537f1 | [
"Apache-2.0"
] | 3,211 | 2019-04-22T04:40:50.000Z | 2022-03-31T10:42:31.000Z | databricks/koalas/tests/test_typedef.py | varunsh-coder/koalas | 1cfc9ec76a1b023d89870688bf802cf58df537f1 | [
"Apache-2.0"
] | 2,017 | 2019-04-21T23:37:12.000Z | 2022-03-24T03:48:51.000Z | databricks/koalas/tests/test_typedef.py | varunsh-coder/koalas | 1cfc9ec76a1b023d89870688bf802cf58df537f1 | [
"Apache-2.0"
] | 375 | 2019-04-21T23:58:57.000Z | 2022-03-30T00:42:19.000Z | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import unittest
import datetime
import decimal
from typing import List
import pandas
import pandas as pd
from pandas.api.types import CategoricalDtype
import numpy as np
from pyspark.sql.types import (
ArrayType,
BinaryType,
BooleanType,
FloatType,
IntegerType,
LongType,
StringType,
StructField,
StructType,
ByteType,
ShortType,
DateType,
DecimalType,
DoubleType,
TimestampType,
)
from databricks.koalas.typedef import (
as_spark_type,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
infer_return_type,
koalas_dtype,
)
from databricks import koalas as ks
class TypeHintTests(unittest.TestCase):
    """Tests for koalas return-type inference from type hints and for the
    numpy/python/pandas-dtype -> Spark type mapping (as_spark_type /
    koalas_dtype / infer_return_type)."""

    @unittest.skipIf(
        sys.version_info < (3, 7),
        "Type inference from pandas instances is supported with Python 3.7+",
    )
    def test_infer_schema_from_pandas_instances(self):
        """Infer dtypes/Spark types from pd.Series/pd.DataFrame annotations."""
        def func() -> pd.Series[int]:
            pass

        inferred = infer_return_type(func)
        self.assertEqual(inferred.dtype, np.int64)
        self.assertEqual(inferred.spark_type, LongType())

        def func() -> pd.Series[np.float]:
            pass

        inferred = infer_return_type(func)
        self.assertEqual(inferred.dtype, np.float64)
        self.assertEqual(inferred.spark_type, DoubleType())

        # String (forward-reference) annotations must work the same way.
        def func() -> "pd.DataFrame[np.float, str]":
            pass

        expected = StructType([StructField("c0", DoubleType()), StructField("c1", StringType())])
        inferred = infer_return_type(func)
        self.assertEqual(inferred.dtypes, [np.float64, np.unicode_])
        self.assertEqual(inferred.spark_type, expected)

        def func() -> "pandas.DataFrame[np.float]":
            pass

        expected = StructType([StructField("c0", DoubleType())])
        inferred = infer_return_type(func)
        self.assertEqual(inferred.dtypes, [np.float64])
        self.assertEqual(inferred.spark_type, expected)

        def func() -> "pd.Series[int]":
            pass

        inferred = infer_return_type(func)
        self.assertEqual(inferred.dtype, np.int64)
        self.assertEqual(inferred.spark_type, LongType())

        def func() -> pd.DataFrame[np.float, str]:
            pass

        expected = StructType([StructField("c0", DoubleType()), StructField("c1", StringType())])
        inferred = infer_return_type(func)
        self.assertEqual(inferred.dtypes, [np.float64, np.unicode_])
        self.assertEqual(inferred.spark_type, expected)

        def func() -> pd.DataFrame[np.float]:
            pass

        expected = StructType([StructField("c0", DoubleType())])
        inferred = infer_return_type(func)
        self.assertEqual(inferred.dtypes, [np.float64])
        self.assertEqual(inferred.spark_type, expected)

        # Annotations built from a concrete DataFrame's dtypes.
        pdf = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})

        def func() -> pd.DataFrame[pdf.dtypes]: # type: ignore
            pass

        expected = StructType([StructField("c0", LongType()), StructField("c1", LongType())])
        inferred = infer_return_type(func)
        self.assertEqual(inferred.dtypes, [np.int64, np.int64])
        self.assertEqual(inferred.spark_type, expected)

        # Categorical dtypes map to LongType (the category codes).
        pdf = pd.DataFrame({"a": [1, 2, 3], "b": pd.Categorical(["a", "b", "c"])})

        def func() -> pd.Series[pdf.b.dtype]: # type: ignore
            pass

        inferred = infer_return_type(func)
        self.assertEqual(inferred.dtype, CategoricalDtype(categories=["a", "b", "c"]))
        self.assertEqual(inferred.spark_type, LongType())

        def func() -> pd.DataFrame[pdf.dtypes]: # type: ignore
            pass

        expected = StructType([StructField("c0", LongType()), StructField("c1", LongType())])
        inferred = infer_return_type(func)
        self.assertEqual(inferred.dtypes, [np.int64, CategoricalDtype(categories=["a", "b", "c"])])
        self.assertEqual(inferred.spark_type, expected)

    def test_if_pandas_implements_class_getitem(self):
        # the current type hint implementation of pandas DataFrame assumes pandas doesn't
        # implement '__class_getitem__'. This test case is to make sure pandas
        # doesn't implement them.
        assert not ks._frame_has_class_getitem
        assert not ks._series_has_class_getitem

    @unittest.skipIf(
        sys.version_info < (3, 7),
        "Type inference from pandas instances is supported with Python 3.7+",
    )
    def test_infer_schema_with_names_pandas_instances(self):
        """Column names supplied in the annotation must appear in the schema."""
        def func() -> 'pd.DataFrame["a" : np.float, "b":str]': # noqa: F821
            pass

        expected = StructType([StructField("a", DoubleType()), StructField("b", StringType())])
        inferred = infer_return_type(func)
        self.assertEqual(inferred.dtypes, [np.float64, np.unicode_])
        self.assertEqual(inferred.spark_type, expected)

        def func() -> "pd.DataFrame['a': np.float, 'b': int]": # noqa: F821
            pass

        expected = StructType([StructField("a", DoubleType()), StructField("b", LongType())])
        inferred = infer_return_type(func)
        self.assertEqual(inferred.dtypes, [np.float64, np.int64])
        self.assertEqual(inferred.spark_type, expected)

        pdf = pd.DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]})

        def func() -> pd.DataFrame[zip(pdf.columns, pdf.dtypes)]:
            pass

        expected = StructType([StructField("a", LongType()), StructField("b", LongType())])
        inferred = infer_return_type(func)
        self.assertEqual(inferred.dtypes, [np.int64, np.int64])
        self.assertEqual(inferred.spark_type, expected)

        # MultiIndex-style (tuple) column names are stringified.
        pdf = pd.DataFrame({("x", "a"): [1, 2, 3], ("y", "b"): [3, 4, 5]})

        def func() -> pd.DataFrame[zip(pdf.columns, pdf.dtypes)]:
            pass

        expected = StructType(
            [StructField("(x, a)", LongType()), StructField("(y, b)", LongType())]
        )
        inferred = infer_return_type(func)
        self.assertEqual(inferred.dtypes, [np.int64, np.int64])
        self.assertEqual(inferred.spark_type, expected)

        pdf = pd.DataFrame({"a": [1, 2, 3], "b": pd.Categorical(["a", "b", "c"])})

        def func() -> pd.DataFrame[zip(pdf.columns, pdf.dtypes)]:
            pass

        expected = StructType([StructField("a", LongType()), StructField("b", LongType())])
        inferred = infer_return_type(func)
        self.assertEqual(inferred.dtypes, [np.int64, CategoricalDtype(categories=["a", "b", "c"])])
        self.assertEqual(inferred.spark_type, expected)

    @unittest.skipIf(
        sys.version_info < (3, 7),
        "Type inference from pandas instances is supported with Python 3.7+",
    )
    def test_infer_schema_with_names_pandas_instances_negative(self):
        """Malformed pd.DataFrame annotations must raise TypeError."""
        def try_infer_return_type():
            def f() -> 'pd.DataFrame["a" : np.float : 1, "b":str:2]': # noqa: F821
                pass

            infer_return_type(f)

        self.assertRaisesRegex(TypeError, "Type hints should be specified", try_infer_return_type)

        class A:
            pass

        def try_infer_return_type():
            def f() -> pd.DataFrame[A]:
                pass

            infer_return_type(f)

        self.assertRaisesRegex(TypeError, "not understood", try_infer_return_type)

        def try_infer_return_type():
            def f() -> 'pd.DataFrame["a" : np.float : 1, "b":str:2]': # noqa: F821
                pass

            infer_return_type(f)

        self.assertRaisesRegex(TypeError, "Type hints should be specified", try_infer_return_type)

        # object type
        pdf = pd.DataFrame({"a": ["a", 2, None]})

        def try_infer_return_type():
            def f() -> pd.DataFrame[pdf.dtypes]: # type: ignore
                pass

            infer_return_type(f)

        self.assertRaisesRegex(TypeError, "object.*not understood", try_infer_return_type)

        def try_infer_return_type():
            def f() -> pd.Series[pdf.a.dtype]: # type: ignore
                pass

            infer_return_type(f)

        self.assertRaisesRegex(TypeError, "object.*not understood", try_infer_return_type)

    def test_infer_schema_with_names_negative(self):
        """Same negative cases but with ks.DataFrame/ks.Series annotations."""
        def try_infer_return_type():
            def f() -> 'ks.DataFrame["a" : np.float : 1, "b":str:2]': # noqa: F821
                pass

            infer_return_type(f)

        self.assertRaisesRegex(TypeError, "Type hints should be specified", try_infer_return_type)

        class A:
            pass

        def try_infer_return_type():
            def f() -> ks.DataFrame[A]:
                pass

            infer_return_type(f)

        self.assertRaisesRegex(TypeError, "not understood", try_infer_return_type)

        def try_infer_return_type():
            def f() -> 'ks.DataFrame["a" : np.float : 1, "b":str:2]': # noqa: F821
                pass

            infer_return_type(f)

        self.assertRaisesRegex(TypeError, "Type hints should be specified", try_infer_return_type)

        # object type
        pdf = pd.DataFrame({"a": ["a", 2, None]})

        def try_infer_return_type():
            def f() -> ks.DataFrame[pdf.dtypes]: # type: ignore
                pass

            infer_return_type(f)

        self.assertRaisesRegex(TypeError, "object.*not understood", try_infer_return_type)

        def try_infer_return_type():
            def f() -> ks.Series[pdf.a.dtype]: # type: ignore
                pass

            infer_return_type(f)

        self.assertRaisesRegex(TypeError, "object.*not understood", try_infer_return_type)

    def test_as_spark_type_koalas_dtype(self):
        """Exhaustive mapping of numpy/python/typing types to (dtype, Spark type)."""
        type_mapper = {
            # binary
            np.character: (np.character, BinaryType()),
            np.bytes_: (np.bytes_, BinaryType()),
            np.string_: (np.bytes_, BinaryType()),
            bytes: (np.bytes_, BinaryType()),
            # integer
            np.int8: (np.int8, ByteType()),
            np.byte: (np.int8, ByteType()),
            np.int16: (np.int16, ShortType()),
            np.int32: (np.int32, IntegerType()),
            np.int64: (np.int64, LongType()),
            np.int: (np.int64, LongType()),
            int: (np.int64, LongType()),
            # floating
            np.float32: (np.float32, FloatType()),
            np.float: (np.float64, DoubleType()),
            np.float64: (np.float64, DoubleType()),
            float: (np.float64, DoubleType()),
            # string
            np.str: (np.unicode_, StringType()),
            np.unicode_: (np.unicode_, StringType()),
            str: (np.unicode_, StringType()),
            # bool
            np.bool: (np.bool, BooleanType()),
            bool: (np.bool, BooleanType()),
            # datetime
            np.datetime64: (np.datetime64, TimestampType()),
            datetime.datetime: (np.dtype("datetime64[ns]"), TimestampType()),
            # DateType
            datetime.date: (np.dtype("object"), DateType()),
            # DecimalType
            decimal.Decimal: (np.dtype("object"), DecimalType(38, 18)),
            # ArrayType
            np.ndarray: (np.dtype("object"), ArrayType(StringType())),
            List[bytes]: (np.dtype("object"), ArrayType(BinaryType())),
            List[np.character]: (np.dtype("object"), ArrayType(BinaryType())),
            List[np.bytes_]: (np.dtype("object"), ArrayType(BinaryType())),
            List[np.string_]: (np.dtype("object"), ArrayType(BinaryType())),
            List[bool]: (np.dtype("object"), ArrayType(BooleanType())),
            List[np.bool]: (np.dtype("object"), ArrayType(BooleanType())),
            List[datetime.date]: (np.dtype("object"), ArrayType(DateType())),
            List[np.int8]: (np.dtype("object"), ArrayType(ByteType())),
            List[np.byte]: (np.dtype("object"), ArrayType(ByteType())),
            List[decimal.Decimal]: (np.dtype("object"), ArrayType(DecimalType(38, 18))),
            List[float]: (np.dtype("object"), ArrayType(DoubleType())),
            List[np.float]: (np.dtype("object"), ArrayType(DoubleType())),
            List[np.float64]: (np.dtype("object"), ArrayType(DoubleType())),
            List[np.float32]: (np.dtype("object"), ArrayType(FloatType())),
            List[np.int32]: (np.dtype("object"), ArrayType(IntegerType())),
            List[int]: (np.dtype("object"), ArrayType(LongType())),
            List[np.int]: (np.dtype("object"), ArrayType(LongType())),
            List[np.int64]: (np.dtype("object"), ArrayType(LongType())),
            List[np.int16]: (np.dtype("object"), ArrayType(ShortType())),
            List[str]: (np.dtype("object"), ArrayType(StringType())),
            List[np.unicode_]: (np.dtype("object"), ArrayType(StringType())),
            List[datetime.datetime]: (np.dtype("object"), ArrayType(TimestampType())),
            List[np.datetime64]: (np.dtype("object"), ArrayType(TimestampType())),
            # CategoricalDtype
            CategoricalDtype(categories=["a", "b", "c"]): (
                CategoricalDtype(categories=["a", "b", "c"]),
                LongType(),
            ),
        }

        for numpy_or_python_type, (dtype, spark_type) in type_mapper.items():
            self.assertEqual(as_spark_type(numpy_or_python_type), spark_type)
            self.assertEqual(koalas_dtype(numpy_or_python_type), (dtype, spark_type))

        # Unsupported dtypes must raise TypeError with a clear message.
        with self.assertRaisesRegex(TypeError, "Type uint64 was not understood."):
            as_spark_type(np.dtype("uint64"))

        with self.assertRaisesRegex(TypeError, "Type object was not understood."):
            as_spark_type(np.dtype("object"))

        with self.assertRaisesRegex(TypeError, "Type uint64 was not understood."):
            koalas_dtype(np.dtype("uint64"))

        with self.assertRaisesRegex(TypeError, "Type object was not understood."):
            koalas_dtype(np.dtype("object"))

    @unittest.skipIf(not extension_dtypes_available, "The pandas extension types are not available")
    def test_as_spark_type_extension_dtypes(self):
        """Nullable pandas integer extension dtypes map to Spark integral types."""
        from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype

        type_mapper = {
            Int8Dtype(): ByteType(),
            Int16Dtype(): ShortType(),
            Int32Dtype(): IntegerType(),
            Int64Dtype(): LongType(),
        }

        for extension_dtype, spark_type in type_mapper.items():
            self.assertEqual(as_spark_type(extension_dtype), spark_type)
            self.assertEqual(koalas_dtype(extension_dtype), (extension_dtype, spark_type))

    @unittest.skipIf(
        not extension_object_dtypes_available, "The pandas extension object types are not available"
    )
    def test_as_spark_type_extension_object_dtypes(self):
        """BooleanDtype/StringDtype extension dtypes map to Boolean/String."""
        from pandas import BooleanDtype, StringDtype

        type_mapper = {
            BooleanDtype(): BooleanType(),
            StringDtype(): StringType(),
        }

        for extension_dtype, spark_type in type_mapper.items():
            self.assertEqual(as_spark_type(extension_dtype), spark_type)
            self.assertEqual(koalas_dtype(extension_dtype), (extension_dtype, spark_type))

    @unittest.skipIf(
        not extension_float_dtypes_available, "The pandas extension float types are not available"
    )
    def test_as_spark_type_extension_float_dtypes(self):
        """Nullable pandas float extension dtypes map to Float/Double."""
        from pandas import Float32Dtype, Float64Dtype

        type_mapper = {
            Float32Dtype(): FloatType(),
            Float64Dtype(): DoubleType(),
        }

        for extension_dtype, spark_type in type_mapper.items():
            self.assertEqual(as_spark_type(extension_dtype), spark_type)
            self.assertEqual(koalas_dtype(extension_dtype), (extension_dtype, spark_type))
| 38.232941 | 100 | 0.613453 |
acf36079a1b924fcf9cd1ce28287885e57dbe8e7 | 3,027 | py | Python | projects/wizard_of_wikipedia/wizard_transformer_ranker/wizard_dict.py | ricsinaruto/ParlAI | 733b627ae456d6b11a2fc4624088a781bc6c1d03 | [
"MIT"
] | 9 | 2021-01-27T22:10:45.000Z | 2021-11-09T23:47:46.000Z | projects/wizard_of_wikipedia/wizard_transformer_ranker/wizard_dict.py | ricsinaruto/ParlAI | 733b627ae456d6b11a2fc4624088a781bc6c1d03 | [
"MIT"
] | 3 | 2021-03-11T06:04:15.000Z | 2021-08-31T15:44:42.000Z | projects/wizard_of_wikipedia/wizard_transformer_ranker/wizard_dict.py | ricsinaruto/ParlAI | 733b627ae456d6b11a2fc4624088a781bc6c1d03 | [
"MIT"
] | 1 | 2019-07-21T08:27:10.000Z | 2019-07-21T08:27:10.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.build_data import modelzoo_path
from parlai.core.dict import DictionaryAgent
from collections import defaultdict
import copy
import os
import re
RETOK = re.compile(r'\w+|[^\w\s]|\n', re.UNICODE)
class WizardDictAgent(DictionaryAgent):
    """DictionaryAgent variant for the Wizard-of-Wikipedia ranker: custom
    special tokens (__PAD__/__SOC__/__UNK__) and a punctuation-splitting
    tokenizer in addition to 're' and 'whitespace' modes."""

    def __init__(self, opt, shared=None):
        # initialize fields
        self.opt = copy.deepcopy(opt)
        self.minfreq = opt.get('dict_minfreq', DictionaryAgent.default_minfreq)
        # Custom special tokens; note start and end share the same token.
        self.null_token = '__PAD__'
        self.end_token = '__SOC__'
        self.unk_token = '__UNK__'
        self.start_token = '__SOC__'
        self.tokenizer = opt.get('dict_tokenizer', 'whitespace')
        self.lower = opt.get('dict_lower',
                             DictionaryAgent.default_lower)
        self.maxtokens = opt.get('dict_maxtokens',
                                 DictionaryAgent.default_maxtokens)
        self.textfields = opt.get('dict_textfields',
                                  DictionaryAgent.default_textfields).split(",")

        if shared:
            # Share frequency and index maps across agent copies.
            self.freq = shared.get('freq', {})
            self.tok2ind = shared.get('tok2ind', {})
            self.ind2tok = shared.get('ind2tok', {})
        else:
            self.freq = defaultdict(int)
            self.tok2ind = {}
            self.ind2tok = {}

            if opt.get('dict_file') and os.path.isfile(opt['dict_file']):
                # load pre-existing dictionary
                self.load(opt['dict_file'])
            elif opt.get('dict_initpath'):
                # load seed dictionary
                opt['dict_initpath'] = modelzoo_path(opt.get('datapath'),
                                                     opt['dict_initpath'])
                self.load(opt['dict_initpath'])

        # Ensure the special tokens are always present in the vocabulary.
        self.add_token(self.null_token)
        self.add_token(self.start_token)
        self.add_token(self.end_token)
        self.add_token(self.unk_token)

        if not shared:
            if opt.get('dict_file'):
                self.save_path = opt['dict_file']

    def tokenize(self, text, building=False):
        """Returns a sequence of tokens from the iterable.

        Dispatches on ``self.tokenizer``: 're' uses RETOK, 'whitespace' splits
        on single spaces, anything else falls through to the punctuation
        splitter below.

        NOTE(review): in the fallback, replacing '.' with ' . ' turns '...'
        into dots separated by double spaces, so the subsequent
        replace('. . .', '...') can never match its single-space pattern —
        the ellipsis restore looks dead. Also, the two trailing
        replace(' ', ' ') calls are no-ops as written; they were likely
        multi-space collapsers ('   '/'  ' -> ' ') mangled in extraction.
        Confirm both against version control before relying on this output.
        """
        if self.lower:
            text = text.lower()
        if self.tokenizer == 're':
            return self.re_tokenize(text)
        elif self.tokenizer == 'whitespace':
            return text.split(' ')

        word_tokens = text.replace('.', ' . ').replace('. . .', '...')\
            .replace(',', ' , ').replace(';', ' ; ').replace(':', ' : ')\
            .replace('!', ' ! ').replace('?', ' ? ').replace(' ', ' ')\
            .replace(' ', ' ').strip().split(" ")
        return word_tokens

    def re_tokenize(self, text):
        """This splits along whitespace and punctuation and keeps the newline
        as a token in the returned list.
        """
        return RETOK.findall(text)
acf3611dd6d6a381239bfa2fc344e13e98083659 | 1,521 | py | Python | jetcam/usb_camera.py | viotemp1/jetcam | 7f46625bffb0a9b87016bd33f80308e3da6e843f | [
"MIT"
] | null | null | null | jetcam/usb_camera.py | viotemp1/jetcam | 7f46625bffb0a9b87016bd33f80308e3da6e843f | [
"MIT"
] | null | null | null | jetcam/usb_camera.py | viotemp1/jetcam | 7f46625bffb0a9b87016bd33f80308e3da6e843f | [
"MIT"
] | null | null | null | from .camera import Camera
import atexit
import cv2
import numpy as np
import threading
import traitlets
class USBCamera(Camera):
    """Camera source reading a USB (V4L2) device through GStreamer/OpenCV."""

    # Capture parameters, configurable as traitlets.
    capture_fps = traitlets.Integer(default_value=30)
    capture_width = traitlets.Integer(default_value=640)
    capture_height = traitlets.Integer(default_value=480)
    capture_device = traitlets.Integer(default_value=0)

    def __init__(self, *args, **kwargs):
        super(USBCamera, self).__init__(*args, **kwargs)
        try:
            self.cap = cv2.VideoCapture(self._gst_str(), cv2.CAP_GSTREAMER)

            re, image = self.cap.read()

            if not re:
                raise RuntimeError('Could not read image from camera.')
        except Exception as e:
            # Was a bare "except:": that swallowed KeyboardInterrupt/SystemExit
            # and discarded the original traceback. Chain the cause instead.
            raise RuntimeError(
                'Could not initialize camera. Please see error trace.') from e

        # Release the device on interpreter exit even if close() is never called.
        atexit.register(self.cap.release)

    def _gst_str(self):
        """Build the GStreamer pipeline string for the configured device."""
        return 'v4l2src device=/dev/video{} ! video/x-raw, width=(int){}, height=(int){}, framerate=(fraction){}/1 ! videoconvert ! video/x-raw, format=(string)BGR ! appsink'.format(self.capture_device, self.capture_width, self.capture_height, self.capture_fps)

    def _read(self):
        """Grab one frame and resize it to (self.width, self.height)."""
        re, image = self.cap.read()
        if re:
            image_resized = cv2.resize(image, (int(self.width), int(self.height)))
            return image_resized
        else:
            raise RuntimeError('Could not read image from camera')

    def close(self):
        """Release the underlying OpenCV capture device."""
        self.cap.release()
| 33.8 | 262 | 0.620644 |
acf36215e76c2275a605a9d857a9c7c02ba1213a | 3,889 | py | Python | palo_alto_pan_os/komand_palo_alto_pan_os/actions/add_to_policy/action.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | 1 | 2020-03-18T09:14:55.000Z | 2020-03-18T09:14:55.000Z | palo_alto_pan_os/komand_palo_alto_pan_os/actions/add_to_policy/action.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | 1 | 2021-02-23T23:57:37.000Z | 2021-02-23T23:57:37.000Z | palo_alto_pan_os/komand_palo_alto_pan_os/actions/add_to_policy/action.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | null | null | null | import komand
from .schema import AddToPolicyInput, AddToPolicyOutput
from komand.exceptions import PluginException
# Custom imports below
from komand_palo_alto_pan_os.util import util
class AddToPolicy(komand.Action):
    """Append values to an existing PAN-OS security-policy rule.

    Reads the rule's current configuration, merges the requested values into
    each list-valued key, rebuilds the rule element and writes it back via
    the PAN-OS XML API.
    """

    # Maps PAN-OS XML element names (keys) to plugin input parameter names
    # (values); run() looks the input up via params.get(value).
    _CONVERSION_KEY = {'source': 'source',
                       'destination': 'destination',
                       'service': 'service',
                       'application': 'application',
                       'source-user': 'source_user',
                       'to': 'src_zone',
                       'from': 'dst_zone',
                       'category': 'url_category',
                       'hip-profiles': 'hip_profiles',
                       'action': 'action'}

    def __init__(self):
        # super(self.__class__, ...) recurses forever if this class is ever
        # subclassed; name the class explicitly instead.
        super(AddToPolicy, self).__init__(
                name='add_to_policy',
                description='Add a rule to a PAN-OS security policy',
                input=AddToPolicyInput(),
                output=AddToPolicyOutput())

    def run(self, params={}):
        """Merge the given inputs into the named rule and push the update.

        Returns a dict with the API status/code/message; raises
        PluginException if the API response is missing the expected keys.
        """
        update = util.SecurityPolicy()
        rule_name = params.get('rule_name')
        # True -> operate on the active configuration, False -> candidate.
        policy_type = False
        if params.get('update_active_or_candidate_configuration') == 'active':
            policy_type = True

        # Set xpath to security polices
        xpath = "/config/devices/entry/vsys/entry/rulebase/security/rules/entry[@name='{0}']".format(rule_name)

        # Get current policy config (show_ = active, get_ = candidate)
        if policy_type:
            config_output = self.connection.request.show_(xpath=xpath)
        else:
            config_output = self.connection.request.get_(xpath=xpath)

        # Verify and extract needed keys
        current_config = update.extract_from_security_policy(policy=config_output)

        # Merge each requested input into the corresponding current value;
        # keys with no input keep their current configuration unchanged.
        key_list = ['source', 'destination', 'service',
                    'application', 'source-user', 'to',
                    'from', 'category', 'hip-profiles', 'action']

        new_policy = {}
        for i in key_list:
            value = self._CONVERSION_KEY[i]
            if params.get(value):
                new_policy[i] = update.add_to_key(current_config[i], params.get(value))
            else:
                new_policy[i] = current_config[i]

        # Build new element
        element = update.element_for_policy_update(rule_name=rule_name,
                                                   to=new_policy['to'],
                                                   from_=new_policy['from'],
                                                   source=new_policy['source'],
                                                   destination=new_policy['destination'],
                                                   service=new_policy['service'],
                                                   application=new_policy['application'],
                                                   category=new_policy['category'],
                                                   hip_profiles=new_policy['hip-profiles'],
                                                   source_user=new_policy['source-user'],
                                                   fire_wall_action=new_policy['action'])

        # Update policy
        output = self.connection.request.edit_(
            xpath=xpath,
            element=element)

        try:
            status = output['response']['response']['@status']
            code = output['response']['response']['@code']
            message = output['response']['response']['msg']
            return {"status": status, 'code': code, 'message': message}
        except KeyError:
            raise PluginException(cause='The output did not contain expected keys.',
                                  assistance='Contact support for help.',
                                  data=output)
| 45.752941 | 111 | 0.505786 |
acf3627b1f6f7998b5010a0f40509964b3d489c8 | 5,325 | py | Python | pyke/krb_compiler/kfbparser.py | alimon/pyke3 | fc02c50c1c658dce0dc4b6ffa33cb819be03f6e2 | [
"MIT"
] | 5 | 2021-05-30T19:15:21.000Z | 2022-02-09T20:13:17.000Z | pyke/krb_compiler/kfbparser.py | alimon/pyke3 | fc02c50c1c658dce0dc4b6ffa33cb819be03f6e2 | [
"MIT"
] | 1 | 2021-05-30T16:38:09.000Z | 2021-05-30T16:38:09.000Z | pyke/krb_compiler/kfbparser.py | alimon/pyke3 | fc02c50c1c658dce0dc4b6ffa33cb819be03f6e2 | [
"MIT"
] | 2 | 2021-01-31T09:22:47.000Z | 2021-03-13T17:33:53.000Z | # $Id: kfbparser.py 49507964ae64 2010-03-27 mtnyogi $
# coding=utf-8
#
# Copyright © 2008 Bruce Frederiksen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" See http://www.dabeaz.com/ply/ply.html for syntax of grammer definitions.
"""
import os, os.path
from pyke.krb_compiler.ply import yacc
from pyke.krb_compiler import scanner
from pyke import fact_base
tokens = scanner.kfb_tokens
def p_file(p):
    ''' file : nl_opt facts_opt
        facts_opt :
        facts_opt : facts nl_opt
        facts : fact
        facts : facts NL_TOK fact
    '''
    # PLY reads the grammar from the docstring above — do not edit it.
    # Purely structural rules; the fact rules add to Fact_base themselves.
    pass
def p_fact0(p):
    ''' fact : IDENTIFIER_TOK LP_TOK RP_TOK '''
    # Zero-argument fact, e.g. "name()": stored with an empty tuple.
    Fact_base.add_universal_fact(p[1], ())
def p_fact1(p):
    ''' fact : IDENTIFIER_TOK LP_TOK data_list RP_TOK '''
    # Fact with arguments: freeze the parsed data_list into a tuple.
    Fact_base.add_universal_fact(p[1], tuple(p[3]))
def p_none(p):
    ''' data : NONE_TOK
        comma_opt :
        comma_opt : ','
        nl_opt :
        nl_opt : NL_TOK
    '''
    # Shared rule for every production whose semantic value is None.
    p[0] = None
def p_number(p):
    ''' data : NUMBER_TOK
    '''
    # The scanner already converted the token to an int/float.
    p[0] = p[1]
def p_string(p):
    ''' data : STRING_TOK
    '''
    # SECURITY NOTE(review): eval() on raw token text — acceptable only if the
    # scanner guarantees STRING_TOK is a string literal; verify upstream.
    p[0] = eval(p[1])
def p_quoted_last(p):
    ''' data : IDENTIFIER_TOK
    '''
    # A bare identifier is kept as its string name.
    p[0] = p[1]
def p_false(p):
    ''' data : FALSE_TOK
    '''
    # Literal boolean False.
    p[0] = False
def p_true(p):
    ''' data : TRUE_TOK
    '''
    # Literal boolean True.
    p[0] = True
def p_empty_tuple(p):
    ''' data : LP_TOK RP_TOK
    '''
    # "()" with no contents.
    p[0] = ()
def p_start_list(p):
    ''' data_list : data
    '''
    # First element: start a fresh list that p_append_list will extend.
    p[0] = [p[1]]
def p_append_list(p):
    ''' data_list : data_list ',' data
    '''
    # Grow the existing list in place and propagate it as this rule's value.
    items = p[1]
    items.append(p[len(p) - 1])
    p[0] = items
def p_tuple(p):
    ''' data : LP_TOK data_list comma_opt RP_TOK '''
    # Non-empty tuple literal, optional trailing comma.
    p[0] = tuple(p[2])
def p_error(t):
    # t is None on unexpected end-of-input; otherwise include the offending
    # token's position from the scanner in the SyntaxError details.
    if t is None:
        raise SyntaxError("invalid syntax", scanner.syntaxerror_params())
    else:
        raise SyntaxError("invalid syntax",
                          scanner.syntaxerror_params(t.lexpos, t.lineno))
parser = None
def init(this_module, check_tables = False, debug = 0):
    """Build (once) the module-global PLY parser for .kfb files.

    In debug mode the tables are regenerated every run with a debug trace;
    otherwise the pre-generated kfbparser_tables module is reused, after an
    optional mtime check of the tables against this source file.
    """
    global parser
    if parser is None:
        outputdir = os.path.dirname(this_module.__file__)
        if debug:
            parser = yacc.yacc(module=this_module, write_tables=0,
                               debug=debug, debugfile='kfbparser.yacc.out',
                               outputdir=outputdir)
        else:
            if check_tables:
                kfbparser_mtime = os.path.getmtime(this_module.__file__)
                tables_name = os.path.join(outputdir, 'kfbparser_tables.py')
                try:
                    ok = os.path.getmtime(tables_name) >= kfbparser_mtime
                except OSError:
                    ok = False
                if not ok:
                    #print "regenerating kfbparser_tables"
                    # Stale tables: delete the .py and any compiled variants
                    # so yacc regenerates them below.
                    try: os.remove(tables_name)
                    except OSError: pass
                    try: os.remove(tables_name + 'c')
                    except OSError: pass
                    try: os.remove(tables_name + 'o')
                    except OSError: pass
            parser = yacc.yacc(module=this_module, debug=0,
                               optimize=1, write_tables=1,
                               tabmodule='pyke.krb_compiler.kfbparser_tables',
                               outputdir=outputdir)
# Use the first line for normal use, the second for testing changes in the
# grammer (the first line does not report grammer errors!).
def parse(this_module, filename, check_tables = False, debug = 0):
#def parse(this_module, filename, check_tables = False, debug = 1):
    '''
        >>> from pyke.krb_compiler import kfbparser
        >>> kfbparser.parse(kfbparser,
        ...                 os.path.join(os.path.dirname(__file__),
        ...                              'TEST/kfbparse_test.kfb'),
        ...                 True)
        <fact_base kfbparse_test>
    '''
    # Parse *filename* (a .kfb file) and return the populated fact_base named
    # after the file (extension stripped).
    global Fact_base
    init(this_module, check_tables, debug)
    name = os.path.basename(filename)[:-4]
    # The p_fact* grammar rules add facts into this module-global fact base.
    Fact_base = fact_base.fact_base(None, name, False)
    with open(filename) as f:
        scanner.init(scanner, debug, check_tables, True)
        scanner.lexer.lineno = 1
        scanner.lexer.filename = filename
        #parser.restart()
        parser.parse(f.read(), lexer=scanner.lexer, tracking=True, debug=debug)
    ans = Fact_base
    # Clear the global so a stray rule invocation can't touch a stale base.
    Fact_base = None
    return ans
| 31.323529 | 79 | 0.605446 |
acf3634d57bb42b137b55937f9492df3d8bb2108 | 3,648 | py | Python | labour/migrations/0018_auto_20160202_2235.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 13 | 2015-11-29T12:19:12.000Z | 2021-02-21T15:42:11.000Z | labour/migrations/0018_auto_20160202_2235.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 23 | 2015-04-29T19:43:34.000Z | 2021-02-10T05:50:17.000Z | labour/migrations/0018_auto_20160202_2235.py | darkismus/kompassi | 35dea2c7af2857a69cae5c5982b48f01ba56da1f | [
"CC-BY-3.0"
] | 11 | 2015-09-20T18:59:00.000Z | 2020-02-07T08:47:34.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-02 20:35
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations; edit with care.
    # Each AlterField below only updates the slug fields' Finnish help text
    # and attaches a [a-z0-9-]+ validator; no data migration is involved.

    dependencies = [
        ('labour', '0017_auto_20160201_0050'),
    ]

    operations = [
        migrations.AlterField(
            model_name='alternativesignupform',
            name='slug',
            field=models.CharField(help_text='Tekninen nimi eli "slug" n\xe4kyy URL-osoitteissa. Sallittuja merkkej\xe4 ovat pienet kirjaimet, numerot ja v\xe4liviiva. Teknist\xe4 nime\xe4 ei voi muuttaa luomisen j\xe4lkeen.', max_length=255, validators=[django.core.validators.RegexValidator(message='Tekninen nimi saa sis\xe4lt\xe4\xe4 vain pieni\xe4 kirjaimia, numeroita sek\xe4 v\xe4liviivoja.', regex='[a-z0-9-]+')], verbose_name='Tekninen nimi'),
        ),
        migrations.AlterField(
            model_name='job',
            name='slug',
            field=models.CharField(help_text='Tekninen nimi eli "slug" n\xe4kyy URL-osoitteissa. Sallittuja merkkej\xe4 ovat pienet kirjaimet, numerot ja v\xe4liviiva. Teknist\xe4 nime\xe4 ei voi muuttaa luomisen j\xe4lkeen.', max_length=255, validators=[django.core.validators.RegexValidator(message='Tekninen nimi saa sis\xe4lt\xe4\xe4 vain pieni\xe4 kirjaimia, numeroita sek\xe4 v\xe4liviivoja.', regex='[a-z0-9-]+')], verbose_name='Tekninen nimi'),
        ),
        migrations.AlterField(
            model_name='jobcategory',
            name='slug',
            field=models.CharField(help_text='Tekninen nimi eli "slug" n\xe4kyy URL-osoitteissa. Sallittuja merkkej\xe4 ovat pienet kirjaimet, numerot ja v\xe4liviiva. Teknist\xe4 nime\xe4 ei voi muuttaa luomisen j\xe4lkeen.', max_length=255, validators=[django.core.validators.RegexValidator(message='Tekninen nimi saa sis\xe4lt\xe4\xe4 vain pieni\xe4 kirjaimia, numeroita sek\xe4 v\xe4liviivoja.', regex='[a-z0-9-]+')], verbose_name='Tekninen nimi'),
        ),
        migrations.AlterField(
            model_name='perk',
            name='slug',
            field=models.CharField(help_text='Tekninen nimi eli "slug" n\xe4kyy URL-osoitteissa. Sallittuja merkkej\xe4 ovat pienet kirjaimet, numerot ja v\xe4liviiva. Teknist\xe4 nime\xe4 ei voi muuttaa luomisen j\xe4lkeen.', max_length=255, validators=[django.core.validators.RegexValidator(message='Tekninen nimi saa sis\xe4lt\xe4\xe4 vain pieni\xe4 kirjaimia, numeroita sek\xe4 v\xe4liviivoja.', regex='[a-z0-9-]+')], verbose_name='Tekninen nimi'),
        ),
        migrations.AlterField(
            model_name='personnelclass',
            name='slug',
            field=models.CharField(help_text='Tekninen nimi eli "slug" n\xe4kyy URL-osoitteissa. Sallittuja merkkej\xe4 ovat pienet kirjaimet, numerot ja v\xe4liviiva. Teknist\xe4 nime\xe4 ei voi muuttaa luomisen j\xe4lkeen.', max_length=255, validators=[django.core.validators.RegexValidator(message='Tekninen nimi saa sis\xe4lt\xe4\xe4 vain pieni\xe4 kirjaimia, numeroita sek\xe4 v\xe4liviivoja.', regex='[a-z0-9-]+')], verbose_name='Tekninen nimi'),
        ),
        migrations.AlterField(
            model_name='qualification',
            name='slug',
            # Only this field is additionally unique.
            field=models.CharField(help_text='Tekninen nimi eli "slug" n\xe4kyy URL-osoitteissa. Sallittuja merkkej\xe4 ovat pienet kirjaimet, numerot ja v\xe4liviiva. Teknist\xe4 nime\xe4 ei voi muuttaa luomisen j\xe4lkeen.', max_length=255, unique=True, validators=[django.core.validators.RegexValidator(message='Tekninen nimi saa sis\xe4lt\xe4\xe4 vain pieni\xe4 kirjaimia, numeroita sek\xe4 v\xe4liviivoja.', regex='[a-z0-9-]+')], verbose_name='Tekninen nimi'),
        ),
    ]
| 77.617021 | 465 | 0.708882 |
acf3639bfdaa054497c7fe272ee1ae5a65e43686 | 1,332 | py | Python | blogWebApp/www/config.py | Shelmanxie/blogAppWeb | c65273e6080a3d1cec2c780def014123160c2212 | [
"Apache-2.0"
] | 1 | 2017-11-14T03:48:45.000Z | 2017-11-14T03:48:45.000Z | blogWebApp/www/config.py | Shelmanxie/blogAppWeb | c65273e6080a3d1cec2c780def014123160c2212 | [
"Apache-2.0"
] | null | null | null | blogWebApp/www/config.py | Shelmanxie/blogAppWeb | c65273e6080a3d1cec2c780def014123160c2212 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Configuration
'''
__author__ = 'shelman'
import config_default
class Dict(dict):
    '''
    Dictionary subclass that also supports attribute-style access (x.y).
    '''

    def __init__(self, names=(), values=(), **kw):
        super(Dict, self).__init__(**kw)
        for name, value in zip(names, values):
            self[name] = value

    def __getattr__(self, key):
        # Fall back to item lookup; report a missing key as AttributeError
        # so the object still behaves like a normal attribute access.
        if key in self:
            return self[key]
        raise AttributeError(r"'Dict' object has no attribute '%s'" % key)

    def __setattr__(self, key, value):
        # Attribute assignment writes straight into the dict.
        self[key] = value
def merge(defaults, override):
    '''
    Recursively overlay *override* onto *defaults*.

    Only keys present in *defaults* appear in the result; extra keys in
    *override* are ignored. Nested dicts are merged the same way.
    '''
    merged = {}
    for key, default_value in defaults.items():
        if key not in override:
            merged[key] = default_value
        elif isinstance(default_value, dict):
            merged[key] = merge(default_value, override[key])
        else:
            merged[key] = override[key]
    return merged
def toDict(d):
    # Recursively convert a plain dict into a Dict so nested values are
    # reachable with attribute syntax too.
    converted = Dict()
    for key, value in d.items():
        if isinstance(value, dict):
            converted[key] = toDict(value)
        else:
            converted[key] = value
    return converted
# Start from the defaults, overlay the optional local override module, then
# expose the result with attribute-style access.
configs = config_default.configs

try:
    import config_override
    configs = merge(configs, config_override.configs)
except ImportError:
    pass

configs = toDict(configs)
# toDict mainly adds attribute-style access: a_dict.key, equivalent to a_dict['key']

if __name__== '__main__':
    print(configs)
    print('\n %s'% configs.debug)  # IDE warning about .debug can be ignored
acf363abb2b8a9fd6159ad6ec7bba2f1fa45d8da | 21,015 | py | Python | autotest/ogr/ogr_georss.py | jpapadakis/gdal | f07aa15fd65af36b04291303cc6834c87f662814 | [
"MIT"
] | 3,100 | 2015-01-02T10:33:40.000Z | 2022-03-31T02:06:51.000Z | autotest/ogr/ogr_georss.py | jpapadakis/gdal | f07aa15fd65af36b04291303cc6834c87f662814 | [
"MIT"
] | 3,496 | 2015-01-06T16:53:30.000Z | 2022-03-31T20:18:51.000Z | autotest/ogr/ogr_georss.py | jpapadakis/gdal | f07aa15fd65af36b04291303cc6834c87f662814 | [
"MIT"
] | 2,036 | 2015-01-08T20:22:12.000Z | 2022-03-31T10:24:08.000Z | #!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id: ogr_georss.py 15604 2008-10-26 11:21:34Z rouault $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test GeoRSS driver functionality.
# Author: Even Rouault <even dot rouault at spatialys.com>
#
###############################################################################
# Copyright (c) 2008-2013, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import gdaltest
from osgeo import ogr
from osgeo import osr
from osgeo import gdal
import pytest
pytestmark = pytest.mark.require_driver('GeoRSS')
###############################################################################
@pytest.fixture(autouse=True, scope='module')
def startup_and_cleanup():
    """Module fixture: detect driver capabilities, then clean up artifacts.

    Sets gdaltest.georss_read_support, gdaltest.have_gml_reader and the
    shared gdaltest.atom_field_values list used by the ATOM tests; after the
    module's tests run, removes the files written under tmp/ and any
    '*.resolved.gml' files left next to the GeoRSS test data.
    """

    # GeoRSS read support is optional (depends on how GDAL was built).
    ds = ogr.Open('data/georss/atom_rfc_sample.xml')
    if ds is None:
        gdaltest.georss_read_support = 0
    else:
        gdaltest.georss_read_support = 1
    ds = None

    # GML geometry decoding needs the GML reader; probe it best-effort.
    gdaltest.have_gml_reader = 0
    try:
        ds = ogr.Open('data/gml/ionic_wfs.gml')
        if ds is not None:
            gdaltest.have_gml_reader = 1
        ds = None
    except Exception:
        # Was a bare "except:"; keep the best-effort behavior but don't
        # swallow KeyboardInterrupt/SystemExit.
        pass

    gdaltest.atom_field_values = [('title', 'Atom draft-07 snapshot', ogr.OFTString),
                                  ('link_rel', 'alternate', ogr.OFTString),
                                  ('link_type', 'text/html', ogr.OFTString),
                                  ('link_href', 'http://example.org/2005/04/02/atom', ogr.OFTString),
                                  ('link2_rel', 'enclosure', ogr.OFTString),
                                  ('link2_type', 'audio/mpeg', ogr.OFTString),
                                  ('link2_length', '1337', ogr.OFTInteger),
                                  ('link2_href', 'http://example.org/audio/ph34r_my_podcast.mp3', ogr.OFTString),
                                  ('id', 'tag:example.org,2003:3.2397', ogr.OFTString),
                                  ('updated', '2005/07/31 12:29:29+00', ogr.OFTDateTime),
                                  ('published', '2003/12/13 08:29:29-04', ogr.OFTDateTime),
                                  ('author_name', 'Mark Pilgrim', ogr.OFTString),
                                  ('author_uri', 'http://example.org/', ogr.OFTString),
                                  ('author_email', 'f8dy@example.com', ogr.OFTString),
                                  ('contributor_name', 'Sam Ruby', ogr.OFTString),
                                  ('contributor2_name', 'Joe Gregorio', ogr.OFTString),
                                  ('content_type', 'xhtml', ogr.OFTString),
                                  ('content_xml_lang', 'en', ogr.OFTString),
                                  ('content_xml_base', 'http://diveintomark.org/', ogr.OFTString)]

    yield

    # Remove the files created by the write-side tests.
    list_files = ['tmp/test_rss2.xml', 'tmp/test_atom.xml', 'tmp/test32631.rss', 'tmp/broken.rss', 'tmp/nonstandard.rss']
    for filename in list_files:
        try:
            os.remove(filename)
        except OSError:
            pass

    # xlink resolution writes '<name>.resolved.gml' next to the inputs in
    # data/georss/. The original listed 'data' while unlinking from
    # 'data/georss/', so the leftovers were never found — list the directory
    # the files are actually created in.
    files = os.listdir('data/georss')
    for filename in files:
        if len(filename) > 13 and filename[-13:] == '.resolved.gml':
            os.unlink('data/georss/' + filename)
###############################################################################
# Used by ogr_georss_1 and ogr_georss_1ter
def ogr_georss_test_atom(filename):
    """Open *filename* as GeoRSS and verify the shared ATOM sample feature.

    Used by test_ogr_georss_1/1_atom_ns/1ter: checks that the layer has no
    SRS and that every field in gdaltest.atom_field_values decodes exactly.
    """
    if not gdaltest.georss_read_support:
        pytest.skip()

    ds = ogr.Open(filename)

    lyr = ds.GetLayerByName('georss')
    # ATOM documents carry no geometry, hence no spatial reference.
    assert lyr.GetSpatialRef() is None, 'No spatial ref expected'

    feat = lyr.GetNextFeature()

    for field_value in gdaltest.atom_field_values:
        assert feat.GetFieldAsString(field_value[0]) == field_value[1], \
            ('For field "%s", got "%s" instead of "%s"' % (field_value[0], feat.GetFieldAsString(field_value[0]), field_value[1]))

    # The xhtml content must be preserved verbatim, wrapping <div> included.
    assert feat.GetFieldAsString('content').find('<div xmlns="http://www.w3.org/1999/xhtml">') != -1, \
        ('For field "%s", got "%s"' % ('content', feat.GetFieldAsString('content')))
###############################################################################
# Test reading an ATOM document without any geometry
def test_ogr_georss_1():
    # Plain ATOM 1.0 document without an explicit atom: namespace prefix.
    return ogr_georss_test_atom('data/georss/atom_rfc_sample.xml')
###############################################################################
# Test reading an ATOM document with atom: prefiw
def test_ogr_georss_1_atom_ns():
    # Same ATOM document but with all elements prefixed with "atom:".
    return ogr_georss_test_atom('data/georss/atom_rfc_sample_atom_ns.xml')
###############################################################################
# Test writing a Atom 1.0 document (doesn't need read support)
def test_ogr_georss_1bis():
    """Write an ATOM 1.0 document (write support only; no read needed)."""

    try:
        os.remove('tmp/test_atom.xml')
    except OSError:
        pass

    ds = ogr.GetDriverByName('GeoRSS').CreateDataSource('tmp/test_atom.xml', options=['FORMAT=ATOM'])
    lyr = ds.CreateLayer('georss')

    # Recreate the same schema as the reference sample so test_ogr_georss_1ter
    # can validate the round trip with the shared field list.
    for field_value in gdaltest.atom_field_values:
        lyr.CreateField(ogr.FieldDefn(field_value[0], field_value[2]))
    lyr.CreateField(ogr.FieldDefn('content', ogr.OFTString))

    dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
    for field_value in gdaltest.atom_field_values:
        dst_feat.SetField(field_value[0], field_value[1])
    dst_feat.SetField('content', '<div xmlns="http://www.w3.org/1999/xhtml"><p><i>[Update: The Atom draft is finished.]</i></p></div>')

    assert lyr.CreateFeature(dst_feat) == 0, 'CreateFeature failed.'

    ds = None
###############################################################################
# Test reading document created at previous step
def test_ogr_georss_1ter():
    # Round trip: read back the ATOM file written by test_ogr_georss_1bis.
    return ogr_georss_test_atom('tmp/test_atom.xml')
###############################################################################
# Common for ogr_georss_2 and ogr_georss_3
def ogr_georss_test_rss(filename, only_first_feature):
    """Check the four GeoRSS sample features (point, line, polygon, box).

    When only_first_feature is True the geometry comparison is skipped for
    features after the first (used for dialects such as W3C Geo that can
    only encode points); field values are always checked.
    """
    if not gdaltest.georss_read_support:
        pytest.skip()

    ds = ogr.Open(filename)
    assert ds is not None

    lyr = ds.GetLayer(0)

    srs = osr.SpatialReference()
    srs.SetWellKnownGeogCS('WGS84')

    # GeoRSS is always WGS84, with lat/long axis order (mapping [2, 1]).
    assert lyr.GetSpatialRef() is not None and lyr.GetSpatialRef().IsSame(srs, options = ['IGNORE_DATA_AXIS_TO_SRS_AXIS_MAPPING=YES']), \
        'SRS is not the one expected.'
    assert lyr.GetSpatialRef().GetDataAxisToSRSAxisMapping() == [2, 1]

    feat = lyr.GetNextFeature()
    expected_wkt = 'POINT (2 49)'
    assert feat.GetGeometryRef().ExportToWkt() == expected_wkt, \
        ('%s' % feat.GetGeometryRef().ExportToWkt())
    assert feat.GetFieldAsString('title') == 'A point'
    assert feat.GetFieldAsString('author') == 'Author'
    assert feat.GetFieldAsString('link') == 'http://gdal.org'
    assert feat.GetFieldAsString('pubDate') == '2008/12/07 20:13:00+02'
    assert feat.GetFieldAsString('category') == 'First category'
    assert feat.GetFieldAsString('category_domain') == 'first_domain'
    assert feat.GetFieldAsString('category2') == 'Second category'
    assert feat.GetFieldAsString('category2_domain') == 'second_domain'

    # "only_first_feature is not False" short-circuits the geometry check for
    # the remaining features when the caller passed True.
    feat = lyr.GetNextFeature()
    expected_wkt = 'LINESTRING (2 48,2.1 48.1,2.2 48.0)'
    assert only_first_feature is not False or feat.GetGeometryRef().ExportToWkt() == expected_wkt, \
        ('%s' % feat.GetGeometryRef().ExportToWkt())
    assert feat.GetFieldAsString('title') == 'A line'

    feat = lyr.GetNextFeature()
    expected_wkt = 'POLYGON ((2 50,2.1 50.1,2.2 48.1,2.1 46.1,2 50))'
    assert only_first_feature is not False or feat.GetGeometryRef().ExportToWkt() == expected_wkt, \
        ('%s' % feat.GetGeometryRef().ExportToWkt())
    assert feat.GetFieldAsString('title') == 'A polygon'

    feat = lyr.GetNextFeature()
    expected_wkt = 'POLYGON ((2 49,2.0 49.5,2.2 49.5,2.2 49.0,2 49))'
    assert only_first_feature is not False or feat.GetGeometryRef().ExportToWkt() == expected_wkt, \
        ('%s' % feat.GetGeometryRef().ExportToWkt())
    assert feat.GetFieldAsString('title') == 'A box'
###############################################################################
# Test reading a RSS 2.0 document with GeoRSS simple geometries
def test_ogr_georss_2():
    # RSS 2.0 with GeoRSS Simple encoding (georss:point/line/polygon/box).
    return ogr_georss_test_rss('data/georss/test_georss_simple.xml', False)
###############################################################################
# Test reading a RSS 2.0 document with GeoRSS GML geometries
def test_ogr_georss_3():
    # RSS 2.0 with GeoRSS GML encoding; requires the GML reader.
    if not gdaltest.have_gml_reader:
        pytest.skip()

    return ogr_georss_test_rss('data/georss/test_georss_gml.xml', False)
###############################################################################
# Test writing a RSS 2.0 document (doesn't need read support)
def ogr_georss_create(filename, options):
    """Create *filename* as a GeoRSS datasource with the given creation
    options and write the four reference features (point, line, polygon,
    box) that ogr_georss_test_rss() verifies on read-back."""
    try:
        os.remove(filename)
    except OSError:
        pass
    ds = ogr.GetDriverByName('GeoRSS').CreateDataSource(filename, options=options)
    lyr = ds.CreateLayer('georss')

    lyr.CreateField(ogr.FieldDefn('title', ogr.OFTString))
    lyr.CreateField(ogr.FieldDefn('author', ogr.OFTString))
    lyr.CreateField(ogr.FieldDefn('link', ogr.OFTString))
    lyr.CreateField(ogr.FieldDefn('pubDate', ogr.OFTDateTime))
    lyr.CreateField(ogr.FieldDefn('description', ogr.OFTString))
    lyr.CreateField(ogr.FieldDefn('category', ogr.OFTString))
    lyr.CreateField(ogr.FieldDefn('category_domain', ogr.OFTString))
    lyr.CreateField(ogr.FieldDefn('category2', ogr.OFTString))
    lyr.CreateField(ogr.FieldDefn('category2_domain', ogr.OFTString))

    # Only the first feature carries the full set of RSS item fields.
    dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
    dst_feat.SetField('title', 'A point')
    dst_feat.SetField('author', 'Author')
    dst_feat.SetField('link', 'http://gdal.org')
    dst_feat.SetField('pubDate', '2008/12/07 20:13:00+02')
    dst_feat.SetField('category', 'First category')
    dst_feat.SetField('category_domain', 'first_domain')
    dst_feat.SetField('category2', 'Second category')
    dst_feat.SetField('category2_domain', 'second_domain')
    dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (2 49)'))

    assert lyr.CreateFeature(dst_feat) == 0, 'CreateFeature failed.'

    dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
    dst_feat.SetField('title', 'A line')
    dst_feat.SetField('author', 'Author')
    dst_feat.SetField('link', 'http://gdal.org')
    dst_feat.SetField('pubDate', '2008/12/07 20:13:00+02')
    dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING (2 48,2.1 48.1,2.2 48.0)'))

    assert lyr.CreateFeature(dst_feat) == 0, 'CreateFeature failed.'

    dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
    dst_feat.SetField('title', 'A polygon')
    dst_feat.SetField('author', 'Author')
    dst_feat.SetField('link', 'http://gdal.org')
    dst_feat.SetField('pubDate', '2008/12/07 20:13:00+02')
    dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON ((2 50,2.1 50.1,2.2 48.1,2.1 46.1,2 50))'))

    assert lyr.CreateFeature(dst_feat) == 0, 'CreateFeature failed.'

    dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
    dst_feat.SetField('title', 'A box')
    dst_feat.SetField('author', 'Author')
    dst_feat.SetField('link', 'http://gdal.org')
    dst_feat.SetField('pubDate', '2008/12/07 20:13:00+02')
    dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON ((2 49,2.0 49.5,2.2 49.5,2.2 49.0,2 49))'))

    assert lyr.CreateFeature(dst_feat) == 0, 'CreateFeature failed.'

    ds = None
###############################################################################
# Test writing a RSS 2.0 document in Simple dialect (doesn't need read support)
def test_ogr_georss_4():
    """Write a RSS 2.0 document in the default Simple dialect and check that
    the point came out as a <georss:point> element."""
    ogr_georss_create('tmp/test_rss2.xml', [])

    # Close the handle promptly: the bare open().read() leaked it, which can
    # keep the file locked on Windows for the read-back tests that follow.
    with open('tmp/test_rss2.xml') as f:
        content = f.read()
    assert content.find('<georss:point>49 2') != -1, ('%s' % content)
###############################################################################
# Test reading document created at previous step
def test_ogr_georss_5():
    # Read back the Simple-dialect file written by test_ogr_georss_4.
    return ogr_georss_test_rss('tmp/test_rss2.xml', False)
###############################################################################
# Test writing a RSS 2.0 document in GML dialect (doesn't need read support)
def test_ogr_georss_6():
    """Write a RSS 2.0 document in the GML dialect and check the encoding."""
    ogr_georss_create('tmp/test_rss2.xml', ['GEOM_DIALECT=GML'])

    # with-open: close the handle before the read-back test reopens the file.
    with open('tmp/test_rss2.xml') as f:
        content = f.read()
    assert content.find('<georss:where><gml:Point><gml:pos>49 2') != -1, \
        ('%s' % content)
###############################################################################
# Test reading document created at previous step
def test_ogr_georss_7():
    # Read back the GML-dialect file written by test_ogr_georss_6.
    if not gdaltest.have_gml_reader:
        pytest.skip()

    return ogr_georss_test_rss('tmp/test_rss2.xml', False)
###############################################################################
# Test writing a RSS 2.0 document in W3C Geo dialect (doesn't need read support)
def test_ogr_georss_8():
    """Write a RSS 2.0 document in the W3C Geo dialect (points only) and
    check the geo:lat/geo:long encoding."""
    ogr_georss_create('tmp/test_rss2.xml', ['GEOM_DIALECT=W3C_GEO'])

    # with-open: close the handle before the read-back test reopens the file.
    with open('tmp/test_rss2.xml') as f:
        content = f.read()
    assert not (content.find('<geo:lat>49') == -1 or content.find('<geo:long>2') == -1), \
        ('%s' % content)
###############################################################################
# Test reading document created at previous step
def test_ogr_georss_9():
    # W3C Geo can only encode points, so only the first feature's geometry
    # is checked on read-back.
    return ogr_georss_test_rss('tmp/test_rss2.xml', True)
###############################################################################
# Test writing a RSS 2.0 document in GML dialect with EPSG:32631
def test_ogr_georss_10():
    """Projected SRS (EPSG:32631): rejected without the GML dialect, then
    accepted and encoded with srsName when GEOM_DIALECT=GML."""

    try:
        os.remove('tmp/test32631.rss')
    except OSError:
        pass

    srs = osr.SpatialReference()
    srs.ImportFromEPSG(32631)

    ds = ogr.GetDriverByName('GeoRSS').CreateDataSource('tmp/test32631.rss')
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    try:
        lyr = ds.CreateLayer('georss', srs=srs)
    except Exception:
        # Was a bare "except:"; only driver errors should be tolerated here,
        # not KeyboardInterrupt/SystemExit.
        lyr = None
    gdal.PopErrorHandler()
    assert lyr is None, 'should not have accepted EPSG:32631 with GEOM_DIALECT != GML'
    ds = None

    try:
        os.remove('tmp/test32631.rss')
    except OSError:
        pass

    ds = ogr.GetDriverByName('GeoRSS').CreateDataSource('tmp/test32631.rss', options=['GEOM_DIALECT=GML'])
    lyr = ds.CreateLayer('georss', srs=srs)

    dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
    dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (500000 4000000)'))

    assert lyr.CreateFeature(dst_feat) == 0, 'CreateFeature failed.'

    ds = None

    # with-open: close the handle before test_ogr_georss_11 reopens the file.
    with open('tmp/test32631.rss') as f:
        content = f.read()
    assert content.find('<georss:where><gml:Point srsName="urn:ogc:def:crs:EPSG::32631"><gml:pos>500000 4000000') != -1, \
        ('%s' % content)
###############################################################################
# Test reading document created at previous step
def test_ogr_georss_11():
    """Read back the EPSG:32631 file written by test_ogr_georss_10 and check
    that the SRS (axis order included) and the geometry survived."""
    if not gdaltest.georss_read_support:
        pytest.skip()
    if not gdaltest.have_gml_reader:
        pytest.skip()

    ds = ogr.Open('tmp/test32631.rss')
    lyr = ds.GetLayer(0)

    srs = osr.SpatialReference()
    srs.ImportFromEPSG(32631)

    assert lyr.GetSpatialRef() is not None and lyr.GetSpatialRef().IsSame(srs), \
        'SRS is not the one expected.'

    # Projected CRS must keep the easting/northing axis definition.
    if lyr.GetSpatialRef().ExportToWkt().find('AXIS["Easting",EAST],AXIS["Northing",NORTH]') == -1:
        print(('%s' % lyr.GetSpatialRef().ExportToWkt()))
        pytest.fail('AXIS definition expected is AXIS["Easting",EAST],AXIS["Northing",NORTH]!')

    feat = lyr.GetNextFeature()
    expected_wkt = 'POINT (500000 4000000)'
    assert feat.GetGeometryRef().ExportToWkt() == expected_wkt, \
        ('%s' % feat.GetGeometryRef().ExportToWkt())
###############################################################################
# Test various broken documents
def test_ogr_georss_12():
    """Exercise the reader against various malformed documents."""
    if not gdaltest.georss_read_support:
        pytest.skip()

    # Use "with open" so the handle is closed before OGR reopens the same
    # file (the original bare open().write() leaked the handle, which can
    # keep the file locked on Windows).

    # Truncated/invalid XML: the datasource must not open at all.
    with open('tmp/broken.rss', 'wt') as f:
        f.write('<?xml version="1.0"?><rss><item><a></item></rss>')
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    ds = ogr.Open('tmp/broken.rss')
    gdal.PopErrorHandler()
    assert ds is None

    # A georss:box with only 3 values: the feature parses, geometry is dropped.
    with open('tmp/broken.rss', 'wt') as f:
        f.write('<?xml version="1.0"?><rss><channel><item><georss:box>49 2 49.5</georss:box></item></channel></rss>')
    ds = ogr.Open('tmp/broken.rss')
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    feat = ds.GetLayer(0).GetNextFeature()
    gdal.PopErrorHandler()
    assert feat.GetGeometryRef() is None

    # A gml:posList with an odd number of values: geometry must be rejected.
    with open('tmp/broken.rss', 'wt') as f:
        f.write('<?xml version="1.0"?><rss><channel><item><georss:where><gml:LineString><gml:posList>48 2 48.1 2.1 48</gml:posList></gml:LineString></georss:where></item></channel></rss>')
    ds = ogr.Open('tmp/broken.rss')
    gdal.PushErrorHandler('CPLQuietErrorHandler')
    feat = ds.GetLayer(0).GetNextFeature()
    gdal.PopErrorHandler()
    assert feat.GetGeometryRef() is None
###############################################################################
# Test writing non standard fields
def test_ogr_georss_13():
    """Write namespaced/non-standard fields and verify the serialized XML.

    Fix over the original: the read-back ``open(...).read()`` leaked the file
    handle; it is now closed deterministically via ``with``.
    """
    try:
        os.remove('tmp/nonstandard.rss')
    except OSError:
        pass
    ds = ogr.GetDriverByName('GeoRSS').CreateDataSource('tmp/nonstandard.rss', options=['USE_EXTENSIONS=YES'])
    lyr = ds.CreateLayer('georss')
    lyr.CreateField(ogr.FieldDefn('myns_field', ogr.OFTString))
    lyr.CreateField(ogr.FieldDefn('field2', ogr.OFTString))
    lyr.CreateField(ogr.FieldDefn('ogr_field3', ogr.OFTString))
    dst_feat = ogr.Feature(feature_def=lyr.GetLayerDefn())
    dst_feat.SetField('myns_field', 'val')
    dst_feat.SetField('field2', 'val2')
    dst_feat.SetField('ogr_field3', 'val3')
    assert lyr.CreateFeature(dst_feat) == 0, 'CreateFeature failed.'
    ds = None  # dropping the datasource reference flushes the file to disk

    with open('tmp/nonstandard.rss') as f:
        content = f.read()
    # 'myns_' prefixed fields keep their namespace; bare fields get 'ogr:'.
    assert content.find('<myns:field>val</myns:field>') != -1, ('%s' % content)
    assert content.find('<ogr:field2>val2</ogr:field2>') != -1, ('%s' % content)
    assert content.find('<ogr:field3>val3</ogr:field3>') != -1, ('%s' % content)
###############################################################################
# Test reading document created at previous step
def test_ogr_georss_14():
    """Read tmp/nonstandard.rss back and check the three non-standard fields."""
    if not gdaltest.georss_read_support:
        pytest.skip()

    ds = ogr.Open('tmp/nonstandard.rss')
    lyr = ds.GetLayer(0)
    feat = lyr.GetNextFeature()

    # Table-driven version of the original three assertions (same order).
    for field_name, expected in (('myns_field', 'val'),
                                 ('ogr_field2', 'val2'),
                                 ('ogr_field3', 'val3')):
        got = feat.GetFieldAsString(field_name)
        assert got == expected, ('Expected %s. Got %s' % (expected, got))
###############################################################################
# Test reading an in memory file (#2931)
def test_ogr_georss_15():
    """Read a GeoRSS document from a /vsimem in-memory file."""
    if not gdaltest.georss_read_support:
        pytest.skip()
    # Older bindings may lack the in-memory file helper.
    if not hasattr(gdal, 'FileFromMemBuffer'):
        pytest.skip()

    content = """<?xml version="1.0" encoding="UTF-8"?>
    <rss version="2.0" xmlns:georss="http://www.georss.org/georss" xmlns:gml="http://www.opengis.net/gml">
        <channel>
            <link>http://mylink.com</link>
            <title>channel title</title>
            <item>
                <guid isPermaLink="false">0</guid>
                <pubDate>Thu, 2 Apr 2009 23:03:00 +0000</pubDate>
                <title>item title</title>
                <georss:point>49 2</georss:point>
            </item>
        </channel>
    </rss>"""

    # Create in-memory file
    gdal.FileFromMemBuffer('/vsimem/georssinmem', content)

    src = ogr.Open('/vsimem/georssinmem')
    layer = src.GetLayer(0)
    feature = layer.GetNextFeature()
    got_title = feature.GetFieldAsString('title')
    assert got_title == 'item title', \
        ('Expected %s. Got %s' % ('item title', got_title))

    # Release memory associated to the in-memory file
    gdal.Unlink('/vsimem/georssinmem')
| 37.326821 | 211 | 0.597716 |
acf363f777b48c824607185e846f3118fcf91f3c | 3,538 | py | Python | streaming/python/examples/wordcount.py | sunho/ray | 0ac8138b26cc66978df150c89ef291263f23c9a1 | [
"Apache-2.0"
] | 2 | 2019-06-17T12:38:24.000Z | 2020-11-11T07:52:26.000Z | streaming/python/examples/wordcount.py | sunho/ray | 0ac8138b26cc66978df150c89ef291263f23c9a1 | [
"Apache-2.0"
] | 3 | 2018-08-15T19:19:25.000Z | 2021-06-30T01:54:46.000Z | streaming/python/examples/wordcount.py | sunho/ray | 0ac8138b26cc66978df150c89ef291263f23c9a1 | [
"Apache-2.0"
] | 2 | 2017-10-31T23:20:07.000Z | 2019-11-13T20:16:03.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import time
import ray
import wikipedia
from ray.streaming.streaming import Environment
# Module-level logger; basicConfig sets the global level to INFO.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# Command-line interface: the only (required) argument is the titles file.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--titles-file",
    required=True,
    help="the file containing the wikipedia titles to lookup")
# A custom data source that reads articles from wikipedia
# Custom data sources need to implement a get_next() method
# that returns the next data element, in this case sentences
class Wikipedia:
    def __init__(self, title_file):
        """Set up the source. Each line of `title_file` is a wikipedia query."""
        # Titles in this file will be as queries
        self.title_file = title_file
        # Read all titles up front and close the handle immediately; the
        # original `iter(list(open(...).readlines()))` leaked the file object
        # (and the list() wrapper was redundant: readlines() returns a list).
        # TODO (john): Handle possible exception here
        with open(self.title_file, "r") as f:
            self.title_reader = iter(f.readlines())
        self.done = False          # True once all titles are exhausted
        self.article_done = True   # True when the current article has no sentences left
        self.sentences = iter([])

    # Returns next sentence from a wikipedia article
    def get_next(self):
        """Return the next sentence, or None once the source is exhausted."""
        if self.done:
            return None  # Source exhausted
        while True:
            if self.article_done:
                try:  # Try next title
                    next_title = next(self.title_reader)
                except StopIteration:
                    self.done = True  # Source exhausted
                    return None
                # Get next article
                logger.debug("Next article: {}".format(next_title))
                article = wikipedia.page(next_title).content
                # Split article in sentences
                self.sentences = iter(article.split("."))
                self.article_done = False
            try:  # Try next sentence
                sentence = next(self.sentences)
                logger.debug("Next sentence: {}".format(sentence))
                return sentence
            except StopIteration:
                self.article_done = True
# Splits input line into words and
# outputs records of the form (word,1)
def splitter(line):
    """Split `line` on whitespace and emit one (word, 1) record per word."""
    # Comprehension replaces the original manual append loop.
    return [(word, 1) for word in line.split()]
# Returns the first attribute of a tuple
def key_selector(record):
    """Key extractor for key_by(): return the first element of `record`.

    The parameter was renamed from ``tuple``, which shadowed the builtin;
    callers pass this function itself (not keyword arguments), so the
    rename is backward compatible.
    """
    return record[0]
# Returns the second attribute of a tuple
def attribute_selector(record):
    """Value extractor for sum(): return the second element of `record`.

    The parameter was renamed from ``tuple``, which shadowed the builtin;
    callers pass this function itself (not keyword arguments), so the
    rename is backward compatible.
    """
    return record[1]
if __name__ == "__main__":
    # Get program parameters
    args = parser.parse_args()
    titles_file = str(args.titles_file)
    # Connect to (or start) the Ray runtime.
    ray.init()
    # A Ray streaming environment with the default configuration
    env = Environment()
    env.set_parallelism(2)  # Each operator will be executed by two actors
    # The following dataflow is a simple streaming wordcount
    #  with a rolling sum operator.
    # It reads articles from wikipedia, splits them in words,
    # shuffles words, and counts the occurences of each word.
    stream = env.source(Wikipedia(titles_file)) \
                .round_robin() \
                .flat_map(splitter) \
                .key_by(key_selector) \
                .sum(attribute_selector) \
                .inspect(print)     # Prints the contents of the
    # stream to stdout
    start = time.time()
    env_handle = env.execute()  # Deploys and executes the dataflow
    ray.get(env_handle)  # Stay alive until execution finishes
    env.wait_finish()
    end = time.time()
    logger.info("Elapsed time: {} secs".format(end - start))
    logger.debug("Output stream id: {}".format(stream.id))
| 32.163636 | 78 | 0.640475 |
acf366a2b8cf1256b6145dd68c22499de73b540f | 512 | py | Python | apps/users/migrations/0012_auto_20200804_1452.py | lucasjaroszewski/incremental-game | bae8823f986be0fd046bd50195d43fbc548fad90 | [
"MIT"
] | null | null | null | apps/users/migrations/0012_auto_20200804_1452.py | lucasjaroszewski/incremental-game | bae8823f986be0fd046bd50195d43fbc548fad90 | [
"MIT"
] | 5 | 2021-06-09T17:54:51.000Z | 2022-03-12T00:46:49.000Z | apps/users/migrations/0012_auto_20200804_1452.py | lucasjaroszewski/incremental-game | bae8823f986be0fd046bd50195d43fbc548fad90 | [
"MIT"
] | 1 | 2020-09-27T18:26:15.000Z | 2020-09-27T18:26:15.000Z | # Generated by Django 3.0.6 on 2020-08-04 17:52
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.6: turns FishX.user into a ManyToManyField
    # pointing at the configured user model (reverse accessor 'fishesX').
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('users', '0011_fishx'),
    ]
    operations = [
        migrations.AlterField(
            model_name='fishx',
            name='user',
            field=models.ManyToManyField(related_name='fishesX', to=settings.AUTH_USER_MODEL),
        ),
    ]
acf3670da6609e69a3793dc5361bd5e4ed17d828 | 2,520 | py | Python | main.py | adrianpaniagualeon/twitter-vaccine-bot | c459c0d8169df826d3eb872bf7f9eba4b90c7ae1 | [
"MIT"
] | null | null | null | main.py | adrianpaniagualeon/twitter-vaccine-bot | c459c0d8169df826d3eb872bf7f9eba4b90c7ae1 | [
"MIT"
] | null | null | null | main.py | adrianpaniagualeon/twitter-vaccine-bot | c459c0d8169df826d3eb872bf7f9eba4b90c7ae1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import tweepy
import requests
import json
from datetime import date, datetime
from datetime import timedelta
import cv2
import os
import unicodedata
def remove_accents(text):
    """Return *text* with Spanish accented vowels replaced by plain ASCII ones.

    Bug fix: the original mapping had the key ``'E': 'E'`` — a mistyped
    ``'É'`` — so uppercase É was never converted. str.translate now does
    all ten replacements in a single pass.
    """
    return text.translate(str.maketrans('áéíóúÁÉÍÓÚ', 'aeiouAEIOU'))
# Twitter API credentials come from the environment (KeyError if unset).
CONSUMER_KEY= os.environ['CONSUMER_KEY']
CONSUMER_SECRET =os.environ['CONSUMER_SECRET']
ACCESS_TOKEN= os.environ['ACCESS_TOKEN']
ACCESS_TOKEN_SECRET =os.environ['ACCESS_TOKEN_SECRET']
# The open-data portal publishes figures for the previous day.
today = date.today()
today = today - timedelta(days=1)
# Query: COVID-19 vaccination records for province León, filtered to yesterday.
url = 'https://analisis.datosabiertos.jcyl.es/api/records/1.0/search/?dataset=vacunacion-covid-19-por-grupo-y-criterio&q=&sort=fecha&facet=fecha&rows=20&facet=provincia&facet=grupo_vacunacion&refine.provincia=Le%C3%B3n&refine.fecha='+str(today)
respuesta = requests.get(url)
print (url)
# NOTE(review): these file handles are never closed; CPython refcounting
# flushes them, but `with open(...)` would be safer — confirm and fix.
open('respuesta.json', 'wb').write(respuesta.content)
f = open('respuesta.json')
json_file = json.load(f)
# NOTE(review): dumps+loads round-trip is redundant; json_file is already parsed.
json_str = json.dumps(json_file)
resp = json.loads(json_str)
rows = resp['nhits']
grupo = {}
dosis={}
ciclo = {}
posicion = 105  # vertical pixel offset of the first table row on the template
total = 0
image = cv2.imread('template.png')
# Draw one line per vaccination group that has a non-zero dose count.
for i in range (rows):
    print (i)
    if (resp['records'][i]['fields']['dosis_administradas_acumulado'] == 0):
        i = i+1  # NOTE(review): no effect — the for loop controls i; branch just skips the record
    else:
        grupo[i] = resp['records'][i]['fields']['grupo_vacunacion']
        dosis[i] = resp['records'][i]['fields']['dosis_administradas_acumulado']
        ciclo[i] = resp['records'][i]['fields']['personas_vacunadas_ciclo_completo_acumulado']
        # Group name (accents stripped for the font) and dose count, white/black text.
        cv2.putText(image, remove_accents(grupo[i]).upper(), (220, posicion), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        cv2.putText(image, str(dosis[i]), (70, posicion), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,0), 2)
        posicion = posicion + 85  # advance to the next table row
        total = total + dosis[i]
        print ("\n\n"+str(grupo[i])+"\n"+str(dosis[i])+"\n"+str(ciclo[i]))
today = today.strftime("%d-%m-%Y")
# Stamp the grand total and the report date onto the template, then save.
cv2.putText(image, str(total), (1150, 580), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 0), 2)
cv2.putText(image, str(today), (1160, 270), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
cv2.imwrite('output.png', image)
# Authenticate and tweet the rendered image.
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
api.update_with_media("output.png", "La vacunación contra la COVID-19 avanza en León. Estos son los datos acumulados a dia de hoy ("+str(today)+"). Más info en: https://bit.ly/2PLqzwk")
| 31.898734 | 244 | 0.695635 |
acf36737a4a709347eadaf3f24ef0ce3cfd0a02d | 7,743 | py | Python | sys2syz.py | ais2397/NetBSD-syzkaller-automation | 5f31171f66cda851a1c26898fc622ab7f7a082ee | [
"MIT"
] | 14 | 2020-07-22T01:55:04.000Z | 2022-01-13T10:23:50.000Z | sys2syz.py | ais2397/NetBSD-syzkaller-automation | 5f31171f66cda851a1c26898fc622ab7f7a082ee | [
"MIT"
] | 3 | 2020-10-20T15:49:33.000Z | 2021-12-19T05:39:59.000Z | sys2syz.py | ais2397/NetBSD-syzkaller-automation | 5f31171f66cda851a1c26898fc622ab7f7a082ee | [
"MIT"
] | 1 | 2021-05-07T12:46:52.000Z | 2021-05-07T12:46:52.000Z | # User imports
from core.utils import Utils
from core.logger import get_logger
from core.bear import *
from core.extractor import Extractor, Ioctl
from core.c2xml import *
from core.descriptions import *
from core.syscall import *
# Default imports
import argparse
import os
import sys
import string
class Sys2syz(object):
    """Driver object tying together the sys2syz pipeline stages
    (Bear -> c2xml -> extractor -> descriptions) for one target.

    NOTE(review): methods call the module-global `logging`, which main()
    reassigns to a custom logger via get_logger() — confirm this coupling.
    """
    NETBSD = 1
    # Maps a supported OS display name to its internal id.
    supported_os = {'NetBSD': NETBSD}
    def __init__(self, input_type, target, compile_commands, os_name, log_level):
        # input_type is either "ioctl" (target = device source dir) or
        # "syscall" (target = syscall table identifier).
        self.input_type = input_type
        self.compile_commands = compile_commands
        self.os = os_name
        self.os_type = None
        self.log_level = log_level
        if input_type == "ioctl":
            self.target = os.path.realpath(target)
            self.out_dir = os.path.join(os.getcwd(), "out/preprocessed/", basename(self.target), "out")
            self.macro_details = ""
            self.ioctls = []
            self.bear = Bear(self)
            self.c2xml = C2xml(self)
            # initialize the sub classes
            self.extractor = Extractor(self)
            self.descriptions = Descriptions(self)
            self.header_files = self.extractor.header_files
            logging.debug("[+] Sys2syz init completed")
        if input_type == "syscall":
            self.target = target
            self.out_dir = os.path.join(os.getcwd(), "out/preprocessed/", basename(self.target), "out")
            self.bear = Bear(self)
            self.c2xml = C2xml(self)
            self.syscall = Syscall(self)
            self.descriptions = Descriptions(self)
        # Bail out of the whole process if the arguments are unusable.
        if not self._sanity_check():
            logging.critical("[+] Sys2syz failed to init")
            sys.exit(-1)
    def _sanity_check(self) -> bool:
        """Perform Sanity check on the arguments passed
        Returns:
            bool: Reflect passed or failed
        """
        if self.input_type == "ioctl":
            # ioctl mode needs an existing target directory and the
            # compile_commands.json produced by Bear.
            if not os.path.isdir(self.target):
                logging.error("[+] The target file is not found at %s", self.target)
                return False
            logging.debug("[+] The target file is %s", self.target)
            if not os.path.isfile(self.compile_commands):
                logging.error("[+] The compile commands not found at %s", self.compile_commands)
                return False
            logging.debug("[+] The compile commands file is %s", self.compile_commands)
        # Case-insensitive lookup of the target OS in the supported table.
        for os_type in self.supported_os.keys():
            if os_type.lower() == self.os.lower():
                self.os_type = self.supported_os[os_type]
                return True
        logging.error("[+] Target OS not supported/found %s", self.os)
        return False
    def get_ioctls(self) -> bool:
        """ Get's the IOCTL calls as a list and does sanity check and some stats
        Returns:
            bool: True is ioctls were found
        """
        self.extractor.get_ioctls()
        self.ioctls = self.extractor.ioctls
        if len(self.ioctls) == 0:
            return False
        if self.log_level > 1:
            # Verbose mode: dump every parsed ioctl to test.log for inspection.
            ioctl_string = ""
            for ioctl in self.ioctls:
                ioctl_string += str(ioctl) + "\n"
            open("test.log", "w+").write(ioctl_string)
        logging.info(f"[+] {len(self.ioctls)} IOCTL calls were found!")
        return True
    @property
    def undefined_macros(self) -> list:
        # Macros/flags referenced by the ioctls but not defined locally.
        und_macros = self.extractor.fetch_flags()
        logging.info(f"[+] {len(und_macros)} undefined macros were found from the file!")
        return und_macros
    def get_macro_details(self):
        # Resolve the undefined macros via c2xml output; result cached on self.
        self.macro_details = self.extractor.flag_details(self.undefined_macros)
        logging.info(f"[+] Extracted details of {len(self.macro_details)} macros from c2xml!")
    def preprocess_files(self, file_name=None) -> bool:
        """ Preprocess the files
        Runs Bear over compile_commands.json; returns True on success.
        """
        try:
            if self.bear.parse_compile_commands(file_name):
                return True
        except Exception as e:
            logging.critical("Unable to run bear and parse compile commands")
            logging.error(e)
        # Reached when parsing returned falsy or raised.
        return False
    def create_xml_files(self):
        """Convert the preprocessed C files to XML via c2xml; True on success."""
        try:
            self.c2xml.run_c2xml()
            return True
        except Exception as e:
            logging.critical("Failed to convert C files to XML")
            return False
    def generate_descriptions(self):
        """Emit syzkaller (syz-lang) descriptions for the target; True on success."""
        if self.input_type == "ioctl":
            self.descriptions.ioctl_run()
            #Store the descriptions in the syzkaller's syscall description file format
            output_path = self.descriptions.make_file()
            if Utils.file_exists(output_path, True):
                logging.info("[+] Description file: " + output_path)
                return True
            return False
        if self.input_type == "syscall":
            self.descriptions.syscall_run()
            return True
        # NOTE(review): the string below is dead code (a bare string literal,
        # not an except clause) — candidate for removal.
        '''except Exception as e:
            logging.critical("Unable to generate descriptions for ioctl calls")
            return False'''
def main():
    """Command-line entry point: parse args, build a Sys2syz, run the pipeline."""
    # main() replaces the module-global `logging` with a configured logger.
    global logging
    # Parse the command line arguments
    parser = argparse.ArgumentParser(description="Sys2Syz : A Utility to convert Syscalls and Ioctls to Syzkaller representation")
    parser.add_argument("-i", "--input_type", help="input type ioctl/syscall", type=str, required=True)
    parser.add_argument("-t", "--target", help="target file to generate descriptions for", type=str, required=True)
    parser.add_argument("-o", "--operating-system", help="target operating system", type=str, required=True)
    parser.add_argument("-c", "--compile-commands", help="path to compile_commands.json", type=str, required=True)
    parser.add_argument("-v", "--verbosity", help="Sys2syz log level", action="count")
    args = parser.parse_args()
    logging = get_logger("Syz2syz", args.verbosity)
    # get the header files
    sysobj = Sys2syz(args.input_type, args.target, args.compile_commands, args.operating_system, args.verbosity)
    if sysobj.input_type == "ioctl":
        # ioctl pipeline: headers -> ioctls -> preprocess -> macros -> XML -> descriptions
        if len(sysobj.header_files) == 0:
            logging.error("No header files found!")
            sys.exit(-1)
        logging.debug(sysobj.header_files)
        # get the IOCTL calls
        if not sysobj.get_ioctls():
            logging.error("No IOCTL calls found!")
            sys.exit(-1)
        if not sysobj.preprocess_files():
            logging.error("Can't continue.. Exiting")
            sys.exit(-1)
        # Extract the macros/flags
        sysobj.get_macro_details()
        logging.info("[+] Completed the initial pre processing of the target")
        # Generate XML files
        if not sysobj.create_xml_files():
            logging.error("Can't continue.. Exiting")
            sys.exit(-1)
        # TODO: you can create wrapper functions for all these in sysobj.
        # TODO: change the descriptions object so that it take sysobj as constructor parameter
        # TODO: change the functions in the object so they use self.sysobj.macro_details to get the detials
        #Get syz-lang descriptions
        if not sysobj.generate_descriptions():
            logging.error("Exiting")
            sys.exit(-1)
    if sysobj.input_type == "syscall":
        # syscall pipeline: locate table file -> preprocess -> XML -> descriptions
        file_name = sysobj.syscall.find_file()
        if not sysobj.preprocess_files(file_name):
            logging.error("Can't continue.. Exiting")
            sys.exit(-1)
        if not sysobj.create_xml_files():
            logging.error("Can't continue.. Exiting")
            sys.exit(-1)
        if not sysobj.generate_descriptions():
            logging.error("Exiting")
            sys.exit(-1)
if __name__ == "__main__":
    # Placeholder; main() rebinds this global to a configured logger.
    logging = None
    main()
| 35.195455 | 130 | 0.604029 |
acf3678528be5ef554ef571d6d9a45cdc8150d84 | 22,635 | py | Python | tracklib/analysis/bild/models.py | SGrosse-Holz/tracklib | e0b88e3959db2ce65869d8292ce5792f4c77c7a4 | [
"MIT"
] | 1 | 2022-01-30T15:10:51.000Z | 2022-01-30T15:10:51.000Z | tracklib/analysis/bild/models.py | SGrosse-Holz/tracklib | e0b88e3959db2ce65869d8292ce5792f4c77c7a4 | [
"MIT"
] | null | null | null | tracklib/analysis/bild/models.py | SGrosse-Holz/tracklib | e0b88e3959db2ce65869d8292ce5792f4c77c7a4 | [
"MIT"
] | null | null | null | """
The inference models, and the interface they have to conform to.
"""
import abc
import functools
import numpy as np
import scipy.optimize
import scipy.stats
from tracklib import Trajectory
from tracklib.models import rouse
from .util import Loopingprofile
LOG_SQRT_2_PI = 0.5*np.log(2*np.pi)
class MultiStateModel(metaclass=abc.ABCMeta):
    """
    Abstract base class for inference models
    The most important capability of any model is the likelihood function
    `logL` for a combination of `Loopingprofile` and `Trajectory`. Furthermore, a
    model should provide an initial guess for a good `Loopingprofile`.
    The method `trajectory_from_loopingprofile` is considered an optional part of
    the interface, since it is not important to the inference, but might come
    in handy when working with a `MultiStateModel`. So it is recommended but not
    required.
    """
    @property
    def nStates(self):
        """
        How many internal states does this model have?
        """
        # Not @abstractmethod, but subclasses must override to be usable.
        raise NotImplementedError # pragma: no cover
    @property
    def d(self):
        """
        Spatial dimension
        """
        # Not @abstractmethod, but subclasses must override to be usable.
        raise NotImplementedError # pragma: no cover
    def initial_loopingprofile(self, traj):
        """
        Give a quick guess for a good `Loopingprofile` for a `Trajectory`.
        The default implementation gives a random `Loopingprofile`.
        Parameters
        ----------
        traj : Trajectory
        Returns
        -------
        Loopingprofile
        """
        # One uniformly random state per frame.
        return Loopingprofile(np.random.choice(self.nStates, size=len(traj)))
    @abc.abstractmethod
    def logL(self, loopingprofile, traj):
        """
        Calculate (log-)likelihood for (`Loopingprofile`, `Trajectory`) pair.
        Parameters
        ----------
        loopingprofile : Loopingprofile
        traj : Trajectory
        Returns
        -------
        float
            log-likelihood associated with the inputs
        """
        raise NotImplementedError # pragma: no cover
    def trajectory_from_loopingprofile(self, loopingprofile, localization_error, missing_frames):
        """
        Generate a `Trajectory` from this `MultiStateModel` and the given `Loopingprofile`
        Parameters
        ----------
        loopingprofile : Loopingprofile
        localization_error : float, optional
            how much Gaussian noise to add to the trajectory.
        Returns
        -------
        Trajectory
        """
        # Optional part of the interface; subclasses may leave unimplemented.
        raise NotImplementedError # pragma: no cover
class MultiStateRouse(MultiStateModel):
    """
    A multi-state Rouse model
    This inference model uses a given number of `rouse.Model` instances to
    choose from for each propagation interval. In the default use case this
    switches between a looped and unlooped model, but it could be way more
    general than that, e.g. incorporating different looped states, loop
    positions, numbers of loops, etc.
    Parameters
    ----------
    N : int
        number of monomers
    D, k : float
        Rouse parameters: 1d diffusion constant of free monomers and backbone
        spring constant
    d : int, optional
        spatial dimension
    looppositions : list of 2-tuples of int, optional
        list of positions of the extra bond. For each entry, a new
        `rouse.Model` instance will be set up. Remember to include an unlooped
        model (if wanted) by including a position like ``(0, 0)``. Each entry
        can alternatively be a 3-tuple, where the 3rd entry then specifies the
        strength of the extra bond relative to the backbone, e.g. ``(0, 5,
        0.5)`` introduces an additional bond between monomers 0 and 5 with
        strength ``0.5*k``. Finally, instead of a single tuple, each bond
        specification can be a list of such tuples if multiple added bonds are
        needed.
    measurement : "end2end" or (N,) np.ndarray
        which distance to measure. The default setting "end2end" is equivalent
        to specifying a vector ``np.array([-1, 0, ..., 0, 1])``, i.e. measuring
        the distance from the first to the last monomer.
    localization_error : float or np.array, optional
        a global value for the localization error. By default, we use the value
        stored in ``traj.meta['localization_error']``, which allows
        trajectory-wise specification of error. But for example for fitting it
        might be useful to have one global setting for localization error, at
        which point it becomes part of the model. Give a scalar value to have
        the same error apply to all dimensions
    Attributes
    ----------
    models : list of `rouse.Model`
        the models used for inference
    measurement : (N,) np.ndarray
        the measurement vector for this model
    localization_error : array or None
        if ``None``, use ``traj.meta['localization_error']`` for each
        trajectory ``traj``.
    Notes
    -----
    The `initial_loopingprofile` for this `MultiStateModel` is the MLE assuming time scale
    separation. I.e. we calculate the timepoint-wise MLE using the exact steady
    state distributions of each model.
    See also
    --------
    MultiStateModel, rouse.Model
    """
    def __init__(self, N, D, k, d=3,
                 looppositions=((0, 0), (0, -1)), # no mutable default parameters!
                                                  # (thus tuple instead of list)
                 measurement="end2end",
                 localization_error=None,
                 ):
        self._d = d
        if str(measurement) == "end2end":
            # Measurement vector picking out (last monomer - first monomer).
            measurement = np.zeros(N)
            measurement[0] = -1
            measurement[-1] = 1
        self.measurement = measurement
        # Broadcast a scalar error to one value per spatial dimension.
        if localization_error is not None and np.isscalar(localization_error):
            localization_error = np.array(d*[localization_error])
        self.localization_error = localization_error
        self.models = []
        for loop in looppositions:
            # A single tuple is wrapped into a one-element bond list.
            if np.isscalar(loop[0]):
                loop = [loop]
            mod = rouse.Model(N, D, k, d, add_bonds=loop)
            self.models.append(mod)
    @property
    def nStates(self):
        # One state per configured rouse.Model.
        return len(self.models)
    @property
    def d(self):
        return self._d
    def _get_noise(self, traj):
        # Model-level localization error takes precedence over the
        # trajectory's own meta entry.
        if self.localization_error is not None:
            return np.asarray(self.localization_error)
        else:
            return np.asarray(traj.meta['localization_error'])
    def initial_loopingprofile(self, traj):
        # We give the MLE assuming time scale separation
        # This is exactly the same procedure as for FactorizedModel, where we
        # utilize the steady state distributions of the individual Rouse
        # models.
        noise = self._get_noise(traj)
        Ms = []
        Cs = []
        for mod in self.models:
            M, C = mod.steady_state()
            # Project steady-state mean and covariance onto the measurement.
            Ms.append(self.measurement @ M)
            Cs.append(self.measurement @ C @ self.measurement)
        Ms = np.expand_dims(Ms, 0) # (1, n, d)
        Cs = np.expand_dims(Cs, (0, 2)) # (1, n, 1)
        Cs = Cs + np.expand_dims(noise*noise, (0, 1)) # (1, n, d)
        # assemble (T, n, d) array
        valid_times = np.nonzero(~np.any(np.isnan(traj[:]), axis=1))[0]
        chi2s = (traj[valid_times][:, np.newaxis, :] - Ms)**2 / Cs
        logLs = -0.5*(chi2s + np.log(Cs)) - 0.5*np.log(2*np.pi)*np.ones(chi2s.shape)
        logLs = np.sum(logLs, axis=2) # (T, n)
        # Per-frame argmax over states, then fill gaps (missing frames) by
        # carrying the next observed state backwards over the gap.
        best_states = np.argmax(logLs, axis=1)
        states = np.zeros(len(traj), dtype=int)
        states[:(valid_times[0]+1)] = best_states[0]
        last_time = valid_times[0]
        for cur_time, cur_state in zip(valid_times[1:], best_states[1:]):
            states[(last_time+1):(cur_time+1)] = cur_state
            last_time = cur_time
        if last_time < len(traj):
            # Trailing missing frames take the last observed state.
            states[(last_time+1):] = best_states[-1]
        return Loopingprofile(states)
    def logL(self, profile, traj):
        # Exact likelihood via a Kalman filter over the Rouse dynamics:
        # propagate with the model chosen by `profile`, update on observed frames.
        localization_error = self._get_noise(traj)
        for model in self.models:
            model.check_dynamics()
        model = self.models[profile[0]]
        M, C_single = model.steady_state()
        C = self.d * [C_single]  # one covariance per spatial dimension
        valid_times = np.nonzero(~np.any(np.isnan(traj[:]), axis=1))[0]
        L_log = np.empty((len(valid_times), self.d), dtype=float)
        def Kalman_update(t, M, C, L_log, i_write):
            # Closure reads the *current* value of `model` (reassigned in the
            # loop below), so the update always uses the state at time t.
            for d in range(self.d):
                l, m, c = model.Kalman_update_1d(M[:, d], C[d],
                                                 traj[t][d], localization_error[d],
                                                 self.measurement)
                L_log[i_write, d] = l
                M[:, d] = m
                C[d] = c
            return M, C
        # First update
        i_write = 0
        if 0 in valid_times:
            M, C = Kalman_update(0, M, C, L_log, i_write)
            i_write += 1
        # Propagate, then update
        for t, state in enumerate(profile[1:], start=1):
            model = self.models[state]
            # Propagate
            M = model.propagate_M(M, check_dynamics=False)
            C = [model.propagate_C(myC, check_dynamics=False) for myC in C]
            # Update
            if t in valid_times:
                M, C = Kalman_update(t, M, C, L_log, i_write)
                i_write += 1
        # Consistency check: every valid frame must have produced one update.
        if i_write != len(L_log):
            raise RuntimeError # pragma: no cover
        return np.sum(L_log)
    def trajectory_from_loopingprofile(self, profile,
                                       localization_error=None,
                                       missing_frames=None,
                                       ):
        # Pre-processing
        # localization_error
        if localization_error is None:
            if self.localization_error is None:
                raise ValueError("Need to specify either localization_error or model.localization_error") # pragma: no cover
            else:
                localization_error = self.localization_error
        if np.isscalar(localization_error):
            localization_error = self.d*[localization_error]
        localization_error = np.asarray(localization_error)
        if localization_error.shape != (self.d,):
            raise ValueError("Did not understand localization_error") # pragma: no cover
        # missing_frames: None -> none; fraction in (0,1) -> Bernoulli per
        # frame; integer -> that many frames chosen without replacement.
        if missing_frames is None:
            missing_frames = np.array([], dtype=int)
        if np.isscalar(missing_frames):
            if 0 < missing_frames and missing_frames < 1:
                missing_frames = np.nonzero(np.random.rand(len(profile)) < missing_frames)[0]
            else:
                missing_frames = np.random.choice(len(profile), size=missing_frames, replace=False)
        missing_frames = missing_frames.astype(int)
        # Assemble trajectory: sample a steady-state conformation from the
        # first state's model, then evolve with the per-frame model.
        data = np.empty((len(profile), self.d), dtype=float)
        data[:] = np.nan
        model = self.models[profile[0]]
        conf = model.conf_ss()
        data[0, :] = self.measurement @ conf
        for i in range(1, len(profile)):
            model = self.models[profile[i]]
            conf = model.evolve(conf)
            data[i, :] = self.measurement @ conf
        # Kick out frames that should be missing
        data[missing_frames, :] = np.nan
        # Return as Trajectory
        noise = localization_error[np.newaxis, :]
        return Trajectory.fromArray(data + noise*np.random.normal(size=data.shape),
                                    localization_error=localization_error,
                                    loopingprofile=profile,
                                    )
    def toFactorized(self):
        """
        Give the corresponding `FactorizedModel`
        This is the model that simply calculates likelihoods from the steady
        state probabilities of each of the individual states.
        Returns
        -------
        FactorizedModel
        """
        distributions = []
        for mod in self.models:
            _, C = mod.steady_state()
            # Steady-state variance of the measured distance plus the mean
            # squared localization error per dimension -> Maxwell scale².
            s2 = self.measurement @ C @ self.measurement + np.sum(self.localization_error**2)/self.d
            distributions.append(scipy.stats.maxwell(scale=np.sqrt(s2)))
        return FactorizedModel(distributions, d=self.d)
class FactorizedModel(MultiStateModel):
    """
    A simplified model, assuming time scale separation
    This model assumes that each point is sampled from one of a given list of
    distributions, where there is no correlation between the choice of
    distribution for each point. It runs significantly faster than the full
    `RouseModel`, but is of course inaccurate if the Rouse time is longer or
    comparable to the frame rate of the recorded trajectories.
    Parameters
    ----------
    distributions : list of distribution objects
        these will usually be ``scipy.stats.rv_continuous`` objects (e.g.
        Maxwell), but can be pretty arbitrary. The only function they have to
        provide is ``logpdf()``, which should take a scalar or vector of
        distance values and return a corresponding number of outputs. If you
        plan on using `trajectory_from_loopingtrace`, the distributions should
        also have an ``rvs()`` method for sampling.
    Attributes
    ----------
    distributions : list of distribution objects
    Notes
    -----
    This being a heuristical model, we assume that the localization error is
    already incorporated in the `!distributions`, as would be the case if they
    come from experimental data. Therefore, this class ignores the
    ``meta['localization_error']`` field of `Trajectory`.
    Instances of this class memoize trajectories they have seen before. To
    reset the memoization, you can either reinstantiate or clear the cache
    manually:
    >>> model = FactorizedModel(model.distributions)
    ... model.clear_memo()
    If using ``scipy.stats.maxwell``, make sure to use it correctly, i.e. you
    have to specify ``scale=...``. Writing ``scipy.stats.maxwell(5)`` instead
    of ``scipy.stats.maxwell(scale=5)`` shifts the distribution instead of
    scaling it and leads to ``-inf`` values in the likelihood, which then screw
    up the MCMC. The classic error to get for this is ``invalid value
    encountered in double_scalars``. This is caused by ``new_logL - cur_logL``
    reading ``- inf + inf`` at the first MCMC iteration, if `logL` returns
    ``-inf``.
    Examples
    --------
    Experimentally measured distributions can be used straightforwardly using
    ``scipy.stats.gaussian_kde``: assuming we have measured ensembles of
    distances ``dists_i`` for reference states ``i``, we can use
    >>> model = FactorizedModel([scipy.stats.gaussian_kde(dists_0),
    ...                          scipy.stats.gaussian_kde(dists_1),
    ...                          scipy.stats.gaussian_kde(dists_1)])
    """
    def __init__(self, distributions, d=3):
        self.distributions = distributions
        self._d = d
        # Per-trajectory cache of log-likelihood tables; see _memo().
        self._known_trajs = dict()
    @property
    def nStates(self):
        return len(self.distributions)
    @property
    def d(self):
        return self._d
    def _memo(self, traj):
        """
        (internal) memoize `traj`
        Precompute the (nStates, T) table of per-frame log-likelihoods once
        per trajectory; subsequent logL() calls are just index-and-sum.
        """
        if not traj in self._known_trajs:
            with np.errstate(divide='ignore'): # nans in the trajectory raise 'divide by zero in log'
                logL_table = np.array([dist.logpdf(traj.abs()[:][:, 0])
                                       for dist in self.distributions
                                       ])
            self._known_trajs[traj] = {'logL_table' : logL_table}
    def clear_memo(self):
        """
        Clear the memoization cache
        """
        self._known_trajs = dict()
    def initial_loopingprofile(self, traj):
        # Per-frame MLE over states, with missing frames filled by carrying
        # the next observed state backwards over the gap.
        self._memo(traj)
        valid_times = np.nonzero(~np.any(np.isnan(traj[:]), axis=1))[0]
        best_states = np.argmax(self._known_trajs[traj]['logL_table'][:, valid_times], axis=0)
        states = np.zeros(len(traj), dtype=int)
        states[:(valid_times[0]+1)] = best_states[0]
        last_time = valid_times[0]
        for cur_time, cur_state in zip(valid_times[1:], best_states[1:]):
            states[(last_time+1):(cur_time+1)] = cur_state
            last_time = cur_time
        if last_time < len(traj):
            # Trailing missing frames take the last observed state.
            states[(last_time+1):] = best_states[-1]
        return Loopingprofile(states)
    def logL(self, profile, traj):
        # Sum the cached per-frame log-likelihoods along the profile's states;
        # nansum skips frames with missing data.
        self._memo(traj)
        return np.nansum(self._known_trajs[traj]['logL_table'][profile.state, :])
    def trajectory_from_loopingprofile(self, profile, localization_error=0., missing_frames=None):
        # Pre-proc missing_frames: None -> none; fraction in (0,1) ->
        # Bernoulli per frame; integer -> that many frames, no replacement.
        if missing_frames is None:
            missing_frames = np.array([], dtype=int)
        if np.isscalar(missing_frames):
            if 0 < missing_frames and missing_frames < 1:
                missing_frames = np.nonzero(np.random.rand(len(profile)) < missing_frames)[0]
            else:
                missing_frames = np.random.choice(len(profile), size=missing_frames, replace=False)
        missing_frames = missing_frames.astype(int)
        # Note that the distributions in the model give us only the length, not
        # the orientation. So we also have to sample unit vectors
        # Furthermore, localization_error should not be added, since
        # self.distributions already contain it. It will be written to the
        # meta entry though!
        magnitudes = np.array([self.distributions[state].rvs() for state in profile[:]])
        data = np.random.normal(size=(len(magnitudes), self.d))
        data *= np.expand_dims(magnitudes / np.linalg.norm(data, axis=1), 1)
        data[missing_frames, :] = np.nan
        return Trajectory.fromArray(data,
                                    localization_error=np.array(self.d*[localization_error]),
                                    loopingprofile=profile,
                                    )
def _neg_logL_traj(traj, model):
# For internal use in parallelization
return -model.logL(traj.meta['loopingprofile'], traj)
def fit(data, modelfamily,
        show_progress=False, assume_notebook_for_progressbar=True,
        map_function=map,
        **kwargs):
    """
    Find the best fit model to a calibration dataset

    Parameters
    ----------
    data : TaggedSet of Trajectory
        the calibration data. Each `Trajectory` should have a `meta
        <Trajectory.meta>` entry ``'loopingtrace'`` indicating the true/assumed
        `Loopingtrace` for this trajectory.
    modelfamily : ParametricFamily of Models
        the family of models to consider.
    show_progress : bool, optional
        set to ``True`` to get progress info. Note that since we do not know
        how many iterations we need for convergence, there is no ETA, just
        elapsed time.
    assume_notebook_for_progressbar : bool, optional
        choose the notebook flavor of tqdm when showing progress.
    map_function : map-like callable
        a function to replace the built-in ``map()``, e.g. with a parallel
        version. Will be used as
        ``np.sum(list(map_function(likelihood_given_parameters, data)))``, i.e.
        order does not matter. ``multiprocessing.Pool.imap_unordered`` would be
        a good go-to.
    kwargs : kwargs
        will be forwarded to `!scipy.optimize.minimize`. We use the defaults
        ``method='L-BFGS-B'``, ``maxfun=300``, ``ftol=1e-5``.

    Returns
    -------
    res : fitresult
        the structure returned by `!scipy.optimize.minimize`. The best fit
        parameters are ``res.x``, while their covariance matrix can be obtained
        as ``res.hess_inv.todens()``.

    Examples
    --------
    A good measure for relative uncertainty of the estimate is given by
    ``(√det(Σ) / Π(x))^(1/n)``, i.e. the major axes of the covariance ellipsoid
    over the point estimates, normalized by the dimensionality:

    >>> res = neda.models.fit(data, modelfam)
    ... relative_uncertainty = ( np.sqrt(np.linalg.det(res.hess_inv.todense())) \
    ...                          / np.prod(res.x) )**(1/modelfam.nParams)

    The function being minimized here is the negative log-likelihood of the
    data set, given parameters to the `modelfamily`. Specifically, this
    function is

    >>> def minimization_target(params):
    ...     mappable = functools.partial(_neg_logL_traj, model=modelfamily(*params))
    ...     return np.nansum(list(map_function(mappable, data)))

    See also
    --------
    ParametricFamily

    **Troubleshooting**

    - make sure that the magnitude of parameter values is around one. The
      minimizer (see `!scipy.optimize.minimize`) defaults to a fixed step
      gradient descent, which is useless if parameters are orders of magnitude
      bigger than 1. You can also try to play with the minimizer's options to
      make it use an adaptive step size.
    - make sure units match up. A common mistake is to have a unit mismatch
      between localization error and trajectories (e.g. one in μm and the
      other in nm). If the localization error is too big (here by a factor of
      1000), the fit for `!D` will converge to zero (i.e. ``1e-10``).
    - the ``'hess_inv'`` field returned with ``method='L-BFGS-B'`` might not
      be super reliable, even if the point estimate is pretty good. Check
      initial conditions when using this.
    """
    # Set up progressbar; with no progress requested a no-op stand-in is used
    # so that the rest of the code can call update()/close() unconditionally.
    if show_progress:  # pragma: no cover
        if assume_notebook_for_progressbar:
            from tqdm.notebook import tqdm
        else:
            from tqdm import tqdm
        pbar = tqdm()
    else:
        class Nullbar:
            def update(*args): pass
            def close(*args): pass
        pbar = Nullbar()
    # Set up minimization target: total negative log-likelihood over the data
    def neg_logL_ds(params):
        mappable = functools.partial(_neg_logL_traj, model=modelfamily(*params))
        out = np.nansum(list(map_function(mappable, data)))
        pbar.update()
        return out
    minimize_kwargs = {
        'method' : 'L-BFGS-B',
        'bounds' : modelfamily.bounds,
        'options' : {'maxfun' : 300, 'ftol' : 1e-5},
    }
    # 'People' might try to override the defaults individually
    # (e.g. fit(..., maxfun=1000)); move such keys into the 'options' dict
    # unless an explicit 'options' dict was given.
    if not 'options' in kwargs:
        for key in minimize_kwargs['options']:
            if key in kwargs:
                minimize_kwargs['options'][key] = kwargs[key]
                del kwargs[key]
    minimize_kwargs.update(kwargs)
    res = scipy.optimize.minimize(neg_logL_ds, modelfamily.start_params, **minimize_kwargs)
    pbar.close()
    return res
| 37.78798 | 124 | 0.61184 |
acf369783f40231fc8e2af600ac6eadd4d89f79d | 4,988 | py | Python | lazaro/agents/explorers/noisy_explorer.py | GabrielMusat/lazaro | 9879e938eb1e6da1b6974edf8ab41ece7f33063c | [
"Apache-2.0"
] | 4 | 2021-05-03T15:48:44.000Z | 2021-05-23T16:05:42.000Z | lazaro/agents/explorers/noisy_explorer.py | GabrielMusat/lazaro | 9879e938eb1e6da1b6974edf8ab41ece7f33063c | [
"Apache-2.0"
] | null | null | null | lazaro/agents/explorers/noisy_explorer.py | GabrielMusat/lazaro | 9879e938eb1e6da1b6974edf8ab41ece7f33063c | [
"Apache-2.0"
] | null | null | null | import math
import typing as T
from abc import ABC
import numpy as np
import torch
import torch.nn.functional as F
from .base.explorer import Explorer
from .base.params import NoisyExplorerParams
class NoisyLinear(torch.nn.Module):
    """Linear layer with learnable, factorised Gaussian parameter noise.

    Effective weights are ``weight_mu + weight_sigma * weight_epsilon`` (and
    analogously for the bias), where the epsilon tensors are non-learnable
    buffers resampled via :meth:`reset_noise`. Used for NoisyNet-style
    exploration.
    """
    def __init__(self, in_features: int, out_features: int, std_init: float):
        super(NoisyLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.std_init = std_init
        # Learnable mean and std of the weights; epsilon is registered as a
        # buffer so it moves with the module but receives no gradients.
        self.weight_mu = torch.nn.Parameter(torch.empty(out_features, in_features))
        self.weight_sigma = torch.nn.Parameter(torch.empty(out_features, in_features))
        self.register_buffer('weight_epsilon', torch.empty(out_features, in_features))
        self.bias_mu = torch.nn.Parameter(torch.empty(out_features))
        self.bias_sigma = torch.nn.Parameter(torch.empty(out_features))
        self.register_buffer('bias_epsilon', torch.empty(out_features))
        self.reset_parameters()
        self.reset_noise()
    def reset_parameters(self) -> None:
        """Initialise mu uniformly in +-1/sqrt(fan_in) and sigma to a constant."""
        mu_range = 1 / math.sqrt(self.in_features)
        self.weight_mu.data.uniform_(-mu_range, mu_range)
        self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))
        self.bias_mu.data.uniform_(-mu_range, mu_range)
        self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))
    def _scale_noise(self, size: int) -> torch.Tensor:
        # f(x) = sign(x) * sqrt(|x|) applied to standard-normal samples.
        x = torch.FloatTensor(np.random.normal(loc=0.0, scale=1.0, size=size)).to(self.weight_mu.device)
        return x.sign().mul_(x.abs().sqrt_())
    def reset_noise(self) -> None:
        """Resample the factorised noise buffers (outer product for weights)."""
        epsilon_in = self._scale_noise(self.in_features)
        epsilon_out = self._scale_noise(self.out_features)
        self.weight_epsilon.copy_(epsilon_out.ger(epsilon_in))
        self.bias_epsilon.copy_(epsilon_out)
    def extra_repr(self) -> str:
        return 'in_features={}, out_features={}'.format(self.in_features, self.out_features)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Linear pass with the currently sampled noisy weights/bias.
        return F.linear(x,
                        self.weight_mu + self.weight_sigma * self.weight_epsilon,
                        self.bias_mu + self.bias_sigma * self.bias_epsilon)
class ModelWithNoisyLayers(torch.nn.Module):
    """Wrap a base network and append a stack of extra (noisy) layers.

    The forward pass first runs the wrapped model, then the appended layers.
    """
    def __init__(self, model: torch.nn.Module, noisy_layers: T.List[torch.nn.Module]):
        super().__init__()
        self.model = model
        self.noisy_layers = torch.nn.Sequential(*noisy_layers)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.noisy_layers(self.model(x))
class NoisyExplorer(Explorer, ABC):
    """Exploration strategy based on parameter noise (NoisyNet-style).

    Instead of epsilon-greedy action selection, the agent's network is
    extended with `NoisyLinear` layers whose noise is resampled periodically,
    so the greedy action itself is stochastic.
    """
    def __init__(self, explorer_params: T.Optional[NoisyExplorerParams] = None, *args, **kwargs):
        # Use None as the default instead of NoisyExplorerParams() to avoid
        # the mutable-default-argument pitfall: the old default was a single
        # instance shared by every NoisyExplorer ever constructed.
        if explorer_params is None:
            explorer_params = NoisyExplorerParams()
        if not isinstance(explorer_params, NoisyExplorerParams):
            raise ValueError("argument ep must be an instance of NoisyExplorerParams")
        self.ep: NoisyExplorerParams = explorer_params
        # References to all created noisy layers, so reset_noise() can reach them.
        self.noisy_layers_reference: T.List[NoisyLinear] = []
        self.reset_count: int = 0
        super(NoisyExplorer, self).__init__(*args, **kwargs)
    def last_layers_model_modifier(self, model: torch.nn.Module) -> torch.nn.Module:
        """Append the configured stack of noisy layers after the user's model."""
        self.log.info("wrapping model with noisy layers")
        last_layer = list(model.modules())[-1]
        if not isinstance(last_layer, torch.nn.Linear):
            raise ValueError("the model you have created must have a torch.nn.Linear in the last layer")
        if len(self.ep.extra_layers) == 0:
            return model
        noisy_layers = [NoisyLinear(last_layer.out_features, self.ep.extra_layers[0], self.ep.std_init)]
        for i in range(1, len(self.ep.extra_layers)):
            noisy_layers.append(torch.nn.ReLU())
            noisy_layers.append(NoisyLinear(self.ep.extra_layers[i - 1], self.ep.extra_layers[i], self.ep.std_init))
        self.noisy_layers_reference += noisy_layers
        return ModelWithNoisyLayers(model, noisy_layers)
    def ex_choose(self, actions: np.ndarray, f: T.Callable[[np.ndarray], int]) -> int:
        # All exploration comes from the parameter noise, so act greedily.
        return f(actions)
    def reset_noise(self, *_, **__):
        """Resample the noise of every noisy layer once per `reset_noise_every` steps."""
        self.reset_count += 1
        if self.reset_count < self.ep.reset_noise_every:
            return
        self.reset_count = 0
        i = 0
        for layer in self.noisy_layers_reference:
            # The reference list also holds the interleaved ReLUs; only the
            # actual NoisyLinear layers carry noise to reset.
            if isinstance(layer, NoisyLinear):
                self.log.debug(f"resetting noise for noise layer {i}")
                layer.reset_noise()
                i += 1
    def last_layer_factory(self, in_features: int, out_features: int) -> NoisyLinear:
        """Create (and track) a NoisyLinear layer to be used as an output head."""
        noisy_linear = NoisyLinear(in_features, out_features, self.ep.std_init)
        self.noisy_layers_reference.append(noisy_linear)
        return noisy_linear
    def ex_link(self):
        self.log.info(f"linking {type(self).__name__}...")
        self.add_step_callback("noisy explorer reset noisy", self.reset_noise)
    def ex_get_stats(self) -> T.Dict[str, float]:
        return {}
| 43 | 116 | 0.675621 |
acf369a50c67c7e4dfd2af58fd1e7eb17bc8ff8a | 1,664 | py | Python | example_docker/example/__init__.py | Ilhasoft/celery-broker-on-demand | 801ae98fd8ee4e79bc1cd811013381b96ba3ddf0 | [
"MIT"
] | 4 | 2018-12-01T21:43:05.000Z | 2020-04-13T15:11:49.000Z | example_docker/example/__init__.py | Ilhasoft/celery-broker-on-demand | 801ae98fd8ee4e79bc1cd811013381b96ba3ddf0 | [
"MIT"
] | 2 | 2018-12-05T17:30:01.000Z | 2018-12-20T18:12:40.000Z | example_docker/example/__init__.py | Ilhasoft/celery-broker-on-demand | 801ae98fd8ee4e79bc1cd811013381b96ba3ddf0 | [
"MIT"
] | 1 | 2021-07-05T11:31:54.000Z | 2021-07-05T11:31:54.000Z | import docker
import logging
from time import sleep
from celery_worker_on_demand import CeleryWorkerOnDemand
from celery_worker_on_demand import Agent
from celery_worker_on_demand import UpWorker
from celery_worker_on_demand import DownWorker
from .celery_app import celery_app # noqa:F401
from . import tasks # noqa:F401
logger = logging.getLogger('test-docker')
docker_client = docker.DockerClient(base_url='unix://var/run/docker.sock')
CONTAINERS = {}
class MyAgent(Agent):
def flag_down(self, queue):
return super().flag_down(queue) and CONTAINERS.get(queue.name)
class MyUpWorker(UpWorker):
def run(self):
container = CONTAINERS.get(self.queue.name)
if container:
container.start()
else:
container = docker_client.containers.run(
'cwod-example-docker:latest',
entrypoint='celery -A example worker -l INFO -Q ' +
f'{self.queue.name} -E',
environment={
'BROKER': 'redis://redis:6379/0',
'BACKEND': 'redis://redis:6379/0',
},
network='cwod-example-docker',
detach=True,
)
CONTAINERS[self.queue.name] = container
while not self.queue.has_worker:
container.reload()
logger.debug(f'container.status is: {container.status}')
sleep(1)
class MyDownWorker(DownWorker):
def run(self):
CONTAINERS[self.queue.name].stop()
class MyCeleryWorkerOnDemand(CeleryWorkerOnDemand):
Agent = MyAgent
UpWorker = MyUpWorker
DownWorker = MyDownWorker
| 28.689655 | 74 | 0.63101 |
acf369d16a4b4c89e12ae2557c277ed7ace2e018 | 7,212 | py | Python | SoftLayer/fixtures/SoftLayer_Network_Storage.py | ko101/softlayer-python | f4cc9fa2eb01d97c0e890907ef6735390f1a5b10 | [
"MIT"
] | null | null | null | SoftLayer/fixtures/SoftLayer_Network_Storage.py | ko101/softlayer-python | f4cc9fa2eb01d97c0e890907ef6735390f1a5b10 | [
"MIT"
] | null | null | null | SoftLayer/fixtures/SoftLayer_Network_Storage.py | ko101/softlayer-python | f4cc9fa2eb01d97c0e890907ef6735390f1a5b10 | [
"MIT"
] | null | null | null | STAAS_TEST_VOLUME = {
'accountId': 1234,
'activeTransactions': None,
'activeTransactionCount': 0,
'billingItem': {
'activeChildren': [{
'categoryCode': 'storage_snapshot_space',
'id': 125,
'cancellationDate': '',
}],
'cancellationDate': '',
'categoryCode': 'storage_as_a_service',
'hourlyFlag': None,
'id': 454,
'location': {'id': 449500}
},
'capacityGb': 500,
'hasEncryptionAtRest': 1,
'id': 102,
'iops': 1000,
'lunId': 2,
'osType': {'keyName': 'LINUX'},
'originalVolumeSize': '500',
'parentVolume': {'snapshotSizeBytes': 1024},
'provisionedIops': '1000',
'replicationPartnerCount': 0,
'schedules': [{
'id': 978,
'type': {'keyname': 'SNAPSHOT_WEEKLY'},
}],
'serviceResource': {'datacenter': {'id': 449500, 'name': 'dal05'}},
'serviceResourceBackendIpAddress': '10.1.2.3',
'snapshotCapacityGb': '10',
'staasVersion': '2',
'storageTierLevel': 'READHEAVY_TIER',
'storageType': {'keyName': 'ENDURANCE_BLOCK_STORAGE'},
'username': 'duplicatable_volume_username'
}
# Fixture: canned response for SoftLayer_Network_Storage::getObject — a
# legacy (staasVersion 1) endurance volume with access lists, replication
# partners and snapshot/replication schedules attached.
getObject = {
    'accountId': 1234,
    'activeTransactionCount': 1,
    'activeTransactions': [{
        'transactionStatus': {'friendlyName': 'This is a buffer time in which the customer may cancel the server'}
    }],
    'allowedHardware': [{
        'allowedHost': {
            'credential': {'username': 'joe', 'password': '12345'},
            'name': 'test-server',
        },
        'domain': 'example.com',
        'hostname': 'test-server',
        'id': 1234,
        'primaryBackendIpAddress': '10.0.0.2',
    }],
    'allowedIpAddresses': [{
        'allowedHost': {
            'credential': {'username': 'joe', 'password': '12345'},
            'name': 'test-server',
        },
        'id': 1234,
        'ipAddress': '10.0.0.1',
        'note': 'backend ip',
    }],
    'allowedSubnets': [{
        'allowedHost': {
            'credential': {'username': 'joe', 'password': '12345'},
            'name': 'test-server',
        },
        'cidr': '24',
        'id': 1234,
        'networkIdentifier': '10.0.0.1',
        'note': 'backend subnet',
    }],
    'allowedVirtualGuests': [{
        'allowedHost': {
            'credential': {'username': 'joe', 'password': '12345'},
            'name': 'test-server',
        },
        'domain': 'example.com',
        'hostname': 'test-server',
        'id': 1234,
        'primaryBackendIpAddress': '10.0.0.1',
    }],
    'billingItem': {
        'activeChildren': [{
            'cancellationDate': '',
            'categoryCode': 'storage_snapshot_space',
            'id': 123,
        }],
        'cancellationDate': '',
        'categoryCode': 'storage_service_enterprise',
        'id': 449,
        'location': {'id': 449500}
    },
    'bytesUsed': 0,
    'capacityGb': 20,
    'createDate': '2015:50:15-04:00',
    'fileNetworkMountAddress': '127.0.0.1:/TEST',
    'guestId': '',
    'hardwareId': '',
    'hasEncryptionAtRest': 0,
    'hostId': '',
    'id': 100,
    'iops': 1000,
    'lunId': 2,
    'nasType': 'ISCSI',
    'notes': """{'status': 'available'}""",
    'originalSnapshotName': 'test-original-snapshot-name',
    'originalVolumeName': 'test-original-volume-name',
    'originalVolumeSize': '20',
    'osType': {'keyName': 'LINUX'},
    'parentVolume': {'snapshotSizeBytes': 1024},
    'password': '',
    'provisionedIops': '1000',
    'replicationPartnerCount': 1,
    'replicationPartners': [{
        'createDate': '2017:50:15-04:00',
        'id': 1784,
        'nasType': 'ISCSI_REPLICANT',
        'replicationSchedule': {'type': {'keyname': 'REPLICATION_HOURLY'}},
        'serviceResource': {'datacenter': {'name': 'wdc01'}},
        'serviceResourceBackendIpAddress': '10.3.174.79',
        'username': 'TEST_REP_1',
    }, {
        'createDate': '2017:50:15-04:00',
        'id': 1785,
        'nasType': 'ISCSI_REPLICANT',
        'replicationSchedule': {'type': {'keyname': 'REPLICATION_DAILY'}},
        'serviceResource': {'datacenter': {'name': 'dal01'}},
        'serviceResourceBackendIpAddress': '10.3.177.84',
        'username': 'TEST_REP_2',
    }],
    'replicationStatus': 'Replicant Volume Provisioning has completed.',
    'schedules': [
        {
            'id': 978,
            'type': {'keyname': 'SNAPSHOT_WEEKLY'},
            'properties': [
                {'type': {'keyname': 'MINUTE'}, 'value': '30'},
            ]
        },
        {
            'id': 988,
            'type': {'keyname': 'REPLICATION_INTERVAL'},
            'properties': [
                {'type': {'keyname': 'MINUTE'}, 'value': '-1'},
            ]
        }
    ],
    'serviceProviderId': 1,
    'serviceResource': {'datacenter': {'id': 449500, 'name': 'dal05'}},
    'serviceResourceBackendIpAddress': '10.1.2.3',
    'serviceResourceName': 'Storage Type 01 Aggregate staaspar0101_pc01',
    'snapshotCapacityGb': '10',
    'staasVersion': '1',
    'storageTierLevel': 'READHEAVY_TIER',
    'storageType': {'keyName': 'ENDURANCE_STORAGE'},
    'username': 'username',
    'dependentDuplicate': 1,
}
# Fixtures: canned list responses for snapshot / replication related calls.
getSnapshots = [{
    'id': 470,
    'notes': 'unit_testing_note',
    'snapshotCreationTimestamp': '2016-07-06T07:41:19-05:00',
    'snapshotSizeBytes': '42',
}]
# Two replication partners in different datacenters with different schedules.
getReplicationPartners = [{
    'id': 1784,
    'accountId': 3000,
    'capacityGb': 20,
    'username': 'TEST_REP_1',
    'serviceResourceBackendIpAddress': '10.3.174.79',
    'nasType': 'ISCSI_REPLICANT',
    'hostId': None,
    'guestId': None,
    'hardwareId': None,
    'createDate': '2017:50:15-04:00',
    'serviceResource': {'datacenter': {'name': 'wdc01'}},
    'replicationSchedule': {'type': {'keyname': 'REPLICATION_HOURLY'}},
}, {
    'id': 1785,
    'accountId': 3001,
    'capacityGb': 20,
    'username': 'TEST_REP_2',
    'serviceResourceBackendIpAddress': '10.3.177.84',
    'nasType': 'ISCSI_REPLICANT',
    'hostId': None,
    'guestId': None,
    'hardwareId': None,
    'createDate': '2017:50:15-04:00',
    'serviceResource': {'datacenter': {'name': 'dal01'}},
    'replicationSchedule': {'type': {'keyname': 'REPLICATION_DAILY'}},
}]
getValidReplicationTargetDatacenterLocations = [{
    'id': 12345,
    'longName': 'Dallas 05',
    'name': 'dal05'
}]
listVolumeSchedules = [
    {
        'id': 978,
        'type': {'keyname': 'SNAPSHOT_WEEKLY'},
        'properties': [{'type': {'keyname': 'MINUTE'}, 'value': '30'}]
    },
    {
        'id': 988,
        'type': {'keyname': 'REPLICATION_INTERVAL'},
        'properties': [{'type': {'keyname': 'MINUTE'}, 'value': '-1'}]
    }
]
# Fixtures: simple success flags / small payloads for the remaining
# SoftLayer_Network_Storage API methods mocked in the tests.
deleteObject = True
editObject = True
allowAccessFromHostList = True
removeAccessFromHostList = True
failoverToReplicant = True
failbackFromReplicant = True
restoreFromSnapshot = True
disasterRecoveryFailoverToReplicant = True
createSnapshot = {
    'id': 449
}
enableSnapshots = True
disableSnapshots = True
getVolumeCountLimits = {
    'datacenterName': 'global',
    'maximumAvailableCount': 300,
    'provisionedCount': 100
}
refreshDuplicate = {
    'dependentDuplicate': 1
}
convertCloneDependentToIndependent = {
    'dependentDuplicate': 1
}
| 29.317073 | 114 | 0.557266 |
acf369e1095abe175ecd58f53d691eb76c7d8b3e | 8,299 | py | Python | benchmarks/SimResults/myNumpy0102.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | benchmarks/SimResults/myNumpy0102.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | benchmarks/SimResults/myNumpy0102.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
import sys
import numpy as np
import json
import numpy.matlib as matlab
import time
stats_to_keep = [7, 15, 19, 22, 23, 24, 25, 26, 27]
def mag_func(s):
    """Parse a MATLAB-style matrix literal such as "[1 2;3 4]" into a nested
    list of floats.

    Rows are separated by ';', elements by whitespace (commas are also
    accepted). This reproduces the old ``np.matrix(...).tolist()`` round-trip
    without using the deprecated ``numpy.matrix`` class.
    """
    rows = s.strip('[]').split(';')
    return [[float(value) for value in row.replace(',', ' ').split()] for row in rows]
# Input normalisation settings (MATLAB mapminmax export), one dict per core
# model: index 0 and 1 correspond to the two migration directions.
x1_step1 = [{
    "xoffset":
    mag_func("[0;0;0;0;0;0;0.0691521565902343;0;0]"),
    "gain":
    mag_func(
        "[2;2;2;5.73284623960209;4.77506387667949;5.39128041995259;2.14857886190492;3.46052845373869;3.51959729112496]"
    ),
    "ymin":
    -1
}, {
    "xoffset":
    mag_func("[0;0;0;0;0;0;0.0700397327039092;0;0]"),
    "gain":
    mag_func(
        "[2;2;2.00095793086932;5.7820297402784;4.77510723250418;5.39127621293902;2.15062951647935;3.46047762128126;3.57502318518594]"
    ),
    "ymin":
    -1
}]
# Hidden-layer bias vectors (6 hidden units), one per core model.
b1 = [
    mag_func(
        "[-3.3688891117011054988;-8.2293320391583790752;-1.7334601286131952058;-1.9986327217042907378;-2.5053416288235963449;-8.1569709032064885434]"
    ),
    mag_func(
        "[-7.2631126800595460224;8.6061465985414429269;-4.3428289981997449942;0.55150336971624480675;-2.3831986848534247869;-1.136130394707973057]"
    )
]
# Input-to-hidden weight matrices (6 hidden units x 9 inputs), one per model.
IW1_1 = [
    mag_func(
        "[-2.348719775813743027 -0.22018421716286579182 -1.5802631609840778193 -1.0964083883061301883 -0.10537089047915063067 -0.74392906528934610311 1.3830645128128626897 1.2418361423265078525 2.6389354645902578511;-18.503534653654000408 -1.3567171047952162333 0.9353195637161398901 1.7843220361644791527 4.9499390443557942376 0.035633396763988384182 3.0720821989576103661 1.2256923832207147207 8.4971835470628871434;-3.1653454964820197937 -0.24397203355505769906 -0.050656300689952832306 -0.43268941768256152791 -0.34550645485595032902 1.5441107245900400624 1.2381003255939935226 0.86308100857537672912 0.65459415694979783407;-4.1503988951666155316 -0.69938494574904730428 -0.56663954206674249647 0.73916581338310038962 0.71037692604899072002 0.78001478279345903832 1.1514522124851906959 0.53636795180432417229 0.37414444893035841977;-3.1284099936813904996 -0.26424477417882641372 -0.065946878526028610001 -0.56674909651603255778 -0.53975104505686499756 1.214029544014185058 0.60899329555857206753 0.48664204405458943992 0.3504562959543336409;-15.958109415874996984 -1.4108139497226916959 -1.107162493705539541 3.7211058149574802734 8.454120803477470858 -4.0913589188123138385 -0.63216115828409791266 -2.2097843316655958468 12.95443630772610355]"
    ),
    mag_func(
        "[-11.151061612802777745 -2.3754600684280768874 -3.4270900252152554089 2.2372886535732163793 6.2466158022573363695 1.1398207227553740495 2.2912079801602440732 -1.198803123987999486 7.1871140353689613178;10.026890670306940478 0.7079582208038459612 1.1084845179171047835 5.2704996581265222133 0.30254475882770542894 -2.8093578450872396246 -8.5236978717515512471 1.3669854977724549272 -6.0603960666125722412;-7.3054406886265352838 -1.0061101796225433436 -0.69337277021656618103 -1.4970328567824255916 2.1189064734119922306 0.72350139140024982698 5.0554285063535067124 0.20365686556143400288 3.089141114144779543;0.041389560461311702966 -1.9070487947773706594 1.6195790729541386099 2.6916686499005018085 0.81598720968669646858 0.69392087901276178208 -0.71956239370546548617 -1.7515802695923516907 1.5213821656437727103;-6.2800010694820365131 -1.0219787535179749582 6.951886166240443643 -15.655404693513082393 27.769965023358935241 -21.385386034999452676 15.930924097392697547 -16.717348546531678011 -3.0914624654054656538;-3.8335441060579777961 0.55557827475754828495 -1.8867834548247639503 1.6919331036091977971 -0.41422100720762333737 2.3776921263563539632 1.0605699249411038032 3.5050659497700134004 0.69874633431861554733]"
    )
]
# Output-layer bias (scalar per model) and hidden-to-output weight rows.
b2 = [-0.44054822888600259079, -0.46192405142443110355]
LW2_1 = [
    mag_func(
        "[0.43254536454252051625 0.23379572177786439591 7.5090219908447366493 0.56053540768479948042 -8.0606532740041636487 -0.23229911349299361967]"
    ),
    mag_func(
        "[-0.64516238158876371145 0.79553441369156374652 1.0685437307833010045 0.46925996203884168256 0.19421705061403940484 0.37276823104822237598]"
    )
]
# Output de-normalisation settings (inverse mapminmax), one per core model.
y1_step1 = [{
    "ymin": -1,
    "gain": 0.755287009063444,
    "xoffset": 0.027
}, {
    "ymin": -1,
    "gain": 0.616142945163278,
    "xoffset": 0.008
}]
# Convert the parsed nested lists into numpy arrays with the column shapes
# the forward pass below expects (offsets/gains become (n, 1) columns).
for i in range(0, 2):
    x1_step1[i]["xoffset"] = np.array(x1_step1[i]["xoffset"]).reshape(
        len(x1_step1[i]["xoffset"]), 1)
    x1_step1[i]["gain"] = np.array(x1_step1[i]["gain"]).reshape(
        len(x1_step1[i]["gain"]), 1)
    x1_step1[i]["ymin"] = np.array(x1_step1[i]["ymin"])
    b1[i] = np.array(b1[i])
    IW1_1[i] = np.array(IW1_1[i])
    LW2_1[i] = np.array(LW2_1[i])
    # LW2_1 rows come in as (1, 6); keep them as (6, 1) columns.
    LW2_1[i] = LW2_1[i][0].reshape(len(LW2_1[i][0]), 1)
def mapminmax_apply(x, settings):
    """Normalise raw inputs into the network's range (MATLAB ``mapminmax``)."""
    shifted = np.subtract(x, settings['xoffset'])
    scaled = np.multiply(shifted, settings['gain'])
    return scaled + settings['ymin']
def tansig_apply(n):
    """Hyperbolic-tangent sigmoid transfer function (MATLAB ``tansig``).

    tansig(n) = 2 / (1 + exp(-2n)) - 1, which is mathematically tanh(n).
    The library implementation is used because the explicit formula
    overflows exp() (with a RuntimeWarning) for large negative inputs.
    """
    return np.tanh(np.array(n))
def mapminmax_reverse(y, settings):
    """Inverse of `mapminmax_apply`: map network outputs back to raw units."""
    unscaled = np.divide(np.subtract(y, settings['ymin']), settings['gain'])
    return unscaled + settings['xoffset']
def formatRawData(x):
    """Turn the raw 31-element stats vector into the 28-feature NN input.

    Each access/hit counter pair becomes hit- and miss-ratios; a zero
    denominator is replaced by 1 so the ratios are well defined (all counters
    of the group are zero in that case, so the ratios come out as 0).
    """
    result = []
    # Data TLB: accesses at x[6], misses at x[7]
    dtlbs = x[6]
    if dtlbs == 0:
        dtlbs = 1
    result.append(x[7] / dtlbs)
    result.append(1 - (x[7] / dtlbs))
    # Instruction TLB: accesses at x[8], misses at x[9]
    itlb = x[8]
    if itlb == 0:
        itlb = 1  # BUGFIX: was `itlb = 0`, leaving the zero guard ineffective (0/0)
    result.append(x[9] / itlb)
    result.append(1 - (x[9] / itlb))
    # Second-level TLB: accesses at x[10], misses at x[11]
    stlb = x[10]
    if stlb == 0:
        stlb = 1
    result.append(x[11] / stlb)
    result.append(1 - (x[11] / stlb))
    # L1 data cache: loads x[12], load-hits x[13], stores x[14]
    dl1 = x[12] + x[14]
    if dl1 == 0:
        dl1 = 1
    result.append(x[12] / dl1)
    result.append(x[13] / dl1)
    result.append(1 - (x[13] / dl1))
    result.append(x[14] / dl1)
    # L1 instruction cache: x[15] loads, x[16] hits, x[17] stores
    il1 = x[15] + x[17]
    if il1 == 0:
        il1 = 1
    result.append(x[15] / il1)
    result.append(x[16] / il1)
    result.append(1 - (x[16] / il1))
    result.append(x[17] / il1)
    # L2 cache: x[18] loads, x[19] hits, x[20] stores
    l2 = x[18] + x[20]
    if l2 == 0:
        l2 = 1
    result.append(x[18] / l2)
    result.append(x[19] / l2)
    result.append(1 - (x[19] / l2))
    result.append(x[20] / l2)
    # L3 cache: x[21] loads, x[22] hits, x[23] stores
    l3 = x[21] + x[23]
    if l3 == 0:
        l3 = 1
    result.append(x[21] / l3)
    result.append(x[22] / l3)
    result.append(1 - (x[22] / l3))
    result.append(x[23] / l3)
    # Instruction mix: x[24..29] counters normalised by the total at x[30]
    inst_mix = x[30]
    if inst_mix == 0:
        inst_mix = 1
    result.append(x[24] / inst_mix)
    result.append(x[25] / inst_mix)
    result.append(x[26] / inst_mix)
    result.append(x[27] / inst_mix)
    result.append(x[28] / inst_mix)
    result.append(x[29] / inst_mix)
    return result
def removeconstantrows_apply(stats):
    """Keep only the feature rows whose indices are listed in the module-level
    `stats_to_keep` (the non-constant rows identified during training)."""
    return [stats[int(index)] for index in stats_to_keep]
if __name__ == '__main__':
start = time.time()
#if this fails try to change the os.popen paramater for this scripts location in demo.py, called path
'''
arg[0] is the script name
arg[1] is the core number stats are from
arg[2] is the stats json format
'''
core = int(sys.argv[1])
x1 = json.loads(sys.argv[2])
x1 = formatRawData(x1)
# f = open("/home/tugberk/stat0102.txt", "a")
x1 = removeconstantrows_apply(x1)
# if core == 0:
# f.write("Big to small ")
# for stat in x1:
# f.write(str(stat) + " ")
# else:
# f.write("small to big ")
# for stat in x1:
# f.write(str(stat) + " ")
x1 = np.array(x1)
if type(x1[0]) is not list:
Q = 1
else:
Q = x1.shape[1]
x1 = np.transpose(x1.reshape(Q, x1.shape[0]))
xp1 = mapminmax_apply(x1, x1_step1[core])
xp1 = np.array(xp1)
p1 = np.matlib.repmat(b1[core], 1, Q)
a1 = tansig_apply(p1 + np.dot(IW1_1[core], xp1))
LW2_1 = np.transpose(LW2_1[core])
a2 = np.matlib.repmat(b2[core], 1, Q) + np.dot(LW2_1[0], a1)
y1 = mapminmax_reverse(a2, y1_step1[core])
y1 = np.transpose(y1)
y1 = y1.tolist()
y1 = json.dumps(y1, separators=(',', ':'))
if '.' in y1 or ',' in y1:
y1 = y1[2:-3]
else:
y1 = 0
# f.write(" prediction = " + str(y1) +"\n")
end = time.time()
with open("/scratch/nas/1/dn/sniper-6.0/benchmarks/SimResults/paper1exec.txt", "a") as myfile:
myfile.write("predict time : " + str(start - end))
print y1
| 33.46371 | 1,248 | 0.669599 |
acf36a16d709f0a43517cc144ea2e28b95c0caa1 | 21,332 | py | Python | check.py | vchuravy/binaryen | 8c97dc61a713768d7f8302ec3a695c1207ce7239 | [
"Apache-2.0"
] | null | null | null | check.py | vchuravy/binaryen | 8c97dc61a713768d7f8302ec3a695c1207ce7239 | [
"Apache-2.0"
] | null | null | null | check.py | vchuravy/binaryen | 8c97dc61a713768d7f8302ec3a695c1207ce7239 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright 2015 WebAssembly Community Group participants
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import shutil
import subprocess
import sys
import unittest
from collections import OrderedDict
from scripts.test import asm2wasm
from scripts.test import binaryenjs
from scripts.test import lld
from scripts.test import shared
from scripts.test import support
from scripts.test import wasm2js
# Announce which external wasm interpreter (if any) will be used for the
# tests, and fail fast if the configured path does not exist.
if shared.options.interpreter:
    print('[ using wasm interpreter at "%s" ]' % shared.options.interpreter)
    assert os.path.exists(shared.options.interpreter), 'interpreter not found'
def run_help_tests():
    """Run every binaryen tool with --help and --version and sanity-check the output."""
    print('[ checking --help is useful... ]\n')
    skip_suffixes = ['.txt', '.js', '.ilk', '.pdb', '.dll', '.wasm', '.manifest']
    bin_dir = shared.options.binaryen_bin
    candidates = (os.path.join(bin_dir, entry) for entry in os.listdir(bin_dir))
    executables = sorted(
        path for path in candidates
        if os.path.isfile(path) and not any(path.endswith(s) for s in skip_suffixes))
    assert len(executables)
    for exe in executables:
        print('.. %s --help' % exe)
        proc = subprocess.Popen([exe, '--help'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        out = out.decode('utf-8')
        err = err.decode('utf-8')
        assert len(err) == 0, 'Expected no stderr, got:\n%s' % err
        assert os.path.basename(exe).replace('.exe', '') in out, 'Expected help to contain program name, got:\n%s' % out
        assert len(out.split('\n')) > 8, 'Expected some help, got:\n%s' % out
    print('[ checking --version ... ]\n')
    for exe in executables:
        print('.. %s --version' % exe)
        proc = subprocess.Popen([exe, '--version'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        out = out.decode('utf-8')
        err = err.decode('utf-8')
        assert len(err) == 0, 'Expected no stderr, got:\n%s' % err
        assert os.path.basename(exe).replace('.exe', '') in out, 'Expected version to contain program name, got:\n%s' % out
        assert len(out.strip().splitlines()) == 1, 'Expected only version info, got:\n%s' % out
def run_wasm_opt_tests():
    """Exercise wasm-opt: -o output, binary round-trips, passes, printing,
    testcases and debug-info source maps."""
    print('\n[ checking wasm-opt -o notation... ]\n')
    for extra_args in [[], ['--no-validation']]:
        wast = os.path.join(shared.options.binaryen_test, 'hello_world.wast')
        shared.delete_from_orbit('a.wast')
        out = 'a.wast'
        cmd = shared.WASM_OPT + [wast, '-o', out, '-S'] + extra_args
        support.run_command(cmd)
        # Text-to-text round trip must be the identity.
        shared.fail_if_not_identical_to_file(open(out).read(), wast)
    print('\n[ checking wasm-opt binary reading/writing... ]\n')
    shutil.copyfile(os.path.join(shared.options.binaryen_test, 'hello_world.wast'), 'a.wast')
    shared.delete_from_orbit('a.wasm')
    shared.delete_from_orbit('b.wast')
    support.run_command(shared.WASM_OPT + ['a.wast', '-o', 'a.wasm'])
    # Binary wasm starts with a 0 byte ('\0asm' magic); text never does.
    assert open('a.wasm', 'rb').read()[0] == 0, 'we emit binary by default'
    support.run_command(shared.WASM_OPT + ['a.wasm', '-o', 'b.wast', '-S'])
    assert open('b.wast', 'rb').read()[0] != 0, 'we emit text with -S'
    print('\n[ checking wasm-opt passes... ]\n')
    for t in shared.get_tests(shared.get_test_dir('passes'), ['.wast', '.wasm']):
        print('..', os.path.basename(t))
        binary = '.wasm' in t
        base = os.path.basename(t).replace('.wast', '').replace('.wasm', '')
        passname = base
        # Numeric test names look up their pass list in a sibling .passes file.
        if passname.isdigit():
            passname = open(os.path.join(shared.get_test_dir('passes'), passname + '.passes')).read().strip()
        opts = [('--' + p if not p.startswith('O') else '-' + p) for p in passname.split('_')]
        actual = ''
        for module, asserts in support.split_wast(t):
            assert len(asserts) == 0
            support.write_wast('split.wast', module)
            cmd = shared.WASM_OPT + opts + ['split.wast', '--print']
            curr = support.run_command(cmd)
            actual += curr
            # also check debug mode output is valid
            debugged = support.run_command(cmd + ['--debug'], stderr=subprocess.PIPE)
            shared.fail_if_not_contained(actual, debugged)
            # also check pass-debug mode
            def check():
                pass_debug = support.run_command(cmd)
                shared.fail_if_not_identical(curr, pass_debug)
            shared.with_pass_debug(check)
        expected_file = os.path.join(shared.get_test_dir('passes'), base + ('.bin' if binary else '') + '.txt')
        shared.fail_if_not_identical_to_file(actual, expected_file)
        # Some passes also emit wrapper files; compare those when expected.
        if 'emit-js-wrapper' in t:
            with open('a.js') as actual:
                shared.fail_if_not_identical_to_file(actual.read(), t + '.js')
        if 'emit-spec-wrapper' in t:
            with open('a.wat') as actual:
                shared.fail_if_not_identical_to_file(actual.read(), t + '.wat')
    print('\n[ checking wasm-opt parsing & printing... ]\n')
    for t in shared.get_tests(shared.get_test_dir('print'), ['.wast']):
        print('..', os.path.basename(t))
        wasm = os.path.basename(t).replace('.wast', '')
        cmd = shared.WASM_OPT + [t, '--print', '-all']
        print(' ', ' '.join(cmd))
        actual, err = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True).communicate()
        expected_file = os.path.join(shared.get_test_dir('print'), wasm + '.txt')
        shared.fail_if_not_identical_to_file(actual, expected_file)
        cmd = shared.WASM_OPT + [os.path.join(shared.get_test_dir('print'), t), '--print-minified', '-all']
        print(' ', ' '.join(cmd))
        actual, err = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True).communicate()
        shared.fail_if_not_identical(actual.strip(), open(os.path.join(shared.get_test_dir('print'), wasm + '.minified.txt')).read().strip())
    print('\n[ checking wasm-opt testcases... ]\n')
    for t in shared.get_tests(shared.options.binaryen_test, ['.wast']):
        print('..', os.path.basename(t))
        f = t + '.from-wast'
        cmd = shared.WASM_OPT + [t, '--print', '-all']
        actual = support.run_command(cmd)
        actual = actual.replace('printing before:\n', '')
        shared.fail_if_not_identical_to_file(actual, f)
        shared.binary_format_check(t, wasm_as_args=['-g'])  # test with debuginfo
        shared.binary_format_check(t, wasm_as_args=[], binary_suffix='.fromBinary.noDebugInfo')  # test without debuginfo
        shared.minify_check(t)
    print('\n[ checking wasm-opt debugInfo read-write... ]\n')
    for t in shared.get_tests(shared.options.binaryen_test, ['.fromasm']):
        if 'debugInfo' not in t:
            continue
        print('..', os.path.basename(t))
        f = t + '.read-written'
        # Round-trip a source map through assemble -> optimize -> disassemble.
        support.run_command(shared.WASM_AS + [t, '--source-map=a.map', '-o', 'a.wasm', '-g'])
        support.run_command(shared.WASM_OPT + ['a.wasm', '--input-source-map=a.map', '-o', 'b.wasm', '--output-source-map=b.map', '-g'])
        actual = support.run_command(shared.WASM_DIS + ['b.wasm', '--source-map=b.map'])
        shared.fail_if_not_identical_to_file(actual, f)
def run_wasm_dis_tests():
    """Disassemble each provided .wasm binary and compare the text output
    against the checked-in .fromBinary file, then verify the binary validates.
    """
    print('\n[ checking wasm-dis on provided binaries... ]\n')
    for binary in shared.get_tests(shared.options.binaryen_test, ['.wasm']):
        print('..', os.path.basename(binary))
        dis_cmd = shared.WASM_DIS + [binary]
        source_map = binary + '.map'
        if os.path.isfile(source_map):
            dis_cmd.extend(['--source-map', source_map])
        disassembly = support.run_command(dis_cmd)
        shared.fail_if_not_identical_to_file(disassembly, binary + '.fromBinary')

        # also verify there are no validation errors
        def check():
            support.run_command(shared.WASM_OPT + [binary, '-all'])

        shared.with_pass_debug(check)
        shared.validate_binary(binary)
def run_crash_tests():
    """Feed known-tricky inputs to wasm-opt and require a graceful parse
    error (nonzero exit + diagnostic) rather than a crash."""
    print("\n[ checking we don't crash on tricky inputs... ]\n")
    crash_dir = shared.get_test_dir('crash')
    for case in shared.get_tests(crash_dir, ['.wast', '.wasm']):
        print('..', os.path.basename(case))
        # expect a parse error to be reported
        support.run_command(shared.WASM_OPT + [case],
                            expected_err='parse exception:',
                            err_contains=True,
                            expected_status=1)
def run_dylink_tests():
    """Round-trip each dylib*.wasm through wasm-opt and assert that the
    dylink section appears at byte offset 11 (right after the header)."""
    print("\n[ we emit dylink sections properly... ]\n")
    pattern = os.path.join(shared.options.binaryen_test, 'dylib*.wasm')
    for case in sorted(glob.glob(pattern)):
        print('..', os.path.basename(case))
        support.run_command(shared.WASM_OPT + [case, '-o', 'a.wasm'])
        with open('a.wasm', 'rb') as output:
            index = output.read().find(b'dylink')
            print(' ', index)
            assert index == 11, 'dylink section must be first, right after the magic number etc.'
def run_ctor_eval_tests():
    """Run wasm-ctor-eval over each ctor-eval testcase and compare its
    emitted text output against the checked-in expectation.

    For each test ``t``, ``t + '.ctors'`` names the ctors to evaluate and
    ``t + '.out'`` holds the expected result.
    """
    print('\n[ checking wasm-ctor-eval... ]\n')
    for t in shared.get_tests(shared.get_test_dir('ctor-eval'), ['.wast', '.wasm']):
        print('..', os.path.basename(t))
        # fix: close the ctors list file deterministically instead of leaking
        # the handle until GC
        with open(t + '.ctors') as f:
            ctors = f.read().strip()
        cmd = shared.WASM_CTOR_EVAL + [t, '-o', 'a.wast', '-S', '--ctors', ctors]
        support.run_command(cmd)
        # fix: same for the produced output file
        with open('a.wast') as f:
            actual = f.read()
        out = t + '.out'
        shared.fail_if_not_identical_to_file(actual, out)
def run_wasm_metadce_tests():
    """Run wasm-metadce on each metadce testcase, checking both the emitted
    module (against *.dced) and the tool's stdout (against *.dced.stdout)."""
    print('\n[ checking wasm-metadce ]\n')
    for case in shared.get_tests(shared.get_test_dir('metadce'), ['.wast', '.wasm']):
        print('..', os.path.basename(case))
        graph_file = case + '.graph.txt'
        metadce_cmd = shared.WASM_METADCE + [case, '--graph-file=' + graph_file,
                                             '-o', 'a.wast', '-S', '-all']
        stdout = support.run_command(metadce_cmd)
        expected = case + '.dced'
        with open('a.wast') as seen:
            shared.fail_if_not_identical_to_file(seen.read(), expected)
        shared.fail_if_not_identical_to_file(stdout, expected + '.stdout')
def run_wasm_reduce_tests():
    """Exercise wasm-reduce: first on fixed testcases with known expected
    reductions, then on a generated fuzz case for general coverage."""
    if not shared.has_shell_timeout():
        print('\n[ skipping wasm-reduce testcases]\n')
        return

    print('\n[ checking wasm-reduce testcases]\n')
    # fixed testcases
    for case in shared.get_tests(shared.get_test_dir('reduce'), ['.wast']):
        print('..', os.path.basename(case))
        # convert to wasm
        support.run_command(shared.WASM_AS + [case, '-o', 'a.wasm'])
        support.run_command(shared.WASM_REDUCE + [
            'a.wasm',
            '--command=%s b.wasm --fuzz-exec -all' % shared.WASM_OPT[0],
            '-t', 'b.wasm', '-w', 'c.wasm', '--timeout=4'])
        support.run_command(shared.WASM_DIS + ['c.wasm', '-o', 'a.wast'])
        with open('a.wast') as seen:
            shared.fail_if_not_identical_to_file(seen.read(), case + '.txt')

    # run on a nontrivial fuzz testcase, for general coverage
    # this is very slow in ThreadSanitizer, so avoid it there
    if 'fsanitize=thread' in str(os.environ):
        return
    print('\n[ checking wasm-reduce fuzz testcase ]\n')
    fuzz_input = os.path.join(shared.options.binaryen_test,
                              'unreachable-import_wasm-only.asm.js')
    support.run_command(shared.WASM_OPT + [fuzz_input, '-ttf', '-Os', '-o', 'a.wasm', '-all'])
    before = os.stat('a.wasm').st_size
    support.run_command(shared.WASM_REDUCE + [
        'a.wasm',
        '--command=%s b.wasm --fuzz-exec -all' % shared.WASM_OPT[0],
        '-t', 'b.wasm', '-w', 'c.wasm'])
    after = os.stat('c.wasm').st_size
    # 0.65 is a custom threshold to check if we have shrunk the output
    # sufficiently
    assert after < 0.7 * before, [before, after]
def run_spec_tests():
    """Run each spec test through wasm-shell, compare output to the
    expectation log when one exists, then round-trip split modules through
    the binary format and re-check.

    Fixes applied here: the expected-output file and the appended result
    wast are now opened via context managers, so handles are closed (and
    the append is flushed) deterministically instead of relying on GC.
    """
    print('\n[ checking wasm-shell spec testcases... ]\n')

    for wast in shared.options.spec_tests:
        print('..', os.path.basename(wast))

        def run_spec_test(wast):
            cmd = shared.WASM_SHELL + [wast]
            return support.run_command(cmd, stderr=subprocess.PIPE)

        def run_opt_test(wast):
            # check optimization validation
            cmd = shared.WASM_OPT + [wast, '-O', '-all']
            support.run_command(cmd)

        def check_expected(actual, expected):
            if expected and os.path.exists(expected):
                # fix: close the expected-output file deterministically
                with open(expected) as f:
                    expected = f.read()
                print('       (using expected output)')
                actual = actual.strip()
                expected = expected.strip()
                if actual != expected:
                    shared.fail(actual, expected)

        expected = os.path.join(shared.get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log')

        # some spec tests should fail (actual process failure, not just assert_invalid)
        try:
            actual = run_spec_test(wast)
        except Exception as e:
            if ('wasm-validator error' in str(e) or 'parse exception' in str(e)) and '.fail.' in os.path.basename(wast):
                print('<< test failed as expected >>')
                continue  # don't try all the binary format stuff TODO
            else:
                shared.fail_with_error(str(e))

        check_expected(actual, expected)

        # skip binary checks for tests that reuse previous modules by name, as that's a wast-only feature
        if os.path.basename(wast) in ['exports.wast']:  # FIXME
            continue

        # we must ignore some binary format splits
        splits_to_skip = {
            'func.wast': [2],
            'return.wast': [2]
        }

        # check binary format. here we can verify execution of the final
        # result, no need for an output verification
        # some wast files cannot be split:
        # * comments.wast: contains characters that are not valid utf-8,
        #   so our string splitting code fails there
        if os.path.basename(wast) not in ['comments.wast']:
            split_num = 0
            actual = ''
            for module, asserts in support.split_wast(wast):
                skip = splits_to_skip.get(os.path.basename(wast)) or []
                if split_num in skip:
                    print('    skipping split module', split_num - 1)
                    split_num += 1
                    continue
                print('    testing split module', split_num)
                split_num += 1
                support.write_wast('split.wast', module, asserts)
                run_spec_test('split.wast')  # before binary stuff - just check it's still ok split out
                run_opt_test('split.wast')  # also that our optimizer doesn't break on it
                result_wast = shared.binary_format_check('split.wast', verify_final_result=False, original_wast=wast)
                # add the asserts, and verify that the test still passes
                # fix: context manager guarantees the append is flushed and closed
                with open(result_wast, 'a') as f:
                    f.write('\n' + '\n'.join(asserts))
                actual += run_spec_test(result_wast)
            # compare all the outputs to the expected output
            check_expected(actual, os.path.join(shared.get_test_dir('spec'), 'expected-output', os.path.basename(wast) + '.log'))
        else:
            # handle unsplittable wast files
            run_spec_test(wast)
def run_validator_tests():
    """Check wasm-as validation modes: files that pass by default, fail under
    --validate=web, pass under --validate=none, or always fail to parse."""
    print('\n[ running validation tests... ]\n')
    validator_dir = shared.get_test_dir('validator')

    def testcase(name):
        return os.path.join(validator_dir, name)

    # Ensure the tests validate by default
    support.run_command(shared.WASM_AS + [testcase('invalid_export.wast')])
    support.run_command(shared.WASM_AS + [testcase('invalid_import.wast')])
    # the same files must be rejected under web validation
    support.run_command(shared.WASM_AS + ['--validate=web', testcase('invalid_export.wast')],
                        expected_status=1)
    support.run_command(shared.WASM_AS + ['--validate=web', testcase('invalid_import.wast')],
                        expected_status=1)
    # with validation disabled, an invalid return is accepted
    support.run_command(shared.WASM_AS + ['--validate=none', testcase('invalid_return.wast')])
    # a malformed number must fail regardless of validation mode
    support.run_command(shared.WASM_AS + [testcase('invalid_number.wast')],
                        expected_status=1)
def run_gcc_tests():
    """Build and run the native C/C++ API examples under the example test dir.

    For each directory entry:
      * a ``.txt`` file may contain a recorded C API trace; it is cleaned
        into ``trace.cpp`` by clean_c_api_trace.py and built like a source,
      * a ``.c``/``.cpp`` file is compiled directly,
    then linked against the binaryen shared library, executed, and its
    stdout compared against the matching ``.txt`` expectation file.
    """
    print('\n[ checking native gcc testcases...]\n')
    if not shared.NATIVECC or not shared.NATIVEXX:
        shared.fail_with_error('Native compiler (e.g. gcc/g++) was not found in PATH!')
        return
    for t in sorted(os.listdir(shared.get_test_dir('example'))):
        output_file = 'example'
        # base flags for the final link step; the compile step below produces
        # example.o first, and link-only flags get appended around this list
        cmd = ['-I' + os.path.join(shared.options.binaryen_root, 'src'), '-g', '-pthread', '-o', output_file]
        if t.endswith('.txt'):
            # check if there is a trace in the file, if so, we should build it
            out = subprocess.check_output([os.path.join(shared.options.binaryen_root, 'scripts', 'clean_c_api_trace.py'), os.path.join(shared.get_test_dir('example'), t)])
            if len(out) == 0:
                print('  (no trace in ', t, ')')
                continue
            print('  (will check trace in ', t, ')')
            src = 'trace.cpp'
            with open(src, 'wb') as o:
                o.write(out)
            expected = os.path.join(shared.get_test_dir('example'), t + '.txt')
        else:
            src = os.path.join(shared.get_test_dir('example'), t)
            # expectation file shares the source's basename, with .txt suffix
            expected = os.path.join(shared.get_test_dir('example'), '.'.join(t.split('.')[:-1]) + '.txt')
        if src.endswith(('.c', '.cpp')):
            # build the C file separately
            libpath = os.path.join(os.path.dirname(shared.options.binaryen_bin), 'lib')
            extra = [shared.NATIVECC, src, '-c', '-o', 'example.o',
                     '-I' + os.path.join(shared.options.binaryen_root, 'src'), '-g', '-L' + libpath, '-pthread']
            if src.endswith('.cpp'):
                extra += ['-std=c++11']
            if os.environ.get('COMPILER_FLAGS'):
                for f in os.environ.get('COMPILER_FLAGS').split(' '):
                    extra.append(f)
            print('build: ', ' '.join(extra))
            subprocess.check_call(extra)
            # Link against the binaryen C library DSO, using an executable-relative rpath
            cmd = ['example.o', '-L' + libpath, '-lbinaryen'] + cmd + ['-Wl,-rpath,' + libpath]
        else:
            # not a buildable source (e.g. expectation files) -- skip
            continue
        print('  ', t, src, expected)
        if os.environ.get('COMPILER_FLAGS'):
            for f in os.environ.get('COMPILER_FLAGS').split(' '):
                cmd.append(f)
        cmd = [shared.NATIVEXX, '-std=c++11'] + cmd
        print('link: ', ' '.join(cmd))
        subprocess.check_call(cmd)
        print('run...', output_file)
        actual = subprocess.check_output([os.path.abspath(output_file)]).decode('utf-8')
        os.remove(output_file)
        if sys.platform == 'darwin':
            # Also removes debug directory produced on Mac OS
            shutil.rmtree(output_file + '.dSYM')
        shared.fail_if_not_identical_to_file(actual, expected)
def run_unittest():
    """Discover and run the Python unit tests, folding any errors/failures
    into the shared failure count."""
    print('\n[ checking unit tests...]\n')
    # equivalent to `python -m unittest discover -s ./test -v`
    test_dir = os.path.dirname(shared.options.binaryen_test)
    suite = unittest.defaultTestLoader.discover(test_dir)
    runner = unittest.TextTestRunner(verbosity=2,
                                     failfast=shared.options.abort_on_first_failure)
    result = runner.run(suite)
    shared.num_failures += len(result.errors) + len(result.failures)
    if shared.options.abort_on_first_failure and shared.num_failures:
        raise Exception("unittest failed")
# Maps each suite name to its runner callable. An OrderedDict so that
# iterating TEST_SUITES.keys() (see main) runs suites in declaration order.
TEST_SUITES = OrderedDict([
    ('help-messages', run_help_tests),
    ('wasm-opt', run_wasm_opt_tests),
    ('asm2wasm', asm2wasm.test_asm2wasm),
    ('asm2wasm-binary', asm2wasm.test_asm2wasm_binary),
    ('wasm-dis', run_wasm_dis_tests),
    ('crash', run_crash_tests),
    ('dylink', run_dylink_tests),
    ('ctor-eval', run_ctor_eval_tests),
    ('wasm-metadce', run_wasm_metadce_tests),
    ('wasm-reduce', run_wasm_reduce_tests),
    ('spec', run_spec_tests),
    ('binaryenjs', binaryenjs.test_binaryen_js),
    ('lld', lld.test_wasm_emscripten_finalize),
    ('wasm2js', wasm2js.test_wasm2js),
    ('validator', run_validator_tests),
    ('gcc', run_gcc_tests),
    ('unit', run_unittest),
])
# Run all the tests
def main():
    """Run the requested test suites (all suites when none were requested).

    Returns the process exit code: 0 on success, 1 when any failures were
    recorded in shared.num_failures.
    """
    if shared.options.list_suites:
        for suite in TEST_SUITES.keys():
            print(suite)
        return 0

    for test in shared.requested or TEST_SUITES.keys():
        if test not in TEST_SUITES:
            # robustness: report a typo'd suite name clearly instead of
            # dying with a raw KeyError traceback
            shared.fail_with_error('unknown test suite: ' + test)
            continue
        TEST_SUITES[test]()

    # Check/display the results
    if shared.num_failures == 0:
        print('\n[ success! ]')
    if shared.warnings:
        print('\n' + '\n'.join(shared.warnings))
    if shared.num_failures > 0:
        print('\n[ ' + str(shared.num_failures) + ' failures! ]')
        return 1
    return 0


if __name__ == '__main__':
    sys.exit(main())
| 43.802875 | 171 | 0.609179 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.